From 7f5e51b940d65cf541403a50af74163b9aed5cb8 Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Thu, 21 Aug 2014 18:11:46 +0200
Subject: MDEV-34 delete storage/ndb and sql/*ndb* (and collateral changes)

remove:
* NDB from everywhere
* IM from mtr-v1
* packaging/rpm-oel and packaging/rpm-uln
* few unused spec files
* plug.in file
* .bzrignore
---
 .bzrignore | 1462 -- .gitignore | 1 - BUILD/SETUP.sh | 3 +- BUILD/build_mccge.sh | 1876 -- BUILD/compile-amd64-debug-max-no-ndb | 2 +- BUILD/compile-amd64-gprof-no-ndb | 2 +- BUILD/compile-amd64-max-sci | 23 - BUILD/compile-bintar | 2 +- BUILD/compile-dist | 2 +- BUILD/compile-ndb-autotest | 24 - BUILD/compile-pentium-debug-max-no-ndb | 2 +- BUILD/compile-pentium-valgrind-max-no-ndb | 2 +- BUILD/compile-pentium64-max-sci | 25 - BUILD/compile-ppc-debug-max-no-ndb | 2 +- CMakeLists.txt | 6 - client/mysql.cc | 2 - client/mysqltest.cc | 142 - cmake/make_dist.cmake.in | 9 - cmake/mysql_version.cmake | 8 - cmake/package_name.cmake | 7 +- config.h.cmake | 4 - debian/README.Maintainer | 3 - debian/mariadb-server-10.1.mysql.init | 2 +- debian/mariadb-server-10.1.preinst | 12 - debian/mariadb-server-10.1.templates | 6 - debian/mariadb-test-10.1.dirs | 13 - debian/patches/00list | 1 - .../patches/02_no_builtin_ndbcluster_plugin.dpatch | 18 - debian/po/ar.po | 15 - debian/po/ca.po | 15 - debian/po/cs.po | 15 - debian/po/da.po | 15 - debian/po/de.po | 18 - debian/po/es.po | 15 - debian/po/eu.po | 23 - debian/po/fr.po | 18 - debian/po/gl.po | 15 - debian/po/it.po | 17 - debian/po/ja.po | 17 - debian/po/nb.po | 15 - debian/po/nl.po | 15 - debian/po/pt.po | 15 - debian/po/pt_BR.po | 15 - debian/po/ro.po | 15 - debian/po/ru.po | 17 - debian/po/sv.po | 18 - debian/po/templates.pot | 15 - debian/po/tr.po | 15 - extra/CMakeLists.txt | 10 +- extra/perror.c | 58 +- include/my_base.h | 2 - include/my_global.h | 3 - include/mysqld_default_groups.h | 3 - man/mysql-test-run.pl.1 | 69 - man/mysqldump.1 | 8 - man/ndbd.8 | 819 - man/ndbd_redo_log_reader.1 | 176 - man/ndbmtd.8 | 388 - man/perror.1 | 31 - mysql-test/CMakeLists.txt | 10 +- mysql-test/collections/default.experimental | 2 +- mysql-test/collections/mysql-trunk.daily | 3 +- mysql-test/extra/rpl_tests/rpl_auto_increment.test | 1 - mysql-test/extra/rpl_tests/rpl_ddl.test | 5 +- .../extra/rpl_tests/rpl_extra_col_slave.test | 5 - mysql-test/extra/rpl_tests/rpl_foreign_key.test | 4 +- .../rpl_tests/rpl_implicit_commit_binlog.test | 284 +- mysql-test/extra/rpl_tests/rpl_innodb.test | 2 - mysql-test/extra/rpl_tests/rpl_loadfile.test | 4 - mysql-test/extra/rpl_tests/rpl_mixing_engines.test | 7 +- .../extra/rpl_tests/rpl_ndb_2multi_basic.test | 119 - mysql-test/extra/rpl_tests/rpl_ndb_2multi_eng.test | 347 - .../extra/rpl_tests/rpl_ndb_apply_status.test | 307 - mysql-test/extra/rpl_tests/rpl_partition.test | 4 +- mysql-test/extra/rpl_tests/rpl_row_blob.test | 8 - mysql-test/extra/rpl_tests/rpl_row_func003.test | 3 - mysql-test/extra/rpl_tests/rpl_row_sp003.test | 2 - mysql-test/extra/rpl_tests/rpl_sv_relay_space.test | 2 - mysql-test/extra/rpl_tests/rpl_trig004.test | 6 +- mysql-test/include/ctype_utf8mb4.inc | 36 - mysql-test/include/default_ndbd.cnf | 27 - mysql-test/include/have_multi_ndb.inc | 52 - mysql-test/include/have_ndb.inc | 2 - mysql-test/include/have_ndb_extra.inc | 2 - mysql-test/include/have_ndbapi_examples.inc | 4 - mysql-test/include/loaddata_autocom.inc | 3 - mysql-test/include/mtr_check.sql | 4 +- mysql-test/include/mtr_warnings.sql | 4 - mysql-test/include/ndb_backup.inc | 48 -
mysql-test/include/ndb_backup_print.inc | 9 - mysql-test/include/ndb_default_cluster.inc | 4 - mysql-test/include/ndb_master-slave.inc | 12 - mysql-test/include/ndb_master-slave_2ch.inc | 67 - mysql-test/include/ndb_not_readonly.inc | 36 - mysql-test/include/ndb_restore_master.inc | 8 - mysql-test/include/ndb_restore_slave_eoption.inc | 11 - mysql-test/include/ndb_setup_slave.inc | 27 - mysql-test/include/ndb_wait_connected.inc | 26 - mysql-test/include/not_ndb.inc | 7 - mysql-test/include/not_ndb_default.inc | 4 - mysql-test/include/ps_query.inc | 1 - mysql-test/include/rpl_init.inc | 16 +- mysql-test/include/rpl_multi_engine2.inc | 4 - mysql-test/include/safe_set_to_maybe_ro_var.inc | 23 - mysql-test/include/select_ndb_apply_status.inc | 13 - mysql-test/include/wait_for_ndb_to_binlog.inc | 26 - mysql-test/lib/My/ConfigFactory.pm | 156 - mysql-test/lib/mtr_cases.pm | 27 - mysql-test/lib/v1/mtr_cases.pl | 53 - mysql-test/lib/v1/mtr_process.pl | 136 +- mysql-test/lib/v1/mtr_report.pl | 4 - mysql-test/lib/v1/mysql-test-run.pl | 1158 +- mysql-test/lib/v1/ndb_config_1_node.ini | 47 - mysql-test/lib/v1/ndb_config_2_node.ini | 55 - mysql-test/mysql-test-run.pl | 454 - mysql-test/r/have_ndb_extra.require | 3 - mysql-test/r/have_ndbapi_examples.require | 2 - mysql-test/r/information_schema.result | 2 +- mysql-test/r/information_schema_all_engines.result | 2 +- mysql-test/r/ndb_default_cluster.require | 2 - mysql-test/r/not_ndb.require | 2 - mysql-test/r/not_ndb_default.require | 2 - mysql-test/std_data/funcs_1/ndb_tb1.txt | 10 - mysql-test/std_data/funcs_1/ndb_tb2.txt | 10 - mysql-test/std_data/funcs_1/ndb_tb3.txt | 10 - mysql-test/std_data/funcs_1/ndb_tb4.txt | 10 - mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data | Bin 116228 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data | Bin 113556 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl | Bin 7936 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-1.1.log | Bin 7592 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl | Bin 7936 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-1.2.log | Bin 7128 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2-0.1.Data | Bin 19084 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2-0.2.Data | Bin 16392 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2.1.ctl | Bin 2592 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2.1.log | Bin 44 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2.2.ctl | Bin 2592 -> 0 bytes mysql-test/std_data/ndb_backup50/BACKUP-2.2.log | Bin 44 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data | Bin 110596 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data | Bin 103180 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl | Bin 12320 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1.1.log | Bin 7592 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl | Bin 12320 -> 0 bytes mysql-test/std_data/ndb_backup51/BACKUP-1.2.log | Bin 6996 -> 0 bytes .../ndb_backup51_data_be/BACKUP-1-0.1.Data | Bin 43068 -> 0 bytes .../ndb_backup51_data_be/BACKUP-1-0.2.Data | Bin 30276 -> 0 bytes .../std_data/ndb_backup51_data_be/BACKUP-1.1.ctl | Bin 33964 -> 0 bytes .../std_data/ndb_backup51_data_be/BACKUP-1.1.log | Bin 44 -> 0 bytes .../std_data/ndb_backup51_data_be/BACKUP-1.2.ctl | Bin 33964 -> 0 bytes .../std_data/ndb_backup51_data_be/BACKUP-1.2.log | Bin 44 -> 0 bytes .../ndb_backup51_data_le/BACKUP-1-0.1.Data | Bin 23788 -> 0 bytes .../ndb_backup51_data_le/BACKUP-1-0.2.Data | Bin 49556 -> 0 bytes 
.../std_data/ndb_backup51_data_le/BACKUP-1.1.ctl | Bin 33964 -> 0 bytes .../std_data/ndb_backup51_data_le/BACKUP-1.1.log | Bin 44 -> 0 bytes .../std_data/ndb_backup51_data_le/BACKUP-1.2.ctl | Bin 33964 -> 0 bytes .../std_data/ndb_backup51_data_le/BACKUP-1.2.log | Bin 44 -> 0 bytes mysql-test/std_data/ndb_config_config.ini | 55 - mysql-test/std_data/ndb_config_mycnf1.cnf | 15 - mysql-test/std_data/ndb_config_mycnf2.cnf | 31 - .../suite/binlog/r/binlog_multi_engine.result | 106 - .../suite/binlog/r/binlog_old_versions.result | 2 +- mysql-test/suite/binlog/t/binlog_multi_engine.test | 110 - mysql-test/suite/binlog/t/binlog_old_versions.test | 4 +- .../binlog/t/binlog_row_mysqlbinlog_verbose.test | 19 +- mysql-test/suite/binlog/t/binlog_unsafe.test | 4 - mysql-test/suite/engines/funcs/t/rpl_bit.test | 1 - .../engines/funcs/t/rpl_err_ignoredtable.test | 2 - mysql-test/suite/engines/funcs/t/rpl_loadfile.test | 4 - mysql-test/suite/engines/funcs/t/rpl_log_pos.test | 1 - mysql-test/suite/engines/funcs/t/rpl_ps.test | 2 - .../suite/engines/funcs/t/rpl_rbr_to_sbr.test | 1 - .../engines/funcs/t/rpl_row_max_relay_size.test | 1 - .../suite/engines/funcs/t/rpl_row_sp005.test | 2 - .../suite/engines/funcs/t/rpl_row_sp009.test | 2 - .../suite/engines/funcs/t/rpl_row_until.test | 1 - .../suite/engines/funcs/t/rpl_row_view01.test | 7 - .../suite/engines/funcs/t/rpl_sp_effects.test | 1 - .../engines/funcs/t/rpl_switch_stm_row_mixed.test | 1 - mysql-test/suite/engines/iuds/t/type_bit_iuds.test | 128 - .../suite/engines/rr_trx/run_stress_tx_rr.pl | 3 +- mysql-test/suite/federated/federatedx.test | 3 +- .../suite/funcs_1/datadict/datadict_load.inc | 14 - .../suite/funcs_1/datadict/processlist_priv.inc | 1 - .../suite/funcs_1/datadict/processlist_val.inc | 1 - mysql-test/suite/funcs_1/datadict/tables2.inc | 8 +- mysql-test/suite/funcs_1/include/ndb_tb1.inc | 70 - mysql-test/suite/funcs_1/include/ndb_tb2.inc | 63 - mysql-test/suite/funcs_1/include/ndb_tb3.inc | 70 - mysql-test/suite/funcs_1/include/ndb_tb4.inc | 70 - mysql-test/suite/funcs_1/r/is_tables_innodb.result | 12 +- mysql-test/suite/funcs_1/r/is_tables_is.result | 12 +- .../suite/funcs_1/r/is_tables_is_embedded.result | 12 +- mysql-test/suite/funcs_1/r/is_tables_memory.result | 12 +- mysql-test/suite/funcs_1/r/is_tables_myisam.result | 12 +- .../funcs_1/r/is_tables_myisam_embedded.result | 12 +- mysql-test/suite/funcs_1/r/is_tables_mysql.result | 12 +- .../funcs_1/r/is_tables_mysql_embedded.result | 12 +- .../suite/funcs_1/triggers/triggers_1011ext.inc | 2 - mysql-test/suite/funcs_1/views/views_master.inc | 9 +- .../suite/funcs_2/charset/charset_master.test | 4 +- mysql-test/suite/funcs_2/readme.txt | 5 - .../suite/innodb/r/innodb_multi_update.result | 2 +- mysql-test/suite/innodb/t/innodb_multi_update.test | 2 +- mysql-test/suite/parts/inc/partition.pre | 33 - .../suite/parts/inc/partition_auto_increment.inc | 2 +- mysql-test/suite/parts/inc/partition_check.inc | 7 - mysql-test/suite/parts/inc/partition_mgm.inc | 5 - .../parts/inc/partition_supported_sql_funcs.inc | 2 - mysql-test/suite/parts/inc/partition_syntax_2.inc | 22 - mysql-test/suite/parts/inc/partition_trigg3.inc | 2 +- mysql-test/suite/parts/inc/partition_value.inc | 6 - .../r/partition_auto_increment_blackhole.result | 2 +- .../suite/rpl/r/rpl_foreign_key_innodb.result | 2 +- .../suite/rpl/r/rpl_row_basic_8partition.result | 3 - mysql-test/suite/rpl/r/rpl_row_trig004.result | 2 +- mysql-test/suite/rpl/r/rpl_row_view01.result | 3 - mysql-test/suite/rpl/t/rpl_EE_err.test | 1 - 
mysql-test/suite/rpl/t/rpl_auto_increment.test | 1 - mysql-test/suite/rpl/t/rpl_bit.test | 1 - mysql-test/suite/rpl/t/rpl_bug26395.test | 3 - mysql-test/suite/rpl/t/rpl_commit_after_flush.test | 1 - mysql-test/suite/rpl/t/rpl_ddl.test | 1 - mysql-test/suite/rpl/t/rpl_deadlock_innodb.test | 1 - mysql-test/suite/rpl/t/rpl_delete_no_where.test | 4 - mysql-test/suite/rpl/t/rpl_drop_temp.test | 2 - mysql-test/suite/rpl/t/rpl_err_ignoredtable.test | 2 - mysql-test/suite/rpl/t/rpl_failed_optimize.test | 1 - mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test | 1 - mysql-test/suite/rpl/t/rpl_insert_id.test | 1 - mysql-test/suite/rpl/t/rpl_insert_id_pk.test | 1 - mysql-test/suite/rpl/t/rpl_insert_ignore.test | 1 - mysql-test/suite/rpl/t/rpl_loaddata.test | 1 - mysql-test/suite/rpl/t/rpl_loadfile.test | 1 - mysql-test/suite/rpl/t/rpl_log_pos.test | 1 - mysql-test/suite/rpl/t/rpl_multi_engine.test | 4 - mysql-test/suite/rpl/t/rpl_multi_update.test | 1 - mysql-test/suite/rpl/t/rpl_multi_update2.test | 3 - mysql-test/suite/rpl/t/rpl_multi_update3.test | 3 - mysql-test/suite/rpl/t/rpl_optimize.test | 3 - mysql-test/suite/rpl/t/rpl_ps.test | 3 - mysql-test/suite/rpl/t/rpl_rbr_to_sbr.test | 1 - mysql-test/suite/rpl/t/rpl_relay_space_innodb.test | 1 - mysql-test/suite/rpl/t/rpl_relay_space_myisam.test | 1 - mysql-test/suite/rpl/t/rpl_relayrotate.test | 3 - mysql-test/suite/rpl/t/rpl_row_001.test | 2 - mysql-test/suite/rpl/t/rpl_row_USER.test | 1 - mysql-test/suite/rpl/t/rpl_row_UUID.test | 2 - mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test | 4 - .../suite/rpl/t/rpl_row_basic_8partition.test | 5 +- mysql-test/suite/rpl/t/rpl_row_blob_innodb.test | 2 - mysql-test/suite/rpl/t/rpl_row_blob_myisam.test | 2 - mysql-test/suite/rpl/t/rpl_row_delayed_ins.test | 1 - mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test | 4 - mysql-test/suite/rpl/t/rpl_row_func003.test | 2 - .../suite/rpl/t/rpl_row_loaddata_concurrent.test | 1 - mysql-test/suite/rpl/t/rpl_row_log.test | 7 +- mysql-test/suite/rpl/t/rpl_row_log_innodb.test | 6 +- mysql-test/suite/rpl/t/rpl_row_max_relay_size.test | 1 - .../suite/rpl/t/rpl_row_rec_comp_myisam.test | 1 - mysql-test/suite/rpl/t/rpl_row_sp002_innodb.test | 1 - mysql-test/suite/rpl/t/rpl_row_sp003.test | 2 - mysql-test/suite/rpl/t/rpl_row_sp005.test | 2 - mysql-test/suite/rpl/t/rpl_row_sp006_InnoDB.test | 2 - mysql-test/suite/rpl/t/rpl_row_sp007_innodb.test | 1 - mysql-test/suite/rpl/t/rpl_row_sp009.test | 2 - mysql-test/suite/rpl/t/rpl_row_trig004.test | 4 - mysql-test/suite/rpl/t/rpl_row_until.test | 1 - mysql-test/suite/rpl/t/rpl_row_view01.test | 13 - mysql-test/suite/rpl/t/rpl_sp_effects.test | 1 - .../suite/rpl/t/rpl_stm_loaddata_concurrent.test | 1 - mysql-test/suite/rpl/t/rpl_stm_loadfile.test | 1 - .../suite/rpl/t/rpl_switch_stm_row_mixed.test | 1 - mysql-test/suite/rpl/t/rpl_truncate_2myisam.test | 1 - mysql-test/suite/rpl/t/rpl_truncate_3innodb.test | 1 - mysql-test/suite/stress/include/ddl7.inc | 4 +- .../suite/sys_vars/r/have_ndbcluster_basic.result | 53 - mysql-test/t/information_schema.test | 2 +- mysql-test/t/information_schema_all_engines.test | 2 +- mysql-test/t/log_tables.test | 2 - mysql-test/t/mysqlbinlog.test | 15 - mysql-test/t/mysqld--help.test | 2 +- mysql-test/t/sp.test | 2 +- mysql-test/t/system_mysql_db_fix50117.test | 3 - mysql-test/valgrind.supp | 15 - mysys/my_compress.c | 125 - packaging/rpm-oel/CMakeLists.txt | 36 - packaging/rpm-oel/filter-provides.sh | 6 - packaging/rpm-oel/filter-requires.sh | 6 - packaging/rpm-oel/my.cnf | 31 - packaging/rpm-oel/my_config.h | 
30 - .../rpm-oel/mysql-5.5-libmysqlclient-symbols.patch | 982 - packaging/rpm-oel/mysql-systemd-start | 52 - packaging/rpm-oel/mysql.conf | 1 - packaging/rpm-oel/mysql.init | 209 - packaging/rpm-oel/mysql.spec.in | 1666 -- packaging/rpm-oel/mysql_config.sh | 28 - packaging/rpm-oel/mysqld.service | 48 - packaging/rpm-uln/CMakeLists.txt | 38 - packaging/rpm-uln/README-ULN | 15 - packaging/rpm-uln/README.mysql-docs | 4 - packaging/rpm-uln/filter-requires-mysql.sh | 3 - packaging/rpm-uln/generate-tarball.sh | 15 - packaging/rpm-uln/my.cnf | 10 - packaging/rpm-uln/my_config.h | 29 - packaging/rpm-uln/mysql-5.5-errno.patch | 21 - packaging/rpm-uln/mysql-5.5-fix-tests.patch | 34 - packaging/rpm-uln/mysql-5.5-libdir.patch | 28 - packaging/rpm-uln/mysql-5.5-mtr1.patch | 25 - packaging/rpm-uln/mysql-5.5-stack-guard.patch | 140 - packaging/rpm-uln/mysql-5.5-testing.patch | 23 - packaging/rpm-uln/mysql-chain-certs.patch | 45 - packaging/rpm-uln/mysql-embedded-check.c | 26 - packaging/rpm-uln/mysql-expired-certs.patch | 555 - packaging/rpm-uln/mysql-install-test.patch | 33 - packaging/rpm-uln/mysql-strmov.patch | 32 - packaging/rpm-uln/mysql.init | 209 - packaging/rpm-uln/mysql.spec.sh | 1991 -- packaging/rpm-uln/scriptstub.c | 32 - plugin/handler_socket/plug.in | 20 - scripts/make_binary_distribution.sh | 386 - scripts/mysql_install_db.sh | 2 +- sql-bench/server-cfg.sh | 8 - sql/ha_ndbcluster.cc | 11060 ----------- sql/ha_ndbcluster.h | 599 - sql/ha_ndbcluster_binlog.cc | 4425 ----- sql/ha_ndbcluster_binlog.h | 239 - sql/ha_ndbcluster_cond.cc | 1475 -- sql/ha_ndbcluster_cond.h | 500 - sql/ha_ndbcluster_tables.h | 29 - sql/ha_partition.h | 33 +- sql/handler.cc | 144 +- sql/handler.h | 23 - sql/lex.h | 2 - sql/log.cc | 9 - sql/log_event.cc | 152 +- sql/log_event_old.cc | 133 - sql/multi_range_read.cc | 6 - sql/mysqld.cc | 6 - sql/opt_range.cc | 8 - sql/partition_info.cc | 9 +- sql/rpl_constants.h | 2 - sql/share/errmsg-utf8.txt | 10 +- sql/slave.cc | 23 +- sql/sql_class.h | 8 +- sql/sql_db.cc | 13 - sql/sql_join_cache.h | 2 +- sql/sql_partition.cc | 2 +- sql/sql_partition_admin.cc | 7 +- sql/sql_plugin.cc | 19 +- sql/sql_repl.cc | 9 - sql/sql_select.cc | 9 - sql/sql_table.cc | 21 - sql/sql_table.h | 3 +- sql/sql_yacc.yy | 2 - storage/innobase/handler/i_s.cc | 2 +- storage/ndb/MAINTAINERS | 165 - storage/ndb/Makefile.am | 47 - storage/ndb/bin/.empty | 0 storage/ndb/bin/check-regression.sh | 180 - storage/ndb/bin/makeTestPrograms_html.sh | 22 - storage/ndb/config/common.mk.am | 30 - storage/ndb/config/make-win-dsw.sh | 57 - storage/ndb/config/type_kernel.mk.am | 36 - storage/ndb/config/type_mgmapiclient.mk.am | 17 - storage/ndb/config/type_ndbapi.mk.am | 32 - storage/ndb/config/type_ndbapiclient.mk.am | 17 - storage/ndb/config/type_ndbapitest.mk.am | 32 - storage/ndb/config/type_ndbapitools.mk.am | 33 - storage/ndb/config/type_util.mk.am | 25 - storage/ndb/config/win-includes | 24 - storage/ndb/config/win-lib.am | 116 - storage/ndb/config/win-libraries | 72 - storage/ndb/config/win-name | 23 - storage/ndb/config/win-prg.am | 114 - storage/ndb/config/win-sources | 24 - storage/ndb/demos/1-node/1-api-3/Ndb.cfg | 2 - storage/ndb/demos/1-node/1-db-2/Ndb.cfg | 2 - storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg | 2 - .../ndb/demos/1-node/1-mgm-1/template_config.ini | 70 - storage/ndb/demos/2-node/2-api-4/Ndb.cfg | 2 - storage/ndb/demos/2-node/2-api-5/Ndb.cfg | 2 - storage/ndb/demos/2-node/2-api-6/Ndb.cfg | 2 - storage/ndb/demos/2-node/2-api-7/Ndb.cfg | 2 - storage/ndb/demos/2-node/2-db-2/Ndb.cfg | 2 - 
storage/ndb/demos/2-node/2-db-3/Ndb.cfg | 2 - storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg | 2 - .../ndb/demos/2-node/2-mgm-1/template_config.ini | 157 - .../config-templates/config_template-1-REP.ini | 87 - .../demos/config-templates/config_template-4.ini | 336 - .../config-templates/config_template-install.ini | 64 - storage/ndb/demos/run_demo1-PS-SS_common.sh | 50 - storage/ndb/demos/run_demo1-PS.sh | 30 - storage/ndb/demos/run_demo1-SS.sh | 30 - storage/ndb/demos/run_demo1.sh | 41 - storage/ndb/demos/run_demo2.sh | 54 - storage/ndb/docs/Makefile.am | 130 - storage/ndb/docs/README | 30 - storage/ndb/docs/doxygen/Doxyfile.mgmapi | 894 - storage/ndb/docs/doxygen/Doxyfile.ndb | 955 - storage/ndb/docs/doxygen/Doxyfile.ndbapi | 893 - storage/ndb/docs/doxygen/Doxyfile.odbc | 939 - storage/ndb/docs/doxygen/Doxyfile.test | 923 - storage/ndb/docs/doxygen/postdoxy.pl | 112 - storage/ndb/docs/doxygen/predoxy.pl | 49 - storage/ndb/docs/wl2077.txt | 48 - storage/ndb/include/Makefile.am | 66 - storage/ndb/include/debugger/DebuggerNames.hpp | 71 - storage/ndb/include/debugger/EventLogger.hpp | 178 - storage/ndb/include/debugger/GrepError.hpp | 94 - .../ndb/include/debugger/SignalLoggerManager.hpp | 174 - storage/ndb/include/editline/editline.h | 38 - storage/ndb/include/kernel/AttributeDescriptor.hpp | 236 - storage/ndb/include/kernel/AttributeHeader.hpp | 247 - storage/ndb/include/kernel/AttributeList.hpp | 38 - storage/ndb/include/kernel/BlockNumbers.h | 88 - storage/ndb/include/kernel/GlobalSignalNumbers.h | 990 - storage/ndb/include/kernel/GrepEvent.hpp | 59 - storage/ndb/include/kernel/Interpreter.hpp | 284 - storage/ndb/include/kernel/LogLevel.hpp | 163 - storage/ndb/include/kernel/NodeBitmask.hpp | 89 - storage/ndb/include/kernel/NodeInfo.hpp | 103 - storage/ndb/include/kernel/NodeState.hpp | 319 - storage/ndb/include/kernel/RefConvert.hpp | 47 - .../ndb/include/kernel/kernel_config_parameters.h | 67 - storage/ndb/include/kernel/kernel_types.h | 83 - storage/ndb/include/kernel/ndb_limits.h | 165 - storage/ndb/include/kernel/signaldata/AbortAll.hpp | 88 - storage/ndb/include/kernel/signaldata/AccFrag.hpp | 89 - storage/ndb/include/kernel/signaldata/AccLock.hpp | 66 - storage/ndb/include/kernel/signaldata/AccScan.hpp | 224 - .../include/kernel/signaldata/AccSizeAltReq.hpp | 53 - .../ndb/include/kernel/signaldata/AllocNodeId.hpp | 67 - .../ndb/include/kernel/signaldata/AlterIndx.hpp | 271 - storage/ndb/include/kernel/signaldata/AlterTab.hpp | 125 - .../ndb/include/kernel/signaldata/AlterTable.hpp | 287 - .../ndb/include/kernel/signaldata/AlterTrig.hpp | 288 - .../ndb/include/kernel/signaldata/ApiBroadcast.hpp | 31 - .../include/kernel/signaldata/ApiRegSignalData.hpp | 93 - .../ndb/include/kernel/signaldata/ApiVersion.hpp | 59 - .../include/kernel/signaldata/ArbitSignalData.hpp | 157 - storage/ndb/include/kernel/signaldata/AttrInfo.hpp | 54 - .../include/kernel/signaldata/BackupContinueB.hpp | 40 - .../ndb/include/kernel/signaldata/BackupImpl.hpp | 385 - .../include/kernel/signaldata/BackupSignalData.hpp | 261 - .../include/kernel/signaldata/BlockCommitOrd.hpp | 62 - .../ndb/include/kernel/signaldata/BuildIndx.hpp | 308 - .../include/kernel/signaldata/CheckNodeGroups.hpp | 61 - .../include/kernel/signaldata/CloseComReqConf.hpp | 53 - storage/ndb/include/kernel/signaldata/CmInit.hpp | 48 - .../include/kernel/signaldata/CmRegSignalData.hpp | 213 - .../ndb/include/kernel/signaldata/CmvmiCfgConf.hpp | 49 - .../include/kernel/signaldata/CntrMasterConf.hpp | 47 - 
.../include/kernel/signaldata/CntrMasterReq.hpp | 50 - .../ndb/include/kernel/signaldata/CntrStart.hpp | 85 - .../include/kernel/signaldata/ConfigParamId.hpp | 71 - .../kernel/signaldata/ContinueFragmented.hpp | 36 - .../ndb/include/kernel/signaldata/CopyActive.hpp | 84 - storage/ndb/include/kernel/signaldata/CopyFrag.hpp | 138 - .../ndb/include/kernel/signaldata/CopyGCIReq.hpp | 63 - .../ndb/include/kernel/signaldata/CreateEvnt.hpp | 485 - .../include/kernel/signaldata/CreateFilegroup.hpp | 202 - .../kernel/signaldata/CreateFilegroupImpl.hpp | 195 - .../ndb/include/kernel/signaldata/CreateFrag.hpp | 61 - .../kernel/signaldata/CreateFragmentation.hpp | 99 - .../ndb/include/kernel/signaldata/CreateIndx.hpp | 301 - .../ndb/include/kernel/signaldata/CreateObj.hpp | 107 - .../ndb/include/kernel/signaldata/CreateTab.hpp | 109 - .../ndb/include/kernel/signaldata/CreateTable.hpp | 150 - .../ndb/include/kernel/signaldata/CreateTrig.hpp | 423 - storage/ndb/include/kernel/signaldata/DiAddTab.hpp | 92 - .../ndb/include/kernel/signaldata/DiGetNodes.hpp | 62 - storage/ndb/include/kernel/signaldata/DictLock.hpp | 78 - .../ndb/include/kernel/signaldata/DictObjOp.hpp | 104 - .../include/kernel/signaldata/DictSchemaInfo.hpp | 45 - .../include/kernel/signaldata/DictSizeAltReq.hpp | 51 - .../ndb/include/kernel/signaldata/DictStart.hpp | 54 - .../ndb/include/kernel/signaldata/DictTabInfo.hpp | 727 - .../ndb/include/kernel/signaldata/DihAddFrag.hpp | 62 - .../ndb/include/kernel/signaldata/DihContinueB.hpp | 77 - .../ndb/include/kernel/signaldata/DihFragCount.hpp | 67 - .../include/kernel/signaldata/DihSizeAltReq.hpp | 50 - .../ndb/include/kernel/signaldata/DihStartTab.hpp | 65 - .../include/kernel/signaldata/DihSwitchReplica.hpp | 72 - .../include/kernel/signaldata/DisconnectRep.hpp | 61 - .../include/kernel/signaldata/DropFilegroup.hpp | 196 - .../kernel/signaldata/DropFilegroupImpl.hpp | 171 - storage/ndb/include/kernel/signaldata/DropIndx.hpp | 257 - storage/ndb/include/kernel/signaldata/DropObj.hpp | 118 - storage/ndb/include/kernel/signaldata/DropTab.hpp | 116 - .../ndb/include/kernel/signaldata/DropTabFile.hpp | 64 - .../ndb/include/kernel/signaldata/DropTable.hpp | 83 - storage/ndb/include/kernel/signaldata/DropTrig.hpp | 300 - .../ndb/include/kernel/signaldata/DumpStateOrd.hpp | 157 - storage/ndb/include/kernel/signaldata/EmptyLcp.hpp | 77 - storage/ndb/include/kernel/signaldata/EndTo.hpp | 49 - .../ndb/include/kernel/signaldata/EventReport.hpp | 102 - .../kernel/signaldata/EventSubscribeReq.hpp | 66 - .../ndb/include/kernel/signaldata/ExecFragReq.hpp | 43 - storage/ndb/include/kernel/signaldata/Extent.hpp | 122 - storage/ndb/include/kernel/signaldata/FailRep.hpp | 66 - .../ndb/include/kernel/signaldata/FireTrigOrd.hpp | 216 - .../ndb/include/kernel/signaldata/FsAppendReq.hpp | 58 - .../ndb/include/kernel/signaldata/FsCloseReq.hpp | 88 - storage/ndb/include/kernel/signaldata/FsConf.hpp | 81 - .../ndb/include/kernel/signaldata/FsOpenReq.hpp | 335 - .../include/kernel/signaldata/FsReadWriteReq.hpp | 172 - storage/ndb/include/kernel/signaldata/FsRef.hpp | 100 - .../ndb/include/kernel/signaldata/FsRemoveReq.hpp | 78 - storage/ndb/include/kernel/signaldata/GCPSave.hpp | 98 - .../ndb/include/kernel/signaldata/GetTabInfo.hpp | 128 - .../ndb/include/kernel/signaldata/GetTableId.hpp | 93 - storage/ndb/include/kernel/signaldata/GrepImpl.hpp | 891 - .../ndb/include/kernel/signaldata/HotSpareRep.hpp | 48 - .../ndb/include/kernel/signaldata/IndxAttrInfo.hpp | 56 - .../ndb/include/kernel/signaldata/IndxKeyInfo.hpp | 
56 - .../kernel/signaldata/InvalidateNodeLCPConf.hpp | 41 - .../kernel/signaldata/InvalidateNodeLCPReq.hpp | 42 - storage/ndb/include/kernel/signaldata/KeyInfo.hpp | 48 - storage/ndb/include/kernel/signaldata/LCP.hpp | 219 - .../include/kernel/signaldata/LgmanContinueB.hpp | 39 - .../ndb/include/kernel/signaldata/ListTables.hpp | 178 - storage/ndb/include/kernel/signaldata/LqhFrag.hpp | 260 - storage/ndb/include/kernel/signaldata/LqhKey.hpp | 613 - .../include/kernel/signaldata/LqhSizeAltReq.hpp | 53 - .../ndb/include/kernel/signaldata/LqhTransConf.hpp | 218 - .../include/kernel/signaldata/ManagementServer.hpp | 87 - .../ndb/include/kernel/signaldata/MasterGCP.hpp | 84 - .../ndb/include/kernel/signaldata/MasterLCP.hpp | 86 - .../include/kernel/signaldata/NFCompleteRep.hpp | 64 - storage/ndb/include/kernel/signaldata/NdbSttor.hpp | 85 - .../include/kernel/signaldata/NdbfsContinueB.hpp | 36 - storage/ndb/include/kernel/signaldata/NextScan.hpp | 60 - .../ndb/include/kernel/signaldata/NodeFailRep.hpp | 42 - .../kernel/signaldata/NodeStateSignalData.hpp | 94 - .../ndb/include/kernel/signaldata/PackedSignal.hpp | 43 - .../include/kernel/signaldata/PgmanContinueB.hpp | 37 - .../ndb/include/kernel/signaldata/PrepDropTab.hpp | 172 - .../include/kernel/signaldata/PrepFailReqRef.hpp | 49 - .../ndb/include/kernel/signaldata/ReadConfig.hpp | 40 - .../include/kernel/signaldata/ReadNodesConf.hpp | 85 - .../ndb/include/kernel/signaldata/RelTabMem.hpp | 69 - storage/ndb/include/kernel/signaldata/RepImpl.hpp | 500 - .../include/kernel/signaldata/RestoreContinueB.hpp | 38 - .../ndb/include/kernel/signaldata/RestoreImpl.hpp | 66 - .../ndb/include/kernel/signaldata/ResumeReq.hpp | 69 - storage/ndb/include/kernel/signaldata/RouteOrd.hpp | 35 - storage/ndb/include/kernel/signaldata/ScanFrag.hpp | 399 - storage/ndb/include/kernel/signaldata/ScanTab.hpp | 474 - .../include/kernel/signaldata/SetLogLevelOrd.hpp | 85 - .../ndb/include/kernel/signaldata/SetVarReq.hpp | 84 - .../ndb/include/kernel/signaldata/SignalData.hpp | 227 - .../include/kernel/signaldata/SignalDataPrint.hpp | 36 - .../include/kernel/signaldata/SignalDroppedRep.hpp | 44 - .../ndb/include/kernel/signaldata/SrFragidConf.hpp | 43 - .../ndb/include/kernel/signaldata/StartFragReq.hpp | 48 - .../ndb/include/kernel/signaldata/StartInfo.hpp | 84 - storage/ndb/include/kernel/signaldata/StartMe.hpp | 63 - storage/ndb/include/kernel/signaldata/StartOrd.hpp | 48 - .../ndb/include/kernel/signaldata/StartPerm.hpp | 75 - storage/ndb/include/kernel/signaldata/StartRec.hpp | 61 - storage/ndb/include/kernel/signaldata/StartTo.hpp | 50 - storage/ndb/include/kernel/signaldata/StopMe.hpp | 70 - storage/ndb/include/kernel/signaldata/StopPerm.hpp | 96 - storage/ndb/include/kernel/signaldata/StopReq.hpp | 217 - storage/ndb/include/kernel/signaldata/SumaImpl.hpp | 548 - .../ndb/include/kernel/signaldata/SystemError.hpp | 59 - .../ndb/include/kernel/signaldata/TamperOrd.hpp | 40 - storage/ndb/include/kernel/signaldata/TcCommit.hpp | 76 - .../ndb/include/kernel/signaldata/TcContinueB.hpp | 52 - storage/ndb/include/kernel/signaldata/TcHbRep.hpp | 64 - storage/ndb/include/kernel/signaldata/TcIndx.hpp | 126 - .../ndb/include/kernel/signaldata/TcKeyConf.hpp | 132 - .../include/kernel/signaldata/TcKeyFailConf.hpp | 53 - storage/ndb/include/kernel/signaldata/TcKeyRef.hpp | 53 - storage/ndb/include/kernel/signaldata/TcKeyReq.hpp | 530 - .../include/kernel/signaldata/TcRollbackRep.hpp | 51 - .../ndb/include/kernel/signaldata/TcSizeAltReq.hpp | 52 - 
storage/ndb/include/kernel/signaldata/TestOrd.hpp | 229 - .../ndb/include/kernel/signaldata/TransIdAI.hpp | 59 - .../ndb/include/kernel/signaldata/TrigAttrInfo.hpp | 138 - .../include/kernel/signaldata/TsmanContinueB.hpp | 37 - .../ndb/include/kernel/signaldata/TupCommit.hpp | 52 - storage/ndb/include/kernel/signaldata/TupFrag.hpp | 210 - storage/ndb/include/kernel/signaldata/TupKey.hpp | 126 - .../include/kernel/signaldata/TupSizeAltReq.hpp | 58 - storage/ndb/include/kernel/signaldata/TuxBound.hpp | 59 - .../ndb/include/kernel/signaldata/TuxContinueB.hpp | 30 - storage/ndb/include/kernel/signaldata/TuxMaint.hpp | 70 - .../include/kernel/signaldata/TuxSizeAltReq.hpp | 48 - storage/ndb/include/kernel/signaldata/UpdateTo.hpp | 59 - .../include/kernel/signaldata/UpgradeStartup.hpp | 54 - .../ndb/include/kernel/signaldata/UtilDelete.hpp | 121 - .../ndb/include/kernel/signaldata/UtilExecute.hpp | 135 - storage/ndb/include/kernel/signaldata/UtilLock.hpp | 334 - .../ndb/include/kernel/signaldata/UtilPrepare.hpp | 161 - .../ndb/include/kernel/signaldata/UtilRelease.hpp | 83 - .../ndb/include/kernel/signaldata/UtilSequence.hpp | 101 - storage/ndb/include/kernel/signaldata/WaitGCP.hpp | 112 - storage/ndb/include/kernel/trigger_definitions.h | 62 - storage/ndb/include/logger/ConsoleLogHandler.hpp | 57 - storage/ndb/include/logger/FileLogHandler.hpp | 110 - storage/ndb/include/logger/LogHandler.hpp | 221 - storage/ndb/include/logger/Logger.hpp | 301 - storage/ndb/include/logger/SysLogHandler.hpp | 99 - storage/ndb/include/mgmapi/mgmapi.h | 1186 -- .../ndb/include/mgmapi/mgmapi_config_parameters.h | 208 - .../mgmapi/mgmapi_config_parameters_debug.h | 24 - storage/ndb/include/mgmapi/mgmapi_debug.h | 154 - storage/ndb/include/mgmapi/mgmapi_error.h | 122 - storage/ndb/include/mgmapi/ndb_logevent.h | 706 - storage/ndb/include/mgmapi/ndb_logevent.txt | 56 - storage/ndb/include/mgmapi/ndbd_exit_codes.h | 167 - storage/ndb/include/mgmcommon/ConfigRetriever.hpp | 112 - storage/ndb/include/mgmcommon/IPCConfig.hpp | 83 - .../ndb/include/mgmcommon/MgmtErrorReporter.hpp | 68 - storage/ndb/include/ndb_constants.h | 101 - storage/ndb/include/ndb_global.h.in | 150 - storage/ndb/include/ndb_init.h | 32 - storage/ndb/include/ndb_net.h | 22 - storage/ndb/include/ndb_types.h.in | 81 - storage/ndb/include/ndb_version.h.in | 141 - storage/ndb/include/ndbapi/Ndb.hpp | 1806 -- storage/ndb/include/ndbapi/NdbApi.hpp | 35 - storage/ndb/include/ndbapi/NdbBlob.hpp | 410 - storage/ndb/include/ndbapi/NdbDictionary.hpp | 1944 -- storage/ndb/include/ndbapi/NdbError.hpp | 250 - storage/ndb/include/ndbapi/NdbEventOperation.hpp | 268 - storage/ndb/include/ndbapi/NdbIndexOperation.hpp | 190 - .../ndb/include/ndbapi/NdbIndexScanOperation.hpp | 206 - storage/ndb/include/ndbapi/NdbIndexStat.hpp | 147 - storage/ndb/include/ndbapi/NdbOperation.hpp | 1356 -- storage/ndb/include/ndbapi/NdbPool.hpp | 36 - storage/ndb/include/ndbapi/NdbRecAttr.hpp | 477 - storage/ndb/include/ndbapi/NdbReceiver.hpp | 150 - storage/ndb/include/ndbapi/NdbScanFilter.hpp | 205 - storage/ndb/include/ndbapi/NdbScanOperation.hpp | 318 - storage/ndb/include/ndbapi/NdbTransaction.hpp | 1059 - .../ndb/include/ndbapi/ndb_cluster_connection.hpp | 148 - storage/ndb/include/ndbapi/ndb_opt_defaults.h | 23 - storage/ndb/include/ndbapi/ndbapi_limits.h | 33 - storage/ndb/include/ndbapi/ndberror.h | 110 - storage/ndb/include/newtonapi/dba.h | 730 - storage/ndb/include/newtonapi/defs/pcn_types.h | 34 - storage/ndb/include/portlib/NdbCondition.h | 94 - 
storage/ndb/include/portlib/NdbConfig.h | 39 - storage/ndb/include/portlib/NdbDaemon.h | 72 - storage/ndb/include/portlib/NdbEnv.h | 34 - storage/ndb/include/portlib/NdbHost.h | 43 - storage/ndb/include/portlib/NdbMain.h | 26 - storage/ndb/include/portlib/NdbMem.h | 81 - storage/ndb/include/portlib/NdbMutex.h | 110 - storage/ndb/include/portlib/NdbSleep.h | 38 - storage/ndb/include/portlib/NdbTCP.h | 84 - storage/ndb/include/portlib/NdbThread.h | 110 - storage/ndb/include/portlib/NdbTick.h | 61 - storage/ndb/include/portlib/PortDefs.h | 53 - storage/ndb/include/portlib/prefetch.h | 69 - .../include/transporter/TransporterCallback.hpp | 358 - .../include/transporter/TransporterDefinitions.hpp | 131 - .../include/transporter/TransporterRegistry.hpp | 343 - storage/ndb/include/util/BaseString.hpp | 285 - storage/ndb/include/util/Bitmask.hpp | 966 - storage/ndb/include/util/ConfigValues.hpp | 271 - storage/ndb/include/util/File.hpp | 211 - storage/ndb/include/util/InputStream.hpp | 66 - storage/ndb/include/util/NdbAutoPtr.hpp | 58 - storage/ndb/include/util/NdbOut.hpp | 132 - storage/ndb/include/util/NdbSqlUtil.hpp | 185 - storage/ndb/include/util/OutputStream.hpp | 71 - storage/ndb/include/util/Parser.hpp | 294 - storage/ndb/include/util/Properties.hpp | 250 - storage/ndb/include/util/SimpleProperties.hpp | 301 - storage/ndb/include/util/SocketAuthenticator.hpp | 40 - storage/ndb/include/util/SocketClient.hpp | 49 - storage/ndb/include/util/SocketServer.hpp | 142 - storage/ndb/include/util/UtilBuffer.hpp | 103 - storage/ndb/include/util/Vector.hpp | 364 - storage/ndb/include/util/basestring_vsnprintf.h | 29 - storage/ndb/include/util/md5_hash.hpp | 34 - storage/ndb/include/util/ndb_opts.h | 182 - storage/ndb/include/util/ndb_rand.h | 34 - storage/ndb/include/util/random.h | 84 - storage/ndb/include/util/socket_io.h | 51 - storage/ndb/include/util/uucode.h | 36 - storage/ndb/include/util/version.h | 44 - storage/ndb/lib/.empty | 0 storage/ndb/ndb_configure.m4 | 349 - storage/ndb/ndbapi-examples/Makefile | 27 - .../ndb/ndbapi-examples/mgmapi_logevent/Makefile | 24 - .../ndb/ndbapi-examples/mgmapi_logevent/main.cpp | 154 - .../ndb/ndbapi-examples/mgmapi_logevent2/Makefile | 24 - .../ndb/ndbapi-examples/mgmapi_logevent2/main.cpp | 225 - storage/ndb/ndbapi-examples/ndbapi_async/Makefile | 24 - .../ndbapi-examples/ndbapi_async/ndbapi_async.cpp | 492 - .../ndb/ndbapi-examples/ndbapi_async/readme.txt | 3 - storage/ndb/ndbapi-examples/ndbapi_async1/Makefile | 23 - .../ndbapi_async1/ndbapi_async1.cpp | 199 - storage/ndb/ndbapi-examples/ndbapi_event/Makefile | 24 - .../ndbapi-examples/ndbapi_event/ndbapi_event.cpp | 356 - .../ndb/ndbapi-examples/ndbapi_retries/Makefile | 23 - .../ndbapi_retries/ndbapi_retries.cpp | 291 - storage/ndb/ndbapi-examples/ndbapi_scan/Makefile | 24 - .../ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp | 845 - storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt | 3 - storage/ndb/ndbapi-examples/ndbapi_simple/Makefile | 24 - .../ndbapi_simple/ndbapi_simple.cpp | 298 - .../ndbapi-examples/ndbapi_simple_dual/Makefile | 24 - .../ndbapi-examples/ndbapi_simple_dual/main.cpp | 348 - .../ndbapi-examples/ndbapi_simple_index/Makefile | 24 - .../ndbapi-examples/ndbapi_simple_index/main.cpp | 274 - storage/ndb/plug.in | 7 - storage/ndb/src/Makefile.am | 51 - storage/ndb/src/common/Makefile.am | 31 - storage/ndb/src/common/debugger/BlockNames.cpp | 42 - storage/ndb/src/common/debugger/DebuggerNames.cpp | 154 - storage/ndb/src/common/debugger/EventLogger.cpp | 1155 -- 
storage/ndb/src/common/debugger/GrepError.cpp | 133 - storage/ndb/src/common/debugger/Makefile.am | 38 - .../src/common/debugger/SignalLoggerManager.cpp | 507 - .../ndb/src/common/debugger/signaldata/AccLock.cpp | 75 - .../src/common/debugger/signaldata/AlterIndx.cpp | 35 - .../src/common/debugger/signaldata/AlterTab.cpp | 38 - .../src/common/debugger/signaldata/AlterTable.cpp | 38 - .../src/common/debugger/signaldata/AlterTrig.cpp | 51 - .../src/common/debugger/signaldata/BackupImpl.cpp | 134 - .../debugger/signaldata/BackupSignalData.cpp | 133 - .../common/debugger/signaldata/CloseComReqConf.cpp | 52 - .../src/common/debugger/signaldata/CntrStart.cpp | 53 - .../src/common/debugger/signaldata/ContinueB.cpp | 35 - .../ndb/src/common/debugger/signaldata/CopyGCI.cpp | 58 - .../src/common/debugger/signaldata/CreateEvnt.cpp | 38 - .../debugger/signaldata/CreateFragmentation.cpp | 55 - .../src/common/debugger/signaldata/CreateIndx.cpp | 38 - .../src/common/debugger/signaldata/CreateTrig.cpp | 120 - .../src/common/debugger/signaldata/DictTabInfo.cpp | 310 - .../common/debugger/signaldata/DihContinueB.cpp | 220 - .../debugger/signaldata/DihSwitchReplicaReq.cpp | 48 - .../common/debugger/signaldata/DisconnectRep.cpp | 30 - .../src/common/debugger/signaldata/DropIndx.cpp | 38 - .../ndb/src/common/debugger/signaldata/DropTab.cpp | 50 - .../src/common/debugger/signaldata/DropTrig.cpp | 89 - .../ndb/src/common/debugger/signaldata/FailRep.cpp | 31 - .../src/common/debugger/signaldata/FireTrigOrd.cpp | 56 - .../src/common/debugger/signaldata/FsAppendReq.cpp | 38 - .../src/common/debugger/signaldata/FsCloseReq.cpp | 41 - .../ndb/src/common/debugger/signaldata/FsConf.cpp | 33 - .../src/common/debugger/signaldata/FsOpenReq.cpp | 59 - .../common/debugger/signaldata/FsReadWriteReq.cpp | 95 - .../ndb/src/common/debugger/signaldata/FsRef.cpp | 46 - .../ndb/src/common/debugger/signaldata/GCPSave.cpp | 78 - .../common/debugger/signaldata/IndxAttrInfo.cpp | 31 - .../src/common/debugger/signaldata/IndxKeyInfo.cpp | 31 - storage/ndb/src/common/debugger/signaldata/LCP.cpp | 89 - .../ndb/src/common/debugger/signaldata/LqhFrag.cpp | 63 - .../ndb/src/common/debugger/signaldata/LqhKey.cpp | 183 - .../src/common/debugger/signaldata/LqhTrans.cpp | 40 - .../ndb/src/common/debugger/signaldata/Makefile.am | 59 - .../src/common/debugger/signaldata/MasterLCP.cpp | 87 - .../common/debugger/signaldata/NFCompleteRep.cpp | 44 - .../src/common/debugger/signaldata/NdbSttor.cpp | 50 - .../common/debugger/signaldata/NdbfsContinueB.cpp | 41 - .../common/debugger/signaldata/PackedSignal.cpp | 106 - .../src/common/debugger/signaldata/PrepDropTab.cpp | 50 - .../common/debugger/signaldata/PrepFailReqRef.cpp | 52 - .../common/debugger/signaldata/ReadNodesConf.cpp | 40 - .../src/common/debugger/signaldata/ScanFrag.cpp | 42 - .../ndb/src/common/debugger/signaldata/ScanTab.cpp | 158 - .../common/debugger/signaldata/SignalDataPrint.cpp | 212 - .../debugger/signaldata/SignalDroppedRep.cpp | 34 - .../src/common/debugger/signaldata/SignalNames.cpp | 649 - .../src/common/debugger/signaldata/StartRec.cpp | 77 - .../src/common/debugger/signaldata/SumaImpl.cpp | 218 - .../src/common/debugger/signaldata/SystemError.cpp | 40 - .../ndb/src/common/debugger/signaldata/TcIndx.cpp | 74 - .../src/common/debugger/signaldata/TcKeyConf.cpp | 69 - .../src/common/debugger/signaldata/TcKeyRef.cpp | 28 - .../src/common/debugger/signaldata/TcKeyReq.cpp | 115 - .../common/debugger/signaldata/TcRollbackRep.cpp | 28 - .../common/debugger/signaldata/TrigAttrInfo.cpp | 53 
- .../src/common/debugger/signaldata/TupCommit.cpp | 28 - .../ndb/src/common/debugger/signaldata/TupKey.cpp | 50 - .../src/common/debugger/signaldata/TuxMaint.cpp | 45 - .../src/common/debugger/signaldata/UtilDelete.cpp | 65 - .../src/common/debugger/signaldata/UtilExecute.cpp | 59 - .../src/common/debugger/signaldata/UtilLock.cpp | 158 - .../src/common/debugger/signaldata/UtilPrepare.cpp | 64 - .../common/debugger/signaldata/UtilSequence.cpp | 67 - .../ndb/src/common/debugger/signaldata/print.awk | 71 - .../ndb/src/common/logger/ConsoleLogHandler.cpp | 68 - storage/ndb/src/common/logger/FileLogHandler.cpp | 251 - storage/ndb/src/common/logger/LogHandler.cpp | 208 - storage/ndb/src/common/logger/LogHandlerList.cpp | 181 - storage/ndb/src/common/logger/LogHandlerList.hpp | 94 - storage/ndb/src/common/logger/Logger.cpp | 398 - storage/ndb/src/common/logger/Makefile.am | 38 - storage/ndb/src/common/logger/SysLogHandler.cpp | 159 - .../logger/listtest/LogHandlerListUnitTest.cpp | 164 - .../logger/listtest/LogHandlerListUnitTest.hpp | 40 - storage/ndb/src/common/logger/listtest/Makefile | 14 - .../common/logger/loggertest/LoggerUnitTest.cpp | 189 - .../common/logger/loggertest/LoggerUnitTest.hpp | 49 - storage/ndb/src/common/logger/loggertest/Makefile | 16 - .../ndb/src/common/mgmcommon/ConfigRetriever.cpp | 391 - storage/ndb/src/common/mgmcommon/IPCConfig.cpp | 370 - storage/ndb/src/common/mgmcommon/Makefile.am | 41 - .../ndb/src/common/mgmcommon/printConfig/Makefile | 16 - .../common/mgmcommon/printConfig/printConfig.cpp | 89 - storage/ndb/src/common/portlib/Makefile.am | 56 - storage/ndb/src/common/portlib/NdbCondition.c | 142 - storage/ndb/src/common/portlib/NdbConfig.c | 145 - storage/ndb/src/common/portlib/NdbDaemon.c | 171 - storage/ndb/src/common/portlib/NdbEnv.c | 34 - storage/ndb/src/common/portlib/NdbHost.c | 34 - storage/ndb/src/common/portlib/NdbMem.c | 83 - storage/ndb/src/common/portlib/NdbMutex.c | 91 - storage/ndb/src/common/portlib/NdbPortLibTest.cpp | 603 - storage/ndb/src/common/portlib/NdbSleep.c | 43 - storage/ndb/src/common/portlib/NdbTCP.cpp | 132 - storage/ndb/src/common/portlib/NdbThread.c | 193 - storage/ndb/src/common/portlib/NdbTick.c | 104 - storage/ndb/src/common/portlib/memtest.c | 243 - storage/ndb/src/common/portlib/mmstest.cpp | 76 - storage/ndb/src/common/portlib/munmaptest.cpp | 246 - .../ndb/src/common/portlib/win32/NdbCondition.c | 178 - storage/ndb/src/common/portlib/win32/NdbDaemon.c | 44 - storage/ndb/src/common/portlib/win32/NdbEnv.c | 31 - storage/ndb/src/common/portlib/win32/NdbHost.c | 52 - storage/ndb/src/common/portlib/win32/NdbMem.c | 283 - storage/ndb/src/common/portlib/win32/NdbMutex.c | 73 - storage/ndb/src/common/portlib/win32/NdbSleep.c | 32 - storage/ndb/src/common/portlib/win32/NdbTCP.c | 71 - storage/ndb/src/common/portlib/win32/NdbThread.c | 114 - storage/ndb/src/common/portlib/win32/NdbTick.c | 64 - storage/ndb/src/common/transporter/Makefile.am | 48 - storage/ndb/src/common/transporter/Packer.cpp | 517 - storage/ndb/src/common/transporter/Packer.hpp | 85 - .../ndb/src/common/transporter/SCI_Transporter.cpp | 910 - .../ndb/src/common/transporter/SCI_Transporter.hpp | 384 - storage/ndb/src/common/transporter/SHM_Buffer.hpp | 233 - .../ndb/src/common/transporter/SHM_Transporter.cpp | 377 - .../ndb/src/common/transporter/SHM_Transporter.hpp | 177 - .../common/transporter/SHM_Transporter.unix.cpp | 126 - .../common/transporter/SHM_Transporter.win32.cpp | 178 - storage/ndb/src/common/transporter/SendBuffer.cpp | 89 - 
storage/ndb/src/common/transporter/SendBuffer.hpp | 190 - .../ndb/src/common/transporter/TCP_Transporter.cpp | 436 - .../ndb/src/common/transporter/TCP_Transporter.hpp | 234 - storage/ndb/src/common/transporter/Transporter.cpp | 237 - storage/ndb/src/common/transporter/Transporter.hpp | 193 - .../transporter/TransporterInternalDefinitions.hpp | 298 - .../src/common/transporter/TransporterRegistry.cpp | 1448 -- .../ndb/src/common/transporter/basictest/Makefile | 15 - .../transporter/basictest/basicTransporterTest.cpp | 512 - storage/ndb/src/common/transporter/buddy.cpp | 325 - storage/ndb/src/common/transporter/buddy.hpp | 172 - .../src/common/transporter/failoverSCI/Makefile | 18 - .../common/transporter/failoverSCI/failoverSCI.cpp | 863 - .../ndb/src/common/transporter/perftest/Makefile | 15 - .../transporter/perftest/perfTransporterTest.cpp | 712 - .../ndb/src/common/transporter/priotest/Makefile | 15 - .../common/transporter/priotest/prioSCI/Makefile | 17 - .../transporter/priotest/prioSCI/prioSCI.cpp | 29 - .../common/transporter/priotest/prioSHM/Makefile | 13 - .../transporter/priotest/prioSHM/prioSHM.cpp | 26 - .../common/transporter/priotest/prioTCP/Makefile | 13 - .../transporter/priotest/prioTCP/prioTCP.cpp | 26 - .../transporter/priotest/prioTransporterTest.cpp | 708 - .../transporter/priotest/prioTransporterTest.hpp | 34 - storage/ndb/src/common/util/BaseString.cpp | 553 - storage/ndb/src/common/util/Bitmask.cpp | 118 - storage/ndb/src/common/util/ConfigValues.cpp | 803 - storage/ndb/src/common/util/File.cpp | 199 - storage/ndb/src/common/util/InputStream.cpp | 81 - storage/ndb/src/common/util/Makefile.am | 62 - storage/ndb/src/common/util/NdbOut.cpp | 173 - storage/ndb/src/common/util/NdbSqlUtil.cpp | 1016 - storage/ndb/src/common/util/OutputStream.cpp | 95 - storage/ndb/src/common/util/Parser.cpp | 356 - storage/ndb/src/common/util/Properties.cpp | 1136 -- storage/ndb/src/common/util/SimpleProperties.cpp | 530 - .../ndb/src/common/util/SocketAuthenticator.cpp | 91 - storage/ndb/src/common/util/SocketClient.cpp | 203 - storage/ndb/src/common/util/SocketServer.cpp | 357 - storage/ndb/src/common/util/basestring_vsnprintf.c | 71 - .../ndb/src/common/util/filetest/FileUnitTest.cpp | 237 - .../ndb/src/common/util/filetest/FileUnitTest.hpp | 41 - storage/ndb/src/common/util/filetest/Makefile | 14 - storage/ndb/src/common/util/getarg.cat3 | 237 - storage/ndb/src/common/util/md5_hash.cpp | 239 - storage/ndb/src/common/util/ndb_init.c | 54 - storage/ndb/src/common/util/ndb_rand.c | 41 - storage/ndb/src/common/util/new.cpp | 58 - storage/ndb/src/common/util/random.c | 284 - storage/ndb/src/common/util/socket_io.cpp | 333 - storage/ndb/src/common/util/strdup.c | 28 - .../ndb/src/common/util/testConfigValues/Makefile | 12 - .../util/testConfigValues/testConfigValues.cpp | 138 - .../ndb/src/common/util/testProperties/Makefile | 9 - .../common/util/testProperties/testProperties.cpp | 195 - .../src/common/util/testSimpleProperties/Makefile | 12 - .../common/util/testSimpleProperties/sp_test.cpp | 95 - storage/ndb/src/common/util/uucode.c | 234 - storage/ndb/src/common/util/version.c | 255 - storage/ndb/src/cw/Makefile.am | 19 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp | 215 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp | 216 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw | 29 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h | 40 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico | Bin 1078 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc | 193 - 
storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln | 21 - storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo | Bin 8704 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj | 240 - storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO | Bin 1078 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp | 436 - storage/ndb/src/cw/cpcc-win32/C++/Open.ICO | Bin 1078 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp | 24 - storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h | 69 - storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp | 19 - storage/ndb/src/cw/cpcc-win32/C++/TreeView.h | 19 - storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp | Bin 622 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/resource.h | 90 - storage/ndb/src/cw/cpcc-win32/C++/small.ico | Bin 318 -> 0 bytes storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp | Bin 622 -> 0 bytes storage/ndb/src/cw/cpcc-win32/csharp/App.ico | Bin 1078 -> 0 bytes storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs | 1416 -- storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs | 272 - .../src/cw/cpcc-win32/csharp/ComputerAddDialog.cs | 258 - .../cw/cpcc-win32/csharp/ComputerRemoveDialog.cs | 244 - storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO | Bin 1078 -> 0 bytes storage/ndb/src/cw/cpcc-win32/csharp/Database.cs | 178 - .../ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj | 240 - .../src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user | 48 - storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb | Bin 19456 -> 0 bytes storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln | 21 - .../ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs | 1899 -- storage/ndb/src/cw/cpcc-win32/csharp/Process.cs | 160 - .../cw/cpcc-win32/csharp/ProcessDefineDialog.cs | 451 - .../cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs | 57 - .../csharp/simpleparser/SimpleCPCParser.cs | 376 - .../cw/cpcc-win32/csharp/socketcomm/SocketComm.cs | 223 - .../cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs | 42 - .../src/cw/cpcc-win32/csharp/startDatabaseDlg.cs | 267 - .../cpcc-win32/csharp/telnetclient/telnetClient.cs | 424 - storage/ndb/src/cw/cpcd/APIService.cpp | 401 - storage/ndb/src/cw/cpcd/APIService.hpp | 65 - storage/ndb/src/cw/cpcd/CPCD.cpp | 435 - storage/ndb/src/cw/cpcd/CPCD.hpp | 390 - storage/ndb/src/cw/cpcd/Makefile.am | 32 - storage/ndb/src/cw/cpcd/Monitor.cpp | 79 - storage/ndb/src/cw/cpcd/Process.cpp | 486 - storage/ndb/src/cw/cpcd/common.cpp | 98 - storage/ndb/src/cw/cpcd/common.hpp | 36 - storage/ndb/src/cw/cpcd/main.cpp | 183 - storage/ndb/src/cw/test/socketclient/Makefile | 24 - .../src/cw/test/socketclient/socketClientTest.cpp | 64 - storage/ndb/src/cw/util/ClientInterface.cpp | 185 - storage/ndb/src/cw/util/ClientInterface.hpp | 49 - storage/ndb/src/cw/util/Makefile | 10 - storage/ndb/src/cw/util/SocketRegistry.cpp | 213 - storage/ndb/src/cw/util/SocketRegistry.hpp | 290 - storage/ndb/src/cw/util/SocketService.cpp | 60 - storage/ndb/src/cw/util/SocketService.hpp | 46 - .../WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT | 77 - .../ndb/src/external/WIN32.x86/sci/lib/scilib.lib | Bin 17918 -> 0 bytes .../src/external/WIN32.x86/sci/lib/scilib_md.lib | Bin 18000 -> 0 bytes .../src/external/WIN32.x86/sci/lib/scilib_mt.lib | Bin 17924 -> 0 bytes .../src/external/WIN32.x86/sci/lib/sisci_api.lib | Bin 264284 -> 0 bytes .../external/WIN32.x86/sci/lib/sisci_api_md.lib | Bin 265578 -> 0 bytes .../external/WIN32.x86/sci/lib/sisci_api_mt.lib | Bin 264386 -> 0 bytes storage/ndb/src/kernel/Makefile.am | 73 - storage/ndb/src/kernel/SimBlockList.cpp | 136 - storage/ndb/src/kernel/blocks/ERROR_codes.txt | 565 - 
storage/ndb/src/kernel/blocks/Makefile.am | 68 - storage/ndb/src/kernel/blocks/NodeRestart.new.txt | 82 - storage/ndb/src/kernel/blocks/NodeRestart.txt | 80 - storage/ndb/src/kernel/blocks/OptNR.txt | 49 - storage/ndb/src/kernel/blocks/Start.txt | 97 - .../ndb/src/kernel/blocks/SystemRestart.new.txt | 61 - storage/ndb/src/kernel/blocks/SystemRestart.txt | 61 - storage/ndb/src/kernel/blocks/backup/Backup.cpp | 5144 ----- storage/ndb/src/kernel/blocks/backup/Backup.hpp | 752 - storage/ndb/src/kernel/blocks/backup/Backup.txt | 427 - .../ndb/src/kernel/blocks/backup/BackupFormat.hpp | 197 - .../ndb/src/kernel/blocks/backup/BackupInit.cpp | 259 - storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp | 360 - storage/ndb/src/kernel/blocks/backup/Makefile.am | 26 - storage/ndb/src/kernel/blocks/backup/read.cpp | 523 - storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 1453 -- storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 118 - storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 1106 -- storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 220 - storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 8549 -------- .../ndb/src/kernel/blocks/dbdict/CreateIndex.txt | 152 - .../src/kernel/blocks/dbdict/CreateTable.new.txt | 29 - .../ndb/src/kernel/blocks/dbdict/CreateTable.txt | 35 - storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 16702 ---------------- storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 2707 --- storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt | 88 - storage/ndb/src/kernel/blocks/dbdict/DictLock.txt | 98 - storage/ndb/src/kernel/blocks/dbdict/DropTable.txt | 140 - storage/ndb/src/kernel/blocks/dbdict/Event.txt | 102 - storage/ndb/src/kernel/blocks/dbdict/Makefile.am | 33 - .../src/kernel/blocks/dbdict/Master_AddTable.sfl | 765 - .../ndb/src/kernel/blocks/dbdict/SchemaFile.hpp | 90 - .../src/kernel/blocks/dbdict/Slave_AddTable.sfl | 436 - .../src/kernel/blocks/dbdict/printSchemaFile.cpp | 285 - storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 1673 -- storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 333 - storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 15878 --------------- storage/ndb/src/kernel/blocks/dbdih/LCP.txt | 35 - storage/ndb/src/kernel/blocks/dbdih/Makefile.am | 27 - storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp | 275 - .../ndb/src/kernel/blocks/dbdih/printSysfile.cpp | 160 - .../src/kernel/blocks/dbdih/printSysfile/Makefile | 12 - .../blocks/dbdih/printSysfile/printSysfile.cpp | 158 - storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 3029 --- storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 415 - storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 19357 ------------------- storage/ndb/src/kernel/blocks/dblqh/Makefile.am | 29 - .../kernel/blocks/dblqh/redoLogReader/reader.cpp | 416 - .../kernel/blocks/dblqh/redoLogReader/records.cpp | 336 - .../kernel/blocks/dblqh/redoLogReader/records.hpp | 250 - storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 1978 -- storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 378 - storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 13612 ------------- .../src/kernel/blocks/dbtup/AttributeOffset.hpp | 136 - storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 3070 --- storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 423 - .../ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp | 275 - .../ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 749 - storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 450 - .../ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 1893 -- .../ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 3205 --- .../ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp | 
285 - storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 749 - storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 737 - storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 1486 -- .../ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp | 390 - .../ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 602 - .../ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 1687 -- storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp | 1209 -- .../src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp | 238 - .../ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp | 317 - .../ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 1291 -- .../ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp | 420 - storage/ndb/src/kernel/blocks/dbtup/Makefile.am | 26 - storage/ndb/src/kernel/blocks/dbtup/Notes.txt | 198 - .../ndb/src/kernel/blocks/dbtup/Undo_buffer.cpp | 116 - .../ndb/src/kernel/blocks/dbtup/Undo_buffer.hpp | 57 - .../ndb/src/kernel/blocks/dbtup/test_varpage.cpp | 297 - storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp | 480 - storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp | 266 - storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 1208 -- storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 175 - storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp | 417 - storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 338 - storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp | 182 - storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 512 - storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 590 - storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 1123 -- .../ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 432 - storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp | 159 - storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp | 717 - storage/ndb/src/kernel/blocks/dbtux/Times.txt | 151 - storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html | 120 - storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 2608 --- storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp | 485 - storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt | 68 - storage/ndb/src/kernel/blocks/diskpage.cpp | 90 - storage/ndb/src/kernel/blocks/diskpage.hpp | 242 - storage/ndb/src/kernel/blocks/lgman.cpp | 3209 --- storage/ndb/src/kernel/blocks/lgman.hpp | 367 - storage/ndb/src/kernel/blocks/mutexes.hpp | 39 - storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 388 - .../ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 121 - .../ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 3094 --- .../src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 102 - storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 1387 -- storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 252 - .../blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp | 695 - .../src/kernel/blocks/ndbfs/AsyncFileTest/Makefile | 27 - .../ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp | 20 - .../ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp | 116 - storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp | 192 - storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp | 98 - .../ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp | 18 - .../ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp | 179 - .../kernel/blocks/ndbfs/MemoryChannelTest/Makefile | 13 - .../ndbfs/MemoryChannelTest/MemoryChannelTest.cpp | 193 - storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 1109 -- storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp | 131 - storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp | 120 - storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp | 261 - storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp | 215 - storage/ndb/src/kernel/blocks/new-block.tar.gz | Bin 1816 -> 0 bytes storage/ndb/src/kernel/blocks/pgman.cpp | 2514 --- 
storage/ndb/src/kernel/blocks/pgman.hpp | 682 - storage/ndb/src/kernel/blocks/print_file.cpp | 417 - storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 471 - storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 145 - storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 5268 ----- storage/ndb/src/kernel/blocks/qmgr/timer.hpp | 72 - storage/ndb/src/kernel/blocks/record_types.hpp | 59 - storage/ndb/src/kernel/blocks/restore.cpp | 1286 -- storage/ndb/src/kernel/blocks/restore.hpp | 159 - storage/ndb/src/kernel/blocks/suma/Suma.cpp | 5202 ----- storage/ndb/src/kernel/blocks/suma/Suma.hpp | 635 - storage/ndb/src/kernel/blocks/suma/Suma.txt | 192 - storage/ndb/src/kernel/blocks/suma/SumaInit.cpp | 136 - storage/ndb/src/kernel/blocks/trix/Trix.cpp | 997 - storage/ndb/src/kernel/blocks/trix/Trix.hpp | 189 - storage/ndb/src/kernel/blocks/tsman.cpp | 2224 --- storage/ndb/src/kernel/blocks/tsman.hpp | 453 - .../ndb/src/kernel/error/ErrorHandlingMacros.hpp | 57 - storage/ndb/src/kernel/error/ErrorReporter.cpp | 406 - storage/ndb/src/kernel/error/ErrorReporter.hpp | 57 - storage/ndb/src/kernel/error/Makefile.am | 38 - storage/ndb/src/kernel/error/TimeModule.cpp | 109 - storage/ndb/src/kernel/error/TimeModule.hpp | 46 - storage/ndb/src/kernel/error/ndbd_exit_codes.c | 275 - storage/ndb/src/kernel/main.cpp | 653 - storage/ndb/src/kernel/vm/Array.hpp | 165 - storage/ndb/src/kernel/vm/ArrayPool.hpp | 985 - storage/ndb/src/kernel/vm/CArray.hpp | 149 - storage/ndb/src/kernel/vm/Callback.hpp | 24 - storage/ndb/src/kernel/vm/ClusterConfiguration.cpp | 484 - storage/ndb/src/kernel/vm/ClusterConfiguration.hpp | 105 - storage/ndb/src/kernel/vm/Configuration.cpp | 902 - storage/ndb/src/kernel/vm/Configuration.hpp | 152 - storage/ndb/src/kernel/vm/DLCFifoList.hpp | 119 - storage/ndb/src/kernel/vm/DLCHashTable.hpp | 82 - storage/ndb/src/kernel/vm/DLFifoList.hpp | 475 - storage/ndb/src/kernel/vm/DLHashTable.hpp | 519 - storage/ndb/src/kernel/vm/DLHashTable2.hpp | 515 - storage/ndb/src/kernel/vm/DLList.hpp | 438 - storage/ndb/src/kernel/vm/DataBuffer.hpp | 533 - storage/ndb/src/kernel/vm/DynArr256.cpp | 1021 - storage/ndb/src/kernel/vm/DynArr256.hpp | 85 - storage/ndb/src/kernel/vm/Emulator.cpp | 292 - storage/ndb/src/kernel/vm/Emulator.hpp | 109 - storage/ndb/src/kernel/vm/FastScheduler.cpp | 500 - storage/ndb/src/kernel/vm/FastScheduler.hpp | 345 - storage/ndb/src/kernel/vm/GlobalData.hpp | 123 - storage/ndb/src/kernel/vm/KeyDescriptor.hpp | 43 - storage/ndb/src/kernel/vm/KeyTable.hpp | 52 - storage/ndb/src/kernel/vm/KeyTable2.hpp | 115 - storage/ndb/src/kernel/vm/KeyTable2Ref.hpp | 65 - storage/ndb/src/kernel/vm/LinearPool.hpp | 656 - storage/ndb/src/kernel/vm/LongSignal.hpp | 80 - storage/ndb/src/kernel/vm/Makefile.am | 86 - storage/ndb/src/kernel/vm/Mutex.cpp | 287 - storage/ndb/src/kernel/vm/Mutex.hpp | 267 - storage/ndb/src/kernel/vm/NdbdSuperPool.cpp | 228 - storage/ndb/src/kernel/vm/NdbdSuperPool.hpp | 55 - storage/ndb/src/kernel/vm/Pool.cpp | 44 - storage/ndb/src/kernel/vm/Pool.hpp | 341 - storage/ndb/src/kernel/vm/Prio.hpp | 32 - storage/ndb/src/kernel/vm/RWPool.cpp | 230 - storage/ndb/src/kernel/vm/RWPool.hpp | 78 - storage/ndb/src/kernel/vm/RequestTracker.hpp | 58 - storage/ndb/src/kernel/vm/Rope.cpp | 189 - storage/ndb/src/kernel/vm/Rope.hpp | 117 - storage/ndb/src/kernel/vm/SLFifoList.hpp | 343 - storage/ndb/src/kernel/vm/SLList.hpp | 402 - storage/ndb/src/kernel/vm/SafeCounter.cpp | 159 - storage/ndb/src/kernel/vm/SafeCounter.hpp | 307 - storage/ndb/src/kernel/vm/SectionReader.cpp | 143 - 
storage/ndb/src/kernel/vm/SectionReader.hpp | 49 - storage/ndb/src/kernel/vm/SignalCounter.hpp | 166 - storage/ndb/src/kernel/vm/SimBlockList.hpp | 48 - .../ndb/src/kernel/vm/SimplePropertiesSection.cpp | 223 - storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 2082 -- storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 824 - storage/ndb/src/kernel/vm/SuperPool.cpp | 750 - storage/ndb/src/kernel/vm/SuperPool.hpp | 592 - storage/ndb/src/kernel/vm/ThreadConfig.cpp | 183 - storage/ndb/src/kernel/vm/ThreadConfig.hpp | 39 - storage/ndb/src/kernel/vm/TimeQueue.cpp | 208 - storage/ndb/src/kernel/vm/TimeQueue.hpp | 62 - storage/ndb/src/kernel/vm/TransporterCallback.cpp | 512 - storage/ndb/src/kernel/vm/VMSignal.cpp | 34 - storage/ndb/src/kernel/vm/VMSignal.hpp | 208 - storage/ndb/src/kernel/vm/WOPool.cpp | 137 - storage/ndb/src/kernel/vm/WOPool.hpp | 123 - storage/ndb/src/kernel/vm/WaitQueue.hpp | 35 - storage/ndb/src/kernel/vm/WatchDog.cpp | 200 - storage/ndb/src/kernel/vm/WatchDog.hpp | 56 - storage/ndb/src/kernel/vm/al_test/Makefile | 12 - .../ndb/src/kernel/vm/al_test/arrayListTest.cpp | 317 - .../ndb/src/kernel/vm/al_test/arrayPoolTest.cpp | 298 - storage/ndb/src/kernel/vm/al_test/main.cpp | 69 - storage/ndb/src/kernel/vm/bench_pool.cpp | 608 - storage/ndb/src/kernel/vm/mem.txt | 34 - storage/ndb/src/kernel/vm/ndbd_malloc.cpp | 65 - storage/ndb/src/kernel/vm/ndbd_malloc.hpp | 26 - storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp | 1016 - storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp | 200 - storage/ndb/src/kernel/vm/pc.hpp | 237 - storage/ndb/src/kernel/vm/testCopy/Makefile | 9 - storage/ndb/src/kernel/vm/testCopy/rr.cpp | 32 - storage/ndb/src/kernel/vm/testCopy/testCopy.cpp | 341 - storage/ndb/src/kernel/vm/testDataBuffer/Makefile | 10 - .../kernel/vm/testDataBuffer/testDataBuffer.cpp | 188 - storage/ndb/src/kernel/vm/testLongSig/Makefile | 9 - .../ndb/src/kernel/vm/testLongSig/testLongSig.cpp | 333 - .../kernel/vm/testSimplePropertiesSection/Makefile | 10 - .../kernel/vm/testSimplePropertiesSection/test.cpp | 171 - storage/ndb/src/kernel/vm/testSuperPool.cpp | 364 - storage/ndb/src/libndb.ver.in | 18 - storage/ndb/src/mgmapi/LocalConfig.cpp | 319 - storage/ndb/src/mgmapi/LocalConfig.hpp | 68 - storage/ndb/src/mgmapi/Makefile.am | 45 - storage/ndb/src/mgmapi/mgmapi.cpp | 2866 --- storage/ndb/src/mgmapi/mgmapi_configuration.cpp | 209 - storage/ndb/src/mgmapi/mgmapi_configuration.hpp | 48 - storage/ndb/src/mgmapi/mgmapi_internal.h | 79 - storage/ndb/src/mgmapi/ndb_logevent.cpp | 519 - storage/ndb/src/mgmapi/ndb_logevent.hpp | 34 - storage/ndb/src/mgmapi/test/Makefile | 13 - storage/ndb/src/mgmapi/test/keso.c | 470 - storage/ndb/src/mgmapi/test/mgmSrvApi.cpp | 126 - storage/ndb/src/mgmclient/CommandInterpreter.cpp | 2744 --- storage/ndb/src/mgmclient/Makefile.am | 72 - storage/ndb/src/mgmclient/main.cpp | 199 - storage/ndb/src/mgmclient/ndb_mgmclient.h | 33 - storage/ndb/src/mgmclient/ndb_mgmclient.hpp | 33 - storage/ndb/src/mgmclient/test_cpcd/Makefile | 17 - storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp | 157 - storage/ndb/src/mgmsrv/Config.cpp | 181 - storage/ndb/src/mgmsrv/Config.hpp | 82 - storage/ndb/src/mgmsrv/ConfigInfo.cpp | 3827 ---- storage/ndb/src/mgmsrv/ConfigInfo.hpp | 152 - storage/ndb/src/mgmsrv/ERROR_codes.txt | 29 - storage/ndb/src/mgmsrv/InitConfigFileParser.cpp | 953 - storage/ndb/src/mgmsrv/InitConfigFileParser.hpp | 145 - storage/ndb/src/mgmsrv/Makefile.am | 73 - storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 3017 --- storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 648 - 
storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 76 - .../src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp | 22 - storage/ndb/src/mgmsrv/Services.cpp | 1818 -- storage/ndb/src/mgmsrv/Services.hpp | 136 - storage/ndb/src/mgmsrv/SignalQueue.cpp | 104 - storage/ndb/src/mgmsrv/SignalQueue.hpp | 100 - storage/ndb/src/mgmsrv/convertStrToInt.cpp | 43 - storage/ndb/src/mgmsrv/convertStrToInt.hpp | 25 - storage/ndb/src/mgmsrv/main.cpp | 382 - storage/ndb/src/mgmsrv/mkconfig/Makefile | 13 - storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp | 61 - storage/ndb/src/mgmsrv/ndb_mgmd_error.h | 34 - storage/ndb/src/ndbapi/API.hpp | 26 - storage/ndb/src/ndbapi/ClusterMgr.cpp | 909 - storage/ndb/src/ndbapi/ClusterMgr.hpp | 259 - storage/ndb/src/ndbapi/DictCache.cpp | 470 - storage/ndb/src/ndbapi/DictCache.hpp | 104 - storage/ndb/src/ndbapi/Makefile.am | 87 - storage/ndb/src/ndbapi/Ndb.cpp | 1920 -- storage/ndb/src/ndbapi/NdbApiSignal.cpp | 298 - storage/ndb/src/ndbapi/NdbApiSignal.hpp | 225 - storage/ndb/src/ndbapi/NdbBlob.cpp | 2029 -- storage/ndb/src/ndbapi/NdbBlobImpl.hpp | 43 - storage/ndb/src/ndbapi/NdbDictionary.cpp | 2047 -- storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 5319 ----- storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 1221 -- storage/ndb/src/ndbapi/NdbErrorOut.cpp | 45 - storage/ndb/src/ndbapi/NdbEventOperation.cpp | 219 - storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 3007 --- storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp | 655 - storage/ndb/src/ndbapi/NdbImpl.hpp | 304 - storage/ndb/src/ndbapi/NdbIndexOperation.cpp | 191 - storage/ndb/src/ndbapi/NdbIndexStat.cpp | 493 - storage/ndb/src/ndbapi/NdbLinHash.hpp | 455 - storage/ndb/src/ndbapi/NdbOperation.cpp | 438 - storage/ndb/src/ndbapi/NdbOperationDefine.cpp | 795 - storage/ndb/src/ndbapi/NdbOperationExec.cpp | 576 - storage/ndb/src/ndbapi/NdbOperationInt.cpp | 1187 -- storage/ndb/src/ndbapi/NdbOperationScan.cpp | 16 - storage/ndb/src/ndbapi/NdbOperationSearch.cpp | 646 - storage/ndb/src/ndbapi/NdbPool.cpp | 71 - storage/ndb/src/ndbapi/NdbPoolImpl.cpp | 528 - storage/ndb/src/ndbapi/NdbPoolImpl.hpp | 166 - storage/ndb/src/ndbapi/NdbRecAttr.cpp | 533 - storage/ndb/src/ndbapi/NdbReceiver.cpp | 314 - storage/ndb/src/ndbapi/NdbScanFilter.cpp | 838 - storage/ndb/src/ndbapi/NdbScanOperation.cpp | 1858 -- storage/ndb/src/ndbapi/NdbTransaction.cpp | 2221 --- storage/ndb/src/ndbapi/NdbTransactionScan.cpp | 122 - storage/ndb/src/ndbapi/NdbUtil.cpp | 65 - storage/ndb/src/ndbapi/NdbUtil.hpp | 104 - storage/ndb/src/ndbapi/NdbWaiter.hpp | 105 - storage/ndb/src/ndbapi/Ndberr.cpp | 89 - storage/ndb/src/ndbapi/Ndbif.cpp | 1481 -- storage/ndb/src/ndbapi/Ndbinit.cpp | 219 - storage/ndb/src/ndbapi/Ndblist.cpp | 592 - storage/ndb/src/ndbapi/ObjectMap.cpp | 63 - storage/ndb/src/ndbapi/ObjectMap.hpp | 114 - storage/ndb/src/ndbapi/ScanOperation.txt | 56 - storage/ndb/src/ndbapi/SignalSender.cpp | 283 - storage/ndb/src/ndbapi/SignalSender.hpp | 83 - storage/ndb/src/ndbapi/TransporterFacade.cpp | 1538 -- storage/ndb/src/ndbapi/TransporterFacade.hpp | 418 - storage/ndb/src/ndbapi/ndb_cluster_connection.cpp | 694 - .../ndb/src/ndbapi/ndb_cluster_connection_impl.hpp | 86 - storage/ndb/src/ndbapi/ndb_internal.hpp | 27 - storage/ndb/src/ndbapi/ndberror.c | 837 - storage/ndb/src/ndbapi/ndberror_check.c | 39 - storage/ndb/src/ndbapi/signal-sender/Makefile | 19 - .../ndb/src/ndbapi/signal-sender/SignalSender.cpp | 237 - .../ndb/src/ndbapi/signal-sender/SignalSender.hpp | 82 - storage/ndb/test/Makefile.am | 24 - storage/ndb/test/include/AtrtClient.hpp | 57 - 
storage/ndb/test/include/CpcClient.hpp | 103 - storage/ndb/test/include/DbUtil.hpp | 177 - .../ndb/test/include/HugoAsynchTransactions.hpp | 75 - storage/ndb/test/include/HugoCalculator.hpp | 53 - storage/ndb/test/include/HugoOperations.hpp | 137 - storage/ndb/test/include/HugoTransactions.hpp | 155 - storage/ndb/test/include/NDBT.hpp | 39 - storage/ndb/test/include/NDBT_DataSet.hpp | 140 - .../ndb/test/include/NDBT_DataSetTransaction.hpp | 162 - storage/ndb/test/include/NDBT_Error.hpp | 101 - storage/ndb/test/include/NDBT_Output.hpp | 30 - storage/ndb/test/include/NDBT_ResultRow.hpp | 61 - storage/ndb/test/include/NDBT_ReturnCodes.h | 42 - storage/ndb/test/include/NDBT_Stats.hpp | 74 - storage/ndb/test/include/NDBT_Table.hpp | 94 - storage/ndb/test/include/NDBT_Tables.hpp | 56 - storage/ndb/test/include/NDBT_Test.hpp | 470 - storage/ndb/test/include/NDBT_Thread.hpp | 227 - storage/ndb/test/include/NdbBackup.hpp | 54 - storage/ndb/test/include/NdbConfig.hpp | 39 - storage/ndb/test/include/NdbGrep.hpp | 53 - storage/ndb/test/include/NdbMixRestarter.hpp | 75 - storage/ndb/test/include/NdbRestarter.hpp | 121 - storage/ndb/test/include/NdbRestarts.hpp | 120 - storage/ndb/test/include/NdbSchemaCon.hpp | 147 - storage/ndb/test/include/NdbSchemaOp.hpp | 546 - storage/ndb/test/include/NdbTest.hpp | 35 - storage/ndb/test/include/NdbTimer.hpp | 109 - storage/ndb/test/include/TestNdbEventOperation.hpp | 24 - storage/ndb/test/include/UtilTransactions.hpp | 136 - storage/ndb/test/include/getarg.h | 115 - storage/ndb/test/ndbapi/InsertRecs.cpp | 571 - storage/ndb/test/ndbapi/Makefile.am | 192 - storage/ndb/test/ndbapi/ScanFilter.hpp | 131 - storage/ndb/test/ndbapi/ScanFunctions.hpp | 352 - storage/ndb/test/ndbapi/ScanInterpretTest.hpp | 515 - storage/ndb/test/ndbapi/TraceNdbApi.cpp | 543 - storage/ndb/test/ndbapi/VerifyNdbApi.cpp | 151 - storage/ndb/test/ndbapi/acid.cpp | 533 - storage/ndb/test/ndbapi/acid2.cpp | 693 - storage/ndb/test/ndbapi/acrt/NdbRepStress.cpp | 457 - storage/ndb/test/ndbapi/adoInsertRecs.cpp | 363 - storage/ndb/test/ndbapi/asyncGenerator.cpp | 571 - storage/ndb/test/ndbapi/bank/Bank.cpp | 2521 --- storage/ndb/test/ndbapi/bank/Bank.hpp | 147 - storage/ndb/test/ndbapi/bank/BankLoad.cpp | 602 - storage/ndb/test/ndbapi/bank/Makefile.am | 36 - storage/ndb/test/ndbapi/bank/bankCreator.cpp | 68 - storage/ndb/test/ndbapi/bank/bankMakeGL.cpp | 64 - storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp | 64 - storage/ndb/test/ndbapi/bank/bankTimer.cpp | 67 - .../ndb/test/ndbapi/bank/bankTransactionMaker.cpp | 67 - .../ndb/test/ndbapi/bank/bankValidateAllGLs.cpp | 65 - storage/ndb/test/ndbapi/bank/testBank.cpp | 153 - storage/ndb/test/ndbapi/bench/asyncGenerator.cpp | 570 - storage/ndb/test/ndbapi/bench/dbGenerator.h | 63 - storage/ndb/test/ndbapi/bench/dbPopulate.cpp | 244 - storage/ndb/test/ndbapi/bench/dbPopulate.h | 59 - storage/ndb/test/ndbapi/bench/macros.h | 51 - .../ndb/test/ndbapi/bench/mainAsyncGenerator.cpp | 503 - storage/ndb/test/ndbapi/bench/mainPopulate.cpp | 81 - storage/ndb/test/ndbapi/bench/ndb_async1.cpp | 647 - storage/ndb/test/ndbapi/bench/ndb_async2.cpp | 757 - storage/ndb/test/ndbapi/bench/ndb_error.hpp | 81 - storage/ndb/test/ndbapi/bench/ndb_schema.hpp | 78 - .../ndb/test/ndbapi/bench/ndb_user_transaction.cpp | 825 - .../test/ndbapi/bench/ndb_user_transaction2.cpp | 825 - .../test/ndbapi/bench/ndb_user_transaction3.cpp | 793 - .../test/ndbapi/bench/ndb_user_transaction4.cpp | 770 - .../test/ndbapi/bench/ndb_user_transaction5.cpp | 769 - 
.../test/ndbapi/bench/ndb_user_transaction6.cpp | 561 - storage/ndb/test/ndbapi/bench/testData.h | 156 - storage/ndb/test/ndbapi/bench/testDefinitions.h | 90 - storage/ndb/test/ndbapi/bench/userInterface.cpp | 744 - storage/ndb/test/ndbapi/bench/userInterface.h | 151 - storage/ndb/test/ndbapi/benchronja.cpp | 1208 -- storage/ndb/test/ndbapi/bulk_copy.cpp | 276 - storage/ndb/test/ndbapi/cdrserver.cpp | 1628 -- storage/ndb/test/ndbapi/celloDb.cpp | 1504 -- storage/ndb/test/ndbapi/create_all_tabs.cpp | 69 - storage/ndb/test/ndbapi/create_tab.cpp | 138 - storage/ndb/test/ndbapi/drop_all_tabs.cpp | 63 - storage/ndb/test/ndbapi/flexAsynch.cpp | 1002 - storage/ndb/test/ndbapi/flexBench.cpp | 1166 -- storage/ndb/test/ndbapi/flexHammer.cpp | 888 - storage/ndb/test/ndbapi/flexScan.cpp | 1662 -- storage/ndb/test/ndbapi/flexTT.cpp | 944 - storage/ndb/test/ndbapi/flexTimedAsynch.cpp | 859 - storage/ndb/test/ndbapi/flex_bench_mysql.cpp | 1751 -- storage/ndb/test/ndbapi/index.cpp | 998 - storage/ndb/test/ndbapi/index2.cpp | 836 - storage/ndb/test/ndbapi/initronja.cpp | 332 - storage/ndb/test/ndbapi/interpreterInTup.cpp | 1518 -- storage/ndb/test/ndbapi/mainAsyncGenerator.cpp | 391 - storage/ndb/test/ndbapi/msa.cpp | 1206 -- storage/ndb/test/ndbapi/ndb_async1.cpp | 647 - storage/ndb/test/ndbapi/ndb_async2.cpp | 754 - storage/ndb/test/ndbapi/ndb_user_populate.cpp | 165 - storage/ndb/test/ndbapi/ndb_user_transaction.cpp | 825 - storage/ndb/test/ndbapi/ndb_user_transaction2.cpp | 825 - storage/ndb/test/ndbapi/ndb_user_transaction3.cpp | 793 - storage/ndb/test/ndbapi/ndb_user_transaction4.cpp | 770 - storage/ndb/test/ndbapi/ndb_user_transaction5.cpp | 769 - storage/ndb/test/ndbapi/ndb_user_transaction6.cpp | 561 - storage/ndb/test/ndbapi/restarter.cpp | 130 - storage/ndb/test/ndbapi/restarter2.cpp | 117 - storage/ndb/test/ndbapi/restarts.cpp | 116 - storage/ndb/test/ndbapi/size.cpp | 28 - storage/ndb/test/ndbapi/slow_select.cpp | 243 - storage/ndb/test/ndbapi/testBackup.cpp | 559 - storage/ndb/test/ndbapi/testBasic.cpp | 1850 -- storage/ndb/test/ndbapi/testBasicAsynch.cpp | 187 - storage/ndb/test/ndbapi/testBitfield.cpp | 623 - storage/ndb/test/ndbapi/testBlobs.cpp | 2230 --- storage/ndb/test/ndbapi/testDataBuffers.cpp | 642 - storage/ndb/test/ndbapi/testDeadlock.cpp | 523 - storage/ndb/test/ndbapi/testDict.cpp | 3301 ---- storage/ndb/test/ndbapi/testGrepVerify.cpp | 118 - storage/ndb/test/ndbapi/testIndex.cpp | 1758 -- storage/ndb/test/ndbapi/testIndexStat.cpp | 1405 -- storage/ndb/test/ndbapi/testInterpreter.cpp | 443 - storage/ndb/test/ndbapi/testLcp.cpp | 558 - storage/ndb/test/ndbapi/testMgm.cpp | 839 - storage/ndb/test/ndbapi/testNDBT.cpp | 174 - storage/ndb/test/ndbapi/testNdbApi.cpp | 1736 -- storage/ndb/test/ndbapi/testNodeRestart.cpp | 2517 --- storage/ndb/test/ndbapi/testOIBasic.cpp | 5850 ------ storage/ndb/test/ndbapi/testOperations.cpp | 796 - storage/ndb/test/ndbapi/testOrderedIndex.cpp | 225 - storage/ndb/test/ndbapi/testPartitioning.cpp | 448 - storage/ndb/test/ndbapi/testReadPerf.cpp | 409 - storage/ndb/test/ndbapi/testRestartGci.cpp | 222 - storage/ndb/test/ndbapi/testSRBank.cpp | 298 - storage/ndb/test/ndbapi/testScan.cpp | 1757 -- storage/ndb/test/ndbapi/testScanFilter.cpp | 861 - storage/ndb/test/ndbapi/testScanInterpreter.cpp | 281 - storage/ndb/test/ndbapi/testScanPerf.cpp | 372 - storage/ndb/test/ndbapi/testSystemRestart.cpp | 1863 -- storage/ndb/test/ndbapi/testTimeout.cpp | 562 - storage/ndb/test/ndbapi/testTransactions.cpp | 419 - storage/ndb/test/ndbapi/test_event.cpp | 2051 -- 
storage/ndb/test/ndbapi/test_event_merge.cpp | 2331 --- storage/ndb/test/ndbapi/test_event_multi_table.cpp | 558 - storage/ndb/test/ndbapi/userInterface.cpp | 117 - storage/ndb/test/ndbnet/test.run | 19 - storage/ndb/test/ndbnet/testError.run | 284 - storage/ndb/test/ndbnet/testMNF.run | 295 - storage/ndb/test/ndbnet/testNR.run | 76 - storage/ndb/test/ndbnet/testNR1.run | 79 - storage/ndb/test/ndbnet/testNR4.run | 95 - storage/ndb/test/ndbnet/testSRhang.run | 66 - storage/ndb/test/ndbnet/testTR295.run | 93 - storage/ndb/test/newtonapi/basic_test/Makefile | 25 - .../ndb/test/newtonapi/basic_test/basic/Makefile | 14 - .../ndb/test/newtonapi/basic_test/basic/basic.cpp | 321 - .../test/newtonapi/basic_test/bulk_read/Makefile | 14 - .../newtonapi/basic_test/bulk_read/br_test.cpp | 262 - storage/ndb/test/newtonapi/basic_test/common.cpp | 133 - storage/ndb/test/newtonapi/basic_test/common.hpp | 66 - .../test/newtonapi/basic_test/ptr_binding/Makefile | 14 - .../basic_test/ptr_binding/ptr_binding_test.cpp | 264 - .../ndb/test/newtonapi/basic_test/too_basic.cpp | 105 - storage/ndb/test/newtonapi/perf_test/Makefile | 14 - storage/ndb/test/newtonapi/perf_test/perf.cpp | 647 - storage/ndb/test/odbc/SQL99_test/Makefile | 26 - storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp | 2145 -- storage/ndb/test/odbc/SQL99_test/SQL99_test.h | 261 - storage/ndb/test/odbc/client/Makefile | 95 - storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp | 53 - .../ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp | 59 - storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp | 82 - storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp | 109 - storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp | 115 - .../ndb/test/odbc/client/SQLAllocHandleTest.cpp | 314 - .../ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp | 259 - storage/ndb/test/odbc/client/SQLBindColTest.cpp | 537 - .../ndb/test/odbc/client/SQLBindParameterTest.cpp | 219 - storage/ndb/test/odbc/client/SQLCancelTest.cpp | 254 - .../ndb/test/odbc/client/SQLCloseCursorTest.cpp | 92 - .../ndb/test/odbc/client/SQLColAttributeTest.cpp | 328 - .../ndb/test/odbc/client/SQLColAttributeTest1.cpp | 143 - .../ndb/test/odbc/client/SQLColAttributeTest2.cpp | 277 - .../ndb/test/odbc/client/SQLColAttributeTest3.cpp | 275 - storage/ndb/test/odbc/client/SQLConnectTest.cpp | 165 - storage/ndb/test/odbc/client/SQLCopyDescTest.cpp | 140 - .../ndb/test/odbc/client/SQLDescribeColTest.cpp | 260 - storage/ndb/test/odbc/client/SQLDisconnectTest.cpp | 155 - .../ndb/test/odbc/client/SQLDriverConnectTest.cpp | 96 - storage/ndb/test/odbc/client/SQLEndTranTest.cpp | 108 - storage/ndb/test/odbc/client/SQLErrorTest.cpp | 107 - storage/ndb/test/odbc/client/SQLExecDirectTest.cpp | 353 - storage/ndb/test/odbc/client/SQLExecuteTest.cpp | 122 - .../ndb/test/odbc/client/SQLFetchScrollTest.cpp | 82 - storage/ndb/test/odbc/client/SQLFetchTest.cpp | 438 - storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp | 195 - storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp | 182 - .../ndb/test/odbc/client/SQLGetConnectAttrTest.cpp | 131 - .../ndb/test/odbc/client/SQLGetCursorNameTest.cpp | 221 - storage/ndb/test/odbc/client/SQLGetDataTest.cpp | 358 - .../ndb/test/odbc/client/SQLGetDescFieldTest.cpp | 113 - storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp | 95 - .../ndb/test/odbc/client/SQLGetDiagFieldTest.cpp | 236 - .../test/odbc/client/SQLGetDiagRecSimpleTest.cpp | 167 - storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp | 207 - storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp | 110 - .../ndb/test/odbc/client/SQLGetFunctionsTest.cpp | 284 
- storage/ndb/test/odbc/client/SQLGetInfoTest.cpp | 215 - .../ndb/test/odbc/client/SQLGetStmtAttrTest.cpp | 155 - .../ndb/test/odbc/client/SQLGetTypeInfoTest.cpp | 202 - .../ndb/test/odbc/client/SQLMoreResultsTest.cpp | 91 - .../ndb/test/odbc/client/SQLNumResultColsTest.cpp | 202 - storage/ndb/test/odbc/client/SQLParamDataTest.cpp | 105 - storage/ndb/test/odbc/client/SQLPrepareTest.cpp | 285 - storage/ndb/test/odbc/client/SQLPutDataTest.cpp | 108 - storage/ndb/test/odbc/client/SQLRowCountTest.cpp | 203 - .../ndb/test/odbc/client/SQLSetConnectAttrTest.cpp | 131 - .../ndb/test/odbc/client/SQLSetCursorNameTest.cpp | 215 - .../ndb/test/odbc/client/SQLSetDescFieldTest.cpp | 100 - storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp | 99 - storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp | 108 - .../ndb/test/odbc/client/SQLSetStmtAttrTest.cpp | 108 - storage/ndb/test/odbc/client/SQLTablesTest.cpp | 227 - storage/ndb/test/odbc/client/SQLTransactTest.cpp | 305 - storage/ndb/test/odbc/client/common.hpp | 81 - storage/ndb/test/odbc/client/main.cpp | 158 - storage/ndb/test/odbc/dm-iodbc/Makefile | 38 - storage/ndb/test/odbc/dm-unixodbc/Makefile | 39 - storage/ndb/test/odbc/driver/Makefile | 30 - storage/ndb/test/odbc/driver/testOdbcDriver.cpp | 4948 ----- storage/ndb/test/odbc/test_compiler/Makefile | 21 - .../ndb/test/odbc/test_compiler/test_compiler.cpp | 233 - storage/ndb/test/run-test/16node-tests.txt | 749 - storage/ndb/test/run-test/ATRT_SETUP_README.txt | 292 - storage/ndb/test/run-test/ATRT_USAGE_README.txt | 224 - storage/ndb/test/run-test/Makefile.am | 55 - storage/ndb/test/run-test/README | 43 - storage/ndb/test/run-test/README.ATRT | 34 - storage/ndb/test/run-test/atrt-analyze-result.sh | 30 - storage/ndb/test/run-test/atrt-clear-result.sh | 22 - storage/ndb/test/run-test/atrt-example.tgz | Bin 2196 -> 0 bytes storage/ndb/test/run-test/atrt-gather-result.sh | 34 - storage/ndb/test/run-test/atrt-mysql-test-run | 36 - storage/ndb/test/run-test/atrt-setup.sh | 24 - storage/ndb/test/run-test/atrt-testBackup | 24 - storage/ndb/test/run-test/atrt.hpp | 161 - storage/ndb/test/run-test/autotest-boot.sh | 205 - storage/ndb/test/run-test/autotest-run.sh | 287 - storage/ndb/test/run-test/basic.txt | 779 - storage/ndb/test/run-test/conf-dl145a.cnf | 26 - storage/ndb/test/run-test/conf-ndbmaster.cnf | 23 - storage/ndb/test/run-test/conf-repl.cnf | 28 - storage/ndb/test/run-test/conf-test.cnf | 26 - storage/ndb/test/run-test/daily-basic-tests.txt | 1072 - storage/ndb/test/run-test/daily-devel-tests.txt | 260 - storage/ndb/test/run-test/example-my.cnf | 116 - storage/ndb/test/run-test/example.conf | 10 - storage/ndb/test/run-test/files.cpp | 402 - storage/ndb/test/run-test/main.cpp | 1249 -- storage/ndb/test/run-test/make-config.sh | 119 - storage/ndb/test/run-test/make-html-reports.sh | 210 - storage/ndb/test/run-test/make-index.sh | 261 - storage/ndb/test/run-test/ndb-autotest.sh | 424 - storage/ndb/test/run-test/setup.cpp | 990 - storage/ndb/test/run-test/test-tests.txt | 24 - storage/ndb/test/run-test/upgrade-boot.sh | 235 - storage/ndb/test/src/AtrtClient.cpp | 216 - storage/ndb/test/src/CpcClient.cpp | 552 - storage/ndb/test/src/DbUtil.cpp | 678 - storage/ndb/test/src/HugoAsynchTransactions.cpp | 486 - storage/ndb/test/src/HugoCalculator.cpp | 314 - storage/ndb/test/src/HugoOperations.cpp | 753 - storage/ndb/test/src/HugoTransactions.cpp | 1864 -- storage/ndb/test/src/Makefile.am | 47 - storage/ndb/test/src/NDBT_Error.cpp | 283 - storage/ndb/test/src/NDBT_Output.cpp | 36 - 
storage/ndb/test/src/NDBT_ResultRow.cpp | 156 - storage/ndb/test/src/NDBT_ReturnCodes.cpp | 50 - storage/ndb/test/src/NDBT_Table.cpp | 92 - storage/ndb/test/src/NDBT_Tables.cpp | 1116 -- storage/ndb/test/src/NDBT_Test.cpp | 1525 -- storage/ndb/test/src/NDBT_Thread.cpp | 284 - storage/ndb/test/src/NdbBackup.cpp | 452 - storage/ndb/test/src/NdbConfig.cpp | 83 - storage/ndb/test/src/NdbGrep.cpp | 333 - storage/ndb/test/src/NdbMixRestarter.cpp | 313 - storage/ndb/test/src/NdbRestarter.cpp | 751 - storage/ndb/test/src/NdbRestarts.cpp | 875 - storage/ndb/test/src/NdbSchemaCon.cpp | 169 - storage/ndb/test/src/NdbSchemaOp.cpp | 219 - storage/ndb/test/src/UtilTransactions.cpp | 1472 -- storage/ndb/test/src/getarg.c | 608 - storage/ndb/test/tools/Makefile.am | 47 - storage/ndb/test/tools/connect.cpp | 151 - storage/ndb/test/tools/copy_tab.cpp | 103 - storage/ndb/test/tools/cpcc.cpp | 352 - storage/ndb/test/tools/create_index.cpp | 111 - storage/ndb/test/tools/hugoCalculator.cpp | 71 - storage/ndb/test/tools/hugoFill.cpp | 86 - storage/ndb/test/tools/hugoLoad.cpp | 120 - storage/ndb/test/tools/hugoLockRecords.cpp | 96 - storage/ndb/test/tools/hugoPkDelete.cpp | 181 - storage/ndb/test/tools/hugoPkRead.cpp | 178 - storage/ndb/test/tools/hugoPkReadRecord.cpp | 146 - storage/ndb/test/tools/hugoPkUpdate.cpp | 183 - storage/ndb/test/tools/hugoScanRead.cpp | 132 - storage/ndb/test/tools/hugoScanUpdate.cpp | 114 - storage/ndb/test/tools/listen.cpp | 375 - storage/ndb/test/tools/log_listner.cpp | 106 - storage/ndb/test/tools/rep_latency.cpp | 305 - storage/ndb/test/tools/restart.cpp | 84 - storage/ndb/test/tools/transproxy.cpp | 361 - storage/ndb/test/tools/verify_index.cpp | 91 - storage/ndb/tools/Makefile.am | 183 - storage/ndb/tools/clean-links.sh | 37 - storage/ndb/tools/delete_all.cpp | 228 - storage/ndb/tools/desc.cpp | 353 - storage/ndb/tools/drop_index.cpp | 98 - storage/ndb/tools/drop_tab.cpp | 99 - storage/ndb/tools/listTables.cpp | 328 - storage/ndb/tools/make-errors.pl | 197 - storage/ndb/tools/make-links.sh | 36 - storage/ndb/tools/ndb_config.cpp | 561 - storage/ndb/tools/ndb_error_reporter | 104 - storage/ndb/tools/ndb_size.pl | 1794 -- storage/ndb/tools/ndb_test_platform.cpp | 95 - storage/ndb/tools/ndbsql.cpp | 957 - storage/ndb/tools/restore/Restore.cpp | 1295 -- storage/ndb/tools/restore/Restore.hpp | 429 - storage/ndb/tools/restore/consumer.cpp | 117 - storage/ndb/tools/restore/consumer.hpp | 48 - storage/ndb/tools/restore/consumer_printer.cpp | 75 - storage/ndb/tools/restore/consumer_printer.hpp | 57 - storage/ndb/tools/restore/consumer_restore.cpp | 1593 -- storage/ndb/tools/restore/consumer_restore.hpp | 124 - storage/ndb/tools/restore/consumer_restorem.cpp | 654 - storage/ndb/tools/restore/ndb_nodegroup_map.h | 35 - storage/ndb/tools/restore/restore_main.cpp | 978 - storage/ndb/tools/rgrep | 210 - storage/ndb/tools/select_all.cpp | 446 - storage/ndb/tools/select_count.cpp | 218 - storage/ndb/tools/waiter.cpp | 290 - storage/spider/plug.in | 6 - storage/xtradb/handler/i_s.cc | 2 +- support-files/CMakeLists.txt | 11 - support-files/MySQL-shared-compat.spec.sh | 96 - support-files/compiler_warnings.supp | 8 - support-files/config.huge.ini.sh | 228 - support-files/config.medium.ini.sh | 139 - support-files/config.small.ini.sh | 80 - support-files/mysql.spec.sh | 2111 -- support-files/ndb-config-2-node.ini.sh | 58 - 1663 files changed, 138 insertions(+), 530970 deletions(-) delete mode 100644 .bzrignore delete mode 100755 BUILD/build_mccge.sh delete mode 100644 BUILD/compile-amd64-max-sci 
delete mode 100755 BUILD/compile-ndb-autotest delete mode 100644 BUILD/compile-pentium64-max-sci delete mode 100644 debian/patches/02_no_builtin_ndbcluster_plugin.dpatch delete mode 100644 man/ndbd.8 delete mode 100644 man/ndbd_redo_log_reader.1 delete mode 100644 man/ndbmtd.8 delete mode 100644 mysql-test/extra/rpl_tests/rpl_ndb_2multi_basic.test delete mode 100644 mysql-test/extra/rpl_tests/rpl_ndb_2multi_eng.test delete mode 100644 mysql-test/extra/rpl_tests/rpl_ndb_apply_status.test delete mode 100644 mysql-test/include/default_ndbd.cnf delete mode 100644 mysql-test/include/have_multi_ndb.inc delete mode 100644 mysql-test/include/have_ndb.inc delete mode 100644 mysql-test/include/have_ndb_extra.inc delete mode 100644 mysql-test/include/have_ndbapi_examples.inc delete mode 100644 mysql-test/include/ndb_backup.inc delete mode 100644 mysql-test/include/ndb_backup_print.inc delete mode 100644 mysql-test/include/ndb_default_cluster.inc delete mode 100644 mysql-test/include/ndb_master-slave.inc delete mode 100644 mysql-test/include/ndb_master-slave_2ch.inc delete mode 100644 mysql-test/include/ndb_not_readonly.inc delete mode 100644 mysql-test/include/ndb_restore_master.inc delete mode 100644 mysql-test/include/ndb_restore_slave_eoption.inc delete mode 100644 mysql-test/include/ndb_setup_slave.inc delete mode 100644 mysql-test/include/ndb_wait_connected.inc delete mode 100644 mysql-test/include/not_ndb.inc delete mode 100644 mysql-test/include/not_ndb_default.inc delete mode 100644 mysql-test/include/safe_set_to_maybe_ro_var.inc delete mode 100644 mysql-test/include/select_ndb_apply_status.inc delete mode 100644 mysql-test/include/wait_for_ndb_to_binlog.inc delete mode 100644 mysql-test/lib/v1/ndb_config_1_node.ini delete mode 100644 mysql-test/lib/v1/ndb_config_2_node.ini delete mode 100644 mysql-test/r/have_ndb_extra.require delete mode 100644 mysql-test/r/have_ndbapi_examples.require delete mode 100644 mysql-test/r/ndb_default_cluster.require delete mode 100644 mysql-test/r/not_ndb.require delete mode 100644 mysql-test/r/not_ndb_default.require delete mode 100644 mysql-test/std_data/funcs_1/ndb_tb1.txt delete mode 100644 mysql-test/std_data/funcs_1/ndb_tb2.txt delete mode 100644 mysql-test/std_data/funcs_1/ndb_tb3.txt delete mode 100644 mysql-test/std_data/funcs_1/ndb_tb4.txt delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1.1.log delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-1.2.log delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2-0.1.Data delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2-0.2.Data delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2.1.ctl delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2.1.log delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2.2.ctl delete mode 100644 mysql-test/std_data/ndb_backup50/BACKUP-2.2.log delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1.1.log delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl delete mode 100644 mysql-test/std_data/ndb_backup51/BACKUP-1.2.log delete mode 
100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.1.Data delete mode 100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.2.Data delete mode 100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.ctl delete mode 100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.log delete mode 100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.ctl delete mode 100644 mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.log delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.1.Data delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.2.Data delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.ctl delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.log delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.ctl delete mode 100644 mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.log delete mode 100644 mysql-test/std_data/ndb_config_config.ini delete mode 100644 mysql-test/std_data/ndb_config_mycnf1.cnf delete mode 100644 mysql-test/std_data/ndb_config_mycnf2.cnf delete mode 100644 mysql-test/suite/binlog/r/binlog_multi_engine.result delete mode 100644 mysql-test/suite/binlog/t/binlog_multi_engine.test delete mode 100644 mysql-test/suite/funcs_1/include/ndb_tb1.inc delete mode 100644 mysql-test/suite/funcs_1/include/ndb_tb2.inc delete mode 100644 mysql-test/suite/funcs_1/include/ndb_tb3.inc delete mode 100644 mysql-test/suite/funcs_1/include/ndb_tb4.inc delete mode 100644 mysql-test/suite/sys_vars/r/have_ndbcluster_basic.result delete mode 100644 packaging/rpm-oel/CMakeLists.txt delete mode 100755 packaging/rpm-oel/filter-provides.sh delete mode 100755 packaging/rpm-oel/filter-requires.sh delete mode 100644 packaging/rpm-oel/my.cnf delete mode 100644 packaging/rpm-oel/my_config.h delete mode 100644 packaging/rpm-oel/mysql-5.5-libmysqlclient-symbols.patch delete mode 100644 packaging/rpm-oel/mysql-systemd-start delete mode 100644 packaging/rpm-oel/mysql.conf delete mode 100644 packaging/rpm-oel/mysql.init delete mode 100644 packaging/rpm-oel/mysql.spec.in delete mode 100644 packaging/rpm-oel/mysql_config.sh delete mode 100644 packaging/rpm-oel/mysqld.service delete mode 100644 packaging/rpm-uln/CMakeLists.txt delete mode 100644 packaging/rpm-uln/README-ULN delete mode 100644 packaging/rpm-uln/README.mysql-docs delete mode 100755 packaging/rpm-uln/filter-requires-mysql.sh delete mode 100755 packaging/rpm-uln/generate-tarball.sh delete mode 100644 packaging/rpm-uln/my.cnf delete mode 100644 packaging/rpm-uln/my_config.h delete mode 100644 packaging/rpm-uln/mysql-5.5-errno.patch delete mode 100644 packaging/rpm-uln/mysql-5.5-fix-tests.patch delete mode 100644 packaging/rpm-uln/mysql-5.5-libdir.patch delete mode 100644 packaging/rpm-uln/mysql-5.5-mtr1.patch delete mode 100644 packaging/rpm-uln/mysql-5.5-stack-guard.patch delete mode 100644 packaging/rpm-uln/mysql-5.5-testing.patch delete mode 100644 packaging/rpm-uln/mysql-chain-certs.patch delete mode 100644 packaging/rpm-uln/mysql-embedded-check.c delete mode 100644 packaging/rpm-uln/mysql-expired-certs.patch delete mode 100644 packaging/rpm-uln/mysql-install-test.patch delete mode 100644 packaging/rpm-uln/mysql-strmov.patch delete mode 100644 packaging/rpm-uln/mysql.init delete mode 100644 packaging/rpm-uln/mysql.spec.sh delete mode 100644 packaging/rpm-uln/scriptstub.c delete mode 100644 plugin/handler_socket/plug.in delete mode 100644 scripts/make_binary_distribution.sh delete mode 100644 sql/ha_ndbcluster.cc delete mode 100644 
sql/ha_ndbcluster.h delete mode 100644 sql/ha_ndbcluster_binlog.cc delete mode 100644 sql/ha_ndbcluster_binlog.h delete mode 100644 sql/ha_ndbcluster_cond.cc delete mode 100644 sql/ha_ndbcluster_cond.h delete mode 100644 sql/ha_ndbcluster_tables.h delete mode 100644 storage/ndb/MAINTAINERS delete mode 100644 storage/ndb/Makefile.am delete mode 100644 storage/ndb/bin/.empty delete mode 100755 storage/ndb/bin/check-regression.sh delete mode 100755 storage/ndb/bin/makeTestPrograms_html.sh delete mode 100644 storage/ndb/config/common.mk.am delete mode 100755 storage/ndb/config/make-win-dsw.sh delete mode 100644 storage/ndb/config/type_kernel.mk.am delete mode 100644 storage/ndb/config/type_mgmapiclient.mk.am delete mode 100644 storage/ndb/config/type_ndbapi.mk.am delete mode 100644 storage/ndb/config/type_ndbapiclient.mk.am delete mode 100644 storage/ndb/config/type_ndbapitest.mk.am delete mode 100644 storage/ndb/config/type_ndbapitools.mk.am delete mode 100644 storage/ndb/config/type_util.mk.am delete mode 100755 storage/ndb/config/win-includes delete mode 100644 storage/ndb/config/win-lib.am delete mode 100755 storage/ndb/config/win-libraries delete mode 100755 storage/ndb/config/win-name delete mode 100644 storage/ndb/config/win-prg.am delete mode 100755 storage/ndb/config/win-sources delete mode 100644 storage/ndb/demos/1-node/1-api-3/Ndb.cfg delete mode 100644 storage/ndb/demos/1-node/1-db-2/Ndb.cfg delete mode 100644 storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg delete mode 100644 storage/ndb/demos/1-node/1-mgm-1/template_config.ini delete mode 100644 storage/ndb/demos/2-node/2-api-4/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-api-5/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-api-6/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-api-7/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-db-2/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-db-3/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg delete mode 100644 storage/ndb/demos/2-node/2-mgm-1/template_config.ini delete mode 100644 storage/ndb/demos/config-templates/config_template-1-REP.ini delete mode 100644 storage/ndb/demos/config-templates/config_template-4.ini delete mode 100644 storage/ndb/demos/config-templates/config_template-install.ini delete mode 100644 storage/ndb/demos/run_demo1-PS-SS_common.sh delete mode 100755 storage/ndb/demos/run_demo1-PS.sh delete mode 100755 storage/ndb/demos/run_demo1-SS.sh delete mode 100755 storage/ndb/demos/run_demo1.sh delete mode 100755 storage/ndb/demos/run_demo2.sh delete mode 100644 storage/ndb/docs/Makefile.am delete mode 100644 storage/ndb/docs/README delete mode 100644 storage/ndb/docs/doxygen/Doxyfile.mgmapi delete mode 100644 storage/ndb/docs/doxygen/Doxyfile.ndb delete mode 100644 storage/ndb/docs/doxygen/Doxyfile.ndbapi delete mode 100644 storage/ndb/docs/doxygen/Doxyfile.odbc delete mode 100644 storage/ndb/docs/doxygen/Doxyfile.test delete mode 100755 storage/ndb/docs/doxygen/postdoxy.pl delete mode 100755 storage/ndb/docs/doxygen/predoxy.pl delete mode 100644 storage/ndb/docs/wl2077.txt delete mode 100644 storage/ndb/include/Makefile.am delete mode 100644 storage/ndb/include/debugger/DebuggerNames.hpp delete mode 100644 storage/ndb/include/debugger/EventLogger.hpp delete mode 100644 storage/ndb/include/debugger/GrepError.hpp delete mode 100644 storage/ndb/include/debugger/SignalLoggerManager.hpp delete mode 100644 storage/ndb/include/editline/editline.h delete mode 100644 storage/ndb/include/kernel/AttributeDescriptor.hpp delete mode 
100644 storage/ndb/include/kernel/AttributeHeader.hpp delete mode 100644 storage/ndb/include/kernel/AttributeList.hpp delete mode 100644 storage/ndb/include/kernel/BlockNumbers.h delete mode 100644 storage/ndb/include/kernel/GlobalSignalNumbers.h delete mode 100644 storage/ndb/include/kernel/GrepEvent.hpp delete mode 100644 storage/ndb/include/kernel/Interpreter.hpp delete mode 100644 storage/ndb/include/kernel/LogLevel.hpp delete mode 100644 storage/ndb/include/kernel/NodeBitmask.hpp delete mode 100644 storage/ndb/include/kernel/NodeInfo.hpp delete mode 100644 storage/ndb/include/kernel/NodeState.hpp delete mode 100644 storage/ndb/include/kernel/RefConvert.hpp delete mode 100644 storage/ndb/include/kernel/kernel_config_parameters.h delete mode 100644 storage/ndb/include/kernel/kernel_types.h delete mode 100644 storage/ndb/include/kernel/ndb_limits.h delete mode 100644 storage/ndb/include/kernel/signaldata/AbortAll.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AccFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AccLock.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AccScan.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AllocNodeId.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AlterIndx.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AlterTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AlterTable.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AlterTrig.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ApiBroadcast.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ApiVersion.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/AttrInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/BackupContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/BackupImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/BackupSignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/BuildIndx.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CmInit.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CntrStart.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ConfigParamId.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CopyActive.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CopyFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateEvnt.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp delete mode 100644 
storage/ndb/include/kernel/signaldata/CreateFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateIndx.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateObj.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateTable.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/CreateTrig.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DiAddTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DiGetNodes.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictLock.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictObjOp.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictStart.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DictTabInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihAddFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihFragCount.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihStartTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DisconnectRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropFilegroup.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropFilegroupImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropIndx.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropObj.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropTabFile.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropTable.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DropTrig.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/EmptyLcp.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/EndTo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/EventReport.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ExecFragReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/Extent.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FailRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsAppendReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsCloseReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsOpenReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsRef.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/GCPSave.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/GetTabInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/GetTableId.hpp delete mode 100644 
storage/ndb/include/kernel/signaldata/GrepImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/HotSpareRep.hpp delete mode 100755 storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp delete mode 100755 storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/KeyInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LCP.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LgmanContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ListTables.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LqhFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LqhKey.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/LqhTransConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ManagementServer.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/MasterGCP.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/MasterLCP.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NdbSttor.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NextScan.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NodeFailRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/PackedSignal.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/PgmanContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/PrepDropTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ReadConfig.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/RelTabMem.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/RepImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/RestoreContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/RestoreImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ResumeReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/RouteOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ScanFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/ScanTab.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SetVarReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SignalData.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SrFragidConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartFragReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartMe.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartPerm.hpp delete mode 100644 
storage/ndb/include/kernel/signaldata/StartRec.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StartTo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StopMe.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StopPerm.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/StopReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SumaImpl.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/SystemError.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TamperOrd.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcCommit.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcHbRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcIndx.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcKeyConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcKeyRef.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcKeyReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TestOrd.hpp delete mode 100755 storage/ndb/include/kernel/signaldata/TransIdAI.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TsmanContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TupCommit.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TupFrag.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TupKey.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TuxBound.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TuxContinueB.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TuxMaint.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UpdateTo.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilDelete.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilExecute.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilLock.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilPrepare.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilRelease.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/UtilSequence.hpp delete mode 100644 storage/ndb/include/kernel/signaldata/WaitGCP.hpp delete mode 100644 storage/ndb/include/kernel/trigger_definitions.h delete mode 100644 storage/ndb/include/logger/ConsoleLogHandler.hpp delete mode 100644 storage/ndb/include/logger/FileLogHandler.hpp delete mode 100644 storage/ndb/include/logger/LogHandler.hpp delete mode 100644 storage/ndb/include/logger/Logger.hpp delete mode 100644 storage/ndb/include/logger/SysLogHandler.hpp delete mode 100644 storage/ndb/include/mgmapi/mgmapi.h delete mode 100644 storage/ndb/include/mgmapi/mgmapi_config_parameters.h delete mode 100644 storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h delete mode 100644 storage/ndb/include/mgmapi/mgmapi_debug.h delete mode 100644 storage/ndb/include/mgmapi/mgmapi_error.h delete mode 100644 storage/ndb/include/mgmapi/ndb_logevent.h delete mode 
100644 storage/ndb/include/mgmapi/ndb_logevent.txt delete mode 100644 storage/ndb/include/mgmapi/ndbd_exit_codes.h delete mode 100644 storage/ndb/include/mgmcommon/ConfigRetriever.hpp delete mode 100644 storage/ndb/include/mgmcommon/IPCConfig.hpp delete mode 100644 storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp delete mode 100644 storage/ndb/include/ndb_constants.h delete mode 100644 storage/ndb/include/ndb_global.h.in delete mode 100644 storage/ndb/include/ndb_init.h delete mode 100644 storage/ndb/include/ndb_net.h delete mode 100644 storage/ndb/include/ndb_types.h.in delete mode 100644 storage/ndb/include/ndb_version.h.in delete mode 100644 storage/ndb/include/ndbapi/Ndb.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbApi.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbBlob.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbDictionary.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbError.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbEventOperation.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbIndexOperation.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbIndexStat.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbOperation.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbPool.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbRecAttr.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbReceiver.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbScanFilter.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbScanOperation.hpp delete mode 100644 storage/ndb/include/ndbapi/NdbTransaction.hpp delete mode 100644 storage/ndb/include/ndbapi/ndb_cluster_connection.hpp delete mode 100644 storage/ndb/include/ndbapi/ndb_opt_defaults.h delete mode 100644 storage/ndb/include/ndbapi/ndbapi_limits.h delete mode 100644 storage/ndb/include/ndbapi/ndberror.h delete mode 100644 storage/ndb/include/newtonapi/dba.h delete mode 100644 storage/ndb/include/newtonapi/defs/pcn_types.h delete mode 100644 storage/ndb/include/portlib/NdbCondition.h delete mode 100644 storage/ndb/include/portlib/NdbConfig.h delete mode 100644 storage/ndb/include/portlib/NdbDaemon.h delete mode 100644 storage/ndb/include/portlib/NdbEnv.h delete mode 100644 storage/ndb/include/portlib/NdbHost.h delete mode 100644 storage/ndb/include/portlib/NdbMain.h delete mode 100644 storage/ndb/include/portlib/NdbMem.h delete mode 100644 storage/ndb/include/portlib/NdbMutex.h delete mode 100644 storage/ndb/include/portlib/NdbSleep.h delete mode 100644 storage/ndb/include/portlib/NdbTCP.h delete mode 100644 storage/ndb/include/portlib/NdbThread.h delete mode 100644 storage/ndb/include/portlib/NdbTick.h delete mode 100644 storage/ndb/include/portlib/PortDefs.h delete mode 100644 storage/ndb/include/portlib/prefetch.h delete mode 100644 storage/ndb/include/transporter/TransporterCallback.hpp delete mode 100644 storage/ndb/include/transporter/TransporterDefinitions.hpp delete mode 100644 storage/ndb/include/transporter/TransporterRegistry.hpp delete mode 100644 storage/ndb/include/util/BaseString.hpp delete mode 100644 storage/ndb/include/util/Bitmask.hpp delete mode 100644 storage/ndb/include/util/ConfigValues.hpp delete mode 100644 storage/ndb/include/util/File.hpp delete mode 100644 storage/ndb/include/util/InputStream.hpp delete mode 100644 storage/ndb/include/util/NdbAutoPtr.hpp delete mode 100644 storage/ndb/include/util/NdbOut.hpp delete mode 100644 storage/ndb/include/util/NdbSqlUtil.hpp delete mode 100644 
storage/ndb/include/util/OutputStream.hpp delete mode 100644 storage/ndb/include/util/Parser.hpp delete mode 100644 storage/ndb/include/util/Properties.hpp delete mode 100644 storage/ndb/include/util/SimpleProperties.hpp delete mode 100644 storage/ndb/include/util/SocketAuthenticator.hpp delete mode 100644 storage/ndb/include/util/SocketClient.hpp delete mode 100644 storage/ndb/include/util/SocketServer.hpp delete mode 100644 storage/ndb/include/util/UtilBuffer.hpp delete mode 100644 storage/ndb/include/util/Vector.hpp delete mode 100644 storage/ndb/include/util/basestring_vsnprintf.h delete mode 100644 storage/ndb/include/util/md5_hash.hpp delete mode 100644 storage/ndb/include/util/ndb_opts.h delete mode 100644 storage/ndb/include/util/ndb_rand.h delete mode 100644 storage/ndb/include/util/random.h delete mode 100644 storage/ndb/include/util/socket_io.h delete mode 100644 storage/ndb/include/util/uucode.h delete mode 100644 storage/ndb/include/util/version.h delete mode 100644 storage/ndb/lib/.empty delete mode 100644 storage/ndb/ndb_configure.m4 delete mode 100644 storage/ndb/ndbapi-examples/Makefile delete mode 100644 storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile delete mode 100644 storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp delete mode 100644 storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile delete mode 100644 storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_async/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_async/readme.txt delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_async1/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_event/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_retries/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_scan/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile delete mode 100644 storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp delete mode 100644 storage/ndb/plug.in delete mode 100644 storage/ndb/src/Makefile.am delete mode 100644 storage/ndb/src/common/Makefile.am delete mode 100644 storage/ndb/src/common/debugger/BlockNames.cpp delete mode 100644 storage/ndb/src/common/debugger/DebuggerNames.cpp delete mode 100644 storage/ndb/src/common/debugger/EventLogger.cpp delete mode 100644 storage/ndb/src/common/debugger/GrepError.cpp delete mode 100644 storage/ndb/src/common/debugger/Makefile.am delete mode 100644 storage/ndb/src/common/debugger/SignalLoggerManager.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/AccLock.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/AlterTab.cpp delete mode 100644 
storage/ndb/src/common/debugger/signaldata/AlterTable.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CntrStart.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/ContinueB.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DropIndx.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DropTab.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/DropTrig.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FailRep.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsConf.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/FsRef.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/GCPSave.cpp delete mode 100755 storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp delete mode 100755 storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/LCP.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/LqhKey.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/Makefile.am delete mode 100644 storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/ScanTab.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp delete mode 100644 
storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/SignalNames.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/StartRec.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/SystemError.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TcIndx.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TcRollbackRep.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TupCommit.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TupKey.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/UtilLock.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp delete mode 100644 storage/ndb/src/common/debugger/signaldata/print.awk delete mode 100644 storage/ndb/src/common/logger/ConsoleLogHandler.cpp delete mode 100644 storage/ndb/src/common/logger/FileLogHandler.cpp delete mode 100644 storage/ndb/src/common/logger/LogHandler.cpp delete mode 100644 storage/ndb/src/common/logger/LogHandlerList.cpp delete mode 100644 storage/ndb/src/common/logger/LogHandlerList.hpp delete mode 100644 storage/ndb/src/common/logger/Logger.cpp delete mode 100644 storage/ndb/src/common/logger/Makefile.am delete mode 100644 storage/ndb/src/common/logger/SysLogHandler.cpp delete mode 100644 storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp delete mode 100644 storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp delete mode 100644 storage/ndb/src/common/logger/listtest/Makefile delete mode 100644 storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp delete mode 100644 storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp delete mode 100644 storage/ndb/src/common/logger/loggertest/Makefile delete mode 100644 storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp delete mode 100644 storage/ndb/src/common/mgmcommon/IPCConfig.cpp delete mode 100644 storage/ndb/src/common/mgmcommon/Makefile.am delete mode 100644 storage/ndb/src/common/mgmcommon/printConfig/Makefile delete mode 100644 storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp delete mode 100644 storage/ndb/src/common/portlib/Makefile.am delete mode 100644 storage/ndb/src/common/portlib/NdbCondition.c delete mode 100644 storage/ndb/src/common/portlib/NdbConfig.c delete mode 100644 storage/ndb/src/common/portlib/NdbDaemon.c delete mode 100644 storage/ndb/src/common/portlib/NdbEnv.c delete mode 100644 storage/ndb/src/common/portlib/NdbHost.c delete mode 100644 storage/ndb/src/common/portlib/NdbMem.c delete mode 100644 storage/ndb/src/common/portlib/NdbMutex.c delete mode 100644 storage/ndb/src/common/portlib/NdbPortLibTest.cpp delete mode 100644 storage/ndb/src/common/portlib/NdbSleep.c delete mode 100644 storage/ndb/src/common/portlib/NdbTCP.cpp delete mode 
100644 storage/ndb/src/common/portlib/NdbThread.c delete mode 100644 storage/ndb/src/common/portlib/NdbTick.c delete mode 100644 storage/ndb/src/common/portlib/memtest.c delete mode 100644 storage/ndb/src/common/portlib/mmstest.cpp delete mode 100644 storage/ndb/src/common/portlib/munmaptest.cpp delete mode 100644 storage/ndb/src/common/portlib/win32/NdbCondition.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbDaemon.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbEnv.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbHost.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbMem.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbMutex.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbSleep.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbTCP.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbThread.c delete mode 100644 storage/ndb/src/common/portlib/win32/NdbTick.c delete mode 100644 storage/ndb/src/common/transporter/Makefile.am delete mode 100644 storage/ndb/src/common/transporter/Packer.cpp delete mode 100644 storage/ndb/src/common/transporter/Packer.hpp delete mode 100644 storage/ndb/src/common/transporter/SCI_Transporter.cpp delete mode 100644 storage/ndb/src/common/transporter/SCI_Transporter.hpp delete mode 100644 storage/ndb/src/common/transporter/SHM_Buffer.hpp delete mode 100644 storage/ndb/src/common/transporter/SHM_Transporter.cpp delete mode 100644 storage/ndb/src/common/transporter/SHM_Transporter.hpp delete mode 100644 storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp delete mode 100644 storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp delete mode 100644 storage/ndb/src/common/transporter/SendBuffer.cpp delete mode 100644 storage/ndb/src/common/transporter/SendBuffer.hpp delete mode 100644 storage/ndb/src/common/transporter/TCP_Transporter.cpp delete mode 100644 storage/ndb/src/common/transporter/TCP_Transporter.hpp delete mode 100644 storage/ndb/src/common/transporter/Transporter.cpp delete mode 100644 storage/ndb/src/common/transporter/Transporter.hpp delete mode 100644 storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp delete mode 100644 storage/ndb/src/common/transporter/TransporterRegistry.cpp delete mode 100644 storage/ndb/src/common/transporter/basictest/Makefile delete mode 100644 storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp delete mode 100644 storage/ndb/src/common/transporter/buddy.cpp delete mode 100644 storage/ndb/src/common/transporter/buddy.hpp delete mode 100644 storage/ndb/src/common/transporter/failoverSCI/Makefile delete mode 100644 storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp delete mode 100644 storage/ndb/src/common/transporter/perftest/Makefile delete mode 100644 storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp delete mode 100644 storage/ndb/src/common/transporter/priotest/Makefile delete mode 100644 storage/ndb/src/common/transporter/priotest/prioSCI/Makefile delete mode 100644 storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp delete mode 100644 storage/ndb/src/common/transporter/priotest/prioSHM/Makefile delete mode 100644 storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp delete mode 100644 storage/ndb/src/common/transporter/priotest/prioTCP/Makefile delete mode 100644 storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp delete mode 100644 storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp delete mode 100644 
storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp delete mode 100644 storage/ndb/src/common/util/BaseString.cpp delete mode 100644 storage/ndb/src/common/util/Bitmask.cpp delete mode 100644 storage/ndb/src/common/util/ConfigValues.cpp delete mode 100644 storage/ndb/src/common/util/File.cpp delete mode 100644 storage/ndb/src/common/util/InputStream.cpp delete mode 100644 storage/ndb/src/common/util/Makefile.am delete mode 100644 storage/ndb/src/common/util/NdbOut.cpp delete mode 100644 storage/ndb/src/common/util/NdbSqlUtil.cpp delete mode 100644 storage/ndb/src/common/util/OutputStream.cpp delete mode 100644 storage/ndb/src/common/util/Parser.cpp delete mode 100644 storage/ndb/src/common/util/Properties.cpp delete mode 100644 storage/ndb/src/common/util/SimpleProperties.cpp delete mode 100644 storage/ndb/src/common/util/SocketAuthenticator.cpp delete mode 100644 storage/ndb/src/common/util/SocketClient.cpp delete mode 100644 storage/ndb/src/common/util/SocketServer.cpp delete mode 100644 storage/ndb/src/common/util/basestring_vsnprintf.c delete mode 100644 storage/ndb/src/common/util/filetest/FileUnitTest.cpp delete mode 100644 storage/ndb/src/common/util/filetest/FileUnitTest.hpp delete mode 100644 storage/ndb/src/common/util/filetest/Makefile delete mode 100644 storage/ndb/src/common/util/getarg.cat3 delete mode 100644 storage/ndb/src/common/util/md5_hash.cpp delete mode 100644 storage/ndb/src/common/util/ndb_init.c delete mode 100644 storage/ndb/src/common/util/ndb_rand.c delete mode 100644 storage/ndb/src/common/util/new.cpp delete mode 100644 storage/ndb/src/common/util/random.c delete mode 100644 storage/ndb/src/common/util/socket_io.cpp delete mode 100644 storage/ndb/src/common/util/strdup.c delete mode 100644 storage/ndb/src/common/util/testConfigValues/Makefile delete mode 100644 storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp delete mode 100644 storage/ndb/src/common/util/testProperties/Makefile delete mode 100644 storage/ndb/src/common/util/testProperties/testProperties.cpp delete mode 100644 storage/ndb/src/common/util/testSimpleProperties/Makefile delete mode 100644 storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp delete mode 100644 storage/ndb/src/common/util/uucode.c delete mode 100644 storage/ndb/src/common/util/version.c delete mode 100644 storage/ndb/src/cw/Makefile.am delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/Open.ICO delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/TreeView.h delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/resource.h delete mode 
100644 storage/ndb/src/cw/cpcc-win32/C++/small.ico delete mode 100644 storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/App.ico delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/Database.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/Process.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs delete mode 100644 storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs delete mode 100644 storage/ndb/src/cw/cpcd/APIService.cpp delete mode 100644 storage/ndb/src/cw/cpcd/APIService.hpp delete mode 100644 storage/ndb/src/cw/cpcd/CPCD.cpp delete mode 100644 storage/ndb/src/cw/cpcd/CPCD.hpp delete mode 100644 storage/ndb/src/cw/cpcd/Makefile.am delete mode 100644 storage/ndb/src/cw/cpcd/Monitor.cpp delete mode 100644 storage/ndb/src/cw/cpcd/Process.cpp delete mode 100644 storage/ndb/src/cw/cpcd/common.cpp delete mode 100644 storage/ndb/src/cw/cpcd/common.hpp delete mode 100644 storage/ndb/src/cw/cpcd/main.cpp delete mode 100644 storage/ndb/src/cw/test/socketclient/Makefile delete mode 100644 storage/ndb/src/cw/test/socketclient/socketClientTest.cpp delete mode 100644 storage/ndb/src/cw/util/ClientInterface.cpp delete mode 100644 storage/ndb/src/cw/util/ClientInterface.hpp delete mode 100644 storage/ndb/src/cw/util/Makefile delete mode 100644 storage/ndb/src/cw/util/SocketRegistry.cpp delete mode 100644 storage/ndb/src/cw/util/SocketRegistry.hpp delete mode 100644 storage/ndb/src/cw/util/SocketService.cpp delete mode 100644 storage/ndb/src/cw/util/SocketService.hpp delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib delete mode 100644 storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib delete mode 100644 storage/ndb/src/kernel/Makefile.am delete mode 100644 storage/ndb/src/kernel/SimBlockList.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ERROR_codes.txt delete mode 100644 storage/ndb/src/kernel/blocks/Makefile.am delete mode 100644 
storage/ndb/src/kernel/blocks/NodeRestart.new.txt delete mode 100644 storage/ndb/src/kernel/blocks/NodeRestart.txt delete mode 100644 storage/ndb/src/kernel/blocks/OptNR.txt delete mode 100644 storage/ndb/src/kernel/blocks/Start.txt delete mode 100644 storage/ndb/src/kernel/blocks/SystemRestart.new.txt delete mode 100644 storage/ndb/src/kernel/blocks/SystemRestart.txt delete mode 100644 storage/ndb/src/kernel/blocks/backup/Backup.cpp delete mode 100644 storage/ndb/src/kernel/blocks/backup/Backup.hpp delete mode 100644 storage/ndb/src/kernel/blocks/backup/Backup.txt delete mode 100644 storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp delete mode 100644 storage/ndb/src/kernel/blocks/backup/BackupInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp delete mode 100644 storage/ndb/src/kernel/blocks/backup/Makefile.am delete mode 100644 storage/ndb/src/kernel/blocks/backup/read.cpp delete mode 100644 storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp delete mode 100644 storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/DictLock.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/DropTable.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Event.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Makefile.am delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl delete mode 100644 storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/LCP.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/Makefile.am delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/printSysfile/Makefile delete mode 100644 storage/ndb/src/kernel/blocks/dbdih/printSysfile/printSysfile.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/Makefile.am delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp delete mode 100644 
storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/Makefile.am delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/Notes.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/test_varpage.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/Times.txt delete mode 100644 storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html delete mode 100644 storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp delete mode 100644 storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp delete mode 100644 storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt delete mode 100644 storage/ndb/src/kernel/blocks/diskpage.cpp delete mode 100644 storage/ndb/src/kernel/blocks/diskpage.hpp delete mode 100644 storage/ndb/src/kernel/blocks/lgman.cpp delete mode 100644 storage/ndb/src/kernel/blocks/lgman.hpp delete mode 100644 storage/ndb/src/kernel/blocks/mutexes.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp delete mode 100644 
storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/Makefile delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp delete mode 100644 storage/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp delete mode 100644 storage/ndb/src/kernel/blocks/new-block.tar.gz delete mode 100644 storage/ndb/src/kernel/blocks/pgman.cpp delete mode 100644 storage/ndb/src/kernel/blocks/pgman.hpp delete mode 100644 storage/ndb/src/kernel/blocks/print_file.cpp delete mode 100644 storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp delete mode 100644 storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp delete mode 100644 storage/ndb/src/kernel/blocks/qmgr/timer.hpp delete mode 100644 storage/ndb/src/kernel/blocks/record_types.hpp delete mode 100644 storage/ndb/src/kernel/blocks/restore.cpp delete mode 100644 storage/ndb/src/kernel/blocks/restore.hpp delete mode 100644 storage/ndb/src/kernel/blocks/suma/Suma.cpp delete mode 100644 storage/ndb/src/kernel/blocks/suma/Suma.hpp delete mode 100644 storage/ndb/src/kernel/blocks/suma/Suma.txt delete mode 100644 storage/ndb/src/kernel/blocks/suma/SumaInit.cpp delete mode 100644 storage/ndb/src/kernel/blocks/trix/Trix.cpp delete mode 100644 storage/ndb/src/kernel/blocks/trix/Trix.hpp delete mode 100644 storage/ndb/src/kernel/blocks/tsman.cpp delete mode 100644 storage/ndb/src/kernel/blocks/tsman.hpp delete mode 100644 storage/ndb/src/kernel/error/ErrorHandlingMacros.hpp delete mode 100644 storage/ndb/src/kernel/error/ErrorReporter.cpp delete mode 100644 storage/ndb/src/kernel/error/ErrorReporter.hpp delete mode 100644 storage/ndb/src/kernel/error/Makefile.am delete mode 100644 storage/ndb/src/kernel/error/TimeModule.cpp delete mode 100644 storage/ndb/src/kernel/error/TimeModule.hpp delete mode 100644 storage/ndb/src/kernel/error/ndbd_exit_codes.c delete mode 100644 storage/ndb/src/kernel/main.cpp delete mode 100644 storage/ndb/src/kernel/vm/Array.hpp delete mode 100644 storage/ndb/src/kernel/vm/ArrayPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/CArray.hpp delete mode 100644 storage/ndb/src/kernel/vm/Callback.hpp delete mode 100644 storage/ndb/src/kernel/vm/ClusterConfiguration.cpp delete mode 100644 storage/ndb/src/kernel/vm/ClusterConfiguration.hpp delete mode 100644 storage/ndb/src/kernel/vm/Configuration.cpp delete mode 100644 
storage/ndb/src/kernel/vm/Configuration.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLCFifoList.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLCHashTable.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLFifoList.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLHashTable.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLHashTable2.hpp delete mode 100644 storage/ndb/src/kernel/vm/DLList.hpp delete mode 100644 storage/ndb/src/kernel/vm/DataBuffer.hpp delete mode 100644 storage/ndb/src/kernel/vm/DynArr256.cpp delete mode 100644 storage/ndb/src/kernel/vm/DynArr256.hpp delete mode 100644 storage/ndb/src/kernel/vm/Emulator.cpp delete mode 100644 storage/ndb/src/kernel/vm/Emulator.hpp delete mode 100644 storage/ndb/src/kernel/vm/FastScheduler.cpp delete mode 100644 storage/ndb/src/kernel/vm/FastScheduler.hpp delete mode 100644 storage/ndb/src/kernel/vm/GlobalData.hpp delete mode 100644 storage/ndb/src/kernel/vm/KeyDescriptor.hpp delete mode 100644 storage/ndb/src/kernel/vm/KeyTable.hpp delete mode 100644 storage/ndb/src/kernel/vm/KeyTable2.hpp delete mode 100644 storage/ndb/src/kernel/vm/KeyTable2Ref.hpp delete mode 100644 storage/ndb/src/kernel/vm/LinearPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/LongSignal.hpp delete mode 100644 storage/ndb/src/kernel/vm/Makefile.am delete mode 100644 storage/ndb/src/kernel/vm/Mutex.cpp delete mode 100644 storage/ndb/src/kernel/vm/Mutex.hpp delete mode 100644 storage/ndb/src/kernel/vm/NdbdSuperPool.cpp delete mode 100644 storage/ndb/src/kernel/vm/NdbdSuperPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/Pool.cpp delete mode 100644 storage/ndb/src/kernel/vm/Pool.hpp delete mode 100644 storage/ndb/src/kernel/vm/Prio.hpp delete mode 100644 storage/ndb/src/kernel/vm/RWPool.cpp delete mode 100644 storage/ndb/src/kernel/vm/RWPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/RequestTracker.hpp delete mode 100644 storage/ndb/src/kernel/vm/Rope.cpp delete mode 100644 storage/ndb/src/kernel/vm/Rope.hpp delete mode 100644 storage/ndb/src/kernel/vm/SLFifoList.hpp delete mode 100644 storage/ndb/src/kernel/vm/SLList.hpp delete mode 100644 storage/ndb/src/kernel/vm/SafeCounter.cpp delete mode 100644 storage/ndb/src/kernel/vm/SafeCounter.hpp delete mode 100644 storage/ndb/src/kernel/vm/SectionReader.cpp delete mode 100644 storage/ndb/src/kernel/vm/SectionReader.hpp delete mode 100644 storage/ndb/src/kernel/vm/SignalCounter.hpp delete mode 100644 storage/ndb/src/kernel/vm/SimBlockList.hpp delete mode 100644 storage/ndb/src/kernel/vm/SimplePropertiesSection.cpp delete mode 100644 storage/ndb/src/kernel/vm/SimulatedBlock.cpp delete mode 100644 storage/ndb/src/kernel/vm/SimulatedBlock.hpp delete mode 100644 storage/ndb/src/kernel/vm/SuperPool.cpp delete mode 100644 storage/ndb/src/kernel/vm/SuperPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/ThreadConfig.cpp delete mode 100644 storage/ndb/src/kernel/vm/ThreadConfig.hpp delete mode 100644 storage/ndb/src/kernel/vm/TimeQueue.cpp delete mode 100644 storage/ndb/src/kernel/vm/TimeQueue.hpp delete mode 100644 storage/ndb/src/kernel/vm/TransporterCallback.cpp delete mode 100644 storage/ndb/src/kernel/vm/VMSignal.cpp delete mode 100644 storage/ndb/src/kernel/vm/VMSignal.hpp delete mode 100644 storage/ndb/src/kernel/vm/WOPool.cpp delete mode 100644 storage/ndb/src/kernel/vm/WOPool.hpp delete mode 100644 storage/ndb/src/kernel/vm/WaitQueue.hpp delete mode 100644 storage/ndb/src/kernel/vm/WatchDog.cpp delete mode 100644 storage/ndb/src/kernel/vm/WatchDog.hpp delete mode 100644 
storage/ndb/src/kernel/vm/al_test/Makefile delete mode 100644 storage/ndb/src/kernel/vm/al_test/arrayListTest.cpp delete mode 100644 storage/ndb/src/kernel/vm/al_test/arrayPoolTest.cpp delete mode 100644 storage/ndb/src/kernel/vm/al_test/main.cpp delete mode 100644 storage/ndb/src/kernel/vm/bench_pool.cpp delete mode 100644 storage/ndb/src/kernel/vm/mem.txt delete mode 100644 storage/ndb/src/kernel/vm/ndbd_malloc.cpp delete mode 100644 storage/ndb/src/kernel/vm/ndbd_malloc.hpp delete mode 100644 storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp delete mode 100644 storage/ndb/src/kernel/vm/ndbd_malloc_impl.hpp delete mode 100644 storage/ndb/src/kernel/vm/pc.hpp delete mode 100644 storage/ndb/src/kernel/vm/testCopy/Makefile delete mode 100644 storage/ndb/src/kernel/vm/testCopy/rr.cpp delete mode 100644 storage/ndb/src/kernel/vm/testCopy/testCopy.cpp delete mode 100644 storage/ndb/src/kernel/vm/testDataBuffer/Makefile delete mode 100644 storage/ndb/src/kernel/vm/testDataBuffer/testDataBuffer.cpp delete mode 100644 storage/ndb/src/kernel/vm/testLongSig/Makefile delete mode 100644 storage/ndb/src/kernel/vm/testLongSig/testLongSig.cpp delete mode 100644 storage/ndb/src/kernel/vm/testSimplePropertiesSection/Makefile delete mode 100644 storage/ndb/src/kernel/vm/testSimplePropertiesSection/test.cpp delete mode 100644 storage/ndb/src/kernel/vm/testSuperPool.cpp delete mode 100644 storage/ndb/src/libndb.ver.in delete mode 100644 storage/ndb/src/mgmapi/LocalConfig.cpp delete mode 100644 storage/ndb/src/mgmapi/LocalConfig.hpp delete mode 100644 storage/ndb/src/mgmapi/Makefile.am delete mode 100644 storage/ndb/src/mgmapi/mgmapi.cpp delete mode 100644 storage/ndb/src/mgmapi/mgmapi_configuration.cpp delete mode 100644 storage/ndb/src/mgmapi/mgmapi_configuration.hpp delete mode 100644 storage/ndb/src/mgmapi/mgmapi_internal.h delete mode 100644 storage/ndb/src/mgmapi/ndb_logevent.cpp delete mode 100644 storage/ndb/src/mgmapi/ndb_logevent.hpp delete mode 100644 storage/ndb/src/mgmapi/test/Makefile delete mode 100644 storage/ndb/src/mgmapi/test/keso.c delete mode 100644 storage/ndb/src/mgmapi/test/mgmSrvApi.cpp delete mode 100644 storage/ndb/src/mgmclient/CommandInterpreter.cpp delete mode 100644 storage/ndb/src/mgmclient/Makefile.am delete mode 100644 storage/ndb/src/mgmclient/main.cpp delete mode 100644 storage/ndb/src/mgmclient/ndb_mgmclient.h delete mode 100644 storage/ndb/src/mgmclient/ndb_mgmclient.hpp delete mode 100644 storage/ndb/src/mgmclient/test_cpcd/Makefile delete mode 100644 storage/ndb/src/mgmclient/test_cpcd/test_cpcd.cpp delete mode 100644 storage/ndb/src/mgmsrv/Config.cpp delete mode 100644 storage/ndb/src/mgmsrv/Config.hpp delete mode 100644 storage/ndb/src/mgmsrv/ConfigInfo.cpp delete mode 100644 storage/ndb/src/mgmsrv/ConfigInfo.hpp delete mode 100644 storage/ndb/src/mgmsrv/ERROR_codes.txt delete mode 100644 storage/ndb/src/mgmsrv/InitConfigFileParser.cpp delete mode 100644 storage/ndb/src/mgmsrv/InitConfigFileParser.hpp delete mode 100644 storage/ndb/src/mgmsrv/Makefile.am delete mode 100644 storage/ndb/src/mgmsrv/MgmtSrvr.cpp delete mode 100644 storage/ndb/src/mgmsrv/MgmtSrvr.hpp delete mode 100644 storage/ndb/src/mgmsrv/MgmtSrvrConfig.cpp delete mode 100644 storage/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp delete mode 100644 storage/ndb/src/mgmsrv/Services.cpp delete mode 100644 storage/ndb/src/mgmsrv/Services.hpp delete mode 100644 storage/ndb/src/mgmsrv/SignalQueue.cpp delete mode 100644 storage/ndb/src/mgmsrv/SignalQueue.hpp delete mode 100644 
storage/ndb/src/mgmsrv/convertStrToInt.cpp delete mode 100644 storage/ndb/src/mgmsrv/convertStrToInt.hpp delete mode 100644 storage/ndb/src/mgmsrv/main.cpp delete mode 100644 storage/ndb/src/mgmsrv/mkconfig/Makefile delete mode 100644 storage/ndb/src/mgmsrv/mkconfig/mkconfig.cpp delete mode 100644 storage/ndb/src/mgmsrv/ndb_mgmd_error.h delete mode 100644 storage/ndb/src/ndbapi/API.hpp delete mode 100644 storage/ndb/src/ndbapi/ClusterMgr.cpp delete mode 100644 storage/ndb/src/ndbapi/ClusterMgr.hpp delete mode 100644 storage/ndb/src/ndbapi/DictCache.cpp delete mode 100644 storage/ndb/src/ndbapi/DictCache.hpp delete mode 100644 storage/ndb/src/ndbapi/Makefile.am delete mode 100644 storage/ndb/src/ndbapi/Ndb.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbApiSignal.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbApiSignal.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbBlob.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbBlobImpl.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbDictionary.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbErrorOut.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbEventOperation.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbImpl.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbIndexOperation.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbIndexStat.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbLinHash.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperation.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperationDefine.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperationExec.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperationInt.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperationScan.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbOperationSearch.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbPool.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbPoolImpl.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbPoolImpl.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbRecAttr.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbReceiver.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbScanFilter.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbScanOperation.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbTransaction.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbTransactionScan.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbUtil.cpp delete mode 100644 storage/ndb/src/ndbapi/NdbUtil.hpp delete mode 100644 storage/ndb/src/ndbapi/NdbWaiter.hpp delete mode 100644 storage/ndb/src/ndbapi/Ndberr.cpp delete mode 100644 storage/ndb/src/ndbapi/Ndbif.cpp delete mode 100644 storage/ndb/src/ndbapi/Ndbinit.cpp delete mode 100644 storage/ndb/src/ndbapi/Ndblist.cpp delete mode 100644 storage/ndb/src/ndbapi/ObjectMap.cpp delete mode 100644 storage/ndb/src/ndbapi/ObjectMap.hpp delete mode 100644 storage/ndb/src/ndbapi/ScanOperation.txt delete mode 100644 storage/ndb/src/ndbapi/SignalSender.cpp delete mode 100644 storage/ndb/src/ndbapi/SignalSender.hpp delete mode 100644 storage/ndb/src/ndbapi/TransporterFacade.cpp delete mode 100644 storage/ndb/src/ndbapi/TransporterFacade.hpp delete mode 100644 storage/ndb/src/ndbapi/ndb_cluster_connection.cpp delete mode 100644 storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp delete mode 100644 storage/ndb/src/ndbapi/ndb_internal.hpp delete mode 
100644 storage/ndb/src/ndbapi/ndberror.c delete mode 100644 storage/ndb/src/ndbapi/ndberror_check.c delete mode 100644 storage/ndb/src/ndbapi/signal-sender/Makefile delete mode 100644 storage/ndb/src/ndbapi/signal-sender/SignalSender.cpp delete mode 100644 storage/ndb/src/ndbapi/signal-sender/SignalSender.hpp delete mode 100644 storage/ndb/test/Makefile.am delete mode 100644 storage/ndb/test/include/AtrtClient.hpp delete mode 100644 storage/ndb/test/include/CpcClient.hpp delete mode 100755 storage/ndb/test/include/DbUtil.hpp delete mode 100644 storage/ndb/test/include/HugoAsynchTransactions.hpp delete mode 100644 storage/ndb/test/include/HugoCalculator.hpp delete mode 100644 storage/ndb/test/include/HugoOperations.hpp delete mode 100644 storage/ndb/test/include/HugoTransactions.hpp delete mode 100644 storage/ndb/test/include/NDBT.hpp delete mode 100644 storage/ndb/test/include/NDBT_DataSet.hpp delete mode 100644 storage/ndb/test/include/NDBT_DataSetTransaction.hpp delete mode 100644 storage/ndb/test/include/NDBT_Error.hpp delete mode 100644 storage/ndb/test/include/NDBT_Output.hpp delete mode 100644 storage/ndb/test/include/NDBT_ResultRow.hpp delete mode 100644 storage/ndb/test/include/NDBT_ReturnCodes.h delete mode 100644 storage/ndb/test/include/NDBT_Stats.hpp delete mode 100644 storage/ndb/test/include/NDBT_Table.hpp delete mode 100644 storage/ndb/test/include/NDBT_Tables.hpp delete mode 100644 storage/ndb/test/include/NDBT_Test.hpp delete mode 100644 storage/ndb/test/include/NDBT_Thread.hpp delete mode 100644 storage/ndb/test/include/NdbBackup.hpp delete mode 100644 storage/ndb/test/include/NdbConfig.hpp delete mode 100644 storage/ndb/test/include/NdbGrep.hpp delete mode 100644 storage/ndb/test/include/NdbMixRestarter.hpp delete mode 100644 storage/ndb/test/include/NdbRestarter.hpp delete mode 100644 storage/ndb/test/include/NdbRestarts.hpp delete mode 100644 storage/ndb/test/include/NdbSchemaCon.hpp delete mode 100644 storage/ndb/test/include/NdbSchemaOp.hpp delete mode 100644 storage/ndb/test/include/NdbTest.hpp delete mode 100644 storage/ndb/test/include/NdbTimer.hpp delete mode 100644 storage/ndb/test/include/TestNdbEventOperation.hpp delete mode 100644 storage/ndb/test/include/UtilTransactions.hpp delete mode 100644 storage/ndb/test/include/getarg.h delete mode 100644 storage/ndb/test/ndbapi/InsertRecs.cpp delete mode 100644 storage/ndb/test/ndbapi/Makefile.am delete mode 100644 storage/ndb/test/ndbapi/ScanFilter.hpp delete mode 100644 storage/ndb/test/ndbapi/ScanFunctions.hpp delete mode 100644 storage/ndb/test/ndbapi/ScanInterpretTest.hpp delete mode 100644 storage/ndb/test/ndbapi/TraceNdbApi.cpp delete mode 100644 storage/ndb/test/ndbapi/VerifyNdbApi.cpp delete mode 100644 storage/ndb/test/ndbapi/acid.cpp delete mode 100644 storage/ndb/test/ndbapi/acid2.cpp delete mode 100644 storage/ndb/test/ndbapi/acrt/NdbRepStress.cpp delete mode 100644 storage/ndb/test/ndbapi/adoInsertRecs.cpp delete mode 100644 storage/ndb/test/ndbapi/asyncGenerator.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/Bank.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/Bank.hpp delete mode 100644 storage/ndb/test/ndbapi/bank/BankLoad.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/Makefile.am delete mode 100644 storage/ndb/test/ndbapi/bank/bankCreator.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/bankMakeGL.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/bankTimer.cpp delete mode 100644 
storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp delete mode 100644 storage/ndb/test/ndbapi/bank/testBank.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/asyncGenerator.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/dbGenerator.h delete mode 100644 storage/ndb/test/ndbapi/bench/dbPopulate.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/dbPopulate.h delete mode 100644 storage/ndb/test/ndbapi/bench/macros.h delete mode 100644 storage/ndb/test/ndbapi/bench/mainAsyncGenerator.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/mainPopulate.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_async1.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_async2.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_error.hpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_schema.hpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction2.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction3.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction4.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction5.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/ndb_user_transaction6.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/testData.h delete mode 100644 storage/ndb/test/ndbapi/bench/testDefinitions.h delete mode 100644 storage/ndb/test/ndbapi/bench/userInterface.cpp delete mode 100644 storage/ndb/test/ndbapi/bench/userInterface.h delete mode 100644 storage/ndb/test/ndbapi/benchronja.cpp delete mode 100644 storage/ndb/test/ndbapi/bulk_copy.cpp delete mode 100644 storage/ndb/test/ndbapi/cdrserver.cpp delete mode 100644 storage/ndb/test/ndbapi/celloDb.cpp delete mode 100644 storage/ndb/test/ndbapi/create_all_tabs.cpp delete mode 100644 storage/ndb/test/ndbapi/create_tab.cpp delete mode 100644 storage/ndb/test/ndbapi/drop_all_tabs.cpp delete mode 100644 storage/ndb/test/ndbapi/flexAsynch.cpp delete mode 100644 storage/ndb/test/ndbapi/flexBench.cpp delete mode 100644 storage/ndb/test/ndbapi/flexHammer.cpp delete mode 100644 storage/ndb/test/ndbapi/flexScan.cpp delete mode 100644 storage/ndb/test/ndbapi/flexTT.cpp delete mode 100644 storage/ndb/test/ndbapi/flexTimedAsynch.cpp delete mode 100644 storage/ndb/test/ndbapi/flex_bench_mysql.cpp delete mode 100644 storage/ndb/test/ndbapi/index.cpp delete mode 100644 storage/ndb/test/ndbapi/index2.cpp delete mode 100644 storage/ndb/test/ndbapi/initronja.cpp delete mode 100644 storage/ndb/test/ndbapi/interpreterInTup.cpp delete mode 100644 storage/ndb/test/ndbapi/mainAsyncGenerator.cpp delete mode 100644 storage/ndb/test/ndbapi/msa.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_async1.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_async2.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_populate.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction2.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction3.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction4.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction5.cpp delete mode 100644 storage/ndb/test/ndbapi/ndb_user_transaction6.cpp delete mode 100644 storage/ndb/test/ndbapi/restarter.cpp delete mode 100644 storage/ndb/test/ndbapi/restarter2.cpp delete mode 100644 storage/ndb/test/ndbapi/restarts.cpp delete mode 100644 
storage/ndb/test/ndbapi/size.cpp delete mode 100644 storage/ndb/test/ndbapi/slow_select.cpp delete mode 100644 storage/ndb/test/ndbapi/testBackup.cpp delete mode 100644 storage/ndb/test/ndbapi/testBasic.cpp delete mode 100644 storage/ndb/test/ndbapi/testBasicAsynch.cpp delete mode 100644 storage/ndb/test/ndbapi/testBitfield.cpp delete mode 100644 storage/ndb/test/ndbapi/testBlobs.cpp delete mode 100644 storage/ndb/test/ndbapi/testDataBuffers.cpp delete mode 100644 storage/ndb/test/ndbapi/testDeadlock.cpp delete mode 100644 storage/ndb/test/ndbapi/testDict.cpp delete mode 100644 storage/ndb/test/ndbapi/testGrepVerify.cpp delete mode 100644 storage/ndb/test/ndbapi/testIndex.cpp delete mode 100644 storage/ndb/test/ndbapi/testIndexStat.cpp delete mode 100644 storage/ndb/test/ndbapi/testInterpreter.cpp delete mode 100644 storage/ndb/test/ndbapi/testLcp.cpp delete mode 100644 storage/ndb/test/ndbapi/testMgm.cpp delete mode 100644 storage/ndb/test/ndbapi/testNDBT.cpp delete mode 100644 storage/ndb/test/ndbapi/testNdbApi.cpp delete mode 100644 storage/ndb/test/ndbapi/testNodeRestart.cpp delete mode 100644 storage/ndb/test/ndbapi/testOIBasic.cpp delete mode 100644 storage/ndb/test/ndbapi/testOperations.cpp delete mode 100644 storage/ndb/test/ndbapi/testOrderedIndex.cpp delete mode 100644 storage/ndb/test/ndbapi/testPartitioning.cpp delete mode 100644 storage/ndb/test/ndbapi/testReadPerf.cpp delete mode 100644 storage/ndb/test/ndbapi/testRestartGci.cpp delete mode 100644 storage/ndb/test/ndbapi/testSRBank.cpp delete mode 100644 storage/ndb/test/ndbapi/testScan.cpp delete mode 100644 storage/ndb/test/ndbapi/testScanFilter.cpp delete mode 100644 storage/ndb/test/ndbapi/testScanInterpreter.cpp delete mode 100644 storage/ndb/test/ndbapi/testScanPerf.cpp delete mode 100644 storage/ndb/test/ndbapi/testSystemRestart.cpp delete mode 100644 storage/ndb/test/ndbapi/testTimeout.cpp delete mode 100644 storage/ndb/test/ndbapi/testTransactions.cpp delete mode 100644 storage/ndb/test/ndbapi/test_event.cpp delete mode 100644 storage/ndb/test/ndbapi/test_event_merge.cpp delete mode 100644 storage/ndb/test/ndbapi/test_event_multi_table.cpp delete mode 100644 storage/ndb/test/ndbapi/userInterface.cpp delete mode 100644 storage/ndb/test/ndbnet/test.run delete mode 100644 storage/ndb/test/ndbnet/testError.run delete mode 100644 storage/ndb/test/ndbnet/testMNF.run delete mode 100644 storage/ndb/test/ndbnet/testNR.run delete mode 100644 storage/ndb/test/ndbnet/testNR1.run delete mode 100644 storage/ndb/test/ndbnet/testNR4.run delete mode 100644 storage/ndb/test/ndbnet/testSRhang.run delete mode 100644 storage/ndb/test/ndbnet/testTR295.run delete mode 100644 storage/ndb/test/newtonapi/basic_test/Makefile delete mode 100644 storage/ndb/test/newtonapi/basic_test/basic/Makefile delete mode 100644 storage/ndb/test/newtonapi/basic_test/basic/basic.cpp delete mode 100644 storage/ndb/test/newtonapi/basic_test/bulk_read/Makefile delete mode 100644 storage/ndb/test/newtonapi/basic_test/bulk_read/br_test.cpp delete mode 100644 storage/ndb/test/newtonapi/basic_test/common.cpp delete mode 100644 storage/ndb/test/newtonapi/basic_test/common.hpp delete mode 100644 storage/ndb/test/newtonapi/basic_test/ptr_binding/Makefile delete mode 100644 storage/ndb/test/newtonapi/basic_test/ptr_binding/ptr_binding_test.cpp delete mode 100644 storage/ndb/test/newtonapi/basic_test/too_basic.cpp delete mode 100644 storage/ndb/test/newtonapi/perf_test/Makefile delete mode 100644 storage/ndb/test/newtonapi/perf_test/perf.cpp delete mode 100644 
storage/ndb/test/odbc/SQL99_test/Makefile delete mode 100644 storage/ndb/test/odbc/SQL99_test/SQL99_test.cpp delete mode 100644 storage/ndb/test/odbc/SQL99_test/SQL99_test.h delete mode 100644 storage/ndb/test/odbc/client/Makefile delete mode 100644 storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE.cpp delete mode 100644 storage/ndb/test/odbc/client/NDBT_ALLOCHANDLE_HDBC.cpp delete mode 100644 storage/ndb/test/odbc/client/NDBT_SQLConnect.cpp delete mode 100644 storage/ndb/test/odbc/client/NDBT_SQLPrepare.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLAllocEnvTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLAllocHandleTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLAllocHandleTest_bf.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLBindColTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLBindParameterTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLCancelTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLCloseCursorTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLColAttributeTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLColAttributeTest1.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLColAttributeTest2.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLColAttributeTest3.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLConnectTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLCopyDescTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLDescribeColTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLDisconnectTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLDriverConnectTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLEndTranTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLErrorTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLExecDirectTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLExecuteTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLFetchScrollTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLFetchTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLFreeHandleTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLFreeStmtTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetConnectAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetCursorNameTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDataTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDescFieldTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDescRecTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDiagFieldTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDiagRecSimpleTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetDiagRecTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetEnvAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetFunctionsTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetInfoTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetStmtAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLGetTypeInfoTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLMoreResultsTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLNumResultColsTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLParamDataTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLPrepareTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLPutDataTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLRowCountTest.cpp delete mode 100644 
storage/ndb/test/odbc/client/SQLSetConnectAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLSetCursorNameTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLSetDescFieldTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLSetDescRecTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLSetEnvAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLSetStmtAttrTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLTablesTest.cpp delete mode 100644 storage/ndb/test/odbc/client/SQLTransactTest.cpp delete mode 100644 storage/ndb/test/odbc/client/common.hpp delete mode 100644 storage/ndb/test/odbc/client/main.cpp delete mode 100644 storage/ndb/test/odbc/dm-iodbc/Makefile delete mode 100644 storage/ndb/test/odbc/dm-unixodbc/Makefile delete mode 100644 storage/ndb/test/odbc/driver/Makefile delete mode 100644 storage/ndb/test/odbc/driver/testOdbcDriver.cpp delete mode 100644 storage/ndb/test/odbc/test_compiler/Makefile delete mode 100644 storage/ndb/test/odbc/test_compiler/test_compiler.cpp delete mode 100644 storage/ndb/test/run-test/16node-tests.txt delete mode 100644 storage/ndb/test/run-test/ATRT_SETUP_README.txt delete mode 100644 storage/ndb/test/run-test/ATRT_USAGE_README.txt delete mode 100644 storage/ndb/test/run-test/Makefile.am delete mode 100644 storage/ndb/test/run-test/README delete mode 100644 storage/ndb/test/run-test/README.ATRT delete mode 100755 storage/ndb/test/run-test/atrt-analyze-result.sh delete mode 100755 storage/ndb/test/run-test/atrt-clear-result.sh delete mode 100644 storage/ndb/test/run-test/atrt-example.tgz delete mode 100755 storage/ndb/test/run-test/atrt-gather-result.sh delete mode 100755 storage/ndb/test/run-test/atrt-mysql-test-run delete mode 100755 storage/ndb/test/run-test/atrt-setup.sh delete mode 100755 storage/ndb/test/run-test/atrt-testBackup delete mode 100644 storage/ndb/test/run-test/atrt.hpp delete mode 100644 storage/ndb/test/run-test/autotest-boot.sh delete mode 100644 storage/ndb/test/run-test/autotest-run.sh delete mode 100644 storage/ndb/test/run-test/basic.txt delete mode 100644 storage/ndb/test/run-test/conf-dl145a.cnf delete mode 100644 storage/ndb/test/run-test/conf-ndbmaster.cnf delete mode 100644 storage/ndb/test/run-test/conf-repl.cnf delete mode 100644 storage/ndb/test/run-test/conf-test.cnf delete mode 100644 storage/ndb/test/run-test/daily-basic-tests.txt delete mode 100644 storage/ndb/test/run-test/daily-devel-tests.txt delete mode 100644 storage/ndb/test/run-test/example-my.cnf delete mode 100644 storage/ndb/test/run-test/example.conf delete mode 100644 storage/ndb/test/run-test/files.cpp delete mode 100644 storage/ndb/test/run-test/main.cpp delete mode 100755 storage/ndb/test/run-test/make-config.sh delete mode 100755 storage/ndb/test/run-test/make-html-reports.sh delete mode 100755 storage/ndb/test/run-test/make-index.sh delete mode 100755 storage/ndb/test/run-test/ndb-autotest.sh delete mode 100644 storage/ndb/test/run-test/setup.cpp delete mode 100644 storage/ndb/test/run-test/test-tests.txt delete mode 100644 storage/ndb/test/run-test/upgrade-boot.sh delete mode 100644 storage/ndb/test/src/AtrtClient.cpp delete mode 100644 storage/ndb/test/src/CpcClient.cpp delete mode 100755 storage/ndb/test/src/DbUtil.cpp delete mode 100644 storage/ndb/test/src/HugoAsynchTransactions.cpp delete mode 100644 storage/ndb/test/src/HugoCalculator.cpp delete mode 100644 storage/ndb/test/src/HugoOperations.cpp delete mode 100644 storage/ndb/test/src/HugoTransactions.cpp delete mode 100644 
storage/ndb/test/src/Makefile.am delete mode 100644 storage/ndb/test/src/NDBT_Error.cpp delete mode 100644 storage/ndb/test/src/NDBT_Output.cpp delete mode 100644 storage/ndb/test/src/NDBT_ResultRow.cpp delete mode 100644 storage/ndb/test/src/NDBT_ReturnCodes.cpp delete mode 100644 storage/ndb/test/src/NDBT_Table.cpp delete mode 100644 storage/ndb/test/src/NDBT_Tables.cpp delete mode 100644 storage/ndb/test/src/NDBT_Test.cpp delete mode 100644 storage/ndb/test/src/NDBT_Thread.cpp delete mode 100644 storage/ndb/test/src/NdbBackup.cpp delete mode 100644 storage/ndb/test/src/NdbConfig.cpp delete mode 100644 storage/ndb/test/src/NdbGrep.cpp delete mode 100644 storage/ndb/test/src/NdbMixRestarter.cpp delete mode 100644 storage/ndb/test/src/NdbRestarter.cpp delete mode 100644 storage/ndb/test/src/NdbRestarts.cpp delete mode 100644 storage/ndb/test/src/NdbSchemaCon.cpp delete mode 100644 storage/ndb/test/src/NdbSchemaOp.cpp delete mode 100644 storage/ndb/test/src/UtilTransactions.cpp delete mode 100644 storage/ndb/test/src/getarg.c delete mode 100644 storage/ndb/test/tools/Makefile.am delete mode 100644 storage/ndb/test/tools/connect.cpp delete mode 100644 storage/ndb/test/tools/copy_tab.cpp delete mode 100644 storage/ndb/test/tools/cpcc.cpp delete mode 100644 storage/ndb/test/tools/create_index.cpp delete mode 100644 storage/ndb/test/tools/hugoCalculator.cpp delete mode 100644 storage/ndb/test/tools/hugoFill.cpp delete mode 100644 storage/ndb/test/tools/hugoLoad.cpp delete mode 100644 storage/ndb/test/tools/hugoLockRecords.cpp delete mode 100644 storage/ndb/test/tools/hugoPkDelete.cpp delete mode 100644 storage/ndb/test/tools/hugoPkRead.cpp delete mode 100644 storage/ndb/test/tools/hugoPkReadRecord.cpp delete mode 100644 storage/ndb/test/tools/hugoPkUpdate.cpp delete mode 100644 storage/ndb/test/tools/hugoScanRead.cpp delete mode 100644 storage/ndb/test/tools/hugoScanUpdate.cpp delete mode 100644 storage/ndb/test/tools/listen.cpp delete mode 100644 storage/ndb/test/tools/log_listner.cpp delete mode 100644 storage/ndb/test/tools/rep_latency.cpp delete mode 100644 storage/ndb/test/tools/restart.cpp delete mode 100644 storage/ndb/test/tools/transproxy.cpp delete mode 100644 storage/ndb/test/tools/verify_index.cpp delete mode 100644 storage/ndb/tools/Makefile.am delete mode 100755 storage/ndb/tools/clean-links.sh delete mode 100644 storage/ndb/tools/delete_all.cpp delete mode 100644 storage/ndb/tools/desc.cpp delete mode 100644 storage/ndb/tools/drop_index.cpp delete mode 100644 storage/ndb/tools/drop_tab.cpp delete mode 100644 storage/ndb/tools/listTables.cpp delete mode 100644 storage/ndb/tools/make-errors.pl delete mode 100755 storage/ndb/tools/make-links.sh delete mode 100644 storage/ndb/tools/ndb_config.cpp delete mode 100644 storage/ndb/tools/ndb_error_reporter delete mode 100644 storage/ndb/tools/ndb_size.pl delete mode 100644 storage/ndb/tools/ndb_test_platform.cpp delete mode 100644 storage/ndb/tools/ndbsql.cpp delete mode 100644 storage/ndb/tools/restore/Restore.cpp delete mode 100644 storage/ndb/tools/restore/Restore.hpp delete mode 100644 storage/ndb/tools/restore/consumer.cpp delete mode 100644 storage/ndb/tools/restore/consumer.hpp delete mode 100644 storage/ndb/tools/restore/consumer_printer.cpp delete mode 100644 storage/ndb/tools/restore/consumer_printer.hpp delete mode 100644 storage/ndb/tools/restore/consumer_restore.cpp delete mode 100644 storage/ndb/tools/restore/consumer_restore.hpp delete mode 100644 storage/ndb/tools/restore/consumer_restorem.cpp delete mode 100644 
storage/ndb/tools/restore/ndb_nodegroup_map.h delete mode 100644 storage/ndb/tools/restore/restore_main.cpp delete mode 100755 storage/ndb/tools/rgrep delete mode 100644 storage/ndb/tools/select_all.cpp delete mode 100644 storage/ndb/tools/select_count.cpp delete mode 100644 storage/ndb/tools/waiter.cpp delete mode 100644 storage/spider/plug.in delete mode 100644 support-files/MySQL-shared-compat.spec.sh delete mode 100644 support-files/config.huge.ini.sh delete mode 100644 support-files/config.medium.ini.sh delete mode 100644 support-files/config.small.ini.sh delete mode 100644 support-files/mysql.spec.sh delete mode 100644 support-files/ndb-config-2-node.ini.sh diff --git a/.bzrignore b/.bzrignore deleted file mode 100644 index ba23211d3a3..00000000000 --- a/.bzrignore +++ /dev/null @@ -1,1462 +0,0 @@ -*-t -*.a -*.bb -*.bbg -*.bin -*.cdf -*.core -*.d -*.da -*.dgcov -*.dir/ -*.dll -*.dsp -*.dylib -*.diff -*.exe -*.exp -*.gcda -*.gcno -*.gcov -*.idb -*.ilk -*.la -*.lai -*.lib -*.lo -*.manifest -*.map -*.o -*.obj -*.old -*.pch -*.pdb -*.Plo -*.Po -*.reject -*.res -*.rule -*.sbr -*.so -*.so.* -*.spec -*.Tpo -*.user -*.vcproj -*.vcproj.cmake -*.vcxproj -*.vcxproj.filters -Debug -MySql.sdf -Win32 -RelWithDebInfo -*~ -.*.swp -./CMakeCache.txt -./config.h -./fix-project-files -./mysql*.ds? -./MySql.ncb -./MySql.sln -./MySql.suo -./prepare -./README.build-files -.defs.mk -.depend -.depend.mk -.DS_Store -.gdb_history -.gdbinit -.o -.out -.snprj/* -.vimrc -ac_available_languages_fragment -BitKeeper/ -client/*.ds? -client/completion_hash.cpp -client/decimal.c -client/dtoa.c -client/echo -client/insert_test -client/link_sources -client/log_event.cc -client/log_event.h -client/log_event_old.cc -client/log_event_old.h -client/mf_iocache.c -client/mf_iocache.cc -client/my_decimal.cc -client/my_decimal.h -client/my_user.c -client/mysql -client/mysql.cpp -client/mysql_upgrade -client/mysqladmin -client/mysqladmin.c -client/mysqladmin.cpp -client/mysqlbinlog -client/mysqlbinlog.cpp -client/mysqlcheck -client/mysqldump -client/mysqlimport -client/mysqlmanager-pwgen -client/mysqlmanagerc -client/mysqlshow -client/mysqlslap -client/mysqltest -client/mysqltestmanager-pwgen -client/mysqltestmanagerc -client/mysys_priv.h -client/readline.cpp -client/rpl_constants.h -client/rpl_filter.cc -client/rpl_filter.h -client/rpl_record_old.cc -client/rpl_record_old.h -client/rpl_tblmap.cc -client/rpl_tblmap.h -client/rpl_utility.cc -client/rpl_utility.h -client/rpl_utility.cc -client/select_test -client/sql_const.h -client/sql_list.cc -client/sql_list.h -client/sql_string.cpp -client/ssl_test -client/thimble -client/thread_test -client/tmp.diff -client/transaction.h -client/async_example -client_debug/* -client_release/* -client_test -cmake_install.cmake -CMakeFiles/ -cmd-line-utils/libedit/common.h -cmd-line-utils/libedit/makelist -comments -comon.h -comp_err/*.ds? -compile -config.h -contrib/*.ds? -COPYING -COPYING.LIB -core -core.* -cscope.in.out -cscope.out -cscope.po.out -CTestTestfile.cmake -dbug/*.ds? -dbug/dbug_analyze -dbug/example*.r -dbug/factorial -dbug/factorial.r -dbug/main.r -dbug/output*.r -dbug/tests -dbug/user.ps -dbug/user.t -debian/control -debian/defs.mk -debian/dist/Debian/mariadb-server-10.0.files -debian/dist/Ubuntu/mariadb-server-10.0.files -debug/ -depcomp -Docs/INSTALL-BINARY -examples/*.ds? 
-examples/udf_example/udf_example.def -EXCEPTIONS-CLIENT -extra/charset2html -extra/comp_err -extra/created_include_files -extra/innochecksum -extra/libevent/event-config.h -extra/my_print_defaults -extra/mysql_install -extra/mysql_tzinfo_to_sql -extra/mysql_waitpid -extra/mysqld_ername.h -extra/mysqld_error.h -extra/perror -extra/replace -extra/resolve_stack_dump -extra/resolveip -extra/sql_state.h -extra/tztime.cc -extra/yassl/taocrypt/benchmark/benchmark -extra/yassl/taocrypt/test/test -extra/yassl/testsuite/testsuite -fcns.c -fcns.h -gdbinit -gmon.out -hardcopy.0 -heap/*.ds? -heap/hp_test1 -heap/hp_test2 -help -help.c -help.h -include/abi_check -include/check_abi -include/link_sources -include/my_config.h -include/my_global.h -include/mysql_h.ic -include/mysql_version.h -include/mysqld_ername.h -include/mysqld_error.h -include/mysqld_error.h.rule -include/openssl -include/probes_mysql_dtrace.h -include/readline -include/readline/*.h -include/readline/readline.h -include/sql_state.h -include/widec.h -insert_test -install -install-sh -INSTALL-SOURCE -INSTALL-WIN-SOURCE -item_xmlfunc.cc -lib_debug/* -lib_release/* -libmysql/*.c -libmysql/*.ds? -libmysql/conf_to_src -libmysql/debug/libmysql.exp -libmysql/libmysql.ver -libmysql/libmysql_exports_file.cc -libmysql/link_sources -libmysql/merge_archives_mysqlclient.cmake -libmysql/my_static.h -libmysql/my_time.c -libmysql/mysys_priv.h -libmysql/net.c -libmysql/release/libmysql.exp -libmysql/vio_priv.h -libmysql/viosocket.o.6WmSJk -libmysql_r/*.c -libmysql_r/client_settings.h -libmysql_r/conf_to_src -libmysql_r/link_sources -libmysql_r/my_static.h -libmysql_r/mysys_priv.h -libmysql_r/vio_priv.h -libmysqld/*.ds? -libmysqld/backup_dir -libmysqld/client.c -libmysqld/client_plugin.c -libmysqld/client_settings.h -libmysqld/cmake_dummy.c -libmysqld/convert.cc -libmysqld/create_options.cc -libmysqld/datadict.cc -libmysqld/debug_sync.cc -libmysqld/derror.cc -libmysqld/des_key_file.cc -libmysqld/discover.cc -libmysqld/discover_xt.cc -libmysqld/emb_qcache.cpp -libmysqld/errmsg.c -libmysqld/event.cc -libmysqld/event_data_objects.cc -libmysqld/event_db_repository.cc -libmysqld/event_executor.cc -libmysqld/event_parse_data.cc -libmysqld/event_queue.cc -libmysqld/event_scheduler.cc -libmysqld/event_timed.cc -libmysqld/events.cc -libmysqld/examples/client_test.c -libmysqld/examples/client_test.cc -libmysqld/examples/completion_hash.cc -libmysqld/examples/completion_hash.h -libmysqld/examples/link_sources -libmysqld/examples/my_readline.h -libmysqld/examples/mysql -libmysqld/examples/mysql.cc -libmysqld/examples/mysql_client_test.c -libmysqld/examples/mysql_client_test_embedded -libmysqld/examples/mysql_embedded -libmysqld/examples/mysqltest -libmysqld/examples/mysqltest.c -libmysqld/examples/mysqltest.cc -libmysqld/examples/mysqltest_embedded -libmysqld/examples/readline.cc -libmysqld/examples/sql_string.cc -libmysqld/examples/sql_string.h -libmysqld/examples/test-gdbinit -libmysqld/field.cc -libmysqld/field_conv.cc -libmysqld/filesort.cc -libmysqld/get_password.c -libmysqld/gstream.cc -libmysqld/ha_archive.cc -libmysqld/ha_berkeley.cc -libmysqld/ha_blackhole.cc -libmysqld/ha_example.cc -libmysqld/ha_federated.cc -libmysqld/ha_federatedx.cc -libmysqld/ha_heap.cc -libmysqld/ha_innobase.cc -libmysqld/ha_innodb.cc -libmysqld/ha_maria.cc -libmysqld/ha_myisam.cc -libmysqld/ha_myisammrg.cc -libmysqld/ha_ndbcluster.cc -libmysqld/ha_ndbcluster_binlog.cc -libmysqld/ha_ndbcluster_cond.cc -libmysqld/ha_partition.cc -libmysqld/ha_tina.cc -libmysqld/handler.cc 
-libmysqld/handlerton.cc -libmysqld/hash_filo.cc -libmysqld/hostname.cc -libmysqld/init.cc -libmysqld/item.cc -libmysqld/item_buff.cc -libmysqld/item_cmpfunc.cc -libmysqld/item_create.cc -libmysqld/item_func.cc -libmysqld/item_geofunc.cc -libmysqld/item_row.cc -libmysqld/item_strfunc.cc -libmysqld/item_subselect.cc -libmysqld/item_sum.cc -libmysqld/item_timefunc.cc -libmysqld/item_uniq.cc -libmysqld/key.cc -libmysqld/keycaches.cc -libmysqld/lex_hash.h -libmysqld/lib_sql.cpp -libmysqld/libmysql.c -libmysqld/link_sources -libmysqld/lock.cc -libmysqld/log.cc -libmysqld/log_event.cc -libmysqld/log_event_old.cc -libmysqld/md5.c -libmysqld/mdl.cc -libmysqld/merge_archives_mysqlserver.cmake -libmysqld/message.h -libmysqld/message.rc -libmysqld/mf_iocache.cc -libmysqld/mini_client.cc -libmysqld/multi_range_read.cc -libmysqld/my_decimal.cc -libmysqld/my_time.c -libmysqld/my_user.c -libmysqld/mysqlserver_depends.c -libmysqld/myxt_xt.cc -libmysqld/net_pkg.cc -libmysqld/net_serv.cc -libmysqld/opt_ft.cc -libmysqld/opt_index_cond_pushdown.cc -libmysqld/opt_range.cc -libmysqld/opt_subselect.cc -libmysqld/opt_sum.cc -libmysqld/opt_table_elimination.cc -libmysqld/pack.c -libmysqld/parse_file.cc -libmysqld/partition_info.cc -libmysqld/password.c -libmysqld/procedure.cc -libmysqld/protocol.cc -libmysqld/protocol_cursor.cc -libmysqld/records.cc -libmysqld/repl_failsafe.cc -libmysqld/rpl_filter.cc -libmysqld/rpl_handler.cc -libmysqld/rpl_injector.cc -libmysqld/rpl_record.cc -libmysqld/rpl_record_old.cc -libmysqld/rpl_utility.cc -libmysqld/scheduler.cc -libmysqld/set_var.cc -libmysqld/sha2.cc -libmysqld/simple-test -libmysqld/slave.cc -libmysqld/sp.cc -libmysqld/sp_cache.cc -libmysqld/sp_head.cc -libmysqld/sp_pcontext.cc -libmysqld/sp_rcontext.cc -libmysqld/spatial.cc -libmysqld/sql_acl.cc -libmysqld/sql_admin.cc -libmysqld/sql_alter.cc -libmysqld/sql_analyse.cc -libmysqld/sql_audit.cc -libmysqld/sql_base.cc -libmysqld/sql_builtin.cc -libmysqld/sql_cache.cc -libmysqld/sql_class.cc -libmysqld/sql_command -libmysqld/sql_connect.cc -libmysqld/sql_crypt.cc -libmysqld/sql_cursor.cc -libmysqld/sql_cursor.h -libmysqld/sql_db.cc -libmysqld/sql_delete.cc -libmysqld/sql_derived.cc -libmysqld/sql_do.cc -libmysqld/sql_error.cc -libmysqld/sql_expression_cache.cc -libmysqld/sql_handler.cc -libmysqld/sql_help.cc -libmysqld/sql_insert.cc -libmysqld/sql_join_cache.cc -libmysqld/sql_lex.cc -libmysqld/sql_list.cc -libmysqld/sql_load.cc -libmysqld/sql_locale.cc -libmysqld/sql_manager.cc -libmysqld/sql_map.cc -libmysqld/sql_olap.cc -libmysqld/sql_parse.cc -libmysqld/sql_partition.cc -libmysqld/sql_partition_admin.cc -libmysqld/sql_plugin.cc -libmysqld/sql_prepare.cc -libmysqld/sql_profile.cc -libmysqld/sql_reload.cc -libmysqld/sql_rename.cc -libmysqld/sql_repl.cc -libmysqld/sql_select.cc -libmysqld/sql_servers.cc -libmysqld/sql_show.cc -libmysqld/sql_signal.cc -libmysqld/sql_state.c -libmysqld/sql_string.cc -libmysqld/sql_table.cc -libmysqld/sql_tablespace.cc -libmysqld/sql_test.cc -libmysqld/sql_time.cc -libmysqld/sql_trigger.cc -libmysqld/sql_truncate.cc -libmysqld/sql_udf.cc -libmysqld/sql_union.cc -libmysqld/sql_unions.cc -libmysqld/sql_update.cc -libmysqld/sql_view.cc -libmysqld/sql_yacc.cc -libmysqld/sql_yacc.cpp -libmysqld/sql_yacc.h -libmysqld/stacktrace.c -libmysqld/strfunc.cc -libmysqld/sys_vars.cc -libmysqld/table.cc -libmysqld/thr_malloc.cc -libmysqld/transaction.cc -libmysqld/tztime.cc -libmysqld/uniques.cc -libmysqld/unireg.cc -libmysqld/discover_xt.cc -libmysqld/myxt_xt.cc -libmysqld/rpl_reporting.cc 
-libmysqld/rpl_utility.cc -libmysqltest/*.ds? -libmysqltest/mytest.c -libtool -linked_client_sources -linked_include_sources -linked_libmysql_r_sources -linked_libmysql_sources -linked_libmysqld_sources -linked_libmysqldex_sources -linked_server_sources -linked_tools_sources -locked -ltmain.sh -ma_test_recovery.output -make_dist.cmake -Makefile -Makefile.in -MinSizeRel/ -MIRRORS -missing -mkinstalldirs -my_print_defaults/*.ds? -myisam_ftdump/*.ds? -myisamchk/*.ds? -myisamlog/*.ds? -myisammrg/*.ds? -myisampack/*.ds? -mysql-test/*.ds? -mysql-test/install_test_db -mysql-test/lib/init_db.sql -mysql-test/lib/My/SafeProcess/my_safe_process -mysql-test/mtr -mysql-test/mysql-test-run -mysql-test/r/*.err -mysql-test/r/*.log -mysql-test/r/*.out -mysql-test/r/*.reject -mysql-test/var -mysql.kdevprj -mysql.proj -mysqlbinlog/*.ds? -mysqlcheck/*.ds? -mysqld.S -mysqld.sym -mysqldemb/*.ds? -mysqlserver/*.ds? -mysys/*.ds? -mysys/charset2html -mysys/getopt.c -mysys/getopt1.c -mysys/main.cc -mysys/my_new.cpp -mysys/raid.cpp -mysys/ste5KbMa -mysys/test_atomic -mysys/test_bitmap -mysys/test_charset -mysys/test_dir -mysys/test_gethwaddr -mysys/test_io_cache -mysys/test_thr_alarm -mysys/test_thr_lock -mysys/test_vsnprintf -mysys/testhash -mysys/thr_lock -perror/*.ds? -regex/*.ds? -regex/re -Release/ -RelWithDebInfo/ -replace/*.ds? -scripts/comp_sql -scripts/convert-debug-for-diff -scripts/fill_func_tables -scripts/fill_func_tables.sql -scripts/fill_help_tables -scripts/fill_help_tables.sql -scripts/make_binary_distribution -scripts/make_sharedlib_distribution -scripts/make_win_binary_distribution -scripts/make_win_src_distribution -scripts/make_win_src_distribution_old -scripts/msql2mysql -scripts/mysql_config -scripts/mysql_convert_table_format -scripts/mysql_create_system_tables -scripts/mysql_explain_log -scripts/mysql_find_rows -scripts/mysql_fix_extensions -scripts/mysql_fix_privilege_tables -scripts/mysql_fix_privilege_tables.sql -scripts/mysql_fix_privilege_tables.sql.rule -scripts/mysql_fix_privilege_tables_sql.c -scripts/mysql_fix_privilege_tables_sql.c.rule -scripts/mysql_install_db -scripts/mysql_secure_installation -scripts/mysql_setpermission -scripts/mysql_tableinfo -scripts/mysql_upgrade -scripts/mysql_upgrade_shell -scripts/mysql_zap -scripts/mysqlaccess -scripts/mysqlbug -scripts/mysqld_multi -scripts/mysqld_safe -scripts/mysqldumpslow -scripts/mysqlhotcopy -scripts/mysqlhotcopy.sh.rej -scripts/safe_mysqld -select_test -sql-bench/bench-count-distinct -sql-bench/bench-init.pl -sql-bench/compare-results -sql-bench/compare-results-all -sql-bench/copy-db -sql-bench/crash-me -sql-bench/gif/* -sql-bench/graph-compare-results -sql-bench/innotest1 -sql-bench/innotest1a -sql-bench/innotest1b -sql-bench/innotest2 -sql-bench/innotest2a -sql-bench/innotest2b -sql-bench/output/* -sql-bench/Results-linux/ATIS-mysql_bdb-Linux_2.2.14_my_SMP_i686 -sql-bench/run-all-tests -sql-bench/server-cfg -sql-bench/template.html -sql-bench/test-alter-table -sql-bench/test-ATIS -sql-bench/test-big-tables -sql-bench/test-connect -sql-bench/test-create -sql-bench/test-insert -sql-bench/test-select -sql-bench/test-transactions -sql-bench/test-wisconsin -sql/*.cpp -sql/*.ds? 
-sql/client.c -sql/client_plugin.c -sql/Doxyfile -sql/gen_lex_hash -sql/lex_hash.h -sql/lex_hash.h.rule -sql/link_sources -sql/message.h -sql/message.mc -sql/message.rc -sql/mini_client_errors.c -sql/my_time.c -sql/my_user.c -sql/mysql_tzinfo_to_sql -sql/mysql_tzinfo_to_sql.cc -sql/mysql_tzinfo_to_sql_tztime.cc -sql/mysqlbinlog -sql/mysqld -sql/mysqld-purecov -sql/mysqld-purify -sql/mysqld-quantify -sql/new.cc -sql/pack.c -sql/share/*/*.sys -sql/sql_builtin.cc -sql/sql_yacc.cc -sql/sql_yacc.h -sql/sql_yacc.h.rule -sql/sql_yacc.output -sql_error.cc -sql_prepare.cc -sql_priv.h -stamp-h -storage/archive/archive_reader -storage/archive/archive_test -storage/heap/hp_test1 -storage/heap/hp_test2 -storage/maria/*.MAD -storage/maria/*.MAI -storage/maria/aria.log -storage/maria/aria_chk -storage/maria/aria_control -storage/maria/aria_dump_log -storage/maria/aria_ftdump -storage/maria/aria_log -storage/maria/aria_log.* -storage/maria/aria_pack -storage/maria/aria_read_log -storage/maria/ma_rt_test -storage/maria/ma_sp_test -storage/maria/ma_test1 -storage/maria/ma_test2 -storage/maria/ma_test3 -storage/maria/ma_test_all -storage/maria/tmp -storage/maria/unittest/ma_pagecache_consist_1k-t-big -storage/maria/unittest/ma_pagecache_consist_1kHC-t-big -storage/maria/unittest/ma_pagecache_consist_1kRD-t-big -storage/maria/unittest/ma_pagecache_consist_1kWR-t-big -storage/maria/unittest/ma_pagecache_consist_64k-t-big -storage/maria/unittest/ma_pagecache_consist_64kHC-t-big -storage/maria/unittest/ma_pagecache_consist_64kRD-t-big -storage/maria/unittest/ma_pagecache_consist_64kWR-t-big -storage/maria/unittest/ma_pagecache_single_64k-t-big -storage/maria/unittest/ma_test_loghandler_long-t-big -storage/maria/unittest/maria_control -storage/maria/unittest/mf_pagecache_consist_1k-t-big -storage/maria/unittest/mf_pagecache_consist_1kHC-t-big -storage/maria/unittest/mf_pagecache_consist_1kRD-t-big -storage/maria/unittest/mf_pagecache_consist_1kWR-t-big -storage/maria/unittest/mf_pagecache_consist_64k-t-big -storage/maria/unittest/mf_pagecache_consist_64kHC-t-big -storage/maria/unittest/mf_pagecache_consist_64kRD-t-big -storage/maria/unittest/mf_pagecache_consist_64kWR-t-big -storage/maria/unittest/mf_pagecache_single_64k-t-big -storage/maria/unittest/page_cache_test_file_1 -storage/maria/unittest/pagecache_debug.log -storage/maria/unittest/tmp -storage/myisam/FT1.MYD -storage/myisam/FT1.MYI -storage/myisam/ft_dump -storage/myisam/ft_eval -storage/myisam/ft_test1 -storage/myisam/ftbench/data -storage/myisam/ftbench/t -storage/myisam/ftbench/var/* -storage/myisam/mi_test1 -storage/myisam/mi_test2 -storage/myisam/mi_test3 -storage/myisam/mi_test_all -storage/myisam/myisam.log -storage/myisam/myisam_ftdump -storage/myisam/myisamchk -storage/myisam/myisamlog -storage/myisam/myisampack -storage/myisam/rt_test -storage/myisam/rt_test.MYD -storage/myisam/rt_test.MYI -storage/myisam/sp_test -storage/myisam/test1.MYD -storage/myisam/test1.MYI -storage/myisam/test2.MYD -storage/myisam/test2.MYI -storage/ndb/bin/acid -storage/ndb/bin/async-lmc-bench-l-p10.sh -storage/ndb/bin/async-lmc-bench-l.sh -storage/ndb/bin/async-lmc-bench-p10.sh -storage/ndb/bin/async-lmc-bench.sh -storage/ndb/bin/atrt -storage/ndb/bin/atrt-analyze-result.sh -storage/ndb/bin/atrt-clear-result.sh -storage/ndb/bin/atrt-gather-result.sh -storage/ndb/bin/atrt-setup.sh -storage/ndb/bin/bankCreator -storage/ndb/bin/bankMakeGL -storage/ndb/bin/bankSumAccounts -storage/ndb/bin/bankTimer -storage/ndb/bin/bankTransactionMaker -storage/ndb/bin/bankValidateAllGLs 
-storage/ndb/bin/basicTransporterTest -storage/ndb/bin/benchronja -storage/ndb/bin/bulk_copy -storage/ndb/bin/copy_tab -storage/ndb/bin/create_all_tabs -storage/ndb/bin/create_index -storage/ndb/bin/create_tab -storage/ndb/bin/DbAsyncGenerator -storage/ndb/bin/DbCreate -storage/ndb/bin/delete_all -storage/ndb/bin/desc -storage/ndb/bin/drop_all_tabs -storage/ndb/bin/drop_index -storage/ndb/bin/drop_tab -storage/ndb/bin/flexAsynch -storage/ndb/bin/flexBench -storage/ndb/bin/flexHammer -storage/ndb/bin/flexScan -storage/ndb/bin/flexTT -storage/ndb/bin/hugoCalculator -storage/ndb/bin/hugoFill -storage/ndb/bin/hugoLoad -storage/ndb/bin/hugoLockRecords -storage/ndb/bin/hugoPkDelete -storage/ndb/bin/hugoPkRead -storage/ndb/bin/hugoPkReadRecord -storage/ndb/bin/hugoPkUpdate -storage/ndb/bin/hugoScanRead -storage/ndb/bin/hugoScanUpdate -storage/ndb/bin/index -storage/ndb/bin/index2 -storage/ndb/bin/initronja -storage/ndb/bin/interpreterInTup -storage/ndb/bin/list_tables -storage/ndb/bin/make-config.sh -storage/ndb/bin/mgmtclient -storage/ndb/bin/mgmtsrvr -storage/ndb/bin/mkconfig -storage/ndb/bin/ndb -storage/ndb/bin/ndb_cpcc -storage/ndb/bin/ndb_cpcd -storage/ndb/bin/ndb_rep -storage/ndb/bin/ndbsql -storage/ndb/bin/newton_basic -storage/ndb/bin/newton_br -storage/ndb/bin/newton_pb -storage/ndb/bin/newton_perf -storage/ndb/bin/perfTransporterTest -storage/ndb/bin/printConfig -storage/ndb/bin/printSchemafile -storage/ndb/bin/printSysfile -storage/ndb/bin/redoLogFileReader -storage/ndb/bin/restart -storage/ndb/bin/restarter -storage/ndb/bin/restarter2 -storage/ndb/bin/restarts -storage/ndb/bin/restore -storage/ndb/bin/select_all -storage/ndb/bin/select_count -storage/ndb/bin/telco -storage/ndb/bin/test_cpcd -storage/ndb/bin/test_event -storage/ndb/bin/testBackup -storage/ndb/bin/testBank -storage/ndb/bin/testBasic -storage/ndb/bin/testBasicAsynch -storage/ndb/bin/testCopy -storage/ndb/bin/testDataBuffers -storage/ndb/bin/testDict -storage/ndb/bin/testGrep -storage/ndb/bin/testGrepVerify -storage/ndb/bin/testIndex -storage/ndb/bin/testInterpreter -storage/ndb/bin/testKernelDataBuffer -storage/ndb/bin/testLongSig -storage/ndb/bin/testMgm -storage/ndb/bin/testMgmapi -storage/ndb/bin/testNdbApi -storage/ndb/bin/testNodeRestart -storage/ndb/bin/testOdbcDriver -storage/ndb/bin/testOIBasic -storage/ndb/bin/testOperations -storage/ndb/bin/testRestartGci -storage/ndb/bin/testScan -storage/ndb/bin/testScanInterpreter -storage/ndb/bin/testSimplePropertiesSection -storage/ndb/bin/testSystemRestart -storage/ndb/bin/testTimeout -storage/ndb/bin/testTransactions -storage/ndb/bin/verify_index -storage/ndb/bin/waiter -storage/ndb/config/config.mk -storage/ndb/examples/ndbapi_example1/ndbapi_example1 -storage/ndb/examples/ndbapi_example2/ndbapi_example2 -storage/ndb/examples/ndbapi_example3/ndbapi_example3 -storage/ndb/examples/ndbapi_example5/ndbapi_example5 -storage/ndb/examples/select_all/select_all -storage/ndb/include/ndb_global.h -storage/ndb/include/ndb_types.h -storage/ndb/include/ndb_version.h -storage/ndb/lib/libMGM_API.so -storage/ndb/lib/libNDB_API.so -storage/ndb/lib/libNDB_ODBC.so -storage/ndb/lib/libndbclient.so -storage/ndb/lib/libndbclient_extra.so -storage/ndb/lib/libNEWTON_API.so -storage/ndb/lib/libNEWTON_BASICTEST_COMMON.so -storage/ndb/lib/libREP_API.so -storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent -storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2 -storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async -storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1 
-storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event -storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries -storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan -storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple -storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual -storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index -storage/ndb/src/common/mgmcommon/printConfig/*.d -storage/ndb/src/common/util/testBitmask.cpp -storage/ndb/src/cw/cpcd/ndb_cpcd -storage/ndb/src/dummy.cpp -storage/ndb/src/kernel/blocks/backup/ndb_print_backup_file -storage/ndb/src/kernel/blocks/backup/restore/ndb_restore -storage/ndb/src/kernel/blocks/dbdict/ndb_print_schema_file -storage/ndb/src/kernel/blocks/dbdih/ndb_print_sys_file -storage/ndb/src/kernel/blocks/dbtup/test_varpage -storage/ndb/src/kernel/blocks/ndb_print_file -storage/ndb/src/kernel/ndbd -storage/ndb/src/libndb.ver -storage/ndb/src/mgmclient/ndb_mgm -storage/ndb/src/mgmclient/test_cpcd/*.d -storage/ndb/src/mgmsrv/ndb_mgmd -storage/ndb/src/ndbapi/ndberror_check -storage/ndb/test/ndbapi/bank/bankCreator -storage/ndb/test/ndbapi/bank/bankMakeGL -storage/ndb/test/ndbapi/bank/bankSumAccounts -storage/ndb/test/ndbapi/bank/bankTimer -storage/ndb/test/ndbapi/bank/bankTransactionMaker -storage/ndb/test/ndbapi/bank/bankValidateAllGLs -storage/ndb/test/ndbapi/bank/testBank -storage/ndb/test/ndbapi/create_all_tabs -storage/ndb/test/ndbapi/create_tab -storage/ndb/test/ndbapi/DbAsyncGenerator -storage/ndb/test/ndbapi/DbCreate -storage/ndb/test/ndbapi/drop_all_tabs -storage/ndb/test/ndbapi/flexAsynch -storage/ndb/test/ndbapi/flexBench -storage/ndb/test/ndbapi/flexHammer -storage/ndb/test/ndbapi/flexTT -storage/ndb/test/ndbapi/ndbapi_slow_select -storage/ndb/test/ndbapi/test_event -storage/ndb/test/ndbapi/test_event_merge -storage/ndb/test/ndbapi/testBackup -storage/ndb/test/ndbapi/testBasic -storage/ndb/test/ndbapi/testBasicAsynch -storage/ndb/test/ndbapi/testBitfield -storage/ndb/test/ndbapi/testBlobs -storage/ndb/test/ndbapi/testDataBuffers -storage/ndb/test/ndbapi/testDeadlock -storage/ndb/test/ndbapi/testDict -storage/ndb/test/ndbapi/testIndex -storage/ndb/test/ndbapi/testIndexStat -storage/ndb/test/ndbapi/testInterpreter -storage/ndb/test/ndbapi/testLcp -storage/ndb/test/ndbapi/testMgm -storage/ndb/test/ndbapi/testNdbApi -storage/ndb/test/ndbapi/testNodeRestart -storage/ndb/test/ndbapi/testOIBasic -storage/ndb/test/ndbapi/testOperations -storage/ndb/test/ndbapi/testPartitioning -storage/ndb/test/ndbapi/testReadPerf -storage/ndb/test/ndbapi/testRestartGci -storage/ndb/test/ndbapi/testScan -storage/ndb/test/ndbapi/testScanInterpreter -storage/ndb/test/ndbapi/testScanPerf -storage/ndb/test/ndbapi/testSRBank -storage/ndb/test/ndbapi/testSystemRestart -storage/ndb/test/ndbapi/testTimeout -storage/ndb/test/ndbapi/testTransactions -storage/ndb/test/run-test/atrt -storage/ndb/test/tools/copy_tab -storage/ndb/test/tools/create_index -storage/ndb/test/tools/hugoCalculator -storage/ndb/test/tools/hugoFill -storage/ndb/test/tools/hugoLoad -storage/ndb/test/tools/hugoLockRecords -storage/ndb/test/tools/hugoPkDelete -storage/ndb/test/tools/hugoPkRead -storage/ndb/test/tools/hugoPkReadRecord -storage/ndb/test/tools/hugoPkUpdate -storage/ndb/test/tools/hugoScanRead -storage/ndb/test/tools/hugoScanUpdate -storage/ndb/test/tools/listen_event -storage/ndb/test/tools/ndb_cpcc -storage/ndb/test/tools/rep_latency -storage/ndb/test/tools/restart -storage/ndb/test/tools/verify_index -storage/ndb/tools/ndb_config -storage/ndb/tools/ndb_delete_all 
-storage/ndb/tools/ndb_desc -storage/ndb/tools/ndb_drop_index -storage/ndb/tools/ndb_drop_table -storage/ndb/tools/ndb_restore -storage/ndb/tools/ndb_select_all -storage/ndb/tools/ndb_select_count -storage/ndb/tools/ndb_show_tables -storage/ndb/tools/ndb_test_platform -storage/ndb/tools/ndb_waiter -storage/xtradb/configure.lineno -storage/xtradb/conftest.s1 -storage/xtradb/conftest.subs -storage/xtradb/ib_config.h -storage/xtradb/ib_config.h.in -storage/xtradb/mkinstalldirs -storage/xtradb/stamp-h1 -strings/*.ds? -strings/conf_to_src -strings/ctype_autoconf.c -strings/ctype_extra_sources.c -strings/str_test -strings/test_decimal -support-files/*.ini -support-files/binary-configure -support-files/MacOSX/Description.plist -support-files/MacOSX/Info.plist -support-files/MacOSX/postflight -support-files/MacOSX/postinstall -support-files/MacOSX/preflight -support-files/MacOSX/preinstall -support-files/MacOSX/ReadMe.txt -support-files/MacOSX/StartupParameters.plist -support-files/my-huge.cnf -support-files/my-innodb-heavy-4G.cnf -support-files/my-large.cnf -support-files/my-medium.cnf -support-files/my-small.cnf -support-files/mysql-3.23.25-beta.spec -support-files/mysql-3.23.26-beta.spec -support-files/mysql-3.23.27-beta.spec -support-files/mysql-3.23.28-gamma.spec -support-files/mysql-3.23.29-gamma.spec -support-files/mysql-log-rotate -support-files/mysql.server -support-files/mysql.spec -support-files/mysqld_multi.server -support-files/ndb-config-2-node.ini -TAGS -test/ndbapi/bank/bankCreator -test/ndbapi/bank/bankMakeGL -test/ndbapi/bank/bankSumAccounts -test/ndbapi/bank/bankTimer -test/ndbapi/bank/bankTransactionMaker -test/ndbapi/bank/bankValidateAllGLs -test/ndbapi/bank/testBank -test/ndbapi/create_all_tabs -test/ndbapi/create_tab -test/ndbapi/drop_all_tabs -test/ndbapi/flexAsynch -test/ndbapi/flexBench -test/ndbapi/flexHammer -test/ndbapi/flexTT -test/ndbapi/test_event -test/ndbapi/testBackup -test/ndbapi/testBasic -test/ndbapi/testBasicAsynch -test/ndbapi/testBlobs -test/ndbapi/testDataBuffers -test/ndbapi/testDeadlock -test/ndbapi/testDict -test/ndbapi/testIndex -test/ndbapi/testMgm -test/ndbapi/testNdbApi -test/ndbapi/testNodeRestart -test/ndbapi/testOIBasic -test/ndbapi/testOperations -test/ndbapi/testRestartGci -test/ndbapi/testScan -test/ndbapi/testScanInterpreter -test/ndbapi/testScanPerf -test/ndbapi/testSystemRestart -test/ndbapi/testTimeout -test/ndbapi/testTransactions -test/run-test/atrt -test/tools/copy_tab -test/tools/create_index -test/tools/hugoCalculator -test/tools/hugoFill -test/tools/hugoLoad -test/tools/hugoLockRecords -test/tools/hugoPkDelete -test/tools/hugoPkRead -test/tools/hugoPkReadRecord -test/tools/hugoPkUpdate -test/tools/hugoScanRead -test/tools/hugoScanUpdate -test/tools/ndb_cpcc -test/tools/restart -test/tools/verify_index -test1/* -test?.MA? -test_xml -tests/*.ds? 
-tests/bug25714 -tests/client_test -tests/connect_test -tests/mysql_client_test -tests/async_queries -thr_insert_test/* -thr_test/* -thread_test -tmp -tmp/* -tools/my_vsnprintf.c -tools/mysqlmanager -tools/mysqlmngd -tools/mysqltestmanager -tools/mysys_priv.h -unittest/examples/*.t -unittest/maria_control -unittest/mysys/*.t -unittest/mysys/mf_pagecache_consist_1k-t-big -unittest/mysys/mf_pagecache_consist_1kHC-t-big -unittest/mysys/mf_pagecache_consist_1kRD-t-big -unittest/mysys/mf_pagecache_consist_1kWR-t-big -unittest/mysys/mf_pagecache_consist_64k-t-big -unittest/mysys/mf_pagecache_consist_64kHC-t-big -unittest/mysys/mf_pagecache_consist_64kRD-t-big -unittest/mysys/mf_pagecache_consist_64kWR-t-big -unittest/mysys/mf_pagecache_single_64k-t-big -unittest/mytap/t/*.t -unittest/page_cache_test_file_1 -unittest/pagecache_debug.log -unittest/tmp/* -unittest/unit -vi.h -vio/*.ds? -vio/test-ssl -vio/test-sslclient -vio/test-sslserver -vio/viotest-ssl -vio/viotest-sslconnect.cpp -vio/viotest.cpp -ylwrap -zlib/*.ds? -sql-bench/test-table-elimination -sql/share/bulgarian -sql/share/czech -sql/share/danish -sql/share/dutch -sql/share/english -sql/share/estonian -sql/share/french -sql/share/german -sql/share/greek -sql/share/hungarian -sql/share/italian -sql/share/japanese -sql/share/japanese-sjis -sql/share/korean -sql/share/norwegian -sql/share/norwegian-ny -sql/share/polish -sql/share/portuguese -sql/share/romanian -sql/share/russian -sql/share/serbian -sql/share/slovak -sql/share/spanish -sql/share/swedish -sql/share/ukrainian -CPackConfig.cmake -CPackSourceConfig.cmake -Docs/INFO_BIN -Docs/INFO_SRC -tags -Testing -info_macros.cmake -VERSION.dep -configure -libmysqld/examples/mysqltest.cc -extra/libevent/event-config.h -libmysqld/opt_table_elimination.cc -libmysqld/ha_federatedx.cc -libmysqld/multi_range_read.cc -libmysqld/opt_index_cond_pushdown.cc -libmysqld/opt_subselect.cc -libmysqld/sql_join_cache.cc -client/rpl_filter.cc -client/rpl_filter.h -client/sql_list.cc -client/sql_list.h -libmysqld/create_options.cc -libmysqld/sql_expression_cache.cc -mysql-test/mtr_command -scripts/convert-debug-for-diff -client/strings_def.h -libmysql/strings_def.h -libmysql_r/strings_def.h -storage/maria/aria_log_control -scripts/mytop -include/*.h.tmp -cmd-line-utils/libedit/emacs.h -mysql-test/collections/default.release -support-files/plugins.files -client/mysql_plugin -*.resource.txt -plugin/handler_socket/perl-Net-HandlerSocket/HandlerSocket.c -plugin/handler_socket/perl-Net-HandlerSocket/blib -plugin/handler_socket/perl-Net-HandlerSocket/pm_to_blib -plugin/handler_socket/perl-Net-HandlerSocket/HandlerSocket.bs -plugin/handler_socket/perl-Net-HandlerSocket/Makefile.PL -libmysqld/libmysqld_exports_file.cc -libmysqld/gcalc_slicescan.cc -libmysqld/gcalc_tools.cc -libmysqld/my_apc.cc -sql/share/errmsg.sys -sql/share/mysql -install_manifest.txt -sql/db.opt -./_CPack_Packages -./install_manifest_*.txt -typescript -storage/perfschema/gen_pfs_lex_token -storage/perfschema/pfs_lex_token.h -*.bak -*.OLD -mysql-test/collections/default.release.done -sql/sql_yacc.hh -packaging/solaris/postinstall-solaris -extra/jemalloc/jemalloc-* -extra/jemalloc/build -*.tdb -pcre/config.h -pcre/CTestCustom.ctest -pcre/pcre_grep_test.sh -pcre/pcre_scanner_unittest -pcre/pcre_stringpiece_unittest -pcre/pcre_test.sh -pcre/pcrecpp_unittest -pcre/pcregrep -pcre/pcretest -pcre/pcre.h -testNinput -teststderr -testtemp1 -testtemp2 -testtry -storage/tokudb/ft-index/CTestCustom.cmake -storage/tokudb/ft-index/DartConfiguration.tcl 
-storage/tokudb/ft-index/ctags-stamp -storage/tokudb/ft-index/valgrind.suppressions -storage/tokudb/ft-index/xz -storage/tokudb/ft-index/buildheader/db.h -storage/tokudb/ft-index/buildheader/make_tdb -storage/tokudb/ft-index/buildheader/runcat.sh -storage/tokudb/ft-index/ft/ftverify -storage/tokudb/ft-index/ft/log_code.cc -storage/tokudb/ft-index/ft/log_header.h -storage/tokudb/ft-index/ft/log_print.cc -storage/tokudb/ft-index/ft/logformat -storage/tokudb/ft-index/ft/tdb-recover -storage/tokudb/ft-index/ft/tdb_logprint -storage/tokudb/ft-index/ft/tokuftdump -storage/tokudb/ft-index/ft/tests/benchmark-test -storage/tokudb/ft-index/ft/tests/block_allocator_test -storage/tokudb/ft-index/ft/tests/bnc-insert-benchmark -storage/tokudb/ft-index/ft/tests/cachetable-4357 -storage/tokudb/ft-index/ft/tests/cachetable-4365 -storage/tokudb/ft-index/ft/tests/cachetable-5097 -storage/tokudb/ft-index/ft/tests/cachetable-5978 -storage/tokudb/ft-index/ft/tests/cachetable-5978-2 -storage/tokudb/ft-index/ft/tests/cachetable-all-write -storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pending -storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-pinned-nodes -storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-prefetched-nodes -storage/tokudb/ft-index/ft/tests/cachetable-checkpoint-test -storage/tokudb/ft-index/ft/tests/cachetable-checkpointer-class -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-checkpoint2 -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-attrs-accumulate -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-empty-cachetable -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-everything-pinned -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-nothing-needs-flushing -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-same-fullhash -storage/tokudb/ft-index/ft/tests/cachetable-cleaner-thread-simple -storage/tokudb/ft-index/ft/tests/cachetable-clock-all-pinned -storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction -storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction2 -storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction3 -storage/tokudb/ft-index/ft/tests/cachetable-clock-eviction4 -storage/tokudb/ft-index/ft/tests/cachetable-clone-checkpoint -storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch -storage/tokudb/ft-index/ft/tests/cachetable-clone-partial-fetch-pinned-node -storage/tokudb/ft-index/ft/tests/cachetable-clone-pin-nonblocking -storage/tokudb/ft-index/ft/tests/cachetable-clone-unpin-remove -storage/tokudb/ft-index/ft/tests/cachetable-count-pinned-test -storage/tokudb/ft-index/ft/tests/cachetable-debug-test -storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test -storage/tokudb/ft-index/ft/tests/cachetable-eviction-close-test2 -storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test -storage/tokudb/ft-index/ft/tests/cachetable-eviction-getandpin-test2 -storage/tokudb/ft-index/ft/tests/cachetable-evictor-class -storage/tokudb/ft-index/ft/tests/cachetable-fd-test -storage/tokudb/ft-index/ft/tests/cachetable-fetch-inducing-evictor -storage/tokudb/ft-index/ft/tests/cachetable-flush-during-cleaner -storage/tokudb/ft-index/ft/tests/cachetable-flush-test -storage/tokudb/ft-index/ft/tests/cachetable-getandpin-test -storage/tokudb/ft-index/ft/tests/cachetable-kibbutz_and_flush_cachefile -storage/tokudb/ft-index/ft/tests/cachetable-partial-fetch -storage/tokudb/ft-index/ft/tests/cachetable-pin-checkpoint 
-storage/tokudb/ft-index/ft/tests/cachetable-pin-nonblocking-checkpoint-clean -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-checkpoint-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-leak-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-close-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-flowcontrol-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-getandpin-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch-maybegetandpin-test -storage/tokudb/ft-index/ft/tests/cachetable-prefetch2-test -storage/tokudb/ft-index/ft/tests/cachetable-put-checkpoint -storage/tokudb/ft-index/ft/tests/cachetable-put-test -storage/tokudb/ft-index/ft/tests/cachetable-rwlock-test -storage/tokudb/ft-index/ft/tests/cachetable-simple-clone -storage/tokudb/ft-index/ft/tests/cachetable-simple-clone2 -storage/tokudb/ft-index/ft/tests/cachetable-simple-maybe-get-pin -storage/tokudb/ft-index/ft/tests/cachetable-simple-pin -storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-cheap -storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-dep-nodes -storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking -storage/tokudb/ft-index/ft/tests/cachetable-simple-pin-nonblocking-cheap -storage/tokudb/ft-index/ft/tests/cachetable-simple-put-dep-nodes -storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin -storage/tokudb/ft-index/ft/tests/cachetable-simple-read-pin-nonblocking -storage/tokudb/ft-index/ft/tests/cachetable-simple-unpin-remove-checkpoint -storage/tokudb/ft-index/ft/tests/cachetable-simple-verify -storage/tokudb/ft-index/ft/tests/cachetable-test -storage/tokudb/ft-index/ft/tests/cachetable-unpin-and-remove-test -storage/tokudb/ft-index/ft/tests/cachetable-unpin-remove-and-checkpoint -storage/tokudb/ft-index/ft/tests/cachetable-unpin-test -storage/tokudb/ft-index/ft/tests/cachetable-writer-thread-limit -storage/tokudb/ft-index/ft/tests/comparator-test -storage/tokudb/ft-index/ft/tests/compress-test -storage/tokudb/ft-index/ft/tests/dbufio-test -storage/tokudb/ft-index/ft/tests/dbufio-test-destroy -storage/tokudb/ft-index/ft/tests/fifo-test -storage/tokudb/ft-index/ft/tests/ft-bfe-query -storage/tokudb/ft-index/ft/tests/ft-clock-test -storage/tokudb/ft-index/ft/tests/ft-serialize-benchmark -storage/tokudb/ft-index/ft/tests/ft-serialize-sub-block-test -storage/tokudb/ft-index/ft/tests/ft-serialize-test -storage/tokudb/ft-index/ft/tests/ft-test -storage/tokudb/ft-index/ft/tests/ft-test-cursor -storage/tokudb/ft-index/ft/tests/ft-test-cursor-2 -storage/tokudb/ft-index/ft/tests/ft-test-header -storage/tokudb/ft-index/ft/tests/ft-test0 -storage/tokudb/ft-index/ft/tests/ft-test1 -storage/tokudb/ft-index/ft/tests/ft-test2 -storage/tokudb/ft-index/ft/tests/ft-test3 -storage/tokudb/ft-index/ft/tests/ft-test4 -storage/tokudb/ft-index/ft/tests/ft-test5 -storage/tokudb/ft-index/ft/tests/ftloader-test -storage/tokudb/ft-index/ft/tests/ftloader-test-bad-generate -storage/tokudb/ft-index/ft/tests/ftloader-test-extractor -storage/tokudb/ft-index/ft/tests/ftloader-test-extractor-errors -storage/tokudb/ft-index/ft/tests/ftloader-test-merge-files-dbufio -storage/tokudb/ft-index/ft/tests/ftloader-test-open -storage/tokudb/ft-index/ft/tests/ftloader-test-vm -storage/tokudb/ft-index/ft/tests/ftloader-test-writer -storage/tokudb/ft-index/ft/tests/ftloader-test-writer-errors -storage/tokudb/ft-index/ft/tests/is_empty -storage/tokudb/ft-index/ft/tests/keyrange -storage/tokudb/ft-index/ft/tests/keytest -storage/tokudb/ft-index/ft/tests/le-cursor-provdel 
-storage/tokudb/ft-index/ft/tests/le-cursor-right -storage/tokudb/ft-index/ft/tests/le-cursor-walk -storage/tokudb/ft-index/ft/tests/list-test -storage/tokudb/ft-index/ft/tests/log-test -storage/tokudb/ft-index/ft/tests/log-test-maybe-trim -storage/tokudb/ft-index/ft/tests/log-test2 -storage/tokudb/ft-index/ft/tests/log-test3 -storage/tokudb/ft-index/ft/tests/log-test4 -storage/tokudb/ft-index/ft/tests/log-test5 -storage/tokudb/ft-index/ft/tests/log-test6 -storage/tokudb/ft-index/ft/tests/log-test7 -storage/tokudb/ft-index/ft/tests/logcursor-bad-checksum -storage/tokudb/ft-index/ft/tests/logcursor-empty-logdir -storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile -storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-2 -storage/tokudb/ft-index/ft/tests/logcursor-empty-logfile-3 -storage/tokudb/ft-index/ft/tests/logcursor-print -storage/tokudb/ft-index/ft/tests/logcursor-timestamp -storage/tokudb/ft-index/ft/tests/logfilemgr-create-destroy -storage/tokudb/ft-index/ft/tests/logfilemgr-print -storage/tokudb/ft-index/ft/tests/make-tree -storage/tokudb/ft-index/ft/tests/minicron-test -storage/tokudb/ft-index/ft/tests/msnfilter -storage/tokudb/ft-index/ft/tests/omt-test -storage/tokudb/ft-index/ft/tests/orthopush-flush -storage/tokudb/ft-index/ft/tests/pqueue-test -storage/tokudb/ft-index/ft/tests/queue-test -storage/tokudb/ft-index/ft/tests/quicklz-test -storage/tokudb/ft-index/ft/tests/recovery-bad-last-entry -storage/tokudb/ft-index/ft/tests/recovery-cbegin -storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend -storage/tokudb/ft-index/ft/tests/recovery-cbegin-cend-hello -storage/tokudb/ft-index/ft/tests/recovery-cend-cbegin -storage/tokudb/ft-index/ft/tests/recovery-datadir-is-file -storage/tokudb/ft-index/ft/tests/recovery-empty -storage/tokudb/ft-index/ft/tests/recovery-fopen-missing-file -storage/tokudb/ft-index/ft/tests/recovery-hello -storage/tokudb/ft-index/ft/tests/recovery-lsn-error-during-forward-scan -storage/tokudb/ft-index/ft/tests/recovery-no-datadir -storage/tokudb/ft-index/ft/tests/recovery-no-log -storage/tokudb/ft-index/ft/tests/recovery-no-logdir -storage/tokudb/ft-index/ft/tests/recovery-test5123 -storage/tokudb/ft-index/ft/tests/shortcut -storage/tokudb/ft-index/ft/tests/subblock-test-checksum -storage/tokudb/ft-index/ft/tests/subblock-test-compression -storage/tokudb/ft-index/ft/tests/subblock-test-index -storage/tokudb/ft-index/ft/tests/subblock-test-size -storage/tokudb/ft-index/ft/tests/test-assert -storage/tokudb/ft-index/ft/tests/test-bjm -storage/tokudb/ft-index/ft/tests/test-checkpoint-during-flush -storage/tokudb/ft-index/ft/tests/test-checkpoint-during-merge -storage/tokudb/ft-index/ft/tests/test-checkpoint-during-rebalance -storage/tokudb/ft-index/ft/tests/test-checkpoint-during-split -storage/tokudb/ft-index/ft/tests/test-del-inorder -storage/tokudb/ft-index/ft/tests/test-dirty-flushes-on-cleaner -storage/tokudb/ft-index/ft/tests/test-dump-ft -storage/tokudb/ft-index/ft/tests/test-flushes-on-cleaner -storage/tokudb/ft-index/ft/tests/test-ft-overflow -storage/tokudb/ft-index/ft/tests/test-hot-with-bounds -storage/tokudb/ft-index/ft/tests/test-inc-split -storage/tokudb/ft-index/ft/tests/test-leafentry-child-txn -storage/tokudb/ft-index/ft/tests/test-leafentry-nested -storage/tokudb/ft-index/ft/tests/test-merges-on-cleaner -storage/tokudb/ft-index/ft/tests/test-oldest-referenced-xid-flush -storage/tokudb/ft-index/ft/tests/test-pick-child-to-flush -storage/tokudb/ft-index/ft/tests/test-txn-child-manager -storage/tokudb/ft-index/ft/tests/test1308a 
-storage/tokudb/ft-index/ft/tests/test3681 -storage/tokudb/ft-index/ft/tests/test3856 -storage/tokudb/ft-index/ft/tests/test3884 -storage/tokudb/ft-index/ft/tests/test4115 -storage/tokudb/ft-index/ft/tests/test4244 -storage/tokudb/ft-index/ft/tests/test_block_allocator_merge -storage/tokudb/ft-index/ft/tests/test_logcursor -storage/tokudb/ft-index/ft/tests/test_oexcl -storage/tokudb/ft-index/ft/tests/test_toku_malloc_plain_free -storage/tokudb/ft-index/ft/tests/upgrade_test_simple -storage/tokudb/ft-index/ft/tests/verify-bad-msn -storage/tokudb/ft-index/ft/tests/verify-bad-pivots -storage/tokudb/ft-index/ft/tests/verify-dup-in-leaf -storage/tokudb/ft-index/ft/tests/verify-dup-pivots -storage/tokudb/ft-index/ft/tests/verify-misrouted-msgs -storage/tokudb/ft-index/ft/tests/verify-unsorted-leaf -storage/tokudb/ft-index/ft/tests/verify-unsorted-pivots -storage/tokudb/ft-index/ft/tests/x1764-test -storage/tokudb/ft-index/ft/tests/xid_lsn_independent -storage/tokudb/ft-index/ft/tests/ybt-test -storage/tokudb/ft-index/locktree/tests/concurrent_tree_create_destroy -storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_acquire_release -storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_remove -storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_insert_serial_large -storage/tokudb/ft-index/locktree/tests/concurrent_tree_lkr_remove_all -storage/tokudb/ft-index/locktree/tests/lock_request_create_set -storage/tokudb/ft-index/locktree/tests/lock_request_get_set_keys -storage/tokudb/ft-index/locktree/tests/lock_request_start_deadlock -storage/tokudb/ft-index/locktree/tests/lock_request_start_pending -storage/tokudb/ft-index/locktree/tests/locktree_conflicts -storage/tokudb/ft-index/locktree/tests/locktree_create_destroy -storage/tokudb/ft-index/locktree/tests/locktree_infinity -storage/tokudb/ft-index/locktree/tests/locktree_misc -storage/tokudb/ft-index/locktree/tests/locktree_overlapping_relock -storage/tokudb/ft-index/locktree/tests/locktree_simple_lock -storage/tokudb/ft-index/locktree/tests/locktree_single_txnid_optimization -storage/tokudb/ft-index/locktree/tests/manager_create_destroy -storage/tokudb/ft-index/locktree/tests/manager_locktree_map -storage/tokudb/ft-index/locktree/tests/manager_params -storage/tokudb/ft-index/locktree/tests/manager_reference_release_lt -storage/tokudb/ft-index/locktree/tests/manager_status -storage/tokudb/ft-index/locktree/tests/range_buffer_test -storage/tokudb/ft-index/locktree/tests/txnid_set_test -storage/tokudb/ft-index/locktree/tests/wfg_test -storage/tokudb/ft-index/portability/merge_archives_tokuportability_static.cmake -storage/tokudb/ft-index/portability/tokuportability_static_depends.cc -storage/tokudb/ft-index/portability/tests/test-active-cpus -storage/tokudb/ft-index/portability/tests/test-cache-line-boundary-fails -storage/tokudb/ft-index/portability/tests/test-cpu-freq -storage/tokudb/ft-index/portability/tests/test-cpu-freq-openlimit17 -storage/tokudb/ft-index/portability/tests/test-fair-rwlock -storage/tokudb/ft-index/portability/tests/test-filesystem-sizes -storage/tokudb/ft-index/portability/tests/test-flock -storage/tokudb/ft-index/portability/tests/test-fsync -storage/tokudb/ft-index/portability/tests/test-fsync-directory -storage/tokudb/ft-index/portability/tests/test-gettime -storage/tokudb/ft-index/portability/tests/test-gettimeofday -storage/tokudb/ft-index/portability/tests/test-hugepage -storage/tokudb/ft-index/portability/tests/test-max-data -storage/tokudb/ft-index/portability/tests/test-memory-status 
-storage/tokudb/ft-index/portability/tests/test-pagesize -storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rdlock -storage/tokudb/ft-index/portability/tests/test-pthread-rwlock-rwr -storage/tokudb/ft-index/portability/tests/test-pwrite4g -storage/tokudb/ft-index/portability/tests/test-snprintf -storage/tokudb/ft-index/portability/tests/test-stat -storage/tokudb/ft-index/portability/tests/test-toku-malloc -storage/tokudb/ft-index/portability/tests/test-xid -storage/tokudb/ft-index/portability/tests/try-assert-zero -storage/tokudb/ft-index/portability/tests/try-assert0 -storage/tokudb/ft-index/portability/tests/try-leak-lost -storage/tokudb/ft-index/portability/tests/try-leak-reachable -storage/tokudb/ft-index/portability/tests/try-uninit -storage/tokudb/ft-index/src/merge_archives_tokufractaltree_static.cmake -storage/tokudb/ft-index/src/tokufractaltree_static_depends.cc -storage/tokudb/ft-index/src/tests/recovery_fileops_unit_dir -storage/tokudb/ft-index/portability/toku_config.h -storage/tokudb/ft-index/util/tests/marked-omt-test -storage/tokudb/ft-index/util/tests/omt-tmpl-test -storage/tokudb/ft-index/util/tests/sort-tmpl-test -storage/tokudb/ft-index/util/tests/test-kibbutz -storage/tokudb/ft-index/util/tests/test-kibbutz2 -storage/tokudb/ft-index/util/tests/test-rwlock -storage/tokudb/ft-index/util/tests/test-rwlock-cheapness -storage/tokudb/ft-index/util/tests/test_circular_buffer -storage/tokudb/ft-index/util/tests/test_doubly_linked_list -storage/tokudb/ft-index/util/tests/test_partitioned_counter -storage/tokudb/ft-index/util/tests/test_partitioned_counter_5833 -storage/tokudb/ft-index/util/tests/threadpool-test -storage/tokudb/ft-index/util/tests/threadpool-testrunf -storage/tokudb/ft-index/tools/tokudb_dump -storage/tokudb/ft-index/tools/tokudb_gen -storage/tokudb/ft-index/tools/tokudb_load -storage/connect/connect.cnf -storage/cassandra/cassandra.cnf -libmysql/libmysql_versions.ld -scripts/mysql_config.pl -pcre/pcre_chartables.c -pcre/test*grep diff --git a/.gitignore b/.gitignore index 2c7e5c890ae..35c4c3ace07 100644 --- a/.gitignore +++ b/.gitignore @@ -205,7 +205,6 @@ support-files/mysql.10.0.11.spec support-files/mysql.server support-files/mysql.spec support-files/mysqld_multi.server -support-files/ndb-config-2-node.ini support-files/wsrep.cnf support-files/wsrep_notify tags diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index 860560767a7..a0fddfb2413 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -205,9 +205,8 @@ fi max_no_embedded_configs="$SSL_LIBRARY --with-plugins=max" max_no_qc_configs="$SSL_LIBRARY --with-plugins=max --without-query-cache" -max_no_ndb_configs="$SSL_LIBRARY --with-plugins=max-no-ndb --with-embedded-server --with-libevent" max_configs="$SSL_LIBRARY --with-plugins=max --with-embedded-server --with-libevent" -all_configs="$SSL_LIBRARY --with-plugins=max --with-plugin-ndbcluster --with-embedded-server --with-innodb_plugin --with-libevent" +all_configs="$SSL_LIBRARY --with-plugins=max --with-embedded-server --with-innodb_plugin --with-libevent" # # CPU and platform specific compilation flags. diff --git a/BUILD/build_mccge.sh b/BUILD/build_mccge.sh deleted file mode 100755 index 78e5350dc1b..00000000000 --- a/BUILD/build_mccge.sh +++ /dev/null @@ -1,1876 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2008, 2010, Oracle. 
-# Copyright (c) 2009-2011 Monty Program Ab -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Library General Public -# License as published by the Free Software Foundation; version 2 -# of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Library General Public License for more details. -# -# You should have received a copy of the GNU Library General Public -# License along with this library; if not, write to the Free -# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, -# MA 02110-1301, USA - -die() -{ - echo "ERROR: $@"; exit 1; -} - -get_key_value() -{ - echo "$1" | sed 's/^--[a-zA-Z_-]*=//' -} - -developer_usage() -{ -cat < BUILD/build_mccge.sh - - This performs the following operations: - 1) Detects the operating system. Currently, Linux, FreeBSD, Solaris - 8/9/10/11, and Mac OS X are supported by this script. - 2) Detects the type of CPU being used. Currently supported processors - are: x86 for all supported operating systems, Itanium for Linux - with GCC, and x86 + SPARC for Solaris using the Forte compiler and - finally x86 on Linux using the Intel compiler. - 3) Invokes the GCC compiler. - 4) Builds a set of binaries; for more information about these, - see --extended-help. - 5) Default compiler is always gcc. - - The default version assumes that you have a source code tarball from - which you are building, and thus autoconf and automake do not need to - be run. If you have downloaded a launchpad tree then you should read - --developer-help. - - If you're building on a Solaris SPARC machine and you want to compile - using SunStudio you must set - --compiler=forte; if you want to build using the Intel compiler on - Linux, you need to set --compiler=icc. If you want to use the AMD - compiler Open64, set --compiler=open64. - - A synonym for forte is SunStudio, so one can also use - --compiler=SunStudio. - - If you want to make sure that a 64-bit version is built then you - should add the flag --64. This is always set on Solaris machines and - when check-cpu is able to discover that a 64-bit CPU is being used. If - you want to ensure that a 32-bit binary is produced, use --32 instead. - - If you need the binaries to be installed in a different location from - /usr/local/mysql, then you should set --prefix to point to where you - want the binaries installed. - - Using a data directory other than the default (PREFIX/data) can be - done when starting the server, or by invoking this script with - the --datadir option. - - If you want your binaries stripped of surplus debug or other - information, use the --strip option. - - If you want debug information in the binary (for example, to be - able to send gdb core dumps to support), then you should add the - flag --with-debug; if you want a production build with only debugging - information in the binary then use --debug.
-EOF -} - -usage() -{ -cat < x86 and 32-bit binary - x86_64 => x86 and 64 bit binary - --warning-mode=[extra|pedantic|normal|no] Set warning mode level - --warnings Set warning mode to normal - --32 Build a 32-bit binary even if CPU is 64-bit - --64 Build a 64-bit binary even if not sure a - 64-bit CPU is being used - --package=[pro|classic] Select package to build - --parallelism=number Define parallelism in make - --strip Strip binaries - --error-inject Enable error injection into MariaDB Server and - data nodes - --valgrind Build with valgrind - --fast Optimise for CPU architecture built on - --static-linking Statically link system libraries into binaries - --use-tcmalloc Link with tcmalloc instead of standard malloc (Linux only) - --with-flags * Pass extra --with-xxx options to configure -EOF - if test "x$1" != "x" ; then - echo "Failure Message: $1" - fi -} - -extended_usage() -{ - cat </bin (can be overridden) - - --localstatedir: /data, unless --datadir is used, in which - case it defaults to /data (can be overridden by setting - --localstatedir explicitly). - - --enable-local-infile: Enable use of the LOAD DATA FROM LOCAL INFILE - command (cannot be overridden). - - --enable-thread-safe-client: Enable the multi-threaded mysql client - library (cannot be overridden). - - --with-big-tables: Enable use of tables with more than 4G rows (cannot - be overridden). - - --with-extra-charsets=all: Enable use of all character sets supported - by MySQL (cannot be overridden). - - --with-ssl: Enable use of yaSSL library included in the MySQL source - if possible (GCC and same CC and CXX). - (cannot be overridden). - - --with-pic: Build all binaries using position independent assembler - to avoid problems with dynamic linkers (cannot be overridden). - - --without-example-engine: Ensure that the example engine isn't built, - it cannot do any useful things, it's merely intended as documentation. - (cannot be overridden) - - --with-csv-storage-engine: Ensure that the CSV storage engine is - included in all builds. Since CSV is required for log tables in - MySQL 5.1, this option cannot be overridden. - - (Note that MyISAM support is always built into the MySQL Server; the - server *cannot* be built without MyISAM.) - - --with-mysqld-ldflags=-static - --with-client-ldflags=-static - Ensures that binaries for, respectively, the MySQL server and client - are built with static libraries except for the system libraries, - which use dynamically loaded libraries provided by the operating - system. Building with --developer sets these to all-static instead, - to build everything statically. - - In addition there are some configure options that are specific to - Linux operating systems: - - --enable-assembler - Include assembler code optimisations for a number of mostly string - methods. Used for x86 processors only. - - Neither of the preceding options can be disabled. - - MySQL Cluster Carrier Grade edition also adds the following options - (also used by the extended package): - - --with-ndbcluster - Include the NDB Cluster storage engine, its kernel, management - server, and client, as well as support for the NDB and MGM APIs. - - --without-ndb-debug - Do not include specific NDB debug code, not even in debug versions - (cannot be overridden). - - Package-specific options: - ------------------------- - --with-innodb - Specifically included in the "pro" package. - - --with-comment - Sets the comment for the MySQL version, by package, as described - above. 
- - --with-server-suffix - Sets the server suffix on the MySQL version, by package, as - described above. - - Other options used: - ------------------- - --with-readline - Use the GPL readline library for command editing functions. - - --with-libedit - Use the BSD licensed library for command editing functions. - - --with-zlib-dir=bundled - Use the zlib package bundled with MySQL. - - --with-mysqld-libs=-lmtmalloc - Used on Solaris to ensure that the proper malloc library is used. - Investigations have shown mtmalloc to be the best choice on Solaris; - umem also has good performance on Solaris but better debugging - capabilities. - - Compiler options: - ----------------- - - This section describes the compiler options for each of the different - platforms supported by this script. - - The --fast option adds -mtune=cpu_arg to the C/C++ flags (provides - support for Nocona, K8, and other processors); this option is valid - when gcc is the compiler. - - Use of the --debug option adds -g to the C/C++ flags. - - In all cases it is possible to override the definition of CC and CXX - by calling the script as follows: - CC="/usr/local/bin/gcc" CXX="/usr/local/bin/gcc" BUILD/build_mccge.sh - - Feedback profiler on gcc - ------------------------ - Using gcc --generate-feedback=path causes the following flags to be added - to the compiler flags. - - --fprofile-generate - --fprofile-dir=path - - Using gcc with --use-feedback=path causes the following flags to be added - to the compiler flags. --fprofile-correction indicates MySQL is a multi- - threaded application and thus counters can be inconsistent with each other - and the compiler should take this into account. - - --fprofile-use - --fprofile-dir=path - --fprofile-correction - - Feedback compilation using Open64 - --------------------------------- - - Using Open64 with --generate-feedback=path causes the following flags to - be added to the compiler flags. - - -fb-create path/feedback - - Using Open64 with --use-feedback=path causes the following flags to be - added to the compiler flags. - - --fb-opt path/feedback - - Linux/x86+Itanium/gcc - ------------- - For debug builds -O is used and otherwise -O3 is used. Discovery of a - Nocona or Core 2 Duo CPU causes a 64-bit binary to be built; - otherwise, the binary is 32-bit. To build a 64-bit binary, -m64 is - added to the C/C++ flags. (To build a 32-bit binary on a 64-bit CPU, - use the --32 option as described previously.) - - When gcc 4.5 is used and the user sets --with-link-time-optimizer then - also --flto is added to compiler flags and linker flags. - - Linux/x86+Itanium/icc - ------------- - Flags used: - CC = icc -static-libgcc -static-intel - C++ = icpc -static-libgcc -static-intel - C/C++ flags = -mp -restrict - - On Itanium we also add -no-ftz to the CC and C++ flags. - - Note that if the user of this script sets CC or CXX explicitly then - also -static-libgcc and -static-intel need to be set in the CC and - CXX. - - The non-debug versions also add the following: - C/C++ flags += -O3 unroll2 -ip - - The fast version adds (if --with-link-time-optimizer is used): - C/C++ flags += -ipo - - On discovery of a Core 2 Duo architecture while using icc, -xT is also - added to the C/C++ flags; this provides optimisations specific to Core - 2 Duo. This is added only when the --fast flag is set. - - Linux/x86/Open64 - ---------------- - For normal builds use -O3; when the fast flag is set one also adds - --march=auto to generate optimized builds for the CPU used.
If - --with-link-time-optimizer is set also -ipa is set. There is also - a special flag --with-mso which can be set to get --mso set which - activates optimisation for multi-core scalability. - - FreeBSD/x86/gcc - --------------- - No flags are used. Instead, configure determines the proper flags to - use. - - Solaris/x86/gcc - --------------- - All builds on Solaris are by default 64-bit, so -m64 is always used in - the C/C++ flags. LDFLAGS is set to -m64 -O/-O2/-O3. If for - some reason a 32-bit Solaris is used it is necessary to add the flag - --32 to the script invocation. Due to bugs in compiling with -O3 on - Solaris only -O2 is used by default, when --fast flag is used -O3 will - be used instead. - - Sets -m64 (default) or -m32 (if specifically set) in LDFLAGS and - C/C++ flags. - - Solaris/Sparc/Forte - ------------------- - Uses cc as CC and CC as CXX - Note that SunStudio uses different binaries for C and C++ compilers. - - Set -m64 (default) or -m32 (if specifically set) in ASFLAGS, - LDFLAGS and C/C++ flags. - - Sets ASFLAGS=LDFLAGS=compiler flags=xarch=sparc, so that we compile - Sparc v9 binaries, also -mt is set in all those since we're always - building a multithreaded program. - - C flags = -xstrconst This flag is set only on SPARC - C++ flags = -noex - - Set the following C/C++ flags: - -fsimple=1 - -ftrap=%none - -nofstore This flag is set only on x86 - -xbuiltin=%all - -xlibmil - -xlibmopt - - Set the C++ flag: - -noex - -features=no%except This flag is set only on x86 - - When compiling with fast we set (-ipo only used if we have - set --with-link-time-optimizer): - C/C++ flags: -xtarget=native -xunroll=3 -xipo - LDFLAGS: -xipo - - When not compiling with fast we always set -xtarget=generic - - When compiling with fast on SPARC we also set: - C/C++ flags: -xbinopt=prepare - LDFLAGS: -xbinopt=prepare - - When compiling with fast on x86 we also set: - C/C++ flags: -xregs=frameptr - When not compiling with fast we set on x86 - C/C++ flags: -xregs=no%frameptr - - On SPARC we set - ASFLAGS = LDFLAGS = C/C++ flags = -xarch=sparc - - The optimisation level is - -xO Debug builds - -xO2 Production build on SPARC - -xO3 Production build on x86 - -xO4 Fast builds on SPARC/x86 - - MacOSX/x86/gcc - -------------- - C/C++ flags include -fno-common -arch i386. - When 64-bits builds then i386 is replaced by x86_64. - - Non-debug versions also add -Os -felide-constructors, where "-Os" - means the build is space-optimised as long as the space optimisations - do not negatively affect performance. Debug versions use -O. - - Mac OS X builds will always be 32-bit by default, when --64 is added - the build will be 64 bit instead. Thus the flag --m64 is added only - when specifically given as an option. -EOF -} - -with_usage() -{ - cat < /dev/null 2>&1 - then - make=gmake - else - make=make - fi - if test "x`$make --version | grep GNU`" = "x" ; then - die "Only gmake is supported" - fi -} - -# -# Find a libtoolize binary, both libtoolize and glibtoolize are -# ok, use first found. 
-# -set_libtoolize_version() -{ - LIBTOOLIZE=not_found - save_ifs="$IFS"; IFS=':' - for dir in $PATH - do - if test -x $dir/libtoolize - then - LIBTOOLIZE=libtoolize - echo "Found libtoolize in $dir" - break - fi - if test -x $dir/glibtoolize - then - LIBTOOLIZE=glibtoolize - echo "Found glibtoolize in $dir" - break - fi - done - IFS="$save_ifs" - if test "x$LIBTOOLIZE" = "xnot_found" ; then - die "Found no libtoolize version, quitting here" - fi - return -} - -# -# If ccache (a compiler cache which reduces build time) -# (http://samba.org/ccache) is installed, use it. -# We use 'grep' and hope that 'grep' works as expected -# (returns 0 if finds lines) -# We do not use ccache when gcov is used. Also only when -# gcc is used. -# -set_ccache_usage() -{ - if test "x$compiler" = "xgcc" ; then - if ccache -V > /dev/null 2>&1 && test "$USING_GCOV" != "1" - then - echo "$CC" | grep "ccache" > /dev/null || CC="ccache $CC" - echo "$CXX" | grep "ccache" > /dev/null || CXX="ccache $CXX" - fi - fi -} - -# -# Set flags for various build configurations. -# Used in -valgrind builds -# -set_valgrind_flags() -{ - if test "x$valgrind_flag" = "xyes" ; then - loc_valgrind_flags="-UFORCE_INIT_OF_VARS -DHAVE_purify " - loc_valgrind_flags="$loc_valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max" - compiler_flags="$compiler_flags $loc_valgrind_flags" - with_flags="$with_flags --with-valgrind" - fi -} - -# -# Set up warnings; default is to use no warnings, but if warning_mode -# is used a lot of warning flags are set up. These flags are valid only -# for gcc, so for other compilers we ignore the warning_mode. -# -set_warning_flags() -{ - if test "x$developer_flag" = "xyes" && test "x$warning_mode" = "x" ; then - warning_mode="normal" - fi - if test "x$compiler" = "xgcc" ; then - if test "x$warning_mode" = "normal" || test "x$warning_mode" = "extra" ; then -# Both C and C++ warnings - warnings="$warnings -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs" - warnings="$warnings -Wcomment -W" - warnings="$warnings -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare" - warnings="$warnings -Wwrite-strings -Wunused-function -Wunused-label" - warnings="$warnings -Wunused-value -Wunused-variable -Wno-uninitialized" - - if test "x$warning_mode" = "extra" ; then - warnings="$warnings -Wshadow" - fi -# C warnings - c_warnings="$warnings -Wunused-parameter" -# C++ warnings - cxx_warnings="$warnings -Woverloaded-virtual -Wsign-promo -Wreorder" - cxx_warnings="$warnings -Wctor-dtor-privacy -Wnon-virtual-dtor" - compiler_flags="$compiler_flags -Wuninitialized" - elif test "x$warning_mode" = "xpedantic" ; then - warnings="-W -Wall -ansi -pedantic -Wno-long-long -D_POSIX_SOURCE" - c_warnings="$warnings" - cxx_warnings="$warnings -std=c++98" -# Reset CPU flags (-mtune), they don't work in -pedantic mode - check_cpu_cflags="" - fi - fi -} - -# -# Used in -debug builds -# -set_with_debug_flags() -{ - if test "x$with_debug_flag" = "xyes" ; then - if test "x$developer_flag" = "xyes" ; then - loc_debug_flags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG" - loc_debug_flags="$loc_debug_flags -Wuninitialized -DFORCE_INIT_OF_VARS" - loc_debug_flags="$loc_debug_flags -DSAFEMALLOC" - compiler_flags="$compiler_flags $loc_debug_flags" - fi - compiler_flags="$compiler_flags $extra_debug_flags" - fi -} - -# -# Flag for optimizing builds for developers. 
-# -set_no_omit_frame_pointer_for_developers() -{ - if test "x$fast_flag" != "xno" ; then - if test "x$developer_flag" = "xyes" && test "x$compiler" = "xgcc" ; then -# Be as fast as we can be without losing our ability to backtrace. - compiler_flags="$compiler_flags -fno-omit-frame-pointer" - fi - fi -} - -# -# Add -g to all builds that requested debug information in build -# -set_debug_flag() -{ - if test "x$compile_debug_flag" = "xyes" ; then - compiler_flags="$compiler_flags -g" - fi -} - -# -# We compile in SSL support if we can, this isn't possible if CXX -# and CC aren't the same and we're not using GCC. -# -set_ssl() -{ - if test "x$compiler" = "xgcc" && \ - test "x$CC" = "x$CXX" ; then - base_configs="$base_configs --with-ssl" - fi -} - -# -# Base options used by all packages -# -# SSL library to use. --with-ssl selects the bundled yaSSL -# implementation of SSL. To use openSSL, you must point out the location -# of the openSSL headers and libs on your system. -# For example: --with-ssl=/usr -# -set_base_configs() -{ - base_configs="$base_configs --prefix=$prefix" - base_configs="$base_configs --libexecdir=$prefix/bin" - base_configs="$base_configs --with-zlib-dir=bundled" - if test "x$datadir" = "x" ; then - base_configs="$base_configs --localstatedir=$prefix/data" - else - base_configs="$base_configs --localstatedir=$datadir" - fi - if test "x$with_debug_flag" = "xyes" ; then - base_configs="$base_configs --with-debug" - fi - base_configs="$base_configs --enable-local-infile" - base_configs="$base_configs --enable-thread-safe-client" - base_configs="$base_configs --with-big-tables" - base_configs="$base_configs --with-extra-charsets=all" - if test "x$with_fast_mutexes" = "xyes" ; then - base_configs="$base_configs --with-fast-mutexes" - fi - base_configs="$base_configs --with-pic" - base_configs="$base_configs --with-csv-storage-engine" - if test "x$with_perfschema" != "xno" ; then - base_configs="$base_configs --with-perfschema" - fi - base_configs="$base_configs --with-libevent" -} - -# -# Add all standard engines and partitioning -# -set_max_engines() -{ - engine_configs="--with-plugins=max --with-plugin-maria --with-maria-tmp-tables" - engine_configs="$engine_configs --without-plugin-innodb_plugin" - base_configs="$base_configs $engine_configs" -} - -set_ndb_engine() -{ - base_configs="$base_configs --with-ndbcluster" - base_configs="$base_configs --without-ndb-debug" -} - -set_pro_package() -{ - if test "x$without_comment" != "xyes" ; then - base_configs="$base_configs --with-comment=\"Pro $version_text built from source\"" - fi - if test "x$with_debug_flag" = "xyes" ; then - base_configs="$base_configs --with-server-suffix=\"-debug\"" - fi -} - -set_classic_package() -{ - if test "x$without_comment" != "xyes" ; then - base_configs="$base_configs --with-comment=\"Classic $version_text built from source\"" - fi - if test "x$with_debug_flag" = "xyes" ; then - base_configs="$base_configs --with-server-suffix=\"-debug\"" - fi - base_configs="$base_configs --without-example-storage-engine" -} - -# -# Special handling of readline; use readline from the MySQL -# distribution if building a GPL version, otherwise use libedit. 
-# -set_readline_package() -{ - if test -d "$path/../cmd-line-utils/readline" && test "x$gpl" = "xyes" ; then - base_configs="$base_configs --with-readline" - elif test -d "$path/../cmd-line-utils/libedit" ; then - base_configs="$base_configs --with-libedit" - fi -} - -# -# If fast flag set by user we also add architecture as discovered to -# compiler flags to make binary optimised for architecture at hand. -# We use this feature on gcc compilers. -# -set_gcc_special_options() -{ - if test "x$fast_flag" = "xyes" && test "x$compiler" = "xgcc" ; then - compiler_flags="$compiler_flags $check_cpu_cflags" - fi -} - -# -# If we discover a Core 2 Duo architecture and we have enabled the fast -# flag, we enable a compile especially optimised for Core 2 Duo. This -# feature is currently available on Intel's icc compiler only. -# -set_icc_special_options() -{ - if test "x$fast_flag" = "xyes" && test "x$cpu_arg" = "xcore2" && \ - test "x$compiler" = "xicc" ; then - compiler_flags="$compiler_flags -xT" - fi -} - -set_cc_and_cxx_for_gcc() -{ - if test "x$CC" = "x" ; then - CC="gcc -static-libgcc -fno-exceptions" - fi - if test "x$CXX" = "x" ; then - CXX="gcc -static-libgcc -fno-exceptions" - fi -} - -set_cc_and_cxx_for_icc() -{ - if test "x$CC" = "x" ; then - CC="icc -static-intel -static-libgcc" - fi - if test "x$CXX" = "x" ; then - CXX="icpc -static-intel -static-libgcc" - fi -} - -set_cc_and_cxx_for_open64() -{ - if test "x$CC" = "x" ; then - CC="opencc -static-libgcc -fno-exceptions" - fi - if test "x$CXX" = "x" ; then - CXX="openCC -static-libgcc -fno-exceptions" - fi -} - -set_cc_and_cxx_for_forte() -{ - if test "x$CC" = "x" ; then - CC="cc" - fi - if test "x$CXX" = "x" ; then - CXX="CC" - fi -} - -# -# FreeBSD Section -# -set_bsd_configs() -{ - if test "x$cpu_base_type" != "xx86" ; then - usage "Only x86 CPUs supported for FreeBSD" - exit 1 - fi - if test "x$compiler" != "xgcc" ; then - usage "Only gcc supported for FreeBSD" - exit 1 - fi - base_configs="$base_configs --enable-assembler" - if test "x$fast_flag" != "xno" ; then - compiler_flags="$compiler_flags -O3" - else - compiler_flags="$compiler_flags -O0" - fi - set_cc_and_cxx_for_gcc -} - -check_64_bits() -{ - echo "Checking for 32/64-bits compilation" - echo "int main() { return 0; }" > temp_test.c - if test "x$m64" = "xyes" ; then - cmd="$CC $compile_flags -m64 temp_test.c" - if ! $cmd 2>1 ; then - m64="no" - echo "Changing to 32-bits since 64-bits didn't work" - else - echo "Will use 64-bits" - fi - else - cmd="$CC $compile_flags -m32 temp_test.c" - if ! $cmd 2>1 ; then - m64="yes" - echo "Changing to 64-bits since 32-bits didn't work" - else - echo "Will use 32-bits" - fi - fi - rm temp_test.c -} - -# -# Get GCC version -# -get_gcc_version() -{ - # check if compiler is gcc and dump its version - cc_verno=`$cc -dumpversion 2>/dev/null` - if test "x$?" = "x0" ; then - set -- `echo $cc_verno | tr '.' 
' '` - cc_ver="GCC" - cc_major=$1 - cc_minor=$2 - cc_patch=$3 - gcc_version=`expr $cc_major '*' 100 '+' $cc_minor` - fi -} - -# -# Link time optimizer (interprocedural optimizations) for Open64 -# -check_for_open64_link_time_optimizer() -{ - if test "x$with_link_time_optimizer" = "xyes" ; then - compiler_flags="$compiler_flags -ipa" - LDFLAGS="$LDFLAGS -ipa" - fi -} - -# -# Link time optimizer (interprocedural optimizations) for icc -# -check_for_icc_link_time_optimizer() -{ - if test "x$with_link_time_optimizer" = "xyes" ; then - compiler_flags="$compiler_flags -ipo" - LDFLAGS="$LDFLAGS -ipo" - fi -} - -# -# Link time optimizer (interprocedural optimizations) for forte -# -check_for_forte_link_time_optimizer() -{ - if test "x$with_link_time_optimizer" = "xyes" ; then - compiler_flags="$compiler_flags -ipo" - LDFLAGS="$LDFLAGS -ipo" - fi -} - -# -# Link Time Optimizer in GCC (LTO) uses a parameter -flto -# which was added to GCC 4.5, if --with-link-time-optimizer -# is set then use this feature -# -check_for_gcc_link_time_optimizer() -{ - get_gcc_version - if test "$gcc_version" -ge 405 && \ - test "x$with_link_time_optimizer" = "xyes" ; then - compiler_flags="$compiler_flags -flto" - LDFLAGS="$LDFLAGS -flto" - fi -} - -set_feedback_for_gcc() -{ - if test "x$GENERATE_FEEDBACK_PATH" != "x" ; then - compiler_flags="$compiler_flags -fprofile-generate" - compiler_flags="$compiler_flags -fprofile-dir=$GENERATE_FEEDBACK_PATH" - elif test "x$USE_FEEDBACK_PATH" != "x" ; then - compiler_flags="$compiler_flags -fprofile-use" - compiler_flags="$compiler_flags -fprofile-correction" - compiler_flags="$compiler_flags -fprofile-dir=$USE_FEEDBACK_PATH" - fi -} - -set_feedback_for_open64() -{ - if test "x$GENERATE_FEEDBACK_PATH" != "x" ; then - compiler_flags="$compiler_flags --fb-create=$GENERATE_FEEDBACK_PATH/feedback" - elif test "x$USE_FEEDBACK_PATH" != "x" ; then - compiler_flags="$compiler_flags --fb-opt=$USE_FEEDBACK_PATH/feedback" - fi -} - -# -# Linux Section -# -set_linux_configs() -{ -# Default to use --with-fast-mutexes on Linux - if test "x$with_fast_mutexes" = "x" ; then - base_configs="$base_configs --with-fast-mutexes" - fi - if test "x$cpu_base_type" != "xx86" && \ - test "x$cpu_base_type" != "xitanium" ; then - usage "Only x86 and Itanium CPUs supported for Linux" - exit 1 - fi - if test "x$use_tcmalloc" = "xyes" ; then - base_configs="$base_configs --with-mysqld-libs=-ltcmalloc_minimal" - fi - if test "x$cpu_base_type" = "xx86" ; then - base_configs="$base_configs --enable-assembler" - fi - if test "x$compiler" = "xgcc" ; then - set_cc_and_cxx_for_gcc - if test "x$fast_flag" != "xno" ; then - if test "x$fast_flag" = "xyes" ; then - compiler_flags="$compiler_flags -O3" - check_for_gcc_link_time_optimizer - else - compiler_flags="$compiler_flags -O3" - fi - else - compiler_flags="$compiler_flags -O0" - fi - set_feedback_for_gcc -# configure will set proper compiler flags for gcc on Linux - elif test "x$compiler" = "xicc" ; then - compiler_flags="$compiler_flags -mp -restrict" - set_cc_and_cxx_for_icc - if test "x$cpu_base_type" = "xitanium" ; then - compiler_flags="$compiler_flags -no-ftz" - fi - if test "x$fast_flag" != "xno" ; then - compiler_flags="$compiler_flags -O3 -unroll2 -ip" - if test "x$fast_flag" = "xyes" ; then - check_for_icc_link_time_optimizer - fi - fi - elif test "x$compiler" = "xopen64" ; then - set_cc_and_cxx_for_open64 - if test "x$fast_flag" != "xno" ; then - if test "x$fast_flag" = "xyes" ; then - compiler_flags="$compiler_flags -O3" -# Generate code specific for the 
machine you run on - compiler_flags="$compiler_flags -march=auto" - check_for_open64_link_time_optimizer - if test "x$with_mso" = "xyes" ; then - compiler_flags="$compiler_flags -mso" - fi - else - compiler_flags="$compiler_flags -O3" - fi - fi - set_feedback_for_open64 - else - usage "Only gcc,icc and Open64 compilers supported for Linux" - exit 1 - fi - check_64_bits - if test "x$m64" = "xyes" ; then - compiler_flags="$compiler_flags -m64" - else - compiler_flags="$compiler_flags -m32" - fi -} - -# -# Solaris Section -# -set_solaris_configs() -{ -# Use mtmalloc as malloc, see Tim Cook blog -# For information on optimal compiler settings, see article at -# http://developers.sun.com/solaris/articles/mysql_perf_tune.html -# by Luojia Chen at Sun. - base_configs="$base_configs --with-named-curses=-lcurses" - case "`uname -a`" in - *5.8* | *5.9* ) - ;; - - *5.10* | *5.11*) - base_configs="$base_configs --with-mysqld-libs=-lmtmalloc" - ;; - *) - usage "Only versions 8,9, 10 and 11 supported for Solaris" - exit 1 - esac - if test "x$cpu_base_type" != "xx86" && \ - test "x$cpu_base_type" != "xsparc" ; then - usage "Only x86 and Sparc CPUs supported for Solaris" - exit 1 - fi - if test "x$compiler" != "xgcc" && \ - test "x$compiler" != "xforte" ; then - usage "Only gcc and Forte compilers supported for Solaris" - exit 1 - fi - if test "x$m64" = "xyes" ; then - compiler_flags="$compiler_flags -m64" - LDFLAGS="-m64" - ASFLAGS="$ASFLAGS -m64" - else - compiler_flags="$compiler_flags -m32" - LDFLAGS="-m32" - ASFLAGS="$ASFLAGS -m32" - fi - if test "x$compiler" = "xgcc" ; then - set_cc_and_cxx_for_gcc - if test "x$cpu_base_type" != "xx86" ; then - usage "gcc currently not supported for Solaris on SPARC" - exit 1 - fi - if test "x$fast_flag" = "xyes" ; then - LDFLAGS="$LDFLAGS -O3" - compiler_flags="$compiler_flags -O3" - check_for_gcc_link_time_optimizer - else - if test "x$fast_flag" = "xgeneric" ; then - LDFLAGS="$LDFLAGS -O2" - compiler_flags="$compiler_flags -O2" - else - LDFLAGS="$LDFLAGS -O0" - compiler_flags="$compiler_flags -O0" - fi - fi - else -#Using Forte compiler (SunStudio) - set_cc_and_cxx_for_forte - compiler_flags="$compiler_flags -mt" - LDFLAGS="$LDFLAGS -mt" - compiler_flags="$compiler_flags -fsimple=1" - compiler_flags="$compiler_flags -ftrap=%none" - compiler_flags="$compiler_flags -xbuiltin=%all" - compiler_flags="$compiler_flags -xlibmil" - compiler_flags="$compiler_flags -xlibmopt" - if test "x$fast_flag" = "xyes" ; then - compiler_flags="$compiler_flags -xtarget=native" - compiler_flags="$compiler_flags -xunroll=3" - check_for_forte_link_time_optimizer - else - compiler_flags="$compiler_flags -xtarget=generic" - fi - if test "x$cpu_base_type" = "xx86" ; then - compiler_flags="$compiler_flags -nofstore" - base_cxx_flags="$base_cxx_flags -features=no%except" - if test "x$fast_flag" = "xyes" ; then - compiler_flags="$compiler_flags -xregs=frameptr" - compiler_flags="$compiler_flags -xO4" - else - compiler_flags="$compiler_flags -xregs=no%frameptr" - if test "x$fast_flag" = "xgeneric" ; then - compiler_flags="$compiler_flags -xO2" - else - compiler_flags="$compiler_flags -xO0" - fi - fi - else -#Using SPARC cpu with SunStudio (Forte) compiler - ASFLAGS="$ASFLAGS -xarch=sparc" - LDFLAGS="$LDFLAGS -xarch=sparc" - base_cxxflags="$base_cxxflags -noex" - base_cflags="$base_cflags -xstrconst" - compiler_flags="$compiler_flags -xarch=sparc" - if test "x$fast_flag" = "xyes" ; then - compiler_flags="$compiler_flags -xbinopt=prepare" - LDFLAGS="$LDFLAGS -xbinopt=prepare" - 
compiler_flags="$compiler_flags -xO4" - elif test "x$fast_flag" = "xgeneric" ; then - compiler_flags="$compiler_flags -xO3" - else - compiler_flags="$compiler_flags -xO0" - fi - fi - fi -} - -# -# Mac OS X Section -# -set_macosx_configs() -{ - if test "x$cpu_base_type" != "xx86" || test "x$compiler" != "xgcc" ; then - usage "Only gcc/x86 supported for Mac OS X" - exit 1 - fi -# -# Optimize for space as long as it doesn't affect performance, use some -# optimisations also when not in fast mode. -# - base_cxxflags="$base_cxxflags -felide-constructors" - compiler_flags="$compiler_flags -fno-common" - if test "x$m64" = "xyes" ; then - compiler_flags="$compiler_flags -m64" - compiler_flags="$compiler_flags -arch x86_64" - else - compiler_flags="$compiler_flags -m32" - compiler_flags="$compiler_flags -arch i386" - fi - if test "x$fast_flag" != "xno" ; then - compiler_flags="$compiler_flags -Os" - else - compiler_flags="$compiler_flags -O0" - fi - set_cc_and_cxx_for_gcc -} - -# -# Use static linking for own modules and dynamic linking for system -# modules unless specifically requested to do everything statically. -# Should normally not be used; static_linking_flag kept in case someone -# really needs it. Available only if developer flag is also set. -# -set_static_link_configs() -{ - if test "x$static_linking_flag" = "xyes" && test "x$developer_flag" = "xyes" ; then - loc_static_link="--with-mysqld-ldflags=\"-all-static\"" - loc_static_link="$loc_static_link --with-client-ldflags=\"-all-static\"" - else - loc_static_link="--with-mysqld-ldflags=\"-static\"" - loc_static_link="$loc_static_link --with-client-ldflags=\"-static\"" - fi - base_configs="$base_configs $loc_static_link" -} - -# -# Enable error injection in MySQL Server (for developer build only - -# extra check for developer flag required). -# -set_error_inject_configs() -{ - if test "x$error_inject_flag" = "xyes" && test "x$developer_flag" = "xyes" ; then - base_configs="$base_configs --with-error-inject" - if test "x$package" = "xndb" || test "x$package" = "xextended" ; then - base_configs="$base_configs --with-ndb-ccflags='-DERROR_INSERT'" - fi - fi -} - -set_default_package() -{ - if test "x$package" = "x" ; then - package="extended" - fi -} - -set_defaults_based_on_environment() -{ - if test ! -z "$MYSQL_DEVELOPER" ; then - developer_flag="yes" - fi - if test ! -z "$MYSQL_DEVELOPER_DEBUG" ; then - with_debug_flag="yes" - fast_flag="no" - fi - if test ! -z "$MYSQL_DEVELOPER_PACKAGE" ; then - package="$MYSQL_DEVELOPER_PACKAGE" - parse_package - fi -} - -######################################################################## - -if test ! 
-f sql/mysqld.cc ; then - die "You must run this script from the MySQL top-level directory" -fi - -cpu_type= -package= -prefix="/usr/local/mysql" -parallelism="8" -fast_flag="generic" -compiler="gcc" -gpl="yes" -version_text= -developer_flag="no" -just_configure= -warning_mode= -with_flags= -error_inject_flag= -with_debug_flag= -compile_debug_flag= -strip_flag= -valgrind_flag= -static_linking_flag= -compiler_flags= -os= -cpu_base_type= -warnings= -c_warnings= -cflags= -base_cflags= -cxx_warnings= -base_cxxflags= -base_configs= -debug_flags= -cxxflags= -extra_debug_flags= -m64= -explicit_size_set= -datadir= -commands= -engine_configs= -ASFLAGS= -LDFLAGS= -use_tcmalloc= -without_comment="yes" -with_fast_mutexes= -with_perfschema="yes" -with_link_time_optimizer= -with_mso= -gcc_version="0" -generate_feedback_path= -use_feedback_path= - -set_defaults_based_on_environment - -parse_options "$@" - -set_default_package - -set -e - -# -# Check for the CPU and set up CPU specific flags. We may reset them -# later. -# This call sets the cpu_arg and check_cpu_args parameters -# -path=`dirname $0` -if test "x$compiler" = "xgcc" ; then - compiler= -fi -. "$path/check-cpu" -if test "x$compiler" = "x" ; then - compiler="gcc" -fi -check_os -set_cpu_base -if test "x$?" = "x1" ; then - exit 1 -fi - -# -# Set up c_warnings and cxx_warnings; add to compiler_flags. -# Possibly reset check_cpu_flags. -# -set_warning_flags - -# -# Add to compiler_flags. -# -set_valgrind_flags -set_with_debug_flags -set_no_omit_frame_pointer_for_developers -set_debug_flag -set_gcc_special_options -set_icc_special_options - -# -# Definitions of various packages possible to compile. The default is to -# build a source variant including all storage engines except InnoDB. -# -set_base_configs - version_text="GPL version" -if test "x$package" = "xpro" ; then - set_max_engines - set_pro_package -elif test "x$package" = "xclassic" ; then - set_classic_package -else - die "No supported package was used, internal error" -fi -set_readline_package -set_static_link_configs -set_error_inject_configs - -# -# This section handles flags for specific combinations of compilers, -# operating systems, and processors. -# - -if test "x$os" = "xlinux" ; then - set_linux_configs -elif test "x$os" = "xSolaris" ; then - set_solaris_configs -elif test "x$os" = "xMacOSX" ; then - set_macosx_configs -elif test "x$os" = "xbsd" ; then - set_bsd_configs -else - die "Operating system not supported by this script" -fi -set_ssl -# -# Final step before setting up commands is to set up proper make and -# proper libtoolize versions, and to determine whether to use ccache. -# -set_make_version -set_ccache_usage - -# -# Set up commands variable from variables prepared for base -# configurations, compiler flags, and warnings flags. -# -init_configure_commands - -if test "x$just_configure" != "xyes" ; then - add_make_commands -fi - -# -# The commands variable now contains the entire command to be run for -# the build; we either execute it, or merely print it out. -# -echo "Running command:" -echo "$commands" -if test "x$just_print" != "xyes" ; then - eval "set -x; $commands" -fi diff --git a/BUILD/compile-amd64-debug-max-no-ndb b/BUILD/compile-amd64-debug-max-no-ndb index 100a17163a4..d45a51eba54 100755 --- a/BUILD/compile-amd64-debug-max-no-ndb +++ b/BUILD/compile-amd64-debug-max-no-ndb @@ -20,6 +20,6 @@ path=`dirname $0` . 
"$path/SETUP.sh" extra_flags="$amd64_cflags $debug_cflags" -extra_configs="$amd64_configs $debug_configs $max_no_ndb_configs" +extra_configs="$amd64_configs $debug_configs $max_configs" . "$path/FINISH.sh" diff --git a/BUILD/compile-amd64-gprof-no-ndb b/BUILD/compile-amd64-gprof-no-ndb index 9fd4c67155c..ef684274658 100755 --- a/BUILD/compile-amd64-gprof-no-ndb +++ b/BUILD/compile-amd64-gprof-no-ndb @@ -2,6 +2,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$amd64_cflags -pg -g" -extra_configs="$amd64_configs $max_no_ndb_configs --disable-shared $static_link" +extra_configs="$amd64_configs $max_configs --disable-shared $static_link" . "$path/FINISH.sh" diff --git a/BUILD/compile-amd64-max-sci b/BUILD/compile-amd64-max-sci deleted file mode 100644 index fbcbba7920b..00000000000 --- a/BUILD/compile-amd64-max-sci +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/sh - -# Copyright (C) 2007 MySQL AB -# Use is subject to license terms -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -path=`dirname $0` -. "$path/SETUP.sh" -extra_flags="$amd64_cflags $fast_cflags -g" -extra_configs="$amd64_configs $max_configs --with-ndb-sci=/opt/DIS" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-bintar b/BUILD/compile-bintar index 2b039e439c0..8434bf57a52 100755 --- a/BUILD/compile-bintar +++ b/BUILD/compile-bintar @@ -73,7 +73,7 @@ CC="gcc -static-libgcc" CXX="g++ -static-libgcc" CFLAGS="$FLAGS" CXXFLAGS="$FLAG --without-docs --with-extra-charsets=all \ --with-libwrap --with-ssl --with-readline --with-libevent --with-zlib-dir=bundled \ --with-partition --with-embedded-server \ - --with-plugins=max-no-ndb \ + --with-plugins=max \ --without-plugin-innodb_plugin make $AM_MAKEFLAGS diff --git a/BUILD/compile-dist b/BUILD/compile-dist index becfea638f8..853fd339769 100755 --- a/BUILD/compile-dist +++ b/BUILD/compile-dist @@ -78,5 +78,5 @@ fi ./configure \ --with-embedded-server \ --with-perfschema \ - --with-plugins=max-no-ndb + --with-plugins=max $gmake -j4 diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest deleted file mode 100755 index 691b309fb60..00000000000 --- a/BUILD/compile-ndb-autotest +++ /dev/null @@ -1,24 +0,0 @@ -#! /bin/sh - -# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags='-DERROR_INSERT'" -extra_flags="$fast_cflags $max_cflags -g" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-debug-max-no-ndb b/BUILD/compile-pentium-debug-max-no-ndb index fa8069414b2..705164c20bc 100755 --- a/BUILD/compile-pentium-debug-max-no-ndb +++ b/BUILD/compile-pentium-debug-max-no-ndb @@ -19,6 +19,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$pentium_cflags $debug_cflags" -extra_configs="$pentium_configs $debug_configs $max_no_ndb_configs" +extra_configs="$pentium_configs $debug_configs $max_configs" . "$path/FINISH.sh" diff --git a/BUILD/compile-pentium-valgrind-max-no-ndb b/BUILD/compile-pentium-valgrind-max-no-ndb index c3ebb47cc22..4eb47cb2fe2 100755 --- a/BUILD/compile-pentium-valgrind-max-no-ndb +++ b/BUILD/compile-pentium-valgrind-max-no-ndb @@ -21,6 +21,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$pentium_cflags $debug_cflags $valgrind_flags" -extra_configs="$pentium_configs $debug_configs $valgrind_configs $max_no_ndb_configs" +extra_configs="$pentium_configs $debug_configs $valgrind_configs $max_configs" . "$path/FINISH.sh" diff --git a/BUILD/compile-pentium64-max-sci b/BUILD/compile-pentium64-max-sci deleted file mode 100644 index a19351308c5..00000000000 --- a/BUILD/compile-pentium64-max-sci +++ /dev/null @@ -1,25 +0,0 @@ -#! /bin/sh - -# Copyright (C) 2007 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -path=`dirname $0` -. "$path/SETUP.sh" - -extra_flags="$pentium64_cflags $fast_cflags -g" -extra_configs="$pentium_configs $max_configs --with-ndb-sci=/opt/DIS" - -. "$path/FINISH.sh" diff --git a/BUILD/compile-ppc-debug-max-no-ndb b/BUILD/compile-ppc-debug-max-no-ndb index ba7fe9aee5b..0642ddf7a57 100755 --- a/BUILD/compile-ppc-debug-max-no-ndb +++ b/BUILD/compile-ppc-debug-max-no-ndb @@ -20,6 +20,6 @@ path=`dirname $0` . "$path/SETUP.sh" extra_flags="$ppc_cflags $debug_cflags" -extra_configs="$debug_configs $max_no_ndb_configs" +extra_configs="$debug_configs $max_configs" . 
"$path/FINISH.sh" diff --git a/CMakeLists.txt b/CMakeLists.txt index e80f8d628fc..e7076e668e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -404,12 +404,6 @@ IF(NOT WITHOUT_SERVER) ADD_SUBDIRECTORY(mysql-test) ADD_SUBDIRECTORY(mysql-test/lib/My/SafeProcess) ADD_SUBDIRECTORY(sql-bench) - - IF(EXISTS ${CMAKE_SOURCE_DIR}/internal/CMakeLists.txt) - ADD_SUBDIRECTORY(internal) - ENDIF() - ADD_SUBDIRECTORY(packaging/rpm-uln) - ADD_SUBDIRECTORY(packaging/rpm-oel) ENDIF() IF(UNIX) diff --git a/client/mysql.cc b/client/mysql.cc index b1570e32a80..510bceecac2 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -590,8 +590,6 @@ static COMMANDS commands[] = { { "NAMES", 0, 0, 0, ""}, { "NATIONAL", 0, 0, 0, ""}, { "NATURAL", 0, 0, 0, ""}, - { "NDB", 0, 0, 0, ""}, - { "NDBCLUSTER", 0, 0, 0, ""}, { "NCHAR", 0, 0, 0, ""}, { "NEW", 0, 0, 0, ""}, { "NEXT", 0, 0, 0, ""}, diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 287fd5e3764..635b9b51cda 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -4740,10 +4740,6 @@ void do_sync_with_master(struct st_command *command) } -/* - when ndb binlog is on, this call will wait until last updated epoch - (locally in the mysqld) has been received into the binlog -*/ int do_save_master_pos() { MYSQL_RES *res; @@ -4752,144 +4748,6 @@ int do_save_master_pos() const char *query; DBUG_ENTER("do_save_master_pos"); -#ifdef HAVE_NDB_BINLOG - /* - Wait for ndb binlog to be up-to-date with all changes - done on the local mysql server - */ - { - ulong have_ndbcluster; - if (mysql_query(mysql, query= "show variables like 'have_ndbcluster'")) - die("'%s' failed: %d %s", query, - mysql_errno(mysql), mysql_error(mysql)); - if (!(res= mysql_store_result(mysql))) - die("mysql_store_result() returned NULL for '%s'", query); - if (!(row= mysql_fetch_row(res))) - die("Query '%s' returned empty result", query); - - have_ndbcluster= strcmp("YES", row[1]) == 0; - mysql_free_result(res); - - if (have_ndbcluster) - { - ulonglong start_epoch= 0, handled_epoch= 0, - latest_epoch=0, latest_trans_epoch=0, - latest_handled_binlog_epoch= 0, latest_received_binlog_epoch= 0, - latest_applied_binlog_epoch= 0; - int count= 0; - int do_continue= 1; - while (do_continue) - { - const char binlog[]= "binlog"; - const char latest_epoch_str[]= - "latest_epoch="; - const char latest_trans_epoch_str[]= - "latest_trans_epoch="; - const char latest_received_binlog_epoch_str[]= - "latest_received_binlog_epoch"; - const char latest_handled_binlog_epoch_str[]= - "latest_handled_binlog_epoch="; - const char latest_applied_binlog_epoch_str[]= - "latest_applied_binlog_epoch="; - if (count) - my_sleep(100*1000); /* 100ms */ - if (mysql_query(mysql, query= "show engine ndb status")) - die("failed in '%s': %d %s", query, - mysql_errno(mysql), mysql_error(mysql)); - if (!(res= mysql_store_result(mysql))) - die("mysql_store_result() returned NULL for '%s'", query); - while ((row= mysql_fetch_row(res))) - { - if (strcmp(row[1], binlog) == 0) - { - const char *status= row[2]; - - /* latest_epoch */ - while (*status && strncmp(status, latest_epoch_str, - sizeof(latest_epoch_str)-1)) - status++; - if (*status) - { - status+= sizeof(latest_epoch_str)-1; - latest_epoch= strtoull(status, (char**) 0, 10); - } - else - die("result does not contain '%s' in '%s'", - latest_epoch_str, query); - /* latest_trans_epoch */ - while (*status && strncmp(status, latest_trans_epoch_str, - sizeof(latest_trans_epoch_str)-1)) - status++; - if (*status) - { - status+= sizeof(latest_trans_epoch_str)-1; - 
latest_trans_epoch= strtoull(status, (char**) 0, 10); - } - else - die("result does not contain '%s' in '%s'", - latest_trans_epoch_str, query); - /* latest_received_binlog_epoch */ - while (*status && - strncmp(status, latest_received_binlog_epoch_str, - sizeof(latest_received_binlog_epoch_str)-1)) - status++; - if (*status) - { - status+= sizeof(latest_received_binlog_epoch_str)-1; - latest_received_binlog_epoch= strtoull(status, (char**) 0, 10); - } - else - die("result does not contain '%s' in '%s'", - latest_received_binlog_epoch_str, query); - /* latest_handled_binlog */ - while (*status && - strncmp(status, latest_handled_binlog_epoch_str, - sizeof(latest_handled_binlog_epoch_str)-1)) - status++; - if (*status) - { - status+= sizeof(latest_handled_binlog_epoch_str)-1; - latest_handled_binlog_epoch= strtoull(status, (char**) 0, 10); - } - else - die("result does not contain '%s' in '%s'", - latest_handled_binlog_epoch_str, query); - /* latest_applied_binlog_epoch */ - while (*status && - strncmp(status, latest_applied_binlog_epoch_str, - sizeof(latest_applied_binlog_epoch_str)-1)) - status++; - if (*status) - { - status+= sizeof(latest_applied_binlog_epoch_str)-1; - latest_applied_binlog_epoch= strtoull(status, (char**) 0, 10); - } - else - die("result does not contain '%s' in '%s'", - latest_applied_binlog_epoch_str, query); - if (count == 0) - start_epoch= latest_trans_epoch; - break; - } - } - if (!row) - die("result does not contain '%s' in '%s'", - binlog, query); - if (latest_handled_binlog_epoch > handled_epoch) - count= 0; - handled_epoch= latest_handled_binlog_epoch; - count++; - if (latest_handled_binlog_epoch >= start_epoch) - do_continue= 0; - else if (count > 300) /* 30s */ - { - break; - } - mysql_free_result(res); - } - } - } -#endif if (mysql_query(mysql, query= "show master status")) die("failed in 'show master status': %d %s", mysql_errno(mysql), mysql_error(mysql)); diff --git a/cmake/make_dist.cmake.in b/cmake/make_dist.cmake.in index 2f88c47e17f..b8c8877ef61 100644 --- a/cmake/make_dist.cmake.in +++ b/cmake/make_dist.cmake.in @@ -116,15 +116,6 @@ CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc.h CONFIGURE_FILE(${CMAKE_BINARY_DIR}/sql/sql_yacc.cc ${PACKAGE_DIR}/sql/sql_yacc.cc COPYONLY) -# Copy spec files -SET(SPECFILENAME "mysql.${VERSION}.spec") -IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(SPECFILENAME "mysql-cluster-${NDBVERSION}.spec") -ENDIF() -CONFIGURE_FILE(${CMAKE_BINARY_DIR}/support-files/${SPECFILENAME} - ${PACKAGE_DIR}/support-files/${SPECFILENAME} COPYONLY) - # Add documentation, if user has specified where to find them IF(MYSQL_DOCS_LOCATION) MESSAGE("Copying documentation files from " ${MYSQL_DOCS_LOCATION}) diff --git a/cmake/mysql_version.cmake b/cmake/mysql_version.cmake index f147f6ef2bf..1802b1a2678 100644 --- a/cmake/mysql_version.cmake +++ b/cmake/mysql_version.cmake @@ -59,10 +59,6 @@ IF(NOT "${MAJOR_VERSION}" MATCHES "[0-9]+" OR MESSAGE(STATUS "MariaDB ${VERSION}") SET(MYSQL_BASE_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}" CACHE INTERNAL "MySQL Base version") SET(MYSQL_NO_DASH_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}") - # Use NDBVERSION irregardless of whether this is Cluster or not, if not - # then the regex will be ignored anyway. 
- STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - STRING(REPLACE "-" "_" MYSQL_RPM_VERSION "${NDBVERSION}") MATH(EXPR MYSQL_VERSION_ID "10000*${MAJOR_VERSION} + 100*${MINOR_VERSION} + ${PATCH_VERSION}") MARK_AS_ADVANCED(VERSION MYSQL_VERSION_ID MYSQL_BASE_VERSION) SET(CPACK_PACKAGE_VERSION_MAJOR ${MAJOR_VERSION}) @@ -93,10 +89,6 @@ ENDIF() IF(NOT CPACK_SOURCE_PACKAGE_FILE_NAME) SET(CPACK_SOURCE_PACKAGE_FILE_NAME "mariadb-${VERSION}") - IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(CPACK_SOURCE_PACKAGE_FILE_NAME "mysql-cluster-gpl-${NDBVERSION}") - ENDIF() ENDIF() SET(CPACK_PACKAGE_CONTACT "MariaDB team ") SET(CPACK_PACKAGE_VENDOR "Monty Program AB") diff --git a/cmake/package_name.cmake b/cmake/package_name.cmake index 4ba8fc18e3f..c1c335f91f9 100644 --- a/cmake/package_name.cmake +++ b/cmake/package_name.cmake @@ -126,12 +126,7 @@ IF(NOT VERSION) SET(PRODUCT_TAG) ENDIF() - IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(package_name "mysql-cluster${PRODUCT_TAG}-${NDBVERSION}-${SYSTEM_NAME_AND_PROCESSOR}") - ELSE() - SET(package_name "mariadb${PRODUCT_TAG}-${VERSION}-${SYSTEM_NAME_AND_PROCESSOR}") - ENDIF() + SET(package_name "mariadb${PRODUCT_TAG}-${VERSION}-${SYSTEM_NAME_AND_PROCESSOR}") MESSAGE(STATUS "Packaging as: ${package_name}") diff --git a/config.h.cmake b/config.h.cmake index 2bb62516d2b..e0e19f37261 100644 --- a/config.h.cmake +++ b/config.h.cmake @@ -604,10 +604,6 @@ #cmakedefine WITH_CSV_STORAGE_ENGINE 1 #cmakedefine WITH_PARTITION_STORAGE_ENGINE 1 #cmakedefine WITH_PERFSCHEMA_STORAGE_ENGINE 1 -#cmakedefine WITH_NDBCLUSTER_STORAGE_ENGINE 1 -#if (WITH_NDBCLUSTER_STORAGE_ENGINE) && !defined(EMBEDDED_LIBRARY) -# define HAVE_NDB_BINLOG 1 -#endif #cmakedefine WITH_ARIA_STORAGE_ENGINE 1 #cmakedefine USE_ARIA_FOR_TMP_TABLES 1 diff --git a/debian/README.Maintainer b/debian/README.Maintainer index b1e0a602781..9e44495ee3b 100644 --- a/debian/README.Maintainer +++ b/debian/README.Maintainer @@ -45,9 +45,6 @@ debhelper (>=4.1.16): autoconf (>= 2.13-20), automake1.7 Try to get rid of them. -doxygen, tetex-bin, tetex-extra, gs - for ndb/docs/*tex - # # Remarks to the start scripts # diff --git a/debian/mariadb-server-10.1.mysql.init b/debian/mariadb-server-10.1.mysql.init index 7854007afc6..d1975bcbca7 100644 --- a/debian/mariadb-server-10.1.mysql.init +++ b/debian/mariadb-server-10.1.mysql.init @@ -108,7 +108,7 @@ case "${1:-''}" in # Start MariaDB! /usr/bin/mysqld_safe "${@:2}" > /dev/null 2>&1 & - # 6s was reported in #352070 to be too few when using ndbcluster + # 6s was reported in #352070 to be too little for i in $(seq 1 "${MYSQLD_STARTUP_TIMEOUT:-30}"); do sleep 1 if mysqld_status check_alive nowarn ; then break; fi diff --git a/debian/mariadb-server-10.1.preinst b/debian/mariadb-server-10.1.preinst index 49651c41195..1df01b2dcee 100644 --- a/debian/mariadb-server-10.1.preinst +++ b/debian/mariadb-server-10.1.preinst @@ -48,18 +48,6 @@ stop_server() { this_version=10.1 -# Abort if an NDB cluster is in use. -if egrep -qi -r '^[^#]*ndb.connectstring|^[[:space:]]*\[[[:space:]]*ndb_mgmd' /etc/mysql/; then - db_fset mysql-server/no_upgrade_when_using_ndb seen false || true - db_input high mysql-server/no_upgrade_when_using_ndb || true - db_go - db_stop - exit 1 -fi - -# Abort if skip-bdb option is enabled, required for 5.0 -> 5.1 upgrades. -#TODO - # Safe the user from stupidities. 
show_downgrade_warning=0 for i in `ls $DATADIR/debian-*.flag 2>/dev/null`; do diff --git a/debian/mariadb-server-10.1.templates b/debian/mariadb-server-10.1.templates index fd83d2af293..f64dd02d7bd 100644 --- a/debian/mariadb-server-10.1.templates +++ b/debian/mariadb-server-10.1.templates @@ -81,9 +81,3 @@ Type: error _Description: Password input error The two passwords you entered were not the same. Please try again. -Template: mysql-server/no_upgrade_when_using_ndb -Type: error -_Description: NDB Cluster seems to be in use - MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new - mysql-cluster package and remove all lines starting with "ndb" from - all config files below /etc/mysql/. diff --git a/debian/mariadb-test-10.1.dirs b/debian/mariadb-test-10.1.dirs index f45bf43f7e9..1a488d98195 100644 --- a/debian/mariadb-test-10.1.dirs +++ b/debian/mariadb-test-10.1.dirs @@ -19,9 +19,6 @@ usr/share/mysql/mysql-test/suite/parts usr/share/mysql/mysql-test/suite/parts/inc usr/share/mysql/mysql-test/suite/parts/t usr/share/mysql/mysql-test/suite/parts/r -usr/share/mysql/mysql-test/suite/rpl_ndb -usr/share/mysql/mysql-test/suite/rpl_ndb/t -usr/share/mysql/mysql-test/suite/rpl_ndb/r usr/share/mysql/mysql-test/suite/bugs usr/share/mysql/mysql-test/suite/bugs/t usr/share/mysql/mysql-test/suite/bugs/r @@ -46,9 +43,6 @@ usr/share/mysql/mysql-test/suite/jp/t usr/share/mysql/mysql-test/suite/jp/r usr/share/mysql/mysql-test/suite/jp/include usr/share/mysql/mysql-test/suite/jp/std_data -usr/share/mysql/mysql-test/suite/ndb -usr/share/mysql/mysql-test/suite/ndb/t -usr/share/mysql/mysql-test/suite/ndb/r usr/share/mysql/mysql-test/suite/maria usr/share/mysql/mysql-test/suite/maria/t usr/share/mysql/mysql-test/suite/maria/r @@ -63,9 +57,6 @@ usr/share/mysql/mysql-test/suite/binlog usr/share/mysql/mysql-test/suite/binlog/t usr/share/mysql/mysql-test/suite/binlog/r usr/share/mysql/mysql-test/suite/binlog/std_data -usr/share/mysql/mysql-test/suite/ndb_team -usr/share/mysql/mysql-test/suite/ndb_team/t -usr/share/mysql/mysql-test/suite/ndb_team/r usr/share/mysql/mysql-test/suite/federated usr/share/mysql/mysql-test/suite/funcs_1 usr/share/mysql/mysql-test/suite/funcs_1/cursors @@ -87,9 +78,5 @@ usr/share/mysql/mysql-test/suite/oqgraph/t usr/share/mysql/mysql-test/suite/oqgraph/r usr/share/mysql/mysql-test/suite/oqgraph/include usr/share/mysql/mysql-test/std_data -usr/share/mysql/mysql-test/std_data/ndb_backup50 usr/share/mysql/mysql-test/std_data/parts -usr/share/mysql/mysql-test/std_data/ndb_backup51_data_le -usr/share/mysql/mysql-test/std_data/ndb_backup51_data_be -usr/share/mysql/mysql-test/std_data/ndb_backup51 usr/share/mysql/mysql-test/std_data/funcs_1 diff --git a/debian/patches/00list b/debian/patches/00list index 77c159a17ed..f4485b1b58e 100644 --- a/debian/patches/00list +++ b/debian/patches/00list @@ -1,6 +1,5 @@ # 01_MAKEFILES__Docs_Images_Makefile.in.dpatch # 01_MAKEFILES__Docs_Makefile.in.dpatch -# 02_no_builtin_ndbcluster_plugin.dpatch # 21_init__openquery_configtest.dpatch 33_scripts__mysql_create_system_tables__no_test.dpatch 38_scripts__mysqld_safe.sh__signals.dpatch diff --git a/debian/patches/02_no_builtin_ndbcluster_plugin.dpatch b/debian/patches/02_no_builtin_ndbcluster_plugin.dpatch deleted file mode 100644 index dc881ccca10..00000000000 --- a/debian/patches/02_no_builtin_ndbcluster_plugin.dpatch +++ /dev/null @@ -1,18 +0,0 @@ -#! 
/bin/sh /usr/share/dpatch/dpatch-run -## 02_no_builtin_ndbcluster_plugin.dpatch by -## -## All lines beginning with `## DP:' are a description of the patch. -## DP: As we completely disabled ndbcluster - -@DPATCH@ - ---- old/sql/sql_builtin.cc -+++ new/sql/sql_builtin.cc -@@ -22,6 +22,6 @@ - - struct st_mysql_plugin *mysqld_builtins[]= - { -- builtin_binlog_plugin, builtin_partition_plugin, builtin_csv_plugin, builtin_heap_plugin, builtin_myisam_plugin, builtin_myisammrg_plugin, builtin_ndbcluster_plugin,(struct st_mysql_plugin *)0 -+ builtin_binlog_plugin, builtin_partition_plugin, builtin_csv_plugin, builtin_heap_plugin, builtin_myisam_plugin, builtin_myisammrg_plugin, (struct st_mysql_plugin *)0 - }; - diff --git a/debian/po/ar.po b/debian/po/ar.po index 737522212b8..c9009069ee5 100644 --- a/debian/po/ar.po +++ b/debian/po/ar.po @@ -212,21 +212,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/ca.po b/debian/po/ca.po index bbff00407cc..7c7d2210834 100644 --- a/debian/po/ca.po +++ b/debian/po/ca.po @@ -177,21 +177,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #, fuzzy #~ msgid "" #~ "To use mysql you must install an equivalent user and group to the " diff --git a/debian/po/cs.po b/debian/po/cs.po index 7f80d45e33b..f3ae24b1e8c 100644 --- a/debian/po/cs.po +++ b/debian/po/cs.po @@ -214,21 +214,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/da.po b/debian/po/da.po index 8c592992182..d68b8575d72 100644 --- a/debian/po/da.po +++ b/debian/po/da.po @@ -216,21 +216,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. 
Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/de.po b/debian/po/de.po index e1fbad09d67..6147b55903e 100644 --- a/debian/po/de.po +++ b/debian/po/de.po @@ -214,21 +214,3 @@ msgstr "" "Die beiden von Ihnen eingegebenen Passwörter sind nicht identisch. Bitte " "erneut versuchen." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "NDB-Cluster scheint gerade benutzt zu werden" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"MySQL-5.1 bietet keine NDB-Clusterunterstützung mehr. Bitte migrieren Sie " -"Ihr System zum neuen »mysql-cluster«-Paket und entfernen Sie alle Zeilen, " -"die mit »ndb« beginnen aus allen Konfigurationsdateien im Verzeichnis /etc/" -"mysql/." diff --git a/debian/po/es.po b/debian/po/es.po index 957b40118bb..e76c173a9f5 100644 --- a/debian/po/es.po +++ b/debian/po/es.po @@ -244,21 +244,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/eu.po b/debian/po/eu.po index 0a4980488cb..8f1ae0c8ecd 100644 --- a/debian/po/eu.po +++ b/debian/po/eu.po @@ -206,29 +206,6 @@ msgstr "Pasahitz sarrera errorea" msgid "The two passwords you entered were not the same. Please try again." msgstr "Idatzi dituzun bi pasahitzak ez dira berdina. Mesedez saiatu berriz." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "Dirudienez NDB Cluster-a erabilia dago" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -#, fuzzy -#| msgid "" -#| "MySQL-5.1 has orphaned NDB Cluster support. Please migrate to the new " -#| "mysql-cluster package and remove all lines starting with \"ndb\" from all " -#| "config files below /etc/mysql/." -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"MySQL-5.1-ek NDB cluster euskarri umezurtz bat behar du. Mesedez migratu " -"mysql-cluster pakete berrira eta /etc/mysql/ azpiko konfigurazio fitxategi " -"guztietan \"ndb\"-ez hasten diren lerro guztiak ezabatu." 
- #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/fr.po b/debian/po/fr.po index 5055a354f9f..98d6740b7e2 100644 --- a/debian/po/fr.po +++ b/debian/po/fr.po @@ -202,24 +202,6 @@ msgstr "" "Le mot de passe et sa confirmation ne sont pas identiques. Veuillez " "recommencer." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "Abandon de la gestion de NDB" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"La version 5.1 de MySQL ne gère plus les grappes NDB. Vous devriez utiliser " -"le paquet mysql-cluster et supprimer toutes les lignes commençant par " -"« ndb » des fichiers de configuration situés dans /etc/mysql." - #~ msgid "" #~ "To use MySQL, the following entries for users and groups should be added " #~ "to the system:" diff --git a/debian/po/gl.po b/debian/po/gl.po index d991d98053a..122e4091c16 100644 --- a/debian/po/gl.po +++ b/debian/po/gl.po @@ -208,21 +208,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/it.po b/debian/po/it.po index 40b24d575bf..459099cbc6a 100644 --- a/debian/po/it.po +++ b/debian/po/it.po @@ -199,20 +199,3 @@ msgstr "Errore di inserimento della password" msgid "The two passwords you entered were not the same. Please try again." msgstr "Le due password inserite sono diverse. Riprovare." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "È in uso un cluster NDB" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"MySQL-5.1 non fornisce più il supporto per i cluster NDB. Si dovrebbe " -"migrare al nuovo pacchetto mysql-cluster e rimuovere tutte le righe che " -"iniziano per \"ndb\" da tutti i file di configurazione sotto /etc/mysql/." diff --git a/debian/po/ja.po b/debian/po/ja.po index 6173c9b1ef8..c1087266eaf 100644 --- a/debian/po/ja.po +++ b/debian/po/ja.po @@ -205,20 +205,3 @@ msgstr "パスワード入力エラー" msgid "The two passwords you entered were not the same. Please try again." msgstr "入力ã•れãŸäºŒã¤ã®ãƒ‘スワードãŒä¸€è‡´ã—ã¾ã›ã‚“。å†å…¥åŠ›ã—ã¦ãã ã•ã„。" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "NDB クラスタãŒåˆ©ç”¨ã•れã¦ã„るよã†ã§ã™" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. 
Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"MySQL-5.1 ã§ã¯ NDB クラスタã®ã‚µãƒãƒ¼ãƒˆã‚’æä¾›ã—ãªããªã£ã¦ã„ã¾ã™ã€‚æ–°ãŸãª mysql-" -"cluster パッケージã«ç§»è¡Œã—ã¦ã€/etc/mysql 以下ã®è¨­å®šãƒ•ァイルã™ã¹ã¦ã‹ã‚‰ã€Œndbã€" -"ã§å§‹ã¾ã‚‹è¡Œã‚’削除ã—ã¦ãã ã•ã„。" diff --git a/debian/po/nb.po b/debian/po/nb.po index c3c2be245e7..992684fa3e1 100644 --- a/debian/po/nb.po +++ b/debian/po/nb.po @@ -222,21 +222,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "Support MySQL connections from hosts running Debian \"sarge\" or older?" #~ msgstr "" diff --git a/debian/po/nl.po b/debian/po/nl.po index a8a920ffae8..82864ed8456 100644 --- a/debian/po/nl.po +++ b/debian/po/nl.po @@ -223,21 +223,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "Support MySQL connections from hosts running Debian \"sarge\" or older?" #~ msgstr "" diff --git a/debian/po/pt.po b/debian/po/pt.po index 1cb1b455160..0c35c038b70 100644 --- a/debian/po/pt.po +++ b/debian/po/pt.po @@ -213,21 +213,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po index d8748ad9288..a0b4cdbfbfb 100644 --- a/debian/po/pt_BR.po +++ b/debian/po/pt_BR.po @@ -212,21 +212,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." 
-msgstr "" - #~ msgid "" #~ "To use MariaDB, the following entries for users and groups should be " #~ "added to the system:" diff --git a/debian/po/ro.po b/debian/po/ro.po index 0c37e3c0188..ceefecc8a1b 100644 --- a/debian/po/ro.po +++ b/debian/po/ro.po @@ -222,21 +222,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "Cannot upgrade if ISAM tables are present!" #~ msgstr "Nu se poate face actualizarea dacă sunt prezente tabele ISAM!" diff --git a/debian/po/ru.po b/debian/po/ru.po index 55d06fc86c9..5136933159b 100644 --- a/debian/po/ru.po +++ b/debian/po/ru.po @@ -206,20 +206,3 @@ msgstr "Ошибка ввода паролÑ" msgid "The two passwords you entered were not the same. Please try again." msgstr "Два введённых Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð½Ðµ одинаковы. Повторите ввод." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "NDB Cluster уже иÑпользуетÑÑ" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"MySQL-5.1 больше не поддерживает NDB Cluster. Переходите на новый пакет " -"mysql-cluster и удалите вÑе Ñтроки, начинающиеÑÑ Ñ \"ndb\", из вÑех файлов " -"наÑтройки в каталоге /etc/mysql/." diff --git a/debian/po/sv.po b/debian/po/sv.po index 67d227618f4..0cd512a252b 100644 --- a/debian/po/sv.po +++ b/debian/po/sv.po @@ -200,24 +200,6 @@ msgstr "Fel vid inmatning av lösenord" msgid "The two passwords you entered were not the same. Please try again." msgstr "De tvÃ¥ lösenorden du angav stämde inte överrens. Prova igen." -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "NDB-kluster används inte" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" -"Stödet för NDB-kluster har tagits bort i MySQL-5.1. Migrera till det nya " -"paketet mysql-cluster och ta bort alla rader som inleds med \"ndb\" frÃ¥n " -"alla inställlningsfiler i /etc/mysql/." - #~ msgid "" #~ "To use MySQL, the following entries for users and groups should be added " #~ "to the system:" diff --git a/debian/po/templates.pot b/debian/po/templates.pot index 66af762ac0d..c164f1f4811 100644 --- a/debian/po/templates.pot +++ b/debian/po/templates.pot @@ -170,18 +170,3 @@ msgstr "" #: ../mariadb-server-10.1.templates:9001 msgid "The two passwords you entered were not the same. Please try again." msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. 
Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" diff --git a/debian/po/tr.po b/debian/po/tr.po index 9e80ff89f00..814341a6bc2 100644 --- a/debian/po/tr.po +++ b/debian/po/tr.po @@ -178,21 +178,6 @@ msgstr "" msgid "The two passwords you entered were not the same. Please try again." msgstr "" -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "NDB Cluster seems to be in use" -msgstr "" - -#. Type: error -#. Description -#: ../mariadb-server-10.1.templates:10001 -msgid "" -"MySQL-5.1 no longer provides NDB Cluster support. Please migrate to the new " -"mysql-cluster package and remove all lines starting with \"ndb\" from all " -"config files below /etc/mysql/." -msgstr "" - #~ msgid "" #~ "To use mysql you must install an equivalent user and group to the " #~ "following and ensure yourself that /var/lib/mysql has the right " diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt index 50db83f2777..b9e582004dc 100644 --- a/extra/CMakeLists.txt +++ b/extra/CMakeLists.txt @@ -13,15 +13,7 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -INCLUDE_DIRECTORIES( -${CMAKE_SOURCE_DIR}/include -${ZLIB_INCLUDE_DIR} -# Following is for perror, in case NDB is compiled in. -${CMAKE_SOURCE_DIR}/storage/ndb/include -${CMAKE_SOURCE_DIR}/storage/ndb/include/util -${CMAKE_SOURCE_DIR}/storage/ndb/include/ndbapi -${CMAKE_SOURCE_DIR}/storage/ndb/include/portlib -${CMAKE_SOURCE_DIR}/storage/ndb/include/mgmapi) +INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${ZLIB_INCLUDE_DIR}) # Default install component for the files is Server here SET(MYSQL_INSTALL_COMPONENT Server) diff --git a/extra/perror.c b/extra/perror.c index 8aa6aa35b08..11a703c8666 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -23,11 +23,6 @@ #include #include #include -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -#include "../storage/ndb/src/ndbapi/ndberror.c" -#include "../storage/ndb/src/kernel/error/ndbd_exit_codes.c" -#include "../storage/ndb/include/mgmapi/mgmapi_error.h" -#endif #include /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ static my_bool verbose, print_all_codes; @@ -35,35 +30,12 @@ static my_bool verbose, print_all_codes; #include #include -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -static my_bool ndb_code; -static char ndb_string[1024]; -int mgmapi_error_string(int err_no, char *str, int size) -{ - int i; - for (i= 0; i < ndb_mgm_noOfErrorMsgs; i++) - { - if ((int)ndb_mgm_error_msgs[i].code == err_no) - { - my_snprintf(str, size-1, "%s", ndb_mgm_error_msgs[i].msg); - str[size-1]= '\0'; - return 0; - } - } - return -1; -} -#endif - static struct my_option my_long_options[] = { {"help", '?', "Displays this help and exits.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"info", 'I', "Synonym for --help.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE - {"ndb", 257, "Ndbcluster storage engine specific error codes.", &ndb_code, - &ndb_code, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, -#endif #ifdef HAVE_SYS_ERRLIST {"all", 'a', "Print all the error messages and the number. 
Deprecated," " will be removed in a future release.", @@ -334,35 +306,7 @@ int main(int argc,char *argv[]) found=0; code=atoi(*argv); -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE - if (ndb_code) - { - if ((ndb_error_string(code, ndb_string, sizeof(ndb_string)) < 0) && - (ndbd_exit_string(code, ndb_string, sizeof(ndb_string)) < 0) && - (mgmapi_error_string(code, ndb_string, sizeof(ndb_string)) < 0)) - { - msg= 0; - } - else - msg= ndb_string; - if (msg) - { - if (verbose) - printf("NDB error code %3d: %s\n",code,msg); - else - puts(msg); - } - else - { - fprintf(stderr,"Illegal ndb error code: %d\n",code); - error= 1; - } - found= 1; - msg= 0; - } - else -#endif - msg = strerror(code); + msg = strerror(code); /* We don't print the OS error message if it is the same as the diff --git a/include/my_base.h b/include/my_base.h index cdf8e189031..a443b4d161c 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -159,8 +159,6 @@ enum ha_extra_function { Ignore if the a tuple is not found, continue processing the transaction and ignore that 'row'. Needed for idempotency handling on the slave - - Currently only used by NDB storage engine. Partition handler ignores flag. */ HA_EXTRA_IGNORE_NO_KEY, HA_EXTRA_NO_IGNORE_NO_KEY, diff --git a/include/my_global.h b/include/my_global.h index bec5fb027eb..4a1cc8c5b75 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -1231,9 +1231,6 @@ static inline double rint(double x) #undef HAVE_SMEM /* No shared memory */ #else -#ifdef WITH_NDB_BINLOG -#define HAVE_NDB_BINLOG 1 -#endif #define HAVE_REPLICATION #define HAVE_EXTERNAL_CLIENT #endif /* EMBEDDED_LIBRARY */ diff --git a/include/mysqld_default_groups.h b/include/mysqld_default_groups.h index 30dfdae1338..3bc82359787 100644 --- a/include/mysqld_default_groups.h +++ b/include/mysqld_default_groups.h @@ -1,7 +1,4 @@ const char *load_default_groups[]= { -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -"mysql_cluster", -#endif "mysqld", "server", MYSQL_BASE_VERSION, "mariadb", MARIADB_BASE_VERSION, "client-server", diff --git a/man/mysql-test-run.pl.1 b/man/mysql-test-run.pl.1 index 20abc250b15..87d640dead7 100644 --- a/man/mysql-test-run.pl.1 +++ b/man/mysql-test-run.pl.1 @@ -1141,25 +1141,6 @@ Section\ \&4.9, \(lqPassing Options from mysql-test-run.pl to mysqld or mysqltes .sp -1 .IP \(bu 2.3 .\} -.\" mysql-test-run.pl: ndb-connectstring option -.\" ndb-connectstring option: mysql-test-run.pl -\fB\-\-ndb\-connectstring=\fR\fB\fIstr\fR\fR -.sp -Pass -\fB\-\-ndb\-connectstring=\fR\fB\fIstr\fR\fR -to the master MySQL server\&. This option also prevents -\fBmysql\-test\-run\&.pl\fR -from starting a cluster\&. It is assumed that there is already a cluster running to which the server can connect with the given connectstring\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} .\" mysql-test-run.pl: nocheck-testcases option .\" nocheck-testcases option: mysql-test-run.pl \fB\-\-nocheck\-testcases\fR @@ -1445,42 +1426,6 @@ Do not apply combinations; ignore combinations file or option\&. .sp -1 .IP \(bu 2.3 .\} -.\" mysql-test-run.pl: skip-ndbcluster option -.\" skip-ndbcluster option: mysql-test-run.pl -\fB\-\-skip\-ndbcluster\fR, -.\" mysql-test-run.pl: skip-ndb option -.\" skip-ndb option: mysql-test-run.pl -\fB\-\-skip\-ndb\fR -.sp -Do not start NDB Cluster; skip Cluster test cases\&. 
-.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" mysql-test-run.pl: skip-ndbcluster-slave option -.\" skip-ndbcluster-slave option: mysql-test-run.pl -\fB\-\-skip\-ndbcluster\-slave\fR, -.\" mysql-test-run.pl: skip-ndb-slave option -.\" skip-ndb-slave option: mysql-test-run.pl -\fB\-\-skip\-ndb\-slave\fR -.sp -Do not start an NDB Cluster slave\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} .\" mysql-test-run.pl: skip-rpl option .\" skip-rpl option: mysql-test-run.pl \fB\-\-skip\-rpl\fR @@ -2034,20 +1979,6 @@ Search the server log for errors or warning after each test and report any suspi .RE .sp .RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" mysql-test-run.pl: with-ndbcluster-only option -.\" with-ndbcluster-only option: mysql-test-run.pl -\fB\-\-with\-ndbcluster\-only\fR -.sp -Run only test cases that have -ndb -in their name\&. .RE .SH "COPYRIGHT" .br diff --git a/man/mysqldump.1 b/man/mysqldump.1 index 59d2416b25e..e91e8859ec4 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -1718,14 +1718,6 @@ option are mutually exclusive because LOCK TABLES causes any pending transactions to be committed implicitly\&. .sp -This option is not supported for MySQL Cluster tables; the results cannot be guaranteed to be consistent due to the fact that the -NDBCLUSTER -storage engine supports only the -READ_COMMITTED -transaction isolation level\&. You should always use -NDB -backup and restore instead\&. -.sp To dump large tables, you should combine the \fB\-\-single\-transaction\fR option with diff --git a/man/ndbd.8 b/man/ndbd.8 deleted file mode 100644 index c3c28d7491b..00000000000 --- a/man/ndbd.8 +++ /dev/null @@ -1,819 +0,0 @@ -'\" t -.\" Title: \fBndbd\fR -.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] -.\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: 04/06/2010 -.\" Manual: MySQL Database System -.\" Source: MySQL 5.1 -.\" Language: English -.\" -.TH "\FBNDBD\FR" "8" "04/06/2010" "MySQL 5\&.1" "MySQL Database System" -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.\" ndbd -.\" MySQL Cluster: ndbd -.\" MySQL Cluster: data nodes -.\" data nodes (MySQL Cluster) -.\" storage nodes - see data nodes, ndbd -.SH "NAME" -ndbd \- the MySQL Cluster data node daemon -.SH "SYNOPSIS" -.HP \w'\fBndbd\ \fR\fB\fIoptions\fR\fR\ 'u -\fBndbd \fR\fB\fIoptions\fR\fR -.SH "DESCRIPTION" -.PP -\fBndbd\fR -is the process that is used to handle all the data in tables using the NDB Cluster storage engine\&. This is the process that empowers a data node to accomplish distributed transaction handling, node recovery, checkpointing to disk, online backup, and related tasks\&. -.PP -In a MySQL Cluster, a set of -\fBndbd\fR -processes cooperate in handling data\&. These processes can execute on the same computer (host) or on different computers\&. The correspondences between data nodes and Cluster hosts is completely configurable\&. 
-.\" MySQL Cluster: administration -.\" MySQL Cluster: commands -.\" command options (MySQL Cluster): ndbd -.\" MySQL Cluster: ndbd process -.PP -The following table includes command options specific to the MySQL Cluster data node program -\fBndbd\fR\&. Additional descriptions follow the table\&. For options common to all MySQL Cluster programs, see -Section\ \&17.4.2, \(lqOptions Common to MySQL Cluster Programs\(rq\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBNote\fR -.ps -1 -.br -.PP -All of these options also apply to the multi\-threaded version of this program \(em -\fBndbmtd\fR, which is available in MySQL Cluster NDB 7\&.0 \(em and you may substitute -\(lq\fBndbmtd\fR\(rq -for -\(lq\fBndbd\fR\(rq -wherever the latter occurs in this section\&. -.sp .5v -.RE -.PP -For options common to all -NDBCLUSTER -programs, see -Section\ \&17.4.2, \(lqOptions Common to MySQL Cluster Programs\(rq\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -\fB\-\-bind\-address\fR -.TS -allbox tab(:); -l l s -l l s -l l s -^ l l -^ l l. -T{ -\fBVersion Introduced\fR -T}:T{ -5\&.1\&.12 -T} -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-bind\-address=name -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -string -T} -:T{ -\fBDefault\fR -T}:T{ -T} -.TE -.sp 1 -Causes -\fBndbd\fR -to bind to a specific network interface (host name or IP address)\&. This option has no default value\&. -.sp -This option was added in MySQL 5\&.1\&.12\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -\fB\-\-daemon\fR, -\fB\-d\fR -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-daemon -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -TRUE -T} -.TE -.sp 1 -Instructs -\fBndbd\fR -to execute as a daemon process\&. This is the default behavior\&. -\fB\-\-nodaemon\fR -can be used to prevent the process from running as a daemon\&. -.sp -This option has no effect when running -\fBndbd\fR -or -\fBndbmtd\fR -on Windows platforms\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" --initial option (ndbd) -.\" --initial option (ndbmtd) -\fB\-\-initial\fR -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-initial -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -.TE -.sp 1 -Instructs -\fBndbd\fR -to perform an initial start\&. An initial start erases any files created for recovery purposes by earlier instances of -\fBndbd\fR\&. It also re\-creates recovery log files\&. Note that on some operating systems this process can take a substantial amount of time\&. -.sp -An -\fB\-\-initial\fR -start is to be used -\fIonly\fR -when starting the -\fBndbd\fR -process under very special circumstances; this is because this option causes all files to be removed from the Cluster file system and all redo log files to be re\-created\&. These circumstances are listed here: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -When performing a software upgrade which has changed the contents of any files\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -When restarting the node with a new version of -\fBndbd\fR\&. 
-.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -As a measure of last resort when for some reason the node restart or system restart repeatedly fails\&. In this case, be aware that this node can no longer be used to restore data due to the destruction of the data files\&. -.RE -.RS 4 -Use of this option prevents the -StartPartialTimeout -and -StartPartitionedTimeout -configuration parameters from having any effect\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBImportant\fR -.ps -1 -.br -This option does -\fInot\fR -affect either of the following: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -Backup files that have already been created by the affected node -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -MySQL Cluster Disk Data files (see -Section\ \&17.5.10, \(lqMySQL Cluster Disk Data Tables\(rq)\&. -.RE -.RS 4 -.sp -This option also has no effect on recovery of data by a data node that is just starting (or restarting) from data nodes that are already running\&. This recovery of data occurs automatically, and requires no user intervention in a MySQL Cluster that is running normally\&. -.sp .5v -.RE -It is permissible to use this option when starting the cluster for the very first time (that is, before any data node files have been created); however, it is -\fInot\fR -necessary to do so\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" --initial-start option (ndbd) -.\" --initial-start option (ndbmtd) -\fB\-\-initial\-start\fR -.TS -allbox tab(:); -l l s -l l s -l l s -^ l l -^ l l. -T{ -\fBVersion Introduced\fR -T}:T{ -5\&.1\&.11 -T} -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-initial\-start -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -.TE -.sp 1 -This option is used when performing a partial initial start of the cluster\&. Each node should be started with this option, as well as -\fB\-\-nowait\-nodes\fR\&. -.sp -Suppose that you have a 4\-node cluster whose data nodes have the IDs 2, 3, 4, and 5, and you wish to perform a partial initial start using only nodes 2, 4, and 5 \(em that is, omitting node 3: -.sp -.if n \{\ -.RS 4 -.\} -.nf -shell> \fBndbd \-\-ndb\-nodeid=2 \-\-nowait\-nodes=3 \-\-initial\-start\fR -shell> \fBndbd \-\-ndb\-nodeid=4 \-\-nowait\-nodes=3 \-\-initial\-start\fR -shell> \fBndbd \-\-ndb\-nodeid=5 \-\-nowait\-nodes=3 \-\-initial\-start\fR -.fi -.if n \{\ -.RE -.\} -.sp -Prior to MySQL 5\&.1\&.19, it was not possible to perform DDL operations involving Disk Data tables on a partially started cluster\&. (See -\m[blue]\fBBug#24631\fR\m[]\&\s-2\u[1]\d\s+2\&.) -.sp -When using this option, you must also specify the node ID for the data node being started with the -\fB\-\-ndb\-nodeid\fR -option\&. -.sp -This option was added in MySQL 5\&.1\&.11\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBImportant\fR -.ps -1 -.br -Do not confuse this option with the -\fB\-\-nowait\-nodes\fR -option added for -\fBndb_mgmd\fR -in MySQL Cluster NDB 7\&.0\&.10, which can be used to allow a cluster configured with multiple management servers to be started without all management servers being online\&. 
-.sp .5v -.RE -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" --nowait-nodes option (ndbd) -.\" --nowait-nodes option (ndbmtd) -\fB\-\-nowait\-nodes=\fR\fB\fInode_id_1\fR\fR\fB[, \fR\fB\fInode_id_2\fR\fR\fB[, \&.\&.\&.]]\fR -.TS -allbox tab(:); -l l s -l l s -l l s -^ l l -^ l l. -T{ -\fBVersion Introduced\fR -T}:T{ -5\&.1\&.9 -T} -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-nowait\-nodes=list -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -string -T} -:T{ -\fBDefault\fR -T}:T{ -T} -.TE -.sp 1 -This option takes a list of data nodes which for which the cluster will not wait for before starting\&. -.sp -This can be used to start the cluster in a partitioned state\&. For example, to start the cluster with only half of the data nodes (nodes 2, 3, 4, and 5) running in a 4\-node cluster, you can start each -\fBndbd\fR -process with -\fB\-\-nowait\-nodes=3,5\fR\&. In this case, the cluster starts as soon as nodes 2 and 4 connect, and does -\fInot\fR -wait -StartPartitionedTimeout -milliseconds for nodes 3 and 5 to connect as it would otherwise\&. -.sp -If you wanted to start up the same cluster as in the previous example without one -\fBndbd\fR -\(em say, for example, that the host machine for node 3 has suffered a hardware failure \(em then start nodes 2, 4, and 5 with -\fB\-\-nowait\-nodes=3\fR\&. Then the cluster will start as soon as nodes 2, 4, and 5 connect and will not wait for node 3 to start\&. -.sp -This option was added in MySQL 5\&.1\&.9\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" --nodaemon option (ndbd) -.\" --nodaemon option (ndbmtd) -\fB\-\-nodaemon\fR -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-nodaemon -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR (windows) -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -TRUE -T} -.TE -.sp 1 -Instructs -\fBndbd\fR -not to start as a daemon process\&. This is useful when -\fBndbd\fR -is being debugged and you want output to be redirected to the screen\&. -.sp -As of MySQL Cluster NDB 7\&.0\&.8, the default behavior for -\fBndbd\fR -and -\fBndbmtd\fR -on Windows is to run in the foreground, making this option unnecessary on Windows platforms\&. (\m[blue]\fBBug#45588\fR\m[]\&\s-2\u[2]\d\s+2) -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" --nostart option (ndbd) -.\" -n option (ndbd) -.\" --nostart option (ndbmtd) -.\" -n option (ndbmtd) -\fB\-\-nostart\fR, -\fB\-n\fR -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-\-nostart -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -.TE -.sp 1 -Instructs -\fBndbd\fR -not to start automatically\&. When this option is used, -\fBndbd\fR -connects to the management server, obtains configuration data from it, and initializes communication objects\&. However, it does not actually start the execution engine until specifically requested to do so by the management server\&. This can be accomplished by issuing the proper -START -command in the management client (see -Section\ \&17.5.2, \(lqCommands in the MySQL Cluster Management Client\(rq)\&. 
-.RE -.\" MySQL Cluster: log files -.\" log files (MySQL Cluster) -.PP -\fBndbd\fR -generates a set of log files which are placed in the directory specified by -DataDir -in the -config\&.ini -configuration file\&. -.PP -These log files are listed below\&. -\fInode_id\fR -is the node\'s unique identifier\&. Note that -\fInode_id\fR -represents the node\'s unique identifier\&. For example, -ndb_2_error\&.log -is the error log generated by the data node whose node ID is -2\&. -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" MySQL Cluster: error logs -.\" error logs (MySQL Cluster) -ndb_\fInode_id\fR_error\&.log -is a file containing records of all crashes which the referenced -\fBndbd\fR -process has encountered\&. Each record in this file contains a brief error string and a reference to a trace file for this crash\&. A typical entry in this file might appear as shown here: -.sp -.if n \{\ -.RS 4 -.\} -.nf -Date/Time: Saturday 30 July 2004 \- 00:20:01 -Type of error: error -Message: Internal program error (failed ndbrequire) -Fault ID: 2341 -Problem data: DbtupFixAlloc\&.cpp -Object of reference: DBTUP (Line: 173) -ProgramName: NDB Kernel -ProcessID: 14909 -TraceFile: ndb_2_trace\&.log\&.2 -***EOM*** -.fi -.if n \{\ -.RE -.\} -.sp -Listings of possible -\fBndbd\fR -exit codes and messages generated when a data node process shuts down prematurely can be found in -\m[blue]\fBndbd Error Messages\fR\m[]\&\s-2\u[3]\d\s+2\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBImportant\fR -.ps -1 -.br -\fIThe last entry in the error log file is not necessarily the newest one\fR -(nor is it likely to be)\&. Entries in the error log are -\fInot\fR -listed in chronological order; rather, they correspond to the order of the trace files as determined in the -ndb_\fInode_id\fR_trace\&.log\&.next -file (see below)\&. Error log entries are thus overwritten in a cyclical and not sequential fashion\&. -.sp .5v -.RE -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.\" MySQL Cluster: trace files -.\" trace files (MySQL Cluster) -ndb_\fInode_id\fR_trace\&.log\&.\fItrace_id\fR -is a trace file describing exactly what happened just before the error occurred\&. This information is useful for analysis by the MySQL Cluster development team\&. -.sp -It is possible to configure the number of these trace files that will be created before old files are overwritten\&. -\fItrace_id\fR -is a number which is incremented for each successive trace file\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -ndb_\fInode_id\fR_trace\&.log\&.next -is the file that keeps track of the next trace file number to be assigned\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -ndb_\fInode_id\fR_out\&.log -is a file containing any data output by the -\fBndbd\fR -process\&. This file is created only if -\fBndbd\fR -is started as a daemon, which is the default behavior\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -ndb_\fInode_id\fR\&.pid -is a file containing the process ID of the -\fBndbd\fR -process when started as a daemon\&. It also functions as a lock file to avoid the starting of nodes with the same identifier\&. 
-.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -ndb_\fInode_id\fR_signal\&.log -is a file used only in debug versions of -\fBndbd\fR, where it is possible to trace all incoming, outgoing, and internal messages with their data in the -\fBndbd\fR -process\&. -.RE -.PP -It is recommended not to use a directory mounted through NFS because in some environments this can cause problems whereby the lock on the -\&.pid -file remains in effect even after the process has terminated\&. -.PP -To start -\fBndbd\fR, it may also be necessary to specify the host name of the management server and the port on which it is listening\&. Optionally, one may also specify the node ID that the process is to use\&. -.sp -.if n \{\ -.RS 4 -.\} -.nf -shell> \fBndbd \-\-connect\-string="nodeid=2;host=ndb_mgmd\&.mysql\&.com:1186"\fR -.fi -.if n \{\ -.RE -.\} -.PP -See -Section\ \&17.3.2.3, \(lqThe MySQL Cluster Connectstring\(rq, for additional information about this issue\&. -\fBndbd\fR(8), describes other options for -\fBndbd\fR\&. -.PP -When -\fBndbd\fR -starts, it actually initiates two processes\&. The first of these is called the -\(lqangel process\(rq; its only job is to discover when the execution process has been completed, and then to restart the -\fBndbd\fR -process if it is configured to do so\&. Thus, if you attempt to kill -\fBndbd\fR -via the Unix -\fBkill\fR -command, it is necessary to kill both processes, beginning with the angel process\&. The preferred method of terminating an -\fBndbd\fR -process is to use the management client and stop the process from there\&. -.PP -The execution process uses one thread for reading, writing, and scanning data, as well as all other activities\&. This thread is implemented asynchronously so that it can easily handle thousands of concurrent actions\&. In addition, a watch\-dog thread supervises the execution thread to make sure that it does not hang in an endless loop\&. A pool of threads handles file I/O, with each thread able to handle one open file\&. Threads can also be used for transporter connections by the transporters in the -\fBndbd\fR -process\&. In a multi\-processor system performing a large number of operations (including updates), the -\fBndbd\fR -process can consume up to 2 CPUs if permitted to do so\&. -.PP -For a machine with many CPUs it is possible to use several -\fBndbd\fR -processes which belong to different node groups; however, such a configuration is still considered experimental and is not supported for MySQL 5\&.1 in a production setting\&. See -Section\ \&17.1.5, \(lqKnown Limitations of MySQL Cluster\(rq\&. -.SH "COPYRIGHT" -.br -.PP -Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc. -.PP -This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. -.PP -This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -.PP -You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. -.sp -.SH "NOTES" -.IP " 1." 4 -Bug#24631 -.RS 4 -\%http://bugs.mysql.com/bug.php?id=24631 -.RE -.IP " 2." 
4 -Bug#45588 -.RS 4 -\%http://bugs.mysql.com/bug.php?id=45588 -.RE -.IP " 3." 4 -ndbd Error Messages -.RS 4 -\%http://dev.mysql.com/doc/ndbapi/en/ndbd-error-messages.html -.RE -.SH "SEE ALSO" -For more information, please refer to the MySQL Reference Manual, -which may already be installed locally and which is also available -online at http://dev.mysql.com/doc/. -.SH AUTHOR -Sun Microsystems, Inc. (http://www.mysql.com/). diff --git a/man/ndbd_redo_log_reader.1 b/man/ndbd_redo_log_reader.1 deleted file mode 100644 index f6a2326cab4..00000000000 --- a/man/ndbd_redo_log_reader.1 +++ /dev/null @@ -1,176 +0,0 @@ -'\" t -.\" Title: \fBndbd_redo_log_reader\fR -.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] -.\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: 04/06/2010 -.\" Manual: MySQL Database System -.\" Source: MySQL 5.1 -.\" Language: English -.\" -.TH "\FBNDBD_REDO_LOG_REA" "1" "04/06/2010" "MySQL 5\&.1" "MySQL Database System" -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.\" ndbd_redo_log_reader -.SH "NAME" -ndbd_redo_log_reader \- check and print content of cluster redo log -.SH "SYNOPSIS" -.HP \w'\fBndbd_redo_log_reader\ \fR\fB\fIfile_name\fR\fR\fB\ [\fR\fB\fIoptions\fR\fR\fB]\fR\ 'u -\fBndbd_redo_log_reader \fR\fB\fIfile_name\fR\fR\fB [\fR\fB\fIoptions\fR\fR\fB]\fR -.SH "DESCRIPTION" -.PP -Reads a redo log file, checking it for errors, printing its contents in a human\-readable format, or both\&. -\fBndbd_redo_log_reader\fR -is intended for use primarily by MySQL developers and support personnel in debugging and diagnosing problems\&. -.PP -This utility was made available as part of default builds beginning with MySQL Cluster NDB 6\&.1\&.3\&. It remains under development, and its syntax and behavior are subject to change in future releases\&. For this reason, it should be considered experimental at this time\&. -.PP -The C++ source files for -\fBndbd_redo_log_reader\fR -can be found in the directory -/storage/ndb/src/kernel/blocks/dblqh/redoLogReader\&. -.PP -The following table includes options that are specific to the MySQL Cluster program -\fBndbd_redo_log_reader\fR\&. Additional descriptions follow the table\&. For options common to all MySQL Cluster programs, see -Section\ \&17.4.2, \(lqOptions Common to MySQL Cluster Programs\(rq\&. -.PP -\fBUsage\fR: -.sp -.if n \{\ -.RS 4 -.\} -.nf -ndbd_redo_log_reader \fIfile_name\fR [\fIoptions\fR] -.fi -.if n \{\ -.RE -.\} -.PP -\fIfile_name\fR -is the name of a cluster REDO log file\&. REDO log files are located in the numbered directories under the data node\'s data directory (DataDir); the path under this directory to the REDO log files matches the pattern -ndb_\fI#\fR_fs/D\fI#\fR/LCP/\fI#\fR/T\fI#\fRF\fI#\fR\&.Data\&. In each case, the -\fI#\fR -represents a number (not necessarily the same number)\&. For more information, see -\m[blue]\fBCluster Data Node FileSystemDir Files\fR\m[]\&\s-2\u[1]\d\s+2\&. 
-.PP -The name of the file to be read may be followed by one or more of the options listed here: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-noprint -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -.TE -.sp 1 -\fB\-noprint\fR: Do not print the contents of the log file\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -.TS -allbox tab(:); -l l s -l l s -^ l l -^ l l. -T{ -\fBCommand\-Line Format\fR -T}:T{ -\-nocheck -T} -T{ -\ \& -T}:T{ -\fBPermitted Values \fR -T} -:T{ -\fBType\fR -T}:T{ -boolean -T} -:T{ -\fBDefault\fR -T}:T{ -FALSE -T} -.TE -.sp 1 -\fB\-nocheck\fR: Do not check the log file for errors\&. -.RE -.sp -.RE -.PP -Like -\fBndb_print_backup_file\fR -and -\fBndb_print_schema_file\fR -(and unlike most of the -NDB -utilities that are intended to be run on a management server host or to connect to a management server) -\fBndbd_redo_log_reader\fR -must be run on a cluster data node, since it accesses the data node file system directly\&. Because it does not make use of the management server, this utility can be used when the management server is not running, and even when the cluster has been completely shut down\&. -.SH "COPYRIGHT" -.br -.PP -Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc. -.PP -This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. -.PP -This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -.PP -You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. -.sp -.SH "NOTES" -.IP " 1." 4 -Cluster Data Node FileSystemDir Files -.RS 4 -\%http://dev.mysql.com/doc/ndbapi/en/ndb-internals-ndbd-filesystemdir-files.html -.RE -.SH "SEE ALSO" -For more information, please refer to the MySQL Reference Manual, -which may already be installed locally and which is also available -online at http://dev.mysql.com/doc/. -.SH AUTHOR -Sun Microsystems, Inc. (http://www.mysql.com/). 
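For context only, not part of the patch: the extra/perror.c hunk earlier in this commit drops the --ndb option and the NDB/mgmapi error tables, leaving the plain OS lookup as the only path for a numeric argument. Below is a minimal stand-alone C sketch of that retained behaviour — strerror() on each argument — under the assumption that the exact output format is approximate; the real perror also resolves MariaDB handler error codes, which this sketch deliberately omits.

    /* Sketch only: the error lookup perror keeps once the NDB branch is removed. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(int argc, char *argv[])
    {
      int i;
      for (i= 1; i < argc; i++)
      {
        int code= atoi(argv[i]);           /* perror converts each argument with atoi() */
        const char *msg= strerror(code);   /* the single lookup path kept by this patch */
        printf("OS error code %3d:  %s\n", code, msg);  /* output format approximate */
      }
      return argc < 2;                     /* no arguments: nothing to look up */
    }

For example, invoking the sketch with the argument 64 would print the operating system's message for error 64 through the same strerror() call that perror now uses unconditionally.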
diff --git a/man/ndbmtd.8 b/man/ndbmtd.8 deleted file mode 100644 index baadd4eaee2..00000000000 --- a/man/ndbmtd.8 +++ /dev/null @@ -1,388 +0,0 @@ -'\" t -.\" Title: \fBndbmtd\fR -.\" Author: [FIXME: author] [see http://docbook.sf.net/el/author] -.\" Generator: DocBook XSL Stylesheets v1.75.2 -.\" Date: 04/06/2010 -.\" Manual: MySQL Database System -.\" Source: MySQL 5.1 -.\" Language: English -.\" -.TH "\FBNDBMTD\FR" "8" "04/06/2010" "MySQL 5\&.1" "MySQL Database System" -.\" ----------------------------------------------------------------- -.\" * set default formatting -.\" ----------------------------------------------------------------- -.\" disable hyphenation -.nh -.\" disable justification (adjust text to left margin only) -.ad l -.\" ----------------------------------------------------------------- -.\" * MAIN CONTENT STARTS HERE * -.\" ----------------------------------------------------------------- -.\" ndbmtd -.\" MySQL Cluster: ndbmtd -.\" MySQL Cluster: data nodes -.\" data nodes (MySQL Cluster) -.\" storage nodes - see data nodes, ndbd, ndbmtd -.SH "NAME" -ndbmtd \- the MySQL Cluster data node daemon (multi\-threaded version) -.SH "SYNOPSIS" -.HP \w'\fBndbmtd\ \fR\fB\fIoptions\fR\fR\ 'u -\fBndbmtd \fR\fB\fIoptions\fR\fR -.SH "DESCRIPTION" -.PP -\fBndbmtd\fR -is a multi\-threaded version of -\fBndbd\fR, the process that is used to handle all the data in tables using the -NDBCLUSTER -storage engine\&. -\fBndbmtd\fR -is intended for use on host computers having multiple CPU cores\&. Except where otherwise noted, -\fBndbmtd\fR -functions in the same way as -\fBndbd\fR; therefore, in this section, we concentrate on the ways in which -\fBndbmtd\fR -differs from -\fBndbd\fR, and you should consult -\fBndbd\fR(8), for additional information about running MySQL Cluster data nodes that apply to both the single\-threaded and multi\-threaded versions of the data node process\&. -.PP -Command\-line options and configuration parameters used with -\fBndbd\fR -also apply to -\fBndbmtd\fR\&. For more information about these options and parameters, see -\fBndbd\fR(8), and -Section\ \&17.3.2.6, \(lqDefining MySQL Cluster Data Nodes\(rq, respectively\&. -.PP -\fBndbmtd\fR -is also file system\-compatible with -\fBndbd\fR\&. In other words, a data node running -\fBndbd\fR -can be stopped, the binary replaced with -\fBndbmtd\fR, and then restarted without any loss of data\&. (However, when doing this, you must make sure that -MaxNoOfExecutionThreads -is set to an apppriate value before restarting the node if you wish for -\fBndbmtd\fR -to run in multi\-threaded fashion\&.) Similarly, an -\fBndbmtd\fR -binary can be replaced with -\fBndbd\fR -simply by stopping the node and then starting -\fBndbd\fR -in place of the multi\-threaded binary\&. It is not necessary when switching between the two to start the data node binary using -\fB\-\-initial\fR\&. -.PP -Prior to MySQL Cluster NDB 7\&.0\&.6, there were known issues when using -\fBndbmtd\fR -with MySQL Cluster Disk Data tables\&. If you wish to use multi\-threaded data nodes with disk\-based -NDB -tables, you should ensure that you are running MySQL Cluster NDB 7\&.0\&.6 or later\&. (\m[blue]\fBBug#41915\fR\m[]\&\s-2\u[1]\d\s+2, -\m[blue]\fBBug#44915\fR\m[]\&\s-2\u[2]\d\s+2) -.PP -Using -\fBndbmtd\fR -differs from using -\fBndbd\fR -in two key respects: -.sp -.RS 4 -.ie n \{\ -\h'-04' 1.\h'+01'\c -.\} -.el \{\ -.sp -1 -.IP " 1." 
4.2 -.\} -You must set an appropriate value for the -MaxNoOfExecutionThreads -configuration parameter in the -config\&.ini -file\&. If you do not do so, -\fBndbmtd\fR -runs in single\-threaded mode \(em that is, it behaves like -\fBndbd\fR\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04' 2.\h'+01'\c -.\} -.el \{\ -.sp -1 -.IP " 2." 4.2 -.\} -Trace files are generated by critical errors in -\fBndbmtd\fR -processes in a somewhat different fashion from how these are generated by -\fBndbd\fR -failures\&. -.RE -.PP -These differences are discussed in more detail in the next few paragraphs\&. -.\" execution threads (MySQL Cluster) -.\" MySQL Cluster: execution threads -.\" ndbmtd: MaxNoOfExecutionThreads -.\" MaxNoOfExecutionThreads: ndbmtd -.\" ndbmtd: trace files -.\" trace files: ndbmtd -.PP -\fBNumber of execution threads\fR. The -MaxNoOfExecutionThreads -configuration parameter is used to determine the number of local query handler (LQH) threads spawned by -\fBndbmtd\fR\&. Although this parameter is set in -[ndbd] -or -[ndbd default] -sections of the -config\&.ini -file, it is exclusive to -\fBndbmtd\fR -and does not apply to -\fBndbd\fR\&. -.PP -This parameter takes an integer value from 2 to 8 inclusive\&. Generally, you should set this parameter equal to the number of CPU cores on the data node host, as shown in the following table: -.TS -allbox tab(:); -lB lB. -T{ -Number of Cores -T}:T{ -Recommended MaxNoOfExecutionThreads Value -T} -.T& -l l -l l -l l. -T{ -2 -T}:T{ -2 -T} -T{ -4 -T}:T{ -4 -T} -T{ -8 or more -T}:T{ -8 -T} -.TE -.sp 1 -.PP -(It is possible to set this parameter to other values within the permitted range, but these are automatically rounded as shown in the -\fBValue Used\fR -column of the next table in this section\&.) -.PP -The multi\-threaded data node process always spawns at least 4 threads: -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -1 local query handler (LQH) thread -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -1 transaction coordinator (TC) thread -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -1 transporter thread -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} -1 subscription manager (SUMA) thread -.RE -.PP -Setting this parameter to a value between 4 and 8 inclusive causes additional LQH threads to be used by -\fBndbmtd\fR -(up to a maximum of 4 LQH threads), as shown in the following table: -.TS -allbox tab(:); -lB lB lB. -T{ -config\&.ini Value -T}:T{ -Value Used -T}:T{ -Number of LQH Threads Used -T} -.T& -l l l -l l l -l l l. -T{ -3 -T}:T{ -2 -T}:T{ -1 -T} -T{ -5 or 6 -T}:T{ -4 -T}:T{ -2 -T} -T{ -7 -T}:T{ -8 -T}:T{ -4 -T} -.TE -.sp 1 -.PP -Setting this parameter outside the permitted range of values causes the management server to abort on startup with the error -Error line \fInumber\fR: Illegal value \fIvalue\fR for parameter MaxNoOfExecutionThreads\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBNote\fR -.ps -1 -.br -.PP -In MySQL Cluster NDB 6\&.4\&.0, it is not possible to set -MaxNoOfExecutionThreads -to 2\&. You can safely use the value 3 instead (it is treated as 2 internally)\&. This issue is resolved in MySQL Cluster NDB 6\&.4\&.1\&. 
-.sp .5v -.RE -.PP -In MySQL Cluster NDB 6\&.4\&.0 through 6\&.4\&.3, the default value for this parameter was undefined, although the default behavior for -\fBndbmtd\fR -was to use 1 LQH thread, as though -MaxNoOfExecutionThreads -had been set to 2\&. Beginning with MySQL Cluster NDB 7\&.0\&.4, this parameter has an explcit default value of 2, thus guaranteeing this default behavior\&. -.PP -In MySQL Cluster NDB 7\&.0, it is not possible to cause -\fBndbmtd\fR -to use more than 1 TC thread, although we plan to introduce this capability in a future MySQL Cluster release series\&. -.\" MySQL Cluster: log files -.\" log files (MySQL Cluster): ndbmtd -.\" ndbmtd: trace files -.PP -Like -\fBndbd\fR, -\fBndbmtd\fR -generates a set of log files which are placed in the directory specified by -DataDir -in the -config\&.ini -configuration file\&. Except for trace files, these are generated in the same way and have the same names as those generated by -\fBndbd\fR\&. -.PP -In the event of a critical error, -\fBndbmtd\fR -generates trace files describing what happened just prior to the error\' occurrence\&. These files, which can be found in the data node\'s -DataDir, are useful for analysis of problems by the MySQL Cluster Development and Support teams\&. One trace file is generated for each -\fBndbmtd\fR -thread\&. The names of these files follow the pattern -ndb_\fInode_id\fR_trace\&.log\&.\fItrace_id\fR_t\fIthread_id\fR, where -\fInode_id\fR -is the data node\'s unique node ID in the cluster, -\fItrace_id\fR -is a trace sequence number, and -\fIthread_id\fR -is the thread ID\&. For example, in the event of the failure of an -\fBndbmtd\fR -process running as a MySQL Cluster data node having the node ID 3 and with -MaxNoOfExecutionThreads -equal to 4, four trace files are generated in the data node\'s data directory; if the is the first time this node has failed, then these files are named -ndb_3_trace\&.log\&.1_t1, -ndb_3_trace\&.log\&.1_t2, -ndb_3_trace\&.log\&.1_t3, and -ndb_3_trace\&.log\&.1_t4\&. Internally, these trace files follow the same format as -\fBndbd\fR -trace files\&. -.PP -The -\fBndbd\fR -exit codes and messages that are generated when a data node process shuts down prematurely are also used by -\fBndbmtd\fR\&. See -\m[blue]\fBndbd Error Messages\fR\m[]\&\s-2\u[3]\d\s+2, for a listing of these\&. -.if n \{\ -.sp -.\} -.RS 4 -.it 1 an-trap -.nr an-no-space-flag 1 -.nr an-break-flag 1 -.br -.ps +1 -\fBNote\fR -.ps -1 -.br -.PP -It is possible to use -\fBndbd\fR -and -\fBndbmtd\fR -concurrently on different data nodes in the same MySQL Cluster\&. However, such configurations have not been tested extensively; thus, we cannot not recommend doing so in a production setting at this time\&. -.sp .5v -.RE -.SH "COPYRIGHT" -.br -.PP -Copyright 2007-2008 MySQL AB, 2008-2010 Sun Microsystems, Inc. -.PP -This documentation is free software; you can redistribute it and/or modify it only under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. -.PP -This documentation is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. -.PP -You should have received a copy of the GNU General Public License along with the program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or see http://www.gnu.org/licenses/. 
-.sp -.SH "NOTES" -.IP " 1." 4 -Bug#41915 -.RS 4 -\%http://bugs.mysql.com/bug.php?id=41915 -.RE -.IP " 2." 4 -Bug#44915 -.RS 4 -\%http://bugs.mysql.com/bug.php?id=44915 -.RE -.IP " 3." 4 -ndbd Error Messages -.RS 4 -\%http://dev.mysql.com/doc/ndbapi/en/ndbd-error-messages.html -.RE -.SH "SEE ALSO" -For more information, please refer to the MySQL Reference Manual, -which may already be installed locally and which is also available -online at http://dev.mysql.com/doc/. -.SH AUTHOR -Sun Microsystems, Inc. (http://www.mysql.com/). diff --git a/man/perror.1 b/man/perror.1 index a37cec6a518..adf393e8057 100644 --- a/man/perror.1 +++ b/man/perror.1 @@ -77,22 +77,6 @@ OS error code 64: Machine is not on the network .RE .\} .PP -To obtain the error message for a MySQL Cluster error code, invoke -\fBperror\fR -with the -\fB\-\-ndb\fR -option: -.sp -.if n \{\ -.RS 4 -.\} -.nf -shell> \fBperror \-\-ndb \fR\fB\fIerrorcode\fR\fR -.fi -.if n \{\ -.RE -.\} -.PP Note that the meaning of system error messages may be dependent on your operating system\&. A given error code may mean different things on different operating systems\&. .PP \fBperror\fR @@ -124,21 +108,6 @@ Display a help message and exit\&. .sp -1 .IP \(bu 2.3 .\} -.\" perror: ndb option -.\" ndb option: perror -\fB\-\-ndb\fR -.sp -Print the error message for a MySQL Cluster error code\&. -.RE -.sp -.RS 4 -.ie n \{\ -\h'-04'\(bu\h'+03'\c -.\} -.el \{\ -.sp -1 -.IP \(bu 2.3 -.\} .\" perror: silent option .\" silent option: perror \fB\-\-silent\fR, diff --git a/mysql-test/CMakeLists.txt b/mysql-test/CMakeLists.txt index 2948fb88069..fe681758bd0 100644 --- a/mysql-test/CMakeLists.txt +++ b/mysql-test/CMakeLists.txt @@ -74,7 +74,7 @@ ENDIF() IF(WITH_EMBEDDED_SERVER) SET(TEST_EMBEDDED ${MTR_FORCE} --comment=embedded --timer --embedded-server - --skip-rpl --skip-ndbcluster ${EXP}) + --skip-rpl ${EXP}) ELSE() SET(TEST_EMBEDDED echo "Can not test embedded, not compiled in") ENDIF() @@ -92,8 +92,8 @@ ADD_CUSTOM_TARGET(test-force ADD_CUSTOM_TARGET(test-bt ${TEST_BT_START} - COMMAND ${MTR_FORCE} --comment=normal --timer --skip-ndbcluster --report-features ${EXP} - COMMAND ${MTR_FORCE} --comment=ps --timer --skip-ndbcluster --ps-protocol ${EXP} + COMMAND ${MTR_FORCE} --comment=normal --timer --report-features ${EXP} + COMMAND ${MTR_FORCE} --comment=ps --timer --ps-protocol ${EXP} COMMAND ${MTR_FORCE} --comment=funcs1+ps --ps-protocol --reorder --suite=funcs_1 ${EXP} COMMAND ${MTR_FORCE} --comment=funcs2 --suite=funcs_2 ${EXP} COMMAND ${MTR_FORCE} --comment=partitions --suite=parts ${EXP} @@ -105,13 +105,13 @@ ADD_CUSTOM_TARGET(test-bt ADD_CUSTOM_TARGET(test-bt-fast ${TEST_BT_START} - COMMAND ${MTR_FORCE} --comment=ps --timer --skip-ndbcluster --ps-protocol --report-features ${EXP} + COMMAND ${MTR_FORCE} --comment=ps --timer --ps-protocol --report-features ${EXP} COMMAND ${MTR_FORCE} --comment=stress --suite=stress ${EXP} ) ADD_CUSTOM_TARGET(test-bt-debug ${TEST_BT_START} - COMMAND ${MTR_FORCE} --comment=debug --timer --skip-ndbcluster --skip-rpl --report-features ${EXP} + COMMAND ${MTR_FORCE} --comment=debug --timer --skip-rpl --report-features ${EXP} ) # Process .in files with includes in collections/ diff --git a/mysql-test/collections/default.experimental b/mysql-test/collections/default.experimental index 18cd4748687..cc2e86d6eda 100644 --- a/mysql-test/collections/default.experimental +++ b/mysql-test/collections/default.experimental @@ -1,7 +1,7 @@ # For easier human reading (MTR doesn't care), please keep entries # in alphabetical order. 
This also helps with merge conflict resolution. -binlog.binlog_multi_engine # joro : NDB tests marked as experimental as agreed with bochklin +binlog.binlog_multi_engine # joro : tests marked as experimental as agreed with bochklin funcs_1.charset_collation_1 # depends on compile-time decisions diff --git a/mysql-test/collections/mysql-trunk.daily b/mysql-test/collections/mysql-trunk.daily index 47d189a2c65..9654d61be06 100644 --- a/mysql-test/collections/mysql-trunk.daily +++ b/mysql-test/collections/mysql-trunk.daily @@ -2,6 +2,5 @@ perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collection perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=ps_row --vardir=var-ps_row --ps-protocol --mysqld=--binlog-format=row perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=embedded --vardir=var-emebbed --embedded perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=funcs_1 --vardir=var-funcs_1 --suite=funcs_1 -perl mysql-test-run.pl --timer --force --parallel=auto --comment=rpl_ndb_row --vardir=var-rpl_ndb_row --mysqld=--binlog-format=row --suite=rpl_ndb,ndb -perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --mysqld=--binlog-format=row --suite=rpl,binlog --skip-ndb +perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_row --vardir=var-rpl_binlog_row --mysqld=--binlog-format=row --suite=rpl,binlog perl mysql-test-run.pl --timer --force --parallel=auto --experimental=collections/default.experimental --comment=rpl_binlog_checksum --mysqld=--binlog-checksum=CRC32 --vardir=var-rpl_binlog_checksum --suite=binlog,rpl diff --git a/mysql-test/extra/rpl_tests/rpl_auto_increment.test b/mysql-test/extra/rpl_tests/rpl_auto_increment.test index 8cd86de8040..67286c37258 100644 --- a/mysql-test/extra/rpl_tests/rpl_auto_increment.test +++ b/mysql-test/extra/rpl_tests/rpl_auto_increment.test @@ -1,7 +1,6 @@ # # Test of auto_increment with offset # --- source include/not_ndb_default.inc -- source include/master-slave.inc eval create table t1 (a int not null auto_increment,b int, primary key (a)) engine=$engine_type2 auto_increment=3; diff --git a/mysql-test/extra/rpl_tests/rpl_ddl.test b/mysql-test/extra/rpl_tests/rpl_ddl.test index 8c35ff974d8..34897d00733 100644 --- a/mysql-test/extra/rpl_tests/rpl_ddl.test +++ b/mysql-test/extra/rpl_tests/rpl_ddl.test @@ -146,13 +146,10 @@ eval CREATE TABLE mysqltest1.t1 (f1 BIGINT) ENGINE=$engine_type; # Prevent Bug#26687 rpl_ddl test fails if run with --innodb option # The testscript (suite/rpl/rpl_ddl.test) + the expected result need that the # slave uses MyISAM for the table mysqltest.t1. -# This is not valid in case of suite/rpl_ndb/rpl_ndb_ddl.test which sources -# also this script. 
sync_slave_with_master; connection slave; if (`SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES - WHERE TABLE_SCHEMA = 'mysqltest1' AND TABLE_NAME = 't1' - AND ENGINE <> 'MyISAM' AND '$engine_type' <> 'NDB'`) + WHERE TABLE_SCHEMA = 'mysqltest1' AND TABLE_NAME = 't1' AND ENGINE <> 'MyISAM'`) { skip This test needs on slave side: InnoDB disabled, default engine: MyISAM; } diff --git a/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test b/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test index 7dcb9e2725c..9b5a552c327 100644 --- a/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test +++ b/mysql-test/extra/rpl_tests/rpl_extra_col_slave.test @@ -395,9 +395,6 @@ sync_slave_with_master; ############################################################### # Error reaction is up to sql_mode of the slave sql (bug#38173) #--echo *** Create t9 on slave *** -# Please, check BUG#47741 to see why you are not testing NDB. -if (`SELECT UPPER(LEFT($engine_type, 3)) != 'NDB'`) -{ STOP SLAVE; RESET SLAVE; eval CREATE TABLE t9 (a INT KEY, b BLOB, c CHAR(5), @@ -446,8 +443,6 @@ if (`SELECT UPPER(LEFT($engine_type, 3)) != 'NDB'`) DROP TABLE t9; sync_slave_with_master; -} - ############################################ # More columns in slave at middle of table # # Expect: Proper error message # diff --git a/mysql-test/extra/rpl_tests/rpl_foreign_key.test b/mysql-test/extra/rpl_tests/rpl_foreign_key.test index db646a736f9..d10deece1b1 100644 --- a/mysql-test/extra/rpl_tests/rpl_foreign_key.test +++ b/mysql-test/extra/rpl_tests/rpl_foreign_key.test @@ -21,9 +21,7 @@ connection master; SET TIMESTAMP=1000000000; CREATE TABLE t3 ( a INT UNIQUE ); SET FOREIGN_KEY_CHECKS=0; -# Had to add 1022 for run with ndb as ndb uses different -# error and error code for error ER_DUP_ENTRY. Bug 16677 ---error 1022, ER_DUP_ENTRY +--error ER_DUP_ENTRY INSERT INTO t3 VALUES (1),(1); sync_slave_with_master; diff --git a/mysql-test/extra/rpl_tests/rpl_implicit_commit_binlog.test b/mysql-test/extra/rpl_tests/rpl_implicit_commit_binlog.test index 549d184185d..4be2ea376d2 100644 --- a/mysql-test/extra/rpl_tests/rpl_implicit_commit_binlog.test +++ b/mysql-test/extra/rpl_tests/rpl_implicit_commit_binlog.test @@ -64,22 +64,6 @@ while ($ddl_cases >= 1) { let $commit_event_row_number= 4; } - # - # In NDB (RBR and MIXED modes), the commit event is usually the seventh event - # in the binary log: - # - # 1: COMMAND - # 2: BEGIN - # 3: TABLE MAP EVENT - # 4: TABLE MAP EVENT (ndb_apply_status) - # 5: ROW EVENT - # 6: ROW EVENT - # 7: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 7; - } let $first_binlog_position= query_get_value("SHOW MASTER STATUS", Position, 1); --enable_query_log @@ -87,32 +71,10 @@ while ($ddl_cases >= 1) if ($ddl_cases == 41) { let $cmd= LOAD INDEX INTO CACHE nt_1 IGNORE LEAVES; - if ($engine == NDB) - { - # This seems to be related to epochs. - # We need to check this against an updated version or avoid it. 
- let $ok= no; - let $commit_event_row_number= 6; - } } if ($ddl_cases == 40) { let $cmd= LOAD INDEX INTO CACHE tt_1, tt_2 IGNORE LEAVES; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 39) { @@ -121,21 +83,6 @@ while ($ddl_cases >= 1) if ($ddl_cases == 38) { let $cmd= CHECK TABLE nt_1; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 37) { @@ -148,40 +95,10 @@ while ($ddl_cases >= 1) if ($ddl_cases == 35) { let $cmd= LOCK TABLES tt_1 WRITE; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 34) { let $cmd= UNLOCK TABLES; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 33) { @@ -194,42 +111,6 @@ while ($ddl_cases >= 1) if ($ddl_cases == 31) { let $cmd= SET PASSWORD FOR 'user'@'localhost' = PASSWORD('newpass'); - # - # In NDB (RBR mode), the commit event is the eleventh event - # in the binary log: - # - # 1: DDL EVENT which triggered the previous commmit. - # 2: BEGIN - # 3: TABLE MAP EVENT - # 4: ROW EVENT - # 5: COMMIT - # 6: BEGIN - # 7: TABLE MAP EVENT - # 8: TABLE MAP EVENT (ndb_apply_status) - # 9: ROW EVENT - # 10: ROW EVENT - # 11: COMMIT - # - if (`SELECT '$engine' = 'NDB' && @@binlog_format = 'ROW'`) - { - let $commit_event_row_number= 11; - } - # - # In NDB (MIXED mode), the commit event is the eighth event - # in the binary log: - # - # 1: DDL EVENT which triggered the previous commmit. - # 2: BEGIN - # 3: TABLE MAP EVENT - # 4: TABLE MAP EVENT (ndb_apply_status) - # 5: ROW EVENT - # 6: ROW EVENT - # 7: COMMIT - # - if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'`) - { - let $commit_event_row_number= 7; - } } if ($ddl_cases == 30) { @@ -272,7 +153,7 @@ while ($ddl_cases >= 1) # 5: COMMIT # 6: DDL EVENT which triggered the previous commmit. # - if (`select @@binlog_format = 'ROW' && '$engine' != 'NDB'`) + if (`select @@binlog_format = 'ROW'`) { let $commit_event_row_number= 5; } @@ -316,42 +197,10 @@ while ($ddl_cases >= 1) if ($ddl_cases == 13) { let $cmd= CREATE INDEX ix ON tt_1(ddl_case); - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # 7: DDL EVENT which triggered the previous commmit. 
- # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 12) { let $cmd= DROP INDEX ix ON tt_1; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # 7: DDL EVENT which triggered the previous commmit. - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 11) { @@ -377,39 +226,6 @@ while ($ddl_cases >= 1) { let $commit_event_row_number= 4; } - # - # In NDB (RBR mode), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if (`SELECT '$engine' = 'NDB' && @@binlog_format = 'ROW'` ) - { - let $commit_event_row_number= 6; - } - # - # In NDB (MIXED mode), the commit event is the nineth event - # in the binary log: - # - # 1: BEGIN - # 2: DDL EVENT which triggered the previous commmit. - # 3: COMMIT - # 4: BEGIN - # 5: TABLE MAP EVENT - # 6: TABLE MAP EVENT (ndb_apply_status) - # 7: ROW EVENT - # 8: ROW EVENT - # 9: COMMIT - # - if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'` ) - { - let $commit_event_row_number= 9; - } } if ($ddl_cases == 10) { @@ -427,21 +243,6 @@ while ($ddl_cases >= 1) { let $commit_event_row_number= 4; } - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 9) { @@ -459,21 +260,6 @@ while ($ddl_cases >= 1) { let $commit_event_row_number= 4; } - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 8) { @@ -514,42 +300,6 @@ while ($ddl_cases >= 1) { let $commit_event_row_number= 5; } - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: DROP TEMPORARY table IF EXISTS - # 3: COMMIT - # 4: BEGIN - # 5: TABLE MAP EVENT - # 6: TABLE MAP EVENT (ndb_apply_status) - # 7: ROW EVENT - # 8: ROW EVENT - # 9: COMMIT - # - if ($engine == NDB) - { - let $commit_event_row_number= 9; - } - # - # In NDB (MIXED mode), the commit event is the nineth event - # in the binary log: - # - # 1: BEGIN - # 2: DDL EVENT which triggered the previous commmit. - # 3: COMMIT - # 4: BEGIN - # 5: TABLE MAP EVENT - # 6: TABLE MAP EVENT (ndb_apply_status) - # 7: ROW EVENT - # 8: ROW EVENT - # 9: COMMIT - # - if (`SELECT '$engine' = 'NDB' && @@binlog_format != 'ROW'` ) - { - let $commit_event_row_number= 9; - } } if ($ddl_cases == 7) { @@ -574,42 +324,10 @@ while ($ddl_cases >= 1) if ($ddl_cases == 2) { let $cmd= CREATE DATABASE db; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # 7: DDL EVENT which triggered the previous commmit. 
- # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } if ($ddl_cases == 1) { let $cmd= DROP DATABASE IF EXISTS db; - # - # In NDB (RBR and MIXED modes), the commit event is the sixth event - # in the binary log: - # - # 1: BEGIN - # 2: TABLE MAP EVENT - # 3: TABLE MAP EVENT (ndb_apply_status) - # 4: ROW EVENT - # 5: ROW EVENT - # 6: COMMIT - # 7: DDL EVENT which triggered the previous commmit. - # - if ($engine == NDB) - { - let $commit_event_row_number= 6; - } } --eval $cmd --disable_query_log diff --git a/mysql-test/extra/rpl_tests/rpl_innodb.test b/mysql-test/extra/rpl_tests/rpl_innodb.test index 865c97cf95d..6b3732439b2 100644 --- a/mysql-test/extra/rpl_tests/rpl_innodb.test +++ b/mysql-test/extra/rpl_tests/rpl_innodb.test @@ -48,8 +48,6 @@ connection master; # #Note Matthias: to be merged to rpl_ddl.test ---source include/not_ndb_default.inc - FLUSH LOGS; sync_slave_with_master; FLUSH LOGS; diff --git a/mysql-test/extra/rpl_tests/rpl_loadfile.test b/mysql-test/extra/rpl_tests/rpl_loadfile.test index 85620b58a97..9073e5e3d9f 100644 --- a/mysql-test/extra/rpl_tests/rpl_loadfile.test +++ b/mysql-test/extra/rpl_tests/rpl_loadfile.test @@ -23,10 +23,6 @@ SELECT * FROM test.t1 ORDER BY blob_column; save_master_pos; sync_slave_with_master; connection slave; -# Need to allow some time when NDB engine is used for -# the injector thread to have time to populate binlog -let $wait_condition= SELECT INSTR(blob_column,'aberration') > 0 FROM test.t1 WHERE a = 2; ---source include/wait_condition.inc SELECT * FROM test.t1 ORDER BY blob_column; # Cleanup diff --git a/mysql-test/extra/rpl_tests/rpl_mixing_engines.test b/mysql-test/extra/rpl_tests/rpl_mixing_engines.test index 991e1c465e9..32e035702da 100644 --- a/mysql-test/extra/rpl_tests/rpl_mixing_engines.test +++ b/mysql-test/extra/rpl_tests/rpl_mixing_engines.test @@ -12,8 +12,7 @@ # used in the same transaction. # # * Statements that do an implicit commit (i.e., most but not all DDL, and -# some utility commands) are logged specially due to unspecified requirements by -# NDB. +# some utility commands) are logged specially # # * Statements that update temporary tables need special treatment since they # are not logged in row format. @@ -147,9 +146,7 @@ # - Rules for committing statements, except CREATE [TEMPORARY] TABLE...SELECT # # * All other statements that have a pre-commit are written directly to the -# binlog. (Note: this is semantically equivalent to writing it to the SC and -# flushing the SC. However, due to requirements by NDB (which have not been -# clarified), we write directly to the binlog.) +# binlog. # # We use the include file rpl_mixing_engines.inc to generate sql commands from a # format string. 
The format string consists of a sequence of 'codes' separated diff --git a/mysql-test/extra/rpl_tests/rpl_ndb_2multi_basic.test b/mysql-test/extra/rpl_tests/rpl_ndb_2multi_basic.test deleted file mode 100644 index 16f8116d92e..00000000000 --- a/mysql-test/extra/rpl_tests/rpl_ndb_2multi_basic.test +++ /dev/null @@ -1,119 +0,0 @@ -####################################### -# Author: Rafal Somla # -# Date: 2006-08-20 # -# Purpose: Test replication of basic # -# table operations in various setups # -# # -# Based on rpl_ndb_2multi_eng.test by # -# JBM # -####################################### - ---echo --- Doing pre test cleanup --- - -connection master; ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_query_log - -################################################# ---echo --- Create Table Section --- - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, - b1 INT, - vc VARCHAR(255), - bc CHAR(255), - d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, - total BIGINT UNSIGNED, - y YEAR, - t DATE, - PRIMARY KEY(id)); - ---echo --- Show table on master --- - -SHOW CREATE TABLE t1; - ---echo --- Show table on slave --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---source include/rpl_multi_engine2.inc - -################################################# -# Okay lets see how it holds up to table changes ---echo --- Check that simple Alter statements are replicated correctly -- - -ALTER TABLE t1 DROP PRIMARY KEY; -# note: table with no PK can't contain blobs if it is to be replicated. -ALTER TABLE t1 MODIFY vc char(32); - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---source include/rpl_multi_engine2.inc - -################################################# ---echo --- Check that replication works when slave has more columns than master -connection master; -ALTER TABLE t1 ADD PRIMARY KEY(id,total); -ALTER TABLE t1 MODIFY vc TEXT; - -INSERT INTO t1 VALUES(3,1,'Testing MySQL databases is a cool ', - 'Must make it bug free for the customer', - 654321.4321,15.21,0,1965,"1905-11-14"); -INSERT INTO t1 VALUES(20,1,'Testing MySQL databases is a cool ', - 'Must make it bug free for the customer', - 654321.4321,15.21,0,1965,"1965-11-14"); -INSERT INTO t1 VALUES(50,1,'Testing MySQL databases is a cool ', - 'Must make it bug free for the customer', - 654321.4321,15.21,0,1965,"1985-11-14"); - ---echo --- Add columns on slave --- ---sync_slave_with_master -ALTER TABLE t1 ADD (u int, v char(16) default 'default'); -UPDATE t1 SET u=7 WHERE id < 50; -UPDATE t1 SET v='explicit' WHERE id >10; - ---echo --- Show changed table on slave --- - -SHOW CREATE TABLE t1; -SELECT * -FROM t1 -ORDER BY id; - ---source include/rpl_multi_engine2.inc -TRUNCATE TABLE t1; - -################################################# ---echo --- Check that replication works when master has more columns than slave -connection master; - ---echo --- Remove columns on slave --- ---sync_slave_with_master -ALTER TABLE t1 DROP COLUMN v; -ALTER TABLE t1 DROP COLUMN u; -ALTER TABLE t1 DROP COLUMN t; -ALTER TABLE t1 DROP COLUMN y; - ---echo --- Show changed table on slave --- - -SHOW CREATE TABLE t1; - ---source include/rpl_multi_engine2.inc -TRUNCATE TABLE t1; - -################################################# ---echo --- Do Cleanup -- -connection master; -DROP TABLE IF EXISTS t1; - -sync_slave_with_master; -connection master; 
diff --git a/mysql-test/extra/rpl_tests/rpl_ndb_2multi_eng.test b/mysql-test/extra/rpl_tests/rpl_ndb_2multi_eng.test deleted file mode 100644 index 1677aaf8277..00000000000 --- a/mysql-test/extra/rpl_tests/rpl_ndb_2multi_eng.test +++ /dev/null @@ -1,347 +0,0 @@ -####################################### -# Author: JBM # -# Date: 2006-02-23 # -# Purpose: See if replication between # -# NDB -> MyISAM and InnoDB works. # -# and if # -# MyISAM and InnoDB -> NDB works. # -####################################### -# By JBM # -# Date 2006-02-28 # -# Change: Implemented review comments # -####################################### - ---echo --- Doing pre test cleanup --- - -connection master; ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_query_log - ---echo --- Start test 1 Basic testing --- ---echo --- Create Table Section --- - -################################################# -# Requirment: Create basic table, replicate # -# basice operations such at insert, update # -# delete between 2 different storage engines # -# Alter table and ensure table is handled # -# Correctly on the slave # -################################################# - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), - bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, total BIGINT UNSIGNED, - y YEAR, t DATE,PRIMARY KEY(id)); - ---echo --- Show table on master --- - -SHOW CREATE TABLE t1; - ---echo --- Show table on slave --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - -# Okay lets see how it holds up to table changes ---echo --- Check that simple Alter statements are replicated correctly -- - -ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total); -ALTER TABLE t1 MODIFY vc TEXT; - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- End test 1 Basic testing --- ---echo --- Do Cleanup -- - -DROP TABLE IF EXISTS t1; - -################################################################# - ---echo --- Start test 2 partition RANGE testing -- ---echo --- Do setup -- - - -################################################# -# Requirment: Create table that is partitioned # -# by range on year i.e. 
year(t) and replicate # -# basice operations such at insert, update # -# delete between 2 different storage engines # -# Alter table and ensure table is handled # -# Correctly on the slave # -################################################# - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), - bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, total BIGINT UNSIGNED, - y YEAR, t DATE) - PARTITION BY RANGE (YEAR(t)) - (PARTITION p0 VALUES LESS THAN (1901), - PARTITION p1 VALUES LESS THAN (1946), - PARTITION p2 VALUES LESS THAN (1966), - PARTITION p3 VALUES LESS THAN (1986), - PARTITION p4 VALUES LESS THAN (2005), - PARTITION p5 VALUES LESS THAN MAXVALUE); - ---echo --- Show table on master --- - -SHOW CREATE TABLE t1; - ---echo --- Show table on slave -- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- Check that simple Alter statements are replicated correctly --- - -ALTER TABLE t1 ADD PRIMARY KEY(t,id); -ALTER TABLE t1 MODIFY vc TEXT; - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- ---enable_query_log - ---source include/rpl_multi_engine3.inc - ---echo --- End test 2 partition RANGE testing --- ---echo --- Do Cleanup --- - -DROP TABLE IF EXISTS t1; - -######################################################## - ---echo --- Start test 3 partition LIST testing --- ---echo --- Do setup --- -################################################# -# Requirment: Create table that is partitioned # -# by list on id i.e. (2,4). Pretend that we # -# missed one and alter to add. 
Then replicate # -# basice operations such at insert, update # -# delete between 2 different storage engines # -# Alter table and ensure table is handled # -# Correctly on the slave # -################################################# - - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), - bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, total BIGINT UNSIGNED, - y YEAR, t DATE) - PARTITION BY LIST(id) - (PARTITION p0 VALUES IN (2, 4), - PARTITION p1 VALUES IN (42, 142)); - ---echo --- Test 3 Alter to add partition --- - -ALTER TABLE t1 ADD PARTITION (PARTITION p2 VALUES IN (412)); - ---echo --- Show table on master --- - -SHOW CREATE TABLE t1; - ---echo --- Show table on slave --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- Check that simple Alter statements are replicated correctly --- - -ALTER TABLE t1 ADD PRIMARY KEY(id); -ALTER TABLE t1 MODIFY vc TEXT; - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- End test 3 partition LIST testing --- ---echo --- Do Cleanup -- - -DROP TABLE IF EXISTS t1; - -######################################################## - ---echo --- Start test 4 partition HASH testing --- ---echo --- Do setup --- -################################################# -# Requirment: Create table that is partitioned # -# by hash on year i.e. YEAR(t). 
Then replicate # -# basice operations such at insert, update # -# delete between 2 different storage engines # -# Alter table and ensure table is handled # -# Correctly on the slave # -################################################# - - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), - bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, total BIGINT UNSIGNED, - y YEAR, t DATE) - PARTITION BY HASH( YEAR(t) ) - PARTITIONS 4; - ---echo --- show that tables have been created correctly --- - -SHOW CREATE TABLE t1; -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- Check that simple Alter statements are replicated correctly --- - -ALTER TABLE t1 ADD PRIMARY KEY(t,id); -ALTER TABLE t1 MODIFY vc TEXT; - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- End test 4 partition HASH testing --- ---echo --- Do Cleanup -- - -DROP TABLE IF EXISTS t1; - -######################################################## - ---echo --- Start test 5 partition by key testing --- ---echo --- Create Table Section --- - -################################################# -# Requirment: Create table that is partitioned # -# by key on id with 4 parts. Then replicate # -# basice operations such at insert, update # -# delete between 2 different storage engines # -# Alter table and ensure table is handled # -# Correctly on the slave # -################################################# - -CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), - bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, - f FLOAT DEFAULT 0, total BIGINT UNSIGNED, - y YEAR, t DATE,PRIMARY KEY(id)) - PARTITION BY KEY() - PARTITIONS 4; - ---echo --- Show that tables on master are ndbcluster tables --- - -SHOW CREATE TABLE t1; - ---echo --- Show that tables on slave --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - -# Okay lets see how it holds up to table changes ---echo --- Check that simple Alter statements are replicated correctly --- - -ALTER TABLE t1 DROP PRIMARY KEY, ADD PRIMARY KEY(id, total); - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still right type --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- Check that simple Alter statements are replicated correctly --- - -ALTER TABLE t1 MODIFY vc TEXT; - ---echo --- Show the new improved table on the master --- - -SHOW CREATE TABLE t1; - ---echo --- Make sure that our tables on slave are still same engine --- ---echo --- and that the alter statements replicated correctly --- - -sync_slave_with_master; -SHOW CREATE TABLE t1; - ---echo --- Perform basic operation on master --- ---echo --- 
and ensure replicated correctly --- - ---source include/rpl_multi_engine3.inc - ---echo --- End test 5 key partition testing --- ---echo --- Do Cleanup --- - -DROP TABLE IF EXISTS t1; -sync_slave_with_master; - -# End of 5.1 test case diff --git a/mysql-test/extra/rpl_tests/rpl_ndb_apply_status.test b/mysql-test/extra/rpl_tests/rpl_ndb_apply_status.test deleted file mode 100644 index bba860ab9aa..00000000000 --- a/mysql-test/extra/rpl_tests/rpl_ndb_apply_status.test +++ /dev/null @@ -1,307 +0,0 @@ -############################################# -#Authors: TU and Jeb -#Date: 2007/04 -#Purpose: Generic replication to cluster -# and ensuring that the ndb_apply_status -# table is updated. -############################################# -# Notes: -# include/select_ndb_apply_status.inc -# Selects out the log name, start & end pos -# from the ndb_apply_status table -# -# include/show_binlog_using_logname.inc -# To select out 1 row from offset 1 -# from the start position in the binlog whose -# name is = log_name -# -# include/tpcb.inc -# Creates DATABASE tpcb, the tables and -# stored procedures for loading the DB -# and for running transactions against DB. -############################################## - - ---echo ---echo *** Test 1 *** ---echo - -connection master; -create table t1 (a int key, b int) engine innodb; -create table t2 (a int key, b int) engine innodb; - ---echo - ---sync_slave_with_master -alter table t1 engine ndb; -alter table t2 engine ndb; - ---echo - -# check binlog position without begin -connection master; -insert into t1 values (1,2); - ---echo - ---sync_slave_with_master ---source include/select_ndb_apply_status.inc - ---echo - -connection master; ---echo # Now check that that is in the apply_status table is consistant ---echo # with what is in the binlog ---echo ---echo # since insert is done with transactional engine, expect a BEGIN ---echo # at ---echo ---let $binlog_start= $start_pos ---let $binlog_limit= 1 ---source include/show_binlog_events.inc - ---echo ---echo # Now the insert, one step after ---echo ---let $binlog_start= $start_pos ---let $binlog_limit= 1,1 ---source include/show_binlog_events.inc - ---echo ---echo # and the COMMIT should be at ---echo ---let $binlog_start= $start_pos ---let $binlog_limit= 2,1 ---source include/show_binlog_events.inc - ---echo - -# check binlog position with begin -begin; -insert into t1 values (2,3); -insert into t2 values (3,4); -commit; - ---echo - ---sync_slave_with_master ---source include/select_ndb_apply_status.inc - -connection master; ---let $binlog_start= $start_pos ---let $binlog_limit= 1 ---source include/show_binlog_events.inc ---echo ---let $binlog_start= $start_pos ---let $binlog_limit= 1,2 ---source include/show_binlog_events.inc ---echo ---let $binlog_start= $start_pos ---let $binlog_limit= 3,1 ---source include/show_binlog_events.inc - ---echo - -connection master; -DROP TABLE test.t1, test.t2; ---sync_slave_with_master -SHOW TABLES; - -# Run in some transactions using stored procedures -# and ensure that the ndb_apply_status table is -# updated to show the transactions - - ---echo ---echo *** Test 2 *** ---echo - -# Create database/tables and stored procdures -connection master; ---source include/tpcb.inc - -# Switch tables on slave to use NDB ---sync_slave_with_master -USE tpcb; -ALTER TABLE account ENGINE NDB; -ALTER TABLE branch ENGINE NDB; -ALTER TABLE teller ENGINE NDB; -ALTER TABLE history ENGINE NDB; - ---echo - -# Load DB tpcb and run some transactions -connection master; ---disable_query_log -CALL 
tpcb.load(); -SET AUTOCOMMIT=0; -let $run= 5; -while ($run) -{ - START TRANSACTION; - --disable_warnings - --eval CALL tpcb.trans($rpl_format); - --enable_warnings - eval SET @my_errno= $mysql_errno; - let $run_good= `SELECT @my_errno = 0`; - let $run_bad= `SELECT @my_errno <> 0`; - if ($run_good) - { - COMMIT; - } - if ($run_bad) - { - ROLLBACK; - } - dec $run; -} - -SET AUTOCOMMIT=1; ---enable_query_log - ---sync_slave_with_master ---source include/select_ndb_apply_status.inc - ---echo - -connection master; ---source include/show_binlog_using_logname.inc - -# Flush the logs on the master moving all -# Transaction to a new binlog and ensure -# that the ndb_apply_status table is updated -# to show the use of the new binlog. - ---echo ---echo ** Test 3 ** ---echo - -# Flush logs on master which should force it -# to switch to binlog #2 - -FLUSH LOGS; - -# Run in some transaction to increase end pos in -# binlog - ---disable_query_log -SET AUTOCOMMIT=0; -let $run= 5; -while ($run) -{ - START TRANSACTION; - --disable_warnings - --eval CALL tpcb.trans($rpl_format); - --enable_warnings - eval SET @my_errno= $mysql_errno; - let $run_good= `SELECT @my_errno = 0`; - let $run_bad= `SELECT @my_errno <> 0`; - if ($run_good) - { - COMMIT; - } - if ($run_bad) - { - ROLLBACK; - } - dec $run; -} -SET AUTOCOMMIT=1; ---enable_query_log - ---echo - ---sync_slave_with_master ---source include/select_ndb_apply_status.inc - ---echo - -connection master; ---source include/show_binlog_using_logname.inc - -# Now we reset both the master and the slave -# Run some more transaction and ensure -# that the ndb_apply_status is updated -# correctly - ---echo ---echo ** Test 4 ** ---echo - -# Reset both slave and master -# This should reset binlog to #1 ---source include/rpl_reset.inc - ---echo - -# Run in some transactions and check -connection master; ---disable_query_log -SET AUTOCOMMIT=0; -let $run= 5; -while ($run) -{ - START TRANSACTION; - --disable_warnings - --eval CALL tpcb.trans($rpl_format); - --enable_warnings - eval SET @my_errno= $mysql_errno; - let $run_good= `SELECT @my_errno = 0`; - let $run_bad= `SELECT @my_errno <> 0`; - if ($run_good) - { - COMMIT; - } - if ($run_bad) - { - ROLLBACK; - } - dec $run; -} -SET AUTOCOMMIT=1; ---enable_query_log - ---sync_slave_with_master ---source include/select_ndb_apply_status.inc - ---echo - -connection master; ---source include/show_binlog_using_logname.inc - -# Since we are doing replication, it is a good -# idea to check to make sure all data was -# Replicated correctly - ---echo ---echo *** DUMP MASTER & SLAVE FOR COMPARE ******** - ---exec $MYSQL_DUMP -n -t --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/master_apply_status.sql - ---exec $MYSQL_DUMP_SLAVE -n -t --compact --order-by-primary --skip-extended-insert tpcb account teller branch history > $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql - -connection master; -DROP DATABASE tpcb; - ---sync_slave_with_master - -####### Commenting out until decision on Bug#27960 ########### - -#--source include/select_ndb_apply_status.inc - -#connection master; -#--eval SHOW BINLOG EVENTS in '$log_name' from $start_pos -#--source include/show_binlog_using_logname.inc - ---echo ****** Do dumps compare ************ - - -diff_files $MYSQLTEST_VARDIR/tmp/master_apply_status.sql $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql; - -## Note: Ths files should only get removed, if the above diff succeeds. 
- ---exec rm $MYSQLTEST_VARDIR/tmp/master_apply_status.sql ---exec rm $MYSQLTEST_VARDIR/tmp/slave_apply_status.sql - - -# End of 5.1 Test diff --git a/mysql-test/extra/rpl_tests/rpl_partition.test b/mysql-test/extra/rpl_tests/rpl_partition.test index 8f2956bf1ef..119e3bebc42 100644 --- a/mysql-test/extra/rpl_tests/rpl_partition.test +++ b/mysql-test/extra/rpl_tests/rpl_partition.test @@ -201,11 +201,11 @@ CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), PARTITION BY KEY() PARTITIONS 4; ---echo --- Show that tables on master are ndbcluster tables --- +--echo --- Show tables on master --- SHOW CREATE TABLE t1; ---echo --- Show that tables on slave --- +--echo --- Show tables on slave --- sync_slave_with_master; SHOW CREATE TABLE t1; diff --git a/mysql-test/extra/rpl_tests/rpl_row_blob.test b/mysql-test/extra/rpl_tests/rpl_row_blob.test index 762daa816c0..5cd7b6b4b29 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_blob.test +++ b/mysql-test/extra/rpl_tests/rpl_row_blob.test @@ -36,7 +36,6 @@ SELECT LENGTH(data) FROM test.t1 WHERE c1 = 3; save_master_pos; connection slave; sync_with_master; ---source include/wait_for_ndb_to_binlog.inc --echo --echo **** Data Insert Validation Slave Section test.t1 **** --echo @@ -59,7 +58,6 @@ SELECT LENGTH(data) FROM test.t1 WHERE c1 = 2; save_master_pos; connection slave; sync_with_master; ---source include/wait_for_ndb_to_binlog.inc --echo --echo **** Data Update Validation Slave Section test.t1 **** --echo @@ -130,7 +128,6 @@ FROM test.t2 WHERE c1=2; save_master_pos; connection slave; sync_with_master; ---source include/wait_for_ndb_to_binlog.inc --echo --echo **** Data Insert Validation Slave Section test.t2 **** --echo @@ -156,7 +153,6 @@ FROM test.t2 WHERE c1=2; save_master_pos; connection slave; sync_with_master; ---source include/wait_for_ndb_to_binlog.inc --echo --echo **** Data Update Validation Slave Section test.t2 **** --echo @@ -176,8 +172,4 @@ diff_files $MYSQLTEST_VARDIR/tmp/rpl_row_blob_master.sql $MYSQLTEST_VARDIR/tmp/r DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; # ensure cleanup on slave as well: -# ndb blob tables consist of several tables -# if cluster is shutdown while not all tables are -# properly dropped, the table becomes inconsistent -# and wrecks later test cases --sync_slave_with_master diff --git a/mysql-test/extra/rpl_tests/rpl_row_func003.test b/mysql-test/extra/rpl_tests/rpl_row_func003.test index d12b5a3306a..f46f9eddc60 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_func003.test +++ b/mysql-test/extra/rpl_tests/rpl_row_func003.test @@ -54,11 +54,8 @@ INSERT INTO test.t1 VALUES (null,test.f1()); ROLLBACK; SET AUTOCOMMIT=1; -# Sync master and slave for all engines except NDB -if (`SELECT UPPER(LEFT('$engine_type', 3)) != 'NDB'`) { sync_slave_with_master; connection master; -} # Time to dump the databases and so we can see if they match diff --git a/mysql-test/extra/rpl_tests/rpl_row_sp003.test b/mysql-test/extra/rpl_tests/rpl_row_sp003.test index d2c2ea0caf3..a8f25485f26 100644 --- a/mysql-test/extra/rpl_tests/rpl_row_sp003.test +++ b/mysql-test/extra/rpl_tests/rpl_row_sp003.test @@ -53,8 +53,6 @@ SELECT release_lock("test"); connection master; SELECT * FROM test.t1; -#show binlog events; ---source include/wait_for_ndb_to_binlog.inc sync_slave_with_master; connection slave; SELECT * FROM test.t1; diff --git a/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test b/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test index 1687a233914..c6e91449fb3 100644 --- 
a/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test +++ b/mysql-test/extra/rpl_tests/rpl_sv_relay_space.test @@ -8,8 +8,6 @@ SHOW VARIABLES LIKE 'relay_log_space_limit'; # Matz says: I have no idea what this is supposed to test, but it has # potential for generating different results with some storage engines # that process rows in an order not dependent on the insertion order. -# For instance, I would assume that distributed storage engines (like -# NDB) could process rows based on locality. eval CREATE TABLE t1 (name varchar(64), age smallint(3))ENGINE=$engine_type; INSERT INTO t1 SET name='Andy', age=31; diff --git a/mysql-test/extra/rpl_tests/rpl_trig004.test b/mysql-test/extra/rpl_tests/rpl_trig004.test index 1a738db27fc..4988fe87f8f 100644 --- a/mysql-test/extra/rpl_tests/rpl_trig004.test +++ b/mysql-test/extra/rpl_tests/rpl_trig004.test @@ -7,7 +7,6 @@ ############################################################################# # Change Auth: JBM # # Date: 2006-02-14 # -# Change: Added error, sleep and comments (ndb) # #################################################### # Begin clean up test section @@ -28,15 +27,12 @@ CREATE TRIGGER test.t1_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW INSERT INTO t delimiter ;// INSERT INTO test.t2 VALUES (1, 0.0); -# Expect duplicate error 1022 == ndb ---error 1022, ER_DUP_ENTRY +--error ER_DUP_ENTRY INSERT INTO test.t2 VALUES (1, 0.0); #show binlog events; select * from test.t1; select * from test.t2; -let $wait_time= 10; ---source include/wait_for_ndb_to_binlog.inc sync_slave_with_master; connection slave; select * from test.t1; diff --git a/mysql-test/include/ctype_utf8mb4.inc b/mysql-test/include/ctype_utf8mb4.inc index 9ee2414e142..1971cc0c9a1 100644 --- a/mysql-test/include/ctype_utf8mb4.inc +++ b/mysql-test/include/ctype_utf8mb4.inc @@ -191,17 +191,9 @@ drop table t1; # if(!$is_heap) { -if(!$is_ndb) -{ --error ER_TOO_LONG_KEY eval create table t1 (a text character set utf8mb4, primary key(a(371))) engine $engine; } -if($is_ndb) -{ ---error ER_BLOB_USED_AS_KEY -eval create table t1 (a text character set utf8mb4, primary key(a(371))) engine $engine; -} -} # # Bug 2959 @@ -254,8 +246,6 @@ drop table t2; # Bug 4521: unique key prefix interacts poorly with utf8mb4 # MYISAM: keys with prefix compression, case insensitive collation. 
# -if (!$is_ndb) -{ eval create table t1 (c varchar(30) character set utf8mb4, unique(c(10))) engine $engine; insert into t1 values ('1'),('2'),('3'),('x'),('y'),('z'); insert into t1 values ('aaaaaaaaaa'); @@ -549,7 +539,6 @@ select c as c_all from t1 order by c; select c as c_a from t1 where c='a'; select c as c_a from t1 where c='б'; drop table t1; -} # Bug#4594: column index make = failed for gbk, but like works @@ -593,8 +582,6 @@ drop table t1; # the same for HEAP+HASH # -if (!$is_ndb) -{ eval create table t1 ( str varchar(255) character set utf8mb4 not null, key str using hash (str(2)) @@ -618,7 +605,6 @@ INSERT INTO t1 VALUES ('str'); INSERT INTO t1 VALUES ('str2'); select * from t1 where str='str'; drop table t1; -} # # Bug #5397: Crash with varchar binary and LIKE @@ -651,8 +637,6 @@ DROP TABLE t1; # if (!$is_heap) { -if (!$is_ndb) -{ eval CREATE TABLE t1 ( id int unsigned NOT NULL auto_increment, list_id smallint unsigned NOT NULL, @@ -689,7 +673,6 @@ SELECT id, term FROM t1 where (list_id = 1) AND (term = "testetest"); SELECT id, term FROM t1 where (list_id = 1) AND (term = "testètest"); DROP TABLE t1; } -} # # Bug #6019 SELECT tries to use too short prefix index on utf8mb4 data @@ -979,14 +962,7 @@ if (!$is_heap) # eval CREATE TABLE t1 (t TINYTEXT CHARACTER SET utf8mb4) ENGINE $engine; INSERT INTO t1 VALUES(REPEAT('a', 100)); -if (!$is_ndb) -{ eval CREATE TEMPORARY TABLE t2 ENGINE $engine SELECT COALESCE(t) AS bug FROM t1; -} -if ($is_ndb) -{ -eval CREATE TABLE t2 ENGINE $engine SELECT COALESCE(t) AS bug FROM t1; -} SELECT LENGTH(bug) FROM t2; DROP TABLE t2; DROP TABLE t1; @@ -1202,8 +1178,6 @@ SET NAMES latin2; if (!$is_heap) { -if (!$is_ndb) -{ eval CREATE TABLE t1 ( id int(11) NOT NULL default '0', tid int(11) NOT NULL default '0', @@ -1229,7 +1203,6 @@ SELECT * FROM t1 WHERE tid=72 and val LIKE 'VOLN DROP TABLE t1; } -} # # Bug 20709: problem with utf8mb4 fields in temporary tables @@ -1318,8 +1291,6 @@ drop table t1; # # Check that do_varstring2_mb produces a warning # -if (!$is_ndb) -{ eval create table t1 ( a varchar(4000) not null ) default character set utf8mb4 engine $engine; @@ -1327,7 +1298,6 @@ insert into t1 values (repeat('a',4000)); alter table t1 change a a varchar(3000) character set utf8mb4 not null; select length(a) from t1; drop table t1; -} # # Bug#10504: Character set does not support traditional mode @@ -1614,8 +1584,6 @@ set max_sort_length=default; --echo # if (!$is_heap) { -if (!$is_ndb) -{ eval CREATE TABLE t1 ( clipid INT NOT NULL, Tape TINYTEXT, @@ -1626,7 +1594,6 @@ ALTER TABLE t1 ADD mos TINYINT DEFAULT 0 AFTER clipid; SHOW CREATE TABLE t1; DROP TABLE t1; } -} #--echo # #--echo # Check that supplementary characters are not allowed in identifiers @@ -1807,12 +1774,9 @@ INSERT INTO t2 VALUES (x'ea9da8'); SELECT HEX(CONCAT(utf8mb4, utf8mb3)) FROM t1,t2 ORDER BY 1; SELECT CHARSET(CONCAT(utf8mb4, utf8mb3)) FROM t1, t2 LIMIT 1; -if (!$is_ndb) -{ eval CREATE TEMPORARY TABLE t3 ENGINE $engine AS SELECT *, concat(utf8mb4,utf8mb3) FROM t1, t2; SHOW CREATE TABLE t3; DROP TEMPORARY TABLE t3; -} SELECT * FROM t1, t2 WHERE t1.utf8mb4 > t2.utf8mb3; SELECT * FROM t1, t2 WHERE t1.utf8mb4 = t2.utf8mb3; diff --git a/mysql-test/include/default_ndbd.cnf b/mysql-test/include/default_ndbd.cnf deleted file mode 100644 index 9a88a5936aa..00000000000 --- a/mysql-test/include/default_ndbd.cnf +++ /dev/null @@ -1,27 +0,0 @@ - -[cluster_config] -MaxNoOfSavedMessages= 1000 -MaxNoOfConcurrentTransactions= 128 -MaxNoOfConcurrentOperations= 10000 -DataMemory= 20M -IndexMemory= 
1M -Diskless= 0 -TimeBetweenWatchDogCheck= 30000 -MaxNoOfOrderedIndexes= 32 -MaxNoOfAttributes= 2048 -TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 4 -FragmentLogFileSize= 12M -DiskPageBufferMemory= 4M - -# O_DIRECT has issues on 2.4 whach have not been handled, Bug #29612 -#ODirect= 1 -# the following parametes just function as a small regression -# test that the parameter exists -InitialNoOfOpenFiles= 27 - -# Increase timeouts for slow test-machines -HeartbeatIntervalDbDb= 30000 -HeartbeatIntervalDbApi= 30000 - -#TransactionDeadlockDetectionTimeout= 7500 diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc deleted file mode 100644 index 8dbfa2aa034..00000000000 --- a/mysql-test/include/have_multi_ndb.inc +++ /dev/null @@ -1,52 +0,0 @@ -# Setup connections to both MySQL Servers connected to the cluster -connect (server1,127.0.0.1,root,,test,$MASTER_MYPORT,); -connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,); - -# Check that server1 has NDB support -connection server1; -let $engines_table= query_get_value(SHOW TABLES FROM information_schema LIKE 'ENGINES', Tables_in_information_schema (ENGINES), 1); -disable_query_log; -if (`SELECT 1 FROM dual WHERE '$engines_table' = 'engines'`) -{ ---require r/true.require -SELECT (support = 'YES' or support = 'DEFAULT' or support = 'ENABLED') as `TRUE` FROM information_schema.engines WHERE engine = 'ndbcluster'; ---source include/ndb_not_readonly.inc -} -enable_query_log; - -# Check that server2 has NDB support -connection server2; -let $engines_table= query_get_value(SHOW TABLES FROM information_schema LIKE 'ENGINES', Tables_in_information_schema (ENGINES), 1); -disable_query_log; -if (`SELECT 1 FROM dual WHERE '$engines_table' = 'engines'`) -{ ---require r/true.require -SELECT (support = 'YES' or support = 'DEFAULT' or support = 'ENABLED') as `TRUE` FROM information_schema.engines WHERE engine = 'ndbcluster'; ---source include/ndb_not_readonly.inc -} -enable_query_log; - -# cleanup - -connection server1; -disable_query_log; -disable_warnings; ---error 0,1051 -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; -flush tables; -flush status; -enable_warnings; -enable_query_log; - -connection server2; -disable_query_log; -disable_warnings; ---error 0,1051 -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; -flush tables; -flush status; -enable_warnings; -enable_query_log; - -# Set the default connection -connection server1; diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc deleted file mode 100644 index 1266f80c8cd..00000000000 --- a/mysql-test/include/have_ndb.inc +++ /dev/null @@ -1,2 +0,0 @@ -# Check that server is compiled and started with support for NDB ---source include/have_multi_ndb.inc diff --git a/mysql-test/include/have_ndb_extra.inc b/mysql-test/include/have_ndb_extra.inc deleted file mode 100644 index 4837a7ad4ea..00000000000 --- a/mysql-test/include/have_ndb_extra.inc +++ /dev/null @@ -1,2 +0,0 @@ --- require r/have_ndb_extra.require -eval select $NDB_EXTRA_TEST; diff --git a/mysql-test/include/have_ndbapi_examples.inc b/mysql-test/include/have_ndbapi_examples.inc deleted file mode 100644 index 88499d555c0..00000000000 --- a/mysql-test/include/have_ndbapi_examples.inc +++ /dev/null @@ -1,4 +0,0 @@ ---require r/have_ndbapi_examples.require -disable_query_log; -eval select LENGTH('$NDB_EXAMPLES_BINARY') > 0 as 'have_ndb_example'; -enable_query_log; diff --git a/mysql-test/include/loaddata_autocom.inc b/mysql-test/include/loaddata_autocom.inc index 
bb286fb4169..3bf88fefa6d 100644 --- a/mysql-test/include/loaddata_autocom.inc +++ b/mysql-test/include/loaddata_autocom.inc @@ -1,5 +1,4 @@ # Test if the engine does autocommit in LOAD DATA INFILE, or not -# (NDB wants to do, others don't). eval SET SESSION STORAGE_ENGINE = $engine_type; @@ -9,8 +8,6 @@ drop table if exists t1; let $load_file= $MYSQLTEST_VARDIR/std_data/loaddata2.dat; -# NDB does not support the create option 'Binlog of table with BLOB attribute and no PK' -# So use a dummy PK here. create table t1 (id int unsigned not null auto_increment primary key, a text, b text); start transaction; --replace_result $load_file LOAD_FILE diff --git a/mysql-test/include/mtr_check.sql b/mysql-test/include/mtr_check.sql index e34e32ad1a6..e54041da1bc 100644 --- a/mysql-test/include/mtr_check.sql +++ b/mysql-test/include/mtr_check.sql @@ -47,7 +47,7 @@ BEGIN -- Show "mysql" database, tables and columns SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES - WHERE table_schema='mysql' AND table_name != 'ndb_apply_status' + WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, @@ -55,7 +55,7 @@ BEGIN numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS - WHERE table_schema='mysql' AND table_name != 'ndb_apply_status' + WHERE table_schema='mysql' ORDER BY columns_in_mysql; -- Dump all events, there should be none diff --git a/mysql-test/include/mtr_warnings.sql b/mysql-test/include/mtr_warnings.sql index 0ad1079cd92..bc8d8044afb 100644 --- a/mysql-test/include/mtr_warnings.sql +++ b/mysql-test/include/mtr_warnings.sql @@ -115,9 +115,6 @@ INSERT INTO global_suppressions VALUES ("unknown variable 'loose-"), ("You have forced lower_case_table_names to 0 through a command-line option"), ("Setting lower_case_table_names=2"), - ("NDB Binlog:"), - ("NDB: failed to setup table"), - ("NDB: only row based binary logging"), ("Neither --relay-log nor --relay-log-index were used"), ("Query partially completed"), ("Slave I.O thread aborted while waiting for relay log"), @@ -139,7 +136,6 @@ INSERT INTO global_suppressions VALUES ("Slave: The incident LOST_EVENTS occured on the master"), ("Slave: Unknown error.* 1105"), ("Slave: Can't drop database.* database doesn't exist"), - ("Time-out in NDB"), ("Warning:\s+One can only use the --user.*root"), ("Warning:\s+Table:.* on (delete|rename)"), ("You have an error in your SQL syntax"), diff --git a/mysql-test/include/ndb_backup.inc b/mysql-test/include/ndb_backup.inc deleted file mode 100644 index eef3bf2bd1e..00000000000 --- a/mysql-test/include/ndb_backup.inc +++ /dev/null @@ -1,48 +0,0 @@ -###################################################### -# By JBM 2006-02-16 So that the code is not repeated # -# in test cases and can be reused. # -###################################################### - ---exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "start backup" >> $NDB_TOOLS_OUTPUT - -# To find the backupid, we must dump this data to a table, and SELECT -# what we want into an outfile. 
This could be accomplished with grep, but -# grep isn't Windows-portable - ---disable_query_log -# create a table to help us out ---disable_warnings # leave this on until done with the entire process -# cleanup -DROP TABLE IF EXISTS helper1; -CREATE TABLE helper1(c1 VARCHAR(20)); -# dump raw data to file -let $ndb_backup_file1= $MYSQLTEST_VARDIR/ndb_backup_tmp.dat; -let $ndb_backup_file2= $MYSQLTEST_VARDIR/tmp.dat; ---disable_warnings ---error 0,1 ---remove_file $ndb_backup_file1 ---enable_warnings ---exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="$NDB_CONNECTSTRING" -d sys --delimiter=',' SYSTAB_0 > $ndb_backup_file1 -# load the table from the raw data file -eval LOAD DATA INFILE '$ndb_backup_file1' INTO TABLE helper1; ---remove_file $ndb_backup_file1 -# output what we need -eval SELECT * FROM helper1 WHERE c1 LIKE '%520093696%' -INTO OUTFILE '$ndb_backup_file2'; -# cleanup -DROP TABLE helper1; ---enable_warnings ---enable_query_log - -CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info -(id INT, backup_id INT) ENGINE = MEMORY; - ---replace_result $MYSQLTEST_VARDIR -eval LOAD DATA INFILE '$ndb_backup_file2' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; ---remove_file $ndb_backup_file2 - -# Load backup id into environment variable -let the_backup_id=`SELECT backup_id from test.backup_info`; - -DROP TABLE test.backup_info; - diff --git a/mysql-test/include/ndb_backup_print.inc b/mysql-test/include/ndb_backup_print.inc deleted file mode 100644 index 69faa8f421b..00000000000 --- a/mysql-test/include/ndb_backup_print.inc +++ /dev/null @@ -1,9 +0,0 @@ ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 1 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter > $MYSQLTEST_VARDIR/tmp/tmp.dat ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults $ndb_restore_opts -b $the_backup_id -n 2 $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id $ndb_restore_filter >> $MYSQLTEST_VARDIR/tmp/tmp.dat ---exec sort $MYSQLTEST_VARDIR/tmp/tmp.dat ---disable_warnings ---error 0,1 ---remove_file $MYSQLTEST_VARDIR/tmp/tmp.dat ---enable_warnings ---let ndb_restore_opts= ---let ndb_restore_filter= diff --git a/mysql-test/include/ndb_default_cluster.inc b/mysql-test/include/ndb_default_cluster.inc deleted file mode 100644 index de7eda3c596..00000000000 --- a/mysql-test/include/ndb_default_cluster.inc +++ /dev/null @@ -1,4 +0,0 @@ --- require r/ndb_default_cluster.require -disable_query_log; -show status like "Ndb_config_from_host"; -enable_query_log; diff --git a/mysql-test/include/ndb_master-slave.inc b/mysql-test/include/ndb_master-slave.inc deleted file mode 100644 index 8305a310953..00000000000 --- a/mysql-test/include/ndb_master-slave.inc +++ /dev/null @@ -1,12 +0,0 @@ ---source include/master-slave.inc - -connection slave; -# Check that server is compiled and started with support for NDB -disable_query_log; ---require r/true.require -select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster'; ---source include/ndb_not_readonly.inc -enable_query_log; - -# Set the default connection to 'master' -connection master; diff --git a/mysql-test/include/ndb_master-slave_2ch.inc b/mysql-test/include/ndb_master-slave_2ch.inc deleted file mode 100644 index 17017d2b801..00000000000 --- a/mysql-test/include/ndb_master-slave_2ch.inc +++ /dev/null @@ -1,67 +0,0 @@ -# ==== Purpose ==== -# -# Set up circular cluster replication where each -# cluster has two mysqlds and replication directions are -# following: -# master 
---> slave -# / \ -# cluster A cluster B -# \ / -# master1 <--- slave1 -# -# ==== Usage ==== -# -# [--let $rpl_server_count= N] -# [--let $rpl_skip_check_server_ids= 1] -# [--let $rpl_skip_reset_master_and_slave= 1] -# [--let $rpl_skip_change_master= 1] -# [--let $rpl_skip_start_slave= 1] -# [--let $rpl_debug= 1] -# [--let $slave_timeout= NUMBER] -# --source include/ndb_master-slave_2ch.inc -# -# Parameters: -# $rpl_server_count, $rpl_skip_check_server_ids, -# $rpl_skip_reset_master_and_slave, $rpl_skip_change_master, -# $rpl_skip_start_slave, $rpl_debug, $slave_timeout -# See include/master-slave.inc - ---let $rpl_topology= 1->2,4->3 ---let $rpl_skip_check_server_ids= 1 ---source include/rpl_init.inc - -# Make connections to mysqlds - ---let $rpl_connection_name= master ---let $rpl_server_number= 1 ---source include/rpl_connect.inc - ---let $rpl_connection_name= master1 ---let $rpl_server_number= 1 ---source include/rpl_connect.inc - ---let $rpl_connection_name= slave ---let $rpl_server_number= 2 ---source include/rpl_connect.inc - ---let $rpl_connection_name= slave1 ---let $rpl_server_number= 2 ---source include/rpl_connect.inc - - -# Check that all mysqld are compiled with ndb support ---let $_rpl_server= 4 -while ($_rpl_server) -{ - --connection server_$_rpl_server - if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'ndbcluster' AND (support = 'YES' OR support = 'DEFAULT')`) - { - --skip Test requires NDB. - } - --source include/ndb_not_readonly.inc - --dec $_rpl_server -} - - -# Set the default connection to 'master' (cluster A) -connection master; diff --git a/mysql-test/include/ndb_not_readonly.inc b/mysql-test/include/ndb_not_readonly.inc deleted file mode 100644 index ebb343bb18d..00000000000 --- a/mysql-test/include/ndb_not_readonly.inc +++ /dev/null @@ -1,36 +0,0 @@ -# Check that server has come out ot readonly mode -# -# wait for server to connect properly to cluster -# - ---disable_query_log - -set @saved_log = @@sql_log_bin; -set sql_log_bin = 0; ---error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG,ER_KEY_NOT_FOUND -delete from mysql.ndb_apply_status where server_id=0; -let $mysql_errno= 1; -let $counter= 600; -while ($mysql_errno) -{ - # Table is readonly until the mysqld has connected properly - --error 0,ER_NO_SUCH_TABLE,ER_OPEN_AS_READONLY,ER_GET_ERRMSG - replace into mysql.ndb_apply_status values(0,0,"",0,0); - if ($mysql_errno) - { - if (!$counter) - { - die Failed while waiting for mysqld to come out of readonly mode; - } - dec $counter; - --sleep 0.1 - } -} -delete from mysql.ndb_apply_status where server_id=0; -set sql_log_bin = @saved_log; - ---enable_query_log - -# -# connected -# diff --git a/mysql-test/include/ndb_restore_master.inc b/mysql-test/include/ndb_restore_master.inc deleted file mode 100644 index ae5f055b442..00000000000 --- a/mysql-test/include/ndb_restore_master.inc +++ /dev/null @@ -1,8 +0,0 @@ -###################################################### -# By JBM 2006-02-16 So that the code is not repeated # -# in test cases and can be reused. 
# -###################################################### - ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT - ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -p 8 -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT diff --git a/mysql-test/include/ndb_restore_slave_eoption.inc b/mysql-test/include/ndb_restore_slave_eoption.inc deleted file mode 100644 index a8657f68c8d..00000000000 --- a/mysql-test/include/ndb_restore_slave_eoption.inc +++ /dev/null @@ -1,11 +0,0 @@ -###################################################### -# By JBM 2006-03-08 So that the code is not repeated # -# in test cases and can be reused. # -###################################################### - ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING_SLAVE" -p 8 -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT - ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING_SLAVE" -p 8 -b $the_backup_id -n 2 -r -e --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT - - - diff --git a/mysql-test/include/ndb_setup_slave.inc b/mysql-test/include/ndb_setup_slave.inc deleted file mode 100644 index 5ee55150550..00000000000 --- a/mysql-test/include/ndb_setup_slave.inc +++ /dev/null @@ -1,27 +0,0 @@ -# -# now setup replication to continue from last epoch -# 1. get ndb_apply_status epoch from slave -# 2. get corresponding _next_ binlog postition from master -# 3. change master on slave - -# 1. ---connection slave ---replace_column 1 -SELECT @the_epoch:=MAX(epoch) FROM mysql.ndb_apply_status; ---let $the_epoch= `select @the_epoch` - -# 2. ---connection master ---replace_result $the_epoch ---replace_column 1 -eval SELECT @the_pos:=Position,@the_file:=SUBSTRING_INDEX(FILE, '/', -1) - FROM mysql.ndb_binlog_index WHERE epoch > $the_epoch ORDER BY epoch ASC LIMIT 1; ---let $the_pos= `SELECT @the_pos` ---let $the_file= `SELECT @the_file` - -# 3. 
---connection slave ---replace_result $the_pos -eval CHANGE MASTER TO - master_log_file = '$the_file', - master_log_pos = $the_pos ; diff --git a/mysql-test/include/ndb_wait_connected.inc b/mysql-test/include/ndb_wait_connected.inc deleted file mode 100644 index cfea94db1f1..00000000000 --- a/mysql-test/include/ndb_wait_connected.inc +++ /dev/null @@ -1,26 +0,0 @@ -# Check that mysqld has reconnected to ndbd after -# restart of ndbd -# ---disable_query_log ---disable_result_log -let $mysql_errno= 1; -let $counter= 600; -while ($mysql_errno) -{ - --error 0,157 - CREATE TABLE ndb_wait_connected (a int primary key); - if ($mysql_errno) - { - if (!$counter) - { - die Failed waiting for mysqld to reconnect to ndbd; - } - dec $counter; - --sleep 0.1 - } -} -DROP TABLE ndb_wait_connected; ---enable_query_log ---enable_result_log - - diff --git a/mysql-test/include/not_ndb.inc b/mysql-test/include/not_ndb.inc deleted file mode 100644 index 965538c20a7..00000000000 --- a/mysql-test/include/not_ndb.inc +++ /dev/null @@ -1,7 +0,0 @@ --- require r/not_ndb.require -disable_query_log; -# so that both DISABLED and NO is output as NO --- replace_result DISABLED NO -show variables like "have_ndbcluster"; -enable_query_log; - diff --git a/mysql-test/include/not_ndb_default.inc b/mysql-test/include/not_ndb_default.inc deleted file mode 100644 index 682a2944171..00000000000 --- a/mysql-test/include/not_ndb_default.inc +++ /dev/null @@ -1,4 +0,0 @@ ---require r/not_ndb_default.require -disable_query_log; -select convert(@@storage_engine using latin1) NOT IN ("ndbcluster","NDBCLUSTER") as "TRUE"; -enable_query_log; diff --git a/mysql-test/include/ps_query.inc b/mysql-test/include/ps_query.inc index 8148935cbe1..932e80f8b81 100644 --- a/mysql-test/include/ps_query.inc +++ b/mysql-test/include/ps_query.inc @@ -428,7 +428,6 @@ execute stmt1 using @arg01, @arg02; prepare stmt1 from ' select a, b FROM t1 outer_table where a = (select a from t1 where b = outer_table.b ) order by a '; # also Bug#4000 (only BDB tables) -# Bug#4106 : ndb table, query with correlated subquery, wrong result execute stmt1 ; # test case derived from client_test.c: test_subqueries_ref let $1= 3 ; diff --git a/mysql-test/include/rpl_init.inc b/mysql-test/include/rpl_init.inc index 09569fc2b57..4ee4cccdc20 100644 --- a/mysql-test/include/rpl_init.inc +++ b/mysql-test/include/rpl_init.inc @@ -35,9 +35,7 @@ # (It is allowed, but not required, to configure SERVER_MYPORT_1 # and SERVER_MYPORT_2 too. If these variables are not set, the # variables MASTER_MYPORT and SLAVE_MYPORT, configured in the -# default my.cnf used by the rpl and rpl_ndb suites, are used -# instead. In addition, in the rpl_ndb suite, SERVER_MYPORT_3 is -# not needed since MASTER_MYPORT1 can be used instead.) +# default my.cnf used by the rpl suite, are used instead.) # # 2. 
Execute the following near the top of the test: # @@ -124,18 +122,6 @@ if (!$SERVER_MYPORT_2) { --let SERVER_MYPORT_2= $SLAVE_MYPORT } -# Allow $MASTER_MYPORT1 as alias for $SERVER_MYPORT_3 -# (this alias is used by rpl_ndb tests) -if (!$SERVER_MYPORT_3) -{ - --let SERVER_MYPORT_3= $MASTER_MYPORT1 -} -# Allow $SLAVE_MYPORT1 as alias for $SERVER_MYPORT_4 -# (this alias is used by rpl_ndb tests) -if (!$SERVER_MYPORT_4) -{ - --let SERVER_MYPORT_4= $SLAVE_MYPORT1 -} # Check that $rpl_server_count is set if (!$rpl_server_count) { diff --git a/mysql-test/include/rpl_multi_engine2.inc b/mysql-test/include/rpl_multi_engine2.inc index e683a1d5838..24154220cb0 100644 --- a/mysql-test/include/rpl_multi_engine2.inc +++ b/mysql-test/include/rpl_multi_engine2.inc @@ -72,10 +72,6 @@ ORDER BY id; connection master; --echo --- Remove a record from t1 on master --- -# Note: there is an error in replication of Delete_row -# from NDB to MyISAM (BUG#28538). However, if there is -# only one row in Delete_row event then it works fine, -# as this test demonstrates. DELETE FROM t1 WHERE id = 412; --echo --- Show current count on master for t1 --- diff --git a/mysql-test/include/safe_set_to_maybe_ro_var.inc b/mysql-test/include/safe_set_to_maybe_ro_var.inc deleted file mode 100644 index add7f2091b3..00000000000 --- a/mysql-test/include/safe_set_to_maybe_ro_var.inc +++ /dev/null @@ -1,23 +0,0 @@ -# to mask out the error - never abort neither log in result file - in setting -# to read-only variable. -# It is assumed that the new value is equal to one the var was set to. -# Such situation happens particularily with binlog_format that becomes read-only -# with ndb default storage. -# -# when generate results always watch the file to find what is expected, -# the SET query may fail - -# script accepts $maybe_ro_var the var name and $val4var the value - -### USAGE: -### let $maybe_ro_var= ... -### let $val4var= ... -### include/safe_set_to_maybe_ro_var.inc - ---disable_result_log ---disable_abort_on_error -eval SET $maybe_ro_var = $val4var; ---enable_abort_on_error ---enable_result_log - -eval SELECT $maybe_ro_var; diff --git a/mysql-test/include/select_ndb_apply_status.inc b/mysql-test/include/select_ndb_apply_status.inc deleted file mode 100644 index a676b7cfb06..00000000000 --- a/mysql-test/include/select_ndb_apply_status.inc +++ /dev/null @@ -1,13 +0,0 @@ -################################################## -# Author: Jeb -# Date: 2007/04 -# Purpose: To select out log name, start and end -# positions from ndb_apply_status table -################################################## ---replace_column 1 2 3 -select @log_name:=log_name, @start_pos:=start_pos, @end_pos:=end_pos - from mysql.ndb_apply_status; ---let $start_pos = `select @start_pos` ---let $end_pos = `select @end_pos` ---let $log_name = `select @log_name` - diff --git a/mysql-test/include/wait_for_ndb_to_binlog.inc b/mysql-test/include/wait_for_ndb_to_binlog.inc deleted file mode 100644 index c1f94802724..00000000000 --- a/mysql-test/include/wait_for_ndb_to_binlog.inc +++ /dev/null @@ -1,26 +0,0 @@ -# ==== Purpose ==== -# -# Several test primitives from mysql-test/extra/rpl_tests -# are shared for test cases for MyISAM, InnoDB, NDB and -# other engines. -# For NDB engine all events will be added by NDB injector -# so tests only can continue after injector is ready, -# this test waits for proper injector thread state. 
-# -# ==== Usage ==== -# -# let $engine_type= NDB; -# --source include/wait_for_ndb_to_binlog.inc -# -# ==== Parameters ===== -# -# $engine_type -# Type of engine. If type is NDB then it waits for injector -# thread proper state. - -if (`SELECT UPPER(LEFT('$engine_type',3)) = 'NDB'`) { - let $show_statement= SHOW PROCESSLIST; - let $field= State; - let $condition= = 'Waiting for event from ndbcluster'; - source include/wait_show_condition.inc; -} diff --git a/mysql-test/lib/My/ConfigFactory.pm b/mysql-test/lib/My/ConfigFactory.pm index 4e8507a5c4a..8c19ed58665 100644 --- a/mysql-test/lib/My/ConfigFactory.pm +++ b/mysql-test/lib/My/ConfigFactory.pm @@ -260,65 +260,6 @@ if (IS_WINDOWS) push(@mysqld_rules, {'shared-memory-base-name' => \&fix_socket}); } -sub fix_ndb_mgmd_port { - my ($self, $config, $group_name, $group)= @_; - my $hostname= $group->value('HostName'); - return $self->{PORT}++; -} - - -sub fix_cluster_dir { - my ($self, $config, $group_name, $group)= @_; - my $vardir= $self->{ARGS}->{vardir}; - my (undef, $process_type, $idx, $suffix)= split(/\./, $group_name); - return "$vardir/mysql_cluster.$suffix/$process_type.$idx"; -} - - -sub fix_cluster_backup_dir { - my ($self, $config, $group_name, $group)= @_; - my $vardir= $self->{ARGS}->{vardir}; - my (undef, $process_type, $idx, $suffix)= split(/\./, $group_name); - return "$vardir/mysql_cluster.$suffix/"; -} - - -# -# Rules to run for each ndb_mgmd in the config -# - will be run in order listed here -# -my @ndb_mgmd_rules= -( - { 'PortNumber' => \&fix_ndb_mgmd_port }, - { 'DataDir' => \&fix_cluster_dir }, -); - - -# -# Rules to run for each ndbd in the config -# - will be run in order listed here -# -my @ndbd_rules= -( - { 'HostName' => \&fix_host }, - { 'DataDir' => \&fix_cluster_dir }, - { 'BackupDataDir' => \&fix_cluster_backup_dir }, -); - - -# -# Rules to run for each cluster_config section -# - will be run in order listed here -# -my @cluster_config_rules= -( - { 'ndb_mgmd' => \&fix_host }, - { 'ndbd' => \&fix_host }, - { 'mysqld' => \&fix_host }, - { 'ndbapi' => \&fix_host }, -); - - # # Rules to run for [client] section # - will be run in order listed here @@ -496,49 +437,12 @@ sub post_fix_resolve_at_variables { } } -sub post_fix_mysql_cluster_section { - my ($self, $config)= @_; - - # Add a [mysl_cluster.] section for each - # defined [cluster_config.] section - foreach my $group ( $config->like('cluster_config\.\w*$') ) - { - my @urls; - # Generate ndb_connectstring for this cluster - foreach my $ndb_mgmd ( $config->like('cluster_config.ndb_mgmd.')) { - if ($ndb_mgmd->suffix() eq $group->suffix()) { - my $host= $ndb_mgmd->value('HostName'); - my $port= $ndb_mgmd->value('PortNumber'); - push(@urls, "$host:$port"); - } - } - croak "Could not generate valid ndb_connectstring for '$group'" - unless @urls > 0; - my $ndb_connectstring= join(";", @urls); - - # Add ndb_connectstring to [mysql_cluster.] 
- $config->insert('mysql_cluster'.$group->suffix(), - 'ndb_connectstring', $ndb_connectstring); - - # Add ndb_connectstring to each mysqld connected to this - # cluster - foreach my $mysqld ( $config->like('cluster_config.mysqld.')) { - if ($mysqld->suffix() eq $group->suffix()) { - my $after= $mysqld->after('cluster_config.mysqld'); - $config->insert("mysqld$after", - 'ndb_connectstring', $ndb_connectstring); - } - } - } -} - # # Rules to run last of all # my @post_rules= ( \&post_check_client_groups, - \&post_fix_mysql_cluster_section, \&post_fix_resolve_at_variables, \&post_check_embedded_group, ); @@ -576,54 +480,6 @@ sub run_section_rules { } -sub run_generate_sections_from_cluster_config { - my ($self, $config)= @_; - - my @options= ('ndb_mgmd', 'ndbd', - 'mysqld', 'ndbapi'); - - foreach my $group ( $config->like('cluster_config\.\w*$') ) { - - # Keep track of current index per process type - my %idxes; - map { $idxes{$_}= 1; } @options; - - foreach my $option_name ( @options ) { - my $value= $group->value($option_name); - my @hosts= split(/,/, $value, -1); # -1 => return also empty strings - - # Add at least one host - push(@hosts, undef) unless scalar(@hosts); - - # Assign hosts unless already fixed - @hosts= map { $self->fix_host() unless $_; } @hosts; - - # Write the hosts value back - $group->insert($option_name, join(",", @hosts)); - - # Generate sections for each host - foreach my $host ( @hosts ){ - my $idx= $idxes{$option_name}++; - - my $suffix= $group->suffix(); - # Generate a section for ndb_mgmd to read - $config->insert("cluster_config.$option_name.$idx$suffix", - "HostName", $host); - - if ($option_name eq 'mysqld'){ - my $datadir= - $self->fix_cluster_dir($config, - "cluster_config.mysqld.$idx$suffix", - $group); - $config->insert("mysqld.$idx$suffix", - 'datadir', "$datadir/data"); - } - } - } - } -} - - sub new_config { my ($class, $args)= @_; @@ -648,18 +504,6 @@ sub new_config { &$rule($self, $config); } - $self->run_section_rules($config, - 'cluster_config\.\w*$', - @cluster_config_rules); - $self->run_generate_sections_from_cluster_config($config); - - $self->run_section_rules($config, - 'cluster_config.ndb_mgmd.', - @ndb_mgmd_rules); - $self->run_section_rules($config, - 'cluster_config.ndbd', - @ndbd_rules); - $self->run_section_rules($config, 'mysqld.', @mysqld_rules); diff --git a/mysql-test/lib/mtr_cases.pm b/mysql-test/lib/mtr_cases.pm index 441fd6e6559..97348cd39fc 100644 --- a/mysql-test/lib/mtr_cases.pm +++ b/mysql-test/lib/mtr_cases.pm @@ -36,7 +36,6 @@ our $do_test; our $skip_test; our $binlog_format; our $enable_disabled; -our $opt_with_ndbcluster_only; sub collect_option { my ($opt, $value)= @_; @@ -817,29 +816,6 @@ sub collect_one_test_case { return $tinfo } - if ( $tinfo->{'ndb_test'} ) - { - # This is a NDB test - if ( $::ndbcluster_enabled == 0) - { - # ndbcluster is disabled - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "ndbcluster disabled"; - return $tinfo; - } - } - else - { - # This is not a ndb test - if ( $opt_with_ndbcluster_only ) - { - # Only the ndb test should be run, all other should be skipped - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "Only ndbcluster tests"; - return $tinfo; - } - } - if ( $tinfo->{'rpl_test'} ) { if ( $skip_rpl ) @@ -976,10 +952,7 @@ sub collect_one_test_case { my $tags_map= {'big_test' => ['big_test', 1], - 'have_ndb' => ['ndb_test', 1], - 'have_multi_ndb' => ['ndb_test', 1], 'master-slave' => ['rpl_test', 1], - 'ndb_master-slave' => ['rpl_test', 1, 'ndb_test', 1], 'long_test' => ['long_test', 1], }; my 
$tags_regex_string= join('|', keys %$tags_map); diff --git a/mysql-test/lib/v1/mtr_cases.pl b/mysql-test/lib/v1/mtr_cases.pl index baeab24f519..067e7e5e4d1 100644 --- a/mysql-test/lib/v1/mtr_cases.pl +++ b/mysql-test/lib/v1/mtr_cases.pl @@ -154,7 +154,6 @@ sub collect_test_cases ($) { # # Append the criteria for sorting, in order of importance. # - push(@criteria, "ndb=" . ($tinfo->{'ndb_test'} ? "1" : "0")); # Group test with equal options together. # Ending with "~" makes empty sort later than filled push(@criteria, join("!", sort @{$tinfo->{'master_opt'}}) . "~"); @@ -788,8 +787,6 @@ sub collect_one_test_case($$$$$$$$$) { { # Different default engine is used # tag test to require that engine - $tinfo->{'ndb_test'}= 1 - if ( $::used_default_engine =~ /^ndb/i ); $tinfo->{'innodb_test'}= 1 if ( $::used_default_engine =~ /^innodb/i ); @@ -809,20 +806,6 @@ sub collect_one_test_case($$$$$$$$$) { return; } - if ( $tinfo->{'ndb_extra'} and ! $::opt_ndb_extra_test ) - { - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "Test need 'ndb_extra' option"; - return; - } - - if ( $tinfo->{'require_manager'} ) - { - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "Test need the _old_ manager(to be removed)"; - return; - } - if ( $tinfo->{'need_debug'} && ! $::debug_compiled_binaries ) { $tinfo->{'skip'}= 1; @@ -830,38 +813,6 @@ sub collect_one_test_case($$$$$$$$$) { return; } - if ( $tinfo->{'ndb_test'} ) - { - # This is a NDB test - if ( ! $::glob_ndbcluster_supported ) - { - # Ndb is not supported, skip it - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "No ndbcluster support"; - return; - } - elsif ( $::opt_skip_ndbcluster ) - { - # All ndb test's should be skipped - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "No ndbcluster tests(--skip-ndbcluster)"; - return; - } - # Ndb tests run with two mysqld masters - $tinfo->{'master_num'}= 2; - } - else - { - # This is not a ndb test - if ( $::opt_with_ndbcluster_only ) - { - # Only the ndb test should be run, all other should be skipped - $tinfo->{'skip'}= 1; - $tinfo->{'comment'}= "Only ndbcluster tests(--with-ndbcluster-only)"; - return; - } - } - if ( $tinfo->{'innodb_test'} ) { # This is a test that need innodb @@ -916,10 +867,6 @@ our @tags= ["include/have_log_bin.inc", "need_binlog", 1], ["include/big_test.inc", "big_test", 1], ["include/have_debug.inc", "need_debug", 1], - ["include/have_ndb.inc", "ndb_test", 1], - ["include/have_multi_ndb.inc", "ndb_test", 1], - ["include/have_ndb_extra.inc", "ndb_extra", 1], - ["include/ndb_master-slave.inc", "ndb_test", 1], ["require_manager", "require_manager", 1], ["include/federated.inc", "federated_test", 1], ["include/have_federated_db.inc", "federated_test", 1], diff --git a/mysql-test/lib/v1/mtr_process.pl b/mysql-test/lib/v1/mtr_process.pl index f86f9e5dec0..3518b33839e 100644 --- a/mysql-test/lib/v1/mtr_process.pl +++ b/mysql-test/lib/v1/mtr_process.pl @@ -31,7 +31,6 @@ sub mtr_check_stop_servers ($); sub mtr_kill_leftovers (); sub mtr_wait_blocking ($); sub mtr_record_dead_children (); -sub mtr_ndbmgm_start($$); sub mtr_mysqladmin_start($$$); sub mtr_exit ($); sub sleep_until_file_created ($$$); @@ -342,10 +341,8 @@ sub mtr_process_exit_status { ############################################################################## -# Kill all processes(mysqld, ndbd, ndb_mgmd and im) that would conflict with -# this run +# Kill all processes that would conflict with this run # Make sure to remove the PID file, if any. 
-# kill IM manager first, else it will restart the servers sub mtr_kill_leftovers () { mtr_report("Killing Possible Leftover Processes"); @@ -376,46 +373,6 @@ sub mtr_kill_leftovers () { $srv->{'pid'}= 0; # Assume we are done with it } - if ( ! $::opt_skip_ndbcluster ) - { - - foreach my $cluster (@{$::clusters}) - { - - # Don't shut down a "running" cluster - next if $cluster->{'use_running'}; - - mtr_debug(" - cluster " . - "(pid: $cluster->{pid}; " . - "pid file: '$cluster->{path_pid})"); - - my $pid= mtr_ndbmgm_start($cluster, "shutdown"); - - # Save the pid of the ndb_mgm process - $admin_pids{$pid}= 1; - - push(@kill_pids,{ - pid => $cluster->{'pid'}, - pidfile => $cluster->{'path_pid'} - }); - - $cluster->{'pid'}= 0; # Assume we are done with it - - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - mtr_debug(" - ndbd " . - "(pid: $ndbd->{pid}; " . - "pid file: '$ndbd->{path_pid})"); - - push(@kill_pids,{ - pid => $ndbd->{'pid'}, - pidfile => $ndbd->{'path_pid'}, - }); - $ndbd->{'pid'}= 0; # Assume we are done with it - } - } - } - # Wait for all the admin processes to complete mtr_wait_blocking(\%admin_pids); @@ -747,32 +704,6 @@ sub mtr_mysqladmin_start($$$) { } -# Start "ndb_mgm shutdown" for a specific cluster, it will -# shutdown all data nodes and leave the ndb_mgmd running -sub mtr_ndbmgm_start($$) { - my $cluster= shift; - my $command= shift; - - my $args; - - mtr_init_args(\$args); - - mtr_add_arg($args, "--no-defaults"); - mtr_add_arg($args, "--core"); - mtr_add_arg($args, "--try-reconnect=1"); - mtr_add_arg($args, "--ndb_connectstring=%s", $cluster->{'connect_string'}); - mtr_add_arg($args, "-e"); - mtr_add_arg($args, "$command"); - - my $pid= mtr_spawn($::exe_ndb_mgm, $args, - "", "/dev/null", "/dev/null", "", - {}); - mtr_verbose("mtr_ndbmgm_start, pid: $pid"); - return $pid; - -} - - # Ping all servers in list, exit when none of them answers # or when timeout has passed sub mtr_ping_with_timeout($) { @@ -843,25 +774,6 @@ sub mark_process_dead($) } } - foreach my $cluster (@{$::clusters}) - { - if ( $cluster->{'pid'} eq $ret_pid ) - { - mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid"); - $cluster->{'pid'}= 0; - return; - } - - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - if ( $ndbd->{'pid'} eq $ret_pid ) - { - mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid"); - $ndbd->{'pid'}= 0; - return; - } - } - } mtr_warning("mark_process_dead couldn't find an entry for pid: $ret_pid"); } @@ -915,52 +827,6 @@ sub check_expected_crash_and_restart($) } } - foreach my $cluster (@{$::clusters}) - { - if ( $cluster->{'pid'} eq $ret_pid ) - { - mtr_verbose("$cluster->{'name'} cluster ndb_mgmd exited, pid: $ret_pid"); - $cluster->{'pid'}= 0; - - # Check if crash expected and restart if it was - my $expect_file= "$::opt_vardir/tmp/ndb_mgmd_" . "$cluster->{'type'}" . - ".expect"; - if ( -f $expect_file ) - { - mtr_verbose("Crash was expected, file $expect_file exists"); - unlink($expect_file); - ndbmgmd_start($cluster); - } - return; - } - - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - if ( $ndbd->{'pid'} eq $ret_pid ) - { - mtr_verbose("$cluster->{'name'} cluster ndbd exited, pid: $ret_pid"); - $ndbd->{'pid'}= 0; - - # Check if crash expected and restart if it was - my $expect_file= "$::opt_vardir/tmp/ndbd_" . "$cluster->{'type'}" . - "$ndbd->{'idx'}" . 
".expect"; - if ( -f $expect_file ) - { - mtr_verbose("Crash was expected, file $expect_file exists"); - unlink($expect_file); - ndbd_start($cluster, $ndbd->{'idx'}, - $ndbd->{'start_extra_args'}); - } - return; - } - } - } - - if ($::instance_manager->{'spawner_pid'} eq $ret_pid) - { - return; - } - mtr_warning("check_expected_crash_and_restart couldn't find an entry for pid: $ret_pid"); } diff --git a/mysql-test/lib/v1/mtr_report.pl b/mysql-test/lib/v1/mtr_report.pl index accf00dbb5d..e2556d17093 100644 --- a/mysql-test/lib/v1/mtr_report.pl +++ b/mysql-test/lib/v1/mtr_report.pl @@ -287,9 +287,6 @@ sub mtr_report_stats ($) { /unknown variable 'loose-/ or /You have forced lower_case_table_names to 0 through a command-line option/ or /Setting lower_case_table_names=2/ or - /NDB Binlog:/ or - /NDB: failed to setup table/ or - /NDB: only row based binary logging/ or /Neither --relay-log nor --relay-log-index were used/ or /Query partially completed/ or /Slave I.O thread aborted while waiting for relay log/ or @@ -314,7 +311,6 @@ sub mtr_report_stats ($) { /Slave: Can't drop database.* database doesn't exist/ or /Slave SQL:.*(?:error.* \d+|Query:.*)/ or /Sort aborted/ or - /Time-out in NDB/ or /One can only use the --user.*root/ or /Table:.* on (delete|rename)/ or /You have an error in your SQL syntax/ or diff --git a/mysql-test/lib/v1/mysql-test-run.pl b/mysql-test/lib/v1/mysql-test-run.pl index baeb141f18c..79489d92742 100755 --- a/mysql-test/lib/v1/mysql-test-run.pl +++ b/mysql-test/lib/v1/mysql-test-run.pl @@ -92,7 +92,6 @@ our $glob_cygwin_perl= ($^O eq "cygwin"); # Cygwin Perl our $glob_win32= ($glob_win32_perl or $glob_cygwin_perl); require "lib/v1/mtr_cases.pl"; -require "lib/v1/mtr_im.pl"; require "lib/v1/mtr_process.pl"; require "lib/v1/mtr_timer.pl"; require "lib/v1/mtr_io.pl"; @@ -168,8 +167,6 @@ our $exe_mysqlimport; our $exe_mysqlshow; our $file_mysql_fix_privilege_tables; our $exe_mysqltest; -our $exe_ndbd; -our $exe_ndb_mgmd; our $exe_slave_mysqld; our $exe_my_print_defaults; our $exe_perror; @@ -233,19 +230,9 @@ our $opt_gprof_slave; our $master; our $slave; -our $clusters; - -our $instance_manager; our $opt_master_myport; our $opt_slave_myport; -our $im_port; -our $im_mysqld1_port; -our $im_mysqld2_port; -our $opt_ndbcluster_port; -our $opt_ndbconnectstring; -our $opt_ndbcluster_port_slave; -our $opt_ndbconnectstring_slave; our $opt_record; my $opt_report_features; @@ -257,7 +244,6 @@ our $max_slave_num= 0; our $max_master_num= 1; our $use_innodb; our $opt_skip_test; -our $opt_skip_im; our $opt_sleep; @@ -296,22 +282,9 @@ our $opt_stress_test_file= ""; our $opt_warnings; -our $opt_skip_ndbcluster= 0; -our $opt_skip_ndbcluster_slave= 0; -our $opt_with_ndbcluster= 0; -our $opt_with_ndbcluster_only= 0; -our $glob_ndbcluster_supported= 0; -our $opt_ndb_extra_test= 0; our $opt_skip_master_binlog= 0; our $opt_skip_slave_binlog= 0; -our $exe_ndb_mgm; -our $exe_ndb_waiter; -our $path_ndb_tools_dir; -our $path_ndb_examples_dir; -our $exe_ndb_example; -our $path_ndb_testrun_log; - our $path_sql_dir; our @data_dir_lst; @@ -345,11 +318,6 @@ sub remove_stale_vardir (); sub setup_vardir (); sub check_ssl_support ($); sub check_running_as_root(); -sub check_ndbcluster_support ($); -sub rm_ndbcluster_tables ($); -sub ndbcluster_start_install ($); -sub ndbcluster_start ($$); -sub ndbcluster_wait_started ($$); sub mysqld_wait_started($); sub run_benchmarks ($); sub initialize_servers (); @@ -363,8 +331,6 @@ sub run_testcase_check_skip_test($); sub report_failure_and_restart ($); sub 
do_before_start_master ($); sub do_before_start_slave ($); -sub ndbd_start ($$$); -sub ndb_mgmd_start ($); sub mysqld_start ($$$); sub mysqld_arguments ($$$$); sub stop_all_servers (); @@ -384,7 +350,6 @@ sub main () { command_line_setup(); - check_ndbcluster_support(\%mysqld_variables); check_ssl_support(\%mysqld_variables); check_debug_support(\%mysqld_variables); @@ -419,42 +384,18 @@ sub main () { if (!$opt_suites) { $opt_suites= $opt_suites_default; - - # Check for any extra suites to enable based on the path name - my %extra_suites= - ( - "mysql-5.1-new-ndb" => "ndb_team", - "mysql-5.1-new-ndb-merge" => "ndb_team", - "mysql-5.1-telco-6.2" => "ndb_team", - "mysql-5.1-telco-6.2-merge" => "ndb_team", - "mysql-5.1-telco-6.3" => "ndb_team", - "mysql-6.0-ndb" => "ndb_team", - ); - - foreach my $dir ( reverse splitdir($glob_basedir) ) - { - my $extra_suite= $extra_suites{$dir}; - if (defined $extra_suite){ - mtr_report("Found extra suite: $extra_suite"); - $opt_suites= "$extra_suite,$opt_suites"; - last; - } - } } my $tests= collect_test_cases($opt_suites); - # Turn off NDB and other similar options if no tests use it - my ($need_ndbcluster,$need_im, $need_debug); + my ($need_debug); foreach my $test (@$tests) { next if $test->{skip}; if (!$opt_extern) { - $need_ndbcluster||= $test->{ndb_test}; $need_debug||=$test->{need_debug}; - $need_im||= $test->{component_id} eq 'im'; # Count max number of slaves used by a test case if ( $test->{slave_num} > $max_slave_num) { @@ -472,30 +413,11 @@ sub main () { $use_innodb||= $test->{'innodb_test'}; } - # Check if cluster can be skipped - if ( !$need_ndbcluster ) - { - $opt_skip_ndbcluster= 1; - $opt_skip_ndbcluster_slave= 1; - } - if ( !$need_debug && !$opt_debug) { $opt_debug=0; } - # Check if slave cluster can be skipped - if ($max_slave_num == 0) - { - $opt_skip_ndbcluster_slave= 1; - } - - # Check if im can be skipped - if ( ! $need_im ) - { - $opt_skip_im= 1; - } - initialize_servers(); if ( $opt_report_features ) { @@ -580,23 +502,16 @@ sub command_line_setup () { 'compress' => \$opt_compress, 'bench' => \$opt_bench, 'small-bench' => \$opt_small_bench, - 'with-ndbcluster|ndb' => \$opt_with_ndbcluster, 'vs-config' => \$opt_vs_config, # Control what test suites or cases to run 'force' => \$opt_force, - 'with-ndbcluster-only' => \$opt_with_ndbcluster_only, - 'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster, - 'skip-ndbcluster-slave|skip-ndb-slave' - => \$opt_skip_ndbcluster_slave, - 'ndb-extra-test' => \$opt_ndb_extra_test, 'skip-master-binlog' => \$opt_skip_master_binlog, 'skip-slave-binlog' => \$opt_skip_slave_binlog, 'do-test=s' => \$opt_do_test, 'start-from=s' => \$opt_start_from, 'suite|suites=s' => \$opt_suites, 'skip-rpl' => \$opt_skip_rpl, - 'skip-im' => \$opt_skip_im, 'skip-test=s' => \$opt_skip_test, 'big-test' => \$opt_big_test, 'combination=s' => \@opt_combinations, @@ -605,11 +520,6 @@ sub command_line_setup () { # Specify ports 'master_port=i' => \$opt_master_myport, 'slave_port=i' => \$opt_slave_myport, - 'ndbcluster-port|ndbcluster_port=i' => \$opt_ndbcluster_port, - 'ndbcluster-port-slave=i' => \$opt_ndbcluster_port_slave, - 'im-port=i' => \$im_port, # Instance Manager port. 
- 'im-mysqld1-port=i' => \$im_mysqld1_port, # Port of mysqld, controlled by IM - 'im-mysqld2-port=i' => \$im_mysqld2_port, # Port of mysqld, controlled by IM 'mtr-build-thread=i' => \$opt_mtr_build_thread, # Test case authoring @@ -625,8 +535,6 @@ sub command_line_setup () { # Run test on running server 'extern' => \$opt_extern, - 'ndb-connectstring=s' => \$opt_ndbconnectstring, - 'ndb-connectstring-slave=s' => \$opt_ndbconnectstring_slave, # Debugging 'gdb' => \$opt_gdb, @@ -800,7 +708,6 @@ sub command_line_setup () { push(@glob_test_mode, "embedded"); $opt_skip_rpl= 1; # We never run replication with embedded - $opt_skip_ndbcluster= 1; # Turn off use of NDB cluster $opt_skip_ssl= 1; # Turn off use of SSL # Turn off use of bin log @@ -922,12 +829,6 @@ sub command_line_setup () { # -------------------------------------------------------------------------- # Find out default storage engine being used(if any) # -------------------------------------------------------------------------- - if ( $opt_with_ndbcluster ) - { - # --ndb or --with-ndbcluster turns on --default-storage-engine=ndbcluster - push(@opt_extra_mysqld_opt, "--default-storage-engine=ndbcluster"); - } - foreach my $arg ( @opt_extra_mysqld_opt ) { if ( $arg =~ /default-storage-engine=(\S+)/ ) @@ -1011,25 +912,6 @@ sub command_line_setup () { $opt_tmpdir= "$opt_vardir/tmp" unless $opt_tmpdir; $opt_tmpdir =~ s,/+$,,; # Remove ending slash if any - # -------------------------------------------------------------------------- - # Check im suport - # -------------------------------------------------------------------------- - if ($opt_extern) - { - # mtr_report("Disable instance manager when running with extern mysqld"); - $opt_skip_im= 1; - } - elsif ( $mysql_version_id < 50000 ) - { - # Instance manager is not supported until 5.0 - $opt_skip_im= 1; - } - elsif ( $glob_win32 ) - { - mtr_report("Disable Instance manager - testing not supported on Windows"); - $opt_skip_im= 1; - } - # -------------------------------------------------------------------------- # Record flag # -------------------------------------------------------------------------- @@ -1188,7 +1070,6 @@ sub command_line_setup () { path_sock => "$sockdir/master.sock", port => $opt_master_myport, start_timeout => 400, # enough time create innodb tables - cluster => 0, # index in clusters list start_opts => [], }; @@ -1203,7 +1084,6 @@ sub command_line_setup () { path_sock => "$sockdir/master1.sock", port => $opt_master_myport + 1, start_timeout => 400, # enough time create innodb tables - cluster => 0, # index in clusters list start_opts => [], }; @@ -1219,7 +1099,6 @@ sub command_line_setup () { port => $opt_slave_myport, start_timeout => 400, - cluster => 1, # index in clusters list start_opts => [], }; @@ -1234,7 +1113,6 @@ sub command_line_setup () { path_sock => "$sockdir/slave1.sock", port => $opt_slave_myport + 1, start_timeout => 300, - cluster => -1, # index in clusters list start_opts => [], }; @@ -1249,91 +1127,9 @@ sub command_line_setup () { path_sock => "$sockdir/slave2.sock", port => $opt_slave_myport + 2, start_timeout => 300, - cluster => -1, # index in clusters list start_opts => [], }; - $instance_manager= - { - path_err => "$opt_vardir/log/im.err", - path_log => "$opt_vardir/log/im.log", - path_pid => "$opt_vardir/run/im.pid", - path_angel_pid => "$opt_vardir/run/im.angel.pid", - path_sock => "$sockdir/im.sock", - port => $im_port, - start_timeout => $master->[0]->{'start_timeout'}, - admin_login => 'im_admin', - admin_password => 'im_admin_secret', - 
admin_sha1 => '*598D51AD2DFF7792045D6DF3DDF9AA1AF737B295', - password_file => "$opt_vardir/im.passwd", - defaults_file => "$opt_vardir/im.cnf", - }; - - $instance_manager->{'instances'}->[0]= - { - server_id => 1, - port => $im_mysqld1_port, - path_datadir => "$opt_vardir/im_mysqld_1.data", - path_sock => "$sockdir/mysqld_1.sock", - path_pid => "$opt_vardir/run/mysqld_1.pid", - start_timeout => 400, # enough time create innodb tables - old_log_format => 1 - }; - - $instance_manager->{'instances'}->[1]= - { - server_id => 2, - port => $im_mysqld2_port, - path_datadir => "$opt_vardir/im_mysqld_2.data", - path_sock => "$sockdir/mysqld_2.sock", - path_pid => "$opt_vardir/run/mysqld_2.pid", - nonguarded => 1, - start_timeout => 400, # enough time create innodb tables - old_log_format => 1 - }; - - my $data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port"; - $clusters->[0]= - { - name => "Master", - nodes => 2, - port => "$opt_ndbcluster_port", - data_dir => "$data_dir", - connect_string => "host=localhost:$opt_ndbcluster_port", - path_pid => "$data_dir/ndb_3.pid", # Nodes + 1 - pid => 0, # pid of ndb_mgmd - installed_ok => 0, - }; - - $data_dir= "$opt_vardir/ndbcluster-$opt_ndbcluster_port_slave"; - $clusters->[1]= - { - name => "Slave", - nodes => 1, - port => "$opt_ndbcluster_port_slave", - data_dir => "$data_dir", - connect_string => "host=localhost:$opt_ndbcluster_port_slave", - path_pid => "$data_dir/ndb_2.pid", # Nodes + 1 - pid => 0, # pid of ndb_mgmd - installed_ok => 0, - }; - - # Init pids of ndbd's - foreach my $cluster ( @{$clusters} ) - { - for ( my $idx= 0; $idx < $cluster->{'nodes'}; $idx++ ) - { - my $nodeid= $idx+1; - $cluster->{'ndbds'}->[$idx]= - { - pid => 0, - nodeid => $nodeid, - path_pid => "$cluster->{'data_dir'}/ndb_${nodeid}.pid", - path_fs => "$cluster->{'data_dir'}/ndb_${nodeid}_fs", - }; - } - } - # -------------------------------------------------------------------------- # extern # -------------------------------------------------------------------------- @@ -1341,7 +1137,6 @@ sub command_line_setup () { { # Turn off features not supported when running with extern server $opt_skip_rpl= 1; - $opt_skip_ndbcluster= 1; # Setup master->[0] with the settings for the extern server $master->[0]->{'path_sock'}= $opt_socket ? $opt_socket : "/tmp/mysql.sock"; @@ -1354,41 +1149,10 @@ sub command_line_setup () { } - # -------------------------------------------------------------------------- - # ndbconnectstring and ndbconnectstring_slave - # -------------------------------------------------------------------------- - if ( $opt_ndbconnectstring ) - { - # ndbconnectstring was supplied by user, the tests shoudl be run - # against an already started cluster, change settings - my $cluster= $clusters->[0]; # Master cluster - $cluster->{'connect_string'}= $opt_ndbconnectstring; - $cluster->{'use_running'}= 1; - - mtr_error("Can't specify --ndb-connectstring and --skip-ndbcluster") - if $opt_skip_ndbcluster; - } - $ENV{'NDB_CONNECTSTRING'}= $clusters->[0]->{'connect_string'}; - - - if ( $opt_ndbconnectstring_slave ) - { - # ndbconnectstring-slave was supplied by user, the tests should be run - # agains an already started slave cluster, change settings - my $cluster= $clusters->[1]; # Slave cluster - $cluster->{'connect_string'}= $opt_ndbconnectstring_slave; - $cluster->{'use_running'}= 1; - - mtr_error("Can't specify ndb-connectstring_slave and " . 
- "--skip-ndbcluster-slave") - if $opt_skip_ndbcluster_slave; - } - $path_timefile= "$opt_vardir/log/mysqltest-time"; $path_mysqltest_log= "$opt_vardir/log/mysqltest.log"; $path_current_test_log= "$opt_vardir/log/current_test"; - $path_ndb_testrun_log= "$opt_vardir/log/ndb_testrun.log"; $path_snapshot= "$opt_tmpdir/snapshot_$opt_master_myport/"; } @@ -1422,11 +1186,6 @@ sub set_mtr_build_thread_ports($) { # A magic value in command_line_setup depends on these equations. $opt_master_myport= $mtr_build_thread * 10 + 10000; # and 1 $opt_slave_myport= $opt_master_myport + 2; # and 3 4 - $opt_ndbcluster_port= $opt_master_myport + 5; - $opt_ndbcluster_port_slave= $opt_master_myport + 6; - $im_port= $opt_master_myport + 7; - $im_mysqld1_port= $opt_master_myport + 8; - $im_mysqld2_port= $opt_master_myport + 9; if ( $opt_master_myport < 5001 or $opt_master_myport + 10 >= 32767 ) { @@ -1449,14 +1208,6 @@ sub datadir_list_setup () { { push(@data_dir_lst, $slave->[$idx]->{'path_myddir'}); } - - unless ($opt_skip_im) - { - foreach my $instance (@{$instance_manager->{'instances'}}) - { - push(@data_dir_lst, $instance->{'path_datadir'}); - } - } } @@ -1584,45 +1335,6 @@ sub collect_mysqld_features_from_running_server () } } -sub executable_setup_ndb () { - - # Look for ndb tols and binaries - my $ndb_path= mtr_file_exists("$glob_bindir/ndb", - "$glob_bindir/storage/ndb", - "$glob_bindir/bin"); - - $exe_ndbd= - mtr_exe_maybe_exists("$ndb_path/src/kernel/ndbd", - "$ndb_path/ndbd", - "$glob_bindir/libexec/ndbd"); - $exe_ndb_mgm= - mtr_exe_maybe_exists("$ndb_path/src/mgmclient/ndb_mgm", - "$ndb_path/ndb_mgm"); - $exe_ndb_mgmd= - mtr_exe_maybe_exists("$ndb_path/src/mgmsrv/ndb_mgmd", - "$ndb_path/ndb_mgmd", - "$glob_bindir/libexec/ndb_mgmd"); - $exe_ndb_waiter= - mtr_exe_maybe_exists("$ndb_path/tools/ndb_waiter", - "$ndb_path/ndb_waiter"); - - # May not exist - $path_ndb_tools_dir= mtr_file_exists("$ndb_path/tools", - "$ndb_path"); - # May not exist - $path_ndb_examples_dir= - mtr_file_exists("$ndb_path/ndbapi-examples", - "$ndb_path/examples"); - # May not exist - $exe_ndb_example= - mtr_file_exists("$path_ndb_examples_dir/ndbapi_simple/ndbapi_simple"); - - return ( $exe_ndbd eq "" or - $exe_ndb_mgm eq "" or - $exe_ndb_mgmd eq "" or - $exe_ndb_waiter eq ""); -} - sub executable_setup () { # @@ -1693,18 +1405,6 @@ sub executable_setup () { "$glob_basedir/share/mysql_fix_privilege_tables.sql", "$glob_basedir/share/mysql/mysql_fix_privilege_tables.sql"); - if ( ! $opt_skip_ndbcluster and executable_setup_ndb()) - { - mtr_warning("Could not find all required ndb binaries, " . - "all ndb tests will fail, use --skip-ndbcluster to " . 
- "skip testing it."); - - foreach my $cluster (@{$clusters}) - { - $cluster->{"executable_setup_failed"}= 1; - } - } - # Look for the udf_example library $lib_udf_example= mtr_file_exists(vs_config_dirs('sql', 'udf_example.dll'), @@ -1870,14 +1570,6 @@ sub environment_setup () { } } - # -------------------------------------------------------------------------- - # Add the path where libndbclient can be found - # -------------------------------------------------------------------------- - if ( $glob_ndbcluster_supported ) - { - push(@ld_library_paths, "$glob_bindir/storage/ndb/src/.libs"); - } - # -------------------------------------------------------------------------- # Valgrind need to be run with debug libraries otherwise it's almost # impossible to add correct supressions, that means if "/usr/lib/debug" @@ -1955,64 +1647,10 @@ sub environment_setup () { $ENV{'MYSQL_TCP_PORT'}= $mysqld_variables{'port'}; $ENV{'DEFAULT_MASTER_PORT'}= $mysqld_variables{'master-port'}; - $ENV{'IM_PATH_SOCK'}= $instance_manager->{path_sock}; - $ENV{'IM_USERNAME'}= $instance_manager->{admin_login}; - $ENV{'IM_PASSWORD'}= $instance_manager->{admin_password}; $ENV{MTR_BUILD_THREAD}= $opt_mtr_build_thread; $ENV{'EXE_MYSQL'}= $exe_mysql; - - # ---------------------------------------------------- - # Setup env for NDB - # ---------------------------------------------------- - if ( ! $opt_skip_ndbcluster ) - { - $ENV{'NDB_MGM'}= $exe_ndb_mgm; - - $ENV{'NDBCLUSTER_PORT'}= $opt_ndbcluster_port; - $ENV{'NDBCLUSTER_PORT_SLAVE'}= $opt_ndbcluster_port_slave; - - $ENV{'NDB_EXTRA_TEST'}= $opt_ndb_extra_test; - - $ENV{'NDB_BACKUP_DIR'}= $clusters->[0]->{'data_dir'}; - $ENV{'NDB_DATA_DIR'}= $clusters->[0]->{'data_dir'}; - $ENV{'NDB_TOOLS_DIR'}= $path_ndb_tools_dir; - $ENV{'NDB_TOOLS_OUTPUT'}= $path_ndb_testrun_log; - - if ( $mysql_version_id >= 50000 ) - { - $ENV{'NDB_EXAMPLES_DIR'}= $path_ndb_examples_dir; - $ENV{'MY_NDB_EXAMPLES_BINARY'}= $exe_ndb_example; - } - $ENV{'NDB_EXAMPLES_OUTPUT'}= $path_ndb_testrun_log; - } - - # ---------------------------------------------------- - # Setup env for IM - # ---------------------------------------------------- - if ( ! $opt_skip_im ) - { - $ENV{'IM_PATH_PID'}= $instance_manager->{path_pid}; - $ENV{'IM_PATH_ANGEL_PID'}= $instance_manager->{path_angel_pid}; - $ENV{'IM_PORT'}= $instance_manager->{port}; - $ENV{'IM_DEFAULTS_PATH'}= $instance_manager->{defaults_file}; - $ENV{'IM_PASSWORD_PATH'}= $instance_manager->{password_file}; - - $ENV{'IM_MYSQLD1_SOCK'}= - $instance_manager->{instances}->[0]->{path_sock}; - $ENV{'IM_MYSQLD1_PORT'}= - $instance_manager->{instances}->[0]->{port}; - $ENV{'IM_MYSQLD1_PATH_PID'}= - $instance_manager->{instances}->[0]->{path_pid}; - $ENV{'IM_MYSQLD2_SOCK'}= - $instance_manager->{instances}->[1]->{path_sock}; - $ENV{'IM_MYSQLD2_PORT'}= - $instance_manager->{instances}->[1]->{port}; - $ENV{'IM_MYSQLD2_PATH_PID'}= - $instance_manager->{instances}->[1]->{path_pid}; - } - # ---------------------------------------------------- # Setup env so childs can execute mysqlcheck # ---------------------------------------------------- @@ -2228,20 +1866,6 @@ sub environment_setup () { print "Using SLAVE_MYPORT = $ENV{SLAVE_MYPORT}\n"; print "Using SLAVE_MYPORT1 = $ENV{SLAVE_MYPORT1}\n"; print "Using SLAVE_MYPORT2 = $ENV{SLAVE_MYPORT2}\n"; - if ( ! $opt_skip_ndbcluster ) - { - print "Using NDBCLUSTER_PORT = $ENV{NDBCLUSTER_PORT}\n"; - if ( ! $opt_skip_ndbcluster_slave ) - { - print "Using NDBCLUSTER_PORT_SLAVE = $ENV{NDBCLUSTER_PORT_SLAVE}\n"; - } - } - if ( ! 
$opt_skip_im ) - { - print "Using IM_PORT = $ENV{IM_PORT}\n"; - print "Using IM_MYSQLD1_PORT = $ENV{IM_MYSQLD1_PORT}\n"; - print "Using IM_MYSQLD2_PORT = $ENV{IM_MYSQLD2_PORT}\n"; - } } # Create an environment variable to make it possible @@ -2599,155 +2223,6 @@ sub vs_config_dirs ($$) { "$glob_bindir/$path_part/debug/$exe"); } -############################################################################## -# -# Start the ndb cluster -# -############################################################################## - -sub check_ndbcluster_support ($) { - my $mysqld_variables= shift; - - if ($opt_skip_ndbcluster || $opt_extern) - { - if (!$opt_extern) - { - mtr_report("Skipping ndbcluster"); - } - $opt_skip_ndbcluster_slave= 1; - return; - } - - if ( ! $mysqld_variables->{'ndb-connectstring'} ) - { - mtr_report("Skipping ndbcluster, mysqld not compiled with ndbcluster"); - $opt_skip_ndbcluster= 1; - $opt_skip_ndbcluster_slave= 1; - return; - } - $glob_ndbcluster_supported= 1; - mtr_report("Using ndbcluster when necessary, mysqld supports it"); - - if ( $mysql_version_id < 50100 ) - { - # Slave cluster is not supported until 5.1 - $opt_skip_ndbcluster_slave= 1; - - } - - return; -} - - -sub ndbcluster_start_install ($) { - my $cluster= shift; - - mtr_report("Installing $cluster->{'name'} Cluster"); - - mkdir($cluster->{'data_dir'}); - - # Create a config file from template - my $ndb_no_ord=512; - my $ndb_no_attr=2048; - my $ndb_con_op=105000; - my $ndb_dmem="80M"; - my $ndb_imem="24M"; - my $ndb_pbmem="32M"; - my $nodes= $cluster->{'nodes'}; - my $ndb_host= "localhost"; - my $ndb_diskless= 0; - - if (!$opt_bench) - { - # Use a smaller configuration - if ( $mysql_version_id < 50100 ) - { - # 4.1 and 5.0 is using a "larger" --small configuration - $ndb_no_ord=128; - $ndb_con_op=10000; - $ndb_dmem="40M"; - $ndb_imem="12M"; - } - else - { - $ndb_no_ord=32; - $ndb_con_op=10000; - $ndb_dmem="20M"; - $ndb_imem="1M"; - $ndb_pbmem="4M"; - } - } - - my $config_file_template= "lib/v1/ndb_config_${nodes}_node.ini"; - my $config_file= "$cluster->{'data_dir'}/config.ini"; - - open(IN, $config_file_template) - or mtr_error("Can't open $config_file_template: $!"); - open(OUT, ">", $config_file) - or mtr_error("Can't write to $config_file: $!"); - while () - { - chomp; - - s/CHOOSE_MaxNoOfAttributes/$ndb_no_attr/; - s/CHOOSE_MaxNoOfOrderedIndexes/$ndb_no_ord/; - s/CHOOSE_MaxNoOfConcurrentOperations/$ndb_con_op/; - s/CHOOSE_DataMemory/$ndb_dmem/; - s/CHOOSE_IndexMemory/$ndb_imem/; - s/CHOOSE_Diskless/$ndb_diskless/; - s/CHOOSE_HOSTNAME_.*/$ndb_host/; - s/CHOOSE_FILESYSTEM/$cluster->{'data_dir'}/; - s/CHOOSE_PORT_MGM/$cluster->{'port'}/; - if ( $mysql_version_id < 50000 ) - { - my $base_port= $cluster->{'port'} + 1; - s/CHOOSE_PORT_TRANSPORTER/$base_port/; - } - s/CHOOSE_DiskPageBufferMemory/$ndb_pbmem/; - - print OUT "$_ \n"; - } - close OUT; - close IN; - - - # Start cluster with "--initial" - - ndbcluster_start($cluster, "--initial"); - - return 0; -} - - -sub ndbcluster_wait_started($$){ - my $cluster= shift; - my $ndb_waiter_extra_opt= shift; - my $path_waiter_log= "$cluster->{'data_dir'}/ndb_waiter.log"; - my $args; - - mtr_init_args(\$args); - - mtr_add_arg($args, "--no-defaults"); - mtr_add_arg($args, "--core"); - mtr_add_arg($args, "--ndb-connectstring=%s", $cluster->{'connect_string'}); - mtr_add_arg($args, "--timeout=60"); - - if ($ndb_waiter_extra_opt) - { - mtr_add_arg($args, "$ndb_waiter_extra_opt"); - } - - # Start the ndb_waiter which will connect to the ndb_mgmd - # and poll it for state of 
the ndbd's, will return when - # all nodes in the cluster is started - my $res= mtr_run($exe_ndb_waiter, $args, - "", $path_waiter_log, $path_waiter_log, ""); - mtr_verbose("ndbcluster_wait_started, returns: $res") if $res; - return $res; -} - - - sub mysqld_wait_started($){ my $mysqld= shift; @@ -2767,137 +2242,6 @@ sub mysqld_wait_started($){ } -sub ndb_mgmd_wait_started($) { - my ($cluster)= @_; - - my $retries= 100; - while (ndbcluster_wait_started($cluster, "--no-contact") and - $retries) - { - # Millisceond sleep emulated with select - select(undef, undef, undef, (0.1)); - - $retries--; - } - - return $retries == 0; - -} - -sub ndb_mgmd_start ($) { - my $cluster= shift; - - my $args; # Arg vector - my $pid= -1; - - mtr_init_args(\$args); - mtr_add_arg($args, "--no-defaults"); - mtr_add_arg($args, "--core"); - mtr_add_arg($args, "--nodaemon"); - mtr_add_arg($args, "--config-file=%s", "$cluster->{'data_dir'}/config.ini"); - - - my $path_ndb_mgmd_log= "$cluster->{'data_dir'}/\l$cluster->{'name'}_ndb_mgmd.log"; - $pid= mtr_spawn($exe_ndb_mgmd, $args, "", - $path_ndb_mgmd_log, - $path_ndb_mgmd_log, - "", - { append_log_file => 1 }); - - # FIXME Should not be needed - # Unfortunately the cluster nodes will fail to start - # if ndb_mgmd has not started properly - if (ndb_mgmd_wait_started($cluster)) - { - mtr_error("Failed to wait for start of ndb_mgmd"); - } - - # Remember pid of ndb_mgmd - $cluster->{'pid'}= $pid; - - mtr_verbose("ndb_mgmd_start, pid: $pid"); - - return $pid; -} - - -sub ndbd_start ($$$) { - my $cluster= shift; - my $idx= shift; - my $extra_args= shift; - - my $args; # Arg vector - my $pid= -1; - - mtr_init_args(\$args); - mtr_add_arg($args, "--no-defaults"); - mtr_add_arg($args, "--core"); - mtr_add_arg($args, "--ndb-connectstring=%s", "$cluster->{'connect_string'}"); - if ( $mysql_version_id >= 50000) - { - mtr_add_arg($args, "--character-sets-dir=%s", "$path_charsetsdir"); - } - mtr_add_arg($args, "--nodaemon"); - mtr_add_arg($args, "$extra_args"); - - my $nodeid= $cluster->{'ndbds'}->[$idx]->{'nodeid'}; - my $path_ndbd_log= "$cluster->{'data_dir'}/ndb_${nodeid}.log"; - $pid= mtr_spawn($exe_ndbd, $args, "", - $path_ndbd_log, - $path_ndbd_log, - "", - { append_log_file => 1 }); - - # Add pid to list of pids for this cluster - $cluster->{'ndbds'}->[$idx]->{'pid'}= $pid; - - # Rememeber options used when starting - $cluster->{'ndbds'}->[$idx]->{'start_extra_args'}= $extra_args; - $cluster->{'ndbds'}->[$idx]->{'idx'}= $idx; - - mtr_verbose("ndbd_start, pid: $pid"); - - return $pid; -} - - -sub ndbcluster_start ($$) { - my $cluster= shift; - my $extra_args= shift; - - mtr_verbose("ndbcluster_start '$cluster->{'name'}'"); - - if ( $cluster->{'use_running'} ) - { - return 0; - } - - if ( $cluster->{'pid'} ) - { - mtr_error("Cluster '$cluster->{'name'}' already started"); - } - - ndb_mgmd_start($cluster); - - for ( my $idx= 0; $idx < $cluster->{'nodes'}; $idx++ ) - { - ndbd_start($cluster, $idx, $extra_args); - } - - return 0; -} - - -sub rm_ndbcluster_tables ($) { - my $dir= shift; - foreach my $bin ( glob("$dir/mysql/ndb_apply_status*"), - glob("$dir/mysql/ndb_schema*")) - { - unlink($bin); - } -} - - ############################################################################## # # Run the benchmark suite @@ -2929,11 +2273,6 @@ sub run_benchmarks ($) { mtr_add_arg($args, "--small-tables"); } - if ( $opt_with_ndbcluster ) - { - mtr_add_arg($args, "--create-options=TYPE=ndb"); - } - chdir($glob_mysql_bench_dir) or mtr_error("Couldn't chdir to '$glob_mysql_bench_dir': $!"); @@ 
-3078,69 +2417,6 @@ sub mysql_install_db () { copy_install_db("slave".($idx+1), $slave->[$idx]->{'path_myddir'}); } - if ( ! $opt_skip_im ) - { - im_prepare_env($instance_manager); - } - - my $cluster_started_ok= 1; # Assume it can be started - - my $cluster= $clusters->[0]; # Master cluster - if ($opt_skip_ndbcluster || - $cluster->{'use_running'} || - $cluster->{executable_setup_failed}) - { - # Don't install master cluster - } - elsif (ndbcluster_start_install($cluster)) - { - mtr_warning("Failed to start install of $cluster->{name}"); - $cluster_started_ok= 0; - } - - $cluster= $clusters->[1]; # Slave cluster - if ($max_slave_num == 0 || - $opt_skip_ndbcluster_slave || - $cluster->{'use_running'} || - $cluster->{executable_setup_failed}) - { - # Don't install slave cluster - } - elsif (ndbcluster_start_install($cluster)) - { - mtr_warning("Failed to start install of $cluster->{name}"); - $cluster_started_ok= 0; - } - - foreach $cluster (@{$clusters}) - { - - next if !$cluster->{'pid'}; - - $cluster->{'installed_ok'}= 1; # Assume install suceeds - - if (ndbcluster_wait_started($cluster, "")) - { - # failed to install, disable usage and flag that its no ok - mtr_report("ndbcluster_install of $cluster->{'name'} failed"); - $cluster->{"installed_ok"}= 0; - - $cluster_started_ok= 0; - } - } - - if ( ! $cluster_started_ok ) - { - if ( $opt_force) - { - # Continue without cluster - } - else - { - mtr_error("To continue, re-run with '--force'."); - } - } - return 0; } @@ -3170,7 +2446,6 @@ sub install_db ($$) { mtr_add_arg($args, "--bootstrap"); mtr_add_arg($args, "--basedir=%s", $glob_basedir); mtr_add_arg($args, "--datadir=%s", $data_dir); - mtr_add_arg($args, "--loose-skip-ndbcluster"); mtr_add_arg($args, "--loose-skip-aria"); mtr_add_arg($args, "--disable-sync-frm"); mtr_add_arg($args, "--loose-disable-debug"); @@ -3268,105 +2543,6 @@ sub install_db ($$) { } -sub im_prepare_env($) { - my $instance_manager = shift; - - im_create_passwd_file($instance_manager); - im_prepare_data_dir($instance_manager); -} - - -sub im_create_passwd_file($) { - my $instance_manager = shift; - - my $pwd_file_path = $instance_manager->{'password_file'}; - - mtr_report("Creating IM password file ($pwd_file_path)"); - - open(OUT, ">", $pwd_file_path) - or mtr_error("Can't write to $pwd_file_path: $!"); - - print OUT $instance_manager->{'admin_login'}, ":", - $instance_manager->{'admin_sha1'}, "\n"; - - close(OUT); -} - - -sub im_create_defaults_file($) { - my $instance_manager = shift; - - my $defaults_file = $instance_manager->{'defaults_file'}; - - open(OUT, ">", $defaults_file) - or mtr_error("Can't write to $defaults_file: $!"); - - print OUT <{path_pid} -angel-pid-file = $instance_manager->{path_angel_pid} -socket = $instance_manager->{path_sock} -port = $instance_manager->{port} -password-file = $instance_manager->{password_file} -default-mysqld-path = $exe_mysqld - -EOF -; - - foreach my $instance (@{$instance_manager->{'instances'}}) - { - my $server_id = $instance->{'server_id'}; - - print OUT <{path_sock} -pid-file = $instance->{path_pid} -port = $instance->{port} -datadir = $instance->{path_datadir} -lc-messages-dir = $path_language -log = $instance->{path_datadir}/mysqld$server_id.log -log-error = $instance->{path_datadir}/mysqld$server_id.err.log -log-slow-queries = $instance->{path_datadir}/mysqld$server_id.slow.log -character-sets-dir = $path_charsetsdir -basedir = $glob_basedir -server_id = $server_id -shutdown-delay = 10 -skip-stack-trace -loose-skip-innodb -loose-skip-ndbcluster -EOF -; - if ( 
$mysql_version_id < 50100 ) - { - print OUT "skip-bdb\n"; - } - print OUT "nonguarded\n" if $instance->{'nonguarded'}; - if ( $mysql_version_id >= 50100 ) - { - print OUT "log-output=FILE\n" if $instance->{'old_log_format'}; - } - print OUT "\n"; - } - - close(OUT); -} - - -sub im_prepare_data_dir($) { - my $instance_manager = shift; - - foreach my $instance (@{$instance_manager->{'instances'}}) - { - copy_install_db( - 'im_mysqld_' . $instance->{'server_id'}, - $instance->{'path_datadir'}); - } -} - - - # # Restore snapshot of the installed slave databases # if the snapshot exists @@ -3423,56 +2599,6 @@ sub run_testcase_check_skip_test($) return 1; } - if ($tinfo->{'ndb_test'}) - { - foreach my $cluster (@{$clusters}) - { - # Slave cluster is skipped and thus not - # installed, no need to perform checks - last if ($opt_skip_ndbcluster_slave and - $cluster->{'name'} eq 'Slave'); - - # Using running cluster - no need - # to check if test should be skipped - # will be done by test itself - last if ($cluster->{'use_running'}); - - # If test needs this cluster, check binaries was found ok - if ( $cluster->{'executable_setup_failed'} ) - { - mtr_report_test_name($tinfo); - $tinfo->{comment}= - "Failed to find cluster binaries"; - mtr_report_test_failed($tinfo); - return 1; - } - - # If test needs this cluster, check it was installed ok - if ( !$cluster->{'installed_ok'} ) - { - mtr_report_test_name($tinfo); - $tinfo->{comment}= - "Cluster $cluster->{'name'} was not installed ok"; - mtr_report_test_failed($tinfo); - return 1; - } - - } - } - - if ( $tinfo->{'component_id'} eq 'im' ) - { - # If test needs im, check binaries was found ok - if ( $instance_manager->{'executable_setup_failed'} ) - { - mtr_report_test_name($tinfo); - $tinfo->{comment}= - "Failed to find MySQL manager binaries"; - mtr_report_test_failed($tinfo); - return 1; - } - } - return 0; } @@ -3492,15 +2618,6 @@ sub do_before_run_mysqltest($) if (!$opt_extern) { - if ( $mysql_version_id < 50000 ) { - # Set environment variable NDB_STATUS_OK to 1 - # if script decided to run mysqltest cluster _is_ installed ok - $ENV{'NDB_STATUS_OK'} = "1"; - } elsif ( $mysql_version_id < 50100 ) { - # Set environment variable NDB_STATUS_OK to YES - # if script decided to run mysqltest cluster _is_ installed ok - $ENV{'NDB_STATUS_OK'} = "YES"; - } if (defined $tinfo->{binlog_format} and $mysql_version_id > 50100 ) { # Dynamically switch binlog format of @@ -3552,16 +2669,6 @@ sub run_testcase_mark_logs($$) { mtr_tofile($mysqld->{path_myerr}, $log_msg); } - - if ( $tinfo->{'component_id'} eq 'im') - { - mtr_tofile($instance_manager->{path_err}, $log_msg); - mtr_tofile($instance_manager->{path_log}, $log_msg); - } - - # ndbcluster log file - mtr_tofile($path_ndb_testrun_log, $log_msg); - } sub find_testcase_skipped_reason($) @@ -3716,15 +2823,6 @@ sub run_testcase ($) { # Remove the file that mysqltest writes info to unlink($path_timefile); - - # ---------------------------------------------------------------------- - # Stop Instance Manager if we are processing an IM-test case. 
- # ---------------------------------------------------------------------- - if ( $tinfo->{'component_id'} eq 'im' and - !mtr_im_stop($instance_manager, $tinfo->{'name'})) - { - mtr_error("Failed to stop Instance Manager.") - } } @@ -3785,16 +2883,6 @@ sub restore_installed_db ($) { mtr_rmtree("$data_dir"); mtr_copy_dir("$path_snapshot/$name", "$data_dir"); } - - # Remove the ndb_*_fs dirs for all ndbd nodes - # forcing a clean start of ndb - foreach my $cluster (@{$clusters}) - { - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - mtr_rmtree("$ndbd->{'path_fs'}" ); - } - } } else { @@ -4025,23 +3113,6 @@ sub mysqld_arguments ($$$$) { $prefix); mtr_add_arg($args, "%s--local-infile", $prefix); - - my $cluster= $clusters->[$mysqld->{'cluster'}]; - if ( $cluster->{'pid'} || # Cluster is started - $cluster->{'use_running'} ) # Using running cluster - { - mtr_add_arg($args, "%s--ndbcluster", $prefix); - mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix, - $cluster->{'connect_string'}); - if ( $mysql_version_id >= 50100 ) - { - mtr_add_arg($args, "%s--ndb-extra-logging", $prefix); - } - } - else - { - mtr_add_arg($args, "%s--loose-skip-ndbcluster", $prefix); - } } else { @@ -4109,25 +3180,6 @@ sub mysqld_arguments ($$$$) { mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id); mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank); } - - my $cluster= $clusters->[$mysqld->{'cluster'}]; - if ( $cluster->{'pid'} || # Slave cluster is started - $cluster->{'use_running'} ) # Using running slave cluster - { - mtr_add_arg($args, "%s--ndbcluster", $prefix); - mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix, - $cluster->{'connect_string'}); - - if ( $mysql_version_id >= 50100 ) - { - mtr_add_arg($args, "%s--ndb-extra-logging", $prefix); - } - } - else - { - mtr_add_arg($args, "%s--loose-skip-ndbcluster", $prefix); - } - } # end slave if ( $debug_compiled_binaries && defined $opt_debug ) @@ -4315,15 +3367,6 @@ sub stop_all_servers () { mtr_report("Stopping All Servers"); - if ( ! $opt_skip_im ) - { - mtr_report("Shutting-down Instance Manager"); - unless (mtr_im_stop($instance_manager, "stop_all_servers")) - { - mtr_error("Failed to stop Instance Manager.") - } - } - my %admin_pids; # hash of admin processes that requests shutdown my @kill_pids; # list of processes to shutdown/kill my $pid; @@ -4349,45 +3392,11 @@ sub stop_all_servers () { } } - # Start shutdown of clusters - foreach my $cluster (@{$clusters}) - { - if ( $cluster->{'pid'} ) - { - $pid= mtr_ndbmgm_start($cluster, "shutdown"); - $admin_pids{$pid}= 1; - - push(@kill_pids,{ - pid => $cluster->{'pid'}, - pidfile => $cluster->{'path_pid'} - }); - - $cluster->{'pid'}= 0; # Assume we are done with it - - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - if ( $ndbd->{'pid'} ) - { - push(@kill_pids,{ - pid => $ndbd->{'pid'}, - pidfile => $ndbd->{'path_pid'}, - }); - $ndbd->{'pid'}= 0; - } - } - } - } - # Wait blocking until all shutdown processes has completed mtr_wait_blocking(\%admin_pids); # Make sure that process has shutdown else try to kill them mtr_check_stop_servers(\@kill_pids); - - foreach my $mysqld (@{$master}, @{$slave}) - { - rm_ndbcluster_tables($mysqld->{'path_myddir'}); - } } @@ -4413,25 +3422,6 @@ sub run_testcase_need_master_restart($) $do_restart= 1; # Always restart if --force-restart in -opt file mtr_verbose("Restart master: Restart forced with --force-restart"); } - elsif ( ! 
$opt_skip_ndbcluster and - !$tinfo->{'ndb_test'} and - $clusters->[0]->{'pid'} != 0 ) - { - $do_restart= 1; # Restart without cluster - mtr_verbose("Restart master: Test does not need cluster"); - } - elsif ( ! $opt_skip_ndbcluster and - $tinfo->{'ndb_test'} and - $clusters->[0]->{'pid'} == 0 ) - { - $do_restart= 1; # Restart with cluster - mtr_verbose("Restart master: Test need cluster"); - } - elsif( $tinfo->{'component_id'} eq 'im' ) - { - $do_restart= 1; - mtr_verbose("Restart master: Always restart for im tests"); - } elsif ( $master->[0]->{'running_master_options'} and $master->[0]->{'running_master_options'}->{'timezone'} ne $tinfo->{'timezone'}) @@ -4556,30 +3546,6 @@ sub run_testcase_stop_servers($$$) { $mysqld->{'pid'}= 0; # Assume we are done with it } } - - # Start shutdown of master cluster - my $cluster= $clusters->[0]; - if ( $cluster->{'pid'} ) - { - $pid= mtr_ndbmgm_start($cluster, "shutdown"); - $admin_pids{$pid}= 1; - - push(@kill_pids,{ - pid => $cluster->{'pid'}, - pidfile => $cluster->{'path_pid'} - }); - - $cluster->{'pid'}= 0; # Assume we are done with it - - foreach my $ndbd (@{$cluster->{'ndbds'}}) - { - push(@kill_pids,{ - pid => $ndbd->{'pid'}, - pidfile => $ndbd->{'path_pid'}, - }); - $ndbd->{'pid'}= 0; # Assume we are done with it - } - } } if ( $do_restart || $do_slave_restart ) @@ -4609,31 +3575,6 @@ sub run_testcase_stop_servers($$$) { $mysqld->{'pid'}= 0; # Assume we are done with it } } - - # Start shutdown of slave cluster - my $cluster= $clusters->[1]; - if ( $cluster->{'pid'} ) - { - $pid= mtr_ndbmgm_start($cluster, "shutdown"); - - $admin_pids{$pid}= 1; - - push(@kill_pids,{ - pid => $cluster->{'pid'}, - pidfile => $cluster->{'path_pid'} - }); - - $cluster->{'pid'}= 0; # Assume we are done with it - - foreach my $ndbd (@{$cluster->{'ndbds'}} ) - { - push(@kill_pids,{ - pid => $ndbd->{'pid'}, - pidfile => $ndbd->{'path_pid'}, - }); - $ndbd->{'pid'}= 0; # Assume we are done with it - } - } } # ---------------------------------------------------------------------- @@ -4647,15 +3588,6 @@ sub run_testcase_stop_servers($$$) { # Make sure that process has shutdown else try to kill them mtr_check_stop_servers(\@kill_pids); - - foreach my $mysqld (@{$master}, @{$slave}) - { - if ( ! $mysqld->{'pid'} ) - { - # Remove ndbcluster tables if server is stopped - rm_ndbcluster_tables($mysqld->{'path_myddir'}); - } - } } @@ -4675,14 +3607,6 @@ sub run_testcase_start_servers($) { if ( $tinfo->{'component_id'} eq 'mysqld' ) { - if ( ! $opt_skip_ndbcluster and - !$clusters->[0]->{'pid'} and - $tinfo->{'ndb_test'} ) - { - # Test need cluster, cluster is not started, start it - ndbcluster_start($clusters->[0], ""); - } - if ( !$master->[0]->{'pid'} ) { # Master mysqld is not started @@ -4692,48 +3616,9 @@ sub run_testcase_start_servers($) { } - if ( $clusters->[0]->{'pid'} || $clusters->[0]->{'use_running'} - and ! $master->[1]->{'pid'} and - $tinfo->{'master_num'} > 1 ) - { - # Test needs cluster, start an extra mysqld connected to cluster - - if ( $mysql_version_id >= 50100 ) - { - # First wait for first mysql server to have created ndb system - # tables ok FIXME This is a workaround so that only one mysqld - # create the tables - if ( ! 
sleep_until_file_created( - "$master->[0]->{'path_myddir'}/mysql/ndb_apply_status.ndb", - $master->[0]->{'start_timeout'}, - $master->[0]->{'pid'})) - { - - $tinfo->{'comment'}= "Failed to create 'mysql/ndb_apply_status' table"; - return 1; - } - } - mysqld_start($master->[1],$tinfo->{'master_opt'},[]); - } - # Save this test case information, so next can examine it $master->[0]->{'running_master_options'}= $tinfo; } - elsif ( ! $opt_skip_im and $tinfo->{'component_id'} eq 'im' ) - { - # We have to create defaults file every time, in order to ensure that it - # will be the same for each test. The problem is that test can change the - # file (by SET/UNSET commands), so w/o recreating the file, execution of - # one test can affect the other. - - im_create_defaults_file($instance_manager); - - if ( ! mtr_im_start($instance_manager, $tinfo->{im_opts}) ) - { - $tinfo->{'comment'}= "Failed to start Instance Manager. "; - return 1; - } - } # ---------------------------------------------------------------------- # Start slaves - if needed @@ -4744,14 +3629,6 @@ sub run_testcase_start_servers($) { do_before_start_slave($tinfo); - if ( ! $opt_skip_ndbcluster_slave and - !$clusters->[1]->{'pid'} and - $tinfo->{'ndb_test'} ) - { - # Test need slave cluster, cluster is not started, start it - ndbcluster_start($clusters->[1], ""); - } - for ( my $idx= 0; $idx < $tinfo->{'slave_num'}; $idx++ ) { if ( ! $slave->[$idx]->{'pid'} ) @@ -4766,20 +3643,6 @@ sub run_testcase_start_servers($) { $slave->[0]->{'running_slave_options'}= $tinfo; } - # Wait for clusters to start - foreach my $cluster (@{$clusters}) - { - - next if !$cluster->{'pid'}; - - if (ndbcluster_wait_started($cluster, "")) - { - # failed to start - $tinfo->{'comment'}= "Start of $cluster->{'name'} cluster failed"; - return 1; - } - } - # Wait for mysqld's to start foreach my $mysqld (@{$master},@{$slave}) { @@ -4908,14 +3771,7 @@ sub run_mysqltest ($) { mtr_add_arg($args, "--mark-progress") if $opt_mark_progress; - if ($tinfo->{'component_id'} eq 'im') - { - mtr_add_arg($args, "--socket=%s", $instance_manager->{'path_sock'}); - mtr_add_arg($args, "--port=%d", $instance_manager->{'port'}); - mtr_add_arg($args, "--user=%s", $instance_manager->{'admin_login'}); - mtr_add_arg($args, "--password=%s", $instance_manager->{'admin_password'}); - } - else # component_id == mysqld + # component_id == mysqld { mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_sock'}); mtr_add_arg($args, "--port=%d", $master->[0]->{'port'}); @@ -5325,7 +4181,6 @@ Options to control what engine/variation to run skip-ssl Dont start server with support for ssl connections bench Run the benchmark suite small-bench Run the benchmarks with --small-tests --small-tables - ndb|with-ndbcluster Use cluster as default table type vs-config Visual Studio configuration used to create executables (default: MTR_VS_CONFIG environment variable) @@ -5347,10 +4202,6 @@ Options to control directories to use Options to control what test suites or cases to run force Continue to run the suite after failure - with-ndbcluster-only Run only tests that include "ndb" in the filename - skip-ndb[cluster] Skip all tests that need cluster - skip-ndb[cluster]-slave Skip all tests that need a slave cluster - ndb-extra Run extra tests from ndb directory do-test=PREFIX or REGEX Run test cases which name are prefixed with PREFIX or fulfills REGEX @@ -5362,7 +4213,6 @@ Options to control what test suites or cases to run list of suite names. 
The default is: "$opt_suites_default" skip-rpl Skip the replication test cases. - skip-im Don't start IM, and skip the IM test cases big-test Set the environment variable BIG_TEST, which can be checked from test cases. combination="ARG1 .. ARG2" Specify a set of "mysqld" arguments for one @@ -5373,8 +4223,6 @@ Options that specify ports master_port=PORT Specify the port number used by the first master slave_port=PORT Specify the port number used by the first slave - ndbcluster-port=PORT Specify the port number used by cluster - ndbcluster-port-slave=PORT Specify the port number used by slave cluster mtr-build-thread=# Specify unique collection of ports. Can also be set by setting the environment variable MTR_BUILD_THREAD. @@ -5391,8 +4239,6 @@ Options that pass on options Options to run test on running server extern Use running server for tests - ndb-connectstring=STR Use running cluster, and connect using STR - ndb-connectstring-slave=STR Use running slave cluster, and connect using STR user=USER User for connection to extern server socket=PATH Socket for connection to extern server diff --git a/mysql-test/lib/v1/ndb_config_1_node.ini b/mysql-test/lib/v1/ndb_config_1_node.ini deleted file mode 100644 index 4e0be7796dc..00000000000 --- a/mysql-test/lib/v1/ndb_config_1_node.ini +++ /dev/null @@ -1,47 +0,0 @@ -[ndbd default] -NoOfReplicas= 1 -MaxNoOfConcurrentTransactions= 64 -MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations -DataMemory= CHOOSE_DataMemory -IndexMemory= CHOOSE_IndexMemory -Diskless= CHOOSE_Diskless -TimeBetweenWatchDogCheck= 30000 -DataDir= CHOOSE_FILESYSTEM -MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes -MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes -TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 8 -FragmentLogFileSize= 6M -DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory - -# -# Increase timeouts to cater for slow test-machines -# (possibly running several tests in parallell) -# -HeartbeatIntervalDbDb= 30000 -HeartbeatIntervalDbApi= 30000 -#TransactionDeadlockDetectionTimeout= 7500 - -[ndbd] -HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress - -[ndb_mgmd] -HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress -DataDir= CHOOSE_FILESYSTEM # -PortNumber= CHOOSE_PORT_MGM - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] diff --git a/mysql-test/lib/v1/ndb_config_2_node.ini b/mysql-test/lib/v1/ndb_config_2_node.ini deleted file mode 100644 index 57e4d049ad6..00000000000 --- a/mysql-test/lib/v1/ndb_config_2_node.ini +++ /dev/null @@ -1,55 +0,0 @@ -[ndbd default] -NoOfReplicas= 2 -MaxNoOfConcurrentTransactions= 64 -MaxNoOfConcurrentOperations= CHOOSE_MaxNoOfConcurrentOperations -DataMemory= CHOOSE_DataMemory -IndexMemory= CHOOSE_IndexMemory -Diskless= CHOOSE_Diskless -TimeBetweenWatchDogCheck= 30000 -DataDir= CHOOSE_FILESYSTEM -MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes -MaxNoOfAttributes= CHOOSE_MaxNoOfAttributes -TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 4 -FragmentLogFileSize=12M -DiskPageBufferMemory= CHOOSE_DiskPageBufferMemory -# O_DIRECT has issues on 2.4 whach have not been handled, Bug #29612 -#ODirect= 1 -# the following parametes just function as a small regression -# test that the parameter exists -InitialNoOfOpenFiles= 27 - -# -# Increase timeouts to cater for slow test-machines -# (possibly running several tests in parallell) -# -HeartbeatIntervalDbDb= 30000 -HeartbeatIntervalDbApi= 30000 -#TransactionDeadlockDetectionTimeout= 7500 - -[ndbd] 
-HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress - -[ndbd] -HostName= CHOOSE_HOSTNAME_2 # hostname is a valid network adress - -[ndb_mgmd] -HostName= CHOOSE_HOSTNAME_1 # hostname is a valid network adress -DataDir= CHOOSE_FILESYSTEM # -PortNumber= CHOOSE_PORT_MGM - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 861bc875896..5e03bfea15b 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -341,16 +341,6 @@ sub check_timeout ($) { return testcase_timeout($_[0]) / 10; } our $opt_warnings= 1; -our $ndbcluster_enabled= 0; -my $opt_include_ndbcluster= 0; -my $opt_skip_ndbcluster= 0; - -my $exe_ndbd; -my $exe_ndbmtd; -my $exe_ndb_mgmd; -my $exe_ndb_waiter; -my $exe_ndb_mgm; - our %mysqld_variables; our @optional_plugins; @@ -413,7 +403,6 @@ sub main { # Run the mysqld to find out what features are available collect_mysqld_features(); } - check_ndbcluster_support(); check_ssl_support(); check_debug_support(); @@ -605,12 +594,6 @@ sub run_test_server ($$$) { my $test_failure= 0; # Set true if test suite failed my $extra_warnings= []; # Warnings found during server shutdowns - # Scheduler variables - my $max_ndb= $ENV{MTR_MAX_NDB} || $childs / 2; - $max_ndb = $childs if $max_ndb > $childs; - $max_ndb = 1 if $max_ndb < 1; - my $num_ndb_tests= 0; - my $completed= []; my %running; my $result; @@ -784,9 +767,6 @@ sub run_test_server ($$$) { mtr_error("'", $result->{name},"' is not known to be running") unless delete $running{$result->key()}; - # Update scheduler variables - $num_ndb_tests-- if ($result->{ndb_test}); - # Save result in completed list push(@$completed, $result); @@ -819,7 +799,6 @@ sub run_test_server ($$$) { # Find next test to schedule # - Try to use same configuration as worker used last time - # - Limit number of parallel ndb tests my $next; my $second_best; @@ -839,12 +818,6 @@ sub run_test_server ($$$) { redo; } - # Limit number of parallell NDB tests - if ($t->{ndb_test} and $num_ndb_tests >= $max_ndb){ - #mtr_report("Skipping, num ndb is already at max, $num_ndb_tests"); - next; - } - # From secondary choices, we prefer to pick a 'long-running' test if # possible; this helps avoid getting stuck with a few of those at the # end of high --parallel runs, with most workers being idle. 
@@ -897,7 +870,6 @@ sub run_test_server ($$$) { delete $next->{criteria}; $next->write_test($sock, 'TESTCASE'); $running{$next->key()}= $next; - $num_ndb_tests++ if ($next->{ndb_test}); } else { # No more test, tell child to exit @@ -1134,9 +1106,6 @@ sub command_line_setup { # Control what test suites or cases to run 'force+' => \$opt_force, - 'with-ndbcluster-only' => \&collect_option, - 'ndb|include-ndbcluster' => \$opt_include_ndbcluster, - 'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster, 'suite|suites=s' => \$opt_suites, 'skip-rpl' => \&collect_option, 'skip-test=s' => \&collect_option, @@ -2011,51 +1980,6 @@ sub executable_setup () { $exe_mysql_embedded= mtr_exe_maybe_exists("$basedir/libmysqld/examples/mysql_embedded"); - if ( $ndbcluster_enabled ) - { - # Look for single threaded NDB - $exe_ndbd= - my_find_bin($bindir, - ["storage/ndb/src/kernel", "libexec", "sbin", "bin"], - "ndbd"); - - # Look for multi threaded NDB - $exe_ndbmtd= - my_find_bin($bindir, - ["storage/ndb/src/kernel", "libexec", "sbin", "bin"], - "ndbmtd", NOT_REQUIRED); - if ($exe_ndbmtd) - { - my $mtr_ndbmtd = $ENV{MTR_NDBMTD}; - if ($mtr_ndbmtd) - { - mtr_report(" - multi threaded ndbd found, will be used always"); - $exe_ndbd = $exe_ndbmtd; - } - else - { - mtr_report(" - multi threaded ndbd found, will be ". - "used \"round robin\""); - } - } - - $exe_ndb_mgmd= - my_find_bin($bindir, - ["storage/ndb/src/mgmsrv", "libexec", "sbin", "bin"], - "ndb_mgmd"); - - $exe_ndb_mgm= - my_find_bin($bindir, - ["storage/ndb/src/mgmclient", "bin"], - "ndb_mgm"); - - $exe_ndb_waiter= - my_find_bin($bindir, - ["storage/ndb/tools/", "bin"], - "ndb_waiter"); - - } - # Look for mysqltest executable if ( $opt_embedded_server ) { @@ -2280,14 +2204,6 @@ sub environment_setup { } } - # -------------------------------------------------------------------------- - # Add the path where libndbclient can be found - # -------------------------------------------------------------------------- - if ( $ndbcluster_enabled ) - { - push(@ld_library_paths, "$basedir/storage/ndb/src/.libs"); - } - # -------------------------------------------------------------------------- # Valgrind need to be run with debug libraries otherwise it's almost # impossible to add correct supressions, that means if "/usr/lib/debug" @@ -2373,34 +2289,6 @@ sub environment_setup { # $ENV{HAVE_BROKEN_DNS}= defined(gethostbyname('invalid_hostname')); - # ---------------------------------------------------- - # Setup env for NDB - # ---------------------------------------------------- - if ( $ndbcluster_enabled ) - { - $ENV{'NDB_MGM'}= - my_find_bin($bindir, - ["storage/ndb/src/mgmclient", "bin"], - "ndb_mgm"); - - $ENV{'NDB_TOOLS_DIR'}= - my_find_dir($bindir, - ["storage/ndb/tools", "bin"]); - - $ENV{'NDB_EXAMPLES_DIR'}= - my_find_dir($basedir, - ["storage/ndb/ndbapi-examples", "bin"]); - - $ENV{'NDB_EXAMPLES_BINARY'}= - my_find_bin($bindir, - ["storage/ndb/ndbapi-examples/ndbapi_simple", "bin"], - "ndbapi_simple", NOT_REQUIRED); - - my $path_ndb_testrun_log= "$opt_vardir/log/ndb_testrun.log"; - $ENV{'NDB_TOOLS_OUTPUT'}= $path_ndb_testrun_log; - $ENV{'NDB_EXAMPLES_OUTPUT'}= $path_ndb_testrun_log; - } - # ---------------------------------------------------- # mysql clients # ---------------------------------------------------- @@ -2868,316 +2756,6 @@ sub vs_config_dirs ($$) { "$basedir/$path_part/debug/$exe"); } - -sub check_ndbcluster_support { - - my $ndbcluster_supported = 0; - if ($mysqld_variables{'ndb-connectstring'}) - { - $ndbcluster_supported = 1; - } - - if 
($opt_skip_ndbcluster && $opt_include_ndbcluster) - { - # User is ambivalent. Theoretically the arg which was - # given last on command line should win, but that order is - # unknown at this time. - mtr_error("Ambigous command, both --include-ndbcluster " . - " and --skip-ndbcluster was specified"); - } - - # Check if this is MySQL Cluster, ie. mysql version string ends - # with -ndb-Y.Y.Y[-status] - if ( defined $mysql_version_extra && - $mysql_version_extra =~ /-ndb-([0-9]*)\.([0-9]*)\.([0-9]*)/ ) - { - # MySQL Cluster tree - mtr_report(" - MySQL Cluster detected"); - - if ($opt_skip_ndbcluster) - { - mtr_report(" - skipping ndbcluster(--skip-ndbcluster)"); - return; - } - - if (!$ndbcluster_supported) - { - # MySQL Cluster tree, but mysqld was not compiled with - # ndbcluster -> fail unless --skip-ndbcluster was used - mtr_error("This is MySQL Cluster but mysqld does not " . - "support ndbcluster. Use --skip-ndbcluster to " . - "force mtr to run without it."); - } - - # mysqld was compiled with ndbcluster -> auto enable - } - else - { - # Not a MySQL Cluster tree - if (!$ndbcluster_supported) - { - if ($opt_include_ndbcluster) - { - mtr_error("Could not detect ndbcluster support ". - "requested with --include-ndbcluster"); - } - - # Silently skip, mysqld was compiled without ndbcluster - # which is the default case - return; - } - - if ($opt_skip_ndbcluster) - { - # Compiled with ndbcluster but ndbcluster skipped - mtr_report(" - skipping ndbcluster(--skip-ndbcluster)"); - return; - } - - - # Not a MySQL Cluster tree, enable ndbcluster - # if --include-ndbcluster was used - if ($opt_include_ndbcluster) - { - # enable ndbcluster - } - else - { - mtr_report(" - skipping ndbcluster(disabled by default)"); - return; - } - } - - mtr_report(" - enabling ndbcluster"); - $ndbcluster_enabled= 1; - # Add MySQL Cluster test suites - push @DEFAULT_SUITES, qw(ndb ndb_binlog rpl_ndb ndb_rpl ndb_memcache); - return; -} - - -sub ndbcluster_wait_started { - my $cluster= shift; - my $ndb_waiter_extra_opt= shift; - my $path_waitlog= join('/', $opt_vardir, $cluster->name(), "ndb_waiter.log"); - - my $args; - mtr_init_args(\$args); - mtr_add_arg($args, "--defaults-file=%s", $path_config_file); - mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix()); - mtr_add_arg($args, "--timeout=%d", $opt_start_timeout); - - if ($ndb_waiter_extra_opt) - { - mtr_add_arg($args, "$ndb_waiter_extra_opt"); - } - - # Start the ndb_waiter which will connect to the ndb_mgmd - # and poll it for state of the ndbd's, will return when - # all nodes in the cluster is started - - my $res= My::SafeProcess->run - ( - name => "ndb_waiter ".$cluster->name(), - path => $exe_ndb_waiter, - args => \$args, - output => $path_waitlog, - error => $path_waitlog, - append => 1, - ); - - # Check that ndb_mgmd(s) are still alive - foreach my $ndb_mgmd ( in_cluster($cluster, ndb_mgmds()) ) - { - my $proc= $ndb_mgmd->{proc}; - if ( ! $proc->wait_one(0) ) - { - mtr_warning("$proc died"); - return 2; - } - } - - # Check that all started ndbd(s) are still alive - foreach my $ndbd ( in_cluster($cluster, ndbds()) ) - { - my $proc= $ndbd->{proc}; - next unless defined $proc; - if ( ! 
$proc->wait_one(0) ) - { - mtr_warning("$proc died"); - return 3; - } - } - - if ($res) - { - mtr_verbose("ndbcluster_wait_started failed"); - return 1; - } - return 0; -} - - -sub ndb_mgmd_wait_started($) { - my ($cluster)= @_; - - my $retries= 100; - while ($retries) - { - my $result= ndbcluster_wait_started($cluster, "--no-contact"); - if ($result == 0) - { - # ndb_mgmd is started - mtr_verbose("ndb_mgmd is started"); - return 0; - } - elsif ($result > 1) - { - mtr_warning("Cluster process failed while waiting for start"); - return $result; - } - - mtr_milli_sleep(100); - $retries--; - } - - return 1; -} - -sub ndb_mgmd_stop{ - my $ndb_mgmd= shift or die "usage: ndb_mgmd_stop()"; - - my $host=$ndb_mgmd->value('HostName'); - my $port=$ndb_mgmd->value('PortNumber'); - mtr_verbose("Stopping cluster '$host:$port'"); - - my $args; - mtr_init_args(\$args); - mtr_add_arg($args, "--ndb-connectstring=%s:%s", $host,$port); - mtr_add_arg($args, "-e"); - mtr_add_arg($args, "shutdown"); - - My::SafeProcess->run - ( - name => "ndb_mgm shutdown $host:$port", - path => $exe_ndb_mgm, - args => \$args, - output => "/dev/null", - ); -} - -sub ndb_mgmd_start ($$) { - my ($cluster, $ndb_mgmd)= @_; - - mtr_verbose("ndb_mgmd_start"); - - my $dir= $ndb_mgmd->value("DataDir"); - mkpath($dir) unless -d $dir; - - my $args; - mtr_init_args(\$args); - mtr_add_arg($args, "--defaults-file=%s", $path_config_file); - mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix()); - mtr_add_arg($args, "--mycnf"); - mtr_add_arg($args, "--nodaemon"); - - my $path_ndb_mgmd_log= "$dir/ndb_mgmd.log"; - - $ndb_mgmd->{'proc'}= My::SafeProcess->new - ( - name => $ndb_mgmd->after('cluster_config.'), - path => $exe_ndb_mgmd, - args => \$args, - output => $path_ndb_mgmd_log, - error => $path_ndb_mgmd_log, - append => 1, - verbose => $opt_verbose, - shutdown => sub { ndb_mgmd_stop($ndb_mgmd) }, - ); - mtr_verbose("Started $ndb_mgmd->{proc}"); - - # FIXME Should not be needed - # Unfortunately the cluster nodes will fail to start - # if ndb_mgmd has not started properly - if (ndb_mgmd_wait_started($cluster)) - { - mtr_warning("Failed to wait for start of ndb_mgmd"); - return 1; - } - - return 0; -} - -sub ndbd_stop { - # Intentionally left empty, ndbd nodes will be shutdown - # by sending "shutdown" to ndb_mgmd -} - -our $exe_ndbmtd_counter= 0; - -sub ndbd_start { - my ($cluster, $ndbd)= @_; - - mtr_verbose("ndbd_start"); - - my $dir= $ndbd->value("DataDir"); - mkpath($dir) unless -d $dir; - - my $args; - mtr_init_args(\$args); - mtr_add_arg($args, "--defaults-file=%s", $path_config_file); - mtr_add_arg($args, "--defaults-group-suffix=%s", $cluster->suffix()); - mtr_add_arg($args, "--nodaemon"); - -# > 5.0 { 'character-sets-dir' => \&fix_charset_dir }, - - my $exe= $exe_ndbd; - if ($exe_ndbmtd and ($exe_ndbmtd_counter++ % 2) == 0) - { - # Use ndbmtd every other time - $exe= $exe_ndbmtd; - } - - my $path_ndbd_log= "$dir/ndbd.log"; - my $proc= My::SafeProcess->new - ( - name => $ndbd->after('cluster_config.'), - path => $exe, - args => \$args, - output => $path_ndbd_log, - error => $path_ndbd_log, - append => 1, - verbose => $opt_verbose, - shutdown => sub { ndbd_stop($ndbd) }, - ); - mtr_verbose("Started $proc"); - - $ndbd->{proc}= $proc; - - return; -} - - -sub ndbcluster_start ($) { - my ($cluster) = @_; - - mtr_verbose("ndbcluster_start '".$cluster->name()."'"); - - foreach my $ndb_mgmd ( in_cluster($cluster, ndb_mgmds()) ) - { - next if started($ndb_mgmd); - ndb_mgmd_start($cluster, $ndb_mgmd); - } - - foreach my $ndbd ( 
in_cluster($cluster, ndbds()) ) - { - next if started($ndbd); - ndbd_start($cluster, $ndbd); - } - - return 0; -} - sub mysql_server_start($) { my ($mysqld, $tinfo) = @_; @@ -4112,8 +3690,6 @@ sub config_files($) { sub _like { return $config ? $config->like($_[0]) : (); } sub mysqlds { return _like('mysqld\.'); } -sub ndbds { return _like('cluster_config\.ndbd\.');} -sub ndb_mgmds { return _like('cluster_config\.ndb_mgmd\.'); } sub fix_servers($) { my ($tinfo) = @_; @@ -4124,19 +3700,6 @@ sub fix_servers($) { START => \&mysql_server_start, WAIT => \&mysql_server_wait, }, - qr/mysql_cluster\./ => { - SORT => 200, - START => \&ndbcluster_start, - WAIT => \&ndbcluster_wait_started, - }, - qr/cluster_config\.ndb_mgmd\./ => { - SORT => 210, - START => undef, - }, - qr/cluster_config\.ndbd\./ => { - SORT => 220, - START => undef, - }, $tinfo->{suite}->servers() ); for ($config->groups()) { @@ -4788,7 +4351,6 @@ sub extract_warning_lines ($$) { ( @global_suppressions, qr/error .*connecting to master/, - qr/Plugin 'ndbcluster' will be forced to shutdown/, qr/InnoDB: Error: in ALTER TABLE `test`.`t[12]`/, qr/InnoDB: Error: table `test`.`t[12]` .*does not exist in the InnoDB internal/, qr/InnoDB: Warning: Setting innodb_use_sys_malloc/, @@ -4802,7 +4364,6 @@ sub extract_warning_lines ($$) { qr/Now setting lower_case_table_names to [02]/, qr/Setting lower_case_table_names=2/, qr/You have forced lower_case_table_names to 0/, - qr/Plugin 'ndbcluster' will be forced to shutdow/, qr/deprecated/, qr/Slave SQL thread retried transaction/, qr/Slave \(additional info\)/, @@ -5672,18 +5233,6 @@ sub servers_need_restart($) { ############################################ -# -# Filter a list of servers and return only those that are part -# of the specified cluster -# -sub in_cluster { - my ($cluster)= shift; - # Return only processes for a specific cluster - return grep { $_->suffix() eq $cluster->suffix() } @_; -} - - - # # Filter a list of servers and return the SafeProcess # for only those that are started or stopped @@ -6397,9 +5946,6 @@ Options to control what test suites or cases to run the execution will continue from the next test file. When specified twice, execution will continue executing the failed test file from the next command. - with-ndbcluster-only Run only tests that include "ndb" in the filename - skip-ndb[cluster] Skip all tests that need cluster. Default. 
- include-ndb[cluster] Enable all tests that need cluster do-test=PREFIX or REGEX Run test cases which name are prefixed with PREFIX or fulfills REGEX diff --git a/mysql-test/r/have_ndb_extra.require b/mysql-test/r/have_ndb_extra.require deleted file mode 100644 index 8f7c125196a..00000000000 --- a/mysql-test/r/have_ndb_extra.require +++ /dev/null @@ -1,3 +0,0 @@ -select 1; -1 -1 diff --git a/mysql-test/r/have_ndbapi_examples.require b/mysql-test/r/have_ndbapi_examples.require deleted file mode 100644 index 924d2d51708..00000000000 --- a/mysql-test/r/have_ndbapi_examples.require +++ /dev/null @@ -1,2 +0,0 @@ -have_ndb_example -1 diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index b6d2fb09d6c..3d261d0be5f 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -42,7 +42,7 @@ insert into t5 values (10); create view v1 (c) as SELECT table_name FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND -table_name not like 'ndb_%' AND table_name not like 'innodb_%' AND +table_name not like 'innodb_%' AND table_name not like 'xtradb_%'; select * from v1; c diff --git a/mysql-test/r/information_schema_all_engines.result b/mysql-test/r/information_schema_all_engines.result index f1f8e573fec..58ac182f3a4 100644 --- a/mysql-test/r/information_schema_all_engines.result +++ b/mysql-test/r/information_schema_all_engines.result @@ -397,7 +397,7 @@ Database: INFORMATION_SCHEMA Wildcard: inf_rmation_schema | Databases | | information_schema | -SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA; +SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') GROUP BY TABLE_SCHEMA; table_schema count(*) information_schema 55 mysql 30 diff --git a/mysql-test/r/ndb_default_cluster.require b/mysql-test/r/ndb_default_cluster.require deleted file mode 100644 index 3616ae0f343..00000000000 --- a/mysql-test/r/ndb_default_cluster.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -Ndb_config_from_host localhost diff --git a/mysql-test/r/not_ndb.require b/mysql-test/r/not_ndb.require deleted file mode 100644 index 36fcf7958d4..00000000000 --- a/mysql-test/r/not_ndb.require +++ /dev/null @@ -1,2 +0,0 @@ -Variable_name Value -have_ndbcluster NO diff --git a/mysql-test/r/not_ndb_default.require b/mysql-test/r/not_ndb_default.require deleted file mode 100644 index 09aae1ed1d0..00000000000 --- a/mysql-test/r/not_ndb_default.require +++ /dev/null @@ -1,2 +0,0 @@ -TRUE -1 diff --git a/mysql-test/std_data/funcs_1/ndb_tb1.txt b/mysql-test/std_data/funcs_1/ndb_tb1.txt deleted file mode 100644 index 4c20ed1661b..00000000000 --- a/mysql-test/std_data/funcs_1/ndb_tb1.txt +++ /dev/null @@ -1,10 +0,0 @@ - a`0 a`0 0` 0` a`0 a`0 0` 0` ! 
6 163 103 238 3058 30243 22056 9444 -5 1 1 1 -5 1 1 1 -5 1 1 1 -5 1 1 1 -5 -5 1 1 1 1 1 1 -5 0.0 1 0.0 1 0.0 1 0.0 -5 1 1 1 -5 -5 - aa0 aa0 1aa 1aa aa0 aa0 1aa 1aa @ 9 207 1 246 13214 57220 1505 58996 -4 2 2 2 -4 2 2 2 -4 2 2 2 -4 2 2 2 -4 -4 2 2 2 2 2 2 -4 1.1 2 1.1 2 1.1 2 1.1 -4 2 2 2 -4 -4 - ab0 ab0 2baa 2baa ab0 ab0 2baa 2baa # 3 50 103 193 10965 3038 31585 20149 -3 3 3 3 -3 3 3 3 -3 3 3 3 -3 3 3 3 -3 -3 3 3 3 3 3 3 -3 2.2 3 2.2 3 2.2 3 2.2 -3 3 3 3 -3 -3 - ac0 ac0 3caaa 3caaa ac0 ac0 3caaa 3caaa $ 62 188 47 176 5103 58378 13178 38317 -2 4 4 4 -2 4 4 4 -2 4 4 4 -2 4 4 4 -2 -2 4 4 4 4 4 4 -2 3.3 4 3.3 4 3.3 4 3.3 -2 4 4 4 -2 -2 - ad0 ad0 4daaaa 4daaaa ad0 ad0 4daaaa 4daaaa % 59 15 21 80 17942 48443 12646 53903 -1 5 5 5 -1 5 5 5 -1 5 5 5 -1 5 5 5 -1 -1 5 5 5 5 5 5 -1 4.4 5 4.4 5 4.4 5 4.4 -1 5 5 5 -1 -1 - ae0 ae0 5eaaaaa 5eaaaaa ae0 ae0 5eaaaaa 5eaaaaa ^ 86 223 103 88 3880 31147 5801 28348 0 6 6 6 0 6 6 6 0 6 6 6 0 6 6 6 0 0 6 6 6 6 6 6 0 5.5 6 5.5 6 5.5 6 5.5 0 6 6 6 0 0 - af0 af0 6faaaaaa 6faaaaaa af0 af0 6faaaaaa 6faaaaaa & 124 125 77 208 2591 29533 18803 21557 1 7 7 7 1 7 7 7 1 7 7 7 1 7 7 7 1 1 7 7 7 7 7 7 1 6.6 7 6.6 7 6.6 7 6.6 1 7 7 7 1 1 - ag0 ag0 7gaaaaaaa 7gaaaaaaa ag0 ag0 7gaaaaaaa 7gaaaaaaa * 123 103 80 92 10179 60769 25778 58195 2 8 8 8 2 8 8 8 2 8 8 8 2 8 8 8 2 2 8 8 8 8 8 8 2 7.7 8 7.7 8 7.7 8 7.7 2 8 8 8 2 2 - a^0 a^0 8^aaaaaaaa 8^aaaaaaaa a^0 a^0 8^aaaaaaaa 8^aaaaaaaa ( 111 166 81 66 5159 2177 6774 38396 3 9 9 9 3 9 9 9 3 9 9 9 3 9 9 9 3 3 9 9 9 9 9 9 3 8.8 9 8.8 9 8.8 9 8.8 3 9 9 9 3 3 - a_0 a_0 9_aaaaaaaaa 9_aaaaaaaaa a_0 a_0 9_aaaaaaaaa 9_aaaaaaaaa ) 37 174 97 34 9183 16470 13064 6297 4 10 10 10 4 10 10 10 4 10 10 10 4 10 10 10 4 4 10 10 10 10 10 10 4 9.9 10 9.9 10 9.9 10 9.9 4 10 10 10 4 4 diff --git a/mysql-test/std_data/funcs_1/ndb_tb2.txt b/mysql-test/std_data/funcs_1/ndb_tb2.txt deleted file mode 100644 index 8ae7dea6df4..00000000000 --- a/mysql-test/std_data/funcs_1/ndb_tb2.txt +++ /dev/null @@ -1,10 +0,0 @@ -1 1 1 1 1 1 -5 0.0 1 0.0 1 0.0 1 0.0 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1000-01-01 838:59:58 1970-01-02 00:00:01 19700102000001 1902 1902 1902 2 2 -2 2 2 2 2 2 -4 1.1 2 1.1 2 1.1 2 1.1 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1000-01-02 838:59:57 1970-01-03 00:00:02 19700103000002 1903 1903 1903 1 3 -3 3 3 3 3 3 -3 2.2 3 2.2 3 2.2 3 2.2 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 
1.175494354e-38 1.175494354e-38 1.175494354e-38 1000-01-03 838:59:56 1970-01-04 00:00:03 19700104000003 1904 1904 1904 2 1 -4 4 4 4 4 4 -2 3.3 4 3.3 4 3.3 4 3.3 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1000-01-04 838:59:55 1970-01-05 00:00:04 19700105000004 1905 1905 1905 1 2 -5 5 5 5 5 5 -1 4.4 5 4.4 5 4.4 5 4.4 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1000-01-05 838:59:54 1970-01-06 00:00:05 19700106000005 1906 1906 1906 2 3 -6 6 6 6 6 6 0 5.5 6 5.5 6 5.5 6 5.5 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1000-01-06 838:59:53 1970-01-07 00:00:06 19700107000006 1907 1907 1907 1 1 -7 7 7 7 7 7 1 6.6 7 6.6 7 6.6 7 6.6 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1000-01-07 838:59:52 1970-01-08 00:00:07 19700108000007 1908 1908 1908 2 2 -8 8 8 8 8 8 2 7.7 8 7.7 8 7.7 8 7.7 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1000-01-08 838:59:51 1970-01-09 00:00:08 19700109000008 1909 1909 1909 1 3 -9 9 9 9 9 9 3 8.8 9 8.8 9 8.8 9 8.8 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1000-01-09 838:59:50 1970-01-10 00:00:09 19700110000009 1910 1910 1910 2 1 -10 10 10 10 10 10 4 9.9 10 9.9 10 9.9 10 9.9 
-1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1000-01-10 838:59:49 1970-01-11 00:00:10 19700111000010 1911 1911 1911 1 2 diff --git a/mysql-test/std_data/funcs_1/ndb_tb3.txt b/mysql-test/std_data/funcs_1/ndb_tb3.txt deleted file mode 100644 index a01cc36da54..00000000000 --- a/mysql-test/std_data/funcs_1/ndb_tb3.txt +++ /dev/null @@ -1,10 +0,0 @@ -! ! ! a`0 a`0 0` 0` a`0 a`0 0` 0` ! 37 102 115 214 22348 22112 23636 18043 -5 1 1 1 -5 1 1 1 -5 1 1 1 -5 1 1 1 -5 -5 1 1 1 1 1 1 -5 0.0 1 0.0 1 0.0 1 0.0 -5 1 1 1 -5 -5 -@ @ @ aa0 aa0 1aa 1aa aa0 aa0 1aa 1aa @ 30 114 62 146 22059 6000 19024 8674 -4 2 2 2 -4 2 2 2 -4 2 2 2 -4 2 2 2 -4 -4 2 2 2 2 2 2 -4 1.1 2 1.1 2 1.1 2 1.1 -4 2 2 2 -4 -4 -# # # ab0 ab0 2baa 2baa ab0 ab0 2baa 2baa # 113 254 52 51 27963 63797 516 63989 -3 3 3 3 -3 3 3 3 -3 3 3 3 -3 3 3 3 -3 -3 3 3 3 3 3 3 -3 2.2 3 2.2 3 2.2 3 2.2 -3 3 3 3 -3 -3 -$ $ $ ac0 ac0 3caaa 3caaa ac0 ac0 3caaa 3caaa $ 70 78 40 203 28716 18828 14939 30960 -2 4 4 4 -2 4 4 4 -2 4 4 4 -2 4 4 4 -2 -2 4 4 4 4 4 4 -2 3.3 4 3.3 4 3.3 4 3.3 -2 4 4 4 -2 -2 -% % % ad0 ad0 4daaaa 4daaaa ad0 ad0 4daaaa 4daaaa % 1 228 76 249 16746 12853 8405 35402 -1 5 5 5 -1 5 5 5 -1 5 5 5 -1 5 5 5 -1 -1 5 5 5 5 5 5 -1 4.4 5 4.4 5 4.4 5 4.4 -1 5 5 5 -1 -1 -^ ^ ^ ae0 ae0 5eaaaaa 5eaaaaa ae0 ae0 5eaaaaa 5eaaaaa ^ 116 52 51 248 26877 15243 20063 65464 0 6 6 6 0 6 6 6 0 6 6 6 0 6 6 6 0 0 6 6 6 6 6 6 0 5.5 6 5.5 6 5.5 6 5.5 0 6 6 6 0 0 -& & & af0 af0 6faaaaaa 6faaaaaa af0 af0 6faaaaaa 6faaaaaa & 59 163 63 26 24559 55618 27326 12704 1 7 7 7 1 7 7 7 1 7 7 7 1 7 7 7 1 1 7 7 7 7 7 7 1 6.6 7 6.6 7 6.6 7 6.6 1 7 7 7 1 1 -* * * ag0 ag0 7gaaaaaaa 7gaaaaaaa ag0 ag0 7gaaaaaaa 7gaaaaaaa * 69 229 119 159 11779 48557 14747 42703 2 8 8 8 2 8 8 8 2 8 8 8 2 8 8 8 2 2 8 8 8 8 8 8 2 7.7 8 7.7 8 7.7 8 7.7 2 8 8 8 2 2 -( ( ( a^0 a^0 8^aaaaaaaa 8^aaaaaaaa a^0 a^0 8^aaaaaaaa 8^aaaaaaaa ( 54 89 113 155 1068 61537 14823 43439 3 9 9 9 3 9 9 9 3 9 9 9 3 9 9 9 3 3 9 9 9 9 9 9 3 8.8 9 8.8 9 8.8 9 8.8 3 9 9 9 3 3 -) ) ) a_0 a_0 9_aaaaaaaaa 9_aaaaaaaaa a_0 a_0 9_aaaaaaaaa 9_aaaaaaaaa ) 68 34 44 175 32453 44381 506 37695 4 10 10 10 4 10 10 10 4 10 10 10 4 10 10 10 4 4 10 10 10 10 10 10 4 9.9 10 9.9 10 9.9 10 9.9 4 10 10 10 4 4 diff --git a/mysql-test/std_data/funcs_1/ndb_tb4.txt b/mysql-test/std_data/funcs_1/ndb_tb4.txt deleted file mode 100644 index 5092371d093..00000000000 --- a/mysql-test/std_data/funcs_1/ndb_tb4.txt +++ /dev/null @@ -1,10 +0,0 @@ -1 1 1 1 1 1 -5 0.0 1 0.0 1 0.0 1 0.0 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 -1.17549435e-38 -1.17549435e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1.175494352e-38 1000-01-01 838:59:58 1970-01-02 00:00:01 19700102000001 1902 1902 1902 2 2 0! 0 0 0 0! 
-2 2 2 2 2 2 -4 1.1 2 1.1 2 1.1 2 1.1 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 -1.175494349e-38 -1.175494349e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1.175494353e-38 1000-01-02 838:59:57 1970-01-03 00:00:02 19700103000002 1903 1903 1903 1 3 1@# 1@ 1@ 1@ 1@# -3 3 3 3 3 3 -3 2.2 3 2.2 3 2.2 3 2.2 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 -1.175494348e-38 -1.175494348e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1.175494354e-38 1000-01-03 838:59:56 1970-01-04 00:00:03 19700104000003 1904 1904 1904 2 1 2#$% 2#$ 2#$ 2#$ 2#$% -4 4 4 4 4 4 -2 3.3 4 3.3 4 3.3 4 3.3 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 -1.175494347e-38 -1.175494347e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1.175494355e-38 1000-01-04 838:59:55 1970-01-05 00:00:04 19700105000004 1905 1905 1905 1 2 3$%^& 3$%^ 3$%^ 3$%^ 3$%^& -5 5 5 5 5 5 -1 4.4 5 4.4 5 4.4 5 4.4 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 -1.175494346e-38 -1.175494346e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1.175494356e-38 1000-01-05 838:59:54 1970-01-06 00:00:05 19700106000005 1906 1906 1906 2 3 4%^&*( 4%^&* 4%^&* 4%^&* 4%^&*( -6 6 6 6 6 6 0 5.5 6 5.5 6 5.5 6 5.5 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 -1.175494345e-38 -1.175494345e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1.175494357e-38 1000-01-06 838:59:53 1970-01-07 00:00:06 19700107000006 1907 1907 1907 1 1 5^&*()_ 5^&*() 5^&*() 5^&*() 5^&*()_ -7 7 7 7 7 7 1 6.6 7 6.6 7 6.6 7 6.6 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 -1.175494344e-38 -1.175494344e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1.175494358e-38 1000-01-07 838:59:52 1970-01-08 00:00:07 19700108000007 1908 1908 1908 2 2 6&*()_+= 
6&*()_+ 6&*()_+ 6&*()_+ 6&*()_+= -8 8 8 8 8 8 2 7.7 8 7.7 8 7.7 8 7.7 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 -1.175494343e-38 -1.175494343e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1.175494359e-38 1000-01-08 838:59:51 1970-01-09 00:00:08 19700109000008 1909 1909 1909 1 3 7*()_+=-| 7*()_+=- 7*()_+=- 7*()_+=- 7*()_+=-| -9 9 9 9 9 9 3 8.8 9 8.8 9 8.8 9 8.8 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 -1.175494342e-38 -1.175494342e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1.17549436e-38 1000-01-09 838:59:50 1970-01-10 00:00:09 19700110000009 1910 1910 1910 2 1 8()_+=-|{} 8()_+=-|{ 8()_+=-|{ 8()_+=-|{ 8()_+=-|{} -10 10 10 10 10 10 4 9.9 10 9.9 10 9.9 10 9.9 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 -1.175494341e-38 -1.175494341e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1.175494361e-38 1000-01-10 838:59:49 1970-01-11 00:00:10 19700111000010 1911 1911 1911 1 2 9)_+=-|{}[] 9)_+=-|{}[ 9)_+=-|{}[ 9)_+=-|{}[ 9)_+=-|{}[] diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data deleted file mode 100644 index 32494d5a1e7..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.1.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data deleted file mode 100644 index 2141fb0a6e4..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1-0.2.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl deleted file mode 100644 index cbe548e0ca5..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log deleted file mode 100644 index e4e114d4b46..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1.1.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl deleted file mode 100644 index cbe548e0ca5..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log deleted file mode 100644 index a1c89b7015c..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-1.2.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2-0.1.Data b/mysql-test/std_data/ndb_backup50/BACKUP-2-0.1.Data deleted file mode 100644 index 
09e63064666..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2-0.1.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2-0.2.Data b/mysql-test/std_data/ndb_backup50/BACKUP-2-0.2.Data deleted file mode 100644 index a8332239d8f..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2-0.2.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2.1.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-2.1.ctl deleted file mode 100644 index f54103a2a44..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2.1.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2.1.log b/mysql-test/std_data/ndb_backup50/BACKUP-2.1.log deleted file mode 100644 index 5564f952e66..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2.1.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2.2.ctl b/mysql-test/std_data/ndb_backup50/BACKUP-2.2.ctl deleted file mode 100644 index f54103a2a44..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2.2.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup50/BACKUP-2.2.log b/mysql-test/std_data/ndb_backup50/BACKUP-2.2.log deleted file mode 100644 index 5564f952e66..00000000000 Binary files a/mysql-test/std_data/ndb_backup50/BACKUP-2.2.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data deleted file mode 100644 index 2407d1f261b..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.1.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data deleted file mode 100644 index f21e9886523..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1-0.2.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl deleted file mode 100644 index 99e2f297693..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log deleted file mode 100644 index 4448cd98c04..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1.1.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl deleted file mode 100644 index 99e2f297693..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log deleted file mode 100644 index 3be69891402..00000000000 Binary files a/mysql-test/std_data/ndb_backup51/BACKUP-1.2.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.1.Data deleted file mode 100644 index 267039d757e..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.1.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.2.Data deleted file mode 100644 index 9033c2735e1..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1-0.2.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.ctl 
b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.ctl deleted file mode 100644 index 30fd0a2dda1..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.log deleted file mode 100644 index 39f7244b014..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.1.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.ctl deleted file mode 100644 index 30fd0a2dda1..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.log deleted file mode 100644 index 39f7244b014..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_be/BACKUP-1.2.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.1.Data b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.1.Data deleted file mode 100644 index 45d4d536c02..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.1.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.2.Data b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.2.Data deleted file mode 100644 index 067fc6b716d..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1-0.2.Data and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.ctl b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.ctl deleted file mode 100644 index 949b7d0be8b..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.log b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.log deleted file mode 100644 index 3686d2718a2..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.1.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.ctl b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.ctl deleted file mode 100644 index 949b7d0be8b..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.ctl and /dev/null differ diff --git a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.log b/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.log deleted file mode 100644 index 3686d2718a2..00000000000 Binary files a/mysql-test/std_data/ndb_backup51_data_le/BACKUP-1.2.log and /dev/null differ diff --git a/mysql-test/std_data/ndb_config_config.ini b/mysql-test/std_data/ndb_config_config.ini deleted file mode 100644 index c325952d322..00000000000 --- a/mysql-test/std_data/ndb_config_config.ini +++ /dev/null @@ -1,55 +0,0 @@ -[ndbd default] -NoOfReplicas= 2 -MaxNoOfConcurrentTransactions= 64 -MaxNoOfConcurrentOperations= 10000 -DataMemory= 20M -IndexMemory= 1M -Diskless= 0 -TimeBetweenWatchDogCheck= 30000 -DataDir= /data/msvensson/mysql/mysql-5.1-new-maint/mysql-test/var/ndbcluster-10095 -MaxNoOfOrderedIndexes= 32 -MaxNoOfAttributes= 2048 -TimeBetweenGlobalCheckpoints= 500 -NoOfFragmentLogFiles= 4 -FragmentLogFileSize=12M -DiskPageBufferMemory= 4M -# O_DIRECT has issues on 2.4 whach have not been handled, Bug #29612 -#ODirect= 1 -# the following parametes just function as a small regression -# test that the parameter exists 
-InitialNoOfOpenFiles= 27 - -# -# Increase timeouts to cater for slow test-machines -# (possibly running several tests in parallell) -# -HeartbeatIntervalDbDb= 30000 -HeartbeatIntervalDbApi= 30000 -#TransactionDeadlockDetectionTimeout= 7500 - -[ndbd] -HostName= localhost - -[ndbd] -HostName= localhost - -[ndb_mgmd] -HostName= localhost -DataDir= /data/msvensson/mysql/mysql-5.1-new-maint/mysql-test/var/ndbcluster-10095 # -PortNumber= 10095 - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] - -[mysqld] diff --git a/mysql-test/std_data/ndb_config_mycnf1.cnf b/mysql-test/std_data/ndb_config_mycnf1.cnf deleted file mode 100644 index c680bfd8fa3..00000000000 --- a/mysql-test/std_data/ndb_config_mycnf1.cnf +++ /dev/null @@ -1,15 +0,0 @@ -[cluster_config] -NoOfReplicas=1 -DataMemory=50M - -[cluster_config.jonas] -IndexMemory=50M -ndbd = localhost,localhost,localhost,localhost -ndb_mgmd = localhost -mysqld = localhost - -[cluster_config.ndbd.1] -DataMemory=25M - -[cluster_config.ndbd.2.jonas] -DataMemory=35M diff --git a/mysql-test/std_data/ndb_config_mycnf2.cnf b/mysql-test/std_data/ndb_config_mycnf2.cnf deleted file mode 100644 index 3bf6b9a1194..00000000000 --- a/mysql-test/std_data/ndb_config_mycnf2.cnf +++ /dev/null @@ -1,31 +0,0 @@ -# -# Testing automatic node id generation -# -[cluster_config] -NoOfReplicas=2 -Signum=39 - -[cluster_config.cluster0] -ndbd = localhost,localhost,localhost,localhost -ndb_mgmd = localhost -mysqld = ,,,, - -[cluster_config.cluster1] -ndbd = localhost,localhost,localhost,localhost -ndb_mgmd = localhost -mysqld = ,,,, -[cluster_config.ndbd.1.cluster1] -NodeId=2 -[cluster_config.mysqld.1.cluster1] -NodeId=1 - -[cluster_config.cluster2] -ndbd = localhost,localhost,localhost,localhost -ndb_mgmd = localhost,localhost -mysqld = ,,,, -[cluster_config.mysqld.1.cluster2] -NodeId=11 -[cluster_config.ndb_mgmd.1.cluster2] -NodeId=1 -[cluster_config.ndbd.1.cluster2] -NodeId=3 diff --git a/mysql-test/suite/binlog/r/binlog_multi_engine.result b/mysql-test/suite/binlog/r/binlog_multi_engine.result deleted file mode 100644 index d0febc3f8bc..00000000000 --- a/mysql-test/suite/binlog/r/binlog_multi_engine.result +++ /dev/null @@ -1,106 +0,0 @@ -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); -CREATE TABLE t1m (m INT, n INT) ENGINE=MYISAM; -CREATE TABLE t1b (b INT, c INT) ENGINE=BLACKHOLE; -CREATE TABLE t1n (e INT, f INT) ENGINE=NDB; -RESET MASTER; -SET SESSION BINLOG_FORMAT=STATEMENT; -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); -UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c; -The last event before the COMMIT is use `test`; UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c -*** Please look in binlog_multi_engine.test if you have a diff here **** -START TRANSACTION; -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; -Warnings: -Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them. 
-UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c; -COMMIT; -TRUNCATE t1m; -TRUNCATE t1b; -TRUNCATE t1n; -show binlog events from ; -Log_name Pos Event_type Server_id End_log_pos Info -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2) -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2) -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2) -mysqld-bin.000001 # Query # # use `test`; UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Table_map # # table_id: # (test.t1n) -mysqld-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) -mysqld-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1m -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1b -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1n -RESET MASTER; -SET SESSION BINLOG_FORMAT=MIXED; -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); -The last event before the COMMIT is use `test`; INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2) -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); -UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c; -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; -ERROR HY000: Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging. 
-TRUNCATE t1m; -TRUNCATE t1b; -TRUNCATE t1n; -show binlog events from ; -Log_name Pos Event_type Server_id End_log_pos Info -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2) -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2) -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Table_map # # table_id: # (test.t1n) -mysqld-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) -mysqld-bin.000001 # Write_rows # # table_id: # -mysqld-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Query # # use `test`; UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1m -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1b -mysqld-bin.000001 # Query # # use `test`; TRUNCATE t1n -RESET MASTER; -SET SESSION BINLOG_FORMAT=ROW; -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; -ERROR HY000: Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging. -UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c; -ERROR HY000: Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging. -show binlog events from ; -Log_name Pos Event_type Server_id End_log_pos Info -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Table_map # # table_id: # (test.t1m) -mysqld-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Table_map # # table_id: # (test.t1b) -mysqld-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F -mysqld-bin.000001 # Query # # COMMIT -mysqld-bin.000001 # Query # # BEGIN -mysqld-bin.000001 # Table_map # # table_id: # (test.t1n) -mysqld-bin.000001 # Table_map # # table_id: # (mysql.ndb_apply_status) -mysqld-bin.000001 # Write_rows # # table_id: # -mysqld-bin.000001 # Write_rows # # table_id: # flags: STMT_END_F -mysqld-bin.000001 # Query # # COMMIT -RESET MASTER; -DROP TABLE t1m, t1b, t1n; diff --git a/mysql-test/suite/binlog/r/binlog_old_versions.result b/mysql-test/suite/binlog/r/binlog_old_versions.result index 594f1101a38..30b64535eb4 100644 --- a/mysql-test/suite/binlog/r/binlog_old_versions.result +++ b/mysql-test/suite/binlog/r/binlog_old_versions.result @@ -53,7 +53,7 @@ SELECT COUNT(*) FROM t3; COUNT(*) 17920 DROP TABLE t1, t3; -==== Read binlog from ndb tree (mysql-5.1-telco-6.1) ==== +==== Read binlog from telco tree (mysql-5.1-telco-6.1) ==== SELECT * FROM t1 ORDER BY a; a b 0 last_insert_id diff --git a/mysql-test/suite/binlog/t/binlog_multi_engine.test b/mysql-test/suite/binlog/t/binlog_multi_engine.test deleted file mode 100644 index 90fddd4f3fd..00000000000 --- a/mysql-test/suite/binlog/t/binlog_multi_engine.test +++ /dev/null @@ -1,110 +0,0 @@ -# Test to test how logging is done depending on the capabilities of -# the engines. Unfortunately, we don't have a good row-only logging -# engine, and NDB does not really cut is since it is also -# self-logging. 
I'm using it nevertheless. - -source include/have_blackhole.inc; -source include/have_ndb.inc; -source include/have_log_bin.inc; - -call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); - -CREATE TABLE t1m (m INT, n INT) ENGINE=MYISAM; -CREATE TABLE t1b (b INT, c INT) ENGINE=BLACKHOLE; -CREATE TABLE t1n (e INT, f INT) ENGINE=NDB; - -RESET MASTER; - -SET SESSION BINLOG_FORMAT=STATEMENT; - -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); - -UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c; - -# Here and below we need to wait when some event appears in binlog -# to avoid unsrted mixing local events and from NDB -let $wait_binlog_event= COMMIT; -source include/wait_for_binlog_event.inc; -let $event= query_get_value(SHOW BINLOG EVENTS, Info, 9); ---echo The last event before the COMMIT is $event - -echo *** Please look in binlog_multi_engine.test if you have a diff here ****; -START TRANSACTION; -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; -UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c; -COMMIT; - -let $wait_binlog_event= COMMIT; -source include/wait_for_binlog_event.inc; - -TRUNCATE t1m; -TRUNCATE t1b; -TRUNCATE t1n; - -let $wait_binlog_event= t1n; -source include/wait_for_binlog_event.inc; - -source include/show_binlog_events.inc; - -RESET MASTER; - -SET SESSION BINLOG_FORMAT=MIXED; - -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); - -let $wait_binlog_event= COMMIT; -source include/wait_for_binlog_event.inc; -let $event= query_get_value(SHOW BINLOG EVENTS, Info, 6); ---echo The last event before the COMMIT is $event - -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); - -let $wait_binlog_event= COMMIT; -source include/wait_for_binlog_event.inc; - -UPDATE t1m, t1b SET m = 2, b = 3 WHERE n = c; -error ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE; -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; - -# Not possible to test this since NDB writes its own binlog, which -# might cause it to be out of sync with the results from MyISAM. -# This will generate an error once BUG#28722 is fixed. - -#UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; - -TRUNCATE t1m; -TRUNCATE t1b; -TRUNCATE t1n; - -source include/show_binlog_events.inc; - -RESET MASTER; - -SET SESSION BINLOG_FORMAT=ROW; - -INSERT INTO t1m VALUES (1,1), (1,2), (2,1), (2,2); - -INSERT INTO t1b VALUES (1,1), (1,2), (2,1), (2,2); -INSERT INTO t1n VALUES (1,1), (1,2), (2,1), (2,2); - -error ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE; -UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; - -# Not possible to test this since NDB writes its own binlog, which -# might cause it to be out of sync with the results from MyISAM. -# This will generate an error once BUG#28722 is fixed. 
- -#UPDATE t1m, t1n SET m = 2, e = 3 WHERE n = f; - -error ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE; -UPDATE t1n, t1b SET e = 2, b = 3 WHERE f = c; - -source include/show_binlog_events.inc; - -RESET MASTER; - -DROP TABLE t1m, t1b, t1n; - diff --git a/mysql-test/suite/binlog/t/binlog_old_versions.test b/mysql-test/suite/binlog/t/binlog_old_versions.test index 330aac137de..130101541e3 100644 --- a/mysql-test/suite/binlog/t/binlog_old_versions.test +++ b/mysql-test/suite/binlog/t/binlog_old_versions.test @@ -11,7 +11,7 @@ # The previous versions we currently test are: # - version 5.1.17 and earlier trees # - mysql-5.1-wl2325-xxx trees (AKA alcatel trees) -# - mysql-5.1-telco-6.1 trees (AKA ndb trees) +# - mysql-5.1-telco-6.1 trees # For completeness, we also test mysql-5.1-new_rpl, which is supposed # to be the "correct" version. @@ -75,7 +75,7 @@ SELECT COUNT(*) FROM t3; DROP TABLE t1, t3; ---echo ==== Read binlog from ndb tree (mysql-5.1-telco-6.1) ==== +--echo ==== Read binlog from telco tree (mysql-5.1-telco-6.1) ==== # Read binlog. --exec $MYSQL_BINLOG --local-load=$MYSQLTEST_VARDIR/tmp/ suite/binlog/std_data/ver_5_1-telco.001 | $MYSQL --local-infile=1 diff --git a/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_verbose.test b/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_verbose.test index 42d92e1a44d..98aa7635deb 100644 --- a/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_verbose.test +++ b/mysql-test/suite/binlog/t/binlog_row_mysqlbinlog_verbose.test @@ -1,12 +1,8 @@ ######################################################## -# Test mysqlbinlog command with Ndb produced Binlog -# variants -# # WHAT # ==== # This test aims to check that the mysqlbinlog --verbose -# command can output binlogs in 4 format variants, currently -# used by Ndb +# command can output binlogs in 4 format variants. # # 1) Updates logged as write_row events # Only primary key and updated columns included in the @@ -18,19 +14,6 @@ # event # 4) Updates logged as update_row events # All columns included in the event -# -# Format variant (1) is the Ndb default. -# Bug#47323 resulted in binlogs generated in format (1) -# being incorrectly parsed by the mysqlbinlog --verbose -# option -# -# HOW -# === -# Row-based binlog files in each format have been -# captured from an Ndb cluster -# These are output using the mysqlbinlog --verbose -# tool and the output is checked. -# ######################################################## # We require binlog_format_row as we're independent of binlog format diff --git a/mysql-test/suite/binlog/t/binlog_unsafe.test b/mysql-test/suite/binlog/t/binlog_unsafe.test index 2de84a58875..c9e92e21002 100644 --- a/mysql-test/suite/binlog/t/binlog_unsafe.test +++ b/mysql-test/suite/binlog/t/binlog_unsafe.test @@ -96,10 +96,6 @@ # rpl.rpl_variables_stm tests the small subset of variables that # actually can be replicated safely in statement mode. # -# rpl_ndb.rpl_ndb_binlog_format_errors tests all errors and warnings -# related to logging format (not just 'Unsafe statement written to the -# binary log using statement format since BINLOG_FORMAT = STATEMENT'). 
- --source include/have_udf.inc --source include/have_log_bin.inc --source include/have_binlog_format_statement.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_bit.test b/mysql-test/suite/engines/funcs/t/rpl_bit.test index 07b0778296c..7f85313ae4c 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_bit.test +++ b/mysql-test/suite/engines/funcs/t/rpl_bit.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## -- source include/master-slave.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test b/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test index 6b23f1a0d03..adf1526a657 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test +++ b/mysql-test/suite/engines/funcs/t/rpl_err_ignoredtable.test @@ -2,8 +2,6 @@ # Bug #797: If a query is ignored on slave (replicate-ignore-table) the slave # still checks that it has the same error as on the master. ########################################################################## -# 2006-02-07 JBM Added error code 1022 for NDB Engine + ORDER BY -########################################################################## -- source include/master-slave.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_loadfile.test b/mysql-test/suite/engines/funcs/t/rpl_loadfile.test index 97ee89a6d95..26235d89016 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_loadfile.test +++ b/mysql-test/suite/engines/funcs/t/rpl_loadfile.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## # Includes @@ -42,9 +41,6 @@ CALL test.p1(); --enable_warnings SELECT * FROM test.t1 ORDER BY blob_column; save_master_pos; -# Need to allow some time when NDB engine is used for -# the injector thread to have time to populate binlog -sleep 10; sync_slave_with_master; connection slave; SELECT * FROM test.t1 ORDER BY blob_column; diff --git a/mysql-test/suite/engines/funcs/t/rpl_log_pos.test b/mysql-test/suite/engines/funcs/t/rpl_log_pos.test index 3a762b19756..22deee6b5f3 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_log_pos.test +++ b/mysql-test/suite/engines/funcs/t/rpl_log_pos.test @@ -1,7 +1,6 @@ ########## # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## # diff --git a/mysql-test/suite/engines/funcs/t/rpl_ps.test b/mysql-test/suite/engines/funcs/t/rpl_ps.test index b8792722192..09c7b779f65 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_ps.test +++ b/mysql-test/suite/engines/funcs/t/rpl_ps.test @@ -2,8 +2,6 @@ # Test of replicating user variables # ########################################################### -# 2006-02-08 By JBM added order by for use w/ NDB engine -########################################################### source include/master-slave.inc; #save_master_pos; diff --git a/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test b/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test index 30d2688c3fb..f4e6239c679 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test +++ b/mysql-test/suite/engines/funcs/t/rpl_rbr_to_sbr.test @@ -1,5 +1,4 @@ -- source include/have_binlog_format_mixed.inc --- source include/not_ndb_default.inc -- source include/master-slave.inc # Test that the slave temporarily switches to ROW when seeing row diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test 
b/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test index ea4b958ae4c..3e057d48ec9 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test +++ b/mysql-test/suite/engines/funcs/t/rpl_row_max_relay_size.test @@ -4,7 +4,6 @@ # Test of manual relay log rotation with FLUSH LOGS. # Requires statement logging -source include/not_ndb_default.inc; source include/have_binlog_format_row.inc; source extra/rpl_tests/rpl_max_relay_size.test; diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test index 054fa02f514..d8a5aacc5e6 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test +++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp005.test @@ -6,8 +6,6 @@ # Test: Tests SPs with cursors, flow logic, and alter sp. In addition the # # tests SPs with insert and update operations. # ############################################################################# -# 2006-02-08 By JBM added ORDER BY for use with NDB engine -############################################################################# # Includes -- source include/have_binlog_format_row.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test b/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test index 2a4b1e5e605..505ed582ba9 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test +++ b/mysql-test/suite/engines/funcs/t/rpl_row_sp009.test @@ -5,8 +5,6 @@ ############################################################################# #TEST: Taken and modfied from http://bugs.mysql.com/bug.php?id=12168 # ############################################################################# -# 2006-02-08 By JBM : Added order by for ndb engine use -############################################################################# # Includes -- source include/have_binlog_format_row.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_until.test b/mysql-test/suite/engines/funcs/t/rpl_row_until.test index bf38bd487ea..b8223c5a196 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_row_until.test +++ b/mysql-test/suite/engines/funcs/t/rpl_row_until.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/engines/funcs/t/rpl_row_view01.test b/mysql-test/suite/engines/funcs/t/rpl_row_view01.test index 634e3c30cc6..1ccfcb4eb27 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_row_view01.test +++ b/mysql-test/suite/engines/funcs/t/rpl_row_view01.test @@ -5,8 +5,6 @@ ############################################################################# #TEST: row based replication of views # ############################################################################# -# 2006-02-08 By JBM added order by and sleep for use with ndb engine -############################################################################# # Includes -- source include/have_binlog_format_row.inc -- source include/master-slave.inc @@ -43,11 +41,6 @@ CREATE VIEW mysqltest1.v4 AS SELECT * FROM mysqltest1.v3 WHERE a > 1 WITH LOCAL SELECT * FROM mysqltest1.v2; SELECT * FROM mysqltest1.v1; -# Had to add a sleep for use with NDB -# engine. Injector thread would have not -# populated biblog and data would not be on -# the slave. 
-sleep 10; sync_slave_with_master; SELECT * FROM mysqltest1.v2; SELECT * FROM mysqltest1.v1; diff --git a/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test b/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test index c9c77f47d8d..94ce539291d 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test +++ b/mysql-test/suite/engines/funcs/t/rpl_sp_effects.test @@ -1,7 +1,6 @@ ########################################## # Change Author: JBM # Change Date: 2006-05-02 -# Change: Added Order By for NDB testing ########################################## # Test of replication of stored procedures (WL#2146 for MySQL 5.0) diff --git a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test index b9df07101fb..396ba4073e4 100644 --- a/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/engines/funcs/t/rpl_switch_stm_row_mixed.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/master-slave.inc connection default; diff --git a/mysql-test/suite/engines/iuds/t/type_bit_iuds.test b/mysql-test/suite/engines/iuds/t/type_bit_iuds.test index 628457cb9cd..88418decfad 100644 --- a/mysql-test/suite/engines/iuds/t/type_bit_iuds.test +++ b/mysql-test/suite/engines/iuds/t/type_bit_iuds.test @@ -71,7 +71,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -235,7 +234,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -359,7 +357,6 @@ 
CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -523,7 +520,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -647,7 +643,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -811,7 +806,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 
123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -935,7 +929,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -1099,7 +1092,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -1223,7 +1215,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 
36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -1387,7 +1378,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -1511,7 +1501,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -1675,7 +1664,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), 
(0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -1799,7 +1787,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -1963,7 +1950,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -2087,7 +2073,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 
368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -2251,7 +2236,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -2375,7 +2359,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -2539,7 +2522,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -2663,7 +2645,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD 
PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -2827,7 +2808,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -2951,7 +2931,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -3115,7 +3094,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 
67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -3239,7 +3217,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -3403,7 +3380,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -3527,7 +3503,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 
398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -3691,7 +3666,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -3815,7 +3789,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -3979,7 +3952,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), 
(68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -4103,7 +4075,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -4267,7 +4238,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -4391,7 +4361,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 
499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -4555,7 +4524,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -4679,7 +4647,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -4843,7 +4810,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -4967,7 +4933,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL 
PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -5131,7 +5096,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -5255,7 +5219,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -5419,7 +5382,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), 
(118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -5543,7 +5505,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -5707,7 +5668,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -5831,7 +5791,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 
343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -5995,7 +5954,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -6119,7 +6077,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -6283,7 +6240,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), 
(56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -6407,7 +6363,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -6571,7 +6526,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -6695,7 +6649,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 
280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -6859,7 +6812,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -6983,7 +6935,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -7147,7 +7098,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -7271,7 +7221,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 
BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -7435,7 +7384,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -7559,7 +7507,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -7723,7 +7670,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), 
(29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -7847,7 +7793,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -8011,7 +7956,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -8135,7 +8079,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 
307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -8299,7 +8242,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -8423,7 +8365,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -8587,7 +8528,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 
36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -8711,7 +8651,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -8875,7 +8814,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -8999,7 +8937,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), 
(57, 135); --sorted_result @@ -9163,7 +9100,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -9287,7 +9223,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -9451,7 +9386,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -9575,7 +9509,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from 
suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -9739,7 +9672,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -9863,7 +9795,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -10027,7 +9958,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 
36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -10151,7 +10081,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -10315,7 +10244,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -10439,7 +10367,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), 
(57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -10603,7 +10530,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -10727,7 +10653,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -10891,7 +10816,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), 
(119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -11015,7 +10939,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -11179,7 +11102,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -11303,7 +11225,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); 
--sorted_result @@ -11467,7 +11388,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -11591,7 +11511,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -11755,7 +11674,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -11879,7 +11797,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test 
INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -12043,7 +11960,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -12167,7 +12083,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -12331,7 +12246,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 
368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -12455,7 +12369,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -12619,7 +12532,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -12743,7 +12655,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO 
t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -12907,7 +12818,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -13031,7 +12941,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -13195,7 +13104,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), 
(123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -13319,7 +13227,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -13483,7 +13390,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -13607,7 +13513,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ 
-13771,7 +13676,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -13896,7 +13800,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -14060,7 +13963,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -14185,7 +14087,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO 
t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -14349,7 +14250,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -14474,7 +14374,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -14638,7 +14537,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 
411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -14763,7 +14661,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -14927,7 +14824,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -15052,7 +14948,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 
438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -15216,7 +15111,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -15341,7 +15235,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -15505,7 +15398,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 
202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -15630,7 +15522,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -15794,7 +15685,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -15919,7 +15809,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -16083,7 +15972,6 @@ UPDATE 
t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -16208,7 +16096,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(0) NOT NULL PRIMARY KEY, c2 BIT(0)); CREATE TABLE t6(c1 BIT(0), c2 BIT(0)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -16372,7 +16259,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -16497,7 +16383,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(1) NOT NULL PRIMARY KEY, c2 BIT(1)); CREATE TABLE t6(c1 BIT(1), c2 BIT(1)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 
438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -16661,7 +16546,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -16786,7 +16670,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(2) NOT NULL PRIMARY KEY, c2 BIT(2)); CREATE TABLE t6(c1 BIT(2), c2 BIT(2)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -16950,7 +16833,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 
202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -17075,7 +16957,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(4) NOT NULL PRIMARY KEY, c2 BIT(4)); CREATE TABLE t6(c1 BIT(4), c2 BIT(4)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -17239,7 +17120,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -17364,7 +17244,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(8) NOT NULL PRIMARY KEY, c2 BIT(8)); CREATE TABLE t6(c1 BIT(8), c2 BIT(8)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 
123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -17528,7 +17407,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -17653,7 +17531,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(16) NOT NULL PRIMARY KEY, c2 BIT(16)); CREATE TABLE t6(c1 BIT(16), c2 BIT(16)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -17817,7 +17694,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 
83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -17942,7 +17818,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(32) NOT NULL PRIMARY KEY, c2 BIT(32)); CREATE TABLE t6(c1 BIT(32), c2 BIT(32)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -18106,7 +17981,6 @@ UPDATE t5,t6 SET t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); @@ -18231,7 +18105,6 @@ CREATE TABLE t4(i INT, b BIT NOT NULL); ALTER TABLE t4 ADD PRIMARY KEY (i); CREATE TABLE t5(c1 BIT(64) NOT NULL PRIMARY KEY, c2 BIT(64)); CREATE TABLE t6(c1 BIT(64), c2 BIT(64)); -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); --sorted_result @@ -18395,7 +18268,6 @@ UPDATE t5,t6 SET 
t5.c2=t6.c1+t6.c2 WHERE t5.c1=t6.c1 AND t5.c2=t6.c2; TRUNCATE t5; TRUNCATE t6; -#Borrowed from suite/ndb/t/ndb_bitfield.test INSERT IGNORE INTO t5 VALUES (95, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (69, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135); INSERT INTO t6 VALUES (94, 46), (31, 438), (61, 152), (78, 123), (88, 411), (122, 118), (0, 177),(75, 42), (108, 67), (79, 349), (59, 188), (68, 206), (49, 345), (118, 380),(111, 368), (94, 468), (56, 379), (77, 133), (29, 399), (9, 363), (23, 36),(116, 390), (119, 368), (87, 351), (123, 411), (24, 398), (34, 202), (28, 499),(30, 83), (5, 178), (60, 343), (4, 245), (104, 280), (106, 446), (127, 403),(44, 307), (68, 454), (57, 135),(null,23),(1,null),(null,null); diff --git a/mysql-test/suite/engines/rr_trx/run_stress_tx_rr.pl b/mysql-test/suite/engines/rr_trx/run_stress_tx_rr.pl index 1164b471bd3..f277bce018e 100755 --- a/mysql-test/suite/engines/rr_trx/run_stress_tx_rr.pl +++ b/mysql-test/suite/engines/rr_trx/run_stress_tx_rr.pl @@ -108,7 +108,7 @@ $engine_options=""; add_engine_help(); } -# From this point forward there is no difference between the build in InnDB and the plugin +# From this point forward there is no difference between the build in InnoDB and the plugin $opt_engine='InnoDB' if ($opt_engine eq 'InnoDB_plugin'); # checking that custom files for that engine exist @@ -141,7 +141,6 @@ $cmd="MTR_VERSION=1 " . "--mysqld=--log-output=file " . "--mysqld=--sql-mode=no_engine_substitution " . "--skip-im " . - "--skip-ndb " . $engine_options . " > ".$runlog." 2>&1"; diff --git a/mysql-test/suite/federated/federatedx.test b/mysql-test/suite/federated/federatedx.test index 818cc2c1681..c4067070d14 100644 --- a/mysql-test/suite/federated/federatedx.test +++ b/mysql-test/suite/federated/federatedx.test @@ -1506,8 +1506,7 @@ DROP TABLE federated.test; # and lost changes to NEW variables. # Since for federated engine only operation which is affected by wrong # fields mark-up is handler::write_row() this file constains coverage -# for ON INSERT triggers only. Tests for other types of triggers reside -# in ndb_trigger.test. +# for ON INSERT triggers only. # connection slave; --disable_warnings diff --git a/mysql-test/suite/funcs_1/datadict/datadict_load.inc b/mysql-test/suite/funcs_1/datadict/datadict_load.inc index e3013249faf..9e3b87660f8 100644 --- a/mysql-test/suite/funcs_1/datadict/datadict_load.inc +++ b/mysql-test/suite/funcs_1/datadict/datadict_load.inc @@ -61,15 +61,11 @@ let $SERVER_NAME= `SELECT DISTINCT host FROM mysql.user WHERE host NOT In ("loca eval SET @ENGINE_INNODB = IF( '$engine_type' = 'innodb', 1, 0); eval SET @ENGINE_MEMORY = IF( '$engine_type' = 'memory', 1, 0); eval SET @ENGINE_MYISAM = IF( '$engine_type' = 'myisam', 1, 0); -eval SET @ENGINE_NDB = IF( '$engine_type' = 'ndb', 1, 0); --enable_query_log let $engine_myisam= `SELECT @ENGINE_MYISAM = 1`; let $engine_innodb= `SELECT @ENGINE_INNODB = 1`; let $engine_memory= `SELECT @ENGINE_MEMORY = 1`; -let $engine_ndb= `SELECT @ENGINE_NDB = 1`; -# Note: The NDB variant with their own tb1 - tb4 tables is not ready for use. 
-let $engine_ndb= 0; --disable_warnings DROP DATABASE IF EXISTS test1; @@ -107,15 +103,5 @@ if ($engine_myisam) --source suite/funcs_1/include/myisam_tb2.inc } -if ($engine_ndb) -{ - --source suite/funcs_1/include/ndb_tb1.inc - --source suite/funcs_1/include/ndb_tb2.inc - --source suite/funcs_1/include/ndb_tb3.inc - --source suite/funcs_1/include/ndb_tb4.inc - USE test1; - --source suite/funcs_1/include/ndb_tb2.inc -} - USE test; --source suite/funcs_1/include/sp_tb.inc diff --git a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc index 2114e6f3126..ef00a6315f6 100644 --- a/mysql-test/suite/funcs_1/datadict/processlist_priv.inc +++ b/mysql-test/suite/funcs_1/datadict/processlist_priv.inc @@ -46,7 +46,6 @@ # ----> MyISAM # # - There is no impact of the GLOBAL(server) or SESSION default # # storage engine setting on the engine used for I_S tables. # -# That means we cannot get NDB or InnoDB instead. # # # # Creation: # # 2007-08 hhunger Implement this test as part of # diff --git a/mysql-test/suite/funcs_1/datadict/processlist_val.inc b/mysql-test/suite/funcs_1/datadict/processlist_val.inc index f5d736a8402..72c23fa89ed 100644 --- a/mysql-test/suite/funcs_1/datadict/processlist_val.inc +++ b/mysql-test/suite/funcs_1/datadict/processlist_val.inc @@ -20,7 +20,6 @@ # The column PROCESSLIST.INFO is of data type LONGTEXT ----> MyISAM # # - There is no impact of the GLOBAL(server) or SESSION default storage # # engine setting on the engine used for I_S tables. # -# That means we cannot get NDB or InnoDB instead. # # 3. The SHOW (FULL) PROCESSLIST command are for comparison. # # The main test target is INFORMATION_SCHEMA.PROCESSLIST ! # # 4. Attention: # diff --git a/mysql-test/suite/funcs_1/datadict/tables2.inc b/mysql-test/suite/funcs_1/datadict/tables2.inc index 0d110dd22d9..1dc00e5b0f7 100644 --- a/mysql-test/suite/funcs_1/datadict/tables2.inc +++ b/mysql-test/suite/funcs_1/datadict/tables2.inc @@ -21,13 +21,11 @@ # 20 CREATE_OPTIONS # 21 TABLE_COMMENT User defined comment # + InnoDB -# + NDB: "number_of_replicas: " appended # + InnoDB: "InnoDB free: kB" appended # depends on tablespace history! # The LEFT/INSTR/IF/LENGTH stuff should remove these # storage engine specific part. let $innodb_pattern = 'InnoDB free'; -let $ndb_pattern = 'number_of_replicas'; --vertical_results # We do not unify the engine name here, because the rowformat is # specific to the engine. 
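The hunk that follows reduces the TABLE_COMMENT trimming expression to the InnoDB-only case once the 'number_of_replicas' pattern is gone. As a rough illustration of what that expression computes (the comment strings below are invented, not taken from any result file), it keeps everything before the engine-specific marker and returns comments without a marker unchanged:

SET @comment = 'user comment; InnoDB free: 4096 kB';
SELECT LEFT(@comment,
            IF(INSTR(@comment, 'InnoDB free') = 0,
               LENGTH(@comment),
               INSTR(@comment, 'InnoDB free') - 1)) AS user_comment;
# -> 'user comment; '   (the "InnoDB free: ... kB" suffix is cut off)
SET @comment = 'plain user comment';
SELECT LEFT(@comment,
            IF(INSTR(@comment, 'InnoDB free') = 0,
               LENGTH(@comment),
               INSTR(@comment, 'InnoDB free') - 1)) AS user_comment;
# -> 'plain user comment'   (no marker, so the whole comment is kept)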
@@ -36,11 +34,9 @@ let $ndb_pattern = 'number_of_replicas'; eval SELECT *, LEFT( table_comment, - IF(INSTR(table_comment,$innodb_pattern) = 0 - AND INSTR(table_comment,$ndb_pattern) = 0, + IF(INSTR(table_comment,$innodb_pattern) = 0, LENGTH(table_comment), - INSTR(table_comment,$innodb_pattern) - + INSTR(table_comment,$ndb_pattern) - 1)) + INSTR(table_comment,$innodb_pattern) - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/include/ndb_tb1.inc b/mysql-test/suite/funcs_1/include/ndb_tb1.inc deleted file mode 100644 index fd2db538b4c..00000000000 --- a/mysql-test/suite/funcs_1/include/ndb_tb1.inc +++ /dev/null @@ -1,70 +0,0 @@ -##### suite/funcs_1/include/ndb_tb1.inc - ---disable_warnings -drop table if exists tb1 ; ---enable_warnings -create table tb1 ( -f1 char(0), -f2 char(0) binary, -f3 char(0) ascii, -f4 tinytext unicode, -f5 text, -f6 mediumtext, -f7 longtext, -f8 tinyblob, -f9 blob, -f10 mediumblob, -f11 longblob, -f12 binary, -f13 tinyint, -f14 tinyint unsigned, -f15 tinyint zerofill, -f16 tinyint unsigned zerofill, -f17 smallint, -f18 smallint unsigned, -f19 smallint zerofill, -f20 smallint unsigned zerofill, -f21 mediumint, -f22 mediumint unsigned, -f23 mediumint zerofill, -f24 mediumint unsigned zerofill, -f25 int, -f26 int unsigned, -f27 int zerofill, -f28 int unsigned zerofill, -f29 bigint, -f30 bigint unsigned, -f31 bigint zerofill, -f32 bigint unsigned zerofill, -f33 decimal, -f34 decimal unsigned, -f35 decimal zerofill, -f36 decimal unsigned zerofill not null DEFAULT 9.9, -f37 decimal (0) not null DEFAULT 9.9, -f38 decimal (64) not null DEFAULT 9.9, -f39 decimal (0) unsigned not null DEFAULT 9.9, -f40 decimal (64) unsigned not null DEFAULT 9.9, -f41 decimal (0) zerofill not null DEFAULT 9.9, -f42 decimal (64) zerofill not null DEFAULT 9.9, -f43 decimal (0) unsigned zerofill not null DEFAULT 9.9, -f44 decimal (64) unsigned zerofill not null DEFAULT 9.9, -f45 decimal (0,0) not null DEFAULT 9.9, -f46 decimal (63,30) not null DEFAULT 9.9, -f47 decimal (0,0) unsigned not null DEFAULT 9.9, -f48 decimal (63,30) unsigned not null DEFAULT 9.9, -f49 decimal (0,0) zerofill not null DEFAULT 9.9, -f50 decimal (63,30) zerofill not null DEFAULT 9.9, -f51 decimal (0,0) unsigned zerofill not null DEFAULT 9.9, -f52 decimal (63,30) unsigned zerofill not null DEFAULT 9.9, -f53 numeric not null DEFAULT 99, -f54 numeric unsigned not null DEFAULT 99, -f55 numeric zerofill not null DEFAULT 99, -f56 numeric unsigned zerofill not null DEFAULT 99, -f57 numeric (0) not null DEFAULT 99, -f58 numeric (64) not null DEFAULT 99 -) engine = ndb; - ---replace_result $MYSQLTEST_VARDIR -eval -load data infile '$MYSQLTEST_VARDIR/std_data/funcs_1/ndb_tb1.txt' -into table tb1 ; diff --git a/mysql-test/suite/funcs_1/include/ndb_tb2.inc b/mysql-test/suite/funcs_1/include/ndb_tb2.inc deleted file mode 100644 index 3a8d647b65f..00000000000 --- a/mysql-test/suite/funcs_1/include/ndb_tb2.inc +++ /dev/null @@ -1,63 +0,0 @@ -##### suite/funcs_1/include/ndb_tb2.inc - ---disable_warnings -drop table if exists tb2 ; ---enable_warnings -create table tb2 ( -f59 numeric (0) unsigned, -f60 numeric (64) unsigned, -f61 numeric (0) zerofill, -f62 numeric (64) zerofill, -f63 numeric (0) unsigned zerofill, -f64 numeric (64) unsigned zerofill, -f65 numeric (0,0), -f66 numeric (63,30), -f67 numeric (0,0) unsigned, -f68 numeric (63,30) unsigned, -f69 numeric (0,0) zerofill, -f70 numeric (63,30) zerofill, -f71 numeric (0,0) 
unsigned zerofill, -f72 numeric (63,30) unsigned zerofill, -f73 real, -f74 real unsigned, -f75 real zerofill, -f76 real unsigned zerofill, -f77 double default 7.7, -f78 double unsigned default 7.7, -f79 double zerofill default 7.7, -f80 double unsigned zerofill default 8.8, -f81 float not null default 8.8, -f82 float unsigned not null default 8.8, -f83 float zerofill not null default 8.8, -f84 float unsigned zerofill not null default 8.8, -f85 float(0) not null default 8.8, -f86 float(23) not null default 8.8, -f87 float(0) unsigned not null default 8.8, -f88 float(23) unsigned not null default 8.8, -f89 float(0) zerofill not null default 8.8, -f90 float(23) zerofill not null default 8.8, -f91 float(0) unsigned zerofill not null default 8.8, -f92 float(23) unsigned zerofill not null default 8.8, -f93 float(24) not null default 8.8, -f94 float(53) not null default 8.8, -f95 float(24) unsigned not null default 8.8, -f96 float(53) unsigned not null default 8.8, -f97 float(24) zerofill not null default 8.8, -f98 float(53) zerofill not null default 8.8, -f99 float(24) unsigned zerofill not null default 8.8, -f100 float(53) unsigned zerofill not null default 8.8, -f101 date not null default '2000-01-01', -f102 time not null default 20, -f103 datetime not null default '2/2/2', -f104 timestamp not null default 20001231235959, -f105 year not null default 2000, -f106 year(3) not null default 2000, -f107 year(4) not null default 2000, -f108 enum("1enum","2enum") not null default "1enum", -f109 set("1set","2set") not null default "1set" -) engine = ndb; - ---replace_result $MYSQLTEST_VARDIR -eval -load data infile '$MYSQLTEST_VARDIR/std_data/funcs_1/ndb_tb2.txt' -into table tb2 ; diff --git a/mysql-test/suite/funcs_1/include/ndb_tb3.inc b/mysql-test/suite/funcs_1/include/ndb_tb3.inc deleted file mode 100644 index 6ade99bd1a2..00000000000 --- a/mysql-test/suite/funcs_1/include/ndb_tb3.inc +++ /dev/null @@ -1,70 +0,0 @@ -##### suite/funcs_1/include/ndb_tb3.inc - ---disable_warnings -drop table if exists tb3 ; ---enable_warnings -create table tb3 ( -f118 char not null DEFAULT 'a', -f119 char binary not null DEFAULT b'101', -f120 char ascii not null DEFAULT b'101', -f121 tinytext, -f122 text, -f123 mediumtext, -f124 longtext unicode, -f125 tinyblob, -f126 blob, -f127 mediumblob, -f128 longblob, -f129 binary not null DEFAULT b'101', -f130 tinyint not null DEFAULT 99, -f131 tinyint unsigned not null DEFAULT 99, -f132 tinyint zerofill not null DEFAULT 99, -f133 tinyint unsigned zerofill not null DEFAULT 99, -f134 smallint not null DEFAULT 999, -f135 smallint unsigned not null DEFAULT 999, -f136 smallint zerofill not null DEFAULT 999, -f137 smallint unsigned zerofill not null DEFAULT 999, -f138 mediumint not null DEFAULT 9999, -f139 mediumint unsigned not null DEFAULT 9999, -f140 mediumint zerofill not null DEFAULT 9999, -f141 mediumint unsigned zerofill not null DEFAULT 9999, -f142 int not null DEFAULT 99999, -f143 int unsigned not null DEFAULT 99999, -f144 int zerofill not null DEFAULT 99999, -f145 int unsigned zerofill not null DEFAULT 99999, -f146 bigint not null DEFAULT 999999, -f147 bigint unsigned not null DEFAULT 999999, -f148 bigint zerofill not null DEFAULT 999999, -f149 bigint unsigned zerofill not null DEFAULT 999999, -f150 decimal not null DEFAULT 999.999, -f151 decimal unsigned not null DEFAULT 999.17, -f152 decimal zerofill not null DEFAULT 999.999, -f153 decimal unsigned zerofill, -f154 decimal (0), -f155 decimal (64), -f156 decimal (0) unsigned, -f157 decimal (64) unsigned, -f158 decimal (0) 
zerofill, -f159 decimal (64) zerofill, -f160 decimal (0) unsigned zerofill, -f161 decimal (64) unsigned zerofill, -f162 decimal (0,0), -f163 decimal (63,30), -f164 decimal (0,0) unsigned, -f165 decimal (63,30) unsigned, -f166 decimal (0,0) zerofill, -f167 decimal (63,30) zerofill, -f168 decimal (0,0) unsigned zerofill, -f169 decimal (63,30) unsigned zerofill, -f170 numeric, -f171 numeric unsigned, -f172 numeric zerofill, -f173 numeric unsigned zerofill, -f174 numeric (0), -f175 numeric (64) -) engine = ndb; - ---replace_result $MYSQLTEST_VARDIR -eval -load data infile '$MYSQLTEST_VARDIR/std_data/funcs_1/ndb_tb3.txt' -into table tb3; diff --git a/mysql-test/suite/funcs_1/include/ndb_tb4.inc b/mysql-test/suite/funcs_1/include/ndb_tb4.inc deleted file mode 100644 index 3eaae90179a..00000000000 --- a/mysql-test/suite/funcs_1/include/ndb_tb4.inc +++ /dev/null @@ -1,70 +0,0 @@ -##### suite/funcs_1/include/ndb_tb4.inc - ---disable_warnings -drop table if exists tb4; ---enable_warnings -create table tb4 ( -f176 numeric (0) unsigned not null DEFAULT 9, -f177 numeric (64) unsigned not null DEFAULT 9, -f178 numeric (0) zerofill not null DEFAULT 9, -f179 numeric (64) zerofill not null DEFAULT 9, -f180 numeric (0) unsigned zerofill not null DEFAULT 9, -f181 numeric (64) unsigned zerofill not null DEFAULT 9, -f182 numeric (0,0) not null DEFAULT 9, -f183 numeric (63,30) not null DEFAULT 9, -f184 numeric (0,0) unsigned not null DEFAULT 9, -f185 numeric (63,30) unsigned not null DEFAULT 9, -f186 numeric (0,0) zerofill not null DEFAULT 9, -f187 numeric (63,30) zerofill not null DEFAULT 9, -f188 numeric (0,0) unsigned zerofill not null DEFAULT 9, -f189 numeric (63,30) unsigned zerofill not null DEFAULT 9, -f190 real not null DEFAULT 88.8, -f191 real unsigned not null DEFAULT 88.8, -f192 real zerofill not null DEFAULT 88.8, -f193 real unsigned zerofill not null DEFAULT 88.8, -f194 double not null DEFAULT 55.5, -f195 double unsigned not null DEFAULT 55.5, -f196 double zerofill not null DEFAULT 55.5, -f197 double unsigned zerofill not null DEFAULT 55.5, -f198 float, -f199 float unsigned, -f200 float zerofill, -f201 float unsigned zerofill, -f202 float(0), -f203 float(23), -f204 float(0) unsigned, -f205 float(23) unsigned, -f206 float(0) zerofill, -f207 float(23) zerofill, -f208 float(0) unsigned zerofill, -f209 float(23) unsigned zerofill, -f210 float(24), -f211 float(53), -f212 float(24) unsigned, -f213 float(53) unsigned, -f214 float(24) zerofill, -f215 float(53) zerofill, -f216 float(24) unsigned zerofill, -f217 float(53) unsigned zerofill, -f218 date, -f219 time, -f220 datetime, -f221 timestamp, -f222 year, -f223 year(3), -f224 year(4), -f225 enum("1enum","2enum"), -f226 set("1set","2set"), -f235 char(0) unicode, -f236 char(90), -f237 char(255) ascii, -f238 varchar(0), -f239 varchar(3000) binary, -f240 varchar(2000) unicode, -f241 char(100) unicode -) engine = ndb; - ---replace_result $MYSQLTEST_VARDIR -eval -load data infile '$MYSQLTEST_VARDIR/std_data/funcs_1/ndb_tb4.txt' -into table tb4 ; diff --git a/mysql-test/suite/funcs_1/r/is_tables_innodb.result b/mysql-test/suite/funcs_1/r/is_tables_innodb.result index bc984b1b6fe..204ee893fd8 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_innodb.result +++ b/mysql-test/suite/funcs_1/r/is_tables_innodb.result @@ -7,11 +7,9 @@ CREATE TABLE test1.t2 (f1 VARCHAR(20)) ENGINE = ; CREATE TABLE test2.t1 (f1 VARCHAR(20)) ENGINE = ; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, 
+IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -99,11 +97,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_is.result b/mysql-test/suite/funcs_1/r/is_tables_is.result index ca80abe8472..f88afbbdf5a 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_is.result +++ b/mysql-test/suite/funcs_1/r/is_tables_is.result @@ -2,11 +2,9 @@ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -894,11 +892,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result index ca80abe8472..f88afbbdf5a 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result @@ -2,11 +2,9 @@ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -894,11 +892,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM 
information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_memory.result b/mysql-test/suite/funcs_1/r/is_tables_memory.result index 961fb98d573..acf6636cd38 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_memory.result +++ b/mysql-test/suite/funcs_1/r/is_tables_memory.result @@ -8,11 +8,9 @@ CREATE TABLE test1.t2 (f1 VARCHAR(20)) ENGINE = ; CREATE TABLE test2.t1 (f1 VARCHAR(20)) ENGINE = ; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -100,11 +98,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_myisam.result b/mysql-test/suite/funcs_1/r/is_tables_myisam.result index 1afb80ca3d7..5ef3fbaafab 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_myisam.result +++ b/mysql-test/suite/funcs_1/r/is_tables_myisam.result @@ -8,11 +8,9 @@ CREATE TABLE test1.t2 (f1 VARCHAR(20)) ENGINE = ; CREATE TABLE test2.t1 (f1 VARCHAR(20)) ENGINE = ; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -100,11 +98,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_myisam_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_myisam_embedded.result index ddf98bbded8..3ffa2662313 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_myisam_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_tables_myisam_embedded.result @@ -8,11 +8,9 @@ CREATE TABLE test1.t2 (f1 VARCHAR(20)) ENGINE = ; CREATE TABLE test2.t1 (f1 VARCHAR(20)) ENGINE = ; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS 
"user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -100,11 +98,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql.result b/mysql-test/suite/funcs_1/r/is_tables_mysql.result index 1eeaba15838..430c7fe2580 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_mysql.result +++ b/mysql-test/suite/funcs_1/r/is_tables_mysql.result @@ -2,11 +2,9 @@ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -708,11 +706,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result index 6c0b0db07eb..f1a6cc327b7 100644 --- a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result +++ b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result @@ -2,11 +2,9 @@ DROP DATABASE IF EXISTS test1; CREATE DATABASE test1; SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables @@ -708,11 +706,9 @@ GRANT SELECT ON test1.* TO testuser1@localhost; # Establish connection testuser1 (user=testuser1) SELECT *, LEFT( table_comment, -IF(INSTR(table_comment,'InnoDB free') = 0 -AND INSTR(table_comment,'number_of_replicas') = 0, +IF(INSTR(table_comment,'InnoDB free') = 0, LENGTH(table_comment), -INSTR(table_comment,'InnoDB free') -+ INSTR(table_comment,'number_of_replicas') - 1)) +INSTR(table_comment,'InnoDB free') - 1)) AS "user_comment", '-----------------------------------------------------' AS "Separator" FROM information_schema.tables diff --git a/mysql-test/suite/funcs_1/triggers/triggers_1011ext.inc b/mysql-test/suite/funcs_1/triggers/triggers_1011ext.inc index a5388dc80c3..dd8b2a87cd8 
100644 --- a/mysql-test/suite/funcs_1/triggers/triggers_1011ext.inc +++ b/mysql-test/suite/funcs_1/triggers/triggers_1011ext.inc @@ -410,8 +410,6 @@ let $message= Testcase y.y.y.5: Rollback of nested trigger references; set autocommit=0; start transaction; -# Bug#32656 NDB: Duplicate key error aborts transaction in handler. -# Doesn't talk back to SQL --error ER_WARN_DATA_OUT_OF_RANGE insert into t1 values (1); commit; diff --git a/mysql-test/suite/funcs_1/views/views_master.inc b/mysql-test/suite/funcs_1/views/views_master.inc index 1743bace705..c34773b6c43 100644 --- a/mysql-test/suite/funcs_1/views/views_master.inc +++ b/mysql-test/suite/funcs_1/views/views_master.inc @@ -1,9 +1,6 @@ #### suite/funcs_1/views/views_master.test # # Last Change: -# 2007-10-05 mleich -# 1. Fix for Bug#31237 Test "ndb_views" fails because of differing order ... -# 2. Cleanup of test # 2007-11-15 hhunger WL#4084: Review and fix all disabled tests ... let $message= ! Attention: The file with the expected results is not @@ -2945,10 +2942,6 @@ eval EXPLAIN SELECT * FROM test3.v$toplevel; #++++++++++++++++++++++++++++++++++++++++++++++ let $message= FIXME - Setting join_limit to 28 - hangs for higher values; --source include/show_msg.inc -# OBN - Reduced from 30 in 5.1.21 to avoid hitting the ndbcluster limit -# of "ERROR HY000: Got temporary error 4006 'Connect failure -# - out of connection objects (increase MaxNoOfConcurrentTransactions)' -# from NDBCLUSTER " to early; #SET @join_limit = 61; SET @join_limit = 28; # OBN - see above SET @max_level = @join_limit - 1; @@ -3918,7 +3911,7 @@ SELECT * FROM v1 order by f1, report; # # 3. Length of one base table column is reduced # We have to mangle within warnings the row numbers, because they are not -# deterministic in case of NDB. +# always deterministic in engines --replace_regex /at row [0-9]/at row / ALTER TABLE t1 CHANGE COLUMN f4 f4 CHAR(8); INSERT INTO t1 SET f1 = 3, f4 = '<-- 10 -->', report = 't1 3'; diff --git a/mysql-test/suite/funcs_2/charset/charset_master.test b/mysql-test/suite/funcs_2/charset/charset_master.test index dd02d7491cc..63cd5eb303d 100644 --- a/mysql-test/suite/funcs_2/charset/charset_master.test +++ b/mysql-test/suite/funcs_2/charset/charset_master.test @@ -2,7 +2,7 @@ # Author: Serge Kozlov # # Date: 2005-09-21 # # Purpose: used by ../t/*_charset.test # -# Require: set $engine_type= [NDB,MyISAM,InnoDB,etc] before calling # +# Require: set $engine_type= [MyISAM,InnoDB,etc] before calling # # # # Last modification: Matthias Leich # # Date: 2008-07-02 # @@ -696,4 +696,4 @@ let $coll= utf8_unicode_ci; } DROP database test; -CREATE database test; \ No newline at end of file +CREATE database test; diff --git a/mysql-test/suite/funcs_2/readme.txt b/mysql-test/suite/funcs_2/readme.txt index 53ce41cc845..b7861a8780d 100644 --- a/mysql-test/suite/funcs_2/readme.txt +++ b/mysql-test/suite/funcs_2/readme.txt @@ -9,11 +9,6 @@ for MySQL 5.0 only. All cases separated by 4 test scenarios (by engines): - innodb_charset.test; - memory_charset.test; - myisam_charset.test; - - ndb_charset.test; -Note: if you use standard binary distributions or compile from source tree -without cluster support then ndb_charset.test will be skipped. Use -BUILD/compile-*****-max shellscript for compilation with ndb support or -download MAX package. Before running the suite under Windows/cygwin make sure that all files inside it converted to unix text format. 
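The views_master.inc hunk above keeps the --replace_regex mangling and only re-attributes the varying warning row numbers to engines in general instead of NDB. For reference, a minimal sketch of that mysqltest idiom — the directive and the ALTER are taken from the hunk, while the warning text is only an assumed example:

--replace_regex /at row [0-9]/at row /
ALTER TABLE t1 CHANGE COLUMN f4 f4 CHAR(8);
# A truncation warning such as "Data truncated for column 'f4' at row 3" is
# recorded in the .result file with the digit stripped ("... at row "), so
# the expected output no longer depends on which row an engine reports.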
diff --git a/mysql-test/suite/innodb/r/innodb_multi_update.result b/mysql-test/suite/innodb/r/innodb_multi_update.result index 558fc3938a8..64f9ebc2fc2 100644 --- a/mysql-test/suite/innodb/r/innodb_multi_update.result +++ b/mysql-test/suite/innodb/r/innodb_multi_update.result @@ -75,7 +75,7 @@ a b 5 15 drop table bug38999_1,bug38999_2; # -# Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +# Bug#54475 improper error handling causes cascading crashing failures in innodb # CREATE TABLE t1(f1 INT) ENGINE=INNODB; INSERT INTO t1 VALUES(1); diff --git a/mysql-test/suite/innodb/t/innodb_multi_update.test b/mysql-test/suite/innodb/t/innodb_multi_update.test index 73ca0ba51dd..8d5283a9ed5 100644 --- a/mysql-test/suite/innodb/t/innodb_multi_update.test +++ b/mysql-test/suite/innodb/t/innodb_multi_update.test @@ -29,7 +29,7 @@ select * from bug38999_2; drop table bug38999_1,bug38999_2; --echo # ---echo # Bug#54475 improper error handling causes cascading crashing failures in innodb/ndb +--echo # Bug#54475 improper error handling causes cascading crashing failures in innodb --echo # CREATE TABLE t1(f1 INT) ENGINE=INNODB; INSERT INTO t1 VALUES(1); diff --git a/mysql-test/suite/parts/inc/partition.pre b/mysql-test/suite/parts/inc/partition.pre index f82916ae631..a78d80be20e 100644 --- a/mysql-test/suite/parts/inc/partition.pre +++ b/mysql-test/suite/parts/inc/partition.pre @@ -25,23 +25,6 @@ # Set the session storage engine eval SET @@session.default_storage_engine = $engine; -##### Disabled/affected testcases, because of open bugs ##### -# --echo -# --echo #------------------------------------------------------------------------ -# --echo # There are several testcases disabled because of the open bugs -# if (`SELECT @@session.default_storage_engine IN('ndbcluster')`) -# { -# --echo # #18730 -# } -# --echo #------------------------------------------------------------------------ -# # Attention: Only bugs appearing in all storage engines should be mentioned above. -# # The top level test wrapper (example: t/partition_basic_ndb.test) -# # may set the $fixed_bug variable to 0 after sourcing -# # this file. -# # Bug#18730: Partitions: NDB, crash on SELECT MIN() -# # Attention: NDB testcases set this variable later to 0 -# let $fixed_bug18730= 1; - --echo --echo #------------------------------------------------------------------------ --echo # 0. Setting of auxiliary variables + Creation of an auxiliary tables @@ -127,18 +110,7 @@ f_char1 CHAR(20), f_char2 CHAR(20), f_charbig VARCHAR(1000); -# Currently (April 2006) the default compiled NDB cannot manage -# no_of_partitions (no subpartitioning) > 8 -# no_of_partitions * no_of_subpartitions > 8 -# This NDB specific limitation will cause -# 1005: Can't create table 'test.t1' (errno: 1224) -# in partition_methods[1|2].inc and partition_alter_1[1|3].inc -# when $sub_part_no is set to >= 3. let $sub_part_no= 3; -if (`SELECT @@session.default_storage_engine = 'ndbcluster'`) -{ - let $sub_part_no= 2; -} # Auxiliary table used for many experiments (INSERT INTO t1 ... SELECT ...) # on the tables to be checked @@ -297,16 +269,11 @@ if (0) # Examples for the main rgression tests: # InnoDB - The PRIMARY KEY is a clustered index where the data for the # rows are stored. $do_pk_tests= 1 -# NDB - The PRIMARY KEY is used for implicit partitioning (NDB). -# $do_pk_tests= 1 # MyISAM - AFAIK there is no effect on the tree containing the rows. 
# $do_pk_tests= 0 # # Assign a big number smaller than the maximum value for partitions # and smaller than the maximum value of SIGNED INTEGER -# The NDB handler only supports 32 bit integers in VALUES -# 2147483647 seems to be too big. -# $MAX_VALUE= (2147483646); # # # 2. Typical architecture of a test: diff --git a/mysql-test/suite/parts/inc/partition_auto_increment.inc b/mysql-test/suite/parts/inc/partition_auto_increment.inc index 034460d49ac..45406bd145a 100644 --- a/mysql-test/suite/parts/inc/partition_auto_increment.inc +++ b/mysql-test/suite/parts/inc/partition_auto_increment.inc @@ -575,7 +575,7 @@ INSERT INTO t1 VALUES (3, NULL), (2, 0), (2, NULL); INSERT INTO t1 VALUES (2, 2); if (!$mysql_errno) { - echo # ERROR (only OK if Blackhole/NDB) should give ER_DUP_KEY or ER_DUP_ENTRY; + echo # ERROR (only OK if Blackhole) should give ER_DUP_KEY or ER_DUP_ENTRY; echo # mysql_errno: $mysql_errno; } INSERT INTO t1 VALUES (2, 22), (2, NULL); diff --git a/mysql-test/suite/parts/inc/partition_check.inc b/mysql-test/suite/parts/inc/partition_check.inc index 235764a034f..ed323dfa245 100644 --- a/mysql-test/suite/parts/inc/partition_check.inc +++ b/mysql-test/suite/parts/inc/partition_check.inc @@ -324,7 +324,6 @@ FROM t1; # DEBUG SELECT COUNT(*),MIN(f_int1),MAX(f_int1) FROM t1; # # 3.4 (mass) Update @max_row_div4 * 2 + 1 records -# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response UPDATE t1 SET f_int1 = f_int1 + @max_row WHERE f_int1 BETWEEN @max_row_div2 - @max_row_div4 AND @max_row_div2 + @max_row_div4; @@ -431,7 +430,6 @@ WHERE f_int1 = @cur_value AND f_int2 = @cur_value # record to another partition/subpartition might appear. SELECT MIN(f_int1) INTO @cur_value1 FROM t1; SELECT MAX(f_int1) + 1 INTO @cur_value2 FROM t1; -# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response UPDATE t1 SET f_int1 = @cur_value2 WHERE f_int1 = @cur_value1 AND f_charbig = '#SINGLE#'; # Check of preceding statement via Select @@ -451,8 +449,6 @@ WHERE f_int1 = @cur_value2 AND f_charbig = '#SINGLE#'; SET @cur_value1= -1; SELECT MAX(f_int1) INTO @cur_value2 FROM t1; # Bug#15968: Partitions: crash when INSERT with f_int1 = -1 into PARTITION BY HASH(f_int1) -# Bug#16385: Partitions: crash when updating a range partitioned NDB table -# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response UPDATE t1 SET f_int1 = @cur_value1 WHERE f_int1 = @cur_value2 AND f_charbig = '#SINGLE#'; # Check of preceding statement via Select @@ -538,7 +534,6 @@ eval DELETE FROM t1 WHERE f_charbig = '#$max_int_4##'; # Let's INSERT a record where the result of the partitioning function is # probably (depends on function currently used) zero and look if there are # any strange effects during the execution of the next statements. -# Bug#17891: Partitions: NDB, crash on select .. where col is null or col = value # Bug#18659: Partitions: wrong result on WHERE IS NULL DELETE FROM t1 WHERE f_int1 IS NULL OR f_int1 = 0; # Attention: Zero should be tested @@ -965,7 +960,6 @@ let $tab_has_trigg= t0_aux; let $tab_in_trigg= t1; # Insert three records, which will be updated by the trigger -# Bug#18735: Partitions: NDB, UNIQUE INDEX, UPDATE, strange server response eval INSERT INTO $tab_in_trigg(f_int1,f_int2,f_char1,f_char2,f_charbig) SELECT f_int1,f_int2,f_char1,f_char2,'just inserted' FROM t0_template WHERE f_int1 BETWEEN @max_row_div2 - 1 AND @max_row_div2 + 1; @@ -1136,7 +1130,6 @@ CHECKSUM TABLE t1 EXTENDED; # ... , it is mapped to ALTER TABLE, which rebuilds the table. 
# Rebuilding updates index statistics and frees unused space in the # clustered index. -# FIXME What will happen with NDB ? OPTIMIZE TABLE t1; --source suite/parts/inc/partition_layout_check2.inc # 10.2 REPAIR TABLE diff --git a/mysql-test/suite/parts/inc/partition_mgm.inc b/mysql-test/suite/parts/inc/partition_mgm.inc index 75834228d7a..5386a929db0 100644 --- a/mysql-test/suite/parts/inc/partition_mgm.inc +++ b/mysql-test/suite/parts/inc/partition_mgm.inc @@ -9,10 +9,8 @@ # Uses following variables: # # engine Use specified storage engine # # can_only_key Storage engine only able to use HASH/KEY (not range/list) # -# (E.g. not ndbcluster) # # part_optA-D Extra partitioning options (E.g. INDEX/DATA DIR) # # # -# have_bug33158 NDB case insensitive create, but case sensitive rename # # no_truncate No support for truncate partition # #------------------------------------------------------------------------------# # Original Author: mattiasj # @@ -144,8 +142,6 @@ SHOW CREATE TABLE tablea; } if (!$lower_case_table_names) { -if (!$have_bug33158) -{ eval CREATE TABLE tablea (a INT) ENGINE = $engine PARTITION BY KEY (a) @@ -164,7 +160,6 @@ RENAME TABLE tableA to TableA; SHOW CREATE TABLE tablea; DROP TABLE tablea; } -} --echo # Test of REMOVE PARTITIONING ALTER TABLE TableA REMOVE PARTITIONING; diff --git a/mysql-test/suite/parts/inc/partition_supported_sql_funcs.inc b/mysql-test/suite/parts/inc/partition_supported_sql_funcs.inc index 0de6bd7d2f5..867bff4e98e 100644 --- a/mysql-test/suite/parts/inc/partition_supported_sql_funcs.inc +++ b/mysql-test/suite/parts/inc/partition_supported_sql_funcs.inc @@ -10,8 +10,6 @@ # Original Date: 2006-11-22 # # Change Author: MattiasJ # # Change Date: 2008-05-15 # -# Change: Added $max_8_partitions since ndb only capable of 8 partitions # -# and $no_reorg_partition since ndb does not support that # ################################################################################ --echo ------------------------------------------------------------------------- --echo --- $sqlfunc in partition with coltype $coltype diff --git a/mysql-test/suite/parts/inc/partition_syntax_2.inc b/mysql-test/suite/parts/inc/partition_syntax_2.inc index b8e728ee79b..f7e201bbf55 100644 --- a/mysql-test/suite/parts/inc/partition_syntax_2.inc +++ b/mysql-test/suite/parts/inc/partition_syntax_2.inc @@ -12,8 +12,6 @@ # Change: # ################################################################################ -if (`SELECT @@session.storage_engine NOT IN('ndbcluster')`) -{ --error ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF eval CREATE TABLE t1 ( $column_list, @@ -26,23 +24,3 @@ if (`SELECT @@session.storage_engine NOT IN('ndbcluster')`) PRIMARY KEY(f_int1,f_int2), $unique_index ) $partition_scheme; -} -if (`SELECT @@session.storage_engine IN('ndbcluster')`) -{ - eval CREATE TABLE t1 ( - $column_list, - $unique_index - ) - $partition_scheme; - eval $insert_all; - --source suite/parts/inc/partition_check.inc - DROP TABLE t1; - eval CREATE TABLE t1 ( - $column_list, - PRIMARY KEY(f_int1,f_int2), $unique_index - ) - $partition_scheme; - eval $insert_all; - --source suite/parts/inc/partition_check.inc - DROP TABLE t1; -} diff --git a/mysql-test/suite/parts/inc/partition_trigg3.inc b/mysql-test/suite/parts/inc/partition_trigg3.inc index b56847ada44..34dbf4e2cd1 100644 --- a/mysql-test/suite/parts/inc/partition_trigg3.inc +++ b/mysql-test/suite/parts/inc/partition_trigg3.inc @@ -45,7 +45,7 @@ END| delimiter ;| # Additional statements because of Bug(limitation)#17704 SET @counter = 1; -# Bug#18730 
Partitions: NDB, crash on SELECT MIN() +# Bug#18730 Partitions: crash on SELECT MIN() SELECT MAX(f_int1), MIN(f_int2) INTO @my_max1,@my_min2 FROM t1; # Additional statements end eval $statement; diff --git a/mysql-test/suite/parts/inc/partition_value.inc b/mysql-test/suite/parts/inc/partition_value.inc index 3e25e740de6..79c4d6b8187 100644 --- a/mysql-test/suite/parts/inc/partition_value.inc +++ b/mysql-test/suite/parts/inc/partition_value.inc @@ -80,11 +80,6 @@ eval INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) VALUES(NULL,NULL,NULL,NULL,NULL); eval SELECT COUNT(*) = 1 FROM t1 WHERE f_char1 IS NULL; DROP TABLE t1; -# -# The NDB handler only supports 32 bit integers in VALUES -# therefor we have to skip the next test for NDB. -if (`SELECT @@session.storage_engine NOT IN('ndbcluster')`) -{ --echo # 3. LIST() eval CREATE TABLE t1 ( $column_list @@ -111,7 +106,6 @@ eval INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig) VALUES(NULL,NULL,NULL,NULL,NULL); eval SELECT COUNT(*) = 1 FROM t1 WHERE f_char1 IS NULL; DROP TABLE t1; -} # --echo # 4. Partition by RANGE(...) subpartition by HASH() eval CREATE TABLE t1 ( diff --git a/mysql-test/suite/parts/r/partition_auto_increment_blackhole.result b/mysql-test/suite/parts/r/partition_auto_increment_blackhole.result index 2344f03ce3f..03e87bc22fb 100644 --- a/mysql-test/suite/parts/r/partition_auto_increment_blackhole.result +++ b/mysql-test/suite/parts/r/partition_auto_increment_blackhole.result @@ -510,7 +510,7 @@ INSERT INTO t1 VALUES (2, NULL); INSERT INTO t1 VALUES (3, NULL); INSERT INTO t1 VALUES (3, NULL), (2, 0), (2, NULL); INSERT INTO t1 VALUES (2, 2); -# ERROR (only OK if Blackhole/NDB) should give ER_DUP_KEY or ER_DUP_ENTRY +# ERROR (only OK if Blackhole) should give ER_DUP_KEY or ER_DUP_ENTRY # mysql_errno: 0 INSERT INTO t1 VALUES (2, 22), (2, NULL); SELECT * FROM t1 ORDER BY c1,c2; diff --git a/mysql-test/suite/rpl/r/rpl_foreign_key_innodb.result b/mysql-test/suite/rpl/r/rpl_foreign_key_innodb.result index f778e76adc0..c30bac3d8c0 100644 --- a/mysql-test/suite/rpl/r/rpl_foreign_key_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_foreign_key_innodb.result @@ -32,7 +32,7 @@ SET TIMESTAMP=1000000000; CREATE TABLE t3 ( a INT UNIQUE ); SET FOREIGN_KEY_CHECKS=0; INSERT INTO t3 VALUES (1),(1); -Got one of the listed errors +ERROR 23000: Duplicate entry '1' for key 'a' SET FOREIGN_KEY_CHECKS=0; DROP TABLE IF EXISTS t1,t2,t3; SET FOREIGN_KEY_CHECKS=1; diff --git a/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result b/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result index c7241c52353..ef68c81bde8 100644 --- a/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result +++ b/mysql-test/suite/rpl/r/rpl_row_basic_8partition.result @@ -2,9 +2,6 @@ include/master-slave.inc [connection master] DROP TABLE IF EXISTS t1; SET @@BINLOG_FORMAT = ROW; -SELECT @@BINLOG_FORMAT; -@@BINLOG_FORMAT -ROW **** Partition RANGE testing **** CREATE TABLE t1 (id MEDIUMINT NOT NULL, b1 BIT(8), vc VARCHAR(255), bc CHAR(255), d DECIMAL(10,4) DEFAULT 0, diff --git a/mysql-test/suite/rpl/r/rpl_row_trig004.result b/mysql-test/suite/rpl/r/rpl_row_trig004.result index 50317b55e2e..075616b58cf 100644 --- a/mysql-test/suite/rpl/r/rpl_row_trig004.result +++ b/mysql-test/suite/rpl/r/rpl_row_trig004.result @@ -8,7 +8,7 @@ CREATE TABLE test.t2 (n MEDIUMINT NOT NULL, f FLOAT, PRIMARY KEY(n))ENGINE=INNOD CREATE TRIGGER test.t1_bi_t2 BEFORE INSERT ON test.t2 FOR EACH ROW INSERT INTO test.t1 VALUES (NULL, 1.234)// INSERT INTO test.t2 VALUES (1, 0.0); INSERT INTO test.t2 
VALUES (1, 0.0); -Got one of the listed errors +ERROR 23000: Duplicate entry '1' for key 'PRIMARY' select * from test.t1; n d 1 1.234 diff --git a/mysql-test/suite/rpl/r/rpl_row_view01.result b/mysql-test/suite/rpl/r/rpl_row_view01.result index 471de20ea6d..08369f60233 100644 --- a/mysql-test/suite/rpl/r/rpl_row_view01.result +++ b/mysql-test/suite/rpl/r/rpl_row_view01.result @@ -9,7 +9,6 @@ DROP TABLE IF EXISTS mysqltest1.t3; DROP TABLE IF EXISTS mysqltest1.t1; DROP TABLE IF EXISTS mysqltest1.t2; DROP TABLE IF EXISTS mysqltest1.t4; -DROP TABLE IF EXISTS mysqltest1.t10; CREATE TABLE mysqltest1.t1 (a INT, c CHAR(6),PRIMARY KEY(a)); CREATE TABLE mysqltest1.t2 (a INT, c CHAR(6),PRIMARY KEY(a)); CREATE TABLE mysqltest1.t3 (a INT, c CHAR(6), c2 CHAR(6), PRIMARY KEY(a)); @@ -32,7 +31,6 @@ a c c2 1 Thank GOD 2 it is 3 Friday TGIF -CREATE TABLE mysqltest1.t10 (a INT, PRIMARY KEY(a)); SELECT * FROM mysqltest1.v2; qty price value 3 50 150 @@ -96,6 +94,5 @@ DROP TABLE IF EXISTS mysqltest1.t3; DROP TABLE IF EXISTS mysqltest1.t1; DROP TABLE IF EXISTS mysqltest1.t2; DROP TABLE IF EXISTS mysqltest1.t4; -DROP TABLE IF EXISTS mysqltest1.t10; DROP DATABASE mysqltest1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_EE_err.test b/mysql-test/suite/rpl/t/rpl_EE_err.test index 5272b11f342..28c5af0a192 100644 --- a/mysql-test/suite/rpl/t/rpl_EE_err.test +++ b/mysql-test/suite/rpl/t/rpl_EE_err.test @@ -1,3 +1,2 @@ --- source include/not_ndb_default.inc let $engine_type=myisam; -- source extra/rpl_tests/rpl_EE_err.test diff --git a/mysql-test/suite/rpl/t/rpl_auto_increment.test b/mysql-test/suite/rpl/t/rpl_auto_increment.test index 8869e3745db..fe0f1689471 100644 --- a/mysql-test/suite/rpl/t/rpl_auto_increment.test +++ b/mysql-test/suite/rpl/t/rpl_auto_increment.test @@ -1,7 +1,6 @@ ##################################### # Wrapper for rpl_auto_increment.test# ##################################### --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=innodb; let $engine_type2=myisam; diff --git a/mysql-test/suite/rpl/t/rpl_bit.test b/mysql-test/suite/rpl/t/rpl_bit.test index 67606e5d509..305a2abcf3a 100644 --- a/mysql-test/suite/rpl/t/rpl_bit.test +++ b/mysql-test/suite/rpl/t/rpl_bit.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_bug26395.test b/mysql-test/suite/rpl/t/rpl_bug26395.test index 9f45db85fb7..2d6252c3ad9 100644 --- a/mysql-test/suite/rpl/t/rpl_bug26395.test +++ b/mysql-test/suite/rpl/t/rpl_bug26395.test @@ -4,9 +4,6 @@ # just before writing the XID log event is executed correctly. The # master rolls back, so the slave should not execute statement. 
# -# This test was previously part of rpl_ndb_transaction.test -# -# # ==== Method ==== # # We want master to be alive so that it can replicate the statement to diff --git a/mysql-test/suite/rpl/t/rpl_commit_after_flush.test b/mysql-test/suite/rpl/t/rpl_commit_after_flush.test index 5e070b14301..1e2ca875190 100644 --- a/mysql-test/suite/rpl/t/rpl_commit_after_flush.test +++ b/mysql-test/suite/rpl/t/rpl_commit_after_flush.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/master-slave.inc let $engine_type=innodb; diff --git a/mysql-test/suite/rpl/t/rpl_ddl.test b/mysql-test/suite/rpl/t/rpl_ddl.test index 89ae2c03242..2045ec97edd 100644 --- a/mysql-test/suite/rpl/t/rpl_ddl.test +++ b/mysql-test/suite/rpl/t/rpl_ddl.test @@ -22,7 +22,6 @@ # effects like failing 'sync_slave_with_master', crashes of the slave or # abort of the test case etc.. # ---source include/not_ndb_default.inc --source include/have_innodb.inc --source include/master-slave.inc let $engine_type= InnoDB; diff --git a/mysql-test/suite/rpl/t/rpl_deadlock_innodb.test b/mysql-test/suite/rpl/t/rpl_deadlock_innodb.test index 14776263516..ade915da105 100644 --- a/mysql-test/suite/rpl/t/rpl_deadlock_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_deadlock_innodb.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/long_test.inc let $engine_type=innodb; diff --git a/mysql-test/suite/rpl/t/rpl_delete_no_where.test b/mysql-test/suite/rpl/t/rpl_delete_no_where.test index 4bfe6d07b5a..9ab637063e0 100644 --- a/mysql-test/suite/rpl/t/rpl_delete_no_where.test +++ b/mysql-test/suite/rpl/t/rpl_delete_no_where.test @@ -1,7 +1,3 @@ -################################################### -# By JBM 2006-02-14 added to skip test when NDB # -################################################## --- source include/not_ndb_default.inc -- source include/master-slave.inc let $engine_type=myisam; -- source extra/rpl_tests/rpl_delete_no_where.test diff --git a/mysql-test/suite/rpl/t/rpl_drop_temp.test b/mysql-test/suite/rpl/t/rpl_drop_temp.test index 08227c18fea..d77632d2ed1 100644 --- a/mysql-test/suite/rpl/t/rpl_drop_temp.test +++ b/mysql-test/suite/rpl/t/rpl_drop_temp.test @@ -2,8 +2,6 @@ # Change Author: JBM # Change Date: 2006-02-07 # Change: Added ENGINE=MyISAM -# Purpose: According to TU in 16552 This is how -# to work around NDB's issue with temp tables ############################################## source include/have_binlog_format_mixed_or_statement.inc; source include/master-slave.inc; diff --git a/mysql-test/suite/rpl/t/rpl_err_ignoredtable.test b/mysql-test/suite/rpl/t/rpl_err_ignoredtable.test index 31a36a7e0aa..9aee3582574 100644 --- a/mysql-test/suite/rpl/t/rpl_err_ignoredtable.test +++ b/mysql-test/suite/rpl/t/rpl_err_ignoredtable.test @@ -2,8 +2,6 @@ # Bug #797: If a query is ignored on slave (replicate-ignore-table) the slave # still checks that it has the same error as on the master. 
########################################################################## -# 2006-02-07 JBM Added error code 1022 for NDB Engine + ORDER BY -########################################################################## -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_failed_optimize.test b/mysql-test/suite/rpl/t/rpl_failed_optimize.test index 798b3ef8b41..99860f16966 100644 --- a/mysql-test/suite/rpl/t/rpl_failed_optimize.test +++ b/mysql-test/suite/rpl/t/rpl_failed_optimize.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=InnoDB; -- source extra/rpl_tests/rpl_failed_optimize.test diff --git a/mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test b/mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test index ce28c0334ec..ed28c2e9d1c 100644 --- a/mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_foreign_key_innodb.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=INNODB; -- source extra/rpl_tests/rpl_foreign_key.test diff --git a/mysql-test/suite/rpl/t/rpl_insert_id.test b/mysql-test/suite/rpl/t/rpl_insert_id.test index f2f62a207a0..ad32ad24b33 100644 --- a/mysql-test/suite/rpl/t/rpl_insert_id.test +++ b/mysql-test/suite/rpl/t/rpl_insert_id.test @@ -1,7 +1,6 @@ ################################# # Wrapper for rpl_insert_id.test# ################################# --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=myisam; -- source extra/rpl_tests/rpl_insert_id.test diff --git a/mysql-test/suite/rpl/t/rpl_insert_id_pk.test b/mysql-test/suite/rpl/t/rpl_insert_id_pk.test index c0d68855f85..148afdac87b 100644 --- a/mysql-test/suite/rpl/t/rpl_insert_id_pk.test +++ b/mysql-test/suite/rpl/t/rpl_insert_id_pk.test @@ -1,7 +1,6 @@ ################################# # Wrapper for rpl_insert_id.test# ################################# --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=innodb; -- source extra/rpl_tests/rpl_insert_id_pk.test diff --git a/mysql-test/suite/rpl/t/rpl_insert_ignore.test b/mysql-test/suite/rpl/t/rpl_insert_ignore.test index 0346975fcee..0891a04db25 100644 --- a/mysql-test/suite/rpl/t/rpl_insert_ignore.test +++ b/mysql-test/suite/rpl/t/rpl_insert_ignore.test @@ -1,7 +1,6 @@ ##################################### # Wrapper for rpl_insert_ignore.test# ##################################### --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_loaddata.test b/mysql-test/suite/rpl/t/rpl_loaddata.test index 9f4ca1aaf18..76b008caa92 100644 --- a/mysql-test/suite/rpl/t/rpl_loaddata.test +++ b/mysql-test/suite/rpl/t/rpl_loaddata.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_binlog_format_statement.inc let $engine_type=MyISAM; diff --git a/mysql-test/suite/rpl/t/rpl_loadfile.test b/mysql-test/suite/rpl/t/rpl_loadfile.test index 3f897faac48..30a7ae2f613 100644 --- a/mysql-test/suite/rpl/t/rpl_loadfile.test +++ b/mysql-test/suite/rpl/t/rpl_loadfile.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## # Includes diff --git a/mysql-test/suite/rpl/t/rpl_log_pos.test b/mysql-test/suite/rpl/t/rpl_log_pos.test index 1270933ede1..4f63cd32916 100644 --- 
a/mysql-test/suite/rpl/t/rpl_log_pos.test +++ b/mysql-test/suite/rpl/t/rpl_log_pos.test @@ -1,7 +1,6 @@ ########## # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## # diff --git a/mysql-test/suite/rpl/t/rpl_multi_engine.test b/mysql-test/suite/rpl/t/rpl_multi_engine.test index b1dbf99f114..27ba3b3648e 100644 --- a/mysql-test/suite/rpl/t/rpl_multi_engine.test +++ b/mysql-test/suite/rpl/t/rpl_multi_engine.test @@ -1,9 +1,5 @@ # See if replication between MyISAM, MEMORY and InnoDB works. -#This test case is not written for NDB, result files do not -#match when NDB is the default engine --- source include/not_ndb_default.inc - -- source include/master-slave.inc connection slave; diff --git a/mysql-test/suite/rpl/t/rpl_multi_update.test b/mysql-test/suite/rpl/t/rpl_multi_update.test index 710337816da..cc147eb3263 100644 --- a/mysql-test/suite/rpl/t/rpl_multi_update.test +++ b/mysql-test/suite/rpl/t/rpl_multi_update.test @@ -1,3 +1,2 @@ --- source include/not_ndb_default.inc let $engine_type=MyISAM; -- source extra/rpl_tests/rpl_multi_update.test diff --git a/mysql-test/suite/rpl/t/rpl_multi_update2.test b/mysql-test/suite/rpl/t/rpl_multi_update2.test index 138c1455952..95952cd9e15 100644 --- a/mysql-test/suite/rpl/t/rpl_multi_update2.test +++ b/mysql-test/suite/rpl/t/rpl_multi_update2.test @@ -1,10 +1,7 @@ ####################################################### # Wrapper for rpl_multi_update2.test to allow multi # # Engines to reuse test code. By JBM 2006-02-15 # -# Added comments section and to skip when ndb is # -# Default engine. # ####################################################### ---source include/not_ndb_default.inc --source include/master-slave.inc call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT'); let $engine_type=MyISAM; diff --git a/mysql-test/suite/rpl/t/rpl_multi_update3.test b/mysql-test/suite/rpl/t/rpl_multi_update3.test index dc12d528c24..d356ca88517 100644 --- a/mysql-test/suite/rpl/t/rpl_multi_update3.test +++ b/mysql-test/suite/rpl/t/rpl_multi_update3.test @@ -1,10 +1,7 @@ ####################################################### # Wrapper for rpl_multi_update3.test to allow multi # # Engines to reuse test code. By JBM 2006-02-15 # -# Added comments section and to skip when ndb is # -# Default engine. # ####################################################### ---source include/not_ndb_default.inc --source include/master-slave.inc call mtr.add_suppression('Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT.'); let $engine_type=MyISAM; diff --git a/mysql-test/suite/rpl/t/rpl_optimize.test b/mysql-test/suite/rpl/t/rpl_optimize.test index 47063539ffc..bb960d4eb69 100644 --- a/mysql-test/suite/rpl/t/rpl_optimize.test +++ b/mysql-test/suite/rpl/t/rpl_optimize.test @@ -4,13 +4,10 @@ ##################################### # Change Author: JBM # Change Date: 2006-02-09 -# Change: NDB does not and will not support # OPTIMIZE for memory tables. If and when # it does support for Disk Data, a new # version of this test will be need. 
-# Skipping this test if default engine = ndb ##################################### --- source include/not_ndb_default.inc # Slow test, don't run during staging part -- source include/not_staging.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_ps.test b/mysql-test/suite/rpl/t/rpl_ps.test index b3d07dd1400..b4ea3a385b1 100644 --- a/mysql-test/suite/rpl/t/rpl_ps.test +++ b/mysql-test/suite/rpl/t/rpl_ps.test @@ -1,9 +1,6 @@ # # Test of replicating user variables # -########################################################### -# 2006-02-08 By JBM added order by for use w/ NDB engine -########################################################### source include/master-slave.inc; #sync_slave_with_master; diff --git a/mysql-test/suite/rpl/t/rpl_rbr_to_sbr.test b/mysql-test/suite/rpl/t/rpl_rbr_to_sbr.test index 30cc564917a..d7c82266ebf 100644 --- a/mysql-test/suite/rpl/t/rpl_rbr_to_sbr.test +++ b/mysql-test/suite/rpl/t/rpl_rbr_to_sbr.test @@ -1,5 +1,4 @@ -- source include/have_binlog_format_mixed.inc --- source include/not_ndb_default.inc -- source include/master-slave.inc # Test that the slave temporarily switches to ROW when seeing row diff --git a/mysql-test/suite/rpl/t/rpl_relay_space_innodb.test b/mysql-test/suite/rpl/t/rpl_relay_space_innodb.test index ca16a583fe5..87df66bc148 100644 --- a/mysql-test/suite/rpl/t/rpl_relay_space_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_relay_space_innodb.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=InnoDB; -- source extra/rpl_tests/rpl_sv_relay_space.test diff --git a/mysql-test/suite/rpl/t/rpl_relay_space_myisam.test b/mysql-test/suite/rpl/t/rpl_relay_space_myisam.test index 13719cbdc33..e022921a5bb 100644 --- a/mysql-test/suite/rpl/t/rpl_relay_space_myisam.test +++ b/mysql-test/suite/rpl/t/rpl_relay_space_myisam.test @@ -1,3 +1,2 @@ --- source include/not_ndb_default.inc let $engine_type=MyISAM; -- source extra/rpl_tests/rpl_sv_relay_space.test diff --git a/mysql-test/suite/rpl/t/rpl_relayrotate.test b/mysql-test/suite/rpl/t/rpl_relayrotate.test index f187fbc033f..4c0840446ec 100644 --- a/mysql-test/suite/rpl/t/rpl_relayrotate.test +++ b/mysql-test/suite/rpl/t/rpl_relayrotate.test @@ -1,10 +1,7 @@ ####################################################### # Wrapper for rpl_relayrotate.test to allow multi # # Engines to reuse test code. By JBM 2006-02-15 # -# Added comments section and to skip when ndb is # -# Default engine. 
# ####################################################### --- source include/not_ndb_default.inc -- source include/have_innodb.inc # Slow test, don't run during staging part -- source include/not_staging.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_001.test b/mysql-test/suite/rpl/t/rpl_row_001.test index 96521280afd..47b6da75dd6 100644 --- a/mysql-test/suite/rpl/t/rpl_row_001.test +++ b/mysql-test/suite/rpl/t/rpl_row_001.test @@ -1,8 +1,6 @@ ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc # Slow test, don't run during staging part -- source include/not_staging.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_USER.test b/mysql-test/suite/rpl/t/rpl_row_USER.test index 010e0e6ea05..7e05e4e0041 100644 --- a/mysql-test/suite/rpl/t/rpl_row_USER.test +++ b/mysql-test/suite/rpl/t/rpl_row_USER.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB ########## # Includes diff --git a/mysql-test/suite/rpl/t/rpl_row_UUID.test b/mysql-test/suite/rpl/t/rpl_row_UUID.test index ce3a4ed40a9..1de44af8156 100644 --- a/mysql-test/suite/rpl/t/rpl_row_UUID.test +++ b/mysql-test/suite/rpl/t/rpl_row_UUID.test @@ -1,8 +1,6 @@ ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc let $engine_type=myisam; diff --git a/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test b/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test index 9e99c1eb0b3..f4b371b737f 100644 --- a/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test +++ b/mysql-test/suite/rpl/t/rpl_row_basic_11bugs.test @@ -4,10 +4,6 @@ let $SERVER_VERSION=`select version()`; -#This test case is not written for NDB, the result files -#will not match when NDB is the default engine ---source include/not_ndb_default.inc - --source include/master-slave.inc # Add suppression for expected warning(s) in slaves error log diff --git a/mysql-test/suite/rpl/t/rpl_row_basic_8partition.test b/mysql-test/suite/rpl/t/rpl_row_basic_8partition.test index 3ec79ec8cd5..4262d765148 100644 --- a/mysql-test/suite/rpl/t/rpl_row_basic_8partition.test +++ b/mysql-test/suite/rpl/t/rpl_row_basic_8partition.test @@ -9,15 +9,12 @@ --source include/have_binlog_format_row.inc --source include/have_partition.inc ---source include/not_ndb_default.inc --source include/master-slave.inc connection master; --disable_warnings DROP TABLE IF EXISTS t1; -let $maybe_ro_var = @@BINLOG_FORMAT; -let $val4var = ROW; ---source include/safe_set_to_maybe_ro_var.inc +SET @@BINLOG_FORMAT = ROW; --echo **** Partition RANGE testing **** diff --git a/mysql-test/suite/rpl/t/rpl_row_blob_innodb.test b/mysql-test/suite/rpl/t/rpl_row_blob_innodb.test index eaf86688a29..5c1c705f48d 100644 --- a/mysql-test/suite/rpl/t/rpl_row_blob_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_row_blob_innodb.test @@ -3,9 +3,7 @@ ################################# ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # 
######################################################## --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_blob_myisam.test b/mysql-test/suite/rpl/t/rpl_row_blob_myisam.test index 482ccb7ecc0..e83da6ba142 100644 --- a/mysql-test/suite/rpl/t/rpl_row_blob_myisam.test +++ b/mysql-test/suite/rpl/t/rpl_row_blob_myisam.test @@ -3,9 +3,7 @@ ################################# ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc let $engine_type=myisam; diff --git a/mysql-test/suite/rpl/t/rpl_row_delayed_ins.test b/mysql-test/suite/rpl/t/rpl_row_delayed_ins.test index db41ff09117..b898a9a120a 100644 --- a/mysql-test/suite/rpl/t/rpl_row_delayed_ins.test +++ b/mysql-test/suite/rpl/t/rpl_row_delayed_ins.test @@ -1,3 +1,2 @@ --- source include/not_ndb_default.inc let $engine_type=myisam; -- source extra/rpl_tests/rpl_row_delayed_ins.test diff --git a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test index 2429dbc1142..71cd4a5b9ae 100644 --- a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test +++ b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test @@ -8,10 +8,6 @@ let $rename_event_pos= `select @binlog_start_pos + 819`; # The use of the ps protocol causes extra table maps in the binlog, so # we disable the ps-protocol for this statement. -# Merge tables are not supported in NDB --- source include/not_ndb_default.inc - - --disable_ps_protocol -- source extra/rpl_tests/rpl_flsh_tbls.test --enable_ps_protocol diff --git a/mysql-test/suite/rpl/t/rpl_row_func003.test b/mysql-test/suite/rpl/t/rpl_row_func003.test index a17d70e4b04..c153bc70253 100644 --- a/mysql-test/suite/rpl/t/rpl_row_func003.test +++ b/mysql-test/suite/rpl/t/rpl_row_func003.test @@ -3,9 +3,7 @@ ################################### ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_loaddata_concurrent.test b/mysql-test/suite/rpl/t/rpl_row_loaddata_concurrent.test index cef259687fb..af4efed872f 100644 --- a/mysql-test/suite/rpl/t/rpl_row_loaddata_concurrent.test +++ b/mysql-test/suite/rpl/t/rpl_row_loaddata_concurrent.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_log_bin.inc -- source include/have_binlog_format_row.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_log.test b/mysql-test/suite/rpl/t/rpl_row_log.test index b156a4c8dd4..1d37d4327d0 100644 --- a/mysql-test/suite/rpl/t/rpl_row_log.test +++ b/mysql-test/suite/rpl/t/rpl_row_log.test @@ -1,15 +1,12 @@ ################################### # Wrapper for rpl_row_log.test # # Added wrapper so that MyISAM & # -# Innodb and NDB could all use the# -# Same test. 
NDB produced a diff # -# bin-log # +# Innodb could all use the# +# Same test ################################### ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc let $engine_type=MyISAM; diff --git a/mysql-test/suite/rpl/t/rpl_row_log_innodb.test b/mysql-test/suite/rpl/t/rpl_row_log_innodb.test index 631fb29c7bc..0eccde7ecb9 100644 --- a/mysql-test/suite/rpl/t/rpl_row_log_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_row_log_innodb.test @@ -1,11 +1,9 @@ ################################### # Wrapper for rpl_row_log.test # # Added wrapper so that MyISAM & # -# Innodb and NDB could all use the# -# Same test. NDB produced a diff # -# bin-log # +# Innodb could all use the# +# Same test. ################################### --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/have_innodb.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_max_relay_size.test b/mysql-test/suite/rpl/t/rpl_row_max_relay_size.test index ea4b958ae4c..3e057d48ec9 100644 --- a/mysql-test/suite/rpl/t/rpl_row_max_relay_size.test +++ b/mysql-test/suite/rpl/t/rpl_row_max_relay_size.test @@ -4,7 +4,6 @@ # Test of manual relay log rotation with FLUSH LOGS. # Requires statement logging -source include/not_ndb_default.inc; source include/have_binlog_format_row.inc; source extra/rpl_tests/rpl_max_relay_size.test; diff --git a/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test b/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test index f96603f69ed..0d6b08bd0c1 100644 --- a/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test +++ b/mysql-test/suite/rpl/t/rpl_row_rec_comp_myisam.test @@ -9,7 +9,6 @@ -- echo ## coverage purposes - Field_bits -- echo ## 1 X bit + 2 Null bits + 5 bits => last_null_bit_pos==0 -## Added here because AFAIK it's only MyISAM and NDB that use Field_bits --source include/rpl_reset.inc -- connection master diff --git a/mysql-test/suite/rpl/t/rpl_row_sp002_innodb.test b/mysql-test/suite/rpl/t/rpl_row_sp002_innodb.test index aec421407c9..c863c9c02e1 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp002_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp002_innodb.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=INNODB; -- source extra/rpl_tests/rpl_row_sp002.test diff --git a/mysql-test/suite/rpl/t/rpl_row_sp003.test b/mysql-test/suite/rpl/t/rpl_row_sp003.test index b5d62f60199..f1cad89b58e 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp003.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp003.test @@ -3,9 +3,7 @@ ################################# ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/have_binlog_format_row.inc # Slow test, don't run during staging part diff --git a/mysql-test/suite/rpl/t/rpl_row_sp005.test b/mysql-test/suite/rpl/t/rpl_row_sp005.test index bbe4ce47f9e..89bc0ca4df0 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp005.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp005.test @@ -6,8 +6,6 @@ # Test: Tests SPs with cursors, flow logic, and alter sp. 
In addition the # # tests SPs with insert and update operations. # ############################################################################# -# 2006-02-08 By JBM added ORDER BY for use with NDB engine -############################################################################# # Includes -- source include/have_binlog_format_row.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_sp006_InnoDB.test b/mysql-test/suite/rpl/t/rpl_row_sp006_InnoDB.test index df3952bead1..f8ff2b2ca8e 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp006_InnoDB.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp006_InnoDB.test @@ -3,9 +3,7 @@ ################################# ######################################################## # By JBM 2005-02-15 Wrapped to allow reuse of test code# -# Added to skip if ndb is default # ######################################################## --- source include/not_ndb_default.inc -- source include/have_innodb.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_sp007_innodb.test b/mysql-test/suite/rpl/t/rpl_row_sp007_innodb.test index fcac31df780..e2003092d10 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp007_innodb.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp007_innodb.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_innodb.inc let $engine_type=INNODB; -- source extra/rpl_tests/rpl_row_sp007.test diff --git a/mysql-test/suite/rpl/t/rpl_row_sp009.test b/mysql-test/suite/rpl/t/rpl_row_sp009.test index 6e7226593a1..83d743b8e07 100644 --- a/mysql-test/suite/rpl/t/rpl_row_sp009.test +++ b/mysql-test/suite/rpl/t/rpl_row_sp009.test @@ -5,8 +5,6 @@ ############################################################################# #TEST: Taken and modfied from http://bugs.mysql.com/bug.php?id=12168 # ############################################################################# -# 2006-02-08 By JBM : Added order by for ndb engine use -############################################################################# # Includes -- source include/have_binlog_format_row.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_trig004.test b/mysql-test/suite/rpl/t/rpl_row_trig004.test index fa5f973c355..e10e0d51039 100644 --- a/mysql-test/suite/rpl/t/rpl_row_trig004.test +++ b/mysql-test/suite/rpl/t/rpl_row_trig004.test @@ -4,12 +4,8 @@ ############################################################################# # TEST: Use before insert triggers and has the second insert fail # ############################################################################# -# Change by JBM 2006-02-14 added to skip when NDB default engine # -# This test has been wrapped to allow multipal engines to use same code # -############################################################################# # Includes --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/have_innodb.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_until.test b/mysql-test/suite/rpl/t/rpl_row_until.test index 97b60a3055f..f10f921eada 100644 --- a/mysql-test/suite/rpl/t/rpl_row_until.test +++ b/mysql-test/suite/rpl/t/rpl_row_until.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_binlog_format_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_row_view01.test b/mysql-test/suite/rpl/t/rpl_row_view01.test index cc60ab9912c..449196aefd5 100644 --- a/mysql-test/suite/rpl/t/rpl_row_view01.test +++ 
b/mysql-test/suite/rpl/t/rpl_row_view01.test @@ -5,8 +5,6 @@ ############################################################################# #TEST: row based replication of views # ############################################################################# -# 2006-02-08 By JBM added order by and sleep for use with ndb engine -############################################################################# # Includes -- source include/have_binlog_format_row.inc -- source include/master-slave.inc @@ -23,7 +21,6 @@ DROP TABLE IF EXISTS mysqltest1.t3; DROP TABLE IF EXISTS mysqltest1.t1; DROP TABLE IF EXISTS mysqltest1.t2; DROP TABLE IF EXISTS mysqltest1.t4; -DROP TABLE IF EXISTS mysqltest1.t10; # Begin test section 1 CREATE TABLE mysqltest1.t1 (a INT, c CHAR(6),PRIMARY KEY(a)); @@ -45,15 +42,6 @@ CREATE VIEW mysqltest1.v4 AS SELECT * FROM mysqltest1.v3 WHERE a > 1 WITH LOCAL SELECT * FROM mysqltest1.v2; SELECT * FROM mysqltest1.v1; - -# Had to add a waiting for use with NDB -# engine. Injector thread would have not -# populated binlog and data would not be on -# the slave. - -CREATE TABLE mysqltest1.t10 (a INT, PRIMARY KEY(a)); -let $wait_binlog_event= CREATE TABLE mysqltest1.t10; --- source include/wait_for_binlog_event.inc --sync_slave_with_master SELECT * FROM mysqltest1.v2; @@ -89,7 +77,6 @@ DROP TABLE IF EXISTS mysqltest1.t3; DROP TABLE IF EXISTS mysqltest1.t1; DROP TABLE IF EXISTS mysqltest1.t2; DROP TABLE IF EXISTS mysqltest1.t4; -DROP TABLE IF EXISTS mysqltest1.t10; DROP DATABASE mysqltest1; sync_slave_with_master; diff --git a/mysql-test/suite/rpl/t/rpl_sp_effects.test b/mysql-test/suite/rpl/t/rpl_sp_effects.test index 82d61d94bd4..b9d637320bb 100644 --- a/mysql-test/suite/rpl/t/rpl_sp_effects.test +++ b/mysql-test/suite/rpl/t/rpl_sp_effects.test @@ -1,7 +1,6 @@ ########################################## # Change Author: JBM # Change Date: 2006-05-02 -# Change: Added Order By for NDB testing ########################################## # Test of replication of stored procedures (WL#2146 for MySQL 5.0) diff --git a/mysql-test/suite/rpl/t/rpl_stm_loaddata_concurrent.test b/mysql-test/suite/rpl/t/rpl_stm_loaddata_concurrent.test index 7add76c0ef5..32de2a94cbb 100644 --- a/mysql-test/suite/rpl/t/rpl_stm_loaddata_concurrent.test +++ b/mysql-test/suite/rpl/t/rpl_stm_loaddata_concurrent.test @@ -1,4 +1,3 @@ --- source include/not_ndb_default.inc -- source include/have_log_bin.inc -- source include/have_binlog_format_statement.inc RESET MASTER; diff --git a/mysql-test/suite/rpl/t/rpl_stm_loadfile.test b/mysql-test/suite/rpl/t/rpl_stm_loadfile.test index 5d2c85b2cb6..e82c951ce0c 100644 --- a/mysql-test/suite/rpl/t/rpl_stm_loadfile.test +++ b/mysql-test/suite/rpl/t/rpl_stm_loadfile.test @@ -6,7 +6,6 @@ ############################################################################# # Change Author: JBM # Change Date: 2006-01-16 -# Change: Added Order by for NDB # Change: Split the original test file. This one forces STATEMENT only because # when in STATEMENT mode, the load_file will issue a warning, whereas # in RBR or MIXED mode it does not (by lsoares). 
diff --git a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test index 4ba66b5fbf8..866e387cddb 100644 --- a/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test +++ b/mysql-test/suite/rpl/t/rpl_switch_stm_row_mixed.test @@ -9,7 +9,6 @@ # - correctness of execution --- source include/not_ndb_default.inc -- source include/have_binlog_format_mixed_or_row.inc -- source include/master-slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_truncate_2myisam.test b/mysql-test/suite/rpl/t/rpl_truncate_2myisam.test index bea6332963e..4a8994d1878 100644 --- a/mysql-test/suite/rpl/t/rpl_truncate_2myisam.test +++ b/mysql-test/suite/rpl/t/rpl_truncate_2myisam.test @@ -1,3 +1,2 @@ ---source include/not_ndb_default.inc let $engine=MyISAM; --source extra/rpl_tests/rpl_truncate.test diff --git a/mysql-test/suite/rpl/t/rpl_truncate_3innodb.test b/mysql-test/suite/rpl/t/rpl_truncate_3innodb.test index 093cf56a316..fbedaed87c1 100644 --- a/mysql-test/suite/rpl/t/rpl_truncate_3innodb.test +++ b/mysql-test/suite/rpl/t/rpl_truncate_3innodb.test @@ -1,4 +1,3 @@ --source include/have_innodb.inc ---source include/not_ndb_default.inc let $engine=InnoDB; --source extra/rpl_tests/rpl_truncate.test diff --git a/mysql-test/suite/stress/include/ddl7.inc b/mysql-test/suite/stress/include/ddl7.inc index 76ba8066c66..00308a1a5d0 100644 --- a/mysql-test/suite/stress/include/ddl7.inc +++ b/mysql-test/suite/stress/include/ddl7.inc @@ -65,13 +65,11 @@ while ($run) } eval $drop_index; eval $insert_record; - # NDB: ER_DUP_UNIQUE, others: ER_DUP_ENTRY - --error 0,ER_DUP_ENTRY,ER_DUP_UNIQUE + --error 0,ER_DUP_ENTRY eval $create_index; if (!$mysql_errno) { --echo # Error: CREATE INDEX was successful though we expected ER_DUP_ENTRY - --echo # Error: or ER_DUP_UNIQUE (NDB only) --echo # abort exit; } diff --git a/mysql-test/suite/sys_vars/r/have_ndbcluster_basic.result b/mysql-test/suite/sys_vars/r/have_ndbcluster_basic.result deleted file mode 100644 index 1b662311072..00000000000 --- a/mysql-test/suite/sys_vars/r/have_ndbcluster_basic.result +++ /dev/null @@ -1,53 +0,0 @@ -'#---------------------BS_STVARS_012_01----------------------#' -SELECT COUNT(@@GLOBAL.have_ndbcluster); -COUNT(@@GLOBAL.have_ndbcluster) -1 -1 Expected -'#---------------------BS_STVARS_012_02----------------------#' -SET @@GLOBAL.have_ndbcluster=1; -ERROR HY000: Variable 'have_ndbcluster' is a read only variable -Expected error 'Read only variable' -SELECT COUNT(@@GLOBAL.have_ndbcluster); -COUNT(@@GLOBAL.have_ndbcluster) -1 -1 Expected -'#---------------------BS_STVARS_012_03----------------------#' -SELECT @@GLOBAL.have_ndbcluster = VARIABLE_VALUE -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='have_ndbcluster'; -@@GLOBAL.have_ndbcluster = VARIABLE_VALUE -1 -1 Expected -SELECT COUNT(@@GLOBAL.have_ndbcluster); -COUNT(@@GLOBAL.have_ndbcluster) -1 -1 Expected -SELECT COUNT(VARIABLE_VALUE) -FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES -WHERE VARIABLE_NAME='have_ndbcluster'; -COUNT(VARIABLE_VALUE) -1 -1 Expected -'#---------------------BS_STVARS_012_04----------------------#' -SELECT @@have_ndbcluster = @@GLOBAL.have_ndbcluster; -@@have_ndbcluster = @@GLOBAL.have_ndbcluster -1 -1 Expected -'#---------------------BS_STVARS_012_05----------------------#' -SELECT COUNT(@@have_ndbcluster); -COUNT(@@have_ndbcluster) -1 -1 Expected -SELECT COUNT(@@local.have_ndbcluster); -ERROR HY000: Variable 'have_ndbcluster' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT 
COUNT(@@SESSION.have_ndbcluster); -ERROR HY000: Variable 'have_ndbcluster' is a GLOBAL variable -Expected error 'Variable is a GLOBAL variable' -SELECT COUNT(@@GLOBAL.have_ndbcluster); -COUNT(@@GLOBAL.have_ndbcluster) -1 -1 Expected -SELECT have_ndbcluster = @@SESSION.have_ndbcluster; -ERROR 42S22: Unknown column 'have_ndbcluster' in 'field list' -Expected error 'Readonly variable' diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 9375dd78adc..0927b96621c 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -50,7 +50,7 @@ insert into t5 values (10); create view v1 (c) as SELECT table_name FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND - table_name not like 'ndb_%' AND table_name not like 'innodb_%' AND + table_name not like 'innodb_%' AND table_name not like 'xtradb_%'; --sorted_result select * from v1; diff --git a/mysql-test/t/information_schema_all_engines.test b/mysql-test/t/information_schema_all_engines.test index c7955a38e1b..9c056d934de 100644 --- a/mysql-test/t/information_schema_all_engines.test +++ b/mysql-test/t/information_schema_all_engines.test @@ -83,4 +83,4 @@ group by t.table_name order by num1, t.table_name; # Bug #9404 information_schema: Weird error messages # with SELECT SUM() ... GROUP BY queries # -SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') AND table_name<>'ndb_binlog_index' AND table_name<>'ndb_apply_status' GROUP BY TABLE_SCHEMA; +SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') GROUP BY TABLE_SCHEMA; diff --git a/mysql-test/t/log_tables.test b/mysql-test/t/log_tables.test index 8a2bd4cf6c1..64c70c039ab 100644 --- a/mysql-test/t/log_tables.test +++ b/mysql-test/t/log_tables.test @@ -265,8 +265,6 @@ alter table mysql.slow_log engine=NonExistentEngine; --error ER_UNSUPORTED_LOG_ENGINE alter table mysql.slow_log engine=memory; #--error ER_UNSUPORTED_LOG_ENGINE -#alter table mysql.slow_log engine=ndb; -#--error ER_UNSUPORTED_LOG_ENGINE #alter table mysql.slow_log engine=innodb; #--error ER_UNSUPORTED_LOG_ENGINE #alter table mysql.slow_log engine=archive; diff --git a/mysql-test/t/mysqlbinlog.test b/mysql-test/t/mysqlbinlog.test index b778a1deb9b..e167893fb9f 100644 --- a/mysql-test/t/mysqlbinlog.test +++ b/mysql-test/t/mysqlbinlog.test @@ -477,21 +477,6 @@ FLUSH LOGS; # rollback; # Transaction2 end -# Test case3: Test if the 'BEGIN' and 'COMMIT' are output for the 'test' database -# in transaction3 base on NDB engine tables -# use test; -# create table t5(a int) engine= NDB; -# use mysql; -# create table t6(a int) engine= NDB; -# Transaction3 begin -# begin; -# use test; -# insert into t5 (a) values (3); -# use mysql; -# insert into t6 (a) values (3); -# commit; -# Transaction3 end - --echo # --echo # Test if the 'BEGIN', 'ROLLBACK' and 'COMMIT' are output if the database specified exists --exec $MYSQL_BINLOG --database=test --short-form $MYSQLTEST_VARDIR/std_data/binlog_transaction.000001 diff --git a/mysql-test/t/mysqld--help.test b/mysql-test/t/mysqld--help.test index f6a832a34fd..0fdc01ba520 100644 --- a/mysql-test/t/mysqld--help.test +++ b/mysql-test/t/mysqld--help.test @@ -25,7 +25,7 @@ perl; version.*/; # Plugins which may or may not be there: - @plugins=qw/innodb ndb archive blackhole federated partition ndbcluster + @plugins=qw/innodb 
archive blackhole federated partition feedback debug temp-pool ssl des-key-file xtradb sequence thread-concurrency super-large-pages mutex-deadlock-detector connect null-audit aria oqgraph sphinx thread-handling diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index d378ab551e6..5179bb1b03d 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -4,7 +4,7 @@ # Test cases for bugs are added at the end. See template there. # # Some tests that require --error go into sp-error.test -# Tests that require inndb go into sp_trans.test +# Tests that require innodb go into sp_trans.test # Tests that check privilege and security issues go to sp-security.test. # Tests that require multiple connections, except security/privilege tests, # go to sp-thread. diff --git a/mysql-test/t/system_mysql_db_fix50117.test b/mysql-test/t/system_mysql_db_fix50117.test index fd452668a9e..2b01934edea 100644 --- a/mysql-test/t/system_mysql_db_fix50117.test +++ b/mysql-test/t/system_mysql_db_fix50117.test @@ -84,9 +84,6 @@ CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events'; -# MariaDB: don't: -# CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; - # Run the mysql_fix_privilege_tables.sql using "mysql --force" --exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp index 45499e5891f..09af8903ad4 100644 --- a/mysql-test/valgrind.supp +++ b/mysql-test/valgrind.supp @@ -623,21 +623,6 @@ fun:__lll_mutex_unlock_wake } -# -# BUG#19940: NDB sends uninitialized parts of field buffers across the wire. 
-# This is "works as designed"; the uninitialized part is not used at the -# other end (but Valgrind cannot see this). -# -{ - bug19940 - Memcheck:Param - socketcall.sendto(msg) - fun:send - fun:_ZN15TCP_Transporter6doSendEv - fun:_ZN19TransporterRegistry11performSendEv - fun:_ZN19TransporterRegistry14forceSendCheckEi -} - # # Warning when printing stack trace (to suppress some not needed warnings) # diff --git a/mysys/my_compress.c b/mysys/my_compress.c index 4cd43596031..78d09bb5f36 100644 --- a/mysys/my_compress.c +++ b/mysys/my_compress.c @@ -203,129 +203,4 @@ my_bool my_uncompress(uchar *packet, size_t len, size_t *complen) DBUG_RETURN(0); } -/* - Internal representation of the frm blob is: - - ver 4 bytes - orglen 4 bytes - complen 4 bytes -*/ - -#define BLOB_HEADER 12 - - -/* - packfrm is a method used to compress the frm file for storage in a - handler. This method was developed for the NDB handler and has been moved - here to serve also other uses. - - SYNOPSIS - packfrm() - data Data reference to frm file data. - len Length of frm file data - out:pack_data Reference to the pointer to the packed frm data - out:pack_len Length of packed frm file data - - NOTES - data is replaced with compressed content - - RETURN VALUES - 0 Success - >0 Failure -*/ - -int packfrm(const uchar *data, size_t len, - uchar **pack_data, size_t *pack_len) -{ - int error; - size_t org_len, comp_len, blob_len; - uchar *blob; - DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: 0x%lx len: %lu", (long) data, (ulong) len)); - - error= 1; - org_len= len; - if (my_compress((uchar*)data, &org_len, &comp_len)) - goto err; - - DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", (ulong) org_len, - (ulong) comp_len)); - DBUG_DUMP("compressed", data, org_len); - - error= 2; - blob_len= BLOB_HEADER + org_len; - if (!(blob= (uchar*) my_malloc(blob_len,MYF(MY_WME)))) - goto err; - - /* Store compressed blob in machine independent format */ - int4store(blob, 1); - int4store(blob+4, (uint32) len); - int4store(blob+8, (uint32) org_len); /* compressed length */ - - /* Copy frm data into blob, already in machine independent format */ - memcpy(blob+BLOB_HEADER, data, org_len); - - *pack_data= blob; - *pack_len= blob_len; - error= 0; - - DBUG_PRINT("exit", ("pack_data: 0x%lx pack_len: %lu", - (long) *pack_data, (ulong) *pack_len)); -err: - DBUG_RETURN(error); - -} - -/* - unpackfrm is a method used to decompress the frm file received from a - handler. This method was developed for the NDB handler and has been moved - here to serve also other uses for other clustered storage engines. 
- - SYNOPSIS - unpackfrm() - pack_data Data reference to packed frm file data - out:unpack_data Reference to the pointer to the unpacked frm data - out:unpack_len Length of unpacked frm file data - - RETURN VALUES¨ - 0 Success - >0 Failure -*/ - -int unpackfrm(uchar **unpack_data, size_t *unpack_len, - const uchar *pack_data) -{ - uchar *data; - size_t complen, orglen; - ulong ver; - DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data)); - - ver= uint4korr(pack_data); - orglen= uint4korr(pack_data+4); - complen= uint4korr(pack_data+8); - - DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu", - ver, (ulong) complen, (ulong) orglen)); - DBUG_DUMP("blob->data", pack_data + BLOB_HEADER, complen); - - if (ver != 1) - DBUG_RETURN(1); - if (!(data= my_malloc(MY_MAX(orglen, complen), MYF(MY_WME)))) - DBUG_RETURN(2); - memcpy(data, pack_data + BLOB_HEADER, complen); - - if (my_uncompress(data, complen, &orglen)) - { - my_free(data); - DBUG_RETURN(3); - } - - *unpack_data= data; - *unpack_len= orglen; - - DBUG_PRINT("exit", ("frmdata: 0x%lx len: %lu", (long) *unpack_data, - (ulong) *unpack_len)); - DBUG_RETURN(0); -} #endif /* HAVE_COMPRESS */ diff --git a/packaging/rpm-oel/CMakeLists.txt b/packaging/rpm-oel/CMakeLists.txt deleted file mode 100644 index 95249a9404f..00000000000 --- a/packaging/rpm-oel/CMakeLists.txt +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - -IF(UNIX) - SET(prefix ${CMAKE_INSTALL_PREFIX}) - - SET(SPECFILENAME "mysql.spec") - IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(SPECFILENAME "mysql-cluster-${NDBVERSION}.spec") - ENDIF() - - # Left in current directory, to be taken during build - CONFIGURE_FILE(mysql.spec.in ${CMAKE_CURRENT_BINARY_DIR}/${SPECFILENAME} @ONLY) - - FOREACH(fedfile my.cnf my_config.h mysql_config.sh - mysqld.service mysql-systemd-start mysql.conf - filter-requires.sh filter-provides.sh mysql.init - mysql-5.5-libmysqlclient-symbols.patch) - CONFIGURE_FILE(${fedfile} ${CMAKE_CURRENT_BINARY_DIR}/${fedfile} COPYONLY) - ENDFOREACH() -ENDIF() - diff --git a/packaging/rpm-oel/filter-provides.sh b/packaging/rpm-oel/filter-provides.sh deleted file mode 100755 index bc166bd82d0..00000000000 --- a/packaging/rpm-oel/filter-provides.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/bash -# - -/usr/lib/rpm/perl.prov $* | -sed -e '/perl(hostnames)/d' -e '/perl(lib::mtr.*/d' -e '/perl(lib::v1.*/d' -e '/perl(mtr_.*/d' -e '/perl(My::.*/d' - diff --git a/packaging/rpm-oel/filter-requires.sh b/packaging/rpm-oel/filter-requires.sh deleted file mode 100755 index 521eb0ca7d9..00000000000 --- a/packaging/rpm-oel/filter-requires.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! 
/bin/bash -# - -/usr/lib/rpm/perl.req $* | -sed -e '/perl(hostnames)/d' -e '/perl(lib::mtr.*/d' -e '/perl(lib::v1.*/d' -e '/perl(mtr_.*/d' -e '/perl(My::.*/d' - diff --git a/packaging/rpm-oel/my.cnf b/packaging/rpm-oel/my.cnf deleted file mode 100644 index b8ee584d485..00000000000 --- a/packaging/rpm-oel/my.cnf +++ /dev/null @@ -1,31 +0,0 @@ -# For advice on how to change settings please see -# http://dev.mysql.com/doc/refman/5.5/en/server-configuration-defaults.html - -[mysqld] -# -# Remove leading # and set to the amount of RAM for the most important data -# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%. -# innodb_buffer_pool_size = 128M -# -# Remove leading # to turn on a very important data integrity option: logging -# changes to the binary log between backups. -# log_bin -# -# Remove leading # to set options mainly useful for reporting servers. -# The server defaults are faster for transactions and fast SELECTs. -# Adjust sizes as needed, experiment to find the optimal values. -# join_buffer_size = 128M -# sort_buffer_size = 2M -# read_rnd_buffer_size = 2M -datadir=/var/lib/mysql -socket=/var/lib/mysql/mysql.sock - -# Disabling symbolic-links is recommended to prevent assorted security risks -symbolic-links=0 - -# Recommended in standard MySQL setup -#sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES - -[mysqld_safe] -log-error=/var/log/mysqld.log -pid-file=/var/run/mysqld/mysqld.pid diff --git a/packaging/rpm-oel/my_config.h b/packaging/rpm-oel/my_config.h deleted file mode 100644 index 75dc5e1d754..00000000000 --- a/packaging/rpm-oel/my_config.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Fedora supports multi arch: having 32 and 64 versions of MySQL - * installed at the same time. my_config.h will differ due arch - * dependent defs creating a file conflict. We move arch specific - * headers to arch specific file names and include the correct arch - * specific file by installing this generic file. - * - */ - -#if defined(__i386__) -#include "my_config_i386.h" -#elif defined(__ia64__) -#include "my_config_ia64.h" -#elif defined(__powerpc__) -#include "my_config_ppc.h" -#elif defined(__powerpc64__) -#include "my_config_ppc64.h" -#elif defined(__s390x__) -#include "my_config_s390x.h" -#elif defined(__s390__) -#include "my_config_s390.h" -#elif defined(__sparc__) && defined(__arch64__) -#include "my_config_sparc64.h" -#elif defined(__sparc__) -#include "my_config_sparc.h" -#elif defined(__x86_64__) -#include "my_config_x86_64.h" -#else -#error "This MySQL devel package does not work your architecture?" 
-#endif diff --git a/packaging/rpm-oel/mysql-5.5-libmysqlclient-symbols.patch b/packaging/rpm-oel/mysql-5.5-libmysqlclient-symbols.patch deleted file mode 100644 index ce5455ee29b..00000000000 --- a/packaging/rpm-oel/mysql-5.5-libmysqlclient-symbols.patch +++ /dev/null @@ -1,982 +0,0 @@ -diff -rup old/libmysql/CMakeLists.txt new/libmysql/CMakeLists.txt ---- old/libmysql/CMakeLists.txt 2013-11-05 08:19:26.000000000 +0100 -+++ new/libmysql/CMakeLists.txt 2014-01-10 15:41:30.530068723 +0100 -@@ -205,13 +205,14 @@ IF(NOT DISABLE_SHARED) - OUTPUT_NAME mysqlclient - VERSION "${OS_SHARED_LIB_VERSION}" - SOVERSION "${SHARED_LIB_MAJOR_VERSION}") -+ CONFIGURE_FILE(libmysql.ver.in ${CMAKE_CURRENT_BINARY_DIR}/libmysql.ver) - IF(LINK_FLAG_NO_UNDEFINED) - GET_TARGET_PROPERTY(libmysql_link_flags libmysql LINK_FLAGS) - IF(NOT libmysql_link_flag) - SET(libmysql_link_flags) - ENDIF() - SET_TARGET_PROPERTIES(libmysql PROPERTIES LINK_FLAGS -- "${libmysql_link_flags} ${LINK_FLAG_NO_UNDEFINED}") -+ "${libmysql_link_flags} -Wl,--version-script=libmysql.ver ${LINK_FLAG_NO_UNDEFINED}") - ENDIF() - # clean direct output needs to be set several targets have the same name - #(mysqlclient in this case) -diff -rup old/libmysql/libmysql.c new/libmysql/libmysql.c ---- old/libmysql/libmysql.c 2013-11-05 08:19:26.000000000 +0100 -+++ new/libmysql/libmysql.c 2014-01-10 15:46:35.708928462 +0100 -@@ -4870,3 +4870,612 @@ my_bool STDCALL mysql_read_query_result( - return (*mysql->methods->read_query_result)(mysql); - } - -+#ifndef EMBEDDED_LIBRARY -+ -+// Hack to provide both libmysqlclient_16 and libmysqlclient_18 symbol versions -+ -+#define SYM_16(_exportedsym) __asm__(".symver symver16_" #_exportedsym "," #_exportedsym "@libmysqlclient_16") -+ -+void STDCALL symver16_myodbc_remove_escape(MYSQL *mysql,char *name) -+{ -+ return myodbc_remove_escape(mysql, name); -+} -+SYM_16(myodbc_remove_escape); -+ -+ -+my_ulonglong STDCALL symver16_mysql_affected_rows(MYSQL *mysql) -+{ -+ return mysql_affected_rows(mysql); -+} -+SYM_16(mysql_affected_rows); -+ -+ -+my_bool STDCALL symver16_mysql_autocommit(MYSQL * mysql, my_bool auto_mode) -+{ -+ return mysql_autocommit(mysql, auto_mode); -+} -+SYM_16(mysql_autocommit); -+ -+ -+my_bool STDCALL symver16_mysql_change_user(MYSQL *mysql, const char *user, const char *passwd, const char *db) -+{ -+ return mysql_change_user(mysql, user, passwd, db); -+} -+SYM_16(mysql_change_user); -+ -+ -+const char * STDCALL symver16_mysql_character_set_name(MYSQL *mysql) -+{ -+ return mysql_character_set_name(mysql); -+} -+SYM_16(mysql_character_set_name); -+ -+ -+my_bool STDCALL symver16_mysql_commit(MYSQL * mysql) -+{ -+ return mysql_commit(mysql); -+} -+SYM_16(mysql_commit); -+ -+ -+void STDCALL symver16_mysql_data_seek(MYSQL_RES *result, my_ulonglong row) -+{ -+ return mysql_data_seek(result, row); -+} -+SYM_16(mysql_data_seek); -+ -+ -+void STDCALL symver16_mysql_debug(const char *debug __attribute__((unused))) -+{ -+ return mysql_debug(debug); -+} -+SYM_16(mysql_debug); -+ -+ -+int STDCALL symver16_mysql_dump_debug_info(MYSQL *mysql) -+{ -+ return mysql_dump_debug_info(mysql); -+} -+SYM_16(mysql_dump_debug_info); -+ -+ -+my_bool STDCALL symver16_mysql_embedded(void) -+{ -+ return mysql_embedded(); -+} -+SYM_16(mysql_embedded); -+ -+ -+my_bool STDCALL symver16_mysql_eof(MYSQL_RES *res) -+{ -+ return mysql_eof(res); -+} -+SYM_16(mysql_eof); -+ -+ -+ulong STDCALL symver16_mysql_escape_string(char *to,const char *from,ulong length) -+{ -+ return mysql_escape_string(to, from, length); -+} 
-+SYM_16(mysql_escape_string); -+ -+ -+MYSQL_FIELD * STDCALL symver16_mysql_fetch_field(MYSQL_RES *result) -+{ -+ return mysql_fetch_field(result); -+} -+SYM_16(mysql_fetch_field); -+ -+ -+MYSQL_FIELD * STDCALL symver16_mysql_fetch_field_direct(MYSQL_RES *res,uint fieldnr) -+{ -+ return mysql_fetch_field_direct(res, fieldnr); -+} -+SYM_16(mysql_fetch_field_direct); -+ -+ -+MYSQL_FIELD * STDCALL symver16_mysql_fetch_fields(MYSQL_RES *res) -+{ -+ return mysql_fetch_fields(res); -+} -+SYM_16(mysql_fetch_fields); -+ -+ -+unsigned int STDCALL symver16_mysql_field_count(MYSQL *mysql) -+{ -+ return mysql_field_count(mysql); -+} -+SYM_16(mysql_field_count); -+ -+ -+MYSQL_FIELD_OFFSET STDCALL symver16_mysql_field_seek(MYSQL_RES *result, MYSQL_FIELD_OFFSET field_offset) -+{ -+ return mysql_field_seek(result, field_offset); -+} -+SYM_16(mysql_field_seek); -+ -+ -+MYSQL_FIELD_OFFSET STDCALL symver16_mysql_field_tell(MYSQL_RES *res) -+{ -+ return mysql_field_tell(res); -+} -+SYM_16(mysql_field_tell); -+ -+ -+void STDCALL symver16_mysql_get_character_set_info(MYSQL *mysql, MY_CHARSET_INFO *csinfo) -+{ -+ return mysql_get_character_set_info(mysql, csinfo); -+} -+SYM_16(mysql_get_character_set_info); -+ -+ -+const char * STDCALL symver16_mysql_get_client_info(void) -+{ -+ return mysql_get_client_info(); -+} -+SYM_16(mysql_get_client_info); -+ -+ulong STDCALL symver16_mysql_get_client_version(void) -+{ -+ return mysql_get_client_version(); -+} -+SYM_16(mysql_get_client_version); -+ -+ -+const char * STDCALL symver16_mysql_get_host_info(MYSQL *mysql) -+{ -+ return mysql_get_host_info(mysql); -+} -+SYM_16(mysql_get_host_info); -+ -+ -+MYSQL_PARAMETERS *STDCALL symver16_mysql_get_parameters(void) -+{ -+ return mysql_get_parameters(); -+} -+SYM_16(mysql_get_parameters); -+ -+ -+uint STDCALL symver16_mysql_get_proto_info(MYSQL *mysql) -+{ -+ return mysql_get_proto_info(mysql); -+} -+SYM_16(mysql_get_proto_info); -+ -+ -+const char * STDCALL symver16_mysql_get_server_info(MYSQL *mysql) -+{ -+ return mysql_get_server_info(mysql); -+} -+SYM_16(mysql_get_server_info); -+ -+ -+ulong STDCALL symver16_mysql_hex_string(char *to, const char *from, ulong length) -+{ -+ return mysql_hex_string(to, from, length); -+} -+SYM_16(mysql_hex_string); -+ -+ -+const char *STDCALL symver16_mysql_info(MYSQL *mysql) -+{ -+ return mysql_info(mysql); -+} -+SYM_16(mysql_info); -+ -+ -+my_ulonglong STDCALL symver16_mysql_insert_id(MYSQL *mysql) -+{ -+ return mysql_insert_id(mysql); -+} -+SYM_16(mysql_insert_id); -+ -+ -+int STDCALL symver16_mysql_kill(MYSQL *mysql,ulong pid) -+{ -+ return mysql_kill(mysql, pid); -+} -+SYM_16(mysql_kill); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_list_dbs(MYSQL *mysql, const char *wild) -+{ -+ return mysql_list_dbs(mysql, wild); -+} -+SYM_16(mysql_list_dbs); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_list_fields(MYSQL *mysql, const char *table, const char *wild) -+{ -+ return mysql_list_fields(mysql, table, wild); -+} -+SYM_16(mysql_list_fields); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_list_processes(MYSQL *mysql) -+{ -+ return mysql_list_processes(mysql); -+} -+SYM_16(mysql_list_processes); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_list_tables(MYSQL *mysql, const char *wild) -+{ -+ return mysql_list_tables(mysql, wild); -+} -+SYM_16(mysql_list_tables); -+ -+ -+my_bool STDCALL symver16_mysql_more_results(MYSQL *mysql) -+{ -+ return mysql_more_results(mysql); -+} -+SYM_16(mysql_more_results); -+ -+ -+int STDCALL symver16_mysql_next_result(MYSQL *mysql) -+{ -+ return mysql_next_result(mysql); -+} 
-+SYM_16(mysql_next_result); -+ -+ -+int STDCALL symver16_mysql_ping(MYSQL *mysql) -+{ -+ return mysql_ping(mysql); -+} -+SYM_16(mysql_ping); -+ -+ -+int STDCALL symver16_mysql_query(MYSQL *mysql, const char *query) -+{ -+ return mysql_query(mysql, query); -+} -+SYM_16(mysql_query); -+ -+ -+my_bool STDCALL symver16_mysql_read_query_result(MYSQL *mysql) -+{ -+ return mysql_read_query_result(mysql); -+} -+SYM_16(mysql_read_query_result); -+ -+ -+ulong STDCALL symver16_mysql_real_escape_string(MYSQL *mysql, char *to,const char *from, ulong length) -+{ -+ return mysql_real_escape_string(mysql, to, from, length); -+} -+SYM_16(mysql_real_escape_string); -+ -+ -+int STDCALL symver16_mysql_refresh(MYSQL *mysql,uint options) -+{ -+ return mysql_refresh(mysql, options); -+} -+SYM_16(mysql_refresh); -+ -+ -+my_bool STDCALL symver16_mysql_rollback(MYSQL * mysql) -+{ -+ return mysql_rollback(mysql); -+} -+SYM_16(mysql_rollback); -+ -+ -+MYSQL_ROW_OFFSET STDCALL symver16_mysql_row_seek(MYSQL_RES *result, MYSQL_ROW_OFFSET row) -+{ -+ return mysql_row_seek(result, row); -+} -+SYM_16(mysql_row_seek); -+ -+ -+MYSQL_ROW_OFFSET STDCALL symver16_mysql_row_tell(MYSQL_RES *res) -+{ -+ return mysql_row_tell(res); -+} -+SYM_16(mysql_row_tell); -+ -+ -+void STDCALL symver16_mysql_server_end() -+{ -+ return mysql_server_end(); -+} -+SYM_16(mysql_server_end); -+ -+ -+int STDCALL symver16_mysql_server_init(int argc __attribute__((unused)), char **argv __attribute__((unused)), char **groups __attribute__((unused))) -+{ -+ return mysql_server_init(argc, argv, groups); -+} -+SYM_16(mysql_server_init); -+ -+ -+void symver16_mysql_set_local_infile_default(MYSQL *mysql) -+{ -+ return mysql_set_local_infile_default(mysql); -+} -+SYM_16(mysql_set_local_infile_default); -+ -+ -+void symver16_mysql_set_local_infile_handler(MYSQL *mysql, int (*local_infile_init)(void **, const char *, void *), int (*local_infile_read)(void *, char *, uint), void (*local_infile_end)(void *), int (*local_infile_error)(void *, char *, uint), void *userdata) -+{ -+ return mysql_set_local_infile_handler(mysql, local_infile_init, local_infile_read, local_infile_end, local_infile_error, userdata); -+} -+SYM_16(mysql_set_local_infile_handler); -+ -+ -+int STDCALL symver16_mysql_set_server_option(MYSQL *mysql, enum enum_mysql_set_option option) -+{ -+ return mysql_set_server_option(mysql, option); -+} -+SYM_16(mysql_set_server_option); -+ -+ -+int STDCALL symver16_mysql_shutdown(MYSQL *mysql, enum mysql_enum_shutdown_level shutdown_level) -+{ -+ return mysql_shutdown(mysql, shutdown_level); -+} -+SYM_16(mysql_shutdown); -+ -+ -+const char *STDCALL symver16_mysql_sqlstate(MYSQL *mysql) -+{ -+ return mysql_sqlstate(mysql); -+} -+SYM_16(mysql_sqlstate); -+ -+ -+const char * STDCALL symver16_mysql_stat(MYSQL *mysql) -+{ -+ return mysql_stat(mysql); -+} -+SYM_16(mysql_stat); -+ -+ -+my_ulonglong STDCALL symver16_mysql_stmt_affected_rows(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_affected_rows(stmt); -+} -+SYM_16(mysql_stmt_affected_rows); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_attr_get(MYSQL_STMT *stmt, enum enum_stmt_attr_type attr_type, void *value) -+{ -+ return mysql_stmt_attr_get(stmt, attr_type, value); -+} -+SYM_16(mysql_stmt_attr_get); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_attr_set(MYSQL_STMT *stmt, enum enum_stmt_attr_type attr_type, const void *value) -+{ -+ return mysql_stmt_attr_set(stmt, attr_type, value); -+} -+SYM_16(mysql_stmt_attr_set); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_bind_param(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) 
-+{ -+ return mysql_stmt_bind_param(stmt, my_bind); -+} -+SYM_16(mysql_stmt_bind_param); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_bind_result(MYSQL_STMT *stmt, MYSQL_BIND *my_bind) -+{ -+ return mysql_stmt_bind_result(stmt, my_bind); -+} -+SYM_16(mysql_stmt_bind_result); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_close(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_close(stmt); -+} -+SYM_16(mysql_stmt_close); -+ -+ -+void STDCALL symver16_mysql_stmt_data_seek(MYSQL_STMT *stmt, my_ulonglong row) -+{ -+ return mysql_stmt_data_seek(stmt, row); -+} -+SYM_16(mysql_stmt_data_seek); -+ -+ -+uint STDCALL symver16_mysql_stmt_errno(MYSQL_STMT * stmt) -+{ -+ return mysql_stmt_errno(stmt); -+} -+SYM_16(mysql_stmt_errno); -+ -+ -+const char *STDCALL symver16_mysql_stmt_error(MYSQL_STMT * stmt) -+{ -+ return mysql_stmt_error(stmt); -+} -+SYM_16(mysql_stmt_error); -+ -+ -+int STDCALL symver16_mysql_stmt_execute(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_execute(stmt); -+} -+SYM_16(mysql_stmt_execute); -+ -+ -+int STDCALL symver16_mysql_stmt_fetch(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_fetch(stmt); -+} -+SYM_16(mysql_stmt_fetch); -+ -+ -+int STDCALL symver16_mysql_stmt_fetch_column(MYSQL_STMT *stmt, MYSQL_BIND *my_bind, uint column, ulong offset) -+{ -+ return mysql_stmt_fetch_column(stmt, my_bind, column, offset); -+} -+SYM_16(mysql_stmt_fetch_column); -+ -+ -+unsigned int STDCALL symver16_mysql_stmt_field_count(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_field_count(stmt); -+} -+SYM_16(mysql_stmt_field_count); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_free_result(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_free_result(stmt); -+} -+SYM_16(mysql_stmt_free_result); -+ -+ -+MYSQL_STMT * STDCALL symver16_mysql_stmt_init(MYSQL *mysql) -+{ -+ return mysql_stmt_init(mysql); -+} -+SYM_16(mysql_stmt_init); -+ -+ -+my_ulonglong STDCALL symver16_mysql_stmt_insert_id(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_insert_id(stmt); -+} -+SYM_16(mysql_stmt_insert_id); -+ -+ -+my_ulonglong STDCALL symver16_mysql_stmt_num_rows(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_num_rows(stmt); -+} -+SYM_16(mysql_stmt_num_rows); -+ -+ -+ulong STDCALL symver16_mysql_stmt_param_count(MYSQL_STMT * stmt) -+{ -+ return mysql_stmt_param_count(stmt); -+} -+SYM_16(mysql_stmt_param_count); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_stmt_param_metadata(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_param_metadata(stmt); -+} -+SYM_16(mysql_stmt_param_metadata); -+ -+ -+int STDCALL symver16_mysql_stmt_prepare(MYSQL_STMT *stmt, const char *query, ulong length) -+{ -+ return mysql_stmt_prepare(stmt, query, length); -+} -+SYM_16(mysql_stmt_prepare); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_reset(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_reset(stmt); -+} -+SYM_16(mysql_stmt_reset); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_stmt_result_metadata(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_result_metadata(stmt); -+} -+SYM_16(mysql_stmt_result_metadata); -+ -+ -+MYSQL_ROW_OFFSET STDCALL symver16_mysql_stmt_row_seek(MYSQL_STMT *stmt, MYSQL_ROW_OFFSET row) -+{ -+ return mysql_stmt_row_seek(stmt, row); -+} -+SYM_16(mysql_stmt_row_seek); -+ -+ -+MYSQL_ROW_OFFSET STDCALL symver16_mysql_stmt_row_tell(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_row_tell(stmt); -+} -+SYM_16(mysql_stmt_row_tell); -+ -+ -+my_bool STDCALL symver16_mysql_stmt_send_long_data(MYSQL_STMT *stmt, uint param_number, const char *data, ulong length) -+{ -+ return mysql_stmt_send_long_data(stmt, param_number, data, length); -+} -+SYM_16(mysql_stmt_send_long_data); -+ -+ -+const char 
*STDCALL symver16_mysql_stmt_sqlstate(MYSQL_STMT * stmt) -+{ -+ return mysql_stmt_sqlstate(stmt); -+} -+SYM_16(mysql_stmt_sqlstate); -+ -+ -+int STDCALL symver16_mysql_stmt_store_result(MYSQL_STMT *stmt) -+{ -+ return mysql_stmt_store_result(stmt); -+} -+SYM_16(mysql_stmt_store_result); -+ -+ -+void STDCALL symver16_mysql_thread_end() -+{ -+ return mysql_thread_end(); -+} -+SYM_16(mysql_thread_end); -+ -+ -+ulong STDCALL symver16_mysql_thread_id(MYSQL *mysql) -+{ -+ return mysql_thread_id(mysql); -+} -+SYM_16(mysql_thread_id); -+ -+ -+my_bool STDCALL symver16_mysql_thread_init() -+{ -+ return mysql_thread_init(); -+} -+SYM_16(mysql_thread_init); -+ -+ -+uint STDCALL symver16_mysql_thread_safe(void) -+{ -+ return mysql_thread_safe(); -+} -+SYM_16(mysql_thread_safe); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_use_result(MYSQL *mysql) -+{ -+ return mysql_use_result(mysql); -+} -+SYM_16(mysql_use_result); -+ -+ -+uint STDCALL symver16_mysql_warning_count(MYSQL *mysql) -+{ -+ return mysql_warning_count(mysql); -+} -+SYM_16(mysql_warning_count); -+ -+/*****/ -+ -+MYSQL * STDCALL symver16_mysql_real_connect(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, uint port, const char *unix_socket,ulong client_flag) -+{ -+ return mysql_real_connect(mysql, host, user, passwd, db, port, unix_socket, client_flag); -+} -+SYM_16(mysql_real_connect); -+ -+/*****/ -+ -+my_bool symver16_my_init(void) -+{ -+ return my_init(); -+} -+SYM_16(my_init); -+ -+#endif -diff -rup old/libmysql/libmysql.ver.in new/libmysql/libmysql.ver.in ---- old/libmysql/libmysql.ver.in 2013-11-05 08:19:26.000000000 +0100 -+++ new/libmysql/libmysql.ver.in 2014-01-10 15:41:30.545182782 +0100 -@@ -1 +1,136 @@ --libmysqlclient_@SHARED_LIB_MAJOR_VERSION@ { global: *; }; -+libmysqlclient_16 -+{ -+ local: -+ symver16_*; -+}; -+ -+libmysqlclient_18 -+{ -+ global: -+ my_init; -+ myodbc_remove_escape; -+ mysql_affected_rows; -+ mysql_autocommit; -+ mysql_change_user; -+ mysql_character_set_name; -+ mysql_close; -+ mysql_commit; -+ mysql_data_seek; -+ mysql_debug; -+ mysql_dump_debug_info; -+ mysql_embedded; -+ mysql_eof; -+ mysql_errno; -+ mysql_error; -+ mysql_escape_string; -+ mysql_fetch_field; -+ mysql_fetch_field_direct; -+ mysql_fetch_fields; -+ mysql_fetch_lengths; -+ mysql_fetch_row; -+ mysql_field_count; -+ mysql_field_seek; -+ mysql_field_tell; -+ mysql_free_result; -+ mysql_get_character_set_info; -+ mysql_get_client_info; -+ mysql_get_client_version; -+ mysql_get_host_info; -+ mysql_get_parameters; -+ mysql_get_proto_info; -+ mysql_get_server_info; -+ mysql_get_server_version; -+ mysql_get_ssl_cipher; -+ mysql_hex_string; -+ mysql_info; -+ mysql_init; -+ mysql_insert_id; -+ mysql_kill; -+ mysql_list_dbs; -+ mysql_list_fields; -+ mysql_list_processes; -+ mysql_list_tables; -+ mysql_more_results; -+ mysql_next_result; -+ mysql_num_fields; -+ mysql_num_rows; -+ mysql_options; -+ mysql_ping; -+ mysql_query; -+ mysql_read_query_result; -+ mysql_real_connect; -+ mysql_real_escape_string; -+ mysql_real_query; -+ mysql_refresh; -+ mysql_rollback; -+ mysql_row_seek; -+ mysql_row_tell; -+ mysql_select_db; -+ mysql_send_query; -+ mysql_server_end; -+ mysql_server_init; -+ mysql_set_character_set; -+ mysql_set_local_infile_default; -+ mysql_set_local_infile_handler; -+ mysql_set_server_option; -+ mysql_shutdown; -+ mysql_sqlstate; -+ mysql_ssl_set; -+ mysql_stat; -+ mysql_stmt_affected_rows; -+ mysql_stmt_attr_get; -+ mysql_stmt_attr_set; -+ mysql_stmt_bind_param; -+ mysql_stmt_bind_result; -+ mysql_stmt_close; 
-+ mysql_stmt_data_seek; -+ mysql_stmt_errno; -+ mysql_stmt_error; -+ mysql_stmt_execute; -+ mysql_stmt_fetch; -+ mysql_stmt_fetch_column; -+ mysql_stmt_field_count; -+ mysql_stmt_free_result; -+ mysql_stmt_init; -+ mysql_stmt_insert_id; -+ mysql_stmt_num_rows; -+ mysql_stmt_param_count; -+ mysql_stmt_param_metadata; -+ mysql_stmt_prepare; -+ mysql_stmt_reset; -+ mysql_stmt_result_metadata; -+ mysql_stmt_row_seek; -+ mysql_stmt_row_tell; -+ mysql_stmt_send_long_data; -+ mysql_stmt_sqlstate; -+ mysql_stmt_store_result; -+ mysql_store_result; -+ mysql_thread_end; -+ mysql_thread_id; -+ mysql_thread_init; -+ mysql_thread_safe; -+ mysql_use_result; -+ mysql_warning_count; -+ -+ free_defaults; -+ handle_options; -+ load_defaults; -+ my_print_help; -+ -+ #my_make_scrambled_password; -+ THR_KEY_mysys; -+ -+ mysql_client_find_plugin; -+ mysql_client_register_plugin; -+ mysql_load_plugin; -+ mysql_load_plugin_v; -+ mysql_plugin_options; -+ mysql_stmt_next_result; -+ -+ #mysql_default_charset_info; -+ mysql_get_charset; -+ mysql_get_charset_by_csname; -+ mysql_net_realloc; -+ #mysql_client_errors; -+ *; -+} libmysqlclient_16; -diff -rup old/mysys/charset.c new/mysys/charset.c ---- old/mysys/charset.c 2013-11-05 08:19:26.000000000 +0100 -+++ new/mysys/charset.c 2014-01-10 15:41:30.552919678 +0100 -@@ -941,3 +941,20 @@ size_t escape_quotes_for_mysql(CHARSET_I - *to= 0; - return overflow ? (ulong)~0 : (ulong) (to - to_start); - } -+ -+#ifndef EMBEDDED_LIBRARY -+ -+// Hack to provide Fedora symbols -+ -+CHARSET_INFO *mysql_get_charset(uint cs_number, myf flags) -+{ -+ return get_charset(cs_number, flags); -+} -+ -+ -+CHARSET_INFO * mysql_get_charset_by_csname(const char *cs_name, uint cs_flags, myf flags) -+{ -+ return get_charset_by_csname(cs_name, cs_flags, flags); -+} -+ -+#endif -diff -rup old/sql/net_serv.cc new/sql/net_serv.cc ---- old/sql/net_serv.cc 2013-11-05 08:19:26.000000000 +0100 -+++ new/sql/net_serv.cc 2014-01-10 15:41:30.563377346 +0100 -@@ -1190,3 +1190,17 @@ void my_net_set_write_timeout(NET *net, - #endif - DBUG_VOID_RETURN; - } -+ -+#ifndef EMBEDDED_LIBRARY -+C_MODE_START -+ -+// Hack to provide Fedora symbols -+ -+my_bool mysql_net_realloc(NET *net, size_t length) -+{ -+ return net_realloc(net, length); -+} -+ -+C_MODE_END -+#endif -+ -diff -rup old/sql/password.c new/sql/password.c ---- old/sql/password.c 2013-11-05 08:19:26.000000000 +0100 -+++ new/sql/password.c 2014-01-10 15:41:30.567134663 +0100 -@@ -563,3 +563,17 @@ void make_password_from_salt(char *to, c - *to++= PVERSION41_CHAR; - octet2hex(to, (const char*) hash_stage2, SHA1_HASH_SIZE); - } -+ -+#ifndef EMBEDDED_LIBRARY -+ -+// Hack to provide both libmysqlclient_16 and libmysqlclient_18 symbol versions -+ -+#define SYM_16(_exportedsym) __asm__(".symver symver16_" #_exportedsym "," #_exportedsym "@libmysqlclient_16") -+ -+void symver16_my_make_scrambled_password(char *to, const char *password, size_t pass_len) -+{ -+ my_make_scrambled_password(to, password, pass_len); -+} -+SYM_16(my_make_scrambled_password); -+ -+#endif -diff -rup old/sql-common/client.c new/sql-common/client.c ---- old/sql-common/client.c 2013-11-05 08:19:26.000000000 +0100 -+++ new/sql-common/client.c 2014-01-10 15:41:30.574151024 +0100 -@@ -4399,3 +4399,136 @@ static int clear_password_auth_client(MY - - return res ? 
CR_ERROR : CR_OK; - } -+ -+#ifndef EMBEDDED_LIBRARY -+ -+// Hack to provide both libmysqlclient_16 and libmysqlclient_18 symbol versions -+ -+#define SYM_16(_exportedsym) __asm__(".symver symver16_" #_exportedsym "," #_exportedsym "@libmysqlclient_16") -+ -+void STDCALL symver16_mysql_close(MYSQL *mysql) -+{ -+ return mysql_close(mysql); -+} -+SYM_16(mysql_close); -+ -+ -+uint STDCALL symver16_mysql_errno(MYSQL *mysql) -+{ -+ return mysql_errno(mysql); -+} -+SYM_16(mysql_errno); -+ -+ -+const char * STDCALL symver16_mysql_error(MYSQL *mysql) -+{ -+ return mysql_error(mysql); -+} -+SYM_16(mysql_error); -+ -+ -+ulong * STDCALL symver16_mysql_fetch_lengths(MYSQL_RES *res) -+{ -+ return mysql_fetch_lengths(res); -+} -+SYM_16(mysql_fetch_lengths); -+ -+ -+MYSQL_ROW STDCALL symver16_mysql_fetch_row(MYSQL_RES *res) -+{ -+ return mysql_fetch_row(res); -+} -+SYM_16(mysql_fetch_row); -+ -+ -+void STDCALL symver16_mysql_free_result(MYSQL_RES *result) -+{ -+ return mysql_free_result(result); -+} -+SYM_16(mysql_free_result); -+ -+ -+ulong STDCALL symver16_mysql_get_server_version(MYSQL *mysql) -+{ -+ return mysql_get_server_version(mysql); -+} -+SYM_16(mysql_get_server_version); -+ -+ -+const char * STDCALL symver16_mysql_get_ssl_cipher(MYSQL *mysql __attribute__((unused))) -+{ -+ return mysql_get_ssl_cipher(mysql); -+} -+SYM_16(mysql_get_ssl_cipher); -+ -+ -+MYSQL * STDCALL symver16_mysql_init(MYSQL *mysql) -+{ -+ return mysql_init(mysql); -+} -+SYM_16(mysql_init); -+ -+ -+unsigned int STDCALL symver16_mysql_num_fields(MYSQL_RES *res) -+{ -+ return mysql_num_fields(res); -+} -+SYM_16(mysql_num_fields); -+ -+ -+my_ulonglong STDCALL symver16_mysql_num_rows(MYSQL_RES *res) -+{ -+ return mysql_num_rows(res); -+} -+SYM_16(mysql_num_rows); -+ -+ -+int STDCALL symver16_mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg) -+{ -+ return mysql_options(mysql, option, arg); -+} -+SYM_16(mysql_options); -+ -+ -+int STDCALL symver16_mysql_real_query(MYSQL *mysql, const char *query, ulong length) -+{ -+ return mysql_real_query(mysql, query, length); -+} -+SYM_16(mysql_real_query); -+ -+ -+int STDCALL symver16_mysql_select_db(MYSQL *mysql, const char *db) -+{ -+ return mysql_select_db(mysql, db); -+} -+SYM_16(mysql_select_db); -+ -+ -+int STDCALL symver16_mysql_send_query(MYSQL* mysql, const char* query, ulong length) -+{ -+ return mysql_send_query(mysql, query, length); -+} -+SYM_16(mysql_send_query); -+ -+ -+int STDCALL symver16_mysql_set_character_set(MYSQL *mysql, const char *cs_name) -+{ -+ return mysql_set_character_set(mysql, cs_name); -+} -+SYM_16(mysql_set_character_set); -+ -+ -+my_bool STDCALL symver16_mysql_ssl_set(MYSQL *mysql __attribute__((unused)), const char *key __attribute__((unused)), const char *cert __attribute__((unused)), const char *ca __attribute__((unused)), const char *capath __attribute__((unused)), const char *cipher __attribute__((unused))) -+{ -+ return mysql_ssl_set(mysql, key, cert, ca, capath, cipher); -+} -+SYM_16(mysql_ssl_set); -+ -+ -+MYSQL_RES * STDCALL symver16_mysql_store_result(MYSQL *mysql) -+{ -+ return mysql_store_result(mysql); -+} -+SYM_16(mysql_store_result); -+ -+#endif diff --git a/packaging/rpm-oel/mysql-systemd-start b/packaging/rpm-oel/mysql-systemd-start deleted file mode 100644 index 9cb2a25c990..00000000000 --- a/packaging/rpm-oel/mysql-systemd-start +++ /dev/null @@ -1,52 +0,0 @@ -#! 
/bin/bash -# -# Scripts to run by MySQL systemd service -# -# Needed argument: pre | post -# -# pre mode : try to run mysql_install_db and fix perms and SELinux contexts -# post mode : ping server until answer is received -# - -install_db () { - # Note: something different than datadir=/var/lib/mysql requires SELinux policy changes (in enforcing mode) - datadir=$(/usr/bin/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p') - - # Restore log, dir, perms and SELinux contexts - [ -d "$datadir" ] || install -d -m 0755 -omysql -gmysql "$datadir" || exit 1 - log=/var/log/mysqld.log - [ -e $log ] || touch $log - chmod 0640 $log - chown mysql:mysql $log || exit 1 - if [ -x /usr/sbin/restorecon ]; then - /usr/sbin/restorecon "$datadir" - /usr/sbin/restorecon $log - fi - - # If special mysql dir is in place, skip db install - [ -d "$datadir/mysql" ] && exit 0 - - # Create initial db - /usr/bin/mysql_install_db --rpm --datadir="$datadir" --user=mysql - exit 0 -} - -pinger () { - # Wait for ping to answer to signal startup completed, - # might take a while in case of e.g. crash recovery - # MySQL systemd service will timeout script if no answer - while /bin/true ; do - sleep 1 - mysqladmin ping >/dev/null 2>&1 && break - done - exit 0 -} - -# main -case $1 in - "pre") install_db ;; - "post") pinger ;; -esac - -exit 0 - diff --git a/packaging/rpm-oel/mysql.conf b/packaging/rpm-oel/mysql.conf deleted file mode 100644 index 74cd5f836e7..00000000000 --- a/packaging/rpm-oel/mysql.conf +++ /dev/null @@ -1 +0,0 @@ -d /var/run/mysqld 0755 mysql mysql - diff --git a/packaging/rpm-oel/mysql.init b/packaging/rpm-oel/mysql.init deleted file mode 100644 index d6f8f023850..00000000000 --- a/packaging/rpm-oel/mysql.init +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/sh -# -# mysqld This shell script takes care of starting and stopping -# the MySQL subsystem (mysqld). -# -# chkconfig: - 64 36 -# description: MySQL database server. -# processname: mysqld -# config: /etc/my.cnf -# pidfile: /var/run/mysqld/mysqld.pid - -# Source function library. -. /etc/rc.d/init.d/functions - -# Source networking configuration. -. /etc/sysconfig/network - - -exec="/usr/bin/mysqld_safe" -prog="mysqld" - -# Set timeouts here so they can be overridden from /etc/sysconfig/mysqld -STARTTIMEOUT=120 -STOPTIMEOUT=60 - -[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog - -lockfile=/var/lock/subsys/$prog - - -# extract value of a MySQL option from config files -# Usage: get_mysql_option SECTION VARNAME DEFAULT -# result is returned in $result -# We use my_print_defaults which prints all options from multiple files, -# with the more specific ones later; hence take the last match. -get_mysql_option(){ - result=`/usr/bin/my_print_defaults "$1" | sed -n "s/^--$2=//p" | tail -n 1` - if [ -z "$result" ]; then - # not found, use default - result="$3" - fi -} - -get_mysql_option mysqld datadir "/var/lib/mysql" -datadir="$result" -get_mysql_option mysqld socket "$datadir/mysql.sock" -socketfile="$result" -get_mysql_option mysqld_safe log-error "/var/log/mysqld.log" -errlogfile="$result" -get_mysql_option mysqld_safe pid-file "/var/run/mysqld/mysqld.pid" -mypidfile="$result" - - -start(){ - [ -x $exec ] || exit 5 - # check to see if it's already running - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` - if [ $? 
= 0 ]; then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - elif echo "$RESPONSE" | grep -q "Access denied for user" - then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - else - # prepare for start - touch "$errlogfile" - chown mysql:mysql "$errlogfile" - chmod 0640 "$errlogfile" - [ -x /sbin/restorecon ] && /sbin/restorecon "$errlogfile" - if [ ! -d "$datadir/mysql" ] ; then - # First, make sure $datadir is there with correct permissions - if [ ! -e "$datadir" -a ! -h "$datadir" ] - then - mkdir -p "$datadir" || exit 1 - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - [ -x /sbin/restorecon ] && /sbin/restorecon "$datadir" - # Now create the database - action $"Initializing MySQL database: " /usr/bin/mysql_install_db --rpm --datadir="$datadir" --user=mysql - ret=$? - chown -R mysql:mysql "$datadir" - if [ $ret -ne 0 ] ; then - return $ret - fi - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - # Pass all the options determined above, to ensure consistent behavior. - # In many cases mysqld_safe would arrive at the same conclusions anyway - # but we need to be sure. (An exception is that we don't force the - # log-error setting, since this script doesn't really depend on that, - # and some users might prefer to configure logging to syslog.) - # Note: set --basedir to prevent probes that might trigger SELinux - # alarms, per bug #547485 - $exec --datadir="$datadir" --socket="$socketfile" \ - --pid-file="$mypidfile" \ - --basedir=/usr --user=mysql >/dev/null 2>&1 & - safe_pid=$! - # Spin for a maximum of N seconds waiting for the server to come up; - # exit the loop immediately if mysqld_safe process disappears. - # Rather than assuming we know a valid username, accept an "access - # denied" response as meaning the server is functioning. - ret=0 - TIMEOUT="$STARTTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` && break - echo "$RESPONSE" | grep -q "Access denied for user" && break - if ! /bin/kill -0 $safe_pid 2>/dev/null; then - echo "MySQL Daemon failed to start." - ret=1 - break - fi - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to start MySQL Daemon." - ret=1 - fi - if [ $ret -eq 0 ]; then - action $"Starting $prog: " /bin/true - touch $lockfile - else - action $"Starting $prog: " /bin/false - fi - fi - return $ret -} - -stop(){ - if [ ! -f "$mypidfile" ]; then - # not running; per LSB standards this is "ok" - action $"Stopping $prog: " /bin/true - return 0 - fi - MYSQLPID=`cat "$mypidfile"` - if [ -n "$MYSQLPID" ]; then - /bin/kill "$MYSQLPID" >/dev/null 2>&1 - ret=$? - if [ $ret -eq 0 ]; then - TIMEOUT="$STOPTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - /bin/kill -0 "$MYSQLPID" >/dev/null 2>&1 || break - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to stop MySQL Daemon." - ret=1 - action $"Stopping $prog: " /bin/false - else - rm -f $lockfile - rm -f "$socketfile" - action $"Stopping $prog: " /bin/true - fi - else - action $"Stopping $prog: " /bin/false - fi - else - # failed to read pidfile, probably insufficient permissions - action $"Stopping $prog: " /bin/false - ret=4 - fi - return $ret -} - -restart(){ - stop - start -} - -condrestart(){ - [ -e $lockfile ] && restart || : -} - - -# See how we were called. 
-case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p "$mypidfile" $prog - ;; - restart) - restart - ;; - condrestart|try-restart) - condrestart - ;; - reload) - exit 3 - ;; - force-reload) - restart - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" - exit 2 -esac - -exit $? diff --git a/packaging/rpm-oel/mysql.spec.in b/packaging/rpm-oel/mysql.spec.in deleted file mode 100644 index d28e89b4216..00000000000 --- a/packaging/rpm-oel/mysql.spec.in +++ /dev/null @@ -1,1666 +0,0 @@ -# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; see the file COPYING. If not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston -# MA 02110-1301 USA. - -# Rebuild on OL5/RHEL5 needs following rpmbuild options: -# rpmbuild --define 'dist .el5' --define 'rhel 5' --define 'el5 1' mysql.spec - -# NOTE: "vendor" is used in upgrade/downgrade check, so you can't -# change these, has to be exactly as is. - -%global mysql_vendor Oracle and/or its affiliates -%global mysqldatadir /var/lib/mysql - -# By default, a build will include the bundeled "yaSSL" library for SSL. -%{?with_ssl: %global ssl_option -DWITH_SSL=%{with_ssl}} - -# Regression tests may take a long time, override the default to skip them -%{!?runselftest:%global runselftest 0} - -%{!?with_systemd: %global systemd 0} -%{?el7: %global systemd 1} -%{!?with_debuginfo: %global nodebuginfo 1} -%{!?product_suffix: %global product_suffix community} -%{!?feature_set: %global feature_set community} -%{!?compilation_comment_release: %global compilation_comment_release MySQL Community Server - (GPL)} -%{!?compilation_comment_debug: %global compilation_comment_debug MySQL Community Server - Debug (GPL)} -%{!?src_base: %global src_base mysql} - -# Version for compat libs -%if 0%{?rhel} == 5 -%global compatver 5.0.96 -%global compatlib 15 -%global compatsrc http://downloads.mysql.com/archives/mysql-5.0/mysql-%{compatver}.tar.gz -%endif - -%if 0%{?rhel} == 6 -%global compatver 5.1.72 -%global compatlib 16 -%global compatsrc https://cdn.mysql.com/Downloads/MySQL-5.1/mysql-%{compatver}.tar.gz -%endif - -# multiarch -%global multiarchs ppc %{power64} %{ix86} x86_64 %{sparc} - -# Hack to support el5 where __isa_bits not defined. Note: supports i386 and x86_64 only, sorry. 
-%if x%{?__isa_bits} == x -%ifarch %{ix86} -%global __isa_bits 32 -%endif -%ifarch x86_64 -%global __isa_bits 64 -%endif -%endif - -%global src_dir %{src_base}-%{version} - -# No debuginfo for now, ships /usr/sbin/mysqld-debug and libmysqlcliet-debug.a -%if 0%{?nodebuginfo} -%global _enable_debug_package 0 -%global debug_package %{nil} -%global __os_install_post /usr/lib/rpm/brp-compress %{nil} -%endif - -%if 0%{?commercial} -%global license_files_server %{src_dir}/LICENSE.mysql -%global license_type Commercial -%else -%global license_files_server %{src_dir}/COPYING %{src_dir}/README -%global license_type GPLv2 -%endif - -Name: mysql-%{product_suffix} -Summary: A very fast and reliable SQL database server -Group: Applications/Databases -Version: @VERSION@ -Release: 2%{?commercial:.1}%{?dist} -License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{?license_type} license as shown in the Description field. -Source0: https://cdn.mysql.com/Downloads/MySQL-@MYSQL_BASE_VERSION@/%{src_dir}.tar.gz -URL: http://www.mysql.com/ -Packager: MySQL Release Engineering -Vendor: %{mysql_vendor} -Source1: mysql-systemd-start -Source2: mysqld.service -Source3: mysql.conf -Source4: my_config.h -Source5: mysql_config.sh -%if 0%{?compatlib} -Source7: %{compatsrc} -%endif -Source90: filter-provides.sh -Source91: filter-requires.sh -Patch0: mysql-5.5-libmysqlclient-symbols.patch -BuildRequires: cmake -BuildRequires: perl -%{?el7:BuildRequires: perl(Time::HiRes)} -%{?el7:BuildRequires: perl(Env)} -BuildRequires: time -BuildRequires: libaio-devel -BuildRequires: ncurses-devel -BuildRequires: openssl-devel -BuildRequires: zlib-devel -%if 0%{?systemd} -BuildRequires: systemd -%endif -BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) - -%if 0%{?rhel} > 6 -# For rpm => 4.9 only: https://fedoraproject.org/wiki/Packaging:AutoProvidesAndRequiresFiltering -%global __requires_exclude ^perl\\((hostnames|lib::mtr|lib::v1|mtr_|My::) -%global __provides_exclude_from ^(/usr/share/(mysql|mysql-test)/.*|%{_libdir}/mysql/plugin/.*\\.so)$ -%else -# https://fedoraproject.org/wiki/EPEL:Packaging#Generic_Filtering_on_EPEL6 -%global __perl_provides %{SOURCE90} -%global __perl_requires %{SOURCE91} -%endif - -%description -The MySQL(TM) software delivers a very fast, multi-threaded, multi-user, -and robust SQL (Structured Query Language) database server. MySQL Server -is intended for mission-critical, heavy-load production systems as well -as for embedding into mass-deployed software. MySQL is a trademark of -%{mysql_vendor} - -The MySQL software has Dual Licensing, which means you can use the MySQL -software free of charge under the GNU General Public License -(http://www.gnu.org/licenses/). You can also purchase commercial MySQL -licenses from %{mysql_vendor} if you do not wish to be bound by the terms of -the GPL. See the chapter "Licensing and Support" in the manual for -further info. - -The MySQL web site (http://www.mysql.com/) provides the latest -news and information about the MySQL software. Also please see the -documentation and the manual for more information. 
- -%package server -Summary: A very fast and reliable SQL database server -Group: Applications/Databases -Requires: coreutils -Requires: grep -Requires: procps -Requires: shadow-utils -Requires: net-tools -%if 0%{?commercial} -Provides: MySQL-server-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-server-advanced < %{version}-%{release} -Obsoletes: mysql-community-server < %{version}-%{release} -Requires: mysql-enterprise-client%{?_isa} = %{version}-%{release} -Requires: mysql-enterprise-common%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-server%{?_isa} = %{version}-%{release} -Requires: mysql-community-client%{?_isa} = %{version}-%{release} -Requires: mysql-community-common%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-server < %{version}-%{release} -Obsoletes: mysql-server < %{version}-%{release} -Obsoletes: mariadb-server -Provides: mysql-server = %{version}-%{release} -Provides: mysql-server%{?_isa} = %{version}-%{release} -%if 0%{?systemd} -Requires(post): systemd -Requires(preun): systemd -Requires(postun): systemd -%else -Requires(post): /sbin/chkconfig -Requires(preun): /sbin/chkconfig -Requires(preun): /sbin/service -%endif - -%description server -The MySQL(TM) software delivers a very fast, multi-threaded, multi-user, -and robust SQL (Structured Query Language) database server. MySQL Server -is intended for mission-critical, heavy-load production systems as well -as for embedding into mass-deployed software. MySQL is a trademark of -%{mysql_vendor} - -The MySQL software has Dual Licensing, which means you can use the MySQL -software free of charge under the GNU General Public License -(http://www.gnu.org/licenses/). You can also purchase commercial MySQL -licenses from %{mysql_vendor} if you do not wish to be bound by the terms of -the GPL. See the chapter "Licensing and Support" in the manual for -further info. - -The MySQL web site (http://www.mysql.com/) provides the latest news and -information about the MySQL software. Also please see the documentation -and the manual for more information. - -This package includes the MySQL server binary as well as related utilities -to run and administer a MySQL server. - -%package client -Summary: MySQL database client applications and tools -Group: Applications/Databases -%if 0%{?commercial} -Provides: MySQL-client-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-client-advanced < %{version}-%{release} -Obsoletes: mysql-community-client < %{version}-%{release} -Requires: mysql-enterprise-libs%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-client%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-client < %{version}-%{release} -Obsoletes: mariadb -%if 0%{?rhel} > 5 -Obsoletes: mysql < %{version}-%{release} -Provides: mysql = %{version}-%{release} -Provides: mysql%{?_isa} = %{version}-%{release} -%endif - -%description client -This package contains the standard MySQL clients and administration -tools. - -%package common -Summary: MySQL database common files for server and client libs -Group: Applications/Databases -%if 0%{?commercial} -Obsoletes: mysql-community-common < %{version}-%{release} -%endif -Provides: mysql-common = %{version}-%{release} -Provides: mysql-common%{?_isa} = %{version}-%{release} -%{?el5:Requires: mysql%{?_isa} = %{version}-%{release}} - -%description common -This packages contains common files needed by MySQL client library, -MySQL database server, and MySQL embedded server. 
- - -%package test -Summary: Test suite for the MySQL database server -Group: Applications/Databases -%if 0%{?commercial} -Provides: MySQL-test-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-test-advanced < %{version}-%{release} -Obsoletes: mysql-community-test < %{version}-%{release} -Requires: mysql-enterprise-server%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-test%{?_isa} = %{version}-%{release} -Requires: mysql-community-server%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-test < %{version}-%{release} -Obsoletes: mysql-test < %{version}-%{release} -Obsoletes: mariadb-test -Provides: mysql-test = %{version}-%{release} -Provides: mysql-test%{?_isa} = %{version}-%{release} - - -%description test -This package contains the MySQL regression test suite for MySQL -database server. - - -%package devel -Summary: Development header files and libraries for MySQL database client applications -Group: Applications/Databases -%if 0%{?commercial} -Provides: MySQL-devel-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-devel-advanced < %{version}-%{release} -Obsoletes: mysql-community-devel < %{version}-%{release} -Requires: mysql-enterprise-libs%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-devel%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-devel < %{version}-%{release} -Obsoletes: mysql-devel < %{version}-%{release} -Obsoletes: mariadb-devel -Provides: mysql-devel = %{version}-%{release} -Provides: mysql-devel%{?_isa} = %{version}-%{release} - -%description devel -This package contains the development header files and libraries necessary -to develop MySQL client applications. - -%package libs -Summary: Shared libraries for MySQL database client applications -Group: Applications/Databases -%if 0%{?commercial} -Provides: MySQL-shared-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-shared-advanced < %{version}-%{release} -Obsoletes: mysql-community-libs < %{version}-%{release} -Requires: mysql-enterprise-common%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-shared%{?_isa} = %{version}-%{release} -Requires: mysql-community-common%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-shared < %{version}-%{release} -Obsoletes: mysql-libs < %{version}-%{release} -Obsoletes: mariadb-libs -Provides: mysql-libs = %{version}-%{release} -Provides: mysql-libs%{?_isa} = %{version}-%{release} - -%description libs -This package contains the shared libraries for MySQL client -applications. 
- -%if 0%{?compatlib} -%package libs-compat -Summary: Shared compat libraries for MySQL %{compatver} database client applications -Group: Applications/Databases -Obsoletes: mysql-libs-compat < %{version}-%{release} -Provides: mysql-libs-compat = %{version}-%{release} -Provides: mysql-libs-compat%{?_isa} = %{version}-%{release} -%if 0%{?commercial} -Provides: MySQL-shared-compat-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-shared-compat-advanced < %{version}-%{release} -Obsoletes: mysql-community-libs-compat < %{version}-%{release} -Requires: mysql-enterprise-libs%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-shared-compat%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-shared-compat < %{version}-%{release} -%if 0%{?rhel} > 5 -Obsoletes: mysql-libs < %{version}-%{release} -%endif - -%description libs-compat -This package contains the shared compat libraries for MySQL %{compatver} client -applications. -%endif - -%package embedded -Summary: MySQL embedded library -Group: Applications/Databases -%if 0%{?commercial} -Provides: MySQL-embedded-advanced%{?_isa} = %{version}-%{release} -Obsoletes: MySQL-embedded-advanced < %{version}-%{release} -Obsoletes: mysql-community-embedded < %{version}-%{release} -Requires: mysql-enterprise-common%{?_isa} = %{version}-%{release} -%else -Provides: MySQL-embedded%{?_isa} = %{version}-%{release} -Requires: mysql-community-common%{?_isa} = %{version}-%{release} -%endif -Obsoletes: MySQL-embedded < %{version}-%{release} -Obsoletes: mysql-embedded < %{version}-%{release} -Provides: mysql-embedded = %{version}-%{release} -Provides: mysql-emdedded%{?_isa} = %{version}-%{release} - -%description embedded -This package contains the MySQL server as an embedded library. - -The embedded MySQL server library makes it possible to run a full-featured -MySQL server inside the client application. The main benefits are increased -speed and more simple management for embedded applications. - -The API is identical for the embedded MySQL version and the -client/server version. - -For a description of MySQL see the base MySQL RPM or http://www.mysql.com/ - -%package embedded-devel -Summary: Development header files and libraries for MySQL as an embeddable library -Group: Applications/Databases -%if 0%{?commercial} -Obsoletes: mysql-community-embedded-devel < %{version}-%{release} -Requires: mysql-enterprise-devel%{?_isa} = %{version}-%{release} -Requires: mysql-enterprise-embedded%{?_isa} = %{version}-%{release} -%else -Requires: mysql-community-devel%{?_isa} = %{version}-%{release} -Requires: mysql-community-embedded%{?_isa} = %{version}-%{release} -%endif -Obsoletes: mysql-embedded-devel < %{version}-%{release} -Provides: mysql-embedded-devel = %{version}-%{release} -Provides: mysql-embedded-devel%{?_isa} = %{version}-%{release} - -%description embedded-devel -This package contains files needed for developing applications using -the embedded version of the MySQL server. 
- -%if 0%{?rhel} == 5 -%package -n mysql -Summary: Convenience package for easy upgrades of MySQL package set -Group: Applications/Databases -%if 0%{?commercial} -Requires: mysql-enterprise-client%{?_isa} = %{version}-%{release} -Requires: mysql-enterprise-libs%{?_isa} = %{version}-%{release} -Requires: mysql-enterprise-libs-compat%{?_isa} = %{version}-%{release} -%else -Requires: mysql-community-client%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs%{?_isa} = %{version}-%{release} -Requires: mysql-community-libs-compat%{?_isa} = %{version}-%{release} -%endif - -%description -n mysql -This package has as sole purpose to require other MySQL packages such -that upgrades will be more convenient. - -Technical background: this is done to reflect the fact that mysql -package has been split into several subpackages. -%endif - -%prep -%if 0%{?compatlib} -%setup -q -T -a 0 -a 7 -c -n %{src_dir} -%else -%setup -q -T -a 0 -c -n %{src_dir} -%endif # 0%{?compatlib} -pushd %{src_dir} -%{?el7:%patch0 -p1} - -%build -# Fail quickly and obviously if user tries to build as root -%if 0%{?runselftest} -if [ "x$(id -u)" = "x0" ] ; then - echo "The MySQL regression tests may fail if run as root." - echo "If you really need to build the RPM as root, use" - echo "--define='runselftest 0' to skip the regression tests." - exit 1 -fi -%endif - -%if 0%{?compatlib} -# Build compat libs -( -export CFLAGS="%{optflags} -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -fno-strict-aliasing -fwrapv" -export CXXFLAGS="$CFLAGS %{?el6:-felide-constructors} -fno-rtti -fno-exceptions" -pushd mysql-%{compatver} -%configure \ - --with-readline \ - --without-debug \ - --enable-shared \ - --localstatedir=/var/lib/mysql \ - --with-unix-socket-path=/var/lib/mysql/mysql.sock \ - --with-mysqld-user="mysql" \ - --with-extra-charsets=all \ - --enable-local-infile \ - --enable-largefile \ - --enable-thread-safe-client \ -%if 0%{?rhel} == 6 - --with-ssl=%{_prefix} \ - --with-embedded-server \ - --with-big-tables \ - --with-pic \ - --with-plugin-innobase \ - --with-plugin-innodb_plugin \ - --with-plugin-partition \ -%endif -%if 0%{?rhel} == 5 - --with-openssl \ - --with-bench \ - -with-innodb \ - --with-berkeley-db \ - --enable-community-features \ - --enable-profiling \ - --with-named-thread-libs="-lpthread" \ -%endif - --disable-dependency-tracking -make %{?_smp_mflags} -popd -) -%endif # 0%{?compatlib} - -# Build debug versions of mysqld and libmysqld.a -mkdir debug -( - cd debug - # Attempt to remove any optimisation flags from the debug build - optflags=$(echo "%{optflags}" | sed -e 's/-O2 / /' -e 's/-Wp,-D_FORTIFY_SOURCE=2/ /') - cmake ../%{src_dir} \ - -DBUILD_CONFIG=mysql_release \ - -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=Debug %{?el7:-DENABLE_DTRACE=OFF} \ - -DCMAKE_C_FLAGS="$optflags" \ - -DCMAKE_CXX_FLAGS="$optflags" \ - -DINSTALL_LIBDIR="%{_lib}/mysql" \ - -DINSTALL_PLUGINDIR="%{_lib}/mysql/plugin" \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DWITH_EMBEDDED_SERVER=1 \ - -DWITH_EMBEDDED_SHARED_LIBRARY=1 \ - %{?ssl_option} \ - -DCOMPILATION_COMMENT="%{compilation_comment_debug}" \ - -DMYSQL_SERVER_SUFFIX="%{?server_suffix}" - echo BEGIN_DEBUG_CONFIG ; egrep '^#define' include/config.h ; echo END_DEBUG_CONFIG - make %{?_smp_mflags} VERBOSE=1 -) - -# Build full release -mkdir release -( - cd release - cmake ../%{src_dir} \ - -DBUILD_CONFIG=mysql_release \ - -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo %{?el7:-DENABLE_DTRACE=OFF} \ - 
-DCMAKE_C_FLAGS="%{optflags}" \ - -DCMAKE_CXX_FLAGS="%{optflags}" \ - -DINSTALL_LIBDIR="%{_lib}/mysql" \ - -DINSTALL_PLUGINDIR="%{_lib}/mysql/plugin" \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DWITH_EMBEDDED_SERVER=1 \ - -DWITH_EMBEDDED_SHARED_LIBRARY=1 \ - %{?ssl_option} \ - -DCOMPILATION_COMMENT="%{compilation_comment_release}" \ - -DMYSQL_SERVER_SUFFIX="%{?server_suffix}" - echo BEGIN_NORMAL_CONFIG ; egrep '^#define' include/config.h ; echo END_NORMAL_CONFIG - make %{?_smp_mflags} VERBOSE=1 -) - -%install -%if 0%{?compatlib} -# Install compat libs -for dir in libmysql libmysql_r ; do - pushd mysql-%{compatver}/$dir - make DESTDIR=%{buildroot} install - popd -done -rm -f %{buildroot}%{_libdir}/mysql/libmysqlclient{,_r}.{a,la,so} -%endif # 0%{?compatlib} - -MBD=$RPM_BUILD_DIR/%{src_dir} - -# Ensure that needed directories exists -install -d -m 0755 %{buildroot}%{_datadir}/mysql/SELinux/RHEL4 -install -d -m 0755 %{buildroot}/var/lib/mysql -install -d -m 0755 %{buildroot}/var/run/mysqld - -# Install all binaries -cd $MBD/release -make DESTDIR=%{buildroot} install - -# Install logrotate and autostart -install -D -m 0644 $MBD/release/support-files/mysql-log-rotate %{buildroot}%{_sysconfdir}/logrotate.d/mysql -install -D -m 0644 $MBD/release/packaging/rpm-oel/my.cnf %{buildroot}%{_sysconfdir}/my.cnf -%if 0%{?systemd} -install -D -m 0755 %{SOURCE1} %{buildroot}%{_bindir}/mysql-systemd-start -install -D -m 0644 %{SOURCE2} %{buildroot}%{_unitdir}/mysqld.service -%else -install -D -m 0755 $MBD/release/packaging/rpm-oel/mysql.init %{buildroot}%{_sysconfdir}/init.d/mysqld -%endif -install -D -m 0644 %{SOURCE3} %{buildroot}%{_prefix}/lib/tmpfiles.d/mysql.conf - -# Make library links -install -d -m 0755 %{buildroot}%{_sysconfdir}/ld.so.conf.d -echo "%{_libdir}/mysql" > %{buildroot}%{_sysconfdir}/ld.so.conf.d/mysql-%{_arch}.conf - -# multiarch support -%ifarch %{multiarchs} -mv %{buildroot}/%{_includedir}/mysql/my_config.h \ - %{buildroot}/%{_includedir}/mysql/my_config_%{_arch}.h -install -p -m 0644 %{SOURCE4} %{buildroot}/%{_includedir}/mysql/my_config.h -mv %{buildroot}/%{_bindir}/mysql_config %{buildroot}/%{_bindir}/mysql_config-%{__isa_bits} -install -p -m 0755 %{SOURCE5} %{buildroot}/%{_bindir}/mysql_config -%endif - -# Install SELinux files in datadir -install -m 0644 $MBD/%{src_dir}/support-files/RHEL4-SElinux/mysql.{fc,te} \ - %{buildroot}%{_datadir}/mysql/SELinux/RHEL4 - -# Remove files pages we explicitly do not want to package -rm -rf %{buildroot}%{_datadir}/mysql/solaris -rm -rf %{buildroot}%{_infodir}/mysql.info* -rm -rf %{buildroot}%{_datadir}/mysql/binary-configure -rm -rf %{buildroot}%{_datadir}/mysql/mysql.server -rm -rf %{buildroot}%{_datadir}/mysql/mysqld_multi.server -rm -f %{buildroot}%{_datadir}/mysql/{ndb-config-2-node,config*}.ini -rm -f %{buildroot}%{_datadir}/mysql/my-*.cnf -%if 0%{?systemd} -rm -rf %{buildroot}%{_sysconfdir}/init.d/mysqld -%endif -rm -rf %{buildroot}%{_bindir}/mysql_embedded -rm -rf %{buildroot}%{_bindir}/mysql_setpermission -rm -rf %{buildroot}%{_mandir}/man1/mysql_setpermission.1* - -%check -%if 0%{?runselftest} -pushd release -make test VERBOSE=1 -export MTR_BUILD_THREAD=auto -pushd mysql-test -./mtr \ - --mem --parallel=auto --force --retry=0 \ - --mysqld=--binlog-format=mixed \ - --suite-timeout=720 --testcase-timeout=30 \ - --clean-vardir -rm -r $(readlink var) var -%endif - -%pre server -/usr/sbin/groupadd -g 27 -o -r mysql >/dev/null 2>&1 || : -/usr/sbin/useradd -M -N -g mysql -o -r -d /var/lib/mysql -s 
/bin/bash \ - -c "MySQL Server" -u 27 mysql >/dev/null 2>&1 || : - -%post server -datadir=$(/usr/bin/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p') -/bin/chmod 0755 "$datadir" -/bin/touch /var/log/mysqld.log -%if 0%{?systemd} -%systemd_post mysqld.service -/sbin/service mysqld enable >/dev/null 2>&1 || : -%else -/sbin/chkconfig --add mysqld -%endif - -%preun server -%if 0%{?systemd} -%systemd_preun mysqld.service -%else -if [ "$1" = 0 ]; then - /sbin/service mysqld stop >/dev/null 2>&1 || : - /sbin/chkconfig --del mysqld -fi -%endif - -%postun server -%if 0%{?systemd} -%systemd_postun_with_restart mysqld.service -%else -if [ $1 -ge 1 ]; then - /sbin/service mysqld condrestart >/dev/null 2>&1 || : -fi -%endif - -%post libs -p /sbin/ldconfig - -%postun libs -p /sbin/ldconfig - -%if 0%{?compatlib} -%post libs-compat -p /sbin/ldconfig - -%postun libs-compat -p /sbin/ldconfig -%endif - -%post embedded -p /sbin/ldconfig - -%postun embedded -p /sbin/ldconfig - -%files server -%defattr(-, root, root, -) -%doc %{?license_files_server} %{src_dir}/Docs/ChangeLog -%doc %{src_dir}/Docs/INFO_SRC* -%doc release/Docs/INFO_BIN* -%attr(644, root, root) %{_mandir}/man1/innochecksum.1* -%attr(644, root, root) %{_mandir}/man1/my_print_defaults.1* -%attr(644, root, root) %{_mandir}/man1/myisam_ftdump.1* -%attr(644, root, root) %{_mandir}/man1/myisamchk.1* -%attr(644, root, root) %{_mandir}/man1/myisamlog.1* -%attr(644, root, root) %{_mandir}/man1/myisampack.1* -%attr(644, root, root) %{_mandir}/man1/mysql_convert_table_format.1* -%attr(644, root, root) %{_mandir}/man1/mysql_fix_extensions.1* -%attr(644, root, root) %{_mandir}/man8/mysqld.8* -%attr(644, root, root) %{_mandir}/man1/mysqld_multi.1* -%attr(644, root, root) %{_mandir}/man1/mysqld_safe.1* -%attr(644, root, root) %{_mandir}/man1/mysqldumpslow.1* -%attr(644, root, root) %{_mandir}/man1/mysql_install_db.1* -%attr(644, root, root) %{_mandir}/man1/mysql_plugin.1* -%attr(644, root, root) %{_mandir}/man1/mysql_secure_installation.1* -%attr(644, root, root) %{_mandir}/man1/mysql_upgrade.1* -%attr(644, root, root) %{_mandir}/man1/mysqlhotcopy.1* -%attr(644, root, root) %{_mandir}/man1/mysqlman.1* -%attr(644, root, root) %{_mandir}/man1/mysql.server.1* -%attr(644, root, root) %{_mandir}/man1/mysqltest.1* -%attr(644, root, root) %{_mandir}/man1/mysql_tzinfo_to_sql.1* -%attr(644, root, root) %{_mandir}/man1/mysql_zap.1* -%attr(644, root, root) %{_mandir}/man1/mysqlbug.1* -%attr(644, root, root) %{_mandir}/man1/perror.1* -%attr(644, root, root) %{_mandir}/man1/replace.1* -%attr(644, root, root) %{_mandir}/man1/resolve_stack_dump.1* -%attr(644, root, root) %{_mandir}/man1/resolveip.1* - -%config(noreplace) %{_sysconfdir}/my.cnf - -%attr(755, root, root) %{_bindir}/innochecksum -%attr(755, root, root) %{_bindir}/my_print_defaults -%attr(755, root, root) %{_bindir}/myisam_ftdump -%attr(755, root, root) %{_bindir}/myisamchk -%attr(755, root, root) %{_bindir}/myisamlog -%attr(755, root, root) %{_bindir}/myisampack -%attr(755, root, root) %{_bindir}/mysql_convert_table_format -%attr(755, root, root) %{_bindir}/mysql_fix_extensions -%attr(755, root, root) %{_bindir}/mysql_install_db -%attr(755, root, root) %{_bindir}/mysql_plugin -%attr(755, root, root) %{_bindir}/mysql_secure_installation -%attr(755, root, root) %{_bindir}/mysql_tzinfo_to_sql -%attr(755, root, root) %{_bindir}/mysql_upgrade -%attr(755, root, root) %{_bindir}/mysql_zap -%attr(755, root, root) %{_bindir}/mysqlbug -%attr(755, root, root) %{_bindir}/mysqld_multi 
-%attr(755, root, root) %{_bindir}/mysqld_safe -%attr(755, root, root) %{_bindir}/mysqldumpslow -%attr(755, root, root) %{_bindir}/mysqlhotcopy -%attr(755, root, root) %{_bindir}/mysqltest -%attr(755, root, root) %{_bindir}/perror -%attr(755, root, root) %{_bindir}/replace -%attr(755, root, root) %{_bindir}/resolve_stack_dump -%attr(755, root, root) %{_bindir}/resolveip -%if 0%{?systemd} -%attr(755, root, root) %{_bindir}/mysql-systemd-start -%endif -%attr(755, root, root) %{_sbindir}/mysqld -%attr(755, root, root) %{_sbindir}/mysqld-debug - -%dir %{_libdir}/mysql/plugin -%attr(755, root, root) %{_libdir}/mysql/plugin/adt_null.so -%attr(755, root, root) %{_libdir}/mysql/plugin/auth_socket.so -%attr(755, root, root) %{_libdir}/mysql/plugin/mypluglib.so -%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_master.so -%attr(755, root, root) %{_libdir}/mysql/plugin/semisync_slave.so -%dir %{_libdir}/mysql/plugin/debug -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/adt_null.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_socket.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/mypluglib.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_master.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/semisync_slave.so - -%attr(755, root, root) %{_libdir}/mysql/plugin/auth.so -%attr(755, root, root) %{_libdir}/mysql/plugin/auth_test_plugin.so -%attr(644, root, root) %{_libdir}/mysql/plugin/daemon_example.ini -%attr(755, root, root) %{_libdir}/mysql/plugin/libdaemon_example.so -%attr(755, root, root) %{_libdir}/mysql/plugin/qa_auth_client.so -%attr(755, root, root) %{_libdir}/mysql/plugin/qa_auth_interface.so -%attr(755, root, root) %{_libdir}/mysql/plugin/qa_auth_server.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/auth_test_plugin.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/libdaemon_example.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/qa_auth_client.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/qa_auth_interface.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/qa_auth_server.so - -%if 0%{?commercial} -%attr(755, root, root) %{_libdir}/mysql/plugin/audit_log.so -%attr(755, root, root) %{_libdir}/mysql/plugin/authentication_pam.so -%attr(755, root, root) %{_libdir}/mysql/plugin/thread_pool.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/audit_log.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/authentication_pam.so -%attr(755, root, root) %{_libdir}/mysql/plugin/debug/thread_pool.so -%endif - -%attr(644, root, root) %{_datadir}/mysql/fill_help_tables.sql -%attr(644, root, root) %{_datadir}/mysql/mysql_system_tables.sql -%attr(644, root, root) %{_datadir}/mysql/mysql_system_tables_data.sql -%attr(644, root, root) %{_datadir}/mysql/mysql_test_data_timezone.sql -%attr(644, root, root) %{_datadir}/mysql/mysql-log-rotate -%attr(644, root, root) %{_datadir}/mysql/SELinux/RHEL4/mysql.fc -%attr(644, root, root) %{_datadir}/mysql/SELinux/RHEL4/mysql.te -%attr(644, root, root) %{_datadir}/mysql/magic -%attr(644, root, root) %{_prefix}/lib/tmpfiles.d/mysql.conf -%if 0%{?systemd} -%attr(644, root, root) %{_unitdir}/mysqld.service -%else -%attr(755, root, root) %{_sysconfdir}/init.d/mysqld -%endif -%attr(644, root, root) %config(noreplace,missingok) %{_sysconfdir}/logrotate.d/mysql -%dir %attr(755, mysql, mysql) /var/lib/mysql -%dir %attr(755, mysql, mysql) /var/run/mysqld - -%files common -%defattr(-, root, root, -) 
-%{_datadir}/mysql/charsets/ -%{_datadir}/mysql/errmsg-utf8.txt -%{_datadir}/mysql/czech/ -%{_datadir}/mysql/danish/ -%{_datadir}/mysql/dutch/ -%{_datadir}/mysql/english/ -%{_datadir}/mysql/estonian/ -%{_datadir}/mysql/french/ -%{_datadir}/mysql/german/ -%{_datadir}/mysql/greek/ -%{_datadir}/mysql/hungarian/ -%{_datadir}/mysql/italian/ -%{_datadir}/mysql/japanese/ -%{_datadir}/mysql/korean/ -%{_datadir}/mysql/norwegian-ny/ -%{_datadir}/mysql/norwegian/ -%{_datadir}/mysql/polish/ -%{_datadir}/mysql/portuguese/ -%{_datadir}/mysql/romanian/ -%{_datadir}/mysql/russian/ -%{_datadir}/mysql/serbian/ -%{_datadir}/mysql/slovak/ -%{_datadir}/mysql/spanish/ -%{_datadir}/mysql/swedish/ -%{_datadir}/mysql/ukrainian/ - -%files client -%defattr(-, root, root, -) -%attr(755, root, root) %{_bindir}/msql2mysql -%attr(755, root, root) %{_bindir}/mysql -%attr(755, root, root) %{_bindir}/mysql_find_rows -%attr(755, root, root) %{_bindir}/mysql_waitpid -%attr(755, root, root) %{_bindir}/mysqlaccess -# XXX: This should be moved to %{_sysconfdir} -%attr(644, root, root) %{_bindir}/mysqlaccess.conf -%attr(755, root, root) %{_bindir}/mysqladmin -%attr(755, root, root) %{_bindir}/mysqlbinlog -%attr(755, root, root) %{_bindir}/mysqlcheck -%attr(755, root, root) %{_bindir}/mysqldump -%attr(755, root, root) %{_bindir}/mysqlimport -%attr(755, root, root) %{_bindir}/mysqlshow -%attr(755, root, root) %{_bindir}/mysqlslap -%attr(755, root, root) %{_bindir}/mysql_config -%attr(755, root, root) %{_bindir}/mysql_config-%{__isa_bits} - -%attr(644, root, root) %{_mandir}/man1/msql2mysql.1* -%attr(644, root, root) %{_mandir}/man1/mysql.1* -%attr(644, root, root) %{_mandir}/man1/mysql_find_rows.1* -%attr(644, root, root) %{_mandir}/man1/mysql_waitpid.1* -%attr(644, root, root) %{_mandir}/man1/mysqlaccess.1* -%attr(644, root, root) %{_mandir}/man1/mysqladmin.1* -%attr(644, root, root) %{_mandir}/man1/mysqlbinlog.1* -%attr(644, root, root) %{_mandir}/man1/mysqlcheck.1* -%attr(644, root, root) %{_mandir}/man1/mysqldump.1* -%attr(644, root, root) %{_mandir}/man1/mysqlimport.1* -%attr(644, root, root) %{_mandir}/man1/mysqlshow.1* -%attr(644, root, root) %{_mandir}/man1/mysqlslap.1* - -%files devel -%defattr(-, root, root, -) -%attr(644, root, root) %{_mandir}/man1/comp_err.1* -%attr(644, root, root) %{_mandir}/man1/mysql_config.1* -%attr(755, root, root) %{_bindir}/mysql_config -%attr(755, root, root) %{_bindir}/mysql_config-%{__isa_bits} -%{_includedir}/mysql -%{_datadir}/aclocal/mysql.m4 -%{_libdir}/mysql/libmysqlclient.a -%{_libdir}/mysql/libmysqlclient_r.a -%{_libdir}/mysql/libmysqlservices.a -%{_libdir}/mysql/libmysqlclient_r.so -%{_libdir}/mysql/libmysqlclient.so - -%files libs -%defattr(-, root, root, -) -%dir %attr(755, root, root) %{_libdir}/mysql -%attr(644, root, root) %{_sysconfdir}/ld.so.conf.d/mysql-%{_arch}.conf -%{_libdir}/mysql/libmysqlclient.so.18* -%{_libdir}/mysql/libmysqlclient_r.so.18* - -%if 0%{?compatlib} -%files libs-compat -%defattr(-, root, root, -) -%dir %attr(755, root, root) %{_libdir}/mysql -%attr(644, root, root) %{_sysconfdir}/ld.so.conf.d/mysql-%{_arch}.conf -%{_libdir}/mysql/libmysqlclient.so.%{compatlib} -%{_libdir}/mysql/libmysqlclient.so.%{compatlib}.0.0 -%{_libdir}/mysql/libmysqlclient_r.so.%{compatlib} -%{_libdir}/mysql/libmysqlclient_r.so.%{compatlib}.0.0 -%endif - -%files test -%defattr(-, root, root, -) -%attr(-, root, root) %{_datadir}/mysql-test -%attr(755, root, root) %{_bindir}/mysql_client_test -%attr(755, root, root) %{_bindir}/mysql_client_test_embedded -%attr(755, root, root) 
%{_bindir}/mysqltest_embedded -%attr(644, root, root) %{_mandir}/man1/mysql_client_test.1* -%attr(644, root, root) %{_mandir}/man1/mysql-stress-test.pl.1* -%attr(644, root, root) %{_mandir}/man1/mysql-test-run.pl.1* -%attr(644, root, root) %{_mandir}/man1/mysql_client_test_embedded.1* -%attr(644, root, root) %{_mandir}/man1/mysqltest_embedded.1* - -%files embedded -%defattr(-, root, root, -) -%dir %attr(755, root, root) %{_libdir}/mysql -%attr(644, root, root) %{_sysconfdir}/ld.so.conf.d/mysql-%{_arch}.conf -%attr(755, root, root) %{_libdir}/mysql/libmysqld.so.* - -%files embedded-devel -%defattr(-, root, root, -) -%attr(644, root, root) %{_libdir}/mysql/libmysqld.a -%attr(644, root, root) %{_libdir}/mysql/libmysqld-debug.a -%attr(755, root, root) %{_libdir}/mysql/libmysqld.so - -%if 0%{?rhel} == 5 -%files -n mysql -%defattr(-, root, root, -) -%doc %{?license_files_server} -%endif - -%changelog -* Sun May 11 2014 Balasubramanian Kandasamy - 5.5.38-2 -- Increment release version to resolve upgrade conflict issue - -* Thu Apr 24 2014 Balasubramanian Kandasamy - 5.5.38-1 -- Updated for 5.5.38 - -* Mon Apr 07 2014 Balasubramanian Kandasamy - 5.5.37-6 -- Fix Cflags for el7 - -* Mon Mar 31 2014 Balasubramanian Kandasamy - 5.5.37-5 -- Support for enterprise packages -- Upgrade from MySQL-* packages - -* Fri Mar 14 2014 Balasubramanian Kandasamy - 5.5.37-4 -- Resolve mysql conflict with mysql-community-client - -* Wed Mar 12 2014 Balasubramanian Kandasamy - 5.5.37-3 -- Resolve conflict with mysql-libs-compat - -* Thu Mar 06 2014 Balasubramanian Kandasamy - 5.5.37-2 -- Resolve conflict issues during upgrade - -* Mon Feb 10 2014 Balasubramanian Kandasamy - 5.5.37-1 -- Add support for el7 (with systemd enabled) -- Enable shared libmysqld by cmake option - -* Fri Oct 25 2013 Balasubramanian Kandasamy - 5.5.35-1 -- Backport changes from 5.6 - -* Mon Nov 05 2012 Joerg Bruehe - -- Allow to override the default to use the bundled yaSSL by an option like - --define="with_ssl /path/to/ssl" - -* Wed Oct 10 2012 Bjorn Munch - -- Replace old my-*.cnf config file examples with template my-default.cnf - -* Fri Oct 05 2012 Joerg Bruehe - -- Let the installation use the new option "--random-passwords" of "mysql_install_db". - (Bug# 12794345 Ensure root password) -- Fix an inconsistency: "new install" vs "upgrade" are told from the (non)existence - of "$mysql_datadir/mysql" (holding table "mysql.user" and other system stuff). - -* Tue Jul 24 2012 Joerg Bruehe - -- Add a macro "runselftest": - if set to 1 (default), the test suite will be run during the RPM build; - this can be oveeridden via the command line by adding - --define "runselftest 0" - Failures of the test suite will NOT make the RPM build fail! - -* Mon Jul 16 2012 Joerg Bruehe - -- Add the man page for the "mysql_config_editor". - -* Mon Jun 11 2012 Joerg Bruehe - -- Make sure newly added "SPECIFIC-ULN/" directory does not disturb packaging. - -* Wed Feb 29 2012 Brajmohan Saxena - -- Removal all traces of the readline library from mysql (BUG 13738013) - -* Wed Sep 28 2011 Joerg Bruehe - -- Fix duplicate mentioning of "mysql_plugin" and its manual page, - it is better to keep alphabetic order in the files list (merging!). - -* Wed Sep 14 2011 Joerg Bruehe - -- Let the RPM capabilities ("obsoletes" etc) ensure that an upgrade may replace - the RPMs of any configuration (of the current or the preceding release series) - by the new ones. 
This is done by not using the implicitly generated capabilities - (which include the configuration name) and relying on more generic ones which - just list the function ("server", "client", ...). - The implicit generation cannot be prevented, so all these capabilities must be - explicitly listed in "Obsoletes:" - -* Tue Sep 13 2011 Jonathan Perkin - -- Add support for Oracle Linux 6 and Red Hat Enterprise Linux 6. Due to - changes in RPM behaviour ($RPM_BUILD_ROOT is removed prior to install) - this necessitated a move of the libmygcc.a installation to the install - phase, which is probably where it belonged in the first place. - -* Tue Sep 13 2011 Joerg Bruehe - -- "make_win_bin_dist" and its manual are dropped, cmake does it different. - -* Thu Sep 08 2011 Daniel Fischer - -- Add mysql_plugin man page. - -* Tue Aug 30 2011 Tor Didriksen - -- Set CXX=g++ by default to add a dependency on libgcc/libstdc++. - Also, remove the use of the -fno-exceptions and -fno-rtti flags. - TODO: update distro_buildreq/distro_requires - -* Tue Aug 30 2011 Joerg Bruehe - -- Add the manual page for "mysql_plugin" to the server package. - -* Fri Aug 19 2011 Joerg Bruehe - -- Null-upmerge the fix of bug#37165: This spec file is not affected. -- Replace "/var/lib/mysql" by the spec file variable "%%{mysqldatadir}". - -* Fri Aug 12 2011 Daniel Fischer - -- Source plugin library files list from cmake-generated file. - -* Mon Jul 25 2011 Chuck Bell - -- Added the mysql_plugin client - enables or disables plugins. - -* Thu Jul 21 2011 Sunanda Menon - -- Fix bug#12561297: Added the MySQL embedded binary - -* Thu Jul 07 2011 Joerg Bruehe - -- Fix bug#45415: "rpm upgrade recreates test database" - Let the creation of the "test" database happen only during a new installation, - not in an RPM upgrade. - This affects both the "mkdir" and the call of "mysql_install_db". - -* Wed Feb 09 2011 Joerg Bruehe - -- Fix bug#56581: If an installation deviates from the default file locations - ("datadir" and "pid-file"), the mechanism to detect a running server (on upgrade) - should still work, and use these locations. - The problem was that the fix for bug#27072 did not check for local settings. - -* Mon Jan 31 2011 Joerg Bruehe - -- Install the new "manifest" files: "INFO_SRC" and "INFO_BIN". - -* Tue Nov 23 2010 Jonathan Perkin - -- EXCEPTIONS-CLIENT has been deleted, remove it from here too -- Support MYSQL_BUILD_MAKE_JFLAG environment variable for passing - a '-j' argument to make. - -* Mon Nov 1 2010 Georgi Kodinov - -- Added test authentication (WL#1054) plugin binaries - -* Wed Oct 6 2010 Georgi Kodinov - -- Added example external authentication (WL#1054) plugin binaries - -* Wed Aug 11 2010 Joerg Bruehe - -- With a recent spec file cleanup, names have changed: A "-community" part was dropped. - Reflect that in the "Obsoletes" specifications. -- Add a "triggerpostun" to handle the uninstall of the "-community" server RPM. -- This fixes bug#55015 "MySQL server is not restarted properly after RPM upgrade". - -* Tue Jun 15 2010 Joerg Bruehe - -- Change the behaviour on installation and upgrade: - On installation, do not autostart the server. - *Iff* the server was stopped before the upgrade is started, this is taken as a - sign the administrator is handling that manually, and so the new server will - not be started automatically at the end of the upgrade. - The start/stop scripts will still be installed, so the server will be started - on the next machine boot. 
- This is the 5.5 version of fixing bug#27072 (RPM autostarting the server). - -* Tue Jun 1 2010 Jonathan Perkin - -- Implement SELinux checks from distribution-specific spec file. - -* Wed May 12 2010 Jonathan Perkin - -- Large number of changes to build using CMake -- Introduce distribution-specific RPMs -- Drop debuginfo, build all binaries with debug/symbols -- Remove __os_install_post, use native macro -- Remove _unpackaged_files_terminate_build, make it an error to have - unpackaged files -- Remove cluster RPMs - -* Wed Mar 24 2010 Joerg Bruehe - -- Add "--with-perfschema" to the configure options. - -* Mon Mar 22 2010 Joerg Bruehe - -- User "usr/lib*" to allow for both "usr/lib" and "usr/lib64", - mask "rmdir" return code 1. -- Remove "ha_example.*" files from the list, they aren't built. - -* Wed Mar 17 2010 Joerg Bruehe - -- Fix a wrong path name in handling the debug plugins. - -* Wed Mar 10 2010 Joerg Bruehe - -- Take the result of the debug plugin build and put it into the optimized tree, - so that it becomes part of the final installation; - include the files in the packlist. Part of the fixes for bug#49022. - -* Mon Mar 01 2010 Joerg Bruehe - -- Set "Oracle and/or its affiliates" as the vendor and copyright owner, - accept upgrading from packages showing MySQL or Sun as vendor. - -* Fri Feb 12 2010 Joerg Bruehe - -- Formatting changes: - Have a consistent structure of separator lines and of indentation - (8 leading blanks => tab). -- Introduce the variable "src_dir". -- Give the environment variables "MYSQL_BUILD_CC(CXX)" precedence - over "CC" ("CXX"). -- Drop the old "with_static" argument analysis, this is not supported - in 5.1 since ages. -- Introduce variables to control the handlers individually, as well - as other options. -- Use the new "--with-plugin" notation for the table handlers. -- Drop handling "/etc/rc.d/init.d/mysql", the switch to "/etc/init.d/mysql" - was done back in 2002 already. -- Make "--with-zlib-dir=bundled" the default, add an option to disable it. -- Add missing manual pages to the file list. -- Improve the runtime check for "libgcc.a", protect it against being tried - with the Intel compiler "icc". - -* Mon Jan 11 2010 Joerg Bruehe - -- Change RPM file naming: - - Suffix like "-m2", "-rc" becomes part of version as "_m2", "_rc". - - Release counts from 1, not 0. - -* Wed Dec 23 2009 Joerg Bruehe - -- The "semisync" plugin file name has lost its introductory "lib", - adapt the file lists for the subpackages. - This is a part missing from the fix for bug#48351. -- Remove the "fix_privilege_tables" manual, it does not exist in 5.5 - (and likely, the whole script will go, too). - -* Mon Nov 16 2009 Joerg Bruehe - -- Fix some problems with the directives around "tcmalloc" (experimental), - remove erroneous traces of the InnoDB plugin (that is 5.1 only). - -* Tue Oct 06 2009 Magnus Blaudd - -- Removed mysql_fix_privilege_tables - -* Fri Oct 02 2009 Alexander Nozdrin - -- "mysqlmanager" got removed from version 5.4, all references deleted. - -* Fri Aug 28 2009 Joerg Bruehe - -- Merge up from 5.1 to 5.4: Remove handling for the InnoDB plugin. - -* Thu Aug 27 2009 Joerg Bruehe - -- This version does not contain the "Instance manager", "mysqlmanager": - Remove it from the spec file so that packaging succeeds. - -* Mon Aug 24 2009 Jonathan Perkin - -- Add conditionals for bundled zlib and innodb plugin - -* Fri Aug 21 2009 Jonathan Perkin - -- Install plugin libraries in appropriate packages. -- Disable libdaemon_example and ftexample plugins. 
- -* Thu Aug 20 2009 Jonathan Perkin - -- Update variable used for mysql-test suite location to match source. - -* Fri Nov 07 2008 Joerg Bruehe - -- Correct yesterday's fix, so that it also works for the last flag, - and fix a wrong quoting: un-quoted quote marks must not be escaped. - -* Thu Nov 06 2008 Kent Boortz - -- Removed "mysql_upgrade_shell" -- Removed some copy/paste between debug and normal build - -* Thu Nov 06 2008 Joerg Bruehe - -- Modify CFLAGS and CXXFLAGS such that a debug build is not optimized. - This should cover both gcc and icc flags. Fixes bug#40546. - -* Fri Aug 29 2008 Kent Boortz - -- Removed the "Federated" storage engine option, and enabled in all - -* Tue Aug 26 2008 Joerg Bruehe - -- Get rid of the "warning: Installed (but unpackaged) file(s) found:" - Some generated files aren't needed in RPMs: - - the "sql-bench/" subdirectory - Some files were missing: - - /usr/share/aclocal/mysql.m4 ("devel" subpackage) - - Manual "mysqlbug" ("server" subpackage) - - Program "innochecksum" and its manual ("server" subpackage) - - Manual "mysql_find_rows" ("client" subpackage) - - Script "mysql_upgrade_shell" ("client" subpackage) - - Program "ndb_cpcd" and its manual ("ndb-extra" subpackage) - - Manuals "ndb_mgm" + "ndb_restore" ("ndb-tools" subpackage) - -* Mon Mar 31 2008 Kent Boortz - -- Made the "Federated" storage engine an option -- Made the "Cluster" storage engine and sub packages an option - -* Wed Mar 19 2008 Joerg Bruehe - -- Add the man pages for "ndbd" and "ndb_mgmd". - -* Mon Feb 18 2008 Timothy Smith - -- Require a manual upgrade if the alread-installed mysql-server is - from another vendor, or is of a different major version. - -* Wed May 02 2007 Joerg Bruehe - -- "ndb_size.tmpl" is not needed any more, - "man1/mysql_install_db.1" lacked the trailing '*'. - -* Sat Apr 07 2007 Kent Boortz - -- Removed man page for "mysql_create_system_tables" - -* Wed Mar 21 2007 Daniel Fischer - -- Add debug server. - -* Mon Mar 19 2007 Daniel Fischer - -- Remove Max RPMs; the server RPMs contain a mysqld compiled with all - features that previously only were built into Max. - -* Fri Mar 02 2007 Joerg Bruehe - -- Add several man pages for NDB which are now created. - -* Fri Jan 05 2007 Kent Boortz - -- Put back "libmygcc.a", found no real reason it was removed. - -- Add CFLAGS to gcc call with --print-libgcc-file, to make sure the - correct "libgcc.a" path is returned for the 32/64 bit architecture. - -* Mon Dec 18 2006 Joerg Bruehe - -- Fix the move of "mysqlmanager" to section 8: Directory name was wrong. - -* Thu Dec 14 2006 Joerg Bruehe - -- Include the new man pages for "my_print_defaults" and "mysql_tzinfo_to_sql" - in the server RPM. -- The "mysqlmanager" man page got moved from section 1 to 8. - -* Thu Nov 30 2006 Joerg Bruehe - -- Call "make install" using "benchdir_root=%%{_datadir}", - because that is affecting the regression test suite as well. - -* Thu Nov 16 2006 Joerg Bruehe - -- Explicitly note that the "MySQL-shared" RPMs (as built by MySQL AB) - replace "mysql-shared" (as distributed by SuSE) to allow easy upgrading - (bug#22081). - -* Mon Nov 13 2006 Joerg Bruehe - -- Add "--with-partition" t 2006 Joerg Bruehe - -- Use the Perl script to run the tests, because it will automatically check - whether the server is configured with SSL. 
- -* Tue Jun 27 2006 Joerg Bruehe - -- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216) - -- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, - there are some more aspects which need to be solved before this is possible. - For now, just ensure the binary "mysql_upgrade" is delivered and installed. - -- To run "mysql_upgrade", we need a running server; - start it in isolation and skip password checks. - -* Sat May 20 2006 Kent Boortz - -- Always compile for PIC, position independent code. - -* Wed May 10 2006 Kent Boortz - -- Use character set "all" when compiling with Cluster, to make Cluster - nodes independent of the character set directory, and the problem - that two RPM sub packages both want to install this directory. - -* Mon May 01 2006 Kent Boortz - -- Use "./libtool --mode=execute" instead of searching for the - executable in current directory and ".libs". - -* Fri Apr 28 2006 Kent Boortz - -- Install and run "mysql_upgrade" - -* Wed Apr 12 2006 Jim Winstead - -- Remove sql-bench, and MySQL-bench RPM (will be built as an independent - project from the mysql-bench repository) - -* Tue Apr 11 2006 Jim Winstead - -- Remove old mysqltestmanager and related programs -* Sat Apr 01 2006 Kent Boortz - -- Set $LDFLAGS from $MYSQL_BUILD_LDFLAGS - -* Tue Mar 07 2006 Kent Boortz - -- Changed product name from "Community Edition" to "Community Server" - -* Mon Mar 06 2006 Kent Boortz - -- Fast mutexes are now disabled by default, but should be - used in Linux builds. - -* Mon Feb 20 2006 Kent Boortz - -- Reintroduced a max build -- Limited testing of 'debug' and 'max' servers -- Berkeley DB only in 'max' - -* Mon Feb 13 2006 Joerg Bruehe - -- Use "-i" on "make test-force"; - this is essential for later evaluation of this log file. - -* Thu Feb 09 2006 Kent Boortz - -- Pass '-static' to libtool, link static with our own libraries, dynamic - with system libraries. Link with the bundled zlib. - -* Wed Feb 08 2006 Kristian Nielsen - -- Modified RPM spec to match new 5.1 debug+max combined community packaging. - -* Sun Dec 18 2005 Kent Boortz - -- Added "client/mysqlslap" - -* Mon Dec 12 2005 Rodrigo Novo - -- Added zlib to the list of (static) libraries installed -- Added check against libtool weirdness (WRT: sql/mysqld || sql/.libs/mysqld) -- Compile MySQL with bundled zlib -- Fixed %%packager name to "MySQL Production Engineering Team" - -* Mon Dec 05 2005 Joerg Bruehe - -- Avoid using the "bundled" zlib on "shared" builds: - As it is not installed (on the build system), this gives dependency - problems with "libtool" causing the build to fail. - (Change was done on Nov 11, but left uncommented.) - -* Tue Nov 22 2005 Joerg Bruehe - -- Extend the file existence check for "init.d/mysql" on un-install - to also guard the call to "insserv"/"chkconfig".
- -* Thu Oct 27 2005 Lenz Grimmer - -- added more man pages - -* Wed Oct 19 2005 Kent Boortz - -- Made yaSSL support an option (off by default) - -* Wed Oct 19 2005 Kent Boortz - -- Enabled yaSSL support - -* Sat Oct 15 2005 Kent Boortz - -- Give mode arguments the same way in all places -lenz@mysql.com> - -- fixed the removing of the RPM_BUILD_ROOT in the %%clean section (the - $RBR variable did not get expanded, thus leaving old build roots behind) - -* Thu Aug 04 2005 Lenz Grimmer - -- Fixed the creation of the mysql user group account in the postinstall - section (BUG 12348) -- Fixed enabling the Archive storage engine in the Max binary - -* Tue Aug 02 2005 Lenz Grimmer - -- Fixed the Requires: tag for the server RPM (BUG 12233) - -* Fri Jul 15 2005 Lenz Grimmer - -- create a "mysql" user group and assign the mysql user account to that group - in the server postinstall section. (BUG 10984) - -* Tue Jun 14 2005 Lenz Grimmer - -- Do not build statically on i386 by default, only when adding either "--with - static" or "--define '_with_static 1'" to the RPM build options. Static - linking really only makes sense when linking against the specially patched - glibc 2.2.5. - -* Mon Jun 06 2005 Lenz Grimmer - -- added mysql_client_test to the "bench" subpackage (BUG 10676) -- added the libndbclient static and shared libraries (BUG 10676) - -* Wed Jun 01 2005 Lenz Grimmer - -- use "mysqldatadir" variable instead of hard-coding the path multiple times -- use the "mysqld_user" variable on all occasions a user name is referenced -- removed (incomplete) Brazilian translations -- removed redundant release tags from the subpackage descriptions - -* Wed May 25 2005 Joerg Bruehe - -- Added a "make clean" between separate calls to "BuildMySQL". - -* Thu May 12 2005 Guilhem Bichot - -- Removed the mysql_tableinfo script made obsolete by the information schema - -* Wed Apr 20 2005 Lenz Grimmer - -- Enabled the "blackhole" storage engine for the Max RPM - -* Wed Apr 13 2005 Lenz Grimmer - -- removed the MySQL manual files (html/ps/texi) - they have been removed - from the MySQL sources and are now available seperately. - -* Mon Apr 4 2005 Petr Chardin - -- old mysqlmanager, mysq* Mon Feb 7 2005 Tomas Ulin - -- enabled the "Ndbcluster" storage engine for the max binary -- added extra make install in ndb subdir after Max build to get ndb binaries -- added packages for ndbcluster storage engine - -* Fri Jan 14 2005 Lenz Grimmer - -- replaced obsoleted "BuildPrereq" with "BuildRequires" instead - -* Thu Jan 13 2005 Lenz Grimmer - -- enabled the "Federated" storage engine for the max binary - -* Tue Jan 04 2005 Petr Chardin - -- ISAM and merge storage engines were purged. 
As well as appropriate - tools and manpages (isamchk and isamlog) - -* Fri Dec 31 2004 Lenz Grimmer - -- enabled the "Archive" storage engine for the max binary -- enabled the "CSV" storage engine for the max binary -- enabled the "Example" storage engine for the max binary - -* Thu Aug 26 2004 Lenz Grimmer - -- MySQL-Max now requires MySQL-server instead of MySQL (BUG 3860) - -* Fri Aug 20 2004 Lenz Grimmer - -- do not link statically on IA64/AMD64 as these systems do not have - a patched glibc installed - -* Tue Aug 10 2004 Lenz Grimmer - -- Added libmygcc.a to the devel subpackage (required to link applications - against the the embedded server libmysqld.a) (BUG 4921) - -* Mon Aug 09 2004 Lenz Grimmer - -- Added EXCEPTIONS-CLIENT to the "devel" package - -* Thu Jul 29 2004 Lenz Grimmer - -- disabled OpenSSL in the Max binaries again (the RPM packages were the - only exception to this anyway) (BUG 1043) - -* Wed Jun 30 2004 Lenz Grimmer - -- fixed server postinstall (mysql_install_db was called with the wrong - parameter) - -* Thu Jun 24 2004 Lenz Grimmer - -- added mysql_tzinfo_to_sql to the server subpackage -- run "make clean" instead of "make distclean" - -* Mon Apr 05 2004 Lenz Grimmer - -- added ncurses-devel to the build prerequisites (BUG 3377) - -* Thu Feb 12 2004 Lenz Grimmer - -- when using gcc, _always_ use CXX=gcc -- replaced Copyright with License field (Copyright is obsolete) - -* Tue Feb 03 2004 Lenz Grimmer - -- added myisam_ftdump to the Server package - -* Tue Jan 13 2004 Lenz Grimmer - -- link the mysql client against libreadline instead of libedit (BUG 2289) - -* Mon Dec 22 2003 Lenz Grimmer - -- marked /etc/logrotate.d/mysql as a config file (BUG 2156) - -* Sat Dec 13 2003 Lenz Grimmer - -- fixed file permissions (BUG 1672) - -* Thu Dec 11 2003 Lenz Grimmer - -- made testing for gcc3 a bit more robust - -* Fri Dec 05 2003 Lenz Grimmer - -- added missing file mysql_create_system_tables to the server subpackage - -* Fri Nov 21 2003 Lenz Grimmer - -- removed dependency on MySQL-client from the MySQL-devel subpackage - as it is not really required. (BUG 1610) - -* Fri Aug 29 2003 Lenz Grimmer - -- Fixed BUG 1162 (removed macro names from the changelog) -- Really fixed BUG 998 (disable the checking for installed but - unpackaged files) - -* Tue Aug 05 2003 Lenz Grimmer - -- Fixed BUG 959 (libmysqld not being compiled properly) -- Fixed BUG 998 (RPM build errors): added missing files to the - distribution (mysql_fix_extensions, mysql_tableinfo, mysqldumpslow, - mysql_fix_privilege_tables.1), removed "-n" from install section. - -* Wed Jul 09 2003 Lenz Grimmer - -- removed the GIF Icon (file was not included in the sources anyway) -- removed unused variable shared_lib_version -- do not run automake before building the standard binary - (should not be necessary) -- add server suffix '-standard' to standard binary (to be in line - with the binary tarball distributions) -- Use more RPM macros (_exec_prefix, _sbindir, _libdir, _sysconfdir, - _datadir, _includedir) throughout the spec file. -- allow overriding CC and CXX (required when building with other compilers) - -* Fri May 16 2003 Lenz Grimmer - -- re-enabled RAID again - -* Wed Apr 30 2003 Lenz Grimmer - -- disabled MyISAM RAID (--with-raid)- it throws an assertion which - needs to be investigated first. 
- -* Mon Mar 10 2003 Lenz Grimmer - -- added missing file mysql_secure_installation to server subpackage - (BUG 141) - -* Tue Feb 11 2003 Lenz Grimmer - -- re-added missing pre- and post(un)install scripts to server subpackage -- added config file /etc/my.cnf to the file list (just for completeness) -- make sure to create the datadir with 755 permissions - -* Mon Jan 27 2003 Lenz Grimmer - -- removed unusedql.com> - -- Reworked the build steps a little bit: the Max binary is supposed - to include OpenSSL, which cannot be linked statically, thus trying - to statically link against a special glibc is futile anyway -- because of this, it is not required to make yet another build run - just to compile the shared libs (saves a lot of time) -- updated package description of the Max subpackage -- clean up the BuildRoot directory afterwards - -* Mon Jul 15 2002 Lenz Grimmer - -- Updated Packager information -- Fixed the build options: the regular package is supposed to - include InnoDB and linked statically, while the Max package - should include BDB and SSL support - -* Fri May 03 2002 Lenz Grimmer - -- Use more RPM macros (e.g. infodir, mandir) to make the spec - file more portable -- reorganized the installation of documentation files: let RPM - take care of this -- reorganized the file list: actually install man pages along - with the binaries of the respective subpackage -- do not include libmysqld.a in the devel subpackage as well, if we - have a special "embedded" subpackage -- reworked the package descriptions - -* Mon Oct 8 2001 Monty - -- Added embedded server as a separate RPM - -* Fri Apr 13 2001 Monty - -- Added mysqld-max to the distribution - -* Tue Jan 2 2001 Monty - -- Added mysql-test to the bench package - -* Fri Aug 18 2000 Tim Smith - -- Added separate libmysql_r directory; now both a threaded - and non-threaded library is shipped. - -* Tue Sep 28 1999 David Axmark - -- Added the support-files/my-example.cnf to the docs directory. - -- Removed devel dependency on base since it is about client - development. - -* Wed Sep 8 1999 David Axmark - -- Cleaned up some for 3.23. - -* Thu Jul 1 1999 David Axmark - -- Added support for shared libraries in a separate sub - package. Original fix by David Fox (dsfox@cogsci.ucsd.edu) - -- The --enable-assembler switch is now automatically disables on - platforms there assembler code is unavailable. This should allow - building this RPM on non i386 systems. - -* Mon Feb 22 1999 David Axmark - -- Removed unportable cc switches from the spec file. The defaults can - now be overridden with environment variables. This feature is used - to compile the official RPM with optimal (but compiler version - specific) switches. - -- Removed the repetitive description parts for the sub rpms. Maybe add - again if RPM gets a multiline macro capability. - -- Added support for a pt_BR translation. Translation contributed by - Jorge Godoy . - -* Wed Nov 4 1998 David Axmark - -- A lot of changes in all the rpm and install scripts. This may even - be a working RPM :-) - -* Sun Aug 16 1998 David Axmark - -- A developers changelog for MySQL is available in the source RPM. And - there is a history of major user visible changed in the Reference - Manual. Only RPM specific changes will be documented here. diff --git a/packaging/rpm-oel/mysql_config.sh b/packaging/rpm-oel/mysql_config.sh deleted file mode 100644 index abe46e0ed74..00000000000 --- a/packaging/rpm-oel/mysql_config.sh +++ /dev/null @@ -1,28 +0,0 @@ -#! 
/bin/bash -# -# Wrapper script for mysql_config to support multilib -# -# Only works on OEL6/RHEL6 and similar -# -# This command respects setarch - -bits=$(rpm --eval %__isa_bits) - -case $bits in - 32|64) status=known ;; - *) status=unknown ;; -esac - -if [ "$status" = "unknown" ] ; then - echo "$0: error: command 'rpm --eval %__isa_bits' returned unknown value: $bits" - exit 1 -fi - - -if [ -x /usr/bin/mysql_config-$bits ] ; then - /usr/bin/mysql_config-$bits "$@" -else - echo "$0: error: needed binary: /usr/bin/mysql_config-$bits is missing. Please check your MySQL installation." - exit 1 -fi - diff --git a/packaging/rpm-oel/mysqld.service b/packaging/rpm-oel/mysqld.service deleted file mode 100644 index 78ef3bffe60..00000000000 --- a/packaging/rpm-oel/mysqld.service +++ /dev/null @@ -1,48 +0,0 @@ -# -# Simple MySQL systemd service file -# -# systemd supports lots of fancy features, look here (and linked docs) for a full list: -# http://www.freedesktop.org/software/systemd/man/systemd.exec.html -# -# Note: this file ( /usr/lib/systemd/system/mysql.service ) -# will be overwritten on package upgrade, please copy the file to -# -# /etc/systemd/system/mysql.service -# -# to make needed changes. -# -# systemd-delta can be used to check differences between the two mysql.service files. -# - -[Unit] -Description=MySQL Community Server -After=network.target -After=syslog.target - -[Install] -WantedBy=multi-user.target -Alias=mysql.service - -[Service] -User=mysql -Group=mysql - -# Execute pre and post scripts as root -PermissionsStartOnly=true - -# Needed to create system tables etc. -ExecStartPre=/usr/bin/mysql-systemd-start pre - -# Start main service -ExecStart=/usr/bin/mysqld_safe - -# Don't signal startup success before a ping works -ExecStartPost=/usr/bin/mysql-systemd-start post - -# Give up if ping don't get an answer -TimeoutSec=600 - -Restart=always -PrivateTmp=false - - diff --git a/packaging/rpm-uln/CMakeLists.txt b/packaging/rpm-uln/CMakeLists.txt deleted file mode 100644 index c8f13379697..00000000000 --- a/packaging/rpm-uln/CMakeLists.txt +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - -IF(UNIX) - SET(prefix ${CMAKE_INSTALL_PREFIX}) - - SET(SPECFILENAME "mysql.${VERSION}.spec") - IF("${VERSION}" MATCHES "-ndb-") - STRING(REGEX REPLACE "^.*-ndb-" "" NDBVERSION "${VERSION}") - SET(SPECFILENAME "mysql-cluster-${NDBVERSION}.spec") - ENDIF() - - # Left in current directory, to be taken during build - CONFIGURE_FILE(mysql.spec.sh ${CMAKE_CURRENT_BINARY_DIR}/${SPECFILENAME} @ONLY) - - FOREACH(ulnfile filter-requires-mysql.sh generate-tarball.sh my.cnf my_config.h - mysql-5.5-errno.patch mysql-5.5-fix-tests.patch mysql-5.5-libdir.patch - mysql-5.5-mtr1.patch mysql-5.5-stack-guard.patch mysql-5.5-testing.patch - mysql-chain-certs.patch mysql-embedded-check.c mysql-expired-certs.patch - mysql.init mysql-install-test.patch mysql-strmov.patch scriptstub.c - README.mysql-docs) - CONFIGURE_FILE(${ulnfile} ${CMAKE_CURRENT_BINARY_DIR}/${ulnfile} COPYONLY) - ENDFOREACH() -ENDIF() - diff --git a/packaging/rpm-uln/README-ULN b/packaging/rpm-uln/README-ULN deleted file mode 100644 index 8ae44a18605..00000000000 --- a/packaging/rpm-uln/README-ULN +++ /dev/null @@ -1,15 +0,0 @@ -In order to have RPMs of MySQL which are distributed via ULN for Oracle Linux -to be as closely compatible to such RPMs built and distributed by RedHat, -this directory contains additional files which originated at RedHat -and are used only for such RPMs intended for distribution via ULN. - -Especially, this directory contains the spec file used to build these RPMs, -named "mysql.spec". Please regard the following note: - - You are receiving a copy of the Red Hat spec file. - The terms of the Oracle license do NOT apply to the Red Hat spec file; - it is licensed under the - GNU GENERAL PUBLIC LICENSE Version 2, June 1991 - separately from the Oracle programs you receive. - - diff --git a/packaging/rpm-uln/README.mysql-docs b/packaging/rpm-uln/README.mysql-docs deleted file mode 100644 index dd894a7b9c0..00000000000 --- a/packaging/rpm-uln/README.mysql-docs +++ /dev/null @@ -1,4 +0,0 @@ -The official MySQL documentation is not freely redistributable, so we cannot -include it in RHEL or Fedora. 
You can find it on-line at - -http://dev.mysql.com/doc/ diff --git a/packaging/rpm-uln/filter-requires-mysql.sh b/packaging/rpm-uln/filter-requires-mysql.sh deleted file mode 100755 index d435062b8dc..00000000000 --- a/packaging/rpm-uln/filter-requires-mysql.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -/usr/lib/rpm/perl.req $* | grep -v -e "perl(th" -e "perl(lib::mtr" -e "perl(mtr" diff --git a/packaging/rpm-uln/generate-tarball.sh b/packaging/rpm-uln/generate-tarball.sh deleted file mode 100755 index 2ff4bff2349..00000000000 --- a/packaging/rpm-uln/generate-tarball.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -VERSION=$1 - -rm -rf mysql-$VERSION - -tar xfz mysql-$VERSION.tar.gz || exit 1 - -rm mysql-$VERSION/Docs/mysql.info - -tar cfz mysql-$VERSION-nodocs.tar.gz mysql-$VERSION || exit 1 - -rm -rf mysql-$VERSION - -exit 0 diff --git a/packaging/rpm-uln/my.cnf b/packaging/rpm-uln/my.cnf deleted file mode 100644 index fae0fa276e1..00000000000 --- a/packaging/rpm-uln/my.cnf +++ /dev/null @@ -1,10 +0,0 @@ -[mysqld] -datadir=/var/lib/mysql -socket=/var/lib/mysql/mysql.sock -user=mysql -# Disabling symbolic-links is recommended to prevent assorted security risks -symbolic-links=0 - -[mysqld_safe] -log-error=/var/log/mysqld.log -pid-file=/var/run/mysqld/mysqld.pid diff --git a/packaging/rpm-uln/my_config.h b/packaging/rpm-uln/my_config.h deleted file mode 100644 index 435a126ac97..00000000000 --- a/packaging/rpm-uln/my_config.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Kluge to support multilib installation of both 32- and 64-bit RPMS: - * we need to arrange that header files that appear in both RPMs are - * identical. Hence, this file is architecture-independent and calls - * in an arch-dependent file that will appear in just one RPM. - * - * To avoid breaking arches not explicitly supported by Red Hat, we - * use this indirection file *only* on known multilib arches. - * - * Note: this may well fail if user tries to use gcc's -I- option. - * But that option is deprecated anyway. - */ -#if defined(__x86_64__) -#include "my_config_x86_64.h" -#elif defined(__i386__) -#include "my_config_i386.h" -#elif defined(__ppc64__) || defined(__powerpc64__) -#include "my_config_ppc64.h" -#elif defined(__ppc__) || defined(__powerpc__) -#include "my_config_ppc.h" -#elif defined(__s390x__) -#include "my_config_s390x.h" -#elif defined(__s390__) -#include "my_config_s390.h" -#elif defined(__sparc__) && defined(__arch64__) -#include "my_config_sparc64.h" -#elif defined(__sparc__) -#include "my_config_sparc.h" -#endif diff --git a/packaging/rpm-uln/mysql-5.5-errno.patch b/packaging/rpm-uln/mysql-5.5-errno.patch deleted file mode 100644 index 033e5195973..00000000000 --- a/packaging/rpm-uln/mysql-5.5-errno.patch +++ /dev/null @@ -1,21 +0,0 @@ -"extern int errno" is just a really bad idea. - - -diff -Naur mysql-5.1.32.orig/include/my_sys.h mysql-5.1.32/include/my_sys.h ---- mysql-5.1.32.orig/include/my_sys.h 2009-02-13 19:52:19.000000000 -0500 -+++ mysql-5.1.32/include/my_sys.h 2009-03-04 18:08:40.000000000 -0500 -@@ -199,13 +199,8 @@ - #define my_afree(PTR) my_free(PTR) - #endif /* HAVE_ALLOCA */ - --#ifndef errno /* did we already get it? 
*/ --#ifdef HAVE_ERRNO_AS_DEFINE - #include /* errno is a define */ --#else --extern int errno; /* declare errno */ --#endif --#endif /* #ifndef errno */ -+ - extern char *home_dir; /* Home directory for user */ - extern const char *my_progname; /* program-name (printed in errors) */ - extern char curr_dir[]; /* Current directory for user */ diff --git a/packaging/rpm-uln/mysql-5.5-fix-tests.patch b/packaging/rpm-uln/mysql-5.5-fix-tests.patch deleted file mode 100644 index a1ab7a82210..00000000000 --- a/packaging/rpm-uln/mysql-5.5-fix-tests.patch +++ /dev/null @@ -1,34 +0,0 @@ -Adapt tests (where needed) to RedHat conventions. - -1) The RedHat convention uses the package name "mysql*" whereas upstream uses "MySQL*". - Test "file_contents" constructs path names and needs to be adapted. - -=== modified file 'mysql-test/t/file_contents.test' ---- mysql-5.5.17-orig/mysql-test/t/file_contents.test 2011-10-10 12:03:29 +0000 -+++ mysql-5.5.17/mysql-test/t/file_contents.test 2011-11-16 18:07:55 +0000 -@@ -17,20 +17,20 @@ if ($dir_bin =~ m|/usr/|) { - $dir_docs =~ s|/lib|/share/doc|; - if(-d "$dir_docs/packages") { - # SuSE: "packages/" in the documentation path -- $dir_docs = glob "$dir_docs/packages/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/packages/mysql-server*"; - } else { - # RedHat: version number in directory name -- $dir_docs = glob "$dir_docs/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/mysql-server*"; - } - } elsif ($dir_bin =~ m|/usr$|) { - # RPM build during development - $dir_docs = "$dir_bin/share/doc"; - if(-d "$dir_docs/packages") { - # SuSE: "packages/" in the documentation path -- $dir_docs = glob "$dir_docs/packages/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/packages/mysql-server*"; - } else { - # RedHat: version number in directory name -- $dir_docs = glob "$dir_docs/MySQL-server*"; -+ $dir_docs = glob "$dir_docs/mysql-server*"; - } - } else { - # tar.gz package, Windows, or developer work (in BZR) - diff --git a/packaging/rpm-uln/mysql-5.5-libdir.patch b/packaging/rpm-uln/mysql-5.5-libdir.patch deleted file mode 100644 index 2ab3e9eec27..00000000000 --- a/packaging/rpm-uln/mysql-5.5-libdir.patch +++ /dev/null @@ -1,28 +0,0 @@ -The RPMs built by MySQL AB (-> Sun -> Oracle) put the libraries into "/usr/lib". -Those built by RedHat put them into "/usr/lib/mysql". -This patch is to modify the cmake files to follow the RedHat convention. -Similar, the server is now in "/usr/libexec" (formerly "/usr/sbin"). - - -diff -Naur mysql-5.5.17.orig/cmake/install_layout.cmake mysql-5.5.17/cmake/install_layout.cmake ---- mysql-5.5.17.orig/cmake/install_layout.cmake 2011-06-30 15:46:53 +0000 -+++ mysql-5.5.17/cmake/install_layout.cmake 2011-10-27 16:40:10 +0000 -@@ -140,14 +140,14 @@ SET(INSTALL_SBINDIR_RPM - # be applied at build time via "rpmbuild". 
- # - SET(INSTALL_BINDIR_RPM "bin") --SET(INSTALL_SBINDIR_RPM "sbin") -+SET(INSTALL_SBINDIR_RPM "libexec") - SET(INSTALL_SCRIPTDIR_RPM "bin") - # - IF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") -- SET(INSTALL_LIBDIR_RPM "lib64") -+ SET(INSTALL_LIBDIR_RPM "lib64/mysql") - SET(INSTALL_PLUGINDIR_RPM "lib64/mysql/plugin") - ELSE() -- SET(INSTALL_LIBDIR_RPM "lib") -+ SET(INSTALL_LIBDIR_RPM "lib/mysql") - SET(INSTALL_PLUGINDIR_RPM "lib/mysql/plugin") - ENDIF() - # - diff --git a/packaging/rpm-uln/mysql-5.5-mtr1.patch b/packaging/rpm-uln/mysql-5.5-mtr1.patch deleted file mode 100644 index 7a7dc85f16c..00000000000 --- a/packaging/rpm-uln/mysql-5.5-mtr1.patch +++ /dev/null @@ -1,25 +0,0 @@ -Drop support for version 1 of "mysql-test-run.pl" from the RPMs: - -1) The auto-generation of Perl dependencies will mishandle that code, - probably because its run directory differs from its storage location. -2) It does not provide several variables which are used in tests of MySQL 5.5 - -If you really need it, take it from the source tarball. - -=== modified file 'mysql-test/mysql-test-run.pl' ---- mysql-5.5.17-orig/mysql-test/mysql-test-run.pl 2011-10-03 11:16:40 +0000 -+++ mysql-5.5.17/mysql-test/mysql-test-run.pl 2011-11-16 19:06:38 +0000 -@@ -58,10 +58,9 @@ BEGIN { - if ( $version == 1 ) - { - print "=======================================================\n"; -- print " WARNING: Using mysql-test-run.pl version 1! \n"; -+ print " ERROR: Support for version 1 is dropped in this distribution! \n"; - print "=======================================================\n"; -- # Should use exec() here on *nix but this appears not to work on Windows -- exit(system($^X, "lib/v1/mysql-test-run.pl", @ARGV) >> 8); -+ exit(1); - } - elsif ( $version == 2 ) - { - diff --git a/packaging/rpm-uln/mysql-5.5-stack-guard.patch b/packaging/rpm-uln/mysql-5.5-stack-guard.patch deleted file mode 100644 index b2624d982de..00000000000 --- a/packaging/rpm-uln/mysql-5.5-stack-guard.patch +++ /dev/null @@ -1,140 +0,0 @@ -mysql is not accounting for the "guard page" when setting thread stack size -requests. This is fatal on PPC systems, which may use guard pages as large -as 64K. This patch also documents the IA64 situation a bit better. - -Note: there are quite a few other setstacksize calls besides the two in -mysqld.cc; is it important to fix any of the others? - -Filed upstream at http://bugs.mysql.com/bug.php?id=35019 - - -diff -Naur mysql-5.1.30.orig/sql/mysqld.cc mysql-5.1.30/sql/mysqld.cc ---- mysql-5.1.30.orig/sql/mysqld.cc 2008-11-14 11:37:13.000000000 -0500 -+++ mysql-5.1.30/sql/mysqld.cc 2009-01-13 12:08:35.000000000 -0500 -@@ -2653,6 +2653,70 @@ - } - - -+/* pthread_attr_setstacksize without so much platform-dependency */ -+/* returns the actual stack size if possible */ -+static size_t my_setstacksize(pthread_attr_t *attr, size_t stacksize) -+{ -+ size_t guard_size = 0; -+ -+#if defined(__ia64__) || defined(__ia64) -+ /* -+ On IA64, half of the requested stack size is used for "normal stack" -+ and half for "register stack". The space measured by check_stack_overrun -+ is the "normal stack", so double the request to make sure we have the -+ caller-expected amount of normal stack. -+ -+ NOTE: there is no guarantee that the register stack can't grow faster -+ than normal stack, so it's very unclear that we won't dump core due to -+ stack overrun despite check_stack_overrun's efforts. 
Experimentation -+ shows that in the execution_constants test, the register stack grows -+ less than half as fast as normal stack, but perhaps other scenarios are -+ less forgiving. If it turns out that more space is needed for the -+ register stack, that could be forced (rather inefficiently) by using a -+ multiplier higher than 2 here. -+ */ -+ stacksize *= 2; -+#endif -+ -+ /* -+ On many machines, the "guard space" is subtracted from the requested -+ stack size, and that space is quite large on some platforms. So add -+ it to our request, if we can find out what it is. -+ -+ FIXME: autoconfiscate use of pthread_attr_getguardsize -+ */ -+ if (pthread_attr_getguardsize(attr, &guard_size)) -+ guard_size = 0; /* if can't find it out, treat as 0 */ -+ -+ pthread_attr_setstacksize(attr, stacksize + guard_size); -+ -+ /* Retrieve actual stack size if possible */ -+#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE -+ { -+ size_t real_stack_size= 0; -+ /* We must ignore real_stack_size = 0 as Solaris 2.9 can return 0 here */ -+ if (pthread_attr_getstacksize(attr, &real_stack_size) == 0 && -+ real_stack_size > guard_size) -+ { -+ real_stack_size -= guard_size; -+ if (real_stack_size < stacksize) -+ { -+ if (global_system_variables.log_warnings) -+ sql_print_warning("Asked for %ld thread stack, but got %ld", -+ (long) stacksize, (long) real_stack_size); -+ stacksize= real_stack_size; -+ } -+ } -+ } -+#endif -+ -+#if defined(__ia64__) || defined(__ia64) -+ stacksize /= 2; -+#endif -+ return stacksize; -+} -+ -+ - static void start_signal_handler(void) - { - int error; -@@ -2663,15 +2727,7 @@ - #if !defined(HAVE_DEC_3_2_THREADS) - pthread_attr_setscope(&thr_attr,PTHREAD_SCOPE_SYSTEM); - (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED); --#if defined(__ia64__) || defined(__ia64) -- /* -- Peculiar things with ia64 platforms - it seems we only have half the -- stack size in reality, so we have to double it here -- */ -- pthread_attr_setstacksize(&thr_attr,my_thread_stack_size*2); --#else -- pthread_attr_setstacksize(&thr_attr,my_thread_stack_size); --#endif -+ (void) my_setstacksize(&thr_attr,my_thread_stack_size); - #endif - - mysql_mutex_lock(&LOCK_thread_count); -@@ -4445,37 +4501,7 @@ - unireg_abort(1); // Will do exit - - init_signals(); --#if defined(__ia64__) || defined(__ia64) -- /* -- Peculiar things with ia64 platforms - it seems we only have half the -- stack size in reality, so we have to double it here -- */ -- pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size*2); --#else -- pthread_attr_setstacksize(&connection_attrib,my_thread_stack_size); --#endif --#ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE -- { -- /* Retrieve used stack size; Needed for checking stack overflows */ -- size_t stack_size= 0; -- pthread_attr_getstacksize(&connection_attrib, &stack_size); --#if defined(__ia64__) || defined(__ia64) -- stack_size/= 2; --#endif -- /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */ -- if (stack_size && stack_size < my_thread_stack_size) -- { -- if (global_system_variables.log_warnings) -- sql_print_warning("Asked for %lu thread stack, but got %ld", -- my_thread_stack_size, (long) stack_size); --#if defined(__ia64__) || defined(__ia64) -- my_thread_stack_size= stack_size*2; --#else -- my_thread_stack_size= stack_size; --#endif -- } -- } --#endif -+ my_thread_stack_size = my_setstacksize(&connection_attrib,my_thread_stack_size); - - (void) thr_setconcurrency(concurrency); // 10 by default - diff --git a/packaging/rpm-uln/mysql-5.5-testing.patch 
b/packaging/rpm-uln/mysql-5.5-testing.patch deleted file mode 100644 index 74387135346..00000000000 --- a/packaging/rpm-uln/mysql-5.5-testing.patch +++ /dev/null @@ -1,23 +0,0 @@ -Hack the top-level Makefile to enable the openssl regression tests. -(Why doesn't this happen automatically given the configure option??) - -Also, increase the overall timeout for the regression tests to 12 hours, -because on a slow or heavily-loaded build machine sometimes the default of -5 hours isn't enough. (This has been demonstrated to fail in mass-rebuild -scenarios, which aren't that uncommon for Fedora.) Similarly increase the -per-testcase timeout to 30 minutes, since the default of 15 hasn't got a -great deal of headroom either. - - -diff -Naur mysql-5.1.32.orig/Makefile.am mysql-5.1.32/Makefile.am ---- mysql-5.1.32.orig/Makefile.am 2009-02-13 19:51:56.000000000 -0500 -+++ mysql-5.1.32/Makefile.am 2009-03-04 18:12:36.000000000 -0500 -@@ -98,7 +98,7 @@ - - test-ns: - cd mysql-test ; \ -- @PERL@ ./mysql-test-run.pl $(force) $(mem) --mysqld=--binlog-format=mixed -+ @PERL@ ./mysql-test-run.pl $(force) $(mem) --ssl --mysqld=--binlog-format=mixed --suite-timeout=720 --testcase-timeout=30 - - test-binlog-statement: - cd mysql-test ; \ diff --git a/packaging/rpm-uln/mysql-chain-certs.patch b/packaging/rpm-uln/mysql-chain-certs.patch deleted file mode 100644 index 4e26af16cb0..00000000000 --- a/packaging/rpm-uln/mysql-chain-certs.patch +++ /dev/null @@ -1,45 +0,0 @@ -Fix things so that chains of certificates work in the server and client -certificate files. - -This only really works for OpenSSL-based builds, as yassl is unable to read -multiple certificates from a file. The patch below to yassl/src/ssl.cpp -doesn't fix that, but just arranges that the viosslfactories.c patch won't -have any ill effects in a yassl build. Since we don't use yassl in Red Hat/ -Fedora builds, I'm not feeling motivated to try to fix yassl for this. - -See RH bug #598656. 
Filed upstream at http://bugs.mysql.com/bug.php?id=54158 - - === - -Joerg Bruehe, MySQL Build Team at Oracle: First patch adapted to code changes in MySQL 5.5 - - -diff -Naur mysql-5.5.29.orig/vio/viosslfactories.c mysql-5.5.29/vio/viosslfactories.c ---- mysql-5.5.29.orig/vio/viosslfactories.c 2010-05-06 11:28:07.000000000 -0400 -+++ mysql-5.5.29/vio/viosslfactories.c 2010-05-26 23:23:46.000000000 -0400 -@@ -106,7 +106,7 @@ - key_file= cert_file; - - if (cert_file && -- SSL_CTX_use_certificate_file(ctx, cert_file, SSL_FILETYPE_PEM) <= 0) -+ SSL_CTX_use_certificate_chain_file(ctx, cert_file) <= 0) - { - *error= SSL_INITERR_CERT; - DBUG_PRINT("error",("%s from file '%s'", sslGetErrString(*error), cert_file)); -diff -Naur mysql-5.1.47.orig/extra/yassl/src/ssl.cpp mysql-5.1.47/extra/yassl/src/ssl.cpp ---- mysql-5.1.47.orig/extra/yassl/src/ssl.cpp 2010-05-06 11:24:26.000000000 -0400 -+++ mysql-5.1.47/extra/yassl/src/ssl.cpp 2010-05-26 23:29:13.000000000 -0400 -@@ -1606,10 +1606,10 @@ - } - - -- int SSL_CTX_use_certificate_chain_file(SSL_CTX*, const char*) -+ int SSL_CTX_use_certificate_chain_file(SSL_CTX* ctx, const char* file) - { -- // TDOD: -- return SSL_SUCCESS; -+ // For the moment, treat like use_certificate_file -+ return read_file(ctx, file, SSL_FILETYPE_PEM, Cert); - } - - diff --git a/packaging/rpm-uln/mysql-embedded-check.c b/packaging/rpm-uln/mysql-embedded-check.c deleted file mode 100644 index 8bf8ca53dad..00000000000 --- a/packaging/rpm-uln/mysql-embedded-check.c +++ /dev/null @@ -1,26 +0,0 @@ -/* simple test program to see if we can link the embedded server library */ - -#include -#include -#include - -#include "mysql.h" - -MYSQL *mysql; - -static char *server_options[] = \ - { "mysql_test", "--defaults-file=my.cnf", NULL }; -int num_elements = (sizeof(server_options) / sizeof(char *)) - 1; - -static char *server_groups[] = { "libmysqld_server", - "libmysqld_client", NULL }; - -int main(int argc, char **argv) -{ - mysql_library_init(num_elements, server_options, server_groups); - mysql = mysql_init(NULL); - mysql_close(mysql); - mysql_library_end(); - - return 0; -} diff --git a/packaging/rpm-uln/mysql-expired-certs.patch b/packaging/rpm-uln/mysql-expired-certs.patch deleted file mode 100644 index acd3a78cce7..00000000000 --- a/packaging/rpm-uln/mysql-expired-certs.patch +++ /dev/null @@ -1,555 +0,0 @@ -Upstream insists on generating SSL testing certificates with relatively short -lifespan, which has repeatedly caused problems (ie, one day the regression -tests suddenly stop working). Replace them with certificates with 20-year -lifespan. We should periodically regenerate these, too, but at least not -very often. 
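For illustration, a self-signed test CA with a roughly 20-year lifespan can be regenerated with a single openssl invocation along these lines; this is only a sketch, and the key/certificate file names and the subject DN below are placeholders taken from the certificate data shown in the hunks, not the procedure actually used to produce the replacement files:

  # sketch: create an RSA key and a self-signed CA certificate valid for ~20 years (7300 days)
  openssl req -newkey rsa:2048 -nodes -keyout cakey.pem \
          -x509 -days 7300 -out cacert.pem \
          -subj "/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB"

The server and client test certificates would then be issued against such a CA and dropped into mysql-test/std_data, which is what the replacement hunks below contain.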
- - -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/cacert.pem mysql-5.1.50/mysql-test/std_data/cacert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/cacert.pem 2010-08-03 13:55:04.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/cacert.pem 2010-08-27 23:42:05.751428144 -0400 -@@ -1,17 +1,22 @@ - -----BEGIN CERTIFICATE----- --MIICrTCCAhagAwIBAgIJAMI7xZKjhrDbMA0GCSqGSIb3DQEBBAUAMEQxCzAJBgNV -+MIIDsjCCApqgAwIBAgIJAL5YrUwfPSWVMA0GCSqGSIb3DQEBBQUAMEQxCzAJBgNV - BAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBzYWxhMREwDwYD --VQQKEwhNeVNRTCBBQjAeFw0xMDAxMjkxMTQ3MTBaFw0xNTAxMjgxMTQ3MTBaMEQx -+VQQKEwhNeVNRTCBBQjAeFw0xMDAxMjkwNTU5NTNaFw0xNTAxMjgwNTU5NTNaMEQx - CzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBzYWxh --MREwDwYDVQQKEwhNeVNRTCBBQjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA --wQYsOEfrN4ESP3FjsI8cghE+tZVuyK2gck61lwieVxjgFMtBd65mI5a1y9pmlOI1 --yM4SB2Ppqcuw7/e1CdV1y7lvHrGNt5yqEHbN4QX1gvsN8TQauP/2WILturk4R4Hq --rKg0ZySu7f1Xhl0ed9a48LpaEHD17IcxWEGMMJwAxF0CAwEAAaOBpjCBozAMBgNV --HRMEBTADAQH/MB0GA1UdDgQWBBSvktYQ0ahLnyxyVKqty+WpBbBrDTB0BgNVHSME --bTBrgBSvktYQ0ahLnyxyVKqty+WpBbBrDaFIpEYwRDELMAkGA1UEBhMCU0UxEDAO --BgNVBAgTB1VwcHNhbGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FM --IEFCggkAwjvFkqOGsNswDQYJKoZIhvcNAQEEBQADgYEAdKN1PjwMHAKG2Ww1145g --JQGBnKxSFOUaoSvkBi/4ntTM+ysnViWh7WvxyWjR9zU9arfr7aqsDeQxm0XDOqzj --AQ/cQIla2/Li8tXyfc06bisH/IHRaSc2zWqioTKbEwMdVOdrvq4a8V8ic3xYyIWn --7F4WeS07J8LKardSvM0+hOA= -+MREwDwYDVQQKEwhNeVNRTCBBQjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -+ggEBAL6kNN4peX7uhK9rb06W/QbPEpVuejmdWdl2PqMshP/eSuXXw7kwVgfpxx9R -+vC000CKQQSG9MCoZjtqPnFRsetmWLZgApRpEalGXTXJqq9sEbCfoFizg94U8G7d2 -+u5XJjLVmcG34ru36KoBgVx1zeH1puBAf8dOzrE4L7Y+ZQBFzFohjh8C2LqWC4nM5 -+qsLmOkDWMipGqYU5DvkKjIbTbwTyRNRgZHWSPfVDDPUIUOsY4BGUp2DpgeGY9aEv -+lIs57Ev9JqlIUCV65lOhhDkG+xwmkHKHA+ECEU9cALI8+uXbh48MB9XpMOuk408X -+/lX89aZwD0/G9kmObVGnE2G+H5UCAwEAAaOBpjCBozAdBgNVHQ4EFgQUsft+d7VA -+jWgRftkR5cPG2k2sUbAwdAYDVR0jBG0wa4AUsft+d7VAjWgRftkR5cPG2k2sUbCh -+SKRGMEQxCzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdV -+cHBzYWxhMREwDwYDVQQKEwhNeVNRTCBBQoIJAL5YrUwfPSWVMAwGA1UdEwQFMAMB -+Af8wDQYJKoZIhvcNAQEFBQADggEBALRUOAmdL8R8sl1y8kiEiFgDatdXK5RDqWai -+8yZChfmwTIToHhmQsOEshJe2e8hky3huUj+33VyXjINoMbebIwMuXPwEkbJal8RZ -+nSJmF0jN1Qz7J/jFffwK9xmejWZJx49Kt2+Qwrwp6kDeq9TLFqQOoVczgyJPYsTL -+NAOib5WqTud3XWvCwxrhqmWu7JZq6sp1fomP/uunprb8y2miWfLESZN2mKAhm44Q -+Lws867LT8v2lskEjq2dT1LutD5+R66XcdjgSr0uDziDs64jZwCD6ea94hVFM7ej0 -+ZOXYeSEZJ56FjUxu632e9fY8NyMh30yKjjmQf1mM9PuGJvdvsWU= - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/client-cert.pem mysql-5.1.50/mysql-test/std_data/client-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/client-cert.pem 2010-08-03 13:55:04.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/client-cert.pem 2010-08-27 23:42:05.752428395 -0400 -@@ -1,46 +1,69 @@ - Certificate: - Data: -- Version: 1 (0x0) -- Serial Number: 1048577 (0x100001) -- Signature Algorithm: md5WithRSAEncryption -+ Version: 3 (0x2) -+ Serial Number: 6 (0x6) -+ Signature Algorithm: sha1WithRSAEncryption - Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB - Validity -- Not Before: Jan 29 11:50:22 2010 GMT -- Not After : Jan 28 11:50:22 2015 GMT -+ Not Before: Feb 20 03:03:26 2010 GMT -+ Not After : Sep 3 03:03:26 2030 GMT - Subject: C=SE, ST=Uppsala, O=MySQL AB - Subject Public Key Info: - Public Key Algorithm: rsaEncryption -- Public-Key: (1024 bit) -- Modulus: -- 00:cc:9a:37:49:13:66:dc:cf:e3:0b:13:a1:23:ed: -- 78:db:4e:bd:11:f6:8c:0d:76:f9:a3:32:56:9a:f8: -- 
a1:21:6a:55:4e:4d:3f:e6:67:9d:26:99:b2:cd:a4: -- 9a:d2:2b:59:5c:d7:8a:d3:60:68:f8:18:bd:c5:be: -- 15:e1:2a:3c:a3:d4:61:cb:f5:11:94:17:81:81:f7: -- 87:8c:f6:6a:d2:ee:d8:e6:77:f6:62:66:4d:2e:16: -- 8d:08:81:4a:c9:c6:4b:31:e5:b9:c7:8a:84:96:48: -- a7:47:8c:0d:26:90:56:4e:e6:a5:6e:8c:b3:f2:9f: -- fc:3d:78:9b:49:6e:86:83:77 -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:c2:e7:20:cf:89:59:2f:67:cb:4c:9f:e8:11:f2: -+ 23:e5:f1:b1:ee:3f:66:5f:c3:f5:fd:1e:31:ee:8f: -+ 4c:2a:bd:c0:4a:a5:9f:c8:44:d5:77:8f:15:1b:4d: -+ 78:6e:b2:a2:48:a5:24:33:05:40:02:b3:c1:87:8d: -+ 59:3c:1a:07:aa:86:f0:04:e1:9c:20:4b:22:32:c4: -+ 51:9e:40:e4:31:c3:57:f5:98:bf:2e:b1:fd:2c:56: -+ bf:49:d9:9b:e7:17:cc:95:5f:b5:08:19:5e:9d:df: -+ 65:22:39:2c:48:fb:69:96:31:7a:35:4d:de:60:b4: -+ c1:60:19:5f:96:56:7e:55:19 - Exponent: 65537 (0x10001) -- Signature Algorithm: md5WithRSAEncryption -- 5e:1f:a3:53:5f:24:13:1c:f8:28:32:b0:7f:69:69:f3:0e:c0: -- 34:87:10:03:7d:da:15:8b:bd:19:b8:1a:56:31:e7:85:49:81: -- c9:7f:45:20:74:3e:89:c0:e0:26:84:51:cc:04:16:ce:69:99: -- 01:e1:26:99:b3:e3:f5:bd:ec:5f:a0:84:e4:38:da:75:78:7b: -- 89:9c:d2:cd:60:95:20:ba:8e:e3:7c:e6:df:76:3a:7c:89:77: -- 02:94:86:11:3a:c4:61:7d:6f:71:83:21:8a:17:fb:17:e2:ee: -- 02:6b:61:c1:b4:52:63:d7:d8:46:b2:c5:9c:6f:38:91:8a:35: -- 32:0b -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ 8D:10:67:91:33:76:9C:02:E5:78:5D:D8:C5:EF:25:96:B2:D7:FA:1F -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ a9:88:10:3e:5d:2a:47:29:c8:03:27:7a:31:5a:8e:10:03:bc: -+ b5:4e:37:1d:12:7b:eb:5f:50:71:70:b1:a3:8e:93:0e:77:17: -+ 6c:47:b6:c9:a4:4d:2a:c4:38:f0:61:55:b2:7f:28:ba:06:79: -+ ee:67:11:7d:d4:c9:7f:0a:18:c8:c1:cb:d0:2c:f9:63:0f:bb: -+ 45:ca:de:ea:bb:ac:00:01:52:48:36:2b:07:2b:c8:46:c7:b1: -+ 21:81:bd:77:39:e7:4c:39:aa:bd:ac:60:d8:a7:bf:cf:14:98: -+ 4a:0b:a1:40:55:06:8d:6f:35:a9:39:a0:71:a9:97:ba:7c:73: -+ 3c:41:ba:c5:1c:11:4b:2b:43:1d:2d:ba:7b:5f:14:b5:3d:64: -+ 62:15:36:b4:16:bd:78:c8:43:8d:f9:1c:a5:d2:ac:a1:58:74: -+ e1:99:de:ad:04:19:43:a8:bd:0a:fd:19:9b:50:44:46:6d:18: -+ 55:4d:bf:b4:5b:a4:93:62:c7:64:91:6c:54:34:d1:f8:f3:ff: -+ 12:6d:5f:85:e7:35:9e:5c:42:81:5e:fb:c8:bb:44:51:98:b2: -+ ef:1b:9f:5a:22:77:28:7d:da:fb:08:c2:94:9a:0f:42:08:93: -+ 54:10:1e:ad:f2:4f:fc:62:98:51:e9:9b:b9:3a:93:d9:e4:1f: -+ 1d:c4:76:d0 - -----BEGIN CERTIFICATE----- --MIIB5zCCAVACAxAAATANBgkqhkiG9w0BAQQFADBEMQswCQYDVQQGEwJTRTEQMA4G --A1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwg --QUIwHhcNMTAwMTI5MTE1MDIyWhcNMTUwMTI4MTE1MDIyWjAyMQswCQYDVQQGEwJT --RTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIwgZ8wDQYJKoZI --hvcNAQEBBQADgY0AMIGJAoGBAMyaN0kTZtzP4wsToSPteNtOvRH2jA12+aMyVpr4 --oSFqVU5NP+ZnnSaZss2kmtIrWVzXitNgaPgYvcW+FeEqPKPUYcv1EZQXgYH3h4z2 --atLu2OZ39mJmTS4WjQiBSsnGSzHluceKhJZIp0eMDSaQVk7mpW6Ms/Kf/D14m0lu --hoN3AgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAXh+jU18kExz4KDKwf2lp8w7ANIcQ --A33aFYu9GbgaVjHnhUmByX9FIHQ+icDgJoRRzAQWzmmZAeEmmbPj9b3sX6CE5Dja --dXh7iZzSzWCVILqO43zm33Y6fIl3ApSGETrEYX1vcYMhihf7F+LuAmthwbRSY9fY --RrLFnG84kYo1Mgs= -+MIIDETCCAfmgAwIBAgIBBjANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT -+UUwgQUIwHhcNMTAwMjIwMDMwMzI2WhcNMzAwOTAzMDMwMzI2WjAyMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIwgZ8wDQYJ 
-+KoZIhvcNAQEBBQADgY0AMIGJAoGBAMLnIM+JWS9ny0yf6BHyI+Xxse4/Zl/D9f0e -+Me6PTCq9wEqln8hE1XePFRtNeG6yokilJDMFQAKzwYeNWTwaB6qG8AThnCBLIjLE -+UZ5A5DHDV/WYvy6x/SxWv0nZm+cXzJVftQgZXp3fZSI5LEj7aZYxejVN3mC0wWAZ -+X5ZWflUZAgMBAAGjgaMwgaAwCQYDVR0TBAIwADAdBgNVHQ4EFgQUjRBnkTN2nALl -+eF3Yxe8llrLX+h8wdAYDVR0jBG0wa4AUsft+d7VAjWgRftkR5cPG2k2sUbChSKRG -+MEQxCzAJBgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMRAwDgYDVQQHEwdVcHBz -+YWxhMREwDwYDVQQKEwhNeVNRTCBBQoIJAL5YrUwfPSWVMA0GCSqGSIb3DQEBBQUA -+A4IBAQCpiBA+XSpHKcgDJ3oxWo4QA7y1TjcdEnvrX1BxcLGjjpMOdxdsR7bJpE0q -+xDjwYVWyfyi6BnnuZxF91Ml/ChjIwcvQLPljD7tFyt7qu6wAAVJINisHK8hGx7Eh -+gb13OedMOaq9rGDYp7/PFJhKC6FAVQaNbzWpOaBxqZe6fHM8QbrFHBFLK0MdLbp7 -+XxS1PWRiFTa0Fr14yEON+Ryl0qyhWHThmd6tBBlDqL0K/RmbUERGbRhVTb+0W6ST -+YsdkkWxUNNH48/8SbV+F5zWeXEKBXvvIu0RRmLLvG59aIncofdr7CMKUmg9CCJNU -+EB6t8k/8YphR6Zu5OpPZ5B8dxHbQ - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/client-key.pem mysql-5.1.50/mysql-test/std_data/client-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/client-key.pem 2010-08-03 13:55:05.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/client-key.pem 2010-08-27 23:42:05.752428395 -0400 -@@ -1,15 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIICXQIBAAKBgQDMmjdJE2bcz+MLE6Ej7XjbTr0R9owNdvmjMlaa+KEhalVOTT/m --Z50mmbLNpJrSK1lc14rTYGj4GL3FvhXhKjyj1GHL9RGUF4GB94eM9mrS7tjmd/Zi --Zk0uFo0IgUrJxksx5bnHioSWSKdHjA0mkFZO5qVujLPyn/w9eJtJboaDdwIDAQAB --AoGASqk/4We2En+93y3jkIO4pXafIe3w/3zZ7caRue1ehx4RUQh5d+95djuB9u7J --HEZ7TpjM7QNyao5EueL6gvbxt0LXFvqAMni7yM9tt/HUYtHHPqYiRtUny9bKYFTm --l8szCCMal/wD9GZU9ByHDNHm7tHUMyMhARNTYSgx+SERFmECQQD/6jJocC4SXf6f --T3LqimWR02lbJ7qCoDgRglsUXh0zjrG+IIiAyE+QOCCx1GMe3Uw6bsIuYwdHT6as --WcdPs04xAkEAzKulvEvLVvN5zfa/DTYRTV7jh6aDleOxjsD5oN/oJXoACnPzVuUL --qQQMNtuAXm6Q1QItrRxpQsSKbY0UQka6JwJBAOSgoNoG5lIIYTKIMvzwGV+XBLeo --HYsXgh+6Wo4uql3mLErUG78ZtWL9kc/tE4R+ZdyKGLaCR/1gXmH5bwN4B/ECQEBb --uUH8k3REG4kojesZlVc+/00ojzgS4UKCa/yqa9VdB6ZBz8MDQydinnShkTwgiGpy --xOoqhO753o2UT0qH8wECQQC99IEJWUnwvExVMkLaZH5NjAFJkb22sjkmuT11tAgU --RQgOMoDOm6driojnOnDWOkx1r1Gy9NgMLooduja4v6cx -+MIICWwIBAAKBgQDC5yDPiVkvZ8tMn+gR8iPl8bHuP2Zfw/X9HjHuj0wqvcBKpZ/I -+RNV3jxUbTXhusqJIpSQzBUACs8GHjVk8GgeqhvAE4ZwgSyIyxFGeQOQxw1f1mL8u -+sf0sVr9J2ZvnF8yVX7UIGV6d32UiOSxI+2mWMXo1Td5gtMFgGV+WVn5VGQIDAQAB -+AoGARXcXLKDpVooJ3W+IyQyiWsw//IhANpWjUOm4JiyQmxMyO+i4ACr4Yjpu7WI5 -+MEseqAGj20NdwxjKO0PXsCIe5LmrGZ+SI8+CSERFOWXWRtCWz7y7SG30i1k6suvM -+mwqWom0tJLwn93uA1lm/WSwKQwUrJRahRQd3EaZqrl7DP5kCQQD/8gbuYAT5pxQe -+ULLGM0RvEsXxDYbEDxNbY5wrBazfklBwpumxZpFl6jEAT++7Kh2Ns3A7kB1oUNlA -+FPYr+dYPAkEAwvHEwRtoyUr8jqoqVVJWI76CDmBjEOzVeMKW97ztqbs2LxZW8dYI -+iOh/myFGpdoUwgu0U8w9MmXcj3ZeZCYKVwJALyQ+AJPw9qa+fuLwOq9gsHCtwrty -+EhSQxSlwrz/pWniRll439vPkXfgntF4E0t1r+hiN2Hqv3/HcQgBaYzkuIwJAG023 -+bACFxaOuCeFFepvEms8E8jSHy4gQQhCnCl24v8wLw76SQN7kZSCDNtwLRBFuVNtE -+z3PMonFn2eQPRmGZkwJAP1c1BHprMQx/ruafdscROILv3JrH40C1bR6KVVBKt1dK -+Qpnpgi7hK5rUQjDF8k3bn9ugTt06jyeHe/QhAml0kg== - -----END RSA PRIVATE KEY----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server-cert.pem mysql-5.1.50/mysql-test/std_data/server-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server-cert.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server-cert.pem 2010-08-27 23:42:05.753428361 -0400 -@@ -1,41 +1,69 @@ - Certificate: - Data: -- Version: 1 (0x0) -- Serial Number: 1048578 (0x100002) -- Signature Algorithm: md5WithRSAEncryption -+ Version: 3 (0x2) -+ Serial Number: 4 (0x4) -+ Signature Algorithm: sha1WithRSAEncryption - Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB - Validity -- Not 
Before: Jan 29 11:56:49 2010 GMT -- Not After : Jan 28 11:56:49 2015 GMT -+ Not Before: Feb 20 02:55:06 2010 GMT -+ Not After : Sep 3 02:55:06 2030 GMT - Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=localhost - Subject Public Key Info: - Public Key Algorithm: rsaEncryption -- Public-Key: (512 bit) -- Modulus: -- 00:cd:e4:87:51:9d:72:11:a0:d1:fa:f3:92:8b:13: -- 1c:eb:f7:e2:9a:2f:72:a8:d6:65:48:d1:69:af:1b: -- c0:4c:13:e5:60:60:51:41:e9:ab:a6:bc:13:bb:0c: -- 5e:32:7c:d9:6c:9e:cd:05:24:84:78:db:80:91:2e: -- d8:88:2b:c2:ed -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:e3:7d:4f:c2:23:77:a9:3a:2c:d2:69:59:a0:2f: -+ 4e:d1:51:4c:ae:8d:f5:17:cc:ce:58:9c:83:4f:0b: -+ a3:bb:29:a2:b8:1d:3e:1b:04:f9:a9:3e:e2:61:d0: -+ e6:7b:b9:7c:12:d8:1f:86:c9:53:b5:04:dd:df:26: -+ e9:c0:2b:de:4a:96:2e:f3:23:6f:79:6d:a9:d2:4e: -+ 17:af:2f:de:8b:68:44:ae:de:a3:e2:c4:37:1c:04: -+ ad:73:4b:85:f9:83:ac:fe:b7:c1:54:47:2e:96:d4: -+ 31:96:85:94:69:d6:5a:63:24:04:99:89:19:1d:56: -+ 8a:d1:77:aa:87:fb:38:cd:b7 - Exponent: 65537 (0x10001) -- Signature Algorithm: md5WithRSAEncryption -- 73:ce:9c:6e:39:46:b4:14:be:da:3f:f3:1b:ba:90:bc:23:43: -- d7:82:2a:70:4e:a6:d9:5a:65:5c:b7:df:71:df:75:77:c5:80: -- a4:af:fa:d2:59:e2:fd:c9:9c:f0:98:95:8e:69:a9:8c:7c:d8: -- 6f:48:d2:e3:36:e0:cd:ff:3f:d1:a5:e6:ab:75:09:c4:50:10: -- c4:96:dd:bf:3b:de:32:46:da:ca:4a:f1:d6:52:8a:33:2f:ab: -- f5:2e:70:3f:d4:9c:be:00:c8:03:f9:39:8a:df:5b:70:3c:40: -- ef:03:be:7c:3d:1d:32:32:f3:51:81:e2:83:30:6e:3d:38:9b: -- fb:3c -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ CC:8C:71:40:D0:0F:BF:D1:99:79:3F:1B:E9:10:76:19:67:36:0F:A3 -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ 6f:ad:5e:59:fa:84:3a:be:e2:72:b1:e8:66:2a:4e:f8:73:19: -+ 11:06:11:92:78:56:3e:d6:e8:68:29:90:8b:59:d2:fe:aa:ae: -+ 25:59:c7:e9:99:bb:4a:06:43:dd:40:bd:cb:f4:ae:79:95:7d: -+ 8e:90:ef:58:d2:a8:fc:bf:07:f3:37:b2:9b:bd:da:e6:8c:56: -+ dd:5e:c6:4a:70:7c:3e:3d:a1:e8:35:06:b8:a7:7b:ac:26:85: -+ 54:5d:09:a2:7b:77:b4:17:7f:72:31:cb:ff:cc:67:6d:e6:3e: -+ c6:dc:96:eb:4a:0a:ae:e9:48:ae:8a:e0:d6:73:57:6e:32:4c: -+ 00:dc:28:da:55:b3:9f:9f:d8:98:cc:d9:f1:b6:b3:14:67:2e: -+ a1:47:1e:51:11:cf:70:9f:31:8f:ba:59:29:f2:d0:88:0b:e2: -+ 51:6b:f8:31:ed:6d:ac:00:5e:d3:78:4c:95:97:02:cc:74:2b: -+ 3b:c6:28:e6:2a:c3:30:99:35:b4:4d:31:46:d4:90:f2:47:ed: -+ 64:85:1a:75:2a:72:0a:2f:c6:3a:2f:d2:ac:6b:31:cc:e5:a8: -+ 07:c2:d6:22:f3:c6:0f:bf:67:d9:d6:b2:79:cd:48:b5:c3:e0: -+ e3:18:7f:b5:74:c9:43:19:fb:c4:93:29:ca:cc:90:2b:1b:6f: -+ 45:f6:25:f9 - -----BEGIN CERTIFICATE----- --MIIBtzCCASACAxAAAjANBgkqhkiG9w0BAQQFADBEMQswCQYDVQQGEwJTRTEQMA4G --A1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwg --QUIwHhcNMTAwMTI5MTE1NjQ5WhcNMTUwMTI4MTE1NjQ5WjBGMQswCQYDVQQGEwJT --RTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxEjAQBgNVBAMT --CWxvY2FsaG9zdDBcMA0GCSqGSIb3DQEBAQUAA0sAMEgCQQDN5IdRnXIRoNH685KL --Exzr9+KaL3Ko1mVI0WmvG8BME+VgYFFB6aumvBO7DF4yfNlsns0FJIR424CRLtiI --K8LtAgMBAAEwDQYJKoZIhvcNAQEEBQADgYEAc86cbjlGtBS+2j/zG7qQvCND14Iq --cE6m2VplXLffcd91d8WApK/60lni/cmc8JiVjmmpjHzYb0jS4zbgzf8/0aXmq3UJ --xFAQxJbdvzveMkbaykrx1lKKMy+r9S5wP9ScvgDIA/k5it9bcDxA7wO+fD0dMjLz --UYHigzBuPTib+zw= -+MIIDJTCCAg2gAwIBAgIBBDANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT 
-+UUwgQUIwHhcNMTAwMjIwMDI1NTA2WhcNMzAwOTAzMDI1NTA2WjBGMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxEjAQBgNV -+BAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA431PwiN3 -+qTos0mlZoC9O0VFMro31F8zOWJyDTwujuymiuB0+GwT5qT7iYdDme7l8EtgfhslT -+tQTd3ybpwCveSpYu8yNveW2p0k4Xry/ei2hErt6j4sQ3HAStc0uF+YOs/rfBVEcu -+ltQxloWUadZaYyQEmYkZHVaK0Xeqh/s4zbcCAwEAAaOBozCBoDAJBgNVHRMEAjAA -+MB0GA1UdDgQWBBTMjHFA0A+/0Zl5PxvpEHYZZzYPozB0BgNVHSMEbTBrgBSx+353 -+tUCNaBF+2RHlw8baTaxRsKFIpEYwRDELMAkGA1UEBhMCU0UxEDAOBgNVBAgTB1Vw -+cHNhbGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCggkAvlit -+TB89JZUwDQYJKoZIhvcNAQEFBQADggEBAG+tXln6hDq+4nKx6GYqTvhzGREGEZJ4 -+Vj7W6GgpkItZ0v6qriVZx+mZu0oGQ91Avcv0rnmVfY6Q71jSqPy/B/M3spu92uaM -+Vt1exkpwfD49oeg1Brine6wmhVRdCaJ7d7QXf3Ixy//MZ23mPsbclutKCq7pSK6K -+4NZzV24yTADcKNpVs5+f2JjM2fG2sxRnLqFHHlERz3CfMY+6WSny0IgL4lFr+DHt -+bawAXtN4TJWXAsx0KzvGKOYqwzCZNbRNMUbUkPJH7WSFGnUqcgovxjov0qxrMczl -+qAfC1iLzxg+/Z9nWsnnNSLXD4OMYf7V0yUMZ+8STKcrMkCsbb0X2Jfk= - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server-key.pem mysql-5.1.50/mysql-test/std_data/server-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server-key.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server-key.pem 2010-08-27 23:42:05.754428433 -0400 -@@ -1,9 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIIBOwIBAAJBAM3kh1GdchGg0frzkosTHOv34povcqjWZUjRaa8bwEwT5WBgUUHp --q6a8E7sMXjJ82WyezQUkhHjbgJEu2Igrwu0CAwEAAQJBAJuwhFbF3NzRpBbEmnqJ --4GPa1UJMQMLFJF+04tqj/HxJcAIVhOJhGmmtYNw1yjz/ZsPnfJCMz4eFOtdjvGtf --peECIQDmFFg2WLvYo+2m9w9V7z4ZIkg7ixYkI/ObUUctfZkPOQIhAOUWnrvjFrAX --bIvYT/YR50+3ZDLEc51XxNgJnWqWYl1VAiEAnTOFWgyivFC1DgF8PvDp8u5TgCt2 --A1d1GMgd490O+TECIC/WMl0/hTxOF9930vKqOGf//o9PUGkZq8QE9fcM4gtlAiAE --iOcFpnLjtWj57jrhuw214ucnB5rklkQQe+AtcARNkg== -+MIICXgIBAAKBgQDjfU/CI3epOizSaVmgL07RUUyujfUXzM5YnINPC6O7KaK4HT4b -+BPmpPuJh0OZ7uXwS2B+GyVO1BN3fJunAK95Kli7zI295banSThevL96LaESu3qPi -+xDccBK1zS4X5g6z+t8FURy6W1DGWhZRp1lpjJASZiRkdVorRd6qH+zjNtwIDAQAB -+AoGAUb0o91y/FjMs/72S0pes/lDz+JRRSGfyjKxQEgrgndNsADOhqRu0iTdrKDJj -+XnlbN3ooecnFJfnFrvTQcJhSmlS30j6VrBw6LXpCBK3dvjYgJ9LOne7WK+dF1+vS -+FMQtsP04C56Sxy6HJDpMyWJ6oS3Bu169ygG2AxKo+Fk+E6ECQQD38w/MzmrARz2Z -+AGeEPDUnVZPYgtmXkmks95S0/2jSoLhmgpvJimzxwpYwVG/BG8dSDVuTDu5kp05D -+3bZIp3EzAkEA6uAwJsCZPtHXlWU3wYZJsA697rUNjPaCQOIaZ/lnh5RUHTmUiw1h -+Oj/VORqKB0kXqcDfawwLjZEvh1Xli+H5bQJBANTmhw2TvEPnp/OFTl1UGUvyBmXl -+TRMB639qAu07VfVtfYi/4ya1zn/0VmOfTOoigQ5qW9Q1AOu6YNCTQl62L9MCQQDc -+YfEsW2kvNYxYJHoVfuBjbuGuOnn1e1Oqd70ZND59S6NFLMMBWlORaVWzWACNZ3rp -+kAzSj6HDeqgjD2jsQONdAkEAt7S1YHUn8F760bRn4AnAto2TVOYdArtTP/wYjd4o -+9rJREO/d8AYkYJ96APLvF0SZ4n3t1pLwQRsKKN8ZGTmzLA== - -----END RSA PRIVATE KEY----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server8k-cert.pem mysql-5.1.50/mysql-test/std_data/server8k-cert.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server8k-cert.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server8k-cert.pem 2010-08-27 23:43:00.005366270 -0400 -@@ -1,51 +1,69 @@ -+Certificate: -+ Data: -+ Version: 3 (0x2) -+ Serial Number: 5 (0x5) -+ Signature Algorithm: sha1WithRSAEncryption -+ Issuer: C=SE, ST=Uppsala, L=Uppsala, O=MySQL AB -+ Validity -+ Not Before: Feb 20 03:00:54 2010 GMT -+ Not After : Sep 3 03:00:54 2030 GMT -+ Subject: C=SE, ST=Uppsala, O=MySQL AB, CN=server -+ Subject Public Key Info: -+ Public Key Algorithm: rsaEncryption -+ RSA Public Key: (1024 bit) -+ Modulus (1024 bit): -+ 00:c5:da:44:95:06:77:16:21:af:a0:c4:3c:e9:f8: -+ 
1d:2d:95:f9:63:90:8c:3f:86:ba:77:76:4a:52:4b: -+ 6b:af:29:f5:1c:aa:d4:3f:3e:42:9f:6d:46:ba:86: -+ 90:b1:2d:cc:db:c6:33:15:a3:f4:af:53:33:4f:a1: -+ 56:d1:aa:3b:26:10:f7:64:b5:f9:bf:1b:b1:47:8e: -+ cc:a6:d6:0d:aa:4a:77:e3:a3:63:9d:2a:dc:65:f4: -+ 7f:91:17:38:2d:d6:cd:4e:8d:53:52:97:6e:87:fc: -+ 64:60:a6:a1:00:ac:96:6c:e4:42:94:75:17:46:6f: -+ 91:b5:dd:06:47:ed:05:e3:db -+ Exponent: 65537 (0x10001) -+ X509v3 extensions: -+ X509v3 Basic Constraints: -+ CA:FALSE -+ X509v3 Subject Key Identifier: -+ 6E:60:3F:29:13:60:99:ED:0C:F7:15:B5:DB:7B:1C:FB:6F:60:19:ED -+ X509v3 Authority Key Identifier: -+ keyid:B1:FB:7E:77:B5:40:8D:68:11:7E:D9:11:E5:C3:C6:DA:4D:AC:51:B0 -+ DirName:/C=SE/ST=Uppsala/L=Uppsala/O=MySQL AB -+ serial:BE:58:AD:4C:1F:3D:25:95 -+ -+ Signature Algorithm: sha1WithRSAEncryption -+ 63:2e:0f:07:14:06:cf:74:90:3d:37:42:f2:48:70:60:21:bc: -+ 34:52:31:f1:87:70:d2:b2:fb:ff:13:38:dc:f0:5e:43:d7:ee: -+ a7:c7:1f:ac:aa:d2:8c:4f:fa:3c:4c:73:f6:b6:c2:0c:a0:ea: -+ a2:c9:e2:73:61:c3:2e:78:40:0f:2a:d3:63:50:9b:b8:f9:89: -+ 40:ed:98:08:97:c3:07:24:17:34:b5:78:89:0a:bb:83:4c:e2: -+ 5c:2e:13:d6:21:30:ad:30:48:b5:70:12:ff:4a:6f:42:f0:f8: -+ 9f:b1:4b:bd:89:2b:f0:9d:e2:49:2b:35:69:18:1f:76:40:b4: -+ 76:bd:cb:dd:27:2f:c0:c1:e2:33:3e:6e:df:68:54:19:92:8a: -+ bb:13:9c:cf:d6:17:56:da:bf:0d:64:70:3a:45:b7:aa:5f:e3: -+ f5:96:ae:34:f2:17:37:27:d0:4b:e8:30:4a:c0:02:42:e2:d2: -+ 30:eb:eb:c7:d7:ec:d8:df:5c:43:58:e2:6f:b7:58:54:0d:c4: -+ 01:71:2d:59:8f:44:c7:a1:6c:0b:41:28:fa:b7:63:a7:68:d3: -+ 4f:c3:0f:17:9e:b2:32:50:e6:0b:87:3d:e2:39:47:c0:d8:0a: -+ 3b:f6:af:50:68:0f:9d:ef:6e:34:0d:3a:07:94:f8:a4:d7:24: -+ 86:32:d3:b4 - -----BEGIN CERTIFICATE----- --MIIJFDCCBPwCAQEwDQYJKoZIhvcNAQEEBQAwTjELMAkGA1UEBhMCU0UxEDAOBgNV --BAgTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCMQ0wCwYDVQQLEwRUZXN0MQsw --CQYDVQQDEwJDQTAeFw0xMDA3MjgxNDA3MjhaFw0xODEwMTQxNDA3MjhaMFIxCzAJ --BgNVBAYTAlNFMRAwDgYDVQQIEwdVcHBzYWxhMREwDwYDVQQKEwhNeVNRTCBBQjEN --MAsGA1UECxMEVGVzdDEPMA0GA1UEAxMGc2VydmVyMIIEIjANBgkqhkiG9w0BAQEF --AAOCBA8AMIIECgKCBAEA6h3v1OWb9I9U/Z8diBu/xYGS8NCTD3ZESboHxVI2qSEC --PgxNNcG8Lh0ktQdgYcOe64MnDTZX0Bibm47hoDldrAlTSffFxQhylqBBoXxDF+Lr --hXIqCz7K0PsK+bYusL9ezJ7PETDnCT7oy95q4GXbKsutbNsm9if4ZE41gs2KnoU2 --DA7kvMmkKojrMIL4+BqTXA20LLo0iSbgvUTvpSJw4u96BeyzMNnxK2wP5vvTtUo5 --hACbfU87YjaSKs+q2VXCzfyYGZk1L1xk5GUI0bP+jutf1dDzNttW2/q2Nf5rxx09 --Gh/GwmOnEk1O7cOZ8VQCsOHirIM39NuSARsY6Y3G5XM4k2W4nxyR/RtdG9bvs/33 --aGsZ5V5yp7WSs8s9HHwaCPSsUiLKckQ7uA0TTRgbeweMrrLKovG57jsbBBB8pQD4 --PRd31qgxCdstWXHiWwRyI8vOLWENPXPFqA/rJwwqNdWTogy38aqVXxGYR8PIwjA2 --OaIwFjwGZcsPNLqw6bgAN8O2UBqZHWiMF8mi7brvioDvAIufZuqa2SqT/At45H83 --psQ6R4FsxZt6SAK7EsdPo8OYTrY1i4iPZd/eKhnEu2srEZgsKRwY5H1mvDH5fWCc --HSFu07sWmlmK6Or65Fsa0IaKLJiQDVVETd6xrI0wkM4AOcbKDrS7aywJ426dopbs --+LFdt4N0cdII4gBgJAfLuuA2yrDXRq4P6cgpVMy0R+0dEYE8zzm8zf1a+Ud273LS --9+LB+LJKwqbW8nOPBoiekimIKfJYoOA4+C/mAjsYl1sVjjEhXJAs9S9L2UvnUk1P --sZi4UKHI6eAIEl7VM1sQ4GbdZ0px2dF2Ax7pGkhD+DLpYyYkCprharKZdmuUNLUd --NhXxi/HSEiE+Uy+o8RIzmH7LuROl/ZgnfHjJEiBLt2qPvwrwYd4c3XuXWs4YsWfV --JTt8Mx2ihgVcdGy9//shCSmgJwR1oWrhgC10AEL2fKeRnYUal1i+IxFPp7nb8uwx --UADgR0cY4A3qR/JP489QFIcxBTVs65De+Bq3ecnujk6yeGpD9iptonq4Y8uNZMc1 --kOE7GiFGwR4EufT5SEMh+tUkjth2r+842vmZZuxrVQaohDiATmIJA07W51zKH+nQ --uw4qVKnAhPaDLCLc7YMIH9JcmkeQX0nf8/S2O2WYDH8glVDi5hfW08tCmV647vRY --nTIywUTO0lFpz7M+VyMNaJ6yXU6biBV5hLAI8C5ldr/SWI789W2+ebBaJ9gfK+PT --trohFSK37GcoSH4V6qSLJHCBASEsiddqHIHMLJZRYD+B6J3tLhjVUM43u+MEGbFT --d33ZDke/WzLTExWkaOv36e67gDBmgDuj9yroq3wGfwIDAQABMA0GCSqGSIb3DQEB --BAUAA4IEAQCc9RBhRbuWlmRZPZkqIdi5/+enyjoMmOa6ryJPxFSP8D2jrlHgQsk1 
--+GsJmPFT3rwWfoGAQu/aeSX4sp8OhKVJtqNA6MJrGYnZIMolgYa1wZPbkjJsdEfi --UsZdIB0n2+KA0xwEdGPdkGCfNPBtOg557DkcyEvsIZ9ELp4Pp2XzWRhyFGasJZc4 --YwgD/3K2rpOPZoMkBKeKqV19j41OfLKGBVyuaqzitbu9+KT4RU1ibr2a+UuFCwdT --oqyN7bfWXjcjXOMkxCsOmLfKmqQxs7TEOVrYPTdYjamDxLy/e5g5FgoCxGY8iil0 --+YFLZyH6eEx/Os9DlG/M3O1MeRD9U97CdsphbDVZIDyWw5xeX8qQHJe0KSprAgiG --TLhTZHeyrKujQCQS1oFFmNy4gSqXt0j1/6/9T80j6HeyjiiYEaEQK9YLTAjRoA7W --VN8wtHI5F3RlNOVQEJks/bjdlpLL3VhaWtfewGh/mXRGcow84cgcsejMexmhreHm --JfTUl9+X1IFFxGq2/606A9ROQ7kN/s4rXu7/TiMODXI/kZijoWd2SCc7Z0YWoNo7 --IRKkmZtrsflJbObEuK2Jk59uqzSxyQOBId8qtbPo8qJJyHGV5GCp34g4x67BxJBo --h1iyVMamBAS5Ip1ejghuROrB8Hit8NhAZApXju62btJeXLX+mQayXb/wC/IXNJJD --83tXiLfZgs6GzLAq7+KW/64sZSvj87CPiNtxkvjchAvyr+fhbBXCrf4rlOjJE6SH --Je2/Jon7uqijncARGLBeYUT0Aa6k1slpXuSKxDNt7EIkP21kDZ5/OJ0Y1u587KVB --dEhuDgNf2/8ij7gAQBwBoZMe1DrwddrxgLLBlyHpAZetNYFZNT+Cs/OlpqI0Jm59 --kK9pX0BY4AGOd23XM3K/uLawdmf67kkftim7aVaqXFHPiWsJVtlzmidKvNSmbmZe --dOmMXp6PBoqcdusFVUS7vjd3KAes5wUX/CaTyOOPRu0LMSnpwEnaL76IC9x4Jd6d --7QqY/OFTjpPH8nP57LwouiT6MgSUCWGaOkPuBJ9w9sENSbbINpgJJ42iAe2kE+R7 --qEIvf/2ETCTseeQUqm2nWiSPLkNagEh6kojmEoKrGyrv3YjrSXSOY1a70tDVy43+ --ueQDQzNZm3Q7inpke2ZKvWyY0LQmLzP2te+tnNBcdLyKJx7emPRTuMUlEdK7cLbt --V3Sy9IKtyAXqqd66fPFj4NhJygyncj8M6CSqhG5L0GhDbkA8UJ8yK/gfKm3h5xe2 --utULK5VMtAhQt6cVahO59A9t/OI17y45bmlIgdlEQISzVFe9ZbIUJW44zBfPx74k --/w8pMRr8gEuRqpL2WdJiKGG6lhMHLVFo -+MIIDIjCCAgqgAwIBAgIBBTANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJTRTEQ -+MA4GA1UECBMHVXBwc2FsYTEQMA4GA1UEBxMHVXBwc2FsYTERMA8GA1UEChMITXlT -+UUwgQUIwHhcNMTAwMjIwMDMwMDU0WhcNMzAwOTAzMDMwMDU0WjBDMQswCQYDVQQG -+EwJTRTEQMA4GA1UECBMHVXBwc2FsYTERMA8GA1UEChMITXlTUUwgQUIxDzANBgNV -+BAMTBnNlcnZlcjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAxdpElQZ3FiGv -+oMQ86fgdLZX5Y5CMP4a6d3ZKUktrryn1HKrUPz5Cn21GuoaQsS3M28YzFaP0r1Mz -+T6FW0ao7JhD3ZLX5vxuxR47MptYNqkp346NjnSrcZfR/kRc4LdbNTo1TUpduh/xk -+YKahAKyWbORClHUXRm+Rtd0GR+0F49sCAwEAAaOBozCBoDAJBgNVHRMEAjAAMB0G -+A1UdDgQWBBRuYD8pE2CZ7Qz3FbXbexz7b2AZ7TB0BgNVHSMEbTBrgBSx+353tUCN -+aBF+2RHlw8baTaxRsKFIpEYwRDELMAkGA1UEBhMCU0UxEDAOBgNVBAgTB1VwcHNh -+bGExEDAOBgNVBAcTB1VwcHNhbGExETAPBgNVBAoTCE15U1FMIEFCggkAvlitTB89 -+JZUwDQYJKoZIhvcNAQEFBQADggEBAGMuDwcUBs90kD03QvJIcGAhvDRSMfGHcNKy -++/8TONzwXkPX7qfHH6yq0oxP+jxMc/a2wgyg6qLJ4nNhwy54QA8q02NQm7j5iUDt -+mAiXwwckFzS1eIkKu4NM4lwuE9YhMK0wSLVwEv9Kb0Lw+J+xS72JK/Cd4kkrNWkY -+H3ZAtHa9y90nL8DB4jM+bt9oVBmSirsTnM/WF1bavw1kcDpFt6pf4/WWrjTyFzcn -+0EvoMErAAkLi0jDr68fX7NjfXENY4m+3WFQNxAFxLVmPRMehbAtBKPq3Y6do00/D -+DxeesjJQ5guHPeI5R8DYCjv2r1BoD53vbjQNOgeU+KTXJIYy07Q= - -----END CERTIFICATE----- -diff -Naur mysql-5.1.50.orig/mysql-test/std_data/server8k-key.pem mysql-5.1.50/mysql-test/std_data/server8k-key.pem ---- mysql-5.1.50.orig/mysql-test/std_data/server8k-key.pem 2010-08-03 13:55:08.000000000 -0400 -+++ mysql-5.1.50/mysql-test/std_data/server8k-key.pem 2010-08-27 23:43:10.165365998 -0400 -@@ -1,99 +1,15 @@ - -----BEGIN RSA PRIVATE KEY----- --MIISKQIBAAKCBAEA6h3v1OWb9I9U/Z8diBu/xYGS8NCTD3ZESboHxVI2qSECPgxN --NcG8Lh0ktQdgYcOe64MnDTZX0Bibm47hoDldrAlTSffFxQhylqBBoXxDF+LrhXIq --Cz7K0PsK+bYusL9ezJ7PETDnCT7oy95q4GXbKsutbNsm9if4ZE41gs2KnoU2DA7k --vMmkKojrMIL4+BqTXA20LLo0iSbgvUTvpSJw4u96BeyzMNnxK2wP5vvTtUo5hACb --fU87YjaSKs+q2VXCzfyYGZk1L1xk5GUI0bP+jutf1dDzNttW2/q2Nf5rxx09Gh/G --wmOnEk1O7cOZ8VQCsOHirIM39NuSARsY6Y3G5XM4k2W4nxyR/RtdG9bvs/33aGsZ --5V5yp7WSs8s9HHwaCPSsUiLKckQ7uA0TTRgbeweMrrLKovG57jsbBBB8pQD4PRd3 --1qgxCdstWXHiWwRyI8vOLWENPXPFqA/rJwwqNdWTogy38aqVXxGYR8PIwjA2OaIw --FjwGZcsPNLqw6bgAN8O2UBqZHWiMF8mi7brvioDvAIufZuqa2SqT/At45H83psQ6 
--R4FsxZt6SAK7EsdPo8OYTrY1i4iPZd/eKhnEu2srEZgsKRwY5H1mvDH5fWCcHSFu --07sWmlmK6Or65Fsa0IaKLJiQDVVETd6xrI0wkM4AOcbKDrS7aywJ426dopbs+LFd --t4N0cdII4gBgJAfLuuA2yrDXRq4P6cgpVMy0R+0dEYE8zzm8zf1a+Ud273LS9+LB --+LJKwqbW8nOPBoiekimIKfJYoOA4+C/mAjsYl1sVjjEhXJAs9S9L2UvnUk1PsZi4 --UKHI6eAIEl7VM1sQ4GbdZ0px2dF2Ax7pGkhD+DLpYyYkCprharKZdmuUNLUdNhXx --i/HSEiE+Uy+o8RIzmH7LuROl/ZgnfHjJEiBLt2qPvwrwYd4c3XuXWs4YsWfVJTt8 --Mx2ihgVcdGy9//shCSmgJwR1oWrhgC10AEL2fKeRnYUal1i+IxFPp7nb8uwxUADg --R0cY4A3qR/JP489QFIcxBTVs65De+Bq3ecnujk6yeGpD9iptonq4Y8uNZMc1kOE7 --GiFGwR4EufT5SEMh+tUkjth2r+842vmZZuxrVQaohDiATmIJA07W51zKH+nQuw4q --VKnAhPaDLCLc7YMIH9JcmkeQX0nf8/S2O2WYDH8glVDi5hfW08tCmV647vRYnTIy --wUTO0lFpz7M+VyMNaJ6yXU6biBV5hLAI8C5ldr/SWI789W2+ebBaJ9gfK+PTtroh --FSK37GcoSH4V6qSLJHCBASEsiddqHIHMLJZRYD+B6J3tLhjVUM43u+MEGbFTd33Z --Dke/WzLTExWkaOv36e67gDBmgDuj9yroq3wGfwIDAQABAoIEAQCSt6YoZqigz/50 --XvYT6Uf6T6S1lBDFXNmY1qOuDkLBJTWRiwYMDViQEaWCaZgGTKDYeT3M8uR/Phyu --lRFi5vCEMufmcAeZ3hxptw7KU+R8ILJ207/zgit6YglTys9h5txTIack39+6FJmx --wbZ64HpETJZnpMO6+fuZaMXyLjuT8mmXjvHcOgXOvjWeFkZOveDhjJkAesUXuqyX --EI+ajoXuQiPXeKonkD2qd7NTjzfy4gw/ZF4NXs0ZVJeviqtIPo2xp33udOw2vRFh --bMvlF4cNLAbIKYVyOG0ruOfd2I7Unsc/CvD1u5vlRVuUd8OO0JZLIZR7hlRX+A58 --8O1g2H/wJZAsF1BnLnFzDGYCX2WjCCK3Zn85FkKGRa0lTdYDduad/C/N3Y2/pHFE --e7U/2D7IkEei59tD2HcsDBB3MJnckkn/hyiL9qWcxqWZ61vurE+XjU6tc6fnfhk9 --pJQ6yU3epPU7Vfsk0UGA7bbgKpsyzyH8Zl76YC2mN2ZVJjZekfhY+ibT9odEPdOl --yLB5iXA6/WhKkDWaOqZGOH+7MblWgT9wHINlcn+nKzOr00JHl26ac6aMlXXi9vbe --4jgJbFK1HYlFIndyX/BdqRTsFemDoDrVqrEYsaONoVYDd9c5qrqYOeh34DhOksQW --hNwWBfmMlfzgOGtCYhMeK+AajqTtUbMYQA6qp47KJd/Oa5Dvi3ZCpvZh3Ll5iIau --rqCtmojsWCqmpWSu7P+Wu4+O3XkUMPdQUuQ5rJFESEBB3yEJcxqk/RItTcKNElNC --PASrPrMD9cli7S/pJ+frbhu1Gna1ArXzXQE9pMozPaBpjCig7+15R0lL3pmOKO6e --WK3dgSwrnW6TQdLPlSD4lbRoiIdTHVBczztDeUqVvFiV3/cuaEi1nvaVdAYLqjuL --ogK4HwE/FQ54S0ijAsP52n25usoH6OTU3bSd/7NTp0vZCy3yf10x7HUdsh2DvhRO --3+TSK5t0yz0Nt7hNwcI6pLmWUIYcZgpFc/WsiiGscTfhy8rh3kRHI8ylGq53KNF+ --yCVmjqnBRWs91ArxmeF1ctX2t3w5p7gf65hJWqoX/2DiSi5FBsr6HLxa5sUi4wRZ --136aCNt5Wu7w+AzPDbQW6qKUGSyfHJAw4JZasZcaZLise5IWb1ks0DtFbWWdT3ux --8r2AM7IO1WopnekrYCnx/aBvBAv4NjWozVA517ztVttPERt3AGb4nm387nYt5R2U --NO2GBWcDyT8JQLKmffE1AkWolCR1GsvcNLQfLCbnNppgsnsLE/viTG4mq1wjnd8O --2Q8nH1SVTuyGFREMp/zsiAEaGfdd0hI2r1J7OdNPBBCtmhITsy9ZYHqm5vrGvy3s --vi2GuB2RAoICAQD/oWUsg4eTJxHifTJLz/tVSTXnw7DhfbFVa1K1rUV63/MRQAFW --pabN4T6Yfp3CpdRkljCA8KPJZj7euwhm4OEg1ulpOouA+cfWlE9RFE8wyOK5SYwM --k+nk31P9MUC866pZg/ghzBGDub91OW1+ZGEtqnLI/n/LhiAIWt0hJvgZclTc1cAL --xffHVlFwoSyNl/nc3ueZCC95nOLst2XcuxZLLbOFtZCmDYsp49q/Jn6EFjn4Ge2o --qp38z6eZgDMP1F4lb9nDqXPHfUSt2jxKlmpfXS+IPKdba67+EjhbtmUYzaR4EoPI --zh+o6SrVWT6Yve7KGiYv06fuRz1m/lLQO/Arbd9ntSjgn+ZEXGOkbhnHUX3DJ4ny --/6XEGB9NLQjern4uNTn0AaV+uvhncapFMaIBnVfq0Cw8eog0136PBYRaVX7T44j5 --HwIyGXWtYGA/SzDEQoksD0Y/T61BEGnLZaKeavNd82WwFvcYHZtE0J4aQGjCEE7N --+nijzCy+j5ETmme9KJvQHpEyXP3N4RBko1eWvyTwFZDdIXtoa6TTEI51lm+FXJ/b --Y+BzMr6KRo29FB+7//1ptUoMvn5hzL0PwOv2ZSTQuoG5hLDEbxWXLNhd1VHcfznF --3EZHwfD2F8aGQ3kz+fkMTNfK955KorDrmLgvmV9eZZ5yQxGZrs5H5YfKpwKCAgEA --6nSUbzfSdVFUH89NM5FmEJgkD06vqCgHl2mpyF+VmDGcay4K06eA4QbRO5kns13+ --n6PcBl/YVW/rNE8iFi+WxfqUpAjdR1HlShvTuTRVqtFTfuN8XhbYU6VMjKyuE0kd --LKe3KRdwubjVNhXRZLBknU+3Y/4hnIR7mcE3/M5Zv5hjb7XnwWg/SzxV9WojCKiu --vQ7cXhH5/o7EuKcl1d6vueGhWsRylCG9RimwgViR2H7zD9kpkOc0nNym9cSpb0Gv --Lui4cf/fVwIt2HfNEGBjbM/83e2MH6b8Xp1fFAy0aXCdRtOo4LVOzJVAxn5dERMX --4JJ4d5cSFbssDN1bITOKzuytfBqRIQGNkOfizgQNWUiaFI0MhEN/icymjm1ybOIh --Gc9tzqKI4wP2X9g+u3+Oof1QaBcZ4UbZEU9ITN87Pa6XVJmpNx7A81BafWoEPFeE --ahoO4XDwlHZazDuSlOseEShxXcVwaIiqySy7OBEPBVuYdEd2Qw/z3JTx9Kw8MKnf 
--hu+ar5tz5dPnJIsvLeYCcJDe/K6loiZuHTtPbWEy9p6It7qubQNPBvTSBN5eVDKc --Q2bTQNCx8SAAA9C5gJiwWoQKsXJzbRFRY77P9JjuGpua3YJ2nYBHEJmF+fp1R33c --uHIyMphPMkKC4GC3/43kkMr6tck8kZbXGSYsLsBr2GkCggIBAJvvrjILQianzKcm --zAmnI6AQ+ssYesvyyrxaraeZvSqJdlLtgmOCxVANuQt5IW9djUSWwZvGL4Np1aw0 --15k6UNqhftzsE7FnrVneOsww4WXXBUcV8FKz4Bf3i9qFswILmGzmrfSf8YczRfGS --SJKzVPxwX3jwlrBmbx/pnb7dcLbFIbNcyLvl1ZJJu4BDMVRmgssTRp/5eExtQZg4 --//A4SA8wH7TO3yAMXvn8vrGgH8kfbdlEp88d1SYk3g4rP/rGB3A63NIYikIEzmJn --ICQ3wUfPJnGq3kRMWgEuyCZaCy2oNE3yrWVPJ8z3/2MJ/79ZDVNHxEeki2o1FuW+ --+nGAPq+fZIp03iy4HdVRro7dgugtc9QaSHJtNId8V4vSjviX5Oz3FxUb9AJst58S --nVV8Q2FMxBa/SlzSOkhRtCg2q1gXkzhaMnIVUleRZFGQ2uWBToxKMjcoUifIyN1J --z999bkfI4hBLq5pRSAXz+YVu5SMKa10GaawIwJLat+i+1zboF6QyI2o/Wz8nrsNq --KX/ajFGu5C94WFgsVoWKNI90KBLe48Ssje9c68waBlV/WHMg1YLvU3yqVDOV+K5c --IHB9tPMnG+AgBYZPxSzuvnLrrkj/GeKx0WI7TrvzOLRGKJo6irMEJ8IzFegASRUq --TVZKYQDYRG7m+lKlSxU+pyMAh2c9AoICAE4kavCip1eIssQjYLTGSkFPo/0iGbOv --G9CgXAE3snFWX67tWphupKrbjdMSWcQTmPD2OTg6q6zWL4twsIi6dcMooHAHsFC7 --//LyUV/SDJdxSyXohiQJ8zH1zwy35RDydnHSuF5OvLh53T44iWDI1dAEqLgAFI3J --LjTxzEpLMGiGTuYFt+ejai0WQAQayvBw4ESM9m+4CB2K0hBFTXv5y5HlnNTW0uWC --VUZUUMrbjUieDz8B/zOXi9aYSGFzmZFGUDAPSqJcSMEELemPDF7f8WNr8vi42tIV --4tlaFD1nep4F9bWMiCXU6B2RxVQi+7vcJEIqL1KUnGd3ydfD00K+ng4Xnj7Vz/cz --QE7CqrpFaXmPlCMzW6+dm51/AyhHXDLkL2od05hiXcNkJ7KMLWRqwExHVIxM3shR --x7lYNl3ArUsCrNd6m4aOjnrKFk7kjeLavHxskPccoGKrC9o0JMfTkWLgmuBJFQ0S --N/HzIbcvIFWF0Ms4ojb50yp6ziXhXfJOO/0KUQEki71XIhvw89mVZszDzD5lqzjf --HCZMBU4MbmL6NdEevFIDH0zPPkx3HPNtJt3kIJbit9wI8VhUMe+ldGnGxpWb8tKw --SfM3vrHkYr+lizk26XfXMFhdAuVtT7dzQKSNEyP/1a2Hs307Xzgiv8JulJ8QIkrX --/nsYWPOAGLG5AoICABmdW9Ppkvuhb1AEcjTWb+XCyopoBc6vit/uQWD9uO+CeX7a --cfzq+iH01CAjyVMc4E1JDc5Lpi106U+GRGcAAaPJB2Sp5NznoxaOVrb71blu4Q4x --bNjtKM/P/DXpO+yJYoOPdKtaSDhtnfNDM7H/jztJ3XIrOltKA7CcRDohbBWIx8Q0 --0uEpvfFpZZBco3yVmjP0RLgIVYn/ZDj9wGhSvFWIJ5vv6GXmtDrcHGMLxcfv7t76 --UVcMW/Yy4mYJRCzGOrWagyVijJ6MTVNciqadWcH1KcbB3EGoMFYMn61or2qJABPM --xz89IlhnROU1Re3X/QRx5t86cw6oa+FqrWMOhSs31I0dNWSuS/xDympG27YIYSDd --mv5seT78GjFmMJC5pPOLoXsbTPB0HpsX2/UL/w/eRAfilTOef/Cf9VE5MP/C2YR7 --NBxUU7/+21D6WvdtBTcZbrXWGroAo8zPP+PwX0+c6WoAvqDJvCPndp8xZhSgEJN/ --0kScptezi8n3ZHI95EA9U5mAHxHz0IhDDVzWw/z1f1SBPxKVX3+By3zaa3lrD2ch --cHq7nBkX72veEevnHUY8Z2rHE2G2jdmRfOtwm4sjL0VBV9fRRoxzJWRduKyeOtDL --EhhBhUoTrT48UnfW9hxnbNLB9P/hh+UJu9HrS2uAwHoGE1+8gcyundupGDBn -+MIICXgIBAAKBgQDF2kSVBncWIa+gxDzp+B0tlfljkIw/hrp3dkpSS2uvKfUcqtQ/ -+PkKfbUa6hpCxLczbxjMVo/SvUzNPoVbRqjsmEPdktfm/G7FHjsym1g2qSnfjo2Od -+Ktxl9H+RFzgt1s1OjVNSl26H/GRgpqEArJZs5EKUdRdGb5G13QZH7QXj2wIDAQAB -+AoGBAJLCjh7Q9eLnx+QDzH9s+Q/IcH4nSbERmh1lFEopAc6j29qQ6PGkmDy0DUPs -+70VOCOh5A4mo3aZzm9sUfVb24/nRtmyTP/AtMuIVGCsUqzI28dJRGvRlY0aSQG/C -+ILqMP69kiMNGBvuyEIiJhisOmMvDFEp7HrrXHJM9qcc217DpAkEA4nzJ9yyy2e4O -+r6/D711hdfcU/F+ktXw+pL77kSSdTABUap92Uv2RL36UA4q5h8RNvq/GrzMNm6Ye -+u2IMvBCiTQJBAN+iRbiMJCSitTg5YVMluVbT87co7jbTqk7LN1ujyIFEklm4xlHG -+DLJNgEoDR7QJtAkL++FyogC4zsQsey5voscCQQCp54trTbDuI9QIoAaQrrDKWgz4 -+NpfNPeOQm2UFQT5vIWAyjGWrZGViB8bp0UvVOcJI5nxaOiZfOYOcdrWu75uRAkAn -+67zMc9/j1lPJRJz2Dc7nDBD+ikTz7pcBV897AWLCiK4jbBOi91q+3YzgKXO8VNsZ -+nlUJasA2psbqSBJ5OJ5zAkEA2UxoMju54hASjT54Z92IzraVw4Vo8CYwOcw5fr7z -++m5xg1mmWdLBclmZ+WjARzDuTHIW6u/WCxNGg42AykWzfw== - -----END RSA PRIVATE KEY----- diff --git a/packaging/rpm-uln/mysql-install-test.patch b/packaging/rpm-uln/mysql-install-test.patch deleted file mode 100644 index 5980aea6a9f..00000000000 --- a/packaging/rpm-uln/mysql-install-test.patch +++ /dev/null @@ -1,33 +0,0 @@ -Improve the documentation that will be installed in the mysql-test RPM. 
- - -diff -Naur mysql-5.1.43.orig/mysql-test/README mysql-5.1.43/mysql-test/README ---- mysql-5.1.43.orig/mysql-test/README 2010-01-15 12:14:43.000000000 -0500 -+++ mysql-5.1.43/mysql-test/README 2010-02-13 21:18:06.000000000 -0500 -@@ -6,6 +6,16 @@ - actually have a co-existing MySQL installation. The tests will not - conflict with it. - -+For use in Red Hat distributions, you should run the script as user mysql, -+so the best bet is something like -+ cd /usr/share/mysql-test -+ sudo -u mysql ./mysql-test-run -+This will use the installed mysql executables, but will run a private copy -+of the server process (using data files within /usr/share/mysql-test), -+so you need not start the mysqld service beforehand. -+To clean up afterwards, remove the created "var" subdirectory, eg -+ sudo -u mysql rm -rf /usr/share/mysql-test/var -+ - All tests must pass. If one or more of them fail on your system, please - read the following manual section for instructions on how to report the - problem: -@@ -25,7 +35,8 @@ - - With no test cases named on the command line, mysql-test-run falls back - to the normal "non-extern" behavior. The reason for this is that some --tests cannot run with an external server. -+tests cannot run with an external server (because they need to control the -+options with which the server is started). - - - You can create your own test cases. To create a test case, create a new diff --git a/packaging/rpm-uln/mysql-strmov.patch b/packaging/rpm-uln/mysql-strmov.patch deleted file mode 100644 index a144d0936e4..00000000000 --- a/packaging/rpm-uln/mysql-strmov.patch +++ /dev/null @@ -1,32 +0,0 @@ -Remove overly optimistic definition of strmov() as stpcpy(). - -mysql uses this macro with overlapping source and destination strings, -which is verboten per spec, and fails on some Red Hat platforms. -Deleting the definition is sufficient to make it fall back to a -byte-at-a-time copy loop, which should consistently give the -expected behavior. - -Note: the particular case that prompted this patch is reported and fixed -at http://bugs.mysql.com/bug.php?id=48864. However, my faith in upstream's -ability to detect this type of error is low, and I also see little evidence -of any real performance gain from optimizing these calls. So I'm keeping -this patch. - - -diff -Naur mysql-5.1.37.orig/include/m_string.h mysql-5.1.37/include/m_string.h ---- mysql-5.1.37.orig/include/m_string.h 2009-07-13 19:08:50.000000000 -0400 -+++ mysql-5.1.37/include/m_string.h 2009-08-31 21:49:49.000000000 -0400 -@@ -81,13 +81,6 @@ - extern void *(*my_str_malloc)(size_t); - extern void (*my_str_free)(void *); - --#if defined(HAVE_STPCPY) --#define strmov(A,B) stpcpy((A),(B)) --#ifndef stpcpy --extern char *stpcpy(char *, const char *); /* For AIX with gcc 2.95.3 */ --#endif --#endif -- - /* Declared in int2str() */ - extern char NEAR _dig_vec_upper[]; - extern char NEAR _dig_vec_lower[]; diff --git a/packaging/rpm-uln/mysql.init b/packaging/rpm-uln/mysql.init deleted file mode 100644 index 310e8cfa023..00000000000 --- a/packaging/rpm-uln/mysql.init +++ /dev/null @@ -1,209 +0,0 @@ -#!/bin/sh -# -# mysqld This shell script takes care of starting and stopping -# the MySQL subsystem (mysqld). -# -# chkconfig: - 64 36 -# description: MySQL database server. -# processname: mysqld -# config: /etc/my.cnf -# pidfile: /var/run/mysqld/mysqld.pid - -# Source function library. -. /etc/rc.d/init.d/functions - -# Source networking configuration. -. 
/etc/sysconfig/network - - -exec="/usr/bin/mysqld_safe" -prog="mysqld" - -# Set timeouts here so they can be overridden from /etc/sysconfig/mysqld -STARTTIMEOUT=120 -STOPTIMEOUT=60 - -[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog - -lockfile=/var/lock/subsys/$prog - - -# extract value of a MySQL option from config files -# Usage: get_mysql_option SECTION VARNAME DEFAULT -# result is returned in $result -# We use my_print_defaults which prints all options from multiple files, -# with the more specific ones later; hence take the last match. -get_mysql_option(){ - result=`/usr/bin/my_print_defaults "$1" | sed -n "s/^--$2=//p" | tail -n 1` - if [ -z "$result" ]; then - # not found, use default - result="$3" - fi -} - -get_mysql_option mysqld datadir "/var/lib/mysql" -datadir="$result" -get_mysql_option mysqld socket "$datadir/mysql.sock" -socketfile="$result" -get_mysql_option mysqld_safe log-error "/var/log/mysqld.log" -errlogfile="$result" -get_mysql_option mysqld_safe pid-file "/var/run/mysqld/mysqld.pid" -mypidfile="$result" - - -start(){ - [ -x $exec ] || exit 5 - # check to see if it's already running - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` - if [ $? = 0 ]; then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - elif echo "$RESPONSE" | grep -q "Access denied for user" - then - # already running, do nothing - action $"Starting $prog: " /bin/true - ret=0 - else - # prepare for start - touch "$errlogfile" - chown mysql:mysql "$errlogfile" - chmod 0640 "$errlogfile" - [ -x /sbin/restorecon ] && /sbin/restorecon "$errlogfile" - if [ ! -d "$datadir/mysql" ] ; then - # First, make sure $datadir is there with correct permissions - if [ ! -e "$datadir" -a ! -h "$datadir" ] - then - mkdir -p "$datadir" || exit 1 - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - [ -x /sbin/restorecon ] && /sbin/restorecon "$datadir" - # Now create the database - action $"Initializing MySQL database: " /usr/bin/mysql_install_db --datadir="$datadir" --user=mysql - ret=$? - chown -R mysql:mysql "$datadir" - if [ $ret -ne 0 ] ; then - return $ret - fi - fi - chown mysql:mysql "$datadir" - chmod 0755 "$datadir" - # Pass all the options determined above, to ensure consistent behavior. - # In many cases mysqld_safe would arrive at the same conclusions anyway - # but we need to be sure. (An exception is that we don't force the - # log-error setting, since this script doesn't really depend on that, - # and some users might prefer to configure logging to syslog.) - # Note: set --basedir to prevent probes that might trigger SELinux - # alarms, per bug #547485 - $exec --datadir="$datadir" --socket="$socketfile" \ - --pid-file="$mypidfile" \ - --basedir=/usr --user=mysql >/dev/null 2>&1 & - safe_pid=$! - # Spin for a maximum of N seconds waiting for the server to come up; - # exit the loop immediately if mysqld_safe process disappears. - # Rather than assuming we know a valid username, accept an "access - # denied" response as meaning the server is functioning. - ret=0 - TIMEOUT="$STARTTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - RESPONSE=`/usr/bin/mysqladmin --socket="$socketfile" --user=UNKNOWN_MYSQL_USER ping 2>&1` && break - echo "$RESPONSE" | grep -q "Access denied for user" && break - if ! /bin/kill -0 $safe_pid 2>/dev/null; then - echo "MySQL Daemon failed to start." - ret=1 - break - fi - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to start MySQL Daemon." 
- ret=1 - fi - if [ $ret -eq 0 ]; then - action $"Starting $prog: " /bin/true - touch $lockfile - else - action $"Starting $prog: " /bin/false - fi - fi - return $ret -} - -stop(){ - if [ ! -f "$mypidfile" ]; then - # not running; per LSB standards this is "ok" - action $"Stopping $prog: " /bin/true - return 0 - fi - MYSQLPID=`cat "$mypidfile"` - if [ -n "$MYSQLPID" ]; then - /bin/kill "$MYSQLPID" >/dev/null 2>&1 - ret=$? - if [ $ret -eq 0 ]; then - TIMEOUT="$STOPTIMEOUT" - while [ $TIMEOUT -gt 0 ]; do - /bin/kill -0 "$MYSQLPID" >/dev/null 2>&1 || break - sleep 1 - let TIMEOUT=${TIMEOUT}-1 - done - if [ $TIMEOUT -eq 0 ]; then - echo "Timeout error occurred trying to stop MySQL Daemon." - ret=1 - action $"Stopping $prog: " /bin/false - else - rm -f $lockfile - rm -f "$socketfile" - action $"Stopping $prog: " /bin/true - fi - else - action $"Stopping $prog: " /bin/false - fi - else - # failed to read pidfile, probably insufficient permissions - action $"Stopping $prog: " /bin/false - ret=4 - fi - return $ret -} - -restart(){ - stop - start -} - -condrestart(){ - [ -e $lockfile ] && restart || : -} - - -# See how we were called. -case "$1" in - start) - start - ;; - stop) - stop - ;; - status) - status -p "$mypidfile" $prog - ;; - restart) - restart - ;; - condrestart|try-restart) - condrestart - ;; - reload) - exit 3 - ;; - force-reload) - restart - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" - exit 2 -esac - -exit $? diff --git a/packaging/rpm-uln/mysql.spec.sh b/packaging/rpm-uln/mysql.spec.sh deleted file mode 100644 index 34aed51048f..00000000000 --- a/packaging/rpm-uln/mysql.spec.sh +++ /dev/null @@ -1,1991 +0,0 @@ -# -# This file was modified by Oracle in 2011 and later. -# Details of the modifications are described in the "changelog" section. -# -# Modifications copyright (c) 2011, 2012, Oracle and/or its -# affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; see the file COPYING. If not, write to the -# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston -# MA 02110-1301 USA. - -############################################################################## -# Some common macro definitions -############################################################################## - -# NOTE: "vendor" is used in upgrade/downgrade check, so you can't -# change these, has to be exactly as is. -# %define mysql_old_vendor MySQL AB # Applies to traditional MySQL RPMs only. -# %define mysql_vendor_2 Sun Microsystems, Inc. -%define mysql_vendor Oracle and/or its affiliates - -%define mysql_version @VERSION@ - -%define mysqldatadir /var/lib/mysql - -%define release 1 - -############################################################################## -# Command line handling -############################################################################## -# -# To set options: -# -# $ rpmbuild --define="option " ... 
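The start() and stop() logic in the mysql.init script above treats an "Access denied" reply from mysqladmin ping the same as a successful ping, since a refused login still proves mysqld is answering on its socket. The same convention is easy to express against the C client library; a rough sketch, not part of the packaging, where 1045 is the usual ER_ACCESS_DENIED_ERROR code and the socket path is only an example:

#include <stdio.h>
#include <mysql.h>

/* Returns 1 if a server answers on the socket, even when the login is
   refused; that is exactly the init script's notion of "running". */
static int server_is_up(const char *socket_path)
{
    MYSQL *conn = mysql_init(NULL);
    int up = 0;

    if (!conn)
        return 0;
    if (mysql_real_connect(conn, NULL, "UNKNOWN_MYSQL_USER", NULL,
                           NULL, 0, socket_path, 0))
        up = 1;                              /* connected outright */
    else if (mysql_errno(conn) == 1045)      /* ER_ACCESS_DENIED_ERROR */
        up = 1;                              /* alive, just refused us */
    mysql_close(conn);
    return up;
}

int main(void)
{
    printf("%s\n", server_is_up("/var/lib/mysql/mysql.sock") ? "up" : "down");
    return 0;
}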
-# - -# ---------------------------------------------------------------------------- -# Commercial builds -# ---------------------------------------------------------------------------- -%if %{undefined commercial} -%define commercial 0 -%endif - -# ---------------------------------------------------------------------------- -# Source name -# ---------------------------------------------------------------------------- -%if %{undefined src_base} -%define src_base mysql -%endif -%define src_dir %{src_base}-%{mysql_version} - -# ---------------------------------------------------------------------------- -# Feature set (storage engines, options). Default to community (everything) -# ---------------------------------------------------------------------------- -%if %{undefined feature_set} -%define feature_set community -%endif - -# ---------------------------------------------------------------------------- -# Server comment strings -# ---------------------------------------------------------------------------- -%if %{undefined compilation_comment_debug} -%define compilation_comment_debug MySQL Community Server - Debug (GPL) -%endif -%if %{undefined compilation_comment_release} -%define compilation_comment_release MySQL Community Server (GPL) -%endif - -# ---------------------------------------------------------------------------- -# Product and server suffixes -# ---------------------------------------------------------------------------- -%if %{undefined product_suffix} - %if %{defined short_product_tag} - %define product_suffix -%{short_product_tag} - %else - %define product_suffix %{nil} - %endif -%endif - -%if %{undefined server_suffix} -%define server_suffix %{nil} -%endif - -# ---------------------------------------------------------------------------- -# Distribution support -# ---------------------------------------------------------------------------- -%if %{undefined distro_specific} -%define distro_specific 0 -%endif -%if %{distro_specific} - %if %(test -f /etc/oracle-release && echo 1 || echo 0) - %define elver %(rpm -qf --qf '%%{version}\\n' /etc/oracle-release | sed -e 's/^\\([0-9]*\\).*/\\1/g') - %if "%elver" == "6" - %define distro_description Oracle Linux 6 - %define distro_releasetag el6 - %define distro_buildreq gcc-c++ ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %{error:Oracle Linux %{elver} is unsupported} - %endif - %else - %if %(test -f /etc/redhat-release && echo 1 || echo 0) - %define rhelver %(rpm -qf --qf '%%{version}\\n' /etc/redhat-release | sed -e 's/^\\([0-9]*\\).*/\\1/g') - %if "%rhelver" == "5" - %define distro_description Red Hat Enterprise Linux 5 - %define distro_releasetag rhel5 - %define distro_buildreq gcc-c++ gperf ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %if "%rhelver" == "6" - %define distro_description Red Hat Enterprise Linux 6 - %define distro_releasetag rhel6 - %define distro_buildreq gcc-c++ ncurses-devel perl readline-devel time zlib-devel - %define distro_requires chkconfig coreutils grep procps shadow-utils net-tools - %else - %{error:Red Hat Enterprise Linux %{rhelver} is unsupported} - %endif - %endif - %else - %if %(test -f /etc/SuSE-release && echo 1 || echo 0) - %define susever %(rpm -qf --qf '%%{version}\\n' /etc/SuSE-release | cut -d. 
-f1) - %if "%susever" == "10" - %define distro_description SUSE Linux Enterprise Server 10 - %define distro_releasetag sles10 - %define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client readline-devel zlib-devel - %define distro_requires aaa_base coreutils grep procps pwdutils - %else - %if "%susever" == "11" - %define distro_description SUSE Linux Enterprise Server 11 - %define distro_releasetag sles11 - %define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client procps pwdutils readline-devel zlib-devel - %define distro_requires aaa_base coreutils grep procps pwdutils - %else - %{error:SuSE %{susever} is unsupported} - %endif - %endif - %else - %{error:Unsupported distribution} - %endif - %endif - %endif -%else - %define generic_kernel %(uname -r | cut -d. -f1-2) - %define distro_description Generic Linux (kernel %{generic_kernel}) - %define distro_releasetag linux%{generic_kernel} - %define distro_buildreq gcc-c++ gperf ncurses-devel perl readline-devel time zlib-devel - %define distro_requires coreutils grep procps /sbin/chkconfig /usr/sbin/useradd /usr/sbin/groupadd -%endif - -# Avoid debuginfo RPMs, leaves binaries unstripped -%define debug_package %{nil} - -# Hack to work around bug in RHEL5 __os_install_post macro, wrong inverted -# test for __debug_package -%define __strip /bin/true - -# ---------------------------------------------------------------------------- -# Support optional "tcmalloc" library (experimental) -# ---------------------------------------------------------------------------- -%if %{defined malloc_lib_target} -%define WITH_TCMALLOC 1 -%else -%define WITH_TCMALLOC 0 -%endif - -############################################################################## -# Configuration based upon above user input, not to be set directly -############################################################################## - -%if %{commercial} -%define license_files_server %{src_dir}/LICENSE.mysql -%define license_type Commercial -%else -%define license_files_server %{src_dir}/COPYING %{src_dir}/README -%define license_type GPL -%endif - -############################################################################## -# Main spec file section -############################################################################## - -Name: mysql%{product_suffix} -Summary: MySQL client programs and shared libraries -Group: Applications/Databases -Version: @MYSQL_RPM_VERSION@ -Release: %{release}%{?distro_releasetag:.%{distro_releasetag}} -# exceptions allow client libraries to be linked with most open source SW, -# not only GPL code. -License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{license_type} license as shown in the Description field. -URL: http://www.mysql.com/ -Packager: MySQL Release Engineering -Vendor: %{mysql_vendor} - -# Regression tests may take a long time, override the default to skip them -%{!?runselftest:%global runselftest 1} - -# Upstream has a mirror redirector for downloads, so the URL is hard to -# represent statically. You can get the tarball by following a link from -# http://dev.mysql.com/downloads/mysql/ -Source0: %{src_dir}.tar.gz -# The upstream tarball includes non-free documentation that only the -# copyright holder (MySQL -> Sun -> Oracle) may ship. 
-# To remove the non-free documentation, run this script after downloading -# the tarball into the current directory: -# ./generate-tarball.sh $VERSION -# Then, source name changes: -# Source0: mysql-%{version}-nodocs.tar.gz -%if %{commercial} -NoSource: 0 -%endif -Source1: generate-tarball.sh -Source2: mysql.init -Source3: my.cnf -Source4: scriptstub.c -Source5: my_config.h -# The below is only needed for packages built outside MySQL -> Sun -> Oracle: -Source6: README.mysql-docs -Source9: mysql-embedded-check.c -# Working around perl dependency checking bug in rpm FTTB. Remove later. -Source999: filter-requires-mysql.sh - -# Patch1: mysql-ssl-multilib.patch Not needed by MySQL (yaSSL), will not work in 5.5 (cmake) -Patch2: mysql-5.5-errno.patch -Patch4: mysql-5.5-testing.patch -Patch5: mysql-install-test.patch -Patch6: mysql-5.5-stack-guard.patch -# Patch7: mysql-disable-test.patch Already fixed in current 5.1 -# Patch8: mysql-setschedparam.patch Will not work in 5.5 (cmake) -# Patch9: mysql-no-docs.patch Will not work in 5.5 (cmake) -Patch10: mysql-strmov.patch - # Not used by MySQL -# Patch12: mysql-cve-2008-7247.patch Already fixed in 5.5 -Patch13: mysql-expired-certs.patch - # Will not be used by MySQL -# Patch14: mysql-missing-string-code.patch Undecided, will not work in 5.5 (cmake) -# Patch15: mysql-lowercase-bug.patch Fixed in MySQL 5.1.54 and 5.5.9 -Patch16: mysql-chain-certs.patch -Patch17: mysql-5.5-libdir.patch -Patch18: mysql-5.5-fix-tests.patch -Patch19: mysql-5.5-mtr1.patch - -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root -BuildRequires: %{distro_buildreq} -BuildRequires: gawk -# make test requires time and ps -BuildRequires: procps -# Socket and Time::HiRes are needed to run regression tests -BuildRequires: perl(Socket), perl(Time::HiRes) - -Requires: %{distro_requires} -Requires: fileutils -Requires: mysql-libs%{product_suffix} = %{version}-%{release} -Requires: bash - -# If %%{product_suffix} is non-empty, the auto-generated capability is insufficient: -# We want all dependency handling to use the generic name only. -# Similar in other sub-packages -Provides: mysql - -# MySQL (with caps) is upstream's spelling of their own RPMs for mysql -Obsoletes: MySQL -# mysql-cluster used to be built from this SRPM, but no more -Obsoletes: mysql-cluster < 5.1.44 -# We need cross-product "Obsoletes:" to allow cross-product upgrades: -Obsoletes: mysql < %{version}-%{release} -Obsoletes: mysql-advanced < %{version}-%{release} - -# Working around perl dependency checking bug in rpm FTTB. Remove later. -%global __perl_requires %{SOURCE999} - -%description -n mysql%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. MySQL is a -client/server implementation consisting of a server daemon (mysqld) -and many different client programs and libraries. The base package -contains the standard MySQL client programs and generic MySQL files. - -The MySQL software has Dual Licensing, which means you can use the MySQL -software free of charge under the GNU General Public License -(http://www.gnu.org/licenses/). You can also purchase commercial MySQL -licenses from %{mysql_vendor} if you do not wish to be bound by the terms of -the GPL. See the chapter "Licensing and Support" in the manual for -further info. 
- -%package -n mysql-libs%{product_suffix} - -Summary: The shared libraries required for MySQL clients -Group: Applications/Databases -Requires: /sbin/ldconfig -Obsoletes: mysql-libs < %{version}-%{release} -Obsoletes: mysql-libs-advanced < %{version}-%{release} -Provides: mysql-libs - -%description -n mysql-libs%{product_suffix} -The mysql-libs package provides the essential shared libraries for any -MySQL client program or interface. You will need to install this package -to use any other MySQL package or any clients that need to connect to a -MySQL server. - -%package -n mysql-server%{product_suffix} - -Summary: The MySQL server and related files -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: sh-utils -Requires(pre): /usr/sbin/useradd -Requires(post): chkconfig -Requires(preun): chkconfig -# This is for /sbin/service -Requires(preun): initscripts -Requires(postun): initscripts -# mysqlhotcopy needs DBI/DBD support -Requires: perl-DBI, perl-DBD-MySQL -Obsoletes: MySQL-server -Obsoletes: mysql-server < %{version}-%{release} -Obsoletes: mysql-server-advanced < %{version}-%{release} -Provides: mysql-server - -%description -n mysql-server%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. MySQL is a -client/server implementation consisting of a server daemon (mysqld) -and many different client programs and libraries. This package contains -the MySQL server and some accompanying files and directories. - -%package -n mysql-devel%{product_suffix} - -Summary: Files for development of MySQL applications -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: openssl-devel -Obsoletes: MySQL-devel -Obsoletes: mysql-devel < %{version}-%{release} -Obsoletes: mysql-devel-advanced < %{version}-%{release} -Provides: mysql-devel - -%description -n mysql-devel%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains the libraries and header files that are needed for -developing MySQL client applications. - -%package -n mysql-embedded%{product_suffix} - -Summary: MySQL as an embeddable library -Group: Applications/Databases -Obsoletes: mysql-embedded < %{version}-%{release} -Obsoletes: mysql-embedded-advanced < %{version}-%{release} -Provides: mysql-embedded - -%description -n mysql-embedded%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains a version of the MySQL server that can be embedded -into a client application instead of running as a separate process, -as well as a command line client with such an embedded server. - -%package -n mysql-embedded-devel%{product_suffix} - -Summary: Development files for MySQL as an embeddable library -Group: Applications/Databases -Requires: mysql-embedded%{product_suffix} = %{version}-%{release} -Requires: mysql-devel%{product_suffix} = %{version}-%{release} -Obsoletes: mysql-embedded-devel < %{version}-%{release} -Obsoletes: mysql-embedded-devel-advanced < %{version}-%{release} -Provides: mysql-embedded-devel - -%description -n mysql-embedded-devel%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains files needed for developing and testing with -the embedded version of the MySQL server. 
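The embedded packages above exist so that an application can link the server in as a library (libmysqld) instead of talking to a separate mysqld process. A minimal sketch of such a client, in the spirit of the mysql-embedded-check.c probe deleted earlier in this commit but actually issuing a query; the option strings and data directory are illustrative only, not taken from the packaging:

#include <stdio.h>
#include <mysql.h>

static char *server_options[] = { "embedded-demo", "--datadir=./embedded-data", NULL };
static char *server_groups[]  = { "embedded", NULL };

int main(void)
{
    MYSQL *conn;
    MYSQL_RES *res;
    MYSQL_ROW row;

    /* Boots the embedded server inside this process. */
    if (mysql_library_init(2, server_options, server_groups))
    {
        fprintf(stderr, "could not initialize the embedded server\n");
        return 1;
    }
    conn = mysql_init(NULL);
    if (!conn ||
        !mysql_real_connect(conn, NULL, NULL, NULL, NULL, 0, NULL, 0))
    {
        fprintf(stderr, "connect failed: %s\n",
                conn ? mysql_error(conn) : "no handle");
        mysql_library_end();
        return 1;
    }
    if (mysql_query(conn, "SELECT VERSION()") == 0 &&
        (res = mysql_store_result(conn)) != NULL)
    {
        row = mysql_fetch_row(res);
        printf("embedded server version: %s\n", row && row[0] ? row[0] : "?");
        mysql_free_result(res);
    }
    mysql_close(conn);
    mysql_library_end();
    return 0;
}

Such a program is linked against libmysqld rather than libmysqlclient; everything else is the ordinary C API, which is what the mysql-embedded-devel headers and link library are for.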
- -%package -n mysql-test%{product_suffix} - -Summary: The test suite distributed with MySQL -Group: Applications/Databases -Requires: mysql%{product_suffix} = %{version}-%{release} -Requires: mysql-server%{product_suffix} = %{version}-%{release} -Obsoletes: MySQL-test -Obsoletes: mysql-test < %{version}-%{release} -Obsoletes: mysql-test-advanced < %{version}-%{release} -Provides: mysql-test - -%description -n mysql-test%{product_suffix} -MySQL is a multi-user, multi-threaded SQL database server. This -package contains the regression test suite distributed with -the MySQL sources. - -%prep -%setup -T -a 0 -c -n %{src_dir} - -cd %{src_dir} # read about "%setup -n" -# %patch1 -p1 -%patch2 -p1 -# %patch4 -p1 TODO / FIXME: if wanted, needs to be adapted to new mysql-test-run setup -%patch5 -p1 -%patch6 -p1 -# %patch8 -p1 -# %patch9 -p1 -# %patch10 -p1 -# %patch13 -p1 -# %patch14 -p1 -%patch16 -p1 -%patch17 -p1 -%patch18 -p1 -%patch19 -p1 - -# workaround for upstream bug #56342 -rm -f mysql-test/t/ssl_8k_key-master.opt - -%build - -# Fail quickly and obviously if user tries to build as root -%if %runselftest - if [ x"`id -u`" = x0 ]; then - echo "The MySQL regression tests may fail if run as root." - echo "If you really need to build the RPM as root, use" - echo "--define='runselftest 0' to skip the regression tests." - exit 1 - fi -%endif - -# Be strict about variables, bail at earliest opportunity, etc. -set -eu - -# Optional package files -touch optional-files-devel - -# -# Set environment in order of preference, MYSQL_BUILD_* first, then variable -# name, finally a default. RPM_OPT_FLAGS is assumed to be a part of the -# default RPM build environment. -# -# We set CXX=gcc by default to support so-called 'generic' binaries, where we -# do not have a dependancy on libgcc/libstdc++. This only works while we do -# not require C++ features such as exceptions, and may need to be removed at -# a later date. -# - -# This is a hack, $RPM_OPT_FLAGS on ia64 hosts contains flags which break -# the compile in cmd-line-utils/readline - needs investigation, but for now -# we simply unset it and use those specified directly in cmake. -%if "%{_arch}" == "ia64" -RPM_OPT_FLAGS= -%endif - -# This goes in sync with Patch19. "rm" is faster than "patch" for this. 
-rm -rf %{src_dir}/mysql-test/lib/v1 - -export PATH=${MYSQL_BUILD_PATH:-$PATH} -export CC=${MYSQL_BUILD_CC:-${CC:-gcc}} -export CXX=${MYSQL_BUILD_CXX:-${CXX:-gcc}} -export CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}} -# Following "%ifarch" developed by RedHat, MySQL/Oracle does not support/maintain Linux/Sparc: -# gcc seems to have some bugs on sparc as of 4.4.1, back off optimization -# submitted as bz #529298 -%ifarch sparc sparcv9 sparc64 -CFLAGS=`echo $CFLAGS| sed -e "s|-O2|-O1|g" ` -%endif -export CXXFLAGS=${MYSQL_BUILD_CXXFLAGS:-${CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors -fno-exceptions -fno-rtti}} -export LDFLAGS=${MYSQL_BUILD_LDFLAGS:-${LDFLAGS:-}} -export CMAKE=${MYSQL_BUILD_CMAKE:-${CMAKE:-cmake}} -export MAKE_JFLAG=${MYSQL_BUILD_MAKE_JFLAG:-%{?_smp_mflags}} - -# Build debug mysqld and libmysqld.a -mkdir debug -( - cd debug - # Attempt to remove any optimisation flags from the debug build - CFLAGS=`echo " ${CFLAGS} " | \ - sed -e 's/ -O[0-9]* / /' \ - -e 's/ -unroll2 / /' \ - -e 's/ -ip / /' \ - -e 's/^ //' \ - -e 's/ $//'` - CXXFLAGS=`echo " ${CXXFLAGS} " | \ - sed -e 's/ -O[0-9]* / /' \ - -e 's/ -unroll2 / /' \ - -e 's/ -ip / /' \ - -e 's/^ //' \ - -e 's/ $//'` - # XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before - # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM - ${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=Debug \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DCOMPILATION_COMMENT="%{compilation_comment_debug}" \ - -DMYSQL_SERVER_SUFFIX="%{server_suffix}" - echo BEGIN_DEBUG_CONFIG ; egrep '^#define' include/config.h ; echo END_DEBUG_CONFIG - make ${MAKE_JFLAG} VERBOSE=1 -) -# Build full release -mkdir release -( - cd release - # XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before - # XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM - ${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \ - -DCMAKE_BUILD_TYPE=RelWithDebInfo \ - -DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \ - -DFEATURE_SET="%{feature_set}" \ - -DCOMPILATION_COMMENT="%{compilation_comment_release}" \ - -DMYSQL_SERVER_SUFFIX="%{server_suffix}" - echo BEGIN_NORMAL_CONFIG ; egrep '^#define' include/config.h ; echo END_NORMAL_CONFIG - make ${MAKE_JFLAG} VERBOSE=1 -) - -# TODO / FIXME: Do we need "scriptstub"? -gcc $CFLAGS $LDFLAGS -o scriptstub "-DLIBDIR=\"%{_libdir}/mysql\"" %{SOURCE4} - -# TODO / FIXME: "libmysqld.so" should have been produced above -# regular build will make libmysqld.a but not libmysqld.so :-( -cd release -mkdir libmysqld/work -cd libmysqld/work -# "libmysqld" provides the same ABI as "libmysqlclient", but it implements the server: -# The shared object is identified by the full version, -# for linkage selection the first two levels are sufficient so that upgrades are possible -# (see "man ld", option "-soname"). -SO_FULL='%{mysql_version}' -SO_USE=`echo $SO_FULL | sed -e 's/\([0-9]\.[0-9]\)\.[0-9]*/\1/'` -# These two modules should pull everything else which is needed: -ar -x ../libmysqld.a client.c.o signal_handler.cc.o -gcc $CFLAGS $LDFLAGS -shared -Wl,-soname,libmysqld.so.$SO_USE -o libmysqld.so.$SO_FULL \ - *.o ../libmysqld.a \ - -lpthread -lcrypt -laio -lnsl -lssl -lcrypto -lz -lrt -lstdc++ -lm -lc -# this is to check that we built a complete library -cp %{SOURCE9} . 
-PROGNAME=`basename %{SOURCE9} .c` -ln -s libmysqld.so.$SO_FULL libmysqld.so.$SO_USE -gcc -I../../include -I../../../%{src_dir}/include $CFLAGS -o $PROGNAME %{SOURCE9} libmysqld.so.$SO_USE -LD_LIBRARY_PATH=. ldd $PROGNAME -cd ../.. -cd .. - -# TODO / FIXME: autotools only? -# make check - -# TODO / FIXME: Test suite is run elsewhere in release builds - -# do we need this for users who want to build from source? -# Also, check whether MTR_BUILD_THREAD=auto would solve all issues -%if %runselftest - # hack to let 32- and 64-bit tests run concurrently on same build machine - case `uname -m` in - ppc64 | s390x | x86_64 | sparc64 ) - MTR_BUILD_THREAD=7 - ;; - *) - MTR_BUILD_THREAD=11 - ;; - esac - export MTR_BUILD_THREAD - - # if you want to change which tests are run, look at mysql-5.5-testing.patch too. - (cd release && make test-bt-fast ) -%endif - -%install -RBR=$RPM_BUILD_ROOT -MBD=$RPM_BUILD_DIR/%{src_dir} -[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT - -# Ensure that needed directories exists -# TODO / FIXME: needed ? install -d $RBR%{mysqldatadir}/mysql -# TODO / FIXME: needed ? install -d $RBR%{_datadir}/mysql-test -# TODO / FIXME: needed ? install -d $RBR%{_datadir}/mysql/SELinux/RHEL4 -# TODO / FIXME: needed ? install -d $RBR%{_includedir} -# TODO / FIXME: needed ? install -d $RBR%{_libdir} -# TODO / FIXME: needed ? install -d $RBR%{_mandir} -# TODO / FIXME: needed ? install -d $RBR%{_sbindir} - -# Install all binaries -( - cd $MBD/release - make DESTDIR=$RBR install -) - -# For gcc builds, include libgcc.a in the devel subpackage (BUG 4921). Do -# this in a sub-shell to ensure we don't pollute the install environment -# with compiler bits. -( - PATH=${MYSQL_BUILD_PATH:-$PATH} - CC=${MYSQL_BUILD_CC:-${CC:-gcc}} - CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}} - if "${CC}" -v 2>&1 | grep '^gcc.version' >/dev/null 2>&1; then - libgcc=`${CC} ${CFLAGS} --print-libgcc-file` - if [ -f ${libgcc} ]; then - mkdir -p $RBR%{_libdir}/mysql - install -m 644 ${libgcc} $RBR%{_libdir}/mysql/libmygcc.a - echo "%{_libdir}/mysql/libmygcc.a" >>optional-files-devel - fi - fi -) - -# multilib header hacks -# we only apply this to known Red Hat multilib arches, per bug #181335 -case `uname -i` in - i386 | x86_64 | ppc | ppc64 | s390 | s390x | sparc | sparc64 ) - mv $RPM_BUILD_ROOT/usr/include/mysql/my_config.h $RPM_BUILD_ROOT/usr/include/mysql/my_config_`uname -i`.h - install -m 644 %{SOURCE5} $RPM_BUILD_ROOT/usr/include/mysql/ - ;; - *) - ;; -esac - -mkdir -p $RPM_BUILD_ROOT/var/log -touch $RPM_BUILD_ROOT/var/log/mysqld.log - -# List the installed tree for RPM package maintenance purposes. 
-find $RPM_BUILD_ROOT -print | sed "s|^$RPM_BUILD_ROOT||" | sort > ROOTFILES - -mkdir -p $RPM_BUILD_ROOT/etc/rc.d/init.d -mkdir -p $RPM_BUILD_ROOT/var/run/mysqld -install -m 0755 -d $RPM_BUILD_ROOT/var/lib/mysql -install -m 0755 %{SOURCE2} $RPM_BUILD_ROOT/etc/rc.d/init.d/mysqld -install -m 0644 %{SOURCE3} $RPM_BUILD_ROOT/etc/my.cnf -# obsolete: mv $RPM_BUILD_ROOT/usr/sql-bench $RPM_BUILD_ROOT%{_datadir}/sql-bench # 'sql-bench' is dropped -# obsolete: mv $RPM_BUILD_ROOT/usr/mysql-test $RPM_BUILD_ROOT%{_datadir}/mysql-test # 'mysql-test' is there already -# 5.1.32 forgets to install the mysql-test README file -# obsolete: install -m 0644 mysql-test/README $RPM_BUILD_ROOT%{_datadir}/mysql-test/README # 'README' is there already - -mv ${RPM_BUILD_ROOT}%{_bindir}/mysqlbug ${RPM_BUILD_ROOT}%{_libdir}/mysql/mysqlbug -install -m 0755 scriptstub ${RPM_BUILD_ROOT}%{_bindir}/mysqlbug -mv ${RPM_BUILD_ROOT}%{_bindir}/mysql_config ${RPM_BUILD_ROOT}%{_libdir}/mysql/mysql_config -install -m 0755 scriptstub ${RPM_BUILD_ROOT}%{_bindir}/mysql_config - -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.a -SO_FULL='%{mysql_version}' -SO_USE=`echo $SO_FULL | sed -e 's/\([0-9]\.[0-9]\)\.[0-9]*/\1/'` -install -m 0755 release/libmysqld/work/libmysqld.so.$SO_FULL ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so.$SO_FULL -ln -s libmysqld.so.$SO_FULL ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so.$SO_USE -ln -s libmysqld.so.$SO_USE ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqld.so - -rm -f ${RPM_BUILD_ROOT}%{_bindir}/comp_err -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/comp_err.1* -rm -f ${RPM_BUILD_ROOT}%{_bindir}/make_win_binary_distribution -rm -f ${RPM_BUILD_ROOT}%{_bindir}/make_win_src_distribution -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/make_win_bin_dist.1* -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/make_win_src_distribution.1* -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/libmysqlclient*.la -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/*.a -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/plugin/*.la -rm -f ${RPM_BUILD_ROOT}%{_libdir}/mysql/plugin/*.a -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/binary-configure -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/make_binary_distribution -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/make_sharedlib_distribution -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mi_test_all* -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/ndb-config-2-node.ini -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql.server -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysqld_multi.server -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/MySQL-shared-compat.spec -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/*.plist -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/preinstall -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/postinstall -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql-*.spec -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/mysql-log-rotate -rm -f ${RPM_BUILD_ROOT}%{_datadir}/mysql/ChangeLog -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/mysql-stress-test.pl.1* -rm -f ${RPM_BUILD_ROOT}%{_mandir}/man1/mysql-test-run.pl.1* -rm -rf ${RPM_BUILD_ROOT}%{_datadir}/mysql/solaris - -mkdir -p $RPM_BUILD_ROOT/etc/ld.so.conf.d -echo "%{_libdir}/mysql" > $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-%{_arch}.conf - -# The below *only* applies to builds not done by MySQL / Sun / Oracle: -# copy additional docs into build tree so %%doc will find them -# cp %{SOURCE6} README.mysql-docs - -%clean -[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT - -%pre -n mysql-server%{product_suffix} - -# Check if we can safely upgrade. 
An upgrade is only safe if it's from one -# of our RPMs in the same version family. - -# Handle both ways of spelling the capability. -installed=`rpm -q --whatprovides mysql-server 2> /dev/null` -if [ $? -ne 0 -o -z "$installed" ]; then - installed=`rpm -q --whatprovides MySQL-server 2> /dev/null` -fi -if [ $? -eq 0 -a -n "$installed" ]; then - installed=`echo $installed | sed 's/\([^ ]*\) .*/\1/'` # Tests have shown duplicated package names - vendor=`rpm -q --queryformat='%{VENDOR}' "$installed" 2>&1` - version=`rpm -q --queryformat='%{VERSION}' "$installed" 2>&1` - myvendor='%{mysql_vendor}' - myversion='%{mysql_version}' - - old_family=`echo $version \ - | sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'` - new_family=`echo $myversion \ - | sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'` - - [ -z "$vendor" ] && vendor='' - [ -z "$old_family" ] && old_family="" - [ -z "$new_family" ] && new_family="" - - error_text= - if [ "$vendor" != "$myvendor" ]; then - error_text="$error_text -The current MySQL server package is provided by a different -vendor ($vendor) than $myvendor. -Some files may be installed to different locations, including log -files and the service startup script in %{_sysconfdir}/init.d/. -" - fi - - if [ "$old_family" != "$new_family" ]; then - error_text="$error_text -Upgrading directly from MySQL $old_family to MySQL $new_family may not -be safe in all cases. A manual dump and restore using mysqldump is -recommended. It is important to review the MySQL manual's Upgrading -section for version-specific incompatibilities. -" - fi - - if [ -n "$error_text" ]; then - cat <&2 - -****************************************************************** -A MySQL server package ($installed) is installed. -$error_text -A manual upgrade is required. - -- Ensure that you have a complete, working backup of your data and my.cnf - files -- Shut down the MySQL server cleanly -- Remove the existing MySQL packages. Usually this command will - list the packages you should remove: - rpm -qa | grep -i '^mysql-' - - You may choose to use 'rpm --nodeps -ev ' to remove - the package which contains the mysqlclient shared library. The - library will be reinstalled by the MySQL-shared-compat package. -- Install the new MySQL packages supplied by $myvendor -- Ensure that the MySQL server is started -- Run the 'mysql_upgrade' program - -This is a brief description of the upgrade process. Important details -can be found in the MySQL manual, in the Upgrading section. 
-****************************************************************** -HERE - exit 1 - fi -fi - -/usr/sbin/groupadd -g 27 -o -r mysql >/dev/null 2>&1 || : -/usr/sbin/useradd -M -N -g mysql -o -r -d /var/lib/mysql -s /bin/bash \ - -c "MySQL Server" -u 27 mysql >/dev/null 2>&1 || : - -%post -n mysql-libs%{product_suffix} -/sbin/ldconfig - -%post -n mysql-server%{product_suffix} -if [ $1 = 1 ]; then - /sbin/chkconfig --add mysqld -fi -/bin/chmod 0755 /var/lib/mysql -/bin/touch /var/log/mysqld.log - -%preun -n mysql-server%{product_suffix} -if [ $1 = 0 ]; then - /sbin/service mysqld stop >/dev/null 2>&1 - /sbin/chkconfig --del mysqld -fi - -%postun -n mysql-libs%{product_suffix} -if [ $1 = 0 ] ; then - /sbin/ldconfig -fi - -%postun -n mysql-server%{product_suffix} -if [ $1 -ge 1 ]; then - /sbin/service mysqld condrestart >/dev/null 2>&1 || : -fi - - -%files -n mysql%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} - -# The below file *only* applies to builds not done by MySQL / Sun / Oracle: -# %doc README.mysql-docs - -%{_bindir}/msql2mysql -%{_bindir}/mysql -%{_bindir}/mysql_config -%{_bindir}/mysql_find_rows -%{_bindir}/mysql_waitpid -%{_bindir}/mysqlaccess -%{_bindir}/mysqlaccess.conf -%{_bindir}/mysqladmin -%{_bindir}/mysqlbinlog -%{_bindir}/mysqlcheck -%{_bindir}/mysqldump -%{_bindir}/mysqlimport -%{_bindir}/mysqlshow -%{_bindir}/mysqlslap -%{_bindir}/my_print_defaults - -%{_mandir}/man1/mysql.1* -%{_mandir}/man1/mysql_config.1* -%{_mandir}/man1/mysql_find_rows.1* -%{_mandir}/man1/mysql_waitpid.1* -%{_mandir}/man1/mysqlaccess.1* -%{_mandir}/man1/mysqladmin.1* -%{_mandir}/man1/mysqldump.1* -%{_mandir}/man1/mysqlshow.1* -%{_mandir}/man1/mysqlslap.1* -%{_mandir}/man1/my_print_defaults.1* - -%{_libdir}/mysql/mysqlbug -%{_libdir}/mysql/mysql_config - -%files -n mysql-libs%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} -# although the default my.cnf contains only server settings, we put it in the -# libs package because it can be used for client settings too. 
-%config(noreplace) /etc/my.cnf -%dir %{_libdir}/mysql -%{_libdir}/mysql/libmysqlclient*.so.* -/etc/ld.so.conf.d/* - -%dir %{_datadir}/mysql -%{_datadir}/mysql/english -%lang(cs) %{_datadir}/mysql/czech -%lang(da) %{_datadir}/mysql/danish -%lang(nl) %{_datadir}/mysql/dutch -%lang(et) %{_datadir}/mysql/estonian -%lang(fr) %{_datadir}/mysql/french -%lang(de) %{_datadir}/mysql/german -%lang(el) %{_datadir}/mysql/greek -%lang(hu) %{_datadir}/mysql/hungarian -%lang(it) %{_datadir}/mysql/italian -%lang(ja) %{_datadir}/mysql/japanese -%lang(ko) %{_datadir}/mysql/korean -%lang(no) %{_datadir}/mysql/norwegian -%lang(no) %{_datadir}/mysql/norwegian-ny -%lang(pl) %{_datadir}/mysql/polish -%lang(pt) %{_datadir}/mysql/portuguese -%lang(ro) %{_datadir}/mysql/romanian -%lang(ru) %{_datadir}/mysql/russian -%lang(sr) %{_datadir}/mysql/serbian -%lang(sk) %{_datadir}/mysql/slovak -%lang(es) %{_datadir}/mysql/spanish -%lang(sv) %{_datadir}/mysql/swedish -%lang(uk) %{_datadir}/mysql/ukrainian -%{_datadir}/mysql/charsets - -%files -n mysql-server%{product_suffix} -f release/support-files/plugins.files -%defattr(-,root,root) -%doc release/support-files/*.cnf -%if 0%{?commercial} - %doc %{_datadir}/info/mysql.info* -%endif -%doc %{src_dir}/Docs/ChangeLog -%doc %{src_dir}/Docs/INFO_SRC* -%doc release/Docs/INFO_BIN* - -%{_bindir}/myisamchk -%{_bindir}/myisam_ftdump -%{_bindir}/myisamlog -%{_bindir}/myisampack -%{_bindir}/mysql_convert_table_format -%{_bindir}/mysql_fix_extensions -%{_bindir}/mysql_install_db -%{_bindir}/mysql_plugin -%{_bindir}/mysql_secure_installation -%if %{commercial} -%else -%{_bindir}/mysql_setpermission -%endif -%{_bindir}/mysql_tzinfo_to_sql -%{_bindir}/mysql_upgrade -%{_bindir}/mysql_zap -%{_bindir}/mysqlbug -%{_bindir}/mysqldumpslow -%{_bindir}/mysqld_multi -%{_bindir}/mysqld_safe -%{_bindir}/mysqlhotcopy -%{_bindir}/mysqltest -%{_bindir}/innochecksum -%{_bindir}/perror -%{_bindir}/replace -%{_bindir}/resolve_stack_dump -%{_bindir}/resolveip - -/usr/libexec/mysqld -/usr/libexec/mysqld-debug -%{_libdir}/mysql/plugin/daemon_example.ini - -%if %{WITH_TCMALLOC} -%{_libdir}/mysql/%{malloc_lib_target} -%endif - -# obsolete by "-f release/support-files/plugins.files" above -# %{_libdir}/mysql/plugin - -%{_mandir}/man1/msql2mysql.1* -%{_mandir}/man1/myisamchk.1* -%{_mandir}/man1/myisamlog.1* -%{_mandir}/man1/myisampack.1* -%{_mandir}/man1/mysql_convert_table_format.1* -%{_mandir}/man1/myisam_ftdump.1* -%{_mandir}/man1/mysql.server.1* -%{_mandir}/man1/mysql_fix_extensions.1* -%{_mandir}/man1/mysql_install_db.1* -%{_mandir}/man1/mysql_plugin.1* -%{_mandir}/man1/mysql_secure_installation.1* -%{_mandir}/man1/mysql_upgrade.1* -%{_mandir}/man1/mysql_zap.1* -%{_mandir}/man1/mysqlbug.1* -%{_mandir}/man1/mysqldumpslow.1* -%{_mandir}/man1/mysqlbinlog.1* -%{_mandir}/man1/mysqlcheck.1* -%{_mandir}/man1/mysqld_multi.1* -%{_mandir}/man1/mysqld_safe.1* -%{_mandir}/man1/mysqlhotcopy.1* -%{_mandir}/man1/mysqlimport.1* -%{_mandir}/man1/mysqlman.1* -%if %{commercial} -%else -%{_mandir}/man1/mysql_setpermission.1* -%endif -%{_mandir}/man1/mysqltest.1* -%{_mandir}/man1/innochecksum.1* -%{_mandir}/man1/perror.1* -%{_mandir}/man1/replace.1* -%{_mandir}/man1/resolve_stack_dump.1* -%{_mandir}/man1/resolveip.1* -%{_mandir}/man1/mysql_tzinfo_to_sql.1* -%{_mandir}/man8/mysqld.8* - -%{_datadir}/mysql/errmsg-utf8.txt -%{_datadir}/mysql/fill_help_tables.sql -%{_datadir}/mysql/magic -%{_datadir}/mysql/mysql_system_tables.sql -%{_datadir}/mysql/mysql_system_tables_data.sql -%{_datadir}/mysql/mysql_test_data_timezone.sql 
-%{_datadir}/mysql/my-*.cnf -%{_datadir}/mysql/config.*.ini - -/etc/rc.d/init.d/mysqld -%attr(0755,mysql,mysql) %dir /var/run/mysqld -%attr(0755,mysql,mysql) %dir /var/lib/mysql -%attr(0640,mysql,mysql) %config(noreplace) %verify(not md5 size mtime) /var/log/mysqld.log - -# TODO / FIXME: Do we need "libmygcc.a"? If yes, append "-f optional-files-devel" -# and fix the "rm -f" list in the "install" section. -%files -n mysql-devel%{product_suffix} -%defattr(-,root,root) -/usr/include/mysql -/usr/share/aclocal/mysql.m4 -%{_libdir}/mysql/libmysqlclient*.so - -%files -n mysql-embedded%{product_suffix} -%defattr(-,root,root) -%doc %{license_files_server} -%{_libdir}/mysql/libmysqld.so.* -%{_bindir}/mysql_embedded - -%files -n mysql-embedded-devel%{product_suffix} -%defattr(-,root,root) -%{_libdir}/mysql/libmysqld.so -%{_bindir}/mysql_client_test_embedded -%{_bindir}/mysqltest_embedded -%{_mandir}/man1/mysql_client_test_embedded.1* -%{_mandir}/man1/mysqltest_embedded.1* - -%files -n mysql-test%{product_suffix} -%defattr(-,root,root) -%{_bindir}/mysql_client_test -%attr(-,mysql,mysql) %{_datadir}/mysql-test - -%{_mandir}/man1/mysql_client_test.1* - -%changelog -* Tue Nov 05 2013 Balasubramanian Kandasamy -- Removed non gpl file mysql.info from community packages - -* Wed Jul 10 2013 Balasubramanian Kandasamy -- Removed directory /usr/share/mysql/solaris/postinstall-solaris to resolve build - error - -* Thu Dec 7 2012 Joerg Bruehe -- Change the way in which "libmysqld.so" is created: Using all object modules - was wrong, gcc / ld can resolve the dependencies from "libmysqld.a". - Also, identify the ".so" version from the MySQL version, "0.0.1" was wrong. - Bug#15972480 - -* Tue Sep 18 2012 Joerg Bruehe -- Restrict the vendor check to Oracle: There is no history here - which we have to allow for. - -* Thu Jul 26 2012 Joerg Bruehe -- Add the vendor and release series checks from the traditional MySQL RPM - spec file, to protect against errors happening during upgrades. -- Do some code alignment with the traditional MySQL RPM spec file, - to make synchronous maintenance (and possibly even integration?) easier. - -* Mon Feb 13 2012 Joerg Bruehe -- Add "Provides:" lines for the generic names of the subpackages, - independent of "product_suffix". - -* Tue Feb 7 2012 Joerg Bruehe -- Make "mysql_setpermission" and its man page appear in GPL builds only. - -* Thu Nov 24 2011 Joerg Bruehe -- Add two patches (#18 + #19) regarding the test suite; - version 1 of "mysql-test-run.pl" had to go because the auto-detection - of Perl dependencies does not handle differences between run directory - and delivery location. - -* Thu Nov 3 2011 Joerg Bruehe -- Adapt from MySQL 5.1 to 5.5, tested using 5.5.17: - - Done by the MySQL Build Team at Oracle: - set as packager, set copyright owner and related info; - - handle command line options, allowing different configurations, platforms, ... 
- - configurations will show up in the file name as "product_suffix", - - use "-n" for all subpackage specifications, - - license may be GPL or commercial, mention that in the description, - the license output and the included license files will vary, - - commercial is "nosource", - - improve "requires" listings for different platforms, - - explicitly use "product_suffix" in the "requires" entries; - - adapt to 5.5 changes in features and function: - - remove "mysql-bench" package (files are outdated, not maintained), - - no InnoDB plugin, - - the set of plugins will vary by configuration, to control the "server" - package contents use "-f release/support-files/plugins.files" in the - "files" section, - - remove "mysqlmanager", "mysql_fix_privilege_tables", - - add "mysql_embedded", "mysql-plugin", "mysqlaccess.conf", "magic", - - "errmsg.txt" is now in UTF8: "errmsg-utf8.txt", - - adapt patches to changed code where needed, rename these to include "5.5", - - stop using patches which are not applicable to 5.5; - - 5.5 uses a different way of building: - - autotools are replaced by cmake, - - both a "release" and a "debug" server are built in separate subtrees - ("out of source"!), this also affects path names in further handling, - - the debug server is added to the "server" subpackage, - - add "mysql-5.5-libdir.patch" to handle file placement at user site. - -* Mon Dec 20 2010 Tom Lane 5.1.52-1.1 -- Update to MySQL 5.1.52, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-52.html - including numerous small security issues -Resolves: #652553 -- Sync with current Fedora package; this includes: -- Duplicate COPYING and EXCEPTIONS-CLIENT in -libs and -embedded subpackages, - to ensure they are available when any subset of mysql RPMs are installed, - per revised packaging guidelines -- Allow init script's STARTTIMEOUT/STOPTIMEOUT to be overridden from sysconfig - -* Thu Jul 15 2010 Tom Lane 5.1.47-4 -- Add backported patch for CVE-2010-2008 (upstream bug 53804) -Resolves: #614215 -- Add BuildRequires perl(Time::HiRes) ... seems to no longer be installed - by just pulling in perl. - -* Mon Jun 28 2010 Tom Lane 5.1.47-3 -- Add -p "$mypidfile" to initscript's status call to improve corner cases. - (Note: can't be fixed in Fedora until 595597 is fixed there.) 
-Resolves: #596008 - -* Mon Jun 7 2010 Tom Lane 5.1.47-2 -- Add back "partition" storage engine -Resolves: #598585 -- Fix broken "federated" storage engine plugin -Resolves: #587170 -- Read all certificates in SSL certificate files, to support chained certs -Resolves: #598656 - -* Mon May 24 2010 Tom Lane 5.1.47-1 -- Update to MySQL 5.1.47, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-47.html - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-46.html - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-45.html - including fixes for CVE-2010-1621, CVE-2010-1626, - CVE-2010-1848, CVE-2010-1849, CVE-2010-1850 -Resolves: #590598 -- Create mysql group explicitly in pre-server script, to ensure correct GID - -* Mon Mar 8 2010 Tom Lane 5.1.44-2 -- Update to MySQL 5.1.44, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-44.html -Resolves: #565554 -- Remove mysql.info, which is not freely redistributable -Related: #560181 -- Revert broken upstream fix for their bug 45058 -Related: #566547 -- Bring init script into some modicum of compliance with Fedora/LSB standards -Resolves: #557711 -Resolves: #562749 - -* Mon Feb 15 2010 Tom Lane 5.1.43-2 -- Update to MySQL 5.1.43, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-43.html -Resolves: #565554 -- Remove mysql-cluster, which is no longer supported by upstream in this - source distribution. If we want it we'll need a separate SRPM for it. -Resolves: #565210 - -* Fri Jan 29 2010 Tom Lane 5.1.42-7 -- Add backported patch for CVE-2008-7247 (upstream bug 39277) -Resolves: #549329 -- Use non-expired certificates for SSL testing (upstream bug 50702) - -* Tue Jan 26 2010 Tom Lane 5.1.42-6 -- Emit explicit error message if user tries to build RPM as root -Resolves: #558915 - -* Wed Jan 20 2010 Tom Lane 5.1.42-5 -- Correct Source0: tag and comment to reflect how to get the tarball - -* Fri Jan 8 2010 Tom Lane 5.1.42-4 -- Sync with current Fedora build, including: -- Update to MySQL 5.1.42, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-42.html -- Disable symbolic links by default in /etc/my.cnf -Resolves: #553653 -- Remove static libraries (.a files) from package, per packaging guidelines -- Change %%define to %%global, per packaging guidelines -- Disable building the innodb plugin; it tickles assorted gcc bugs and - doesn't seem entirely ready for prime time anyway. 
-Resolves: #553632 -- Start mysqld_safe with --basedir=/usr, to avoid unwanted SELinux messages - (see 547485) -- Stop waiting during "service mysqld start" if mysqld_safe exits -Resolves: #544095 - -* Mon Nov 23 2009 Tom Lane 5.1.41-1 -- Update to MySQL 5.1.41, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-41.html - including fixes for CVE-2009-4019 -Resolves: #549327 -- Don't set old_passwords=1; we aren't being bug-compatible with 3.23 anymore -Resolves: #540735 - -* Tue Nov 10 2009 Tom Lane 5.1.40-1 -- Update to MySQL 5.1.40, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-40.html -- Do not force the --log-error setting in mysqld init script -Resolves: #533736 - -* Sat Oct 17 2009 Tom Lane 5.1.39-4 -- Replace kluge fix for ndbd sparc crash with a real fix (mysql bug 48132) - -* Thu Oct 15 2009 Tom Lane 5.1.39-3 -- Work around two different compiler bugs on sparc, one by backing off - optimization from -O2 to -O1, and the other with a klugy patch -Related: #529298, #529299 -- Clean up bogosity in multilib stub header support: ia64 should not be - listed (it's not multilib), sparc and sparc64 should be - -* Wed Sep 23 2009 Tom Lane 5.1.39-2 -- Work around upstream bug 46895 by disabling outfile_loaddata test - -* Tue Sep 22 2009 Tom Lane 5.1.39-1 -- Update to MySQL 5.1.39, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-39.html - -* Mon Aug 31 2009 Tom Lane 5.1.37-5 -- Work around unportable assumptions about stpcpy(); re-enable main.mysql test -- Clean up some obsolete parameters to the configure script - -* Sat Aug 29 2009 Tom Lane 5.1.37-4 -- Remove one misguided patch; turns out I was chasing a glibc bug -- Temporarily disable "main.mysql" test; there's something broken there too, - but we need to get mysql built in rawhide for dependency reasons - -* Fri Aug 21 2009 Tomas Mraz - 5.1.37-3 -- rebuilt with new openssl - -* Fri Aug 14 2009 Tom Lane 5.1.37-2 -- Add a couple of patches to improve the probability of the regression tests - completing in koji builds - -* Sun Aug 2 2009 Tom Lane 5.1.37-1 -- Update to MySQL 5.1.37, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-37.html - -* Sat Jul 25 2009 Fedora Release Engineering - 5.1.36-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild - -* Fri Jul 10 2009 Tom Lane 5.1.36-1 -- Update to MySQL 5.1.36, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-36.html - -* Sat Jun 6 2009 Tom Lane 5.1.35-1 -- Update to MySQL 5.1.35, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-35.html -- Ensure that /var/lib/mysql is created with the right SELinux context -Resolves: #502966 - -* Fri May 15 2009 Tom Lane 5.1.34-1 -- Update to MySQL 5.1.34, for various fixes described at - http://dev.mysql.com/doc/refman/5.1/en/news-5-1-34.html -- Increase startup timeout per bug #472222 - -* Wed Apr 15 2009 Tom Lane 5.1.33-2 -- Increase stack size of ndbd threads for safety's sake. -Related: #494631 - -* Tue Apr 7 2009 Tom Lane 5.1.33-1 -- Update to MySQL 5.1.33. -- Disable use of pthread_setschedparam; doesn't work the way code expects. -Related: #477624 - -* Wed Mar 4 2009 Tom Lane 5.1.32-1 -- Update to MySQL 5.1.32. - -* Wed Feb 25 2009 Fedora Release Engineering - 5.1.31-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild - -* Fri Feb 13 2009 Tom Lane 5.1.31-1 -- Update to MySQL 5.1.31. 
- -* Thu Jan 22 2009 Tom Lane 5.1.30-2 -- hm, apparently --with-innodb and --with-ndbcluster are still needed - even though no longer documented ... - -* Thu Jan 22 2009 Tom Lane 5.1.30-1 -- Update to MySQL 5.1.30. Note that this includes an ABI break for - libmysqlclient (it's now got .so major version 16). -- This also updates mysql for new openssl build - -* Wed Oct 1 2008 Tom Lane 5.0.67-2 -- Build the "embedded server" library, and package it in a new sub-RPM - mysql-embedded, along with mysql-embedded-devel for devel support files. -Resolves: #149829 - -* Sat Aug 23 2008 Tom Lane 5.0.67-1 -- Update to mysql version 5.0.67 -- Move mysql_config's man page to base package, again (apparently I synced - that change the wrong way while importing specfile changes for ndbcluster) - -* Sun Jul 27 2008 Tom Lane 5.0.51a-2 -- Enable ndbcluster support -Resolves: #163758 -- Suppress odd crash messages during package build, caused by trying to - build dbug manual (which we don't install anyway) with dbug disabled -Resolves: #437053 -- Improve mysql.init to pass configured datadir to mysql_install_db, - and to force user=mysql for both mysql_install_db and mysqld_safe. -Related: #450178 - -* Mon Mar 3 2008 Tom Lane 5.0.51a-1 -- Update to mysql version 5.0.51a - -* Mon Mar 3 2008 Tom Lane 5.0.45-11 -- Fix mysql-stack-guard patch to work correctly on IA64 -- Fix mysql.init to wait correctly when socket is not in default place -Related: #435494 - -* Mon Mar 03 2008 Dennis Gilmore 5.0.45-10 -- add sparc64 to 64 bit arches for test suite checking -- add sparc, sparcv9 and sparc64 to multilib handling - -* Thu Feb 28 2008 Tom Lane 5.0.45-9 -- Fix the stack overflow problem encountered in January. It seems the real -issue is that the buildfarm machines were moved to RHEL5, which uses 64K not -4K pages on PPC, and because RHEL5 takes the guard area out of the requested -thread stack size we no longer had enough headroom. -Related: #435337 - -* Tue Feb 19 2008 Fedora Release Engineering - 5.0.45-8 -- Autorebuild for GCC 4.3 - -* Tue Jan 8 2008 Tom Lane 5.0.45-7 -- Unbelievable ... upstream still thinks that it's a good idea to have a - regression test that is guaranteed to begin failing come January 1. -- ... and it seems we need to raise STACK_MIN_SIZE again too. - -* Thu Dec 13 2007 Tom Lane 5.0.45-6 -- Back-port upstream fixes for CVE-2007-5925, CVE-2007-5969, CVE-2007-6303. -Related: #422211 - -* Wed Dec 5 2007 Tom Lane 5.0.45-5 -- Rebuild for new openssl - -* Sat Aug 25 2007 Tom Lane 5.0.45-4 -- Seems we need explicit BuildRequires on gawk and procps now -- Rebuild to fix Fedora toolchain issues - -* Sun Aug 12 2007 Tom Lane 5.0.45-3 -- Recent perl changes in rawhide mean we need a more specific BuildRequires - -* Thu Aug 2 2007 Tom Lane 5.0.45-2 -- Update License tag to match code. -- Work around recent Fedora change that makes "open" a macro name. 
- -* Sun Jul 22 2007 Tom Lane 5.0.45-1 -- Update to MySQL 5.0.45 -Resolves: #246535 -- Move mysql_config's man page to base package -Resolves: #245770 -- move my_print_defaults to base RPM, for consistency with Stacks packaging -- mysql user is no longer deleted at RPM uninstall -Resolves: #241912 - -* Thu Mar 29 2007 Tom Lane 5.0.37-2 -- Use a less hacky method of getting default values in initscript -Related: #233771, #194596 -- Improve packaging of mysql-libs per suggestions from Remi Collet -Resolves: #233731 -- Update default /etc/my.cnf ([mysql.server] has been bogus for a long time) - -* Mon Mar 12 2007 Tom Lane 5.0.37-1 -- Update to MySQL 5.0.37 -Resolves: #231838 -- Put client library into a separate mysql-libs RPM to reduce dependencies -Resolves: #205630 - -* Fri Feb 9 2007 Tom Lane 5.0.33-1 -- Update to MySQL 5.0.33 -- Install band-aid fix for "view" regression test designed to fail after 2006 -- Don't chmod -R the entire database directory tree on every startup -Related: #221085 -- Fix unsafe use of install-info -Resolves: #223713 -- Cope with new automake in F7 -Resolves: #224171 - -* Thu Nov 9 2006 Tom Lane 5.0.27-1 -- Update to MySQL 5.0.27 (see CVE-2006-4031, CVE-2006-4226, CVE-2006-4227) -Resolves: #202247, #202675, #203427, #203428, #203432, #203434, #208641 -- Fix init script to return status 1 on server start timeout -Resolves: #203910 -- Move mysqldumpslow from base package to mysql-server -Resolves: #193559 -- Adjust link options for BDB module -Resolves: #199368 - -* Wed Jul 12 2006 Jesse Keating - 5.0.22-2.1 -- rebuild - -* Sat Jun 10 2006 Tom Lane 5.0.22-2 -- Work around brew's tendency not to clean up failed builds completely, - by adding code in mysql-testing.patch to kill leftover mysql daemons. - -* Thu Jun 8 2006 Tom Lane 5.0.22-1 -- Update to MySQL 5.0.22 (fixes CVE-2006-2753) -- Install temporary workaround for gcc bug on s390x (bz #193912) - -* Tue May 2 2006 Tom Lane 5.0.21-2 -- Fix bogus perl Requires for mysql-test - -* Mon May 1 2006 Tom Lane 5.0.21-1 -- Update to MySQL 5.0.21 - -* Mon Mar 27 2006 Tom Lane 5.0.18-4 -- Modify multilib header hack to not break non-RH arches, per bug #181335 -- Remove logrotate script, per bug #180639. -- Add a new mysql-test RPM to carry the regression test files; - hack up test scripts as needed to make them run in /usr/share/mysql-test. 
- -* Fri Feb 10 2006 Jesse Keating - 5.0.18-2.1 -- bump again for double-long bug on ppc(64) - -* Thu Feb 9 2006 Tom Lane 5.0.18-2 -- err-log option has been renamed to log-error, fix my.cnf and initscript - -* Tue Feb 07 2006 Jesse Keating - 5.0.18-1.1 -- rebuilt for new gcc4.1 snapshot and glibc changes - -* Thu Jan 5 2006 Tom Lane 5.0.18-1 -- Update to MySQL 5.0.18 - -* Thu Dec 15 2005 Tom Lane 5.0.16-4 -- fix my_config.h for ppc platforms - -* Thu Dec 15 2005 Tom Lane 5.0.16-3 -- my_config.h needs to guard against 64-bit platforms that also define the - 32-bit symbol - -* Wed Dec 14 2005 Tom Lane 5.0.16-2 -- oops, looks like we want uname -i not uname -m - -* Mon Dec 12 2005 Tom Lane 5.0.16-1 -- Update to MySQL 5.0.16 -- Add EXCEPTIONS-CLIENT license info to the shipped documentation -- Make my_config.h architecture-independent for multilib installs; - put the original my_config.h into my_config_$ARCH.h -- Add -fwrapv to CFLAGS so that gcc 4.1 doesn't break it - -* Fri Dec 09 2005 Jesse Keating -- rebuilt - -* Mon Nov 14 2005 Tom Lane 5.0.15-3 -- Make stop script wait for daemon process to disappear (bz#172426) - -* Wed Nov 9 2005 Tom Lane 5.0.15-2 -- Rebuild due to openssl library update. - -* Thu Nov 3 2005 Tom Lane 5.0.15-1 -- Update to MySQL 5.0.15 (scratch build for now) - -* Wed Oct 5 2005 Tom Lane 4.1.14-1 -- Update to MySQL 4.1.14 - -* Tue Aug 23 2005 Tom Lane 4.1.12-3 -- Use politically correct patch name. - -* Tue Jul 12 2005 Tom Lane 4.1.12-2 -- Fix buffer overflow newly exposed in isam code; it's the same issue - previously found in myisam, and not very exciting, but I'm tired of - seeing build warnings. - -* Mon Jul 11 2005 Tom Lane 4.1.12-1 -- Update to MySQL 4.1.12 (includes a fix for bz#158688, bz#158689) -- Extend mysql-test-ssl.patch to solve rpl_openssl test failure (bz#155850) -- Update mysql-lock-ssl.patch to match the upstream committed version -- Add --with-isam to re-enable the old ISAM table type, per bz#159262 -- Add dependency on openssl-devel per bz#159569 -- Remove manual.txt, as upstream decided not to ship it anymore; - it was redundant with the mysql.info file anyway. - -* Mon May 9 2005 Tom Lane 4.1.11-4 -- Include proper locking for OpenSSL in the server, per bz#155850 - -* Mon Apr 25 2005 Tom Lane 4.1.11-3 -- Enable openssl tests during build, per bz#155850 -- Might as well turn on --disable-dependency-tracking - -* Fri Apr 8 2005 Tom Lane 4.1.11-2 -- Avoid dependency on , cause it won't build anymore on ia64. - This is probably a cleaner solution for bz#143537, too. - -* Thu Apr 7 2005 Tom Lane 4.1.11-1 -- Update to MySQL 4.1.11 to fix bz#152911 as well as other issues -- Move perl-DBI, perl-DBD-MySQL dependencies to server package (bz#154123) -- Override configure thread library test to suppress HAVE_LINUXTHREADS check -- Fix BDB failure on s390x (bz#143537) -- At last we can enable "make test" on all arches - -* Fri Mar 11 2005 Tom Lane 4.1.10a-1 -- Update to MySQL 4.1.10a to fix security vulnerabilities (bz#150868, - for CAN-2005-0711, and bz#150871 for CAN-2005-0709, CAN-2005-0710). - -* Sun Mar 6 2005 Tom Lane 4.1.10-3 -- Fix package Requires: interdependencies. - -* Sat Mar 5 2005 Tom Lane 4.1.10-2 -- Need -fno-strict-aliasing in at least one place, probably more. -- Work around some C spec violations in mysql. - -* Fri Feb 18 2005 Tom Lane 4.1.10-1 -- Update to MySQL 4.1.10. - -* Sat Jan 15 2005 Tom Lane 4.1.9-1 -- Update to MySQL 4.1.9. 
- -* Wed Jan 12 2005 Tom Lane 4.1.7-10 -- Don't assume /etc/my.cnf will specify pid-file (bz#143724) - -* Wed Jan 12 2005 Tim Waugh 4.1.7-9 -- Rebuilt for new readline. - -* Tue Dec 21 2004 Tom Lane 4.1.7-8 -- Run make test on all archs except s390x (which seems to have a bdb issue) - -* Mon Dec 13 2004 Tom Lane 4.1.7-7 -- Suppress someone's silly idea that libtool overhead can be skipped - -* Sun Dec 12 2004 Tom Lane 4.1.7-6 -- Fix init script to not need a valid username for startup check (bz#142328) -- Fix init script to honor settings appearing in /etc/my.cnf (bz#76051) -- Enable SSL (bz#142032) - -* Thu Dec 2 2004 Tom Lane 4.1.7-5 -- Add a restorecon to keep the mysql.log file in the right context (bz#143887) - -* Tue Nov 23 2004 Tom Lane 4.1.7-4 -- Turn off old_passwords in default /etc/my.cnf file, for better compatibility - with mysql 3.x clients (per suggestion from Joe Orton). - -* Fri Oct 29 2004 Tom Lane 4.1.7-3 -- Handle ldconfig more cleanly (put a file in /etc/ld.so.conf.d/). - -* Thu Oct 28 2004 Tom Lane 4.1.7-2 -- rebuild in devel branch - -* Wed Oct 27 2004 Tom Lane 4.1.7-1 -- Update to MySQL 4.1.x. - -* Tue Oct 12 2004 Tom Lane 3.23.58-13 -- fix security issues CAN-2004-0835, CAN-2004-0836, CAN-2004-0837 - (bugs #135372, 135375, 135387) -- fix privilege escalation on GRANT ALL ON `Foo\_Bar` (CAN-2004-0957) - -* Wed Oct 06 2004 Tom Lane 3.23.58-12 -- fix multilib problem with mysqlbug and mysql_config -- adjust chkconfig priority per bug #128852 -- remove bogus quoting per bug #129409 (MySQL 4.0 has done likewise) -- add sleep to mysql.init restart(); may or may not fix bug #133993 - -* Tue Oct 05 2004 Tom Lane 3.23.58-11 -- fix low-priority security issues CAN-2004-0388, CAN-2004-0381, CAN-2004-0457 - (bugs #119442, 125991, 130347, 130348) -- fix bug with dropping databases under recent kernels (bug #124352) - -* Tue Jun 15 2004 Elliot Lee 3.23.58-10 -- rebuilt - -* Sat Apr 17 2004 Warren Togami 3.23.58-9 -- remove redundant INSTALL-SOURCE, manual.* -- compress manual.txt.bz2 -- BR time - -* Tue Mar 16 2004 Tom Lane 3.23.58-8 -- repair logfile attributes in %%files, per bug #102190 -- repair quoting problem in mysqlhotcopy, per bug #112693 -- repair missing flush in mysql_setpermission, per bug #113960 -- repair broken error message printf, per bug #115165 -- delete mysql user during uninstall, per bug #117017 -- rebuilt - -* Tue Mar 02 2004 Elliot Lee -- rebuilt - -* Tue Feb 24 2004 Tom Lane -- fix chown syntax in mysql.init -- rebuild - -* Fri Feb 13 2004 Elliot Lee -- rebuilt - -* Tue Nov 18 2003 Kim Ho 3.23.58-5 -- update mysql.init to use anonymous user (UNKNOWN_MYSQL_USER) for - pinging mysql server (#108779) - -* Mon Oct 27 2003 Kim Ho 3.23.58-4 -- update mysql.init to wait (max 10 seconds) for mysql server to - start (#58732) - -* Mon Oct 27 2003 Patrick Macdonald 3.23.58-3 -- re-enable Berkeley DB support (#106832) -- re-enable ia64 testing - -* Fri Sep 19 2003 Patrick Macdonald 3.23.58-2 -- rebuilt - -* Mon Sep 15 2003 Patrick Macdonald 3.23.58-1 -- upgrade to 3.23.58 for security fix - -* Tue Aug 26 2003 Patrick Macdonald 3.23.57-2 -- rebuilt - -* Wed Jul 02 2003 Patrick Macdonald 3.23.57-1 -- revert to prior version of MySQL due to license incompatibilities - with packages that link against the client. The MySQL folks are - looking into the issue. 
- -* Wed Jun 18 2003 Patrick Macdonald 4.0.13-4 -- restrict test on ia64 (temporary) - -* Wed Jun 04 2003 Elliot Lee 4.0.13-3 -- rebuilt - -* Thu May 29 2003 Patrick Macdonald 4.0.13-2 -- fix filter-requires-mysql.sh with less restrictive for mysql-bench - -* Wed May 28 2003 Patrick Macdonald 4.0.13-1 -- update for MySQL 4.0 -- back-level shared libraries available in mysqlclient10 package - -* Fri May 09 2003 Patrick Macdonald 3.23.56-2 -- add sql-bench package (#90110) - -* Wed Mar 19 2003 Patrick Macdonald 3.23.56-1 -- upgrade to 3.23.56 for security fixes -- remove patch for double-free (included in 3.23.56) - -* Tue Feb 18 2003 Patrick Macdonald 3.23.54a-11 -- enable thread safe client -- add patch for double free fix - -* Wed Jan 22 2003 Tim Powers -- rebuilt - -* Mon Jan 13 2003 Karsten Hopp 3.23.54a-9 -- disable checks on s390x - -* Sat Jan 4 2003 Jeff Johnson 3.23.54a-8 -- use internal dep generator. - -* Wed Jan 1 2003 Bill Nottingham 3.23.54a-7 -- fix mysql_config on hammer - -* Sun Dec 22 2002 Tim Powers 3.23.54a-6 -- don't use rpms internal dep generator - -* Tue Dec 17 2002 Elliot Lee 3.23.54a-5 -- Push it into the build system - -* Mon Dec 16 2002 Joe Orton 3.23.54a-4 -- upgrade to 3.23.54a for safe_mysqld fix - -* Thu Dec 12 2002 Joe Orton 3.23.54-3 -- upgrade to 3.23.54 for latest security fixes - -* Tue Nov 19 2002 Jakub Jelinek 3.23.52-5 -- Always include for errno -- Remove unpackaged files - -* Tue Nov 12 2002 Florian La Roche -- do not prereq userdel, not used at all - -* Mon Sep 9 2002 Trond Eivind Glomsrd 3.23.52-4 -- Use %%{_libdir} -- Add patch for x86-64 - -* Wed Sep 4 2002 Jakub Jelinek 3.23.52-3 -- rebuilt with gcc-3.2-7 - -* Thu Aug 29 2002 Trond Eivind Glomsrd 3.23.52-2 -- Add --enable-local-infile to configure - a new option - which doesn't default to the old behaviour (#72885) - -* Fri Aug 23 2002 Trond Eivind Glomsrd 3.23.52-1 -- 3.23.52. Fixes a minor security problem, various bugfixes. - -* Sat Aug 10 2002 Elliot Lee 3.23.51-5 -- rebuilt with gcc-3.2 (we hope) - -* Mon Jul 22 2002 Trond Eivind Glomsrd 3.23.51-4 -- rebuild - -* Thu Jul 18 2002 Trond Eivind Glomsrd 3.23.51-3 -- Fix #63543 and #63542 - -* Thu Jul 11 2002 Trond Eivind Glomsrd 3.23.51-2 -- Turn off bdb on PPC(#68591) -- Turn off the assembly optimizations, for safety. - -* Wed Jun 26 2002 Trond Eivind Glomsrd 3.23.51-1 -- Work around annoying auto* thinking this is a crosscompile -- 3.23.51 - -* Fri Jun 21 2002 Tim Powers -- automated rebuild - -* Mon Jun 10 2002 Trond Eivind Glomsrd 3.23.50-2 -- Add dependency on perl-DBI and perl-DBD-MySQL (#66349) - -* Thu May 30 2002 Trond Eivind Glomsrd 3.23.50-1 -- 3.23.50 - -* Thu May 23 2002 Tim Powers -- automated rebuild - -* Mon May 13 2002 Trond Eivind Glomsrd 3.23.49-4 -- Rebuild -- Don't set CXX to gcc, it doesn't work anymore -- Exclude Alpha - -* Mon Apr 8 2002 Trond Eivind Glomsrd 3.23.49-3 -- Add the various .cnf examples as doc files to mysql-server (#60349) -- Don't include manual.ps, it's just 200 bytes with a URL inside (#60349) -- Don't include random files in /usr/share/mysql (#60349) -- langify (#60349) - -* Thu Feb 21 2002 Trond Eivind Glomsrd 3.23.49-2 -- Rebuild - -* Sun Feb 17 2002 Trond Eivind Glomsrd 3.23.49-1 -- 3.23.49 - -* Thu Feb 14 2002 Trond Eivind Glomsrd 3.23.48-2 -- work around perl dependency bug. - -* Mon Feb 11 2002 Trond Eivind Glomsrd 3.23.48-1 -- 3.23.48 - -* Thu Jan 17 2002 Trond Eivind Glomsrd 3.23.47-4 -- Use kill, not mysqladmin, to flush logs and shut down. 
Thus, - an admin password can be set with no problems. -- Remove reload from init script - -* Wed Jan 16 2002 Trond Eivind Glomsrd 3.23.47-3 -- remove db3-devel from buildrequires, - MySQL has had its own bundled copy since the mid thirties - -* Sun Jan 6 2002 Trond Eivind Glomsrd 3.23.47-1 -- 3.23.47 -- Don't build for alpha, toolchain immature. - -* Mon Dec 3 2001 Trond Eivind Glomsrd 3.23.46-1 -- 3.23.46 -- use -fno-rtti and -fno-exceptions, and set CXX to increase stability. - Recommended by mysql developers. - -* Sun Nov 25 2001 Trond Eivind Glomsrd 3.23.45-1 -- 3.23.45 - -* Wed Nov 14 2001 Trond Eivind Glomsrd 3.23.44-2 -- centralize definition of datadir in the initscript (#55873) - -* Fri Nov 2 2001 Trond Eivind Glomsrd 3.23.44-1 -- 3.23.44 - -* Thu Oct 4 2001 Trond Eivind Glomsrd 3.23.43-1 -- 3.23.43 - -* Mon Sep 10 2001 Trond Eivind Glomsrd 3.23.42-1 -- 3.23.42 -- reenable innodb - -* Tue Aug 14 2001 Trond Eivind Glomsrd 3.23.41-1 -- 3.23.41 bugfix release -- disable innodb, to avoid the broken updates -- Use "mysqladmin flush_logs" instead of kill -HUP in logrotate - script (#51711) - -* Sat Jul 21 2001 Trond Eivind Glomsrd -- 3.23.40, bugfix release -- Add zlib-devel to buildrequires: - -* Fri Jul 20 2001 Trond Eivind Glomsrd -- BuildRequires-tweaking - -* Thu Jun 28 2001 Trond Eivind Glomsrd -- Reenable test, but don't run them for s390, s390x or ia64 -- Make /etc/my.cnf config(noplace). Same for /etc/logrotate.d/mysqld - -* Thu Jun 14 2001 Trond Eivind Glomsrd -- 3.23.29 -- enable innodb -- enable assembly again -- disable tests for now... - -* Tue May 15 2001 Trond Eivind Glomsrd -- 3.23.38 -- Don't use BDB on Alpha - no fast mutexes - -* Tue Apr 24 2001 Trond Eivind Glomsrd -- 3.23.37 -- Add _GNU_SOURCE to the compile flags - -* Wed Mar 28 2001 Trond Eivind Glomsrd -- Make it obsolete our 6.2 PowerTools packages -- 3.23.36 bugfix release - fixes some security issues - which didn't apply to our standard configuration -- Make "make test" part of the build process, except on IA64 - (it fails there) - -* Tue Mar 20 2001 Trond Eivind Glomsrd -- 3.23.35 bugfix release -- Don't delete the mysql user on uninstall - -* Tue Mar 13 2001 Trond Eivind Glomsrd -- 3.23.34a bugfix release - -* Wed Feb 7 2001 Trond Eivind Glomsrd -- added readline-devel to BuildRequires: - -* Tue Feb 6 2001 Trond Eivind Glomsrd -- small i18n-fixes to initscript (action needs $) - -* Tue Jan 30 2001 Trond Eivind Glomsrd -- make it shut down and rotate logs without using mysqladmin - (from #24909) - -* Mon Jan 29 2001 Trond Eivind Glomsrd -- conflict with "MySQL" - -* Tue Jan 23 2001 Trond Eivind Glomsrd -- improve gettextizing - -* Mon Jan 22 2001 Trond Eivind Glomsrd -- 3.23.32 -- fix logrotate script (#24589) - -* Wed Jan 17 2001 Trond Eivind Glomsrd -- gettextize -- move the items in Requires(post): to Requires: in preparation - for an errata for 7.0 when 3.23.31 is released -- 3.23.31 - -* Tue Jan 16 2001 Trond Eivind Glomsrd -- add the log file to the rpm database, and make it 0640 - (#24116) -- as above in logrotate script -- changes to the init sequence - put most of the data - in /etc/my.cnf instead of hardcoding in the init script -- use /var/run/mysqld/mysqld.pid instead of - /var/run/mysqld/pid -- use standard safe_mysqld -- shut down cleaner - -* Mon Jan 08 2001 Trond Eivind Glomsrd -- 3.23.30 -- do an explicit chmod on /var/lib/mysql in post, to avoid - any problems with broken permissons. 
There is a report - of rm not changing this on its own (#22989) - -* Mon Jan 01 2001 Trond Eivind Glomsrd -- bzipped source -- changed from 85 to 78 in startup, so it starts before - apache (which can use modules requiring mysql) - -* Wed Dec 27 2000 Trond Eivind Glomsrd -- 3.23.29a - -* Tue Dec 19 2000 Trond Eivind Glomsrd -- add requirement for new libstdc++, build for errata - -* Mon Dec 18 2000 Trond Eivind Glomsrd -- 3.23.29 - -* Mon Nov 27 2000 Trond Eivind Glomsrd -- 3.23.28 (gamma) -- remove old patches, as they are now upstreamed - -* Thu Nov 14 2000 Trond Eivind Glomsrd -- Add a requirement for a new glibc (#20735) -- build on IA64 - -* Wed Nov 1 2000 Trond Eivind Glomsrd -- disable more assembly - -* Wed Nov 1 2000 Jakub Jelinek -- fix mysql on SPARC (#20124) - -* Tue Oct 31 2000 Trond Eivind Glomsrd -- 3.23.27 - -* Wed Oct 25 2000 Trond Eivind Glomsrd -- add patch for fixing bogus aliasing in mysql from Jakub, - which should fix #18905 and #18620 - -* Mon Oct 23 2000 Trond Eivind Glomsrd -- check for negative niceness values, and negate it - if present (#17899) -- redefine optflags on IA32 FTTB - -* Wed Oct 18 2000 Trond Eivind Glomsrd -- 3.23.26, which among other fixes now uses mkstemp() - instead of tempnam(). -- revert changes made yesterday, the problem is now - isolated - -* Tue Oct 17 2000 Trond Eivind Glomsrd -- use the compat C++ compiler FTTB. Argh. -- add requirement of ncurses4 (see above) - -* Sun Oct 01 2000 Trond Eivind Glomsrd -- 3.23.25 -- fix shutdown problem (#17956) - -* Tue Sep 26 2000 Trond Eivind Glomsrd -- Don't try to include no-longer-existing PUBLIC file - as doc (#17532) - -* Thu Sep 12 2000 Trond Eivind Glomsrd -- rename config file to /etc/my.cnf, which is what - mysqld wants... doh. (#17432) -- include a changed safe_mysqld, so the pid file option - works. -- make mysql dir world readable to they can access the - mysql socket. (#17432) -- 3.23.24 - -* Wed Sep 06 2000 Trond Eivind Glomsrd -- 3.23.23 - -* Sun Aug 27 2000 Trond Eivind Glomsrd -- Add "|| :" to condrestart to avoid non-zero exit code - -* Thu Aug 24 2000 Trond Eivind Glomsrd -- it's mysql.com, not mysql.org and use correct path to - source (#16830) - -* Wed Aug 16 2000 Trond Eivind Glomsrd -- source file from /etc/rc.d, not /etc/rd.d. Doh. - -* Sun Aug 13 2000 Trond Eivind Glomsrd -- don't run ldconfig -n, it doesn't update ld.so.cache - (#16034) -- include some missing binaries -- use safe_mysqld to start the server (request from - mysql developers) - -* Sat Aug 05 2000 Bill Nottingham -- condrestart fixes - -* Mon Aug 01 2000 Trond Eivind Glomsrd -- 3.23.22. Disable the old patches, they're now in. 
- -* Thu Jul 27 2000 Trond Eivind Glomsrd -- bugfixes in the initscript -- move the .so link to the devel package - -* Wed Jul 19 2000 Trond Eivind Glomsrd -- rebuild due to glibc changes - -* Tue Jul 18 2000 Trond Eivind Glomsrd -- disable compiler patch -- don't include info directory file - -* Mon Jul 17 2000 Trond Eivind Glomsrd -- move back to /etc/rc.d/init.d - -* Fri Jul 14 2000 Trond Eivind Glomsrd -- more cleanups in initscript - -* Thu Jul 13 2000 Trond Eivind Glomsrd -- add a patch to work around compiler bug - (from monty@mysql.com) - -* Wed Jul 12 2000 Trond Eivind Glomsrd -- don't build the SQL daemon statically (glibc problems) -- fix the logrotate script - only flush log if mysql - is running -- change the reloading procedure -- remove icon - glint is obsolete a long time ago - -* Wed Jul 12 2000 Prospector -- automatic rebuild - -* Mon Jul 10 2000 Trond Eivind Glomsrd -- try the new compiler again -- build the SQL daemon statically -- add compile time support for complex charsets -- enable assembler -- more cleanups in initscript - -* Sun Jul 09 2000 Trond Eivind Glomsrd -- use old C++ compiler -- Exclusivearch x86 - -* Sat Jul 08 2000 Trond Eivind Glomsrd -- move .so files to devel package -- more cleanups -- exclude sparc for now - -* Wed Jul 05 2000 Trond Eivind Glomsrd -- 3.23.21 -- remove file from /etc/sysconfig -- Fix initscript a bit - initialization of databases doesn't - work yet -- specify the correct licenses -- include a /etc/my.conf (empty, FTTB) -- add conditional restart to spec file - -* Tue Jul 2 2000 Jakub Jelinek -- Rebuild with new C++ - -* Fri Jun 30 2000 Trond Eivind Glomsrd -- update to 3.23.20 -- use %%configure, %%makeinstall, %%{_tmppath}, %%{_mandir}, - %%{_infodir}, /etc/init.d -- remove the bench package -- change some of the descriptions a little bit -- fix the init script -- some compile fixes -- specify mysql user -- use mysql uid 27 (postgresql is 26) -- don't build on ia64 - -* Sat Feb 26 2000 Jos Vos -- Version 3.22.32 release XOS.1 for LinuX/OS 1.8.0 -- Upgrade from version 3.22.27 to 3.22.32. -- Do "make install" instead of "make install-strip", because "install -s" - now appears to fail on various scripts. Afterwards, strip manually. -- Reorganize subpackages, according to common Red Hat packages: the client - program and shared library become the base package and the server and - some accompanying files are now in a separate server package. The - server package implicitly requires the base package (shared library), - but we have added a manual require tag anyway (because of the shared - config file, and more). -- Rename the mysql-benchmark subpackage to mysql-bench. - -* Mon Jan 31 2000 Jos Vos -- Version 3.22.27 release XOS.2 for LinuX/OS 1.7.1 -- Add post(un)install scripts for updating ld.so.conf (client subpackage). - -* Sun Nov 21 1999 Jos Vos -- Version 3.22.27 release XOS.1 for LinuX/OS 1.7.0 -- Initial version. -- Some ideas borrowed from Red Hat Powertools 6.1, although this spec - file is a full rewrite from scratch. 
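The %install section above moves the real mysql_config and mysqlbug scripts into %{_libdir}/mysql and installs the compiled scriptstub binary under %{_bindir} in their place; the deleted scriptstub.c that follows is that wrapper, which re-execs the identically named script from LIBDIR. A minimal sketch of the resulting indirection, assuming an x86_64 lib64 layout (paths are illustrative only):

    # Layout after installing the resulting packages (assumed lib64 layout):
    #   /usr/bin/mysql_config          <- scriptstub, built with -DLIBDIR="/usr/lib64/mysql"
    #   /usr/lib64/mysql/mysql_config  <- the original script, moved aside by the spec
    # The stub execvp()s the same-named file under LIBDIR, so both invocations
    # end up running the same script:
    /usr/bin/mysql_config --cflags
    /usr/lib64/mysql/mysql_config --cflags
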
diff --git a/packaging/rpm-uln/scriptstub.c b/packaging/rpm-uln/scriptstub.c deleted file mode 100644 index de942c136e7..00000000000 --- a/packaging/rpm-uln/scriptstub.c +++ /dev/null @@ -1,32 +0,0 @@ -#include -#include -#include -#include - -/* Translate call of myself into call of same-named script in LIBDIR */ -/* The macro LIBDIR must be defined as a double-quoted string */ - -int main (int argc, char **argv) -{ - char *basename; - char *fullname; - char **newargs; - int i; - - basename = strrchr(argv[0], '/'); - if (basename) - basename++; - else - basename = argv[0]; - fullname = malloc(strlen(LIBDIR) + strlen(basename) + 2); - sprintf(fullname, "%s/%s", LIBDIR, basename); - newargs = malloc((argc+1) * sizeof(char *)); - newargs[0] = fullname; - for (i = 1; i < argc; i++) - newargs[i] = argv[i]; - newargs[argc] = NULL; - - execvp(fullname, newargs); - - return 1; -} diff --git a/plugin/handler_socket/plug.in b/plugin/handler_socket/plug.in deleted file mode 100644 index fd351dec98d..00000000000 --- a/plugin/handler_socket/plug.in +++ /dev/null @@ -1,20 +0,0 @@ -MYSQL_PLUGIN(handlersocket, [HandlerSocket], [HandlerSocket], [max]) -MYSQL_PLUGIN_DYNAMIC(handlersocket, handlersocket.la) -MYSQL_PLUGIN_ACTIONS(handlersocket, -[ - ac_mysql_source_dir='$(top_srcdir)' - MYSQL_INC="-I$ac_mysql_source_dir/sql" - MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir/include" - MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir/regex" - MYSQL_INC="$MYSQL_INC -I$ac_mysql_source_dir" - MYSQL_LIB='-L$(top_builddir)/libservices -lmysqlservices' - PLUGIN_DIR='$(pkglibdir)/plugin' - HANDLERSOCKET_SUBDIRS="libhsclient handlersocket client" - - AC_SUBST(MYSQL_INC) - AC_SUBST(MYSQL_CFLAGS) - AC_SUBST(MYSQL_LIB) - AC_SUBST(PLUGIN_DIR) - AC_SUBST(HANDLERSOCKET_SUBDIRS) - AC_CONFIG_FILES(plugin/handler_socket/perl-Net-HandlerSocket/Makefile.PL) -]) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh deleted file mode 100644 index 3a0f34ae44d..00000000000 --- a/scripts/make_binary_distribution.sh +++ /dev/null @@ -1,386 +0,0 @@ -#!/bin/sh -# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -############################################################################## -# -# This is a script to create a TAR or ZIP binary distribution out of a -# built source tree. The output file will be put at the top level of -# the source tree, as "mysql-....{tar.gz,zip}" -# -# Note that the structure created by this script is slightly different from -# what a normal "make install" would produce. No extra "mysql" sub directory -# will be created, i.e. no "$prefix/include/mysql", "$prefix/lib/mysql" or -# "$prefix/share/mysql". This is because the build system explicitly calls -# make with pkgdatadir=, etc. 
-# -# In GNU make/automake terms -# -# "pkglibdir" is set to the same as "libdir" -# "pkgincludedir" is set to the same as "includedir" -# "pkgdatadir" is set to the same as "datadir" -# "pkgplugindir" is set to "$pkglibdir/plugin" -# "pkgsuppdir" is set to "@prefix@/support-files", -# normally the same as "datadir" -# -# The temporary directory path given to "--tmp=" has to be -# absolute and with no spaces. -# -# Note that for best result, the original "make" should be done with -# the same arguments as used for "make install" below, especially the -# 'pkglibdir', as the RPATH should to be set correctly. -# -############################################################################## - -############################################################################## -# -# Read the command line arguments that control this script -# -############################################################################## - -machine=@MACHINE_TYPE@ -system=@SYSTEM_TYPE@ -SOURCE=`pwd` -CP="cp -p" -MV="mv" - -# There are platforms, notably OS X on Intel (x86 + x86_64), -# for which "uname" does not provide sufficient information. -# The value of CFLAGS as used during compilation is the most exact info -# we can get - after all, we care about _what_ we built, not _where_ we did it. -cflags="@CFLAGS@" - -STRIP=1 # Option ignored -SILENT=0 -MALLOC_LIB= -PLATFORM="" -TMP=/tmp -NEW_NAME="" # Final top directory and TAR package name -SUFFIX="" -SHORT_PRODUCT_TAG="" # If don't want server suffix in package name -NDBCLUSTER="" # Option ignored - -for arg do - case "$arg" in - --tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;; - --suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;; - --short-product-tag=*) SHORT_PRODUCT_TAG=`echo "$arg" | sed -e "s;--short-product-tag=;;"` ;; - --inject-malloc-lib=*) MALLOC_LIB=`echo "$arg" | sed -e 's;^[^=]*=;;'` ;; - --no-strip) STRIP=0 ;; - --machine=*) machine=`echo "$arg" | sed -e "s;--machine=;;"` ;; - --platform=*) PLATFORM=`echo "$arg" | sed -e "s;--platform=;;"` ;; - --silent) SILENT=1 ;; - --with-ndbcluster) NDBCLUSTER=1 ;; - *) - echo "Unknown argument '$arg'" - exit 1 - ;; - esac -done - -# ---------------------------------------------------------------------- -# Adjust "system" output from "uname" to be more human readable -# ---------------------------------------------------------------------- - -if [ x"$PLATFORM" = x"" ] ; then - # FIXME move this to the build tools - # Remove vendor from $system - system=`echo $system | sed -e 's/[a-z]*-\(.*\)/\1/g'` - - # Map OS names to "our" OS names (eg. darwin6.8 -> osx10.2) - system=`echo $system | sed -e 's/darwin6.*/osx10.2/g'` - system=`echo $system | sed -e 's/darwin7.*/osx10.3/g'` - system=`echo $system | sed -e 's/darwin8.*/osx10.4/g'` - system=`echo $system | sed -e 's/darwin9.*/osx10.5/g'` - system=`echo $system | sed -e 's/\(aix4.3\).*/\1/g'` - system=`echo $system | sed -e 's/\(aix5.1\).*/\1/g'` - system=`echo $system | sed -e 's/\(aix5.2\).*/\1/g'` - system=`echo $system | sed -e 's/\(aix5.3\).*/\1/g'` - system=`echo $system | sed -e 's/osf5.1b/tru64/g'` - system=`echo $system | sed -e 's/linux-gnu/linux/g'` - system=`echo $system | sed -e 's/solaris2.\([0-9]*\)/solaris\1/g'` - system=`echo $system | sed -e 's/sco3.2v\(.*\)/openserver\1/g'` -fi - -# Get the "machine", which really is the CPU architecture (including the size). 
-# The precedence is: -# 1) use an explicit argument, if given; -# 2) use platform-specific fixes, if there are any (see bug#37808); -# 3) stay with the default (determined during "configure", using predefined macros). - -if [ x"$MACHINE" != x"" ] ; then - machine=$MACHINE -else - case $system in - osx* ) - # Extract "XYZ" from CFLAGS "... -arch XYZ ...", or empty! - cflag_arch=`echo "$cflags" | sed -n -e 's=.* -arch \([^ ]*\) .*=\1=p'` - case "$cflag_arch" in - i386 ) case $system in - osx10.4 ) machine=i686 ;; # Used a different naming - * ) machine=x86 ;; - esac ;; - x86_64 ) machine=x86_64 ;; - ppc ) ;; # No treatment needed with PPC - ppc64 ) ;; - * ) # No matching compiler flag? "--platform" is needed - if [ x"$PLATFORM" != x"" ] ; then - : # See below: "$PLATFORM" will take precedence anyway - elif [ "$system" = "osx10.3" -a -z "$cflag_arch" ] ; then - : # Special case of OS X 10.3, which is PPC-32 only and doesn't use "-arch" - else - echo "On system '$system' only specific '-arch' values are expected." - echo "It is taken from the 'CFLAGS' whose value is:" - echo "$cflags" - echo "'-arch $cflag_arch' is unexpected, and no '--platform' was given: ABORT" - exit 1 - fi ;; - esac # "$cflag_arch" - ;; - esac # $system -fi - -# Combine OS and CPU to the "platform". Again, an explicit argument takes precedence. -if [ x"$PLATFORM" != x"" ] ; then - : -else - PLATFORM="$system-$machine" -fi - -# Print the platform name for build logs -echo "PLATFORM NAME: $PLATFORM" - -# Change the distribution to a long descriptive name -# For the cluster product, concentrate on the second part -VERSION_NAME=@VERSION@ -case $VERSION_NAME in - *-ndb-* ) VERSION_NAME=`echo $VERSION_NAME | sed -e 's/[.0-9]*-ndb-//'` ;; - *-MariaDB-* ) VERSION_NAME=`echo $VERSION_NAME | sed -e 's/-MariaDB//'` ;; -esac -if [ x"$SHORT_PRODUCT_TAG" != x"" ] ; then - NEW_NAME=mariadb-$SHORT_PRODUCT_TAG-$VERSION_NAME-$PLATFORM$SUFFIX -else - NEW_NAME=mariadb@MYSQL_SERVER_SUFFIX@-$VERSION_NAME-$PLATFORM$SUFFIX -fi - -# ---------------------------------------------------------------------- -# Define BASE, and remove the old BASE directory if any -# ---------------------------------------------------------------------- -BASE=$TMP/my_dist$SUFFIX -if [ -d $BASE ] ; then - rm -rf $BASE -fi - -# ---------------------------------------------------------------------- -# Find the TAR to use -# ---------------------------------------------------------------------- - -# This is needed to prefer GNU tar over platform tar because that can't -# always handle long filenames - -PATH_DIRS=`echo $PATH | \ - sed -e 's/^:/. /' -e 's/:$/ ./' -e 's/::/ . /g' -e 's/:/ /g' ` - -which_1 () -{ - for cmd - do - for d in $PATH_DIRS - do - for file in $d/$cmd - do - if [ -x $file -a ! -d $file ] ; then - echo $file - exit 0 - fi - done - done - done - exit 1 -} - -tar=`which_1 gnutar gtar` -if [ $? 
-ne 0 -o x"$tar" = x"" ] ; then - tar=tar -fi - - -############################################################################## -# -# Handle the Unix/Linux packaging using "make install" -# -############################################################################## - -# ---------------------------------------------------------------------- -# Terminate on any base level error -# ---------------------------------------------------------------------- -set -e - -# -# Check that the client is compiled with libmysqlclient.a -# -if test -f ./client/.libs/mysql -then - echo "" - echo "The MariaDB clients are compiled dynamically, which is not allowed for" - echo "a MariaDB binary tar file. Please configure with" - echo "--with-client-ldflags=-all-static and try again" - exit 1; -fi - -# ---------------------------------------------------------------------- -# Really ugly, one script, "mysql_install_db", needs prefix set to ".", -# i.e. makes access relative the current directory. This matches -# the documentation, so better not change this. And for another script, -# "mysql.server", we make some relative, others not. -# ---------------------------------------------------------------------- - -cd scripts -rm -f mysql_install_db mysqld_safe mysql_fix_privilege_tables -@MAKE@ mysql_install_db mysqld_safe mysql_fix_privilege_tables \ - prefix=. \ - bindir=./bin \ - sbindir=./bin \ - scriptdir=./bin \ - libexecdir=./bin \ - pkgdatadir=./share \ - localstatedir=./data -cd .. - -cd support-files -rm -f mysql.server -@MAKE@ mysql.server \ - bindir=./bin \ - sbindir=./bin \ - scriptdir=./bin \ - libexecdir=./bin \ - pkgdatadir=./share -cd .. - -# ---------------------------------------------------------------------- -# Do a install that we later are to pack. Use the same paths as in -# the build for the relevant directories. -# ---------------------------------------------------------------------- -@MAKE@ DESTDIR=$BASE install \ - libexecdir=@prefix@/libexec \ - pkglibdir=@pkglibdir@ \ - pkgincludedir=@pkgincludedir@ \ - pkgdatadir=@pkgdatadir@ \ - pkgplugindir=@pkgplugindir@ \ - pkgsuppdir=@pkgsuppdir@ \ - mandir=@mandir@ \ - infodir=@infodir@ - -# ---------------------------------------------------------------------- -# Rename top directory, and set DEST to the new directory -# ---------------------------------------------------------------------- -mv $BASE@prefix@ $BASE/$NEW_NAME -DEST=$BASE/$NEW_NAME - -# ---------------------------------------------------------------------- -# If we compiled with gcc, copy libgcc.a to the dist as libmygcc.a -# ---------------------------------------------------------------------- -if [ x"@GXX@" = x"yes" ] ; then - gcclib=`@CC@ @CFLAGS@ --print-libgcc-file 2>/dev/null` || true - if [ -z "$gcclib" ] ; then - echo "Warning: Compiler doesn't tell libgcc.a!" - elif [ -f "$gcclib" ] ; then - $CP $gcclib $DEST/lib/libmygcc.a - else - echo "Warning: Compiler result '$gcclib' not found / no file!" 
- fi -fi - -# If requested, add a malloc library .so into pkglibdir for use -# by mysqld_safe -if [ -n "$MALLOC_LIB" ]; then - cp "$MALLOC_LIB" "$DEST/lib/" -fi - -# Note, no legacy "safe_mysqld" link to "mysqld_safe" in 5.1 - -# Copy readme and license files -cp README Docs/INSTALL-BINARY TODO CREDITS $DEST/ -if [ -f COPYING -a -f EXCEPTIONS-CLIENT ] ; then - cp COPYING EXCEPTIONS-CLIENT $DEST/ -elif [ -f LICENSE.mysql ] ; then - cp LICENSE.mysql $DEST/ -else - echo "ERROR: no license files found" - exit 1 -fi - -# FIXME should be handled by make file, and to other dir -mkdir -p $DEST/bin $DEST/support-files -cp scripts/mysqlaccess.conf $DEST/bin/ -cp support-files/magic $DEST/support-files/ - -# Create empty data directories, set permission (FIXME why?) -mkdir $DEST/data $DEST/data/mysql $DEST/data/test -chmod o-rwx $DEST/data $DEST/data/mysql $DEST/data/test - -# Remove not needed files -rm $DEST/share/mysql/errmsg.txt - -# Remove NDB files -rm -f $DEST/share/mysql/ndb-config-2-node.ini \ - $DEST/share/mysql/config* - -# -# Move things to make them easier to find in tar installation -# - -# The following test is needed if the original configure was done with -# something like --libexecdir=/usr/local/mysql/bin -if test -f $DEST/libexec/mysqld -then - mv $DEST/libexec/* $DEST/bin - rmdir $DEST/libexec -fi -mv $DEST/share/man $DEST -mv $DEST/share/mysql/binary-configure $DEST/configure -mv $DEST/share/mysql/*.sql $DEST/share -mv $DEST/share/mysql/*.cnf $DEST/share/mysql/*.server $DEST/share/mysql/mysql-log-rotate $DEST/support-files - -# -# Move some scripts that are only run once to 'scripts' directory -# but add symbolic links instead to old place for compatibility -# -mkdir $DEST/scripts -for i in mysql_secure_installation mysql_fix_extensions mysql_fix_privilege_tables mysql_install_db mytop -do - mv $DEST/bin/$i $DEST/scripts - ln -s "../scripts/$i" $DEST/bin/$i -done - -# ---------------------------------------------------------------------- -# Create the result tar file -# ---------------------------------------------------------------------- - -echo "Using $tar to create archive" -OPT=cvf -if [ x$SILENT = x1 ] ; then - OPT=cf -fi - -echo "Creating and compressing archive" -rm -f $NEW_NAME.tar.gz -(cd $BASE ; $tar $OPT - $NEW_NAME) | gzip -9 > $NEW_NAME.tar.gz -echo "$NEW_NAME.tar.gz created" - -echo "Removing temporary directory" -rm -rf $BASE -exit 0 - diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index 801fdae5565..52dae44c61f 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -417,7 +417,7 @@ mysqld_install_cmd_line() { "$mysqld_bootstrap" $defaults "$mysqld_opt" --bootstrap \ "--basedir=$basedir" "--datadir=$ldata" --log-warnings=0 \ - --loose-skip-ndbcluster $args --max_allowed_packet=8M \ + $args --max_allowed_packet=8M \ --net_buffer_length=16K } diff --git a/sql-bench/server-cfg.sh b/sql-bench/server-cfg.sh index 4cc39481523..892d301c70d 100644 --- a/sql-bench/server-cfg.sh +++ b/sql-bench/server-cfg.sh @@ -191,14 +191,6 @@ sub new { $self->{'transactions'} = 1; # Transactions enabled } - if (defined($main::opt_create_options) && - $main::opt_create_options =~ /engine=ndb/i) - { - $self->{'transactions'} = 1; # Transactions enabled - $limits{'max_columns'} = 90; # Max number of columns in table - $limits{'max_tables'} = 32; # No comments - $limits{'max_temporary_tables'}= $limits{"max_tables"}; - } if (defined($main::opt_create_options) && $main::opt_create_options =~ /engine=bdb/i) { diff --git a/sql/ha_ndbcluster.cc 
b/sql/ha_ndbcluster.cc deleted file mode 100644 index 9524a0366d3..00000000000 --- a/sql/ha_ndbcluster.cc +++ /dev/null @@ -1,11060 +0,0 @@ -/* Copyright (c) 2004, 2011, Oracle and/or its affiliates. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -/** - @file - - @brief - This file defines the NDB Cluster handler: the interface between - MySQL and NDB Cluster -*/ - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "sql_priv.h" -#include "unireg.h" // REQUIRED: for other includes -#include "sql_table.h" // build_table_filename, - // tablename_to_filename, - // filename_to_tablename -#include "sql_partition.h" // HA_CAN_*, partition_info, part_id_range -#include "sql_base.h" // close_cached_tables -#include "discover.h" // readfrm -#include "sql_acl.h" // wild_case_compare -#include "rpl_mi.h" -#include "transaction.h" - -/* - There is an incompatibility between GNU ar and the Solaris linker - which makes the Solaris linker return an elf error when compiling - without NDB support (which makes libndb.a an empty library). - To avoid this we add a dummy declaration of a static variable - which makes us avoid this bug. -*/ -int ha_ndb_dummy; -#include -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -#include "ha_ndbcluster.h" -#include -#include "ha_ndbcluster_cond.h" -#include <../util/Bitmask.hpp> -#include - -#include "ha_ndbcluster_binlog.h" -#include "ha_ndbcluster_tables.h" - -#include "sql_plugin.h" -#include "probes_mysql.h" -#include "sql_show.h" // init_fill_schema_files_row, - // schema_table_store_record -#include "sql_test.h" // print_where - -#ifdef ndb_dynamite -#undef assert -#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0) -#endif - -// ndb interface initialization/cleanup functions -extern "C" void ndb_init_internal(); -extern "C" void ndb_end_internal(); - -static const int DEFAULT_PARALLELISM= 0; -static const ha_rows DEFAULT_AUTO_PREFETCH= 32; -static const ulong ONE_YEAR_IN_SECONDS= (ulong) 3600L*24L*365L; - -ulong opt_ndb_extra_logging; -static ulong opt_ndb_cache_check_time; -static char* opt_ndb_connectstring; -static char* opt_ndb_mgmd_host; -static uint opt_ndb_nodeid; - - -static MYSQL_THDVAR_UINT( - autoincrement_prefetch_sz, /* name */ - PLUGIN_VAR_RQCMDARG, - "Specify number of autoincrement values that are prefetched.", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 1, /* min */ - 256, /* max */ - 0 /* block */ -); - - -static MYSQL_THDVAR_BOOL( - force_send, /* name */ - PLUGIN_VAR_OPCMDARG, - "Force send of buffers to ndb immediately without waiting for " - "other threads.", - NULL, /* check func. */ - NULL, /* update func. 
*/ - 1 /* default */ -); - - -static MYSQL_THDVAR_BOOL( - use_exact_count, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use exact records count during query planning and for fast " - "select count(*), disable for faster queries.", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ -); - - -static MYSQL_THDVAR_BOOL( - use_transactions, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use transactions for large inserts, if enabled then large " - "inserts will be split into several smaller transactions", - NULL, /* check func. */ - NULL, /* update func. */ - 1 /* default */ -); - - -static MYSQL_THDVAR_BOOL( - use_copying_alter_table, /* name */ - PLUGIN_VAR_OPCMDARG, - "Force ndbcluster to always copy tables at alter table (should " - "only be used if on-line alter table fails).", - NULL, /* check func. */ - NULL, /* update func. */ - 0 /* default */ -); - - -static MYSQL_THDVAR_UINT( - optimized_node_selection, /* name */ - PLUGIN_VAR_OPCMDARG, - "Select nodes for transactions in a more optimal way.", - NULL, /* check func. */ - NULL, /* update func. */ - 3, /* default */ - 0, /* min */ - 3, /* max */ - 0 /* block */ -); - - -static MYSQL_THDVAR_BOOL( - index_stat_enable, /* name */ - PLUGIN_VAR_OPCMDARG, - "Use ndb index statistics in query optimization.", - NULL, /* check func. */ - NULL, /* update func. */ - FALSE /* default */ -); - - -static MYSQL_THDVAR_ULONG( - index_stat_cache_entries, /* name */ - PLUGIN_VAR_NOCMDARG, - "", - NULL, /* check func. */ - NULL, /* update func. */ - 32, /* default */ - 0, /* min */ - ULONG_MAX, /* max */ - 0 /* block */ -); - - -static MYSQL_THDVAR_ULONG( - index_stat_update_freq, /* name */ - PLUGIN_VAR_NOCMDARG, - "", - NULL, /* check func. */ - NULL, /* update func. */ - 20, /* default */ - 0, /* min */ - ULONG_MAX, /* max */ - 0 /* block */ -); - -// Default value for parallelism -static const int parallelism= 0; - -// Default value for max number of transactions -// createable against NDB from this handler -static const int max_transactions= 3; // should really be 2 but there is a transaction to much allocated when loch table is used - -static uint ndbcluster_partition_flags(); -static uint ndbcluster_alter_table_flags(uint flags); -static int ndbcluster_init(void *); -static int ndbcluster_end(handlerton *hton, ha_panic_function flag); -static bool ndbcluster_show_status(handlerton *hton, THD*, - stat_print_fn *, - enum ha_stat_type); -static int ndbcluster_alter_tablespace(handlerton *hton, - THD* thd, - st_alter_tablespace *info); -static int ndbcluster_fill_is_table(handlerton *hton, - THD *thd, - TABLE_LIST *tables, - COND *cond, - enum enum_schema_tables); -static int ndbcluster_fill_files_table(handlerton *hton, - THD *thd, - TABLE_LIST *tables, - COND *cond); - -handlerton *ndbcluster_hton; - -static handler *ndbcluster_create_handler(handlerton *hton, - TABLE_SHARE *table, - MEM_ROOT *mem_root) -{ - return new (mem_root) ha_ndbcluster(hton, table); -} - -static uint ndbcluster_partition_flags() -{ - return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY | - HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION); -} - -static uint ndbcluster_alter_table_flags(uint flags) -{ - if (flags & ALTER_DROP_PARTITION) - return 0; - else - return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX | - HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX | - HA_PARTITION_FUNCTION_SUPPORTED); - -} - -#define NDB_AUTO_INCREMENT_RETRIES 10 - -#define ERR_PRINT(err) \ - DBUG_PRINT("error", ("%d message: %s", err.code, err.message)) - -#define ERR_RETURN(err) \ -{ \ 
- const NdbError& tmp= err; \ - set_ndb_err(current_thd, tmp); \ - DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ -} - -#define ERR_RETURN_PREPARE(rc, err) \ -{ \ - const NdbError& tmp= err; \ - set_ndb_err(current_thd, tmp); \ - rc= ndb_to_mysql_error(&tmp); \ -} - -#define ERR_BREAK(err, code) \ -{ \ - const NdbError& tmp= err; \ - set_ndb_err(current_thd, tmp); \ - code= ndb_to_mysql_error(&tmp); \ - break; \ -} - -static int ndbcluster_inited= 0; -int ndbcluster_terminating= 0; - -static Ndb* g_ndb= NULL; -Ndb_cluster_connection* g_ndb_cluster_connection= NULL; -uchar g_node_id_map[max_ndb_nodes]; - -/// Handler synchronization -mysql_mutex_t ndbcluster_mutex; - -/// Table lock handling -HASH ndbcluster_open_tables; - -static uchar *ndbcluster_get_key(NDB_SHARE *share, size_t *length, - my_bool not_used __attribute__((unused))); -#ifdef HAVE_NDB_BINLOG -static int rename_share(NDB_SHARE *share, const char *new_key); -#endif -static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const NDBTAB *, - struct Ndb_statistics *); - - -// Util thread variables -pthread_t ndb_util_thread; -int ndb_util_thread_running= 0; -mysql_mutex_t LOCK_ndb_util_thread; -mysql_cond_t COND_ndb_util_thread; -mysql_cond_t COND_ndb_util_ready; -pthread_handler_t ndb_util_thread_func(void *arg); - -/** - Dummy buffer to read zero pack_length fields - which are mapped to 1 char. -*/ -static uint32 dummy_buf; - -/** - Stats that can be retrieved from ndb. -*/ - -struct Ndb_statistics { - Uint64 row_count; - Uint64 commit_count; - Uint64 row_size; - Uint64 fragment_memory; -}; - -/* Status variables shown with 'show status like 'Ndb%' */ - -static long ndb_cluster_node_id= 0; -static const char * ndb_connected_host= 0; -static long ndb_connected_port= 0; -static long ndb_number_of_replicas= 0; -long ndb_number_of_data_nodes= 0; -long ndb_number_of_ready_data_nodes= 0; -long ndb_connect_count= 0; - -static int update_status_variables(Ndb_cluster_connection *c) -{ - ndb_cluster_node_id= c->node_id(); - ndb_connected_port= c->get_connected_port(); - ndb_connected_host= c->get_connected_host(); - ndb_number_of_replicas= 0; - ndb_number_of_ready_data_nodes= c->get_no_ready(); - ndb_number_of_data_nodes= c->no_db_nodes(); - ndb_connect_count= c->get_connect_count(); - return 0; -} - -SHOW_VAR ndb_status_variables[]= { - {"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG}, - {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR}, - {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG}, -// {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG}, - {"number_of_data_nodes",(char*) &ndb_number_of_data_nodes, SHOW_LONG}, - {NullS, NullS, SHOW_LONG} -}; - -/* - Error handling functions -*/ - -/* Note for merge: old mapping table, moved to storage/ndb/ndberror.c */ - -static int ndb_to_mysql_error(const NdbError *ndberr) -{ - /* read the mysql mapped error code */ - int error= ndberr->mysql_code; - - switch (error) - { - /* errors for which we do not add warnings, just return mapped error code - */ - case HA_ERR_NO_SUCH_TABLE: - case HA_ERR_KEY_NOT_FOUND: - return error; - - /* Mapping missing, go with the ndb error code*/ - case -1: - error= ndberr->code; - break; - /* Mapping exists, go with the mapped code */ - default: - break; - } - - /* - Push the NDB error message as warning - - Used to be able to use SHOW WARNINGS toget more info on what the error is - - Used by replication to see if the error was temporary - */ - if (ndberr->status == NdbError::TemporaryError) - 
push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG), - ndberr->code, ndberr->message, "NDB"); - else - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndberr->code, ndberr->message, "NDB"); - return error; -} - -int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans) -{ - if (trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - h->m_force_send) == -1) - return -1; - - const NdbError &err= trans->getNdbError(); - if (err.classification != NdbError::NoError && - err.classification != NdbError::ConstraintViolation && - err.classification != NdbError::NoDataFound) - return -1; - - return 0; -} - -inline -int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans, - bool force_release) -{ - h->release_completed_operations(trans, force_release); - return h->m_ignore_no_key ? - execute_no_commit_ignore_no_key(h,trans) : - trans->execute(NdbTransaction::NoCommit, - NdbOperation::AbortOnError, - h->m_force_send); -} - -inline -int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) -{ - return trans->execute(NdbTransaction::Commit, - NdbOperation::AbortOnError, - h->m_force_send); -} - -inline -int execute_commit(THD *thd, NdbTransaction *trans) -{ - return trans->execute(NdbTransaction::Commit, - NdbOperation::AbortOnError, - THDVAR(thd, force_send)); -} - -inline -int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans, - bool force_release) -{ - h->release_completed_operations(trans, force_release); - return trans->execute(NdbTransaction::NoCommit, - NdbOperation::AO_IgnoreError, - h->m_force_send); -} - -/* - Place holder for ha_ndbcluster thread specific data -*/ -typedef struct st_thd_ndb_share { - const void *key; - struct Ndb_local_table_statistics stat; -} THD_NDB_SHARE; -static -uchar *thd_ndb_share_get_key(THD_NDB_SHARE *thd_ndb_share, size_t *length, - my_bool not_used __attribute__((unused))) -{ - *length= sizeof(thd_ndb_share->key); - return (uchar*) &thd_ndb_share->key; -} - -Thd_ndb::Thd_ndb() -{ - ndb= new Ndb(g_ndb_cluster_connection, ""); - lock_count= 0; - start_stmt_count= 0; - count= 0; - trans= NULL; - m_error= FALSE; - m_error_code= 0; - query_state&= NDB_QUERY_NORMAL; - options= 0; - (void) my_hash_init(&open_tables, &my_charset_bin, 5, 0, 0, - (my_hash_get_key)thd_ndb_share_get_key, 0, 0); -} - -Thd_ndb::~Thd_ndb() -{ - if (ndb) - { -#ifndef DBUG_OFF - Ndb::Free_list_usage tmp; - tmp.m_name= 0; - while (ndb->get_free_list_usage(&tmp)) - { - uint leaked= (uint) tmp.m_created - tmp.m_free; - if (leaked) - fprintf(stderr, "NDB: Found %u %s%s that %s not been released\n", - leaked, tmp.m_name, - (leaked == 1)?"":"'s", - (leaked == 1)?"has":"have"); - } -#endif - delete ndb; - ndb= NULL; - } - changed_tables.empty(); - my_hash_free(&open_tables); -} - -void -Thd_ndb::init_open_tables() -{ - count= 0; - m_error= FALSE; - m_error_code= 0; - my_hash_reset(&open_tables); -} - -inline -Ndb *ha_ndbcluster::get_ndb() -{ - return get_thd_ndb(current_thd)->ndb; -} - -/* - * manage uncommitted insert/deletes during transactio to get records correct - */ - -void ha_ndbcluster::set_rec_per_key() -{ - DBUG_ENTER("ha_ndbcluster::get_status_const"); - for (uint i=0 ; i < table_share->keys ; i++) - { - table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; - } - DBUG_VOID_RETURN; -} - -ha_rows ha_ndbcluster::records() -{ - ha_rows retval; - DBUG_ENTER("ha_ndbcluster::records"); - struct 
Ndb_local_table_statistics *local_info= m_table_info; - DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - local_info->no_uncommitted_rows_count)); - - Ndb *ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); - struct Ndb_statistics stat; - if (ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat) == 0) - { - retval= stat.row_count; - } - else - { - DBUG_RETURN(HA_POS_ERROR); - } - - THD *thd= current_thd; - if (get_thd_ndb(thd)->m_error) - local_info->no_uncommitted_rows_count= 0; - - DBUG_RETURN(retval + local_info->no_uncommitted_rows_count); -} - -int ha_ndbcluster::records_update() -{ - if (m_ha_not_exact_count) - return 0; - DBUG_ENTER("ha_ndbcluster::records_update"); - int result= 0; - - struct Ndb_local_table_statistics *local_info= m_table_info; - DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - local_info->no_uncommitted_rows_count)); - { - Ndb *ndb= get_ndb(); - struct Ndb_statistics stat; - if (ndb->setDatabaseName(m_dbname)) - { - return my_errno= HA_ERR_OUT_OF_MEM; - } - result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat); - if (result == 0) - { - stats.mean_rec_length= stat.row_size; - stats.data_file_length= stat.fragment_memory; - local_info->records= stat.row_count; - } - } - { - THD *thd= current_thd; - if (get_thd_ndb(thd)->m_error) - local_info->no_uncommitted_rows_count= 0; - } - if (result == 0) - stats.records= local_info->records+ local_info->no_uncommitted_rows_count; - DBUG_RETURN(result); -} - -void ha_ndbcluster::no_uncommitted_rows_execute_failure() -{ - if (m_ha_not_exact_count) - return; - DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); - get_thd_ndb(current_thd)->m_error= TRUE; - get_thd_ndb(current_thd)->m_error_code= 0; - DBUG_VOID_RETURN; -} - -void ha_ndbcluster::no_uncommitted_rows_update(int c) -{ - if (m_ha_not_exact_count) - return; - DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); - struct Ndb_local_table_statistics *local_info= m_table_info; - local_info->no_uncommitted_rows_count+= c; - DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - local_info->no_uncommitted_rows_count)); - DBUG_VOID_RETURN; -} - -void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) -{ - if (m_ha_not_exact_count) - return; - DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); - Thd_ndb *thd_ndb= get_thd_ndb(thd); - thd_ndb->count++; - thd_ndb->m_error= FALSE; - DBUG_VOID_RETURN; -} - -/* - Sets the latest ndb error code on the thd_ndb object such that it - can be retrieved later to know which ndb error caused the handler - error. -*/ -static void set_ndb_err(THD *thd, const NdbError &err) -{ - DBUG_ENTER("set_ndb_err"); - ERR_PRINT(err); - - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd_ndb == NULL) - DBUG_VOID_RETURN; -#ifdef NOT_YET - /* - Check if error code is overwritten, in this case the original - failure cause will be lost. E.g. if 4350 error is given. So - push a warning so that it can be detected which is the root - error cause. 
- */ - if (thd_ndb->m_query_id == thd->query_id && - thd_ndb->m_error_code != 0 && - thd_ndb->m_error_code != err.code) - { - char buf[FN_REFLEN]; - ndb_error_string(thd_ndb->m_error_code, buf, sizeof(buf)); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - thd_ndb->m_error_code, buf, "NDB"); - } -#endif - thd_ndb->m_query_id= thd->query_id; - thd_ndb->m_error_code= err.code; - DBUG_VOID_RETURN; -} - -int ha_ndbcluster::ndb_err(NdbTransaction *trans) -{ - THD *thd= current_thd; - int res; - NdbError err= trans->getNdbError(); - DBUG_ENTER("ndb_err"); - - set_ndb_err(thd, err); - - switch (err.classification) { - case NdbError::SchemaError: - { - // TODO perhaps we need to do more here, invalidate also in the cache - m_table->setStatusInvalid(); - /* Close other open handlers not used by any thread */ - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= m_dbname; - table_list.alias= table_list.table_name= m_tabname; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - break; - } - default: - break; - } - res= ndb_to_mysql_error(&err); - DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", - err.code, res)); - if (res == HA_ERR_FOUND_DUPP_KEY) - { - char *error_data= err.details; - uint dupkey= MAX_KEY; - - for (uint i= 0; i < MAX_KEY; i++) - { - if (m_index[i].type == UNIQUE_INDEX || - m_index[i].type == UNIQUE_ORDERED_INDEX) - { - const NDBINDEX *unique_index= - (const NDBINDEX *) m_index[i].unique_index; - if (unique_index && - (char *) unique_index->getObjectId() == error_data) - { - dupkey= i; - break; - } - } - } - if (m_rows_to_insert == 1) - { - /* - We can only distinguish between primary and non-primary - violations here, so we need to return MAX_KEY for non-primary - to signal that key is unknown - */ - m_dupkey= err.code == 630 ? table_share->primary_key : dupkey; - } - else - { - /* We are batching inserts, offending key is not available */ - m_dupkey= (uint) -1; - } - } - DBUG_RETURN(res); -} - - -/** - Override the default get_error_message in order to add the - error message of NDB . -*/ - -bool ha_ndbcluster::get_error_message(int error, - String *buf) -{ - DBUG_ENTER("ha_ndbcluster::get_error_message"); - DBUG_PRINT("enter", ("error: %d", error)); - - Ndb *ndb= check_ndb_in_thd(current_thd); - if (!ndb) - DBUG_RETURN(FALSE); - - const NdbError err= ndb->getNdbError(error); - bool temporary= err.status==NdbError::TemporaryError; - buf->set(err.message, strlen(err.message), &my_charset_bin); - DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary)); - DBUG_RETURN(temporary); -} - - -#ifndef DBUG_OFF -/** - Check if type is supported by NDB. 
-*/ - -static bool ndb_supported_type(enum_field_types type) -{ - switch (type) { - case MYSQL_TYPE_TINY: - case MYSQL_TYPE_SHORT: - case MYSQL_TYPE_LONG: - case MYSQL_TYPE_INT24: - case MYSQL_TYPE_LONGLONG: - case MYSQL_TYPE_FLOAT: - case MYSQL_TYPE_DOUBLE: - case MYSQL_TYPE_DECIMAL: - case MYSQL_TYPE_NEWDECIMAL: - case MYSQL_TYPE_TIMESTAMP: - case MYSQL_TYPE_DATETIME: - case MYSQL_TYPE_DATE: - case MYSQL_TYPE_NEWDATE: - case MYSQL_TYPE_TIME: - case MYSQL_TYPE_YEAR: - case MYSQL_TYPE_STRING: - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_VARCHAR: - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_SET: - case MYSQL_TYPE_BIT: - case MYSQL_TYPE_GEOMETRY: - return TRUE; - case MYSQL_TYPE_NULL: - break; - } - return FALSE; -} -#endif /* !DBUG_OFF */ - - -/** - Check if MySQL field type forces var part in ndb storage. -*/ -static bool field_type_forces_var_part(enum_field_types type) -{ - switch (type) { - case MYSQL_TYPE_VAR_STRING: - case MYSQL_TYPE_VARCHAR: - return TRUE; - case MYSQL_TYPE_TINY_BLOB: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - case MYSQL_TYPE_GEOMETRY: - return FALSE; - default: - return FALSE; - } -} - -/** - Instruct NDB to set the value of the hidden primary key. -*/ - -bool ha_ndbcluster::set_hidden_key(NdbOperation *ndb_op, - uint fieldnr, const uchar *field_ptr) -{ - DBUG_ENTER("set_hidden_key"); - DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr) != 0); -} - - -/** - Instruct NDB to set the value of one primary key attribute. -*/ - -int ha_ndbcluster::set_ndb_key(NdbOperation *ndb_op, Field *field, - uint fieldnr, const uchar *field_ptr) -{ - uint32 pack_len= field->pack_length(); - DBUG_ENTER("set_ndb_key"); - DBUG_PRINT("enter", ("%d: %s, ndb_type: %u, len=%d", - fieldnr, field->field_name, field->type(), - pack_len)); - DBUG_DUMP("key", field_ptr, pack_len); - - DBUG_ASSERT(ndb_supported_type(field->type())); - DBUG_ASSERT(! (field->flags & BLOB_FLAG)); - // Common implementation for most field types - DBUG_RETURN(ndb_op->equal(fieldnr, (char*) field_ptr, pack_len) != 0); -} - - -/** - Instruct NDB to set the value of one attribute. -*/ - -int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, - uint fieldnr, int row_offset, - bool *set_blob_value) -{ - const uchar* field_ptr= field->ptr + row_offset; - uint32 pack_len= field->pack_length(); - DBUG_ENTER("set_ndb_value"); - DBUG_PRINT("enter", ("%d: %s type: %u len=%d is_null=%s", - fieldnr, field->field_name, field->type(), - pack_len, field->is_null(row_offset) ? "Y" : "N")); - DBUG_DUMP("value", field_ptr, pack_len); - - DBUG_ASSERT(ndb_supported_type(field->type())); - { - // ndb currently does not support size 0 - uint32 empty_field; - if (pack_len == 0) - { - pack_len= sizeof(empty_field); - field_ptr= (uchar *)&empty_field; - if (field->is_null(row_offset)) - empty_field= 0; - else - empty_field= 1; - } - if (! 
(field->flags & BLOB_FLAG)) - { - if (field->type() != MYSQL_TYPE_BIT) - { - if (field->is_null(row_offset)) - { - DBUG_PRINT("info", ("field is NULL")); - // Set value to NULL - DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL) != 0)); - } - // Common implementation for most field types - DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr) != 0); - } - else // if (field->type() == MYSQL_TYPE_BIT) - { - longlong bits= field->val_int(); - - // Round up bit field length to nearest word boundry - pack_len= ((pack_len + 3) >> 2) << 2; - DBUG_ASSERT(pack_len <= 8); - if (field->is_null(row_offset)) - // Set value to NULL - DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL) != 0)); - DBUG_PRINT("info", ("bit field")); - DBUG_DUMP("value", (uchar*)&bits, pack_len); -#ifdef WORDS_BIGENDIAN - /* store lsw first */ - bits = ((bits >> 32) & 0x00000000FFFFFFFFLL) - | ((bits << 32) & 0xFFFFFFFF00000000LL); -#endif - DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits) != 0); - } - } - // Blob type - NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr); - if (ndb_blob != NULL) - { - if (field->is_null(row_offset)) - DBUG_RETURN(ndb_blob->setNull() != 0); - - Field_blob *field_blob= (Field_blob*)field; - - // Get length and pointer to data - uint32 blob_len= field_blob->get_length(field_ptr); - uchar* blob_ptr= NULL; - field_blob->get_ptr(&blob_ptr); - - // Looks like NULL ptr signals length 0 blob - if (blob_ptr == NULL) { - DBUG_ASSERT(blob_len == 0); - blob_ptr= (uchar*)""; - } - - DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u", - (long) blob_ptr, blob_len)); - DBUG_DUMP("value", blob_ptr, MY_MIN(blob_len, 26)); - - if (set_blob_value) - *set_blob_value= TRUE; - // No callback needed to write value - DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0); - } - DBUG_RETURN(1); - } -} - - -NdbBlob::ActiveHook g_get_ndb_blobs_value; - -/** - Callback to read all blob values. - - not done in unpack_record because unpack_record is valid - after execute(Commit) but reading blobs is not - - may only generate read operations; they have to be executed - somewhere before the data is available - - due to single buffer for all blobs, we let the last blob - process all blobs (last so that all are active) - - null bit is still set in unpack_record. - - @todo - allocate blob part aligned buffers -*/ - -int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) -{ - DBUG_ENTER("g_get_ndb_blobs_value"); - if (ndb_blob->blobsNextBlob() != NULL) - DBUG_RETURN(0); - ha_ndbcluster *ha= (ha_ndbcluster *)arg; - int ret= get_ndb_blobs_value(ha->table, ha->m_value, - ha->m_blobs_buffer, ha->m_blobs_buffer_size, - ha->m_blobs_offset); - DBUG_RETURN(ret); -} - -/* - This routine is shared by injector. There is no common blobs buffer - so the buffer and length are passed by reference. Injector also - passes a record pointer diff. - */ -int get_ndb_blobs_value(TABLE* table, NdbValue* value_array, - uchar*& buffer, uint& buffer_size, - my_ptrdiff_t ptrdiff) -{ - DBUG_ENTER("get_ndb_blobs_value"); - - // Field has no field number so cannot use TABLE blob_field - // Loop twice, first only counting total buffer size - for (int loop= 0; loop <= 1; loop++) - { - uint32 offset= 0; - for (uint i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - NdbValue value= value_array[i]; - if (! 
(field->flags & BLOB_FLAG)) - continue; - if (value.blob == NULL) - { - DBUG_PRINT("info",("[%u] skipped", i)); - continue; - } - Field_blob *field_blob= (Field_blob *)field; - NdbBlob *ndb_blob= value.blob; - int isNull; - if (ndb_blob->getNull(isNull) != 0) - ERR_RETURN(ndb_blob->getNdbError()); - if (isNull == 0) { - Uint64 len64= 0; - if (ndb_blob->getLength(len64) != 0) - ERR_RETURN(ndb_blob->getNdbError()); - // Align to Uint64 - uint32 size= len64; - if (size % 8 != 0) - size+= 8 - size % 8; - if (loop == 1) - { - uchar *buf= buffer + offset; - uint32 len= 0xffffffff; // Max uint32 - if (ndb_blob->readData(buf, len) != 0) - ERR_RETURN(ndb_blob->getNdbError()); - DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]", - i, offset, (long) buf, len, (int)ptrdiff)); - DBUG_ASSERT(len == len64); - // Ugly hack assumes only ptr needs to be changed - field_blob->set_ptr_offset(ptrdiff, len, buf); - } - offset+= size; - } - else if (loop == 1) // undefined or null - { - // have to set length even in this case - uchar *buf= buffer + offset; // or maybe NULL - uint32 len= 0; - field_blob->set_ptr_offset(ptrdiff, len, buf); - DBUG_PRINT("info", ("[%u] isNull=%d", i, isNull)); - } - } - if (loop == 0 && offset > buffer_size) - { - my_free(buffer); - buffer_size= 0; - DBUG_PRINT("info", ("allocate blobs buffer size %u", offset)); - buffer= (uchar*) my_malloc(offset, MYF(MY_WME)); - if (buffer == NULL) - { - sql_print_error("ha_ndbcluster::get_ndb_blobs_value: " - "my_malloc(%u) failed", offset); - DBUG_RETURN(-1); - } - buffer_size= offset; - } - } - DBUG_RETURN(0); -} - - -/** - Instruct NDB to fetch one field. - - Data is read directly into buffer provided by field - if field is NULL, data is read into memory provided by NDBAPI. -*/ - -int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, - uint fieldnr, uchar* buf) -{ - DBUG_ENTER("get_ndb_value"); - DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr, - (int)(field != NULL ? field->flags : 0))); - - if (field != NULL) - { - DBUG_ASSERT(buf); - DBUG_ASSERT(ndb_supported_type(field->type())); - DBUG_ASSERT(field->ptr != NULL); - if (! (field->flags & BLOB_FLAG)) - { - if (field->type() != MYSQL_TYPE_BIT) - { - uchar *field_buf; - if (field->pack_length() != 0) - field_buf= buf + (field->ptr - table->record[0]); - else - field_buf= (uchar *)&dummy_buf; - m_value[fieldnr].rec= ndb_op->getValue(fieldnr, - (char*) field_buf); - } - else // if (field->type() == MYSQL_TYPE_BIT) - { - m_value[fieldnr].rec= ndb_op->getValue(fieldnr); - } - DBUG_RETURN(m_value[fieldnr].rec == NULL); - } - - // Blob type - NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr); - m_value[fieldnr].blob= ndb_blob; - if (ndb_blob != NULL) - { - // Set callback - m_blobs_offset= buf - (uchar*) table->record[0]; - void *arg= (void *)this; - DBUG_RETURN(ndb_blob->setActiveHook(g_get_ndb_blobs_value, arg) != 0); - } - DBUG_RETURN(1); - } - - // Used for hidden key only - m_value[fieldnr].rec= ndb_op->getValue(fieldnr, (char*) m_ref); - DBUG_RETURN(m_value[fieldnr].rec == NULL); -} - -/* - Instruct NDB to fetch the partition id (fragment id) -*/ -int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op) -{ - DBUG_ENTER("get_ndb_partition_id"); - DBUG_RETURN(ndb_op->getValue(NdbDictionary::Column::FRAGMENT, - (char *)&m_part_id) == NULL); -} - -/** - Check if any set or get of blob value in current query. 
-*/ - -bool ha_ndbcluster::uses_blob_value() -{ - MY_BITMAP *bitmap; - uint *blob_index, *blob_index_end; - if (table_share->blob_fields == 0) - return FALSE; - - bitmap= m_write_op ? table->write_set : table->read_set; - blob_index= table_share->blob_field; - blob_index_end= blob_index + table_share->blob_fields; - do - { - if (bitmap_is_set(bitmap, table->field[*blob_index]->field_index)) - return TRUE; - } while (++blob_index != blob_index_end); - return FALSE; -} - - -/** - Get metadata for this table from NDB. - - Check that frm-file on disk is equal to frm-file - of table accessed in NDB. - - @retval - 0 ok - @retval - -2 Meta data has changed; Re-read data and try again -*/ - -int cmp_frm(const NDBTAB *ndbtab, const void *pack_data, - uint pack_length) -{ - DBUG_ENTER("cmp_frm"); - /* - Compare FrmData in NDB with frm file from disk. - */ - if ((pack_length != ndbtab->getFrmLength()) || - (memcmp(pack_data, ndbtab->getFrmData(), pack_length))) - DBUG_RETURN(1); - DBUG_RETURN(0); -} - -int ha_ndbcluster::get_metadata(const char *path) -{ - Ndb *ndb= get_ndb(); - NDBDICT *dict= ndb->getDictionary(); - const NDBTAB *tab; - int error; - DBUG_ENTER("get_metadata"); - DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); - - DBUG_ASSERT(m_table == NULL); - DBUG_ASSERT(m_table_info == NULL); - - uchar *data= NULL, *pack_data= NULL; - size_t length, pack_length; - - /* - Compare FrmData in NDB with frm file from disk. - */ - error= 0; - if (readfrm(path, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) - { - my_free(data); - my_free(pack_data); - DBUG_RETURN(1); - } - - Ndb_table_guard ndbtab_g(dict, m_tabname); - if (!(tab= ndbtab_g.get_table())) - ERR_RETURN(dict->getNdbError()); - - if (get_ndb_share_state(m_share) != NSS_ALTERED - && cmp_frm(tab, pack_data, pack_length)) - { - DBUG_PRINT("error", - ("metadata, pack_length: %lu getFrmLength: %d memcmp: %d", - (ulong) pack_length, tab->getFrmLength(), - memcmp(pack_data, tab->getFrmData(), pack_length))); - DBUG_DUMP("pack_data", (uchar*) pack_data, pack_length); - DBUG_DUMP("frm", (uchar*) tab->getFrmData(), tab->getFrmLength()); - error= HA_ERR_TABLE_DEF_CHANGED; - } - my_free(data); - my_free(pack_data); - - if (error) - goto err; - - DBUG_PRINT("info", ("fetched table %s", tab->getName())); - m_table= tab; - if ((error= open_indexes(ndb, table, FALSE)) == 0) - { - ndbtab_g.release(); - DBUG_RETURN(0); - } -err: - ndbtab_g.invalidate(); - m_table= NULL; - DBUG_RETURN(error); -} - -static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, - const NDBINDEX *index, - KEY *key_info) -{ - DBUG_ENTER("fix_unique_index_attr_order"); - unsigned sz= index->getNoOfIndexColumns(); - - if (data.unique_index_attrid_map) - my_free(data.unique_index_attrid_map); - data.unique_index_attrid_map= (uchar*)my_malloc(sz,MYF(MY_WME)); - if (data.unique_index_attrid_map == 0) - { - sql_print_error("fix_unique_index_attr_order: my_malloc(%u) failure", - (unsigned int)sz); - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - } - - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - DBUG_ASSERT(key_info->user_defined_key_parts == sz); - for (unsigned i= 0; key_part != end; key_part++, i++) - { - const char *field_name= key_part->field->field_name; -#ifndef DBUG_OFF - data.unique_index_attrid_map[i]= 255; -#endif - for (unsigned j= 0; j < sz; j++) - { - const NDBCOL *c= index->getColumn(j); - if (strcmp(field_name, c->getName()) == 0) - { - data.unique_index_attrid_map[i]= j; - 
break; - } - } - DBUG_ASSERT(data.unique_index_attrid_map[i] != 255); - } - DBUG_RETURN(0); -} - -/* - Create all the indexes for a table. - If any index should fail to be created, - the error is returned immediately -*/ -int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab) -{ - uint i; - int error= 0; - const char *index_name; - KEY* key_info= tab->key_info; - const char **key_name= tab->s->keynames.type_names; - DBUG_ENTER("ha_ndbcluster::create_indexes"); - - for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) - { - index_name= *key_name; - NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); - error= create_index(index_name, key_info, idx_type, i); - if (error) - { - DBUG_PRINT("error", ("Failed to create index %u", i)); - break; - } - } - - DBUG_RETURN(error); -} - -static void ndb_init_index(NDB_INDEX_DATA &data) -{ - data.type= UNDEFINED_INDEX; - data.status= UNDEFINED; - data.unique_index= NULL; - data.index= NULL; - data.unique_index_attrid_map= NULL; - data.index_stat=NULL; - data.index_stat_cache_entries=0; - data.index_stat_update_freq=0; - data.index_stat_query_count=0; -} - -static void ndb_clear_index(NDB_INDEX_DATA &data) -{ - if (data.unique_index_attrid_map) - { - my_free(data.unique_index_attrid_map); - } - if (data.index_stat) - { - delete data.index_stat; - } - ndb_init_index(data); -} - -/* - Associate a direct reference to an index handle - with an index (for faster access) - */ -int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info, - const char *index_name, uint index_no) -{ - int error= 0; - NDB_INDEX_TYPE idx_type= get_index_type_from_table(index_no); - m_index[index_no].type= idx_type; - DBUG_ENTER("ha_ndbcluster::add_index_handle"); - DBUG_PRINT("enter", ("table %s", m_tabname)); - - if (idx_type != PRIMARY_KEY_INDEX && idx_type != UNIQUE_INDEX) - { - DBUG_PRINT("info", ("Get handle to index %s", index_name)); - const NDBINDEX *index; - do - { - index= dict->getIndexGlobal(index_name, *m_table); - if (!index) - ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d", - (long) index, - index->getObjectId(), - index->getObjectVersion() & 0xFFFFFF, - index->getObjectVersion() >> 24, - index->getObjectStatus())); - DBUG_ASSERT(index->getObjectStatus() == - NdbDictionary::Object::Retrieved); - break; - } while (1); - m_index[index_no].index= index; - // ordered index - add stats - NDB_INDEX_DATA& d=m_index[index_no]; - delete d.index_stat; - d.index_stat=NULL; - if (THDVAR(thd, index_stat_enable)) - { - d.index_stat=new NdbIndexStat(index); - d.index_stat_cache_entries=THDVAR(thd, index_stat_cache_entries); - d.index_stat_update_freq=THDVAR(thd, index_stat_update_freq); - d.index_stat_query_count=0; - d.index_stat->alloc_cache(d.index_stat_cache_entries); - DBUG_PRINT("info", ("index %s stat=on cache_entries=%u update_freq=%u", - index->getName(), - d.index_stat_cache_entries, - d.index_stat_update_freq)); - } else - { - DBUG_PRINT("info", ("index %s stat=off", index->getName())); - } - } - if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) - { - char unique_index_name[FN_LEN + 1]; - static const char* unique_suffix= "$unique"; - m_has_unique_index= TRUE; - strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS); - DBUG_PRINT("info", ("Get handle to unique_index %s", unique_index_name)); - const NDBINDEX *index; - do - { - index= dict->getIndexGlobal(unique_index_name, *m_table); - if (!index) - ERR_RETURN(dict->getNdbError()); - DBUG_PRINT("info", ("index: 
0x%lx id: %d version: %d.%d status: %d", - (long) index, - index->getObjectId(), - index->getObjectVersion() & 0xFFFFFF, - index->getObjectVersion() >> 24, - index->getObjectStatus())); - DBUG_ASSERT(index->getObjectStatus() == - NdbDictionary::Object::Retrieved); - break; - } while (1); - m_index[index_no].unique_index= index; - error= fix_unique_index_attr_order(m_index[index_no], index, key_info); - } - if (!error) - m_index[index_no].status= ACTIVE; - - DBUG_RETURN(error); -} - -/* - Associate index handles for each index of a table -*/ -int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) -{ - uint i; - int error= 0; - THD *thd=current_thd; - NDBDICT *dict= ndb->getDictionary(); - KEY* key_info= tab->key_info; - const char **key_name= tab->s->keynames.type_names; - DBUG_ENTER("ha_ndbcluster::open_indexes"); - m_has_unique_index= FALSE; - for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) - { - if ((error= add_index_handle(thd, dict, key_info, *key_name, i))) - { - if (ignore_error) - m_index[i].index= m_index[i].unique_index= NULL; - else - break; - } - m_index[i].null_in_unique_index= FALSE; - if (check_index_fields_not_null(key_info)) - m_index[i].null_in_unique_index= TRUE; - } - - if (error && !ignore_error) - { - while (i > 0) - { - i--; - if (m_index[i].index) - { - dict->removeIndexGlobal(*m_index[i].index, 1); - m_index[i].index= NULL; - } - if (m_index[i].unique_index) - { - dict->removeIndexGlobal(*m_index[i].unique_index, 1); - m_index[i].unique_index= NULL; - } - } - } - - DBUG_ASSERT(error == 0 || error == 4243); - - DBUG_RETURN(error); -} - -/* - Renumber indexes in index list by shifting out - indexes that are to be dropped - */ -void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab) -{ - uint i; - const char *index_name; - KEY* key_info= tab->key_info; - const char **key_name= tab->s->keynames.type_names; - DBUG_ENTER("ha_ndbcluster::renumber_indexes"); - - for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) - { - index_name= *key_name; - NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); - m_index[i].type= idx_type; - if (m_index[i].status == TO_BE_DROPPED) - { - DBUG_PRINT("info", ("Shifting index %s(%i) out of the list", - index_name, i)); - NDB_INDEX_DATA tmp; - uint j= i + 1; - // Shift index out of list - while(j != MAX_KEY && m_index[j].status != UNDEFINED) - { - tmp= m_index[j - 1]; - m_index[j - 1]= m_index[j]; - m_index[j]= tmp; - j++; - } - } - } - - DBUG_VOID_RETURN; -} - -/* - Drop all indexes that are marked for deletion -*/ -int ha_ndbcluster::drop_indexes(Ndb *ndb, TABLE *tab) -{ - uint i; - int error= 0; - const char *index_name; - KEY* key_info= tab->key_info; - NDBDICT *dict= ndb->getDictionary(); - DBUG_ENTER("ha_ndbcluster::drop_indexes"); - - for (i= 0; i < tab->s->keys; i++, key_info++) - { - NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); - m_index[i].type= idx_type; - if (m_index[i].status == TO_BE_DROPPED) - { - const NdbDictionary::Index *index= m_index[i].index; - const NdbDictionary::Index *unique_index= m_index[i].unique_index; - - if (index) - { - index_name= index->getName(); - DBUG_PRINT("info", ("Dropping index %u: %s", i, index_name)); - // Drop ordered index from ndb - error= dict->dropIndexGlobal(*index); - if (!error) - { - dict->removeIndexGlobal(*index, 1); - m_index[i].index= NULL; - } - } - if (!error && unique_index) - { - index_name= unique_index->getName(); - DBUG_PRINT("info", ("Dropping unique index %u: %s", i, index_name)); - // Drop unique index from ndb - error= 
dict->dropIndexGlobal(*unique_index); - if (!error) - { - dict->removeIndexGlobal(*unique_index, 1); - m_index[i].unique_index= NULL; - } - } - if (error) - DBUG_RETURN(error); - ndb_clear_index(m_index[i]); - continue; - } - } - - DBUG_RETURN(error); -} - -/** - Decode the type of an index from information - provided in table object. -*/ -NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const -{ - return get_index_type_from_key(inx, table_share->key_info, - inx == table_share->primary_key); -} - -NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx, - KEY *key_info, - bool primary) const -{ - bool is_hash_index= (key_info[inx].algorithm == - HA_KEY_ALG_HASH); - if (primary) - return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX; - - return ((key_info[inx].flags & HA_NOSAME) ? - (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : - ORDERED_INDEX); -} - -bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) -{ - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null"); - - for (; key_part != end; key_part++) - { - Field* field= key_part->field; - if (field->maybe_null()) - DBUG_RETURN(TRUE); - } - - DBUG_RETURN(FALSE); -} - -void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) -{ - uint i; - - DBUG_ENTER("release_metadata"); - DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); - - NDBDICT *dict= ndb->getDictionary(); - int invalidate_indexes= 0; - if (thd && thd->lex && thd->lex->sql_command == SQLCOM_FLUSH) - { - invalidate_indexes = 1; - } - if (m_table != NULL) - { - if (m_table->getObjectStatus() == NdbDictionary::Object::Invalid) - invalidate_indexes= 1; - dict->removeTableGlobal(*m_table, invalidate_indexes); - } - // TODO investigate - DBUG_ASSERT(m_table_info == NULL); - m_table_info= NULL; - - // Release index list - for (i= 0; i < MAX_KEY; i++) - { - if (m_index[i].unique_index) - { - DBUG_ASSERT(m_table != NULL); - dict->removeIndexGlobal(*m_index[i].unique_index, invalidate_indexes); - } - if (m_index[i].index) - { - DBUG_ASSERT(m_table != NULL); - dict->removeIndexGlobal(*m_index[i].index, invalidate_indexes); - } - ndb_clear_index(m_index[i]); - } - - m_table= NULL; - DBUG_VOID_RETURN; -} - -int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type) -{ - if (type >= TL_WRITE_ALLOW_WRITE) - return NdbOperation::LM_Exclusive; - if (type == TL_READ_WITH_SHARED_LOCKS || - uses_blob_value()) - return NdbOperation::LM_Read; - return NdbOperation::LM_CommittedRead; -} - -static const ulong index_type_flags[]= -{ - /* UNDEFINED_INDEX */ - 0, - - /* PRIMARY_KEY_INDEX */ - HA_ONLY_WHOLE_INDEX, - - /* PRIMARY_KEY_ORDERED_INDEX */ - /* - Enable HA_KEYREAD_ONLY when "sorted" indexes are supported, - thus ORDERD BY clauses can be optimized by reading directly - through the index. 
- */ - // HA_KEYREAD_ONLY | - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER, - - /* UNIQUE_INDEX */ - HA_ONLY_WHOLE_INDEX, - - /* UNIQUE_ORDERED_INDEX */ - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER, - - /* ORDERED_INDEX */ - HA_READ_NEXT | - HA_READ_PREV | - HA_READ_RANGE | - HA_READ_ORDER -}; - -static const int index_flags_size= sizeof(index_type_flags)/sizeof(ulong); - -inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const -{ - DBUG_ASSERT(idx_no < MAX_KEY); - return m_index[idx_no].type; -} - -inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const -{ - DBUG_ASSERT(idx_no < MAX_KEY); - return m_index[idx_no].null_in_unique_index; -} - - -/** - Get the flags for an index. - - @return - flags depending on the type of the index. -*/ - -inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part, - bool all_parts) const -{ - DBUG_ENTER("ha_ndbcluster::index_flags"); - DBUG_PRINT("enter", ("idx_no: %u", idx_no)); - DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size); - DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)] | - HA_KEY_SCAN_NOT_ROR); -} - -static void shrink_varchar(Field* field, const uchar* & ptr, uchar* buf) -{ - if (field->type() == MYSQL_TYPE_VARCHAR && ptr != NULL) { - Field_varstring* f= (Field_varstring*)field; - if (f->length_bytes == 1) { - uint pack_len= field->pack_length(); - DBUG_ASSERT(1 <= pack_len && pack_len <= 256); - if (ptr[1] == 0) { - buf[0]= ptr[0]; - } else { - DBUG_ASSERT(FALSE); - buf[0]= 255; - } - memmove(buf + 1, ptr + 2, pack_len - 1); - ptr= buf; - } - } -} - -int ha_ndbcluster::set_primary_key(NdbOperation *op, const uchar *key) -{ - KEY* key_info= table->key_info + table_share->primary_key; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - DBUG_ENTER("set_primary_key"); - - for (; key_part != end; key_part++) - { - Field* field= key_part->field; - const uchar* ptr= key; - uchar buf[256]; - shrink_varchar(field, ptr, buf); - if (set_ndb_key(op, field, - key_part->fieldnr-1, ptr)) - ERR_RETURN(op->getNdbError()); - key += key_part->store_length; - } - DBUG_RETURN(0); -} - - -int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *record) -{ - KEY* key_info= table->key_info + table_share->primary_key; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - DBUG_ENTER("set_primary_key_from_record"); - - for (; key_part != end; key_part++) - { - Field* field= key_part->field; - if (set_ndb_key(op, field, - key_part->fieldnr-1, record+key_part->offset)) - ERR_RETURN(op->getNdbError()); - } - DBUG_RETURN(0); -} - -bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno) -{ - KEY* key_info= table->key_info + keyno; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - uint i; - DBUG_ENTER("check_index_fields_in_write_set"); - - for (i= 0; key_part != end; key_part++, i++) - { - Field* field= key_part->field; - if (!bitmap_is_set(table->write_set, field->field_index)) - { - DBUG_RETURN(false); - } - } - - DBUG_RETURN(true); -} - -int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, - const uchar *record, uint keyno) -{ - KEY* key_info= table->key_info + keyno; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - uint i; - DBUG_ENTER("set_index_key_from_record"); - - for (i= 
0; key_part != end; key_part++, i++) - { - Field* field= key_part->field; - if (set_ndb_key(op, field, m_index[keyno].unique_index_attrid_map[i], - record+key_part->offset)) - ERR_RETURN(m_active_trans->getNdbError()); - } - DBUG_RETURN(0); -} - -int -ha_ndbcluster::set_index_key(NdbOperation *op, - const KEY *key_info, - const uchar * key_ptr) -{ - DBUG_ENTER("ha_ndbcluster::set_index_key"); - uint i; - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - - for (i= 0; key_part != end; key_part++, i++) - { - Field* field= key_part->field; - const uchar* ptr= key_part->null_bit ? key_ptr + 1 : key_ptr; - uchar buf[256]; - shrink_varchar(field, ptr, buf); - if (set_ndb_key(op, field, m_index[active_index].unique_index_attrid_map[i], ptr)) - ERR_RETURN(m_active_trans->getNdbError()); - key_ptr+= key_part->store_length; - } - DBUG_RETURN(0); -} - -inline -int ha_ndbcluster::define_read_attrs(uchar* buf, NdbOperation* op) -{ - uint i; - DBUG_ENTER("define_read_attrs"); - - // Define attributes to read - for (i= 0; i < table_share->fields; i++) - { - Field *field= table->field[i]; - if (bitmap_is_set(table->read_set, i) || - ((field->flags & PRI_KEY_FLAG))) - { - if (get_ndb_value(op, field, i, buf)) - ERR_RETURN(op->getNdbError()); - } - else - { - m_value[i].ptr= NULL; - } - } - - if (table_share->primary_key == MAX_KEY) - { - DBUG_PRINT("info", ("Getting hidden key")); - // Scanning table with no primary key - int hidden_no= table_share->fields; -#ifndef DBUG_OFF - const NDBTAB *tab= (const NDBTAB *) m_table; - if (!tab->getColumn(hidden_no)) - DBUG_RETURN(1); -#endif - if (get_ndb_value(op, NULL, hidden_no, NULL)) - ERR_RETURN(op->getNdbError()); - } - DBUG_RETURN(0); -} - - -/** - Read one record from NDB using primary key. 
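The shrink_varchar() helper used by set_primary_key() and set_index_key() above rewrites a VARCHAR key image, which always carries a 2-byte little-endian length prefix, into the 1-byte-length form stored for columns with length_bytes == 1. A minimal standalone sketch of that rewrite, assuming the simplified buffer layout just described rather than the handler's actual signature:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch: convert a key image with a 2-byte (little-endian) length prefix
// into the 1-byte-length form: [len_lo][len_hi][data...] -> [len][data...].
// Lengths above 255 are clamped, mirroring the 255 fallback in the code above.
static void shrink_varchar_image(const unsigned char *in, size_t data_len,
                                 unsigned char *out)
{
    uint16_t len = (uint16_t)(in[0] | (in[1] << 8));
    out[0] = (unsigned char)(len > 255 ? 255 : len);
    std::memcpy(out + 1, in + 2, data_len);
}

int main()
{
    const unsigned char key_image[] = { 3, 0, 'a', 'b', 'c' };  // "abc", length 3
    unsigned char shrunk[1 + 3];
    shrink_varchar_image(key_image, 3, shrunk);
    std::printf("len=%u data=%.3s\n", (unsigned)shrunk[0],
                (const char *)(shrunk + 1));                    // len=3 data=abc
    return 0;
}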
-*/ - -int ha_ndbcluster::pk_read(const uchar *key, uint key_len, uchar *buf, - uint32 part_id) -{ - uint no_fields= table_share->fields; - NdbConnection *trans= m_active_trans; - NdbOperation *op; - - int res; - DBUG_ENTER("pk_read"); - DBUG_PRINT("enter", ("key_len: %u", key_len)); - DBUG_DUMP("key", key, key_len); - m_write_op= FALSE; - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - - if (table_share->primary_key == MAX_KEY) - { - // This table has no primary key, use "hidden" primary key - DBUG_PRINT("info", ("Using hidden key")); - DBUG_DUMP("key", key, 8); - if (set_hidden_key(op, no_fields, key)) - ERR_RETURN(trans->getNdbError()); - - // Read key at the same time, for future reference - if (get_ndb_value(op, NULL, no_fields, NULL)) - ERR_RETURN(trans->getNdbError()); - } - else - { - if ((res= set_primary_key(op, key))) - return res; - } - - if ((res= define_read_attrs(buf, op))) - DBUG_RETURN(res); - - if (m_use_partition_function) - { - op->setPartitionId(part_id); - // If table has user defined partitioning - // and no indexes, we need to read the partition id - // to support ORDER BY queries - if (table_share->primary_key == MAX_KEY && - get_ndb_partition_id(op)) - ERR_RETURN(trans->getNdbError()); - } - - if ((res = execute_no_commit_ie(this,trans,FALSE)) != 0 || - op->getNdbError().code) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(ndb_err(trans)); - } - - // The value have now been fetched from NDB - unpack_record(buf); - table->status= 0; - DBUG_RETURN(0); -} - -/** - Read one complementing record from NDB using primary key from old_data - or hidden key. -*/ - -int ha_ndbcluster::complemented_read(const uchar *old_data, uchar *new_data, - uint32 old_part_id) -{ - uint no_fields= table_share->fields, i; - NdbTransaction *trans= m_active_trans; - NdbOperation *op; - DBUG_ENTER("complemented_read"); - m_write_op= FALSE; - - if (bitmap_is_set_all(table->read_set)) - { - // We have allready retrieved all fields, nothing to complement - DBUG_RETURN(0); - } - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - if (table_share->primary_key != MAX_KEY) - { - if (set_primary_key_from_record(op, old_data)) - ERR_RETURN(trans->getNdbError()); - } - else - { - // This table has no primary key, use "hidden" primary key - if (set_hidden_key(op, table->s->fields, m_ref)) - ERR_RETURN(op->getNdbError()); - } - - if (m_use_partition_function) - op->setPartitionId(old_part_id); - - // Read all unreferenced non-key field(s) - for (i= 0; i < no_fields; i++) - { - Field *field= table->field[i]; - if (!((field->flags & PRI_KEY_FLAG) || - bitmap_is_set(table->read_set, i)) && - !bitmap_is_set(table->write_set, i)) - { - if (get_ndb_value(op, field, i, new_data)) - ERR_RETURN(trans->getNdbError()); - } - } - - if (execute_no_commit(this,trans,FALSE) != 0) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(ndb_err(trans)); - } - - // The value have now been fetched from NDB - unpack_record(new_data); - table->status= 0; - - /* - * restore m_value - */ - for (i= 0; i < no_fields; i++) - { - Field *field= table->field[i]; - if (!((field->flags & PRI_KEY_FLAG) || - bitmap_is_set(table->read_set, i))) - { - m_value[i].ptr= NULL; - } - } - - DBUG_RETURN(0); -} - -/** 
- Check that all operations between first and last all - have gotten the errcode - If checking for HA_ERR_KEY_NOT_FOUND then update m_dupkey - for all succeeding operations -*/ -bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, - const NdbOperation *first, - const NdbOperation *last, - uint errcode) -{ - const NdbOperation *op= first; - DBUG_ENTER("ha_ndbcluster::check_all_operations_for_error"); - - while(op) - { - NdbError err= op->getNdbError(); - if (err.status != NdbError::Success) - { - if (ndb_to_mysql_error(&err) != (int) errcode) - DBUG_RETURN(FALSE); - if (op == last) break; - op= trans->getNextCompletedOperation(op); - } - else - { - // We found a duplicate - if (op->getType() == NdbOperation::UniqueIndexAccess) - { - if (errcode == HA_ERR_KEY_NOT_FOUND) - { - NdbIndexOperation *iop= (NdbIndexOperation *) op; - const NDBINDEX *index= iop->getIndex(); - // Find the key_no of the index - for(uint i= 0; is->keys; i++) - { - if (m_index[i].unique_index == index) - { - m_dupkey= i; - break; - } - } - } - } - else - { - // Must have been primary key access - DBUG_ASSERT(op->getType() == NdbOperation::PrimaryKeyAccess); - if (errcode == HA_ERR_KEY_NOT_FOUND) - m_dupkey= table->s->primary_key; - } - DBUG_RETURN(FALSE); - } - } - DBUG_RETURN(TRUE); -} - - -/** - * Check if record contains any null valued columns that are part of a key - */ -static -int -check_null_in_record(const KEY* key_info, const uchar *record) -{ - KEY_PART_INFO *curr_part, *end_part; - curr_part= key_info->key_part; - end_part= curr_part + key_info->user_defined_key_parts; - - while (curr_part != end_part) - { - if (curr_part->null_bit && - (record[curr_part->null_offset] & curr_part->null_bit)) - return 1; - curr_part++; - } - return 0; - /* - We could instead pre-compute a bitmask in table_share with one bit for - every null-bit in the key, and so check this just by OR'ing the bitmask - with the null bitmap in the record. - But not sure it's worth it. - */ -} - -/** - Peek to check if any rows already exist with conflicting - primary key or unique index values -*/ - -int ha_ndbcluster::peek_indexed_rows(const uchar *record, - NDB_WRITE_OP write_op) -{ - NdbTransaction *trans= m_active_trans; - NdbOperation *op; - const NdbOperation *first, *last; - uint i; - int res; - DBUG_ENTER("peek_indexed_rows"); - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - first= NULL; - if (write_op != NDB_UPDATE && table->s->primary_key != MAX_KEY) - { - /* - * Fetch any row with colliding primary key - */ - if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - - first= op; - if ((res= set_primary_key_from_record(op, record))) - ERR_RETURN(trans->getNdbError()); - - if (m_use_partition_function) - { - uint32 part_id; - int error; - longlong func_value; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); - dbug_tmp_restore_column_map(table->read_set, old_map); - if (error) - { - m_part_info->err_value= func_value; - DBUG_RETURN(error); - } - op->setPartitionId(part_id); - } - } - /* - * Fetch any rows with colliding unique indexes - */ - KEY* key_info; - KEY_PART_INFO *key_part, *end; - for (i= 0, key_info= table->key_info; i < table->s->keys; i++, key_info++) - { - if (i != table->s->primary_key && - key_info->flags & HA_NOSAME) - { - /* - A unique index is defined on table. 
- We cannot look up a NULL field value in a unique index. But since - keys with NULLs are not indexed, such rows cannot conflict anyway, so - we just skip the index in this case. - */ - if (check_null_in_record(key_info, record)) - { - DBUG_PRINT("info", ("skipping check for key with NULL")); - continue; - } - if (write_op != NDB_INSERT && !check_index_fields_in_write_set(i)) - { - DBUG_PRINT("info", ("skipping check for key %u not in write_set", i)); - continue; - } - NdbIndexOperation *iop; - const NDBINDEX *unique_index = m_index[i].unique_index; - key_part= key_info->key_part; - end= key_part + key_info->user_defined_key_parts; - if (!(iop= trans->getNdbIndexOperation(unique_index, m_table)) || - iop->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - - if (!first) - first= iop; - if ((res= set_index_key_from_record(iop, record, i))) - ERR_RETURN(trans->getNdbError()); - } - } - last= trans->getLastDefinedOperation(); - if (first) - res= execute_no_commit_ie(this,trans,FALSE); - else - { - // Table has no keys - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); - } - if (check_all_operations_for_error(trans, first, last, - HA_ERR_KEY_NOT_FOUND)) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(ndb_err(trans)); - } - else - { - DBUG_PRINT("info", ("m_dupkey %d", m_dupkey)); - } - DBUG_RETURN(0); -} - - -/** - Read one record from NDB using unique secondary index. -*/ - -int ha_ndbcluster::unique_index_read(const uchar *key, - uint key_len, uchar *buf) -{ - int res; - NdbTransaction *trans= m_active_trans; - NdbIndexOperation *op; - DBUG_ENTER("ha_ndbcluster::unique_index_read"); - DBUG_PRINT("enter", ("key_len: %u, index: %u", key_len, active_index)); - DBUG_DUMP("key", key, key_len); - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbIndexOperation(m_index[active_index].unique_index, - m_table)) || - op->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - - // Set secondary index key(s) - if ((res= set_index_key(op, table->key_info + active_index, key))) - DBUG_RETURN(res); - - if ((res= define_read_attrs(buf, op))) - DBUG_RETURN(res); - - if (execute_no_commit_ie(this,trans,FALSE) != 0 || - op->getNdbError().code) - { - int err= ndb_err(trans); - if(err==HA_ERR_KEY_NOT_FOUND) - table->status= STATUS_NOT_FOUND; - else - table->status= STATUS_GARBAGE; - - DBUG_RETURN(err); - } - - // The value have now been fetched from NDB - unpack_record(buf); - table->status= 0; - DBUG_RETURN(0); -} - -inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) -{ - DBUG_ENTER("fetch_next"); - int local_check; - NdbTransaction *trans= m_active_trans; - - if (m_lock_tuple) - { - /* - Lock level m_lock.type either TL_WRITE_ALLOW_WRITE - (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT - LOCK WITH SHARE MODE) and row was not explictly unlocked - with unlock_row() call - */ - NdbConnection *con_trans= m_active_trans; - NdbOperation *op; - // Lock row - DBUG_PRINT("info", ("Keeping lock on scanned row")); - - if (!(op= m_active_cursor->lockCurrentTuple())) - { - /* purecov: begin inspected */ - m_lock_tuple= FALSE; - ERR_RETURN(con_trans->getNdbError()); - /* purecov: end */ - } - m_ops_pending++; - } - m_lock_tuple= FALSE; - - bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && - m_lock.type != TL_READ_WITH_SHARED_LOCKS;; - do { - DBUG_PRINT("info", ("Call nextResult, contact_ndb: %d", contact_ndb)); - /* - We can only handle one tuple with blobs at a time. 
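peek_indexed_rows() above skips a unique index whenever the row holds a NULL in one of its key parts; as the comment notes, such rows are not present in the index and cannot cause a duplicate-key conflict. A standalone sketch of the null-bit test performed by check_null_in_record(), with a simplified KeyPart struct standing in for KEY_PART_INFO:

#include <cstdio>
#include <vector>

// Simplified stand-in for KEY_PART_INFO: where the part's NULL flag lives.
struct KeyPart {
    unsigned null_offset;    // byte offset of the null byte in the record
    unsigned char null_bit;  // 0 if the key part is NOT NULL
};

// Mirrors check_null_in_record(): true if any nullable key part is NULL.
static bool key_has_null(const std::vector<KeyPart> &parts,
                         const unsigned char *record)
{
    for (const KeyPart &p : parts)
        if (p.null_bit && (record[p.null_offset] & p.null_bit))
            return true;
    return false;
}

int main()
{
    std::vector<KeyPart> parts = { {0, 0x01}, {0, 0x02} };
    unsigned char row_with_null[] = { 0x02 };   // second key part is NULL
    std::printf("%s\n", key_has_null(parts, row_with_null)
                        ? "skip unique-index pre-check"
                        : "run unique-index pre-check");
    return 0;
}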
- */ - if (m_ops_pending && m_blobs_pending) - { - if (execute_no_commit(this,trans,FALSE) != 0) - DBUG_RETURN(ndb_err(trans)); - m_ops_pending= 0; - m_blobs_pending= FALSE; - } - - if ((local_check= cursor->nextResult(contact_ndb, m_force_send)) == 0) - { - /* - Explicitly lock tuple if "select for update" or - "select lock in share mode" - */ - m_lock_tuple= (m_lock.type == TL_WRITE_ALLOW_WRITE - || - m_lock.type == TL_READ_WITH_SHARED_LOCKS); - DBUG_RETURN(0); - } - else if (local_check == 1 || local_check == 2) - { - // 1: No more records - // 2: No more cached records - - /* - Before fetching more rows and releasing lock(s), - all pending update or delete operations should - be sent to NDB - */ - DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); - if (m_ops_pending) - { - if (m_transaction_on) - { - if (execute_no_commit(this,trans,FALSE) != 0) - DBUG_RETURN(-1); - } - else - { - if (execute_commit(this,trans) != 0) - DBUG_RETURN(-1); - if (trans->restart() != 0) - { - DBUG_ASSERT(0); - DBUG_RETURN(-1); - } - } - m_ops_pending= 0; - } - contact_ndb= (local_check == 2); - } - else - { - DBUG_RETURN(-1); - } - } while (local_check == 2); - - DBUG_RETURN(1); -} - -/** - Get the next record of a started scan. Try to fetch - it locally from NdbApi cached records if possible, - otherwise ask NDB for more. - - @note - If this is a update/delete make sure to not contact - NDB before any pending ops have been sent to NDB. -*/ - -inline int ha_ndbcluster::next_result(uchar *buf) -{ - int res; - DBUG_ENTER("next_result"); - - if (!m_active_cursor) - DBUG_RETURN(HA_ERR_END_OF_FILE); - - if ((res= fetch_next(m_active_cursor)) == 0) - { - DBUG_PRINT("info", ("One more record found")); - - unpack_record(buf); - table->status= 0; - DBUG_RETURN(0); - } - else if (res == 1) - { - // No more records - table->status= STATUS_NOT_FOUND; - - DBUG_PRINT("info", ("No more records")); - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - else - { - DBUG_RETURN(ndb_err(m_active_trans)); - } -} - -/** - Set bounds for ordered index scan. 
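fetch_next() above loops on NdbScanOperation::nextResult(), where 0 means a row is available, 1 means the scan is exhausted, and 2 means the local cache is empty and another roundtrip is needed; before that roundtrip any pending update or delete operations are flushed. A standalone sketch of the same control flow, with a stub cursor standing in for the NDB API; the return-code convention is taken from the code above, everything else is illustrative:

#include <cstdio>
#include <deque>

// Stub cursor standing in for NdbScanOperation::nextResult():
//   0 = a cached row is available, 1 = scan exhausted,
//   2 = local cache empty, caller must allow a fetch from the data nodes.
struct StubCursor {
    std::deque<int> cached;   // locally cached rows
    int batches_left;         // batches the "data nodes" can still deliver
    int nextResult(bool contact_server)
    {
        if (!cached.empty()) { cached.pop_front(); return 0; }
        if (!contact_server) return 2;
        if (batches_left-- > 0) { cached.assign(2, 42); cached.pop_front(); return 0; }
        return 1;
    }
};

// Mirrors the shape of fetch_next(): flush pending ops before fetching more.
static int fetch_next_sketch(StubCursor &cursor, int &ops_pending)
{
    bool contact = false;                  // as when locks are held: do not fetch yet
    for (;;) {
        int rc = cursor.nextResult(contact);
        if (rc == 0) return 0;             // row ready for unpack_record()
        if (rc == 1) return 1;             // no more rows
        // rc == 2: flush pending update/delete ops, then contact the server
        if (ops_pending) { /* execute_no_commit() would run here */ ops_pending = 0; }
        contact = true;
    }
}

int main()
{
    StubCursor cursor{ {}, 2 };
    int ops_pending = 1, rows = 0;
    while (fetch_next_sketch(cursor, ops_pending) == 0)
        ++rows;
    std::printf("rows=%d\n", rows);        // 4 rows from 2 batches of 2
    return 0;
}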
-*/ - -int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, - uint inx, - bool rir, - const key_range *keys[2], - uint range_no) -{ - const KEY *const key_info= table->key_info + inx; - const uint key_parts= key_info->user_defined_key_parts; - uint key_tot_len[2]; - uint tot_len; - uint i, j; - - DBUG_ENTER("set_bounds"); - DBUG_PRINT("info", ("key_parts=%d", key_parts)); - - for (j= 0; j <= 1; j++) - { - const key_range *key= keys[j]; - if (key != NULL) - { - // for key->flag see ha_rkey_function - DBUG_PRINT("info", ("key %d length=%d flag=%d", - j, key->length, key->flag)); - key_tot_len[j]= key->length; - } - else - { - DBUG_PRINT("info", ("key %d not present", j)); - key_tot_len[j]= 0; - } - } - tot_len= 0; - - for (i= 0; i < key_parts; i++) - { - KEY_PART_INFO *key_part= &key_info->key_part[i]; - Field *field= key_part->field; -#ifndef DBUG_OFF - uint part_len= key_part->length; -#endif - uint part_store_len= key_part->store_length; - // Info about each key part - struct part_st { - bool part_last; - const key_range *key; - const uchar *part_ptr; - bool part_null; - int bound_type; - const uchar* bound_ptr; - }; - struct part_st part[2]; - - for (j= 0; j <= 1; j++) - { - struct part_st &p= part[j]; - p.key= NULL; - p.bound_type= -1; - if (tot_len < key_tot_len[j]) - { - p.part_last= (tot_len + part_store_len >= key_tot_len[j]); - p.key= keys[j]; - p.part_ptr= &p.key->key[tot_len]; - p.part_null= key_part->null_bit && *p.part_ptr; - p.bound_ptr= (const char *) - p.part_null ? 0 : key_part->null_bit ? p.part_ptr + 1 : p.part_ptr; - - if (j == 0) - { - switch (p.key->flag) - { - case HA_READ_KEY_EXACT: - if (! rir) - p.bound_type= NdbIndexScanOperation::BoundEQ; - else // differs for records_in_range - p.bound_type= NdbIndexScanOperation::BoundLE; - break; - // ascending - case HA_READ_KEY_OR_NEXT: - p.bound_type= NdbIndexScanOperation::BoundLE; - break; - case HA_READ_AFTER_KEY: - if (! p.part_last) - p.bound_type= NdbIndexScanOperation::BoundLE; - else - p.bound_type= NdbIndexScanOperation::BoundLT; - break; - // descending - case HA_READ_PREFIX_LAST: // weird - p.bound_type= NdbIndexScanOperation::BoundEQ; - break; - case HA_READ_PREFIX_LAST_OR_PREV: // weird - p.bound_type= NdbIndexScanOperation::BoundGE; - break; - case HA_READ_BEFORE_KEY: - if (! p.part_last) - p.bound_type= NdbIndexScanOperation::BoundGE; - else - p.bound_type= NdbIndexScanOperation::BoundGT; - break; - default: - break; - } - } - if (j == 1) { - switch (p.key->flag) - { - // ascending - case HA_READ_BEFORE_KEY: - if (! p.part_last) - p.bound_type= NdbIndexScanOperation::BoundGE; - else - p.bound_type= NdbIndexScanOperation::BoundGT; - break; - case HA_READ_AFTER_KEY: // weird - p.bound_type= NdbIndexScanOperation::BoundGE; - break; - default: - break; - // descending strangely sets no end key - } - } - - if (p.bound_type == -1) - { - DBUG_PRINT("error", ("key %d unknown flag %d", j, p.key->flag)); - DBUG_ASSERT(FALSE); - // Stop setting bounds but continue with what we have - DBUG_RETURN(op->end_of_bound(range_no)); - } - } - } - - // Seen with e.g. 
b = 1 and c > 1 - if (part[0].bound_type == NdbIndexScanOperation::BoundLE && - part[1].bound_type == NdbIndexScanOperation::BoundGE && - memcmp(part[0].part_ptr, part[1].part_ptr, part_store_len) == 0) - { - DBUG_PRINT("info", ("replace LE/GE pair by EQ")); - part[0].bound_type= NdbIndexScanOperation::BoundEQ; - part[1].bound_type= -1; - } - // Not seen but was in previous version - if (part[0].bound_type == NdbIndexScanOperation::BoundEQ && - part[1].bound_type == NdbIndexScanOperation::BoundGE && - memcmp(part[0].part_ptr, part[1].part_ptr, part_store_len) == 0) - { - DBUG_PRINT("info", ("remove GE from EQ/GE pair")); - part[1].bound_type= -1; - } - - for (j= 0; j <= 1; j++) - { - struct part_st &p= part[j]; - // Set bound if not done with this key - if (p.key != NULL) - { - DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d", - j, i, tot_len, part_len, p.part_last, p.bound_type)); - DBUG_DUMP("info", p.part_ptr, part_store_len); - - // Set bound if not cancelled via type -1 - if (p.bound_type != -1) - { - const uchar* ptr= p.bound_ptr; - uchar buf[256]; - shrink_varchar(field, ptr, buf); - if (op->setBound(i, p.bound_type, ptr)) - ERR_RETURN(op->getNdbError()); - } - } - } - - tot_len+= part_store_len; - } - DBUG_RETURN(op->end_of_bound(range_no)); -} - -/** - Start ordered index scan in NDB. -*/ - -int ha_ndbcluster::ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, - uchar* buf, part_id_range *part_spec) -{ - int res; - bool restart; - NdbTransaction *trans= m_active_trans; - NdbIndexScanOperation *op; - - DBUG_ENTER("ha_ndbcluster::ordered_index_scan"); - DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d", - active_index, sorted, descending)); - DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname)); - m_write_op= FALSE; - - // Check that sorted seems to be initialised - DBUG_ASSERT(sorted == 0 || sorted == 1); - - if (m_active_cursor == 0) - { - restart= FALSE; - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - bool need_pk = (lm == NdbOperation::LM_Read); - if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index, - m_table)) || - op->readTuples(lm, 0, parallelism, sorted, descending, FALSE, need_pk)) - ERR_RETURN(trans->getNdbError()); - if (m_use_partition_function && part_spec != NULL && - part_spec->start_part == part_spec->end_part) - op->setPartitionId(part_spec->start_part); - m_active_cursor= op; - } else { - restart= TRUE; - op= (NdbIndexScanOperation*)m_active_cursor; - - if (m_use_partition_function && part_spec != NULL && - part_spec->start_part == part_spec->end_part) - op->setPartitionId(part_spec->start_part); - DBUG_ASSERT(op->getSorted() == sorted); - DBUG_ASSERT(op->getLockMode() == - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); - if (op->reset_bounds(m_force_send)) - DBUG_RETURN(ndb_err(m_active_trans)); - } - - { - const key_range *keys[2]= { start_key, end_key }; - res= set_bounds(op, active_index, FALSE, keys); - if (res) - DBUG_RETURN(res); - } - - if (!restart) - { - if (m_cond && m_cond->generate_scan_filter(op)) - DBUG_RETURN(ndb_err(trans)); - - if ((res= define_read_attrs(buf, op))) - { - DBUG_RETURN(res); - } - - // If table has user defined partitioning - // and no primary key, we need to read the partition id - // to support ORDER BY queries - if (m_use_partition_function && - (table_share->primary_key == MAX_KEY) && - (get_ndb_partition_id(op))) - ERR_RETURN(trans->getNdbError()); - } - - 
if (execute_no_commit(this,trans,FALSE) != 0) - DBUG_RETURN(ndb_err(trans)); - - DBUG_RETURN(next_result(buf)); -} - -static -int -guess_scan_flags(NdbOperation::LockMode lm, - const NDBTAB* tab, const MY_BITMAP* readset) -{ - int flags= 0; - flags|= (lm == NdbOperation::LM_Read) ? NdbScanOperation::SF_KeyInfo : 0; - if (tab->checkColumns(0, 0) & 2) - { - int ret = tab->checkColumns(readset->bitmap, no_bytes_in_map(readset)); - - if (ret & 2) - { // If disk columns...use disk scan - flags |= NdbScanOperation::SF_DiskScan; - } - else if ((ret & 4) == 0 && (lm == NdbOperation::LM_Exclusive)) - { - // If no mem column is set and exclusive...guess disk scan - flags |= NdbScanOperation::SF_DiskScan; - } - } - return flags; -} - - -/* - Unique index scan in NDB (full table scan with scan filter) - */ - -int ha_ndbcluster::unique_index_scan(const KEY* key_info, - const uchar *key, - uint key_len, - uchar *buf) -{ - int res; - NdbScanOperation *op; - NdbTransaction *trans= m_active_trans; - part_id_range part_spec; - - DBUG_ENTER("unique_index_scan"); - DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - int flags= guess_scan_flags(lm, m_table, table->read_set); - if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || - op->readTuples(lm, flags, parallelism)) - ERR_RETURN(trans->getNdbError()); - m_active_cursor= op; - - if (m_use_partition_function) - { - part_spec.start_part= 0; - part_spec.end_part= m_part_info->get_tot_partitions() - 1; - prune_partition_set(table, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u", - part_spec.start_part, part_spec.end_part)); - /* - If partition pruning has found no partition in set - we can return HA_ERR_END_OF_FILE - If partition pruning has found exactly one partition in set - we can optimize scan to run towards that partition only. - */ - if (part_spec.start_part > part_spec.end_part) - { - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - else if (part_spec.start_part == part_spec.end_part) - { - /* - Only one partition is required to scan, if sorted is required we - don't need it any more since output from one ordered partitioned - index is always sorted. - */ - m_active_cursor->setPartitionId(part_spec.start_part); - } - // If table has user defined partitioning - // and no primary key, we need to read the partition id - // to support ORDER BY queries - if ((table_share->primary_key == MAX_KEY) && - (get_ndb_partition_id(op))) - ERR_RETURN(trans->getNdbError()); - } - if (!m_cond) - m_cond= new ha_ndbcluster_cond; - if (!m_cond) - { - my_errno= HA_ERR_OUT_OF_MEM; - DBUG_RETURN(my_errno); - } - if (m_cond->generate_scan_filter_from_key(op, key_info, key, key_len, buf)) - DBUG_RETURN(ndb_err(trans)); - if ((res= define_read_attrs(buf, op))) - DBUG_RETURN(res); - - if (execute_no_commit(this,trans,FALSE) != 0) - DBUG_RETURN(ndb_err(trans)); - DBUG_PRINT("exit", ("Scan started successfully")); - DBUG_RETURN(next_result(buf)); -} - - -/** - Start full table scan in NDB. 
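unique_index_scan() and full_table_scan() apply the same partition-pruning rule: an empty pruned set lets the scan return HA_ERR_END_OF_FILE immediately, and a set containing exactly one partition lets the scan be pinned with setPartitionId(). A standalone sketch of that decision, with a simplified stand-in for part_id_range:

#include <cstdio>

struct PartIdRange { unsigned start_part, end_part; };  // stand-in for part_id_range

enum ScanPlan { SCAN_EMPTY, SCAN_ONE_PARTITION, SCAN_ALL_IN_RANGE };

// Mirrors the pruning decision in unique_index_scan()/full_table_scan().
static ScanPlan plan_scan(const PartIdRange &spec)
{
    if (spec.start_part > spec.end_part)
        return SCAN_EMPTY;             // no partition can match: HA_ERR_END_OF_FILE
    if (spec.start_part == spec.end_part)
        return SCAN_ONE_PARTITION;     // setPartitionId(spec.start_part)
    return SCAN_ALL_IN_RANGE;          // scan every partition in the set
}

int main()
{
    PartIdRange pruned_away{ 1, 0 }, single{ 3, 3 }, many{ 0, 7 };
    std::printf("%d %d %d\n", plan_scan(pruned_away), plan_scan(single),
                plan_scan(many));
    return 0;
}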
-*/ -int ha_ndbcluster::full_table_scan(uchar *buf) -{ - int res; - NdbScanOperation *op; - NdbTransaction *trans= m_active_trans; - part_id_range part_spec; - - DBUG_ENTER("full_table_scan"); - DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); - m_write_op= FALSE; - - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - int flags= guess_scan_flags(lm, m_table, table->read_set); - if (!(op=trans->getNdbScanOperation(m_table)) || - op->readTuples(lm, flags, parallelism)) - ERR_RETURN(trans->getNdbError()); - m_active_cursor= op; - - if (m_use_partition_function) - { - part_spec.start_part= 0; - part_spec.end_part= m_part_info->get_tot_partitions() - 1; - prune_partition_set(table, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", - part_spec.start_part, part_spec.end_part)); - /* - If partition pruning has found no partition in set - we can return HA_ERR_END_OF_FILE - If partition pruning has found exactly one partition in set - we can optimize scan to run towards that partition only. - */ - if (part_spec.start_part > part_spec.end_part) - { - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - else if (part_spec.start_part == part_spec.end_part) - { - /* - Only one partition is required to scan, if sorted is required we - don't need it any more since output from one ordered partitioned - index is always sorted. - */ - m_active_cursor->setPartitionId(part_spec.start_part); - } - // If table has user defined partitioning - // and no primary key, we need to read the partition id - // to support ORDER BY queries - if ((table_share->primary_key == MAX_KEY) && - (get_ndb_partition_id(op))) - ERR_RETURN(trans->getNdbError()); - } - - if (m_cond && m_cond->generate_scan_filter(op)) - DBUG_RETURN(ndb_err(trans)); - if ((res= define_read_attrs(buf, op))) - DBUG_RETURN(res); - - if (execute_no_commit(this,trans,FALSE) != 0) - DBUG_RETURN(ndb_err(trans)); - DBUG_PRINT("exit", ("Scan started successfully")); - DBUG_RETURN(next_result(buf)); -} - -int -ha_ndbcluster::set_auto_inc(Field *field) -{ - DBUG_ENTER("ha_ndbcluster::set_auto_inc"); - Ndb *ndb= get_ndb(); - bool read_bit= bitmap_is_set(table->read_set, field->field_index); - bitmap_set_bit(table->read_set, field->field_index); - Uint64 next_val= (Uint64) field->val_int() + 1; - if (!read_bit) - bitmap_clear_bit(table->read_set, field->field_index); -#ifndef DBUG_OFF - char buff[22]; - DBUG_PRINT("info", - ("Trying to set next auto increment value to %s", - llstr(next_val, buff))); -#endif - if (ndb->checkUpdateAutoIncrementValue(m_share->tuple_id_range, next_val)) - { - Ndb_tuple_id_range_guard g(m_share); - if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE) - == -1) - ERR_RETURN(ndb->getNdbError()); - } - DBUG_RETURN(0); -} - -/** - Insert one record into NDB. 
-*/ -int ha_ndbcluster::write_row(uchar *record) -{ - bool has_auto_increment; - uint i; - NdbTransaction *trans= m_active_trans; - NdbOperation *op; - int res; - THD *thd= table->in_use; - longlong func_value= 0; - DBUG_ENTER("ha_ndbcluster::write_row"); - - m_write_op= TRUE; - has_auto_increment= (table->next_number_field && record == table->record[0]); - if (table_share->primary_key != MAX_KEY) - { - /* - * Increase any auto_incremented primary key - */ - if (has_auto_increment) - { - int error; - - m_skip_auto_increment= FALSE; - if ((error= update_auto_increment())) - DBUG_RETURN(error); - m_skip_auto_increment= (insert_id_for_cur_row == 0); - } - } - - /* - * If IGNORE the ignore constraint violations on primary and unique keys - */ - if (!m_use_write && m_ignore_dup_key) - { - /* - compare if expression with that in start_bulk_insert() - start_bulk_insert will set parameters to ensure that each - write_row is committed individually - */ - int peek_res= peek_indexed_rows(record, NDB_INSERT); - - if (!peek_res) - { - DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); - } - if (peek_res != HA_ERR_KEY_NOT_FOUND) - DBUG_RETURN(peek_res); - } - - ha_statistic_increment(&SSV::ha_write_count); - - if (!(op= trans->getNdbOperation(m_table))) - ERR_RETURN(trans->getNdbError()); - - res= (m_use_write) ? op->writeTuple() :op->insertTuple(); - if (res != 0) - ERR_RETURN(trans->getNdbError()); - - if (m_use_partition_function) - { - uint32 part_id; - int error; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); - dbug_tmp_restore_column_map(table->read_set, old_map); - if (error) - { - m_part_info->err_value= func_value; - DBUG_RETURN(error); - } - op->setPartitionId(part_id); - } - - if (table_share->primary_key == MAX_KEY) - { - // Table has hidden primary key - Ndb *ndb= get_ndb(); - Uint64 auto_value; - uint retries= NDB_AUTO_INCREMENT_RETRIES; - int retry_sleep= 30; /* 30 milliseconds, transaction */ - for (;;) - { - Ndb_tuple_id_range_guard g(m_share); - if (ndb->getAutoIncrementValue(m_table, g.range, auto_value, 1) == -1) - { - if (--retries && - ndb->getNdbError().status == NdbError::TemporaryError) - { - my_sleep(retry_sleep); - continue; - } - ERR_RETURN(ndb->getNdbError()); - } - break; - } - if (set_hidden_key(op, table_share->fields, (const uchar*)&auto_value)) - ERR_RETURN(op->getNdbError()); - } - else - { - int error; - if ((error= set_primary_key_from_record(op, record))) - DBUG_RETURN(error); - } - - // Set non-key attribute(s) - bool set_blob_value= FALSE; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - for (i= 0; i < table_share->fields; i++) - { - Field *field= table->field[i]; - if (!(field->flags & PRI_KEY_FLAG) && - (bitmap_is_set(table->write_set, i) || !m_use_write) && - set_ndb_value(op, field, i, record-table->record[0], &set_blob_value)) - { - m_skip_auto_increment= TRUE; - dbug_tmp_restore_column_map(table->read_set, old_map); - ERR_RETURN(op->getNdbError()); - } - } - dbug_tmp_restore_column_map(table->read_set, old_map); - - if (m_use_partition_function) - { - /* - We need to set the value of the partition function value in - NDB since the NDB kernel doesn't have easy access to the function - to calculate the value. 
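The hidden-key branch of write_row() above retries getAutoIncrementValue() while the NDB error status is TemporaryError, sleeping 30 ms between attempts. A generic standalone sketch of that bounded-retry pattern; the operation, the temporary-error test and the limits here are placeholders, not the handler's actual calls:

#include <chrono>
#include <cstdio>
#include <functional>
#include <thread>

// Bounded retry for operations that can fail with a *temporary* error,
// mirroring the getAutoIncrementValue() loop in write_row() above.
static bool retry_on_temporary_error(const std::function<bool()> &op,
                                     const std::function<bool()> &is_temporary,
                                     unsigned retries,
                                     std::chrono::milliseconds sleep_between)
{
    for (;;) {
        if (op())
            return true;                   // success
        if (--retries == 0 || !is_temporary())
            return false;                  // permanent error or out of retries
        std::this_thread::sleep_for(sleep_between);
    }
}

int main()
{
    int attempts = 0;
    bool ok = retry_on_temporary_error(
        [&] { return ++attempts >= 3; },   // succeeds on the third attempt
        [] { return true; },               // pretend every failure is temporary
        10, std::chrono::milliseconds(30));
    std::printf("ok=%d attempts=%d\n", ok, attempts);
    return 0;
}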
- */ - if (func_value >= INT_MAX32) - func_value= INT_MAX32; - uint32 part_func_value= (uint32)func_value; - uint no_fields= table_share->fields; - if (table_share->primary_key == MAX_KEY) - no_fields++; - op->setValue(no_fields, part_func_value); - } - - if (unlikely(m_slow_path)) - { - /* - ignore TNTO_NO_LOGGING for slave thd. It is used to indicate - log-slave-updates option. This is instead handled in the - injector thread, by looking explicitly at the - opt_log_slave_updates flag. - */ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd->slave_thread) - op->setAnyValue(thd->server_id); - else if (thd_ndb->trans_options & TNTO_NO_LOGGING) - op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); - } - m_rows_changed++; - - /* - Execute write operation - NOTE When doing inserts with many values in - each INSERT statement it should not be necessary - to NoCommit the transaction between each row. - Find out how this is detected! - */ - m_rows_inserted++; - no_uncommitted_rows_update(1); - m_bulk_insert_not_flushed= TRUE; - if ((m_rows_to_insert == (ha_rows) 1) || - ((m_rows_inserted % m_bulk_insert_rows) == 0) || - m_primary_key_update || - set_blob_value) - { - // Send rows to NDB - DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted: %d bulk_insert_rows: %d", - (int)m_rows_inserted, (int)m_bulk_insert_rows)); - - m_bulk_insert_not_flushed= FALSE; - if (m_transaction_on) - { - if (execute_no_commit(this,trans,FALSE) != 0) - { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); - } - } - else - { - if (execute_commit(this,trans) != 0) - { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); - } - if (trans->restart() != 0) - { - DBUG_ASSERT(0); - DBUG_RETURN(-1); - } - } - } - if ((has_auto_increment) && (m_skip_auto_increment)) - { - int ret_val; - if ((ret_val= set_auto_inc(table->next_number_field))) - { - DBUG_RETURN(ret_val); - } - } - m_skip_auto_increment= TRUE; - - DBUG_PRINT("exit",("ok")); - DBUG_RETURN(0); -} - - -/** - Compare if a key in a row has changed. -*/ - -int ha_ndbcluster::key_cmp(uint keynr, const uchar * old_row, - const uchar * new_row) -{ - KEY_PART_INFO *key_part=table->key_info[keynr].key_part; - KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts; - - for (; key_part != end ; key_part++) - { - if (key_part->null_bit) - { - if ((old_row[key_part->null_offset] & key_part->null_bit) != - (new_row[key_part->null_offset] & key_part->null_bit)) - return 1; - } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) - { - - if (key_part->field->cmp_binary((old_row + key_part->offset), - (new_row + key_part->offset), - (ulong) key_part->length)) - return 1; - } - else - { - if (memcmp(old_row+key_part->offset, new_row+key_part->offset, - key_part->length)) - return 1; - } - } - return 0; -} - -/** - Update one record in NDB using primary key. -*/ - -int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data) -{ - THD *thd= table->in_use; - NdbTransaction *trans= m_active_trans; - NdbScanOperation* cursor= m_active_cursor; - NdbOperation *op; - uint i; - uint32 old_part_id= 0, new_part_id= 0; - int error; - longlong func_value; - bool pk_update= (table_share->primary_key != MAX_KEY && - key_cmp(table_share->primary_key, old_data, new_data)); - DBUG_ENTER("update_row"); - m_write_op= TRUE; - - /* - * If IGNORE the ignore constraint violations on primary and unique keys, - * but check that it is not part of INSERT ... 
ON DUPLICATE KEY UPDATE - */ - if (m_ignore_dup_key && (thd->lex->sql_command == SQLCOM_UPDATE || - thd->lex->sql_command == SQLCOM_UPDATE_MULTI)) - { - NDB_WRITE_OP write_op= (pk_update) ? NDB_PK_UPDATE : NDB_UPDATE; - int peek_res= peek_indexed_rows(new_data, write_op); - - if (!peek_res) - { - DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); - } - if (peek_res != HA_ERR_KEY_NOT_FOUND) - DBUG_RETURN(peek_res); - } - - ha_statistic_increment(&SSV::ha_update_count); - - if (m_use_partition_function && - (error= get_parts_for_update(old_data, new_data, table->record[0], - m_part_info, &old_part_id, &new_part_id, - &func_value))) - { - m_part_info->err_value= func_value; - DBUG_RETURN(error); - } - - /* - * Check for update of primary key or partition change - * for special handling - */ - if (pk_update || old_part_id != new_part_id) - { - int read_res, insert_res, delete_res, undo_res; - - DBUG_PRINT("info", ("primary key update or partition change, " - "doing read+delete+insert")); - // Get all old fields, since we optimize away fields not in query - read_res= complemented_read(old_data, new_data, old_part_id); - if (read_res) - { - DBUG_PRINT("info", ("read failed")); - DBUG_RETURN(read_res); - } - // Delete old row - m_primary_key_update= TRUE; - delete_res= delete_row(old_data); - m_primary_key_update= FALSE; - if (delete_res) - { - DBUG_PRINT("info", ("delete failed")); - DBUG_RETURN(delete_res); - } - // Insert new row - DBUG_PRINT("info", ("delete succeded")); - m_primary_key_update= TRUE; - /* - If we are updating a primary key with auto_increment - then we need to update the auto_increment counter - */ - if (table->found_next_number_field && - bitmap_is_set(table->write_set, - table->found_next_number_field->field_index) && - (error= set_auto_inc(table->found_next_number_field))) - { - DBUG_RETURN(error); - } - insert_res= write_row(new_data); - m_primary_key_update= FALSE; - if (insert_res) - { - DBUG_PRINT("info", ("insert failed")); - if (trans->commitStatus() == NdbConnection::Started) - { - // Undo delete_row(old_data) - m_primary_key_update= TRUE; - undo_res= write_row((uchar *)old_data); - if (undo_res) - push_warning(current_thd, - Sql_condition::WARN_LEVEL_WARN, - undo_res, - "NDB failed undoing delete at primary key update"); - m_primary_key_update= FALSE; - } - DBUG_RETURN(insert_res); - } - DBUG_PRINT("info", ("delete+insert succeeded")); - DBUG_RETURN(0); - } - /* - If we are updating a unique key with auto_increment - then we need to update the auto_increment counter - */ - if (table->found_next_number_field && - bitmap_is_set(table->write_set, - table->found_next_number_field->field_index) && - (error= set_auto_inc(table->found_next_number_field))) - { - DBUG_RETURN(error); - } - if (cursor) - { - /* - We are scanning records and want to update the record - that was just found, call updateTuple on the cursor - to take over the lock to a new update operation - And thus setting the primary key of the record from - the active record in cursor - */ - DBUG_PRINT("info", ("Calling updateTuple on cursor")); - if (!(op= cursor->updateCurrentTuple())) - ERR_RETURN(trans->getNdbError()); - m_lock_tuple= FALSE; - m_ops_pending++; - if (uses_blob_value()) - m_blobs_pending= TRUE; - if (m_use_partition_function) - cursor->setPartitionId(new_part_id); - } - else - { - if (!(op= trans->getNdbOperation(m_table)) || - op->updateTuple() != 0) - ERR_RETURN(trans->getNdbError()); - - if (m_use_partition_function) - op->setPartitionId(new_part_id); - if (table_share->primary_key == MAX_KEY) - { - // 
This table has no primary key, use "hidden" primary key - DBUG_PRINT("info", ("Using hidden key")); - - // Require that the PK for this record has previously been - // read into m_ref - DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH); - - if (set_hidden_key(op, table->s->fields, m_ref)) - ERR_RETURN(op->getNdbError()); - } - else - { - int res; - if ((res= set_primary_key_from_record(op, old_data))) - DBUG_RETURN(res); - } - } - - m_rows_changed++; - - // Set non-key attribute(s) - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - for (i= 0; i < table_share->fields; i++) - { - Field *field= table->field[i]; - if (bitmap_is_set(table->write_set, i) && - (!(field->flags & PRI_KEY_FLAG)) && - set_ndb_value(op, field, i, new_data - table->record[0])) - { - dbug_tmp_restore_column_map(table->read_set, old_map); - ERR_RETURN(op->getNdbError()); - } - } - dbug_tmp_restore_column_map(table->read_set, old_map); - - if (m_use_partition_function) - { - if (func_value >= INT_MAX32) - func_value= INT_MAX32; - uint32 part_func_value= (uint32)func_value; - uint no_fields= table_share->fields; - if (table_share->primary_key == MAX_KEY) - no_fields++; - op->setValue(no_fields, part_func_value); - } - - if (unlikely(m_slow_path)) - { - /* - ignore TNTO_NO_LOGGING for slave thd. It is used to indicate - log-slave-updates option. This is instead handled in the - injector thread, by looking explicitly at the - opt_log_slave_updates flag. - */ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd->slave_thread) - op->setAnyValue(thd->server_id); - else if (thd_ndb->trans_options & TNTO_NO_LOGGING) - op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); - } - /* - Execute update operation if we are not doing a scan for update - and there exist UPDATE AFTER triggers - */ - - if ((!cursor || m_update_cannot_batch) && - execute_no_commit(this,trans,false) != 0) { - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); - } - - DBUG_RETURN(0); -} - - -/** - Delete one record from NDB, using primary key . -*/ - -int ha_ndbcluster::delete_row(const uchar *record) -{ - THD *thd= table->in_use; - NdbTransaction *trans= m_active_trans; - NdbScanOperation* cursor= m_active_cursor; - NdbOperation *op; - uint32 part_id; - int error; - DBUG_ENTER("delete_row"); - m_write_op= TRUE; - - ha_statistic_increment(&SSV::ha_delete_count); - m_rows_changed++; - - if (m_use_partition_function && - (error= get_part_for_delete(record, table->record[0], m_part_info, - &part_id))) - { - DBUG_RETURN(error); - } - - if (cursor) - { - /* - We are scanning records and want to delete the record - that was just found, call deleteTuple on the cursor - to take over the lock to a new delete operation - And thus setting the primary key of the record from - the active record in cursor - */ - DBUG_PRINT("info", ("Calling deleteTuple on cursor")); - if (cursor->deleteCurrentTuple() != 0) - ERR_RETURN(trans->getNdbError()); - m_lock_tuple= FALSE; - m_ops_pending++; - - if (m_use_partition_function) - cursor->setPartitionId(part_id); - - no_uncommitted_rows_update(-1); - - if (unlikely(m_slow_path)) - { - /* - ignore TNTO_NO_LOGGING for slave thd. It is used to indicate - log-slave-updates option. This is instead handled in the - injector thread, by looking explicitly at the - opt_log_slave_updates flag. 
- */ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd->slave_thread) - ((NdbOperation *)trans->getLastDefinedOperation())-> - setAnyValue(thd->server_id); - else if (thd_ndb->trans_options & TNTO_NO_LOGGING) - ((NdbOperation *)trans->getLastDefinedOperation())-> - setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); - } - if (!(m_primary_key_update || m_delete_cannot_batch)) - // If deleting from cursor, NoCommit will be handled in next_result - DBUG_RETURN(0); - } - else - { - - if (!(op=trans->getNdbOperation(m_table)) || - op->deleteTuple() != 0) - ERR_RETURN(trans->getNdbError()); - - if (m_use_partition_function) - op->setPartitionId(part_id); - - no_uncommitted_rows_update(-1); - - if (table_share->primary_key == MAX_KEY) - { - // This table has no primary key, use "hidden" primary key - DBUG_PRINT("info", ("Using hidden key")); - - if (set_hidden_key(op, table->s->fields, m_ref)) - ERR_RETURN(op->getNdbError()); - } - else - { - if ((error= set_primary_key_from_record(op, record))) - DBUG_RETURN(error); - } - - if (unlikely(m_slow_path)) - { - /* - ignore TNTO_NO_LOGGING for slave thd. It is used to indicate - log-slave-updates option. This is instead handled in the - injector thread, by looking explicitly at the - opt_log_slave_updates flag. - */ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (thd->slave_thread) - op->setAnyValue(thd->server_id); - else if (thd_ndb->trans_options & TNTO_NO_LOGGING) - op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); - } - } - - // Execute delete operation - if (execute_no_commit(this,trans,FALSE) != 0) { - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); - } - DBUG_RETURN(0); -} - -/** - Unpack a record read from NDB. - - @param buf Buffer to store read row - - @note - The data for each row is read directly into the - destination buffer. This function is primarily - called in order to check if any fields should be - set to null. -*/ - -void ndb_unpack_record(TABLE *table, NdbValue *value, - MY_BITMAP *defined, uchar *buf) -{ - Field **p_field= table->field, *field= *p_field; - my_ptrdiff_t row_offset= (my_ptrdiff_t) (buf - table->record[0]); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); - DBUG_ENTER("ndb_unpack_record"); - - /* - Set the filler bits of the null byte, since they are - not touched in the code below. - - The filler bits are the MSBs in the last null byte - */ - if (table->s->null_bytes > 0) - buf[table->s->null_bytes - 1]|= 256U - (1U << - table->s->last_null_bit_pos); - /* - Set null flag(s) - */ - for ( ; field; - p_field++, value++, field= *p_field) - { - field->set_notnull(row_offset); - if ((*value).ptr) - { - if (!(field->flags & BLOB_FLAG)) - { - int is_null= (*value).rec->isNULL(); - if (is_null) - { - if (is_null > 0) - { - DBUG_PRINT("info",("[%u] NULL", - (*value).rec->getColumn()->getColumnNo())); - field->set_null(row_offset); - } - else - { - DBUG_PRINT("info",("[%u] UNDEFINED", - (*value).rec->getColumn()->getColumnNo())); - bitmap_clear_bit(defined, - (*value).rec->getColumn()->getColumnNo()); - } - } - else if (field->type() == MYSQL_TYPE_BIT) - { - Field_bit *field_bit= static_cast(field); - - /* - Move internal field pointer to point to 'buf'. Calling - the correct member function directly since we know the - type of the object. 
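ndb_unpack_record() above begins by forcing the unused filler bits of the record's last null byte to 1, using the mask 256U - (1U << last_null_bit_pos), since the per-column loop only touches the real null bits. A standalone sketch of that mask computation:

#include <cstdio>

// Mirrors the filler-bit initialisation in ndb_unpack_record(): set every
// bit of the last null byte from 'last_null_bit_pos' upward to 1, leaving
// the lower (real) null bits untouched.
static unsigned char set_filler_bits(unsigned char last_null_byte,
                                     unsigned last_null_bit_pos)
{
    return (unsigned char)(last_null_byte | (256U - (1U << last_null_bit_pos)));
}

int main()
{
    // With 3 null bits in use, bits 3..7 are filler and forced to 1.
    std::printf("0x%02X\n", (unsigned)set_filler_bits(0x05, 3));   // prints 0xFD
    return 0;
}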
- */ - field_bit->Field_bit::move_field_offset(row_offset); - if (field->pack_length() < 5) - { - DBUG_PRINT("info", ("bit field H'%.8X", - (*value).rec->u_32_value())); - field_bit->Field_bit::store((longlong) (*value).rec->u_32_value(), - FALSE); - } - else - { - DBUG_PRINT("info", ("bit field H'%.8X%.8X", - *(Uint32 *)(*value).rec->aRef(), - *((Uint32 *)(*value).rec->aRef()+1))); -#ifdef WORDS_BIGENDIAN - /* lsw is stored first */ - Uint32 *buf= (Uint32 *)(*value).rec->aRef(); - field_bit->Field_bit::store((((longlong)*buf) - & 0x000000000FFFFFFFFLL) - | - ((((longlong)*(buf+1)) << 32) - & 0xFFFFFFFF00000000LL), - TRUE); -#else - field_bit->Field_bit::store((longlong) - (*value).rec->u_64_value(), TRUE); -#endif - } - /* - Move back internal field pointer to point to original - value (usually record[0]). - */ - field_bit->Field_bit::move_field_offset(-row_offset); - DBUG_PRINT("info",("[%u] SET", - (*value).rec->getColumn()->getColumnNo())); - DBUG_DUMP("info", field->ptr, field->pack_length()); - } - else - { - DBUG_PRINT("info",("[%u] SET", - (*value).rec->getColumn()->getColumnNo())); - DBUG_DUMP("info", field->ptr, field->pack_length()); - } - } - else - { - NdbBlob *ndb_blob= (*value).blob; - uint col_no = ndb_blob->getColumn()->getColumnNo(); - int isNull; - ndb_blob->getDefined(isNull); - if (isNull == 1) - { - DBUG_PRINT("info",("[%u] NULL", col_no)); - field->set_null(row_offset); - } - else if (isNull == -1) - { - DBUG_PRINT("info",("[%u] UNDEFINED", col_no)); - bitmap_clear_bit(defined, col_no); - } - else - { -#ifndef DBUG_OFF - // pointer vas set in get_ndb_blobs_value - Field_blob *field_blob= (Field_blob*)field; - uchar *ptr; - field_blob->get_ptr(&ptr, row_offset); - uint32 len= field_blob->get_length(row_offset); - DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u", - col_no, (long) ptr, len)); -#endif - } - } - } - } - dbug_tmp_restore_column_map(table->write_set, old_map); - DBUG_VOID_RETURN; -} - -void ha_ndbcluster::unpack_record(uchar *buf) -{ - ndb_unpack_record(table, m_value, 0, buf); -#ifndef DBUG_OFF - // Read and print all values that was fetched - if (table_share->primary_key == MAX_KEY) - { - // Table with hidden primary key - int hidden_no= table_share->fields; - const NDBTAB *tab= m_table; - char buff[22]; - const NDBCOL *hidden_col= tab->getColumn(hidden_no); - const NdbRecAttr* rec= m_value[hidden_no].rec; - DBUG_ASSERT(rec); - DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no, - hidden_col->getName(), - llstr(rec->u_64_value(), buff))); - } - //DBUG_EXECUTE("value", print_results();); -#endif -} - -/** - Utility function to print/dump the fetched field. - - To avoid unnecessary work, wrap in DBUG_EXECUTE as in: - DBUG_EXECUTE("value", print_results();); -*/ - -void ha_ndbcluster::print_results() -{ - DBUG_ENTER("print_results"); - -#ifndef DBUG_OFF - - char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH]; - String type(buf_type, sizeof(buf_type), &my_charset_bin); - String val(buf_val, sizeof(buf_val), &my_charset_bin); - for (uint f= 0; f < table_share->fields; f++) - { - /* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */ - char buf[2000]; - Field *field; - void* ptr; - NdbValue value; - - buf[0]= 0; - field= table->field[f]; - if (!(value= m_value[f]).ptr) - { - strmov(buf, "not read"); - goto print_value; - } - - ptr= field->ptr; - - if (! 
(field->flags & BLOB_FLAG)) - { - if (value.rec->isNULL()) - { - strmov(buf, "NULL"); - goto print_value; - } - type.length(0); - val.length(0); - field->sql_type(type); - field->val_str(&val); - my_snprintf(buf, sizeof(buf), "%s %s", type.c_ptr(), val.c_ptr()); - } - else - { - NdbBlob *ndb_blob= value.blob; - bool isNull= TRUE; - ndb_blob->getNull(isNull); - if (isNull) - strmov(buf, "NULL"); - } - -print_value: - DBUG_PRINT("value", ("%u,%s: %s", f, field->field_name, buf)); - } -#endif - DBUG_VOID_RETURN; -} - - -int ha_ndbcluster::index_init(uint index, bool sorted) -{ - DBUG_ENTER("ha_ndbcluster::index_init"); - DBUG_PRINT("enter", ("index: %u sorted: %d", index, sorted)); - active_index= index; - m_sorted= sorted; - /* - Locks are are explicitly released in scan - unless m_lock.type == TL_READ_HIGH_PRIORITY - and no sub-sequent call to unlock_row() - */ - m_lock_tuple= FALSE; - DBUG_RETURN(0); -} - - -int ha_ndbcluster::index_end() -{ - DBUG_ENTER("ha_ndbcluster::index_end"); - DBUG_RETURN(close_scan()); -} - -/** - Check if key contains null. -*/ -static -int -check_null_in_key(const KEY* key_info, const uchar *key, uint key_len) -{ - KEY_PART_INFO *curr_part, *end_part; - const uchar* end_ptr= key + key_len; - curr_part= key_info->key_part; - end_part= curr_part + key_info->user_defined_key_parts; - - for (; curr_part != end_part && key < end_ptr; curr_part++) - { - if (curr_part->null_bit && *key) - return 1; - - key += curr_part->store_length; - } - return 0; -} - -int ha_ndbcluster::index_read(uchar *buf, - const uchar *key, uint key_len, - enum ha_rkey_function find_flag) -{ - key_range start_key; - bool descending= FALSE; - int rc; - DBUG_ENTER("ha_ndbcluster::index_read"); - DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", - active_index, key_len, find_flag)); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - - start_key.key= key; - start_key.length= key_len; - start_key.flag= find_flag; - descending= FALSE; - switch (find_flag) { - case HA_READ_KEY_OR_PREV: - case HA_READ_BEFORE_KEY: - case HA_READ_PREFIX_LAST: - case HA_READ_PREFIX_LAST_OR_PREV: - descending= TRUE; - break; - default: - break; - } - rc= read_range_first_to_buf(&start_key, 0, descending, - m_sorted, buf); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -int ha_ndbcluster::index_next(uchar *buf) -{ - int rc; - DBUG_ENTER("ha_ndbcluster::index_next"); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - ha_statistic_increment(&SSV::ha_read_next_count); - rc= next_result(buf); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -int ha_ndbcluster::index_prev(uchar *buf) -{ - int rc; - DBUG_ENTER("ha_ndbcluster::index_prev"); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - ha_statistic_increment(&SSV::ha_read_prev_count); - rc= next_result(buf); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -int ha_ndbcluster::index_first(uchar *buf) -{ - int rc; - DBUG_ENTER("ha_ndbcluster::index_first"); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - ha_statistic_increment(&SSV::ha_read_first_count); - // Start the ordered index scan and fetch the first row - - // Only HA_READ_ORDER indexes get called by index_first - rc= ordered_index_scan(0, 0, TRUE, FALSE, buf, NULL); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -int ha_ndbcluster::index_last(uchar *buf) -{ - int rc; - DBUG_ENTER("ha_ndbcluster::index_last"); - 
MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - ha_statistic_increment(&SSV::ha_read_last_count); - rc= ordered_index_scan(0, 0, TRUE, TRUE, buf, NULL); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - -int ha_ndbcluster::index_read_last(uchar * buf, const uchar * key, uint key_len) -{ - DBUG_ENTER("ha_ndbcluster::index_read_last"); - DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST)); -} - -int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool desc, bool sorted, - uchar* buf) -{ - part_id_range part_spec; - ndb_index_type type= get_index_type(active_index); - const KEY* key_info= table->key_info+active_index; - int error; - DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf"); - DBUG_PRINT("info", ("desc: %d, sorted: %d", desc, sorted)); - - if (m_use_partition_function) - { - get_partition_set(table, buf, active_index, start_key, &part_spec); - DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", - part_spec.start_part, part_spec.end_part)); - /* - If partition pruning has found no partition in set - we can return HA_ERR_END_OF_FILE - If partition pruning has found exactly one partition in set - we can optimize scan to run towards that partition only. - */ - if (part_spec.start_part > part_spec.end_part) - { - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - else if (part_spec.start_part == part_spec.end_part) - { - /* - Only one partition is required to scan, if sorted is required we - don't need it any more since output from one ordered partitioned - index is always sorted. - */ - sorted= FALSE; - } - } - - m_write_op= FALSE; - switch (type){ - case PRIMARY_KEY_ORDERED_INDEX: - case PRIMARY_KEY_INDEX: - if (start_key && - start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT) - { - if (m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); - error= pk_read(start_key->key, start_key->length, buf, - part_spec.start_part); - DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); - } - break; - case UNIQUE_ORDERED_INDEX: - case UNIQUE_INDEX: - if (start_key && start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, start_key->key, start_key->length)) - { - if (m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); - - error= unique_index_read(start_key->key, start_key->length, buf); - DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? 
HA_ERR_END_OF_FILE : error); - } - else if (type == UNIQUE_INDEX) - DBUG_RETURN(unique_index_scan(key_info, - start_key->key, - start_key->length, - buf)); - break; - default: - break; - } - // Start the ordered index scan and fetch the first row - DBUG_RETURN(ordered_index_scan(start_key, end_key, sorted, desc, buf, - &part_spec)); -} - -int ha_ndbcluster::read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_r, bool sorted) -{ - int rc; - uchar* buf= table->record[0]; - DBUG_ENTER("ha_ndbcluster::read_range_first"); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - rc= read_range_first_to_buf(start_key, end_key, FALSE, - sorted, buf); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - -int ha_ndbcluster::read_range_next() -{ - int rc; - DBUG_ENTER("ha_ndbcluster::read_range_next"); - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - rc= next_result(table->record[0]); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -int ha_ndbcluster::rnd_init(bool scan) -{ - NdbScanOperation *cursor= m_active_cursor; - DBUG_ENTER("rnd_init"); - DBUG_PRINT("enter", ("scan: %d", scan)); - // Check if scan is to be restarted - if (cursor) - { - if (!scan) - DBUG_RETURN(1); - if (cursor->restart(m_force_send) != 0) - { - DBUG_ASSERT(0); - DBUG_RETURN(-1); - } - } - index_init(table_share->primary_key, 0); - DBUG_RETURN(0); -} - -int ha_ndbcluster::close_scan() -{ - NdbTransaction *trans= m_active_trans; - DBUG_ENTER("close_scan"); - - m_multi_cursor= 0; - if (!m_active_cursor && !m_multi_cursor) - DBUG_RETURN(0); - - NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor; - - if (m_lock_tuple) - { - /* - Lock level m_lock.type either TL_WRITE_ALLOW_WRITE - (SELECT FOR UPDATE) or TL_READ_WITH_SHARED_LOCKS (SELECT - LOCK WITH SHARE MODE) and row was not explictly unlocked - with unlock_row() call - */ - NdbOperation *op; - // Lock row - DBUG_PRINT("info", ("Keeping lock on scanned row")); - - if (!(op= cursor->lockCurrentTuple())) - { - m_lock_tuple= FALSE; - ERR_RETURN(trans->getNdbError()); - } - m_ops_pending++; - } - m_lock_tuple= FALSE; - if (m_ops_pending) - { - /* - Take over any pending transactions to the - deleteing/updating transaction before closing the scan - */ - DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); - if (execute_no_commit(this,trans,FALSE) != 0) { - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); - } - m_ops_pending= 0; - } - - cursor->close(m_force_send, TRUE); - m_active_cursor= m_multi_cursor= NULL; - DBUG_RETURN(0); -} - -int ha_ndbcluster::rnd_end() -{ - DBUG_ENTER("rnd_end"); - DBUG_RETURN(close_scan()); -} - - -int ha_ndbcluster::rnd_next(uchar *buf) -{ - int rc; - DBUG_ENTER("rnd_next"); - MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str, - TRUE); - ha_statistic_increment(&SSV::ha_read_rnd_next_count); - - if (!m_active_cursor) - rc= full_table_scan(buf); - else - rc= next_result(buf); - MYSQL_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - - -/** - An "interesting" record has been found and it's pk - retrieved by calling position. Now it's time to read - the record from db once again. 
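For tables with a hidden primary key and user-defined partitioning, position() above stores the partition id directly after the hidden key in ref, and rnd_pos() reads it back from the same offset. A standalone sketch of that packing, assuming the 8-byte hidden key length used in the code above:

#include <cstdint>
#include <cstdio>
#include <cstring>

// ref layout for hidden-PK tables with user-defined partitioning:
// [ 8-byte hidden key ][ 4-byte partition id ]
static const size_t HIDDEN_KEY_LEN = 8;

static void pack_ref(unsigned char *ref, uint64_t hidden_key, uint32_t part_id)
{
    std::memcpy(ref, &hidden_key, HIDDEN_KEY_LEN);
    std::memcpy(ref + HIDDEN_KEY_LEN, &part_id, sizeof(part_id));
}

static void unpack_ref(const unsigned char *ref, uint64_t *hidden_key,
                       uint32_t *part_id)
{
    std::memcpy(hidden_key, ref, HIDDEN_KEY_LEN);
    std::memcpy(part_id, ref + HIDDEN_KEY_LEN, sizeof(*part_id));
}

int main()
{
    unsigned char ref[HIDDEN_KEY_LEN + sizeof(uint32_t)];
    uint64_t key; uint32_t part;
    pack_ref(ref, 0x1122334455667788ULL, 5);
    unpack_ref(ref, &key, &part);
    std::printf("key=%llx part=%u\n", (unsigned long long)key, (unsigned)part);
    return 0;
}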
-*/ - -int ha_ndbcluster::rnd_pos(uchar *buf, uchar *pos) -{ - int rc; - DBUG_ENTER("rnd_pos"); - MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str, - FALSE); - ha_statistic_increment(&SSV::ha_read_rnd_count); - // The primary key for the record is stored in pos - // Perform a pk_read using primary key "index" - { - part_id_range part_spec; - uint key_length= ref_length; - if (m_use_partition_function) - { - if (table_share->primary_key == MAX_KEY) - { - /* - The partition id has been fetched from ndb - and has been stored directly after the hidden key - */ - DBUG_DUMP("key+part", pos, key_length); - key_length= ref_length - sizeof(m_part_id); - part_spec.start_part= part_spec.end_part= *(uint32 *)(pos + key_length); - } - else - { - key_range key_spec; - KEY *key_info= table->key_info + table_share->primary_key; - key_spec.key= pos; - key_spec.length= key_length; - key_spec.flag= HA_READ_KEY_EXACT; - get_full_part_id_from_key(table, buf, key_info, - &key_spec, &part_spec); - DBUG_ASSERT(part_spec.start_part == part_spec.end_part); - } - DBUG_PRINT("info", ("partition id %u", part_spec.start_part)); - } - DBUG_DUMP("key", pos, key_length); - rc= pk_read(pos, key_length, buf, part_spec.start_part); - MYSQL_READ_ROW_DONE(rc); - DBUG_RETURN(rc); - } -} - - -/** - Store the primary key of this record in ref - variable, so that the row can be retrieved again later - using "reference" in rnd_pos. -*/ - -void ha_ndbcluster::position(const uchar *record) -{ - KEY *key_info; - KEY_PART_INFO *key_part; - KEY_PART_INFO *end; - uchar *buff; - uint key_length; - - DBUG_ENTER("position"); - - if (table_share->primary_key != MAX_KEY) - { - key_length= ref_length; - key_info= table->key_info + table_share->primary_key; - key_part= key_info->key_part; - end= key_part + key_info->user_defined_key_parts; - buff= ref; - - for (; key_part != end; key_part++) - { - if (key_part->null_bit) { - /* Store 0 if the key part is a NULL part */ - if (record[key_part->null_offset] - & key_part->null_bit) { - *buff++= 1; - continue; - } - *buff++= 0; - } - - size_t len = key_part->length; - const uchar * ptr = record + key_part->offset; - Field *field = key_part->field; - if (field->type() == MYSQL_TYPE_VARCHAR) - { - if (((Field_varstring*)field)->length_bytes == 1) - { - /** - * Keys always use 2 bytes length - */ - buff[0] = ptr[0]; - buff[1] = 0; - memcpy(buff+2, ptr + 1, len); - } - else - { - memcpy(buff, ptr, len + 2); - } - len += 2; - } - else - { - memcpy(buff, ptr, len); - } - buff += len; - } - } - else - { - // No primary key, get hidden key - DBUG_PRINT("info", ("Getting hidden key")); - // If table has user defined partition save the partition id as well - if(m_use_partition_function) - { - DBUG_PRINT("info", ("Saving partition id %u", m_part_id)); - key_length= ref_length - sizeof(m_part_id); - memcpy(ref+key_length, (void *)&m_part_id, sizeof(m_part_id)); - } - else - key_length= ref_length; -#ifndef DBUG_OFF - int hidden_no= table->s->fields; - const NDBTAB *tab= m_table; - const NDBCOL *hidden_col= tab->getColumn(hidden_no); - DBUG_ASSERT(hidden_col->getPrimaryKey() && - hidden_col->getAutoIncrement() && - key_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH); -#endif - memcpy(ref, m_ref, key_length); - } -#ifndef DBUG_OFF - if (table_share->primary_key == MAX_KEY && m_use_partition_function) - DBUG_DUMP("key+part", ref, key_length+sizeof(m_part_id)); -#endif - DBUG_DUMP("ref", ref, key_length); - DBUG_VOID_RETURN; -} - - -int ha_ndbcluster::info(uint flag) -{ - int result= 0; - 
DBUG_ENTER("info"); - DBUG_PRINT("enter", ("flag: %d", flag)); - - if (flag & HA_STATUS_POS) - DBUG_PRINT("info", ("HA_STATUS_POS")); - if (flag & HA_STATUS_NO_LOCK) - DBUG_PRINT("info", ("HA_STATUS_NO_LOCK")); - if (flag & HA_STATUS_TIME) - DBUG_PRINT("info", ("HA_STATUS_TIME")); - if (flag & HA_STATUS_VARIABLE) - { - DBUG_PRINT("info", ("HA_STATUS_VARIABLE")); - if (m_table_info) - { - if (m_ha_not_exact_count) - stats.records= 100; - else - result= records_update(); - } - else - { - if ((my_errno= check_ndb_connection())) - DBUG_RETURN(my_errno); - Ndb *ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); - struct Ndb_statistics stat; - if (ndb->setDatabaseName(m_dbname)) - { - DBUG_RETURN(my_errno= HA_ERR_OUT_OF_MEM); - } - if (THDVAR(current_thd, use_exact_count) && - (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)) - == 0) - { - stats.mean_rec_length= stat.row_size; - stats.data_file_length= stat.fragment_memory; - stats.records= stat.row_count; - } - else - { - stats.mean_rec_length= 0; - stats.records= 100; - } - } - } - if (flag & HA_STATUS_CONST) - { - DBUG_PRINT("info", ("HA_STATUS_CONST")); - set_rec_per_key(); - } - if (flag & HA_STATUS_ERRKEY) - { - DBUG_PRINT("info", ("HA_STATUS_ERRKEY")); - errkey= m_dupkey; - } - if (flag & HA_STATUS_AUTO) - { - DBUG_PRINT("info", ("HA_STATUS_AUTO")); - if (m_table && table->found_next_number_field) - { - if ((my_errno= check_ndb_connection())) - DBUG_RETURN(my_errno); - Ndb *ndb= get_ndb(); - Ndb_tuple_id_range_guard g(m_share); - - Uint64 auto_increment_value64; - if (ndb->readAutoIncrementValue(m_table, g.range, - auto_increment_value64) == -1) - { - const NdbError err= ndb->getNdbError(); - sql_print_error("Error %lu in readAutoIncrementValue(): %s", - (ulong) err.code, err.message); - stats.auto_increment_value= ~(ulonglong)0; - } - else - stats.auto_increment_value= (ulonglong)auto_increment_value64; - } - } - - if(result == -1) - result= HA_ERR_NO_CONNECTION; - - DBUG_RETURN(result); -} - - -void ha_ndbcluster::get_dynamic_partition_info(PARTITION_STATS *stat_info, - uint part_id) -{ - /* - This functions should be fixed. Suggested fix: to - implement ndb function which retrives the statistics - about ndb partitions. 
- */ - bzero((char*) stat_info, sizeof(PARTITION_STATS)); - return; -} - - -int ha_ndbcluster::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("extra"); - switch (operation) { - case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ - DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); - DBUG_PRINT("info", ("Ignoring duplicate key")); - m_ignore_dup_key= TRUE; - break; - case HA_EXTRA_NO_IGNORE_DUP_KEY: - DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); - m_ignore_dup_key= FALSE; - break; - case HA_EXTRA_IGNORE_NO_KEY: - DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY")); - DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); - m_ignore_no_key= TRUE; - break; - case HA_EXTRA_NO_IGNORE_NO_KEY: - DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY")); - DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit")); - m_ignore_no_key= FALSE; - break; - case HA_EXTRA_WRITE_CAN_REPLACE: - DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); - if (!m_has_unique_index || - current_thd->slave_thread) /* always set if slave, quick fix for bug 27378 */ - { - DBUG_PRINT("info", ("Turning ON use of write instead of insert")); - m_use_write= TRUE; - } - break; - case HA_EXTRA_WRITE_CANNOT_REPLACE: - DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE")); - DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); - m_use_write= FALSE; - break; - case HA_EXTRA_DELETE_CANNOT_BATCH: - DBUG_PRINT("info", ("HA_EXTRA_DELETE_CANNOT_BATCH")); - m_delete_cannot_batch= TRUE; - break; - case HA_EXTRA_UPDATE_CANNOT_BATCH: - DBUG_PRINT("info", ("HA_EXTRA_UPDATE_CANNOT_BATCH")); - m_update_cannot_batch= TRUE; - break; - default: - break; - } - - DBUG_RETURN(0); -} - - -int ha_ndbcluster::reset() -{ - DBUG_ENTER("ha_ndbcluster::reset"); - if (m_cond) - { - m_cond->cond_clear(); - } - - /* - Regular partition pruning will set the bitmap appropriately. - Some queries like ALTER TABLE doesn't use partition pruning and - thus the 'used_partitions' bitmap needs to be initialized - */ - if (m_part_info) - bitmap_set_all(&m_part_info->used_partitions); - - /* reset flags set by extra calls */ - m_ignore_dup_key= FALSE; - m_use_write= FALSE; - m_ignore_no_key= FALSE; - m_delete_cannot_batch= FALSE; - m_update_cannot_batch= FALSE; - - DBUG_RETURN(0); -} - - -/** - Start of an insert, remember number of rows to be inserted, it will - be used in write_row and get_autoincrement to send an optimal number - of rows in each roundtrip to the server. - - @param - rows number of rows to insert, 0 if unknown -*/ - -void ha_ndbcluster::start_bulk_insert(ha_rows rows) -{ - int bytes, batch; - const NDBTAB *tab= m_table; - - DBUG_ENTER("start_bulk_insert"); - DBUG_PRINT("enter", ("rows: %d", (int)rows)); - - m_rows_inserted= (ha_rows) 0; - if (!m_use_write && m_ignore_dup_key) - { - /* - compare if expression with that in write_row - we have a situation where peek_indexed_rows() will be called - so we cannot batch - */ - DBUG_PRINT("info", ("Batching turned off as duplicate key is " - "ignored by using peek_row")); - m_rows_to_insert= 1; - m_bulk_insert_rows= 1; - DBUG_VOID_RETURN; - } - if (rows == (ha_rows) 0) - { - /* We don't know how many will be inserted, guess */ - m_rows_to_insert= m_autoincrement_prefetch; - } - else - m_rows_to_insert= rows; - - /* - Calculate how many rows that should be inserted - per roundtrip to NDB. This is done in order to minimize the - number of roundtrips as much as possible. 
However performance will - degrade if too many bytes are inserted, thus it's limited by this - calculation. - */ - const int bytesperbatch= 8192; - bytes= 12 + tab->getRowSizeInBytes() + 4 * tab->getNoOfColumns(); - batch= bytesperbatch/bytes; - batch= batch == 0 ? 1 : batch; - DBUG_PRINT("info", ("batch: %d, bytes: %d", batch, bytes)); - m_bulk_insert_rows= batch; - - DBUG_VOID_RETURN; -} - -/** - End of an insert. -*/ -int ha_ndbcluster::end_bulk_insert() -{ - int error= 0; - DBUG_ENTER("end_bulk_insert"); - - // Check if last inserts need to be flushed - if (m_bulk_insert_not_flushed) - { - NdbTransaction *trans= m_active_trans; - // Send rows to NDB - DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted: %d bulk_insert_rows: %d", - (int) m_rows_inserted, (int) m_bulk_insert_rows)); - m_bulk_insert_not_flushed= FALSE; - if (m_transaction_on) - { - if (execute_no_commit(this, trans,FALSE) != 0) - { - no_uncommitted_rows_execute_failure(); - my_errno= error= ndb_err(trans); - } - } - else - { - if (execute_commit(this, trans) != 0) - { - no_uncommitted_rows_execute_failure(); - my_errno= error= ndb_err(trans); - } - else - { - int res __attribute__((unused))= trans->restart(); - DBUG_ASSERT(res == 0); - } - } - } - - m_rows_inserted= (ha_rows) 0; - m_rows_to_insert= (ha_rows) 1; - DBUG_RETURN(error); -} - - -int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size) -{ - DBUG_ENTER("extra_opt"); - DBUG_PRINT("enter", ("cache_size: %lu", cache_size)); - DBUG_RETURN(extra(operation)); -} - -static const char *ha_ndbcluster_exts[] = { - ha_ndb_ext, - NullS -}; - -const char** ha_ndbcluster::bas_ext() const -{ - return ha_ndbcluster_exts; -} - -/** - How many seeks it will take to read through the table. - - This is to be comparable to the number returned by records_in_range so - that we can decide if we should scan the table or use keys. -*/ - -double ha_ndbcluster::scan_time() -{ - DBUG_ENTER("ha_ndbcluster::scan_time()"); - double res= rows2double(stats.records*1000); - DBUG_PRINT("exit", ("table: %s value: %f", - m_tabname, res)); - DBUG_RETURN(res); -} - -/* - Convert MySQL table locks into locks supported by Ndb Cluster. - Note that MySQL Cluster does currently not support distributed - table locks, so to be safe one should set cluster in Single - User Mode, before relying on table locks when updating tables - from several MySQL servers -*/ - -THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - DBUG_ENTER("store_lock"); - if (lock_type != TL_IGNORE && m_lock.type == TL_UNLOCK) - { - - /* If we are not doing a LOCK TABLE, then allow multiple - writers */ - - /* Since NDB does not currently have table locks - this is treated as a ordinary lock */ - - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && !thd->in_lock_tables) - lock_type= TL_WRITE_ALLOW_WRITE; - - /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ... - MySQL would use the lock TL_READ_NO_INSERT on t2, and that - would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts - to t2. Convert the lock to a normal read lock to allow - concurrent inserts to t2. 
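Taken together, the two conversions described here amount to a small pure function; below is a hedged standalone sketch of that mapping, using a reduced stand-in enum rather than the server's thr_lock_type values (the ordering of the write-lock values is only assumed to match).

    #include <cstdio>

    // Simplified stand-ins for the relevant thr_lock_type values; only the
    // relative ordering of the write-lock range matters for this sketch.
    enum lock_type_t {
      TL_READ,
      TL_READ_NO_INSERT,
      TL_WRITE_ALLOW_WRITE,
      TL_WRITE_CONCURRENT_INSERT,
      TL_WRITE
    };

    // Outside LOCK TABLES, write locks are downgraded to TL_WRITE_ALLOW_WRITE
    // and TL_READ_NO_INSERT to a plain read, since NDB has no distributed
    // table locks to enforce the stricter semantics anyway.
    static lock_type_t downgrade_lock(lock_type_t requested, bool in_lock_tables)
    {
      if (!in_lock_tables) {
        if (requested >= TL_WRITE_CONCURRENT_INSERT && requested <= TL_WRITE)
          return TL_WRITE_ALLOW_WRITE;
        if (requested == TL_READ_NO_INSERT)
          return TL_READ;
      }
      return requested;
    }

    int main()
    {
      std::printf("%d\n", downgrade_lock(TL_WRITE, false));          // -> TL_WRITE_ALLOW_WRITE
      std::printf("%d\n", downgrade_lock(TL_READ_NO_INSERT, false)); // -> TL_READ
      std::printf("%d\n", downgrade_lock(TL_WRITE, true));           // unchanged under LOCK TABLES
      return 0;
    }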
*/ - - if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) - lock_type= TL_READ; - - m_lock.type=lock_type; - } - *to++= &m_lock; - - DBUG_PRINT("exit", ("lock_type: %d", lock_type)); - - DBUG_RETURN(to); -} - -#ifndef DBUG_OFF -#define PRINT_OPTION_FLAGS(t) { \ - if (t->variables.option_bits & OPTION_NOT_AUTOCOMMIT) \ - DBUG_PRINT("thd->variables.option_bits", ("OPTION_NOT_AUTOCOMMIT")); \ - if (t->variables.option_bits & OPTION_BEGIN) \ - DBUG_PRINT("thd->variables.option_bits", ("OPTION_BEGIN")); \ - if (t->variables.option_bits & OPTION_TABLE_LOCK) \ - DBUG_PRINT("thd->variables.option_bits", ("OPTION_TABLE_LOCK")); \ -} -#else -#define PRINT_OPTION_FLAGS(t) -#endif - - -/* - As MySQL will execute an external lock for every new table it uses - we can use this to start the transactions. - If we are in auto_commit mode we just need to start a transaction - for the statement, this will be stored in thd_ndb.stmt. - If not, we have to start a master transaction if there doesn't exist - one from before, this will be stored in thd_ndb.all - - When a table lock is held one transaction will be started which holds - the table lock and for each statement a hupp transaction will be started - If we are locking the table then: - - save the NdbDictionary::Table for easy access - - save reference to table statistics - - refresh list of the indexes for the table if needed (if altered) - */ - -#ifdef HAVE_NDB_BINLOG -extern Master_info *active_mi; -static int ndbcluster_update_apply_status(THD *thd, int do_update) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NDBDICT *dict= ndb->getDictionary(); - const NDBTAB *ndbtab; - NdbTransaction *trans= thd_ndb->trans; - ndb->setDatabaseName(NDB_REP_DB); - Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE); - if (!(ndbtab= ndbtab_g.get_table())) - { - return -1; - } - NdbOperation *op= 0; - int r= 0; - r|= (op= trans->getNdbOperation(ndbtab)) == 0; - DBUG_ASSERT(r == 0); - if (do_update) - r|= op->updateTuple(); - else - r|= op->writeTuple(); - DBUG_ASSERT(r == 0); - // server_id - r|= op->equal(0u, (Uint32)thd->server_id); - DBUG_ASSERT(r == 0); - if (!do_update) - { - // epoch - r|= op->setValue(1u, (Uint64)0); - DBUG_ASSERT(r == 0); - } - // log_name - char tmp_buf[FN_REFLEN]; - ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf, - active_mi->rli.group_master_log_name, - strlen(active_mi->rli.group_master_log_name)); - r|= op->setValue(2u, tmp_buf); - DBUG_ASSERT(r == 0); - // start_pos - r|= op->setValue(3u, (Uint64)active_mi->rli.group_master_log_pos); - DBUG_ASSERT(r == 0); - // end_pos - r|= op->setValue(4u, (Uint64)active_mi->rli.group_master_log_pos + - ((Uint64)active_mi->rli.future_event_relay_log_pos - - (Uint64)active_mi->rli.group_relay_log_pos)); - DBUG_ASSERT(r == 0); - return 0; -} -#endif /* HAVE_NDB_BINLOG */ - -void ha_ndbcluster::transaction_checks(THD *thd) -{ - if (thd->lex->sql_command == SQLCOM_LOAD) - { - m_transaction_on= FALSE; - /* Would be simpler if has_transactions() didn't always say "yes" */ - thd->transaction.all.modified_non_trans_table= - thd->transaction.stmt.modified_non_trans_table= TRUE; - } - else if (!thd->transaction.on) - m_transaction_on= FALSE; - else - m_transaction_on= THDVAR(thd, use_transactions); -} - -int ha_ndbcluster::start_statement(THD *thd, - Thd_ndb *thd_ndb, - Ndb *ndb) -{ - DBUG_ENTER("ha_ndbcluster::start_statement"); - PRINT_OPTION_FLAGS(thd); - - trans_register_ha(thd, FALSE, ndbcluster_hton); - if (!thd_ndb->trans) - { - if (thd->in_multi_stmt_transaction_mode()) - 
trans_register_ha(thd, TRUE, ndbcluster_hton); - DBUG_PRINT("trans",("Starting transaction")); - thd_ndb->trans= ndb->startTransaction(); - if (thd_ndb->trans == NULL) - ERR_RETURN(ndb->getNdbError()); - thd_ndb->init_open_tables(); - thd_ndb->query_state&= NDB_QUERY_NORMAL; - thd_ndb->trans_options= 0; - thd_ndb->m_slow_path= FALSE; - if (!(thd->variables.option_bits & OPTION_BIN_LOG) || - thd->variables.binlog_format == BINLOG_FORMAT_STMT) - { - thd_ndb->trans_options|= TNTO_NO_LOGGING; - thd_ndb->m_slow_path= TRUE; - } - else if (thd->slave_thread) - thd_ndb->m_slow_path= TRUE; - } - /* - If this is the start of a LOCK TABLE, a table look - should be taken on the table in NDB - - Check if it should be read or write lock - */ - if (thd->variables.option_bits & OPTION_TABLE_LOCK) - { - //lockThisTable(); - DBUG_PRINT("info", ("Locking the table..." )); - } - DBUG_RETURN(0); -} - -int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb) -{ - /* - This is the place to make sure this handler instance - has a started transaction. - - The transaction is started by the first handler on which - MySQL Server calls external lock - - Other handlers in the same stmt or transaction should use - the same NDB transaction. This is done by setting up the m_active_trans - pointer to point to the NDB transaction. - */ - - DBUG_ENTER("ha_ndbcluster::init_handler_for_statement"); - // store thread specific data first to set the right context - m_force_send= THDVAR(thd, force_send); - m_ha_not_exact_count= !THDVAR(thd, use_exact_count); - m_autoincrement_prefetch= - (THDVAR(thd, autoincrement_prefetch_sz) > - DEFAULT_AUTO_PREFETCH) ? - (ha_rows) THDVAR(thd, autoincrement_prefetch_sz) - : (ha_rows) DEFAULT_AUTO_PREFETCH; - m_active_trans= thd_ndb->trans; - DBUG_ASSERT(m_active_trans); - // Start of transaction - m_rows_changed= 0; - m_ops_pending= 0; - m_slow_path= thd_ndb->m_slow_path; -#ifdef HAVE_NDB_BINLOG - if (unlikely(m_slow_path)) - { - if (m_share == ndb_apply_status_share && thd->slave_thread) - thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS; - } -#endif - - if (thd->in_multi_stmt_transaction_mode()) - { - const void *key= m_table; - HASH_SEARCH_STATE state; - THD_NDB_SHARE *thd_ndb_share= - (THD_NDB_SHARE*)my_hash_first(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state); - while (thd_ndb_share && thd_ndb_share->key != key) - thd_ndb_share= (THD_NDB_SHARE*)my_hash_next(&thd_ndb->open_tables, (uchar *)&key, sizeof(key), &state); - if (thd_ndb_share == 0) - { - thd_ndb_share= (THD_NDB_SHARE *) alloc_root(&thd->transaction.mem_root, - sizeof(THD_NDB_SHARE)); - if (!thd_ndb_share) - { - mem_alloc_error(sizeof(THD_NDB_SHARE)); - DBUG_RETURN(1); - } - thd_ndb_share->key= key; - thd_ndb_share->stat.last_count= thd_ndb->count; - thd_ndb_share->stat.no_uncommitted_rows_count= 0; - thd_ndb_share->stat.records= ~(ha_rows)0; - my_hash_insert(&thd_ndb->open_tables, (uchar *)thd_ndb_share); - } - else if (thd_ndb_share->stat.last_count != thd_ndb->count) - { - thd_ndb_share->stat.last_count= thd_ndb->count; - thd_ndb_share->stat.no_uncommitted_rows_count= 0; - thd_ndb_share->stat.records= ~(ha_rows)0; - } - DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx", - (long) thd_ndb_share, (long) key)); - m_table_info= &thd_ndb_share->stat; - } - else - { - struct Ndb_local_table_statistics &stat= m_table_info_instance; - stat.last_count= thd_ndb->count; - stat.no_uncommitted_rows_count= 0; - stat.records= ~(ha_rows)0; - m_table_info= &stat; - } - DBUG_RETURN(0); -} - -int 
ha_ndbcluster::external_lock(THD *thd, int lock_type) -{ - int error=0; - DBUG_ENTER("external_lock"); - - /* - Check that this handler instance has a connection - set up to the Ndb object of thd - */ - if (check_ndb_connection(thd)) - DBUG_RETURN(1); - - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - - DBUG_PRINT("enter", ("this: 0x%lx thd: 0x%lx thd_ndb: 0x%lx " - "thd_ndb->lock_count: %d", - (long) this, (long) thd, (long) thd_ndb, - thd_ndb->lock_count)); - - if (lock_type != F_UNLCK) - { - DBUG_PRINT("info", ("lock_type != F_UNLCK")); - transaction_checks(thd); - if (!thd_ndb->lock_count++) - { - if ((error= start_statement(thd, thd_ndb, ndb))) - goto error; - } - if ((error= init_handler_for_statement(thd, thd_ndb))) - goto error; - DBUG_RETURN(0); - } - else - { - DBUG_PRINT("info", ("lock_type == F_UNLCK")); - - if (opt_ndb_cache_check_time && m_rows_changed) - { - DBUG_PRINT("info", ("Rows has changed and util thread is running")); - if (thd->in_multi_stmt_transaction_mode()) - { - DBUG_PRINT("info", ("Add share to list of tables to be invalidated")); - /* NOTE push_back allocates memory using transactions mem_root! */ - thd_ndb->changed_tables.push_back(m_share, &thd->transaction.mem_root); - } - - mysql_mutex_lock(&m_share->mutex); - DBUG_PRINT("info", ("Invalidating commit_count")); - m_share->commit_count= 0; - m_share->commit_count_lock++; - mysql_mutex_unlock(&m_share->mutex); - } - - if (!--thd_ndb->lock_count) - { - DBUG_PRINT("trans", ("Last external_lock")); - PRINT_OPTION_FLAGS(thd); - - if (!thd->in_multi_stmt_transaction_mode()) - { - if (thd_ndb->trans) - { - /* - Unlock is done without a transaction commit / rollback. - This happens if the thread didn't update any rows - We must in this case close the transaction to release resources - */ - DBUG_PRINT("trans",("ending non-updating transaction")); - ndb->closeTransaction(thd_ndb->trans); - thd_ndb->trans= NULL; - } - } - } - m_table_info= NULL; - - /* - This is the place to make sure this handler instance - no longer are connected to the active transaction. - - And since the handler is no longer part of the transaction - it can't have open cursors, ops or blobs pending. - */ - m_active_trans= NULL; - - if (m_active_cursor) - DBUG_PRINT("warning", ("m_active_cursor != NULL")); - m_active_cursor= NULL; - - if (m_multi_cursor) - DBUG_PRINT("warning", ("m_multi_cursor != NULL")); - m_multi_cursor= NULL; - - if (m_blobs_pending) - DBUG_PRINT("warning", ("blobs_pending != 0")); - m_blobs_pending= 0; - - if (m_ops_pending) - DBUG_PRINT("warning", ("ops_pending != 0L")); - m_ops_pending= 0; - DBUG_RETURN(0); - } -error: - thd_ndb->lock_count--; - DBUG_RETURN(error); -} - -/** - Unlock the last row read in an open scan. - Rows are unlocked by default in ndb, but - for SELECT FOR UPDATE and SELECT LOCK WIT SHARE MODE - locks are kept if unlock_row() is not called. -*/ - -void ha_ndbcluster::unlock_row() -{ - DBUG_ENTER("unlock_row"); - - DBUG_PRINT("info", ("Unlocking row")); - m_lock_tuple= FALSE; - DBUG_VOID_RETURN; -} - -/** - Start a transaction for running a statement if one is not - already running in a transaction. This will be the case in - a BEGIN; COMMIT; block - When using LOCK TABLE's external_lock will start a transaction - since ndb does not currently does not support table locking. 
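Both external_lock() above and start_stmt() below rely on simple reference counting to decide when a new NDB transaction must be started and when it may be torn down; a minimal sketch of that pattern follows, with hypothetical names and no NDB calls.

    #include <cassert>

    // Illustration of the lock_count bookkeeping: the first lock taken by any
    // handler in the statement starts the transaction, the last unlock closes
    // it again if nothing needs to be committed.
    struct StatementContext {
      int  lock_count = 0;
      bool trans_open = false;

      void external_lock_acquire() {
        if (lock_count++ == 0)
          trans_open = true;          // start_statement() in the real code
      }
      void external_lock_release() {
        assert(lock_count > 0);
        if (--lock_count == 0 && trans_open)
          trans_open = false;         // closeTransaction() for non-updating stmts
      }
    };

    int main() {
      StatementContext ctx;
      ctx.external_lock_acquire();    // table t1
      ctx.external_lock_acquire();    // table t2, reuses the same transaction
      ctx.external_lock_release();
      ctx.external_lock_release();    // last unlock ends the transaction
      assert(ctx.lock_count == 0 && !ctx.trans_open);
      return 0;
    }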
-*/ - -int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) -{ - int error=0; - DBUG_ENTER("start_stmt"); - - Thd_ndb *thd_ndb= get_thd_ndb(thd); - transaction_checks(thd); - if (!thd_ndb->start_stmt_count++) - { - Ndb *ndb= thd_ndb->ndb; - if ((error= start_statement(thd, thd_ndb, ndb))) - goto error; - } - if ((error= init_handler_for_statement(thd, thd_ndb))) - goto error; - DBUG_RETURN(0); -error: - thd_ndb->start_stmt_count--; - DBUG_RETURN(error); -} - - -/** - Commit a transaction started in NDB. -*/ - -static int ndbcluster_commit(handlerton *hton, THD *thd, bool all) -{ - int res= 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NdbTransaction *trans= thd_ndb->trans; - - DBUG_ENTER("ndbcluster_commit"); - DBUG_ASSERT(ndb); - PRINT_OPTION_FLAGS(thd); - DBUG_PRINT("enter", ("Commit %s", (all ? "all" : "stmt"))); - thd_ndb->start_stmt_count= 0; - if (trans == NULL || (!all && thd->in_multi_stmt_transaction_mode())) - { - /* - An odditity in the handler interface is that commit on handlerton - is called to indicate end of statement only in cases where - autocommit isn't used and the all flag isn't set. - - We also leave quickly when a transaction haven't even been started, - in this case we are safe that no clean up is needed. In this case - the MySQL Server could handle the query without contacting the - NDB kernel. - */ - DBUG_PRINT("info", ("Commit before start or end-of-statement only")); - DBUG_RETURN(0); - } - -#ifdef HAVE_NDB_BINLOG - if (unlikely(thd_ndb->m_slow_path)) - { - if (thd->slave_thread) - ndbcluster_update_apply_status - (thd, thd_ndb->trans_options & TNTO_INJECTED_APPLY_STATUS); - } -#endif /* HAVE_NDB_BINLOG */ - - if (execute_commit(thd,trans) != 0) - { - const NdbError err= trans->getNdbError(); - const NdbOperation *error_op= trans->getNdbErrorOperation(); - set_ndb_err(thd, err); - res= ndb_to_mysql_error(&err); - if (res != -1) - ndbcluster_print_error(res, error_op); - } - ndb->closeTransaction(trans); - thd_ndb->trans= NULL; - - /* Clear commit_count for tables changed by transaction */ - NDB_SHARE* share; - List_iterator_fast it(thd_ndb->changed_tables); - while ((share= it++)) - { - mysql_mutex_lock(&share->mutex); - DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %lu", - share->table_name, (ulong) share->commit_count)); - share->commit_count= 0; - share->commit_count_lock++; - mysql_mutex_unlock(&share->mutex); - } - thd_ndb->changed_tables.empty(); - - DBUG_RETURN(res); -} - - -/** - Rollback a transaction started in NDB. 
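The commit path above and the rollback path below share the same end-of-statement guard; here is a minimal sketch of that guard, with plain booleans standing in for the THD and Thd_ndb state.

    #include <cstdio>

    // A handlerton commit/rollback call with all == false while a
    // multi-statement transaction is open only marks end of statement and
    // must not touch the NDB transaction.
    static bool should_end_ndb_transaction(bool have_trans,
                                           bool all,
                                           bool in_multi_stmt_transaction)
    {
      if (!have_trans)
        return false;                       // nothing was ever started
      if (!all && in_multi_stmt_transaction)
        return false;                       // end of statement only
      return true;                          // real COMMIT/ROLLBACK or autocommit
    }

    int main()
    {
      std::printf("%d\n", should_end_ndb_transaction(true,  false, true));  // 0
      std::printf("%d\n", should_end_ndb_transaction(true,  true,  true));  // 1
      std::printf("%d\n", should_end_ndb_transaction(true,  false, false)); // 1
      std::printf("%d\n", should_end_ndb_transaction(false, true,  true));  // 0
      return 0;
    }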
-*/ - -static int ndbcluster_rollback(handlerton *hton, THD *thd, bool all) -{ - int res= 0; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - NdbTransaction *trans= thd_ndb->trans; - - DBUG_ENTER("ndbcluster_rollback"); - DBUG_ASSERT(ndb); - thd_ndb->start_stmt_count= 0; - if (trans == NULL || (!all && - thd->in_multi_stmt_transaction_mode())) - { - /* Ignore end-of-statement until real rollback or commit is called */ - DBUG_PRINT("info", ("Rollback before start or end-of-statement only")); - DBUG_RETURN(0); - } - - if (trans->execute(NdbTransaction::Rollback) != 0) - { - const NdbError err= trans->getNdbError(); - const NdbOperation *error_op= trans->getNdbErrorOperation(); - set_ndb_err(thd, err); - res= ndb_to_mysql_error(&err); - if (res != -1) - ndbcluster_print_error(res, error_op); - } - ndb->closeTransaction(trans); - thd_ndb->trans= NULL; - - /* Clear list of tables changed by transaction */ - thd_ndb->changed_tables.empty(); - - DBUG_RETURN(res); -} - - -/** - Define NDB column based on Field. - - Not member of ha_ndbcluster because NDBCOL cannot be declared. - - MySQL text types with character set "binary" are mapped to true - NDB binary types without a character set. This may change. - - @return - Returns 0 or mysql error code. -*/ - -static int create_ndb_column(NDBCOL &col, - Field *field, - HA_CREATE_INFO *info) -{ - // Set name - if (col.setName(field->field_name)) - { - return (my_errno= errno); - } - // Get char set - CHARSET_INFO *cs= field->charset(); - // Set type and sizes - const enum enum_field_types mysql_type= field->real_type(); - switch (mysql_type) { - // Numeric types - case MYSQL_TYPE_TINY: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Tinyunsigned); - else - col.setType(NDBCOL::Tinyint); - col.setLength(1); - break; - case MYSQL_TYPE_SHORT: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Smallunsigned); - else - col.setType(NDBCOL::Smallint); - col.setLength(1); - break; - case MYSQL_TYPE_LONG: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Unsigned); - else - col.setType(NDBCOL::Int); - col.setLength(1); - break; - case MYSQL_TYPE_INT24: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Mediumunsigned); - else - col.setType(NDBCOL::Mediumint); - col.setLength(1); - break; - case MYSQL_TYPE_LONGLONG: - if (field->flags & UNSIGNED_FLAG) - col.setType(NDBCOL::Bigunsigned); - else - col.setType(NDBCOL::Bigint); - col.setLength(1); - break; - case MYSQL_TYPE_FLOAT: - col.setType(NDBCOL::Float); - col.setLength(1); - break; - case MYSQL_TYPE_DOUBLE: - col.setType(NDBCOL::Double); - col.setLength(1); - break; - case MYSQL_TYPE_DECIMAL: - { - Field_decimal *f= (Field_decimal*)field; - uint precision= f->pack_length(); - uint scale= f->decimals(); - if (field->flags & UNSIGNED_FLAG) - { - col.setType(NDBCOL::Olddecimalunsigned); - precision-= (scale > 0); - } - else - { - col.setType(NDBCOL::Olddecimal); - precision-= 1 + (scale > 0); - } - col.setPrecision(precision); - col.setScale(scale); - col.setLength(1); - } - break; - case MYSQL_TYPE_NEWDECIMAL: - { - Field_new_decimal *f= (Field_new_decimal*)field; - uint precision= f->precision; - uint scale= f->decimals(); - if (field->flags & UNSIGNED_FLAG) - { - col.setType(NDBCOL::Decimalunsigned); - } - else - { - col.setType(NDBCOL::Decimal); - } - col.setPrecision(precision); - col.setScale(scale); - col.setLength(1); - } - break; - // Date types - case MYSQL_TYPE_DATETIME: - col.setType(NDBCOL::Datetime); - col.setLength(1); - break; - case MYSQL_TYPE_DATE: 
// ? - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_NEWDATE: - col.setType(NDBCOL::Date); - col.setLength(1); - break; - case MYSQL_TYPE_TIME: - col.setType(NDBCOL::Time); - col.setLength(1); - break; - case MYSQL_TYPE_YEAR: - col.setType(NDBCOL::Year); - col.setLength(1); - break; - case MYSQL_TYPE_TIMESTAMP: - col.setType(NDBCOL::Timestamp); - col.setLength(1); - break; - // Char types - case MYSQL_TYPE_STRING: - if (field->pack_length() == 0) - { - col.setType(NDBCOL::Bit); - col.setLength(1); - } - else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - { - col.setType(NDBCOL::Binary); - col.setLength(field->pack_length()); - } - else - { - col.setType(NDBCOL::Char); - col.setCharset(cs); - col.setLength(field->pack_length()); - } - break; - case MYSQL_TYPE_VAR_STRING: // ? - case MYSQL_TYPE_VARCHAR: - { - Field_varstring* f= (Field_varstring*)field; - if (f->length_bytes == 1) - { - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Varbinary); - else { - col.setType(NDBCOL::Varchar); - col.setCharset(cs); - } - } - else if (f->length_bytes == 2) - { - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Longvarbinary); - else { - col.setType(NDBCOL::Longvarchar); - col.setCharset(cs); - } - } - else - { - return HA_ERR_UNSUPPORTED; - } - col.setLength(field->field_length); - } - break; - // Blob types (all come in as MYSQL_TYPE_BLOB) - mysql_type_tiny_blob: - case MYSQL_TYPE_TINY_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - // No parts - col.setPartSize(0); - col.setStripeSize(0); - break; - //mysql_type_blob: - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - { - Field_blob *field_blob= (Field_blob *)field; - /* - * max_data_length is 2^8-1, 2^16-1, 2^24-1 for tiny, blob, medium. - * Tinyblob gets no blob parts. The other cases are just a crude - * way to control part size and striping. - * - * In mysql blob(256) is promoted to blob(65535) so it does not - * in fact fit "inline" in NDB. 
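The thresholds applied in the code that follows can be read as a small classification function; here is a rough, self-contained version (the BlobLayout struct is illustrative only, not the NdbDictionary column API; the constants are copied from the removed code).

    #include <cstdio>

    // inline_size/part_size/stripe_size mirror the values the removed code
    // passes to setInlineSize()/setPartSize()/setStripeSize().
    struct BlobLayout { int inline_size, part_size, stripe_size; };

    static BlobLayout blob_layout(unsigned long long max_data_length)
    {
      if (max_data_length < (1ULL << 8))   return {256, 0,    0 };  // TINYBLOB: no parts
      if (max_data_length < (1ULL << 16))  return {256, 2000, 16};  // BLOB
      if (max_data_length < (1ULL << 24))  return {256, 4000, 8 };  // MEDIUMBLOB
      return                                      {256, 8000, 4 };  // LONGBLOB
    }

    int main()
    {
      // blob(256) is promoted to blob(65535) by the server, so it lands in
      // the BLOB bucket and gets external parts rather than staying inline.
      BlobLayout l = blob_layout(65535);
      std::printf("inline=%d part=%d stripe=%d\n",
                  l.inline_size, l.part_size, l.stripe_size);
      return 0;
    }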
- */ - if (field_blob->max_data_length() < (1 << 8)) - goto mysql_type_tiny_blob; - else if (field_blob->max_data_length() < (1 << 16)) - { - col.setInlineSize(256); - col.setPartSize(2000); - col.setStripeSize(16); - } - else if (field_blob->max_data_length() < (1 << 24)) - goto mysql_type_medium_blob; - else - goto mysql_type_long_blob; - } - break; - mysql_type_medium_blob: - case MYSQL_TYPE_MEDIUM_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - col.setPartSize(4000); - col.setStripeSize(8); - break; - mysql_type_long_blob: - case MYSQL_TYPE_LONG_BLOB: - if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) - col.setType(NDBCOL::Blob); - else { - col.setType(NDBCOL::Text); - col.setCharset(cs); - } - col.setInlineSize(256); - col.setPartSize(8000); - col.setStripeSize(4); - break; - // Other types - case MYSQL_TYPE_ENUM: - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_SET: - col.setType(NDBCOL::Char); - col.setLength(field->pack_length()); - break; - case MYSQL_TYPE_BIT: - { - int no_of_bits= field->field_length; - col.setType(NDBCOL::Bit); - if (!no_of_bits) - col.setLength(1); - else - col.setLength(no_of_bits); - break; - } - case MYSQL_TYPE_NULL: - goto mysql_type_unsupported; - mysql_type_unsupported: - default: - return HA_ERR_UNSUPPORTED; - } - // Set nullable and pk - col.setNullable(field->maybe_null()); - col.setPrimaryKey(field->flags & PRI_KEY_FLAG); - // Set autoincrement - if (field->flags & AUTO_INCREMENT_FLAG) - { -#ifndef DBUG_OFF - char buff[22]; -#endif - col.setAutoIncrement(TRUE); - ulonglong value= info->auto_increment_value ? - info->auto_increment_value : (ulonglong) 1; - DBUG_PRINT("info", ("Autoincrement key, initial: %s", llstr(value, buff))); - col.setAutoIncrementInitialValue(value); - } - else - col.setAutoIncrement(FALSE); - return 0; -} - -/** - Create a table in NDB Cluster -*/ - -int ha_ndbcluster::create(const char *name, - TABLE *form, - HA_CREATE_INFO *create_info) -{ - THD *thd= current_thd; - NDBTAB tab; - NDBCOL col; - size_t pack_length, length; - uint i, pk_length= 0; - uchar *data= NULL, *pack_data= NULL; - bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE); - bool is_truncate= (thd->lex->sql_command == SQLCOM_TRUNCATE); - char tablespace[FN_LEN + 1]; - NdbDictionary::Table::SingleUserMode single_user_mode= NdbDictionary::Table::SingleUserModeLocked; - - DBUG_ENTER("ha_ndbcluster::create"); - DBUG_PRINT("enter", ("name: %s", name)); - - DBUG_ASSERT(*fn_rext((char*)name) == 0); - set_dbname(name); - set_tabname(name); - - if ((my_errno= check_ndb_connection())) - DBUG_RETURN(my_errno); - - Ndb *ndb= get_ndb(); - NDBDICT *dict= ndb->getDictionary(); - - if (is_truncate) - { - { - Ndb_table_guard ndbtab_g(dict, m_tabname); - if (!(m_table= ndbtab_g.get_table())) - ERR_RETURN(dict->getNdbError()); - if ((get_tablespace_name(thd, tablespace, FN_LEN))) - create_info->tablespace= tablespace; - m_table= NULL; - } - DBUG_PRINT("info", ("Dropping and re-creating table for TRUNCATE")); - if ((my_errno= delete_table(name))) - DBUG_RETURN(my_errno); - } - table= form; - if (create_from_engine) - { - /* - Table already exists in NDB and frm file has been created by - caller. 
- Do Ndb specific stuff, such as create a .ndb file - */ - if ((my_errno= write_ndb_file(name))) - DBUG_RETURN(my_errno); -#ifdef HAVE_NDB_BINLOG - ndbcluster_create_binlog_setup(get_ndb(), name, strlen(name), - m_dbname, m_tabname, FALSE); -#endif /* HAVE_NDB_BINLOG */ - DBUG_RETURN(my_errno); - } - -#ifdef HAVE_NDB_BINLOG - /* - Don't allow table creation unless - schema distribution table is setup - ( unless it is a creation of the schema dist table itself ) - */ - if (!ndb_schema_share) - { - if (!(strcmp(m_dbname, NDB_REP_DB) == 0 && - strcmp(m_tabname, NDB_SCHEMA_TABLE) == 0)) - { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_ASSERT(ndb_schema_share); - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - single_user_mode = NdbDictionary::Table::SingleUserModeReadWrite; - } -#endif /* HAVE_NDB_BINLOG */ - - DBUG_PRINT("table", ("name: %s", m_tabname)); - if (tab.setName(m_tabname)) - { - DBUG_RETURN(my_errno= errno); - } - tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE)); - tab.setSingleUserMode(single_user_mode); - - // Save frm data for this table - if (readfrm(name, &data, &length)) - DBUG_RETURN(1); - if (packfrm(data, length, &pack_data, &pack_length)) - { - my_free(data); - DBUG_RETURN(2); - } - DBUG_PRINT("info", - ("setFrm data: 0x%lx len: %lu", (long) pack_data, - (ulong) pack_length)); - tab.setFrm(pack_data, pack_length); - my_free(data); - my_free(pack_data); - - /* - Check for disk options - */ - if (create_info->storage_media == HA_SM_DISK) - { - if (create_info->tablespace) - tab.setTablespaceName(create_info->tablespace); - else - tab.setTablespaceName("DEFAULT-TS"); - } - else if (create_info->tablespace) - { - if (create_info->storage_media == HA_SM_MEMORY) - { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "TABLESPACE currently only supported for " - "STORAGE DISK"); - DBUG_RETURN(HA_ERR_UNSUPPORTED); - } - tab.setTablespaceName(create_info->tablespace); - create_info->storage_media = HA_SM_DISK; //if use tablespace, that also means store on disk - } - - /* - Handle table row type - - Default is to let table rows have var part reference so that online - add column can be performed in the future. 
Explicitly setting row - type to fixed will omit var part reference, which will save data - memory in ndb, but at the cost of not being able to online add - column to this table - */ - switch (create_info->row_type) { - case ROW_TYPE_FIXED: - tab.setForceVarPart(FALSE); - break; - case ROW_TYPE_DYNAMIC: - /* fall through, treat as default */ - default: - /* fall through, treat as default */ - case ROW_TYPE_DEFAULT: - tab.setForceVarPart(TRUE); - break; - } - - /* - Setup columns - */ - for (i= 0; i < form->s->fields; i++) - { - Field *field= form->field[i]; - DBUG_PRINT("info", ("name: %s type: %u pack_length: %d", - field->field_name, field->real_type(), - field->pack_length())); - if ((my_errno= create_ndb_column(col, field, create_info))) - DBUG_RETURN(my_errno); - - if (create_info->storage_media == HA_SM_DISK) - col.setStorageType(NdbDictionary::Column::StorageTypeDisk); - else - col.setStorageType(NdbDictionary::Column::StorageTypeMemory); - - switch (create_info->row_type) { - case ROW_TYPE_FIXED: - if (field_type_forces_var_part(field->type())) - { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "Row format FIXED incompatible with " - "variable sized attribute"); - DBUG_RETURN(HA_ERR_UNSUPPORTED); - } - break; - case ROW_TYPE_DYNAMIC: - /* - Future: make columns dynamic in this case - */ - break; - default: - break; - } - if (tab.addColumn(col)) - { - DBUG_RETURN(my_errno= errno); - } - if (col.getPrimaryKey()) - pk_length += (field->pack_length() + 3) / 4; - } - - KEY* key_info; - for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++) - { - KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; - for (; key_part != end; key_part++) - tab.getColumn(key_part->fieldnr-1)->setStorageType( - NdbDictionary::Column::StorageTypeMemory); - } - - // No primary key, create shadow key as 64 bit, auto increment - if (form->s->primary_key == MAX_KEY) - { - DBUG_PRINT("info", ("Generating shadow key")); - if (col.setName("$PK")) - { - DBUG_RETURN(my_errno= errno); - } - col.setType(NdbDictionary::Column::Bigunsigned); - col.setLength(1); - col.setNullable(FALSE); - col.setPrimaryKey(TRUE); - col.setAutoIncrement(TRUE); - if (tab.addColumn(col)) - { - DBUG_RETURN(my_errno= errno); - } - pk_length += 2; - } - - // Make sure that blob tables don't have to big part size - for (i= 0; i < form->s->fields; i++) - { - /** - * The extra +7 concists - * 2 - words from pk in blob table - * 5 - from extra words added by tup/dict?? 
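A worked version of the clamp applied below, with an assumed value for NDB_MAX_TUPLE_SIZE_IN_WORDS (the real constant comes from the NDB headers and may differ between versions).

    #include <cstdio>

    // Hypothetical value for illustration only.
    static const int NDB_MAX_TUPLE_SIZE_IN_WORDS_ASSUMED = 2013;

    // Mirrors the clamp in the removed code: a blob column's part size is
    // shrunk so that pk words + part words + 7 overhead words still fit in
    // one tuple, when that is possible at all.
    static int clamp_blob_part_size(int pk_length_words, int part_size_bytes)
    {
      int size_words = pk_length_words + (part_size_bytes + 3) / 4 + 7;
      if (size_words > NDB_MAX_TUPLE_SIZE_IN_WORDS_ASSUMED &&
          pk_length_words + 7 < NDB_MAX_TUPLE_SIZE_IN_WORDS_ASSUMED)
      {
        int max_part_words = NDB_MAX_TUPLE_SIZE_IN_WORDS_ASSUMED - pk_length_words - 7;
        return 4 * max_part_words;
      }
      return part_size_bytes;          // already fits, or cannot fit at all
    }

    int main()
    {
      // With a 2-word primary key an 8000-byte part (2000 words) fits:
      std::printf("%d\n", clamp_blob_part_size(2, 8000));
      // With a very wide key the part size gets cut down instead:
      std::printf("%d\n", clamp_blob_part_size(1000, 8000));
      return 0;
    }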
- */ - switch (form->field[i]->real_type()) { - case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_MEDIUM_BLOB: - case MYSQL_TYPE_LONG_BLOB: - { - NdbDictionary::Column * column= tab.getColumn(i); - int size= pk_length + (column->getPartSize()+3)/4 + 7; - if (size > NDB_MAX_TUPLE_SIZE_IN_WORDS && - (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS) - { - size= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; - column->setPartSize(4*size); - } - /** - * If size > NDB_MAX and pk_length+7 >= NDB_MAX - * then the table can't be created anyway, so skip - * changing part size, and have error later - */ - } - default: - break; - } - } - - // Check partition info - partition_info *part_info= form->part_info; - if ((my_errno= set_up_partition_info(part_info, form, (void*)&tab))) - { - DBUG_RETURN(my_errno); - } - - // Create the table in NDB - if (dict->createTable(tab) != 0) - { - const NdbError err= dict->getNdbError(); - set_ndb_err(thd, err); - my_errno= ndb_to_mysql_error(&err); - DBUG_RETURN(my_errno); - } - - Ndb_table_guard ndbtab_g(dict, m_tabname); - // temporary set m_table during create - // reset at return - m_table= ndbtab_g.get_table(); - // TODO check also that we have the same frm... - if (!m_table) - { - /* purecov: begin deadcode */ - const NdbError err= dict->getNdbError(); - set_ndb_err(thd, err); - my_errno= ndb_to_mysql_error(&err); - DBUG_RETURN(my_errno); - /* purecov: end */ - } - - DBUG_PRINT("info", ("Table %s/%s created successfully", - m_dbname, m_tabname)); - - // Create secondary indexes - my_errno= create_indexes(ndb, form); - - if (!my_errno) - my_errno= write_ndb_file(name); - else - { - /* - Failed to create an index, - drop the table (and all it's indexes) - */ - while (dict->dropTableGlobal(*m_table)) - { - switch (dict->getNdbError().status) - { - case NdbError::TemporaryError: - if (!thd->killed) - continue; // retry indefinitly - break; - default: - break; - } - break; - } - m_table = 0; - DBUG_RETURN(my_errno); - } - -#ifdef HAVE_NDB_BINLOG - if (!my_errno) - { - NDB_SHARE *share= 0; - mysql_mutex_lock(&ndbcluster_mutex); - /* - First make sure we get a "fresh" share here, not an old trailing one... - */ - { - uint length= (uint) strlen(name); - if ((share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (uchar*) name, length))) - handle_trailing_share(share); - } - /* - get a new share - */ - - /* ndb_share reference create */ - if (!(share= get_share(name, form, TRUE, TRUE))) - { - sql_print_error("NDB: allocating table share for %s failed", name); - /* my_errno is set */ - } - else - { - DBUG_PRINT("NDB_SHARE", ("%s binlog create use_count: %u", - share->key, share->use_count)); - } - mysql_mutex_unlock(&ndbcluster_mutex); - - while (!IS_TMP_PREFIX(m_tabname)) - { - String event_name(INJECTOR_EVENT_LEN); - ndb_rep_event_name(&event_name,m_dbname,m_tabname); - int do_event_op= ndb_binlog_running; - - if (!ndb_schema_share && - strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0) - do_event_op= 1; - - /* - Always create an event for the table, as other mysql servers - expect it to be there. - */ - if (!ndbcluster_create_event(ndb, m_table, event_name.c_ptr(), share, - share && do_event_op ? 2 : 1/* push warning */)) - { - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: CREATE TABLE Event: %s", - event_name.c_ptr()); - if (share && - ndbcluster_create_event_ops(share, m_table, event_name.c_ptr())) - { - sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations." 
- " Event: %s", name); - /* a warning has been issued to the client */ - } - } - /* - warning has been issued if ndbcluster_create_event failed - and (share && do_event_op) - */ - if (share && !do_event_op) - share->flags|= NSF_NO_BINLOG; - ndbcluster_log_schema_op(thd, share, - thd->query(), thd->query_length(), - share->db, share->table_name, - m_table->getObjectId(), - m_table->getObjectVersion(), - (is_truncate) ? - SOT_TRUNCATE_TABLE : SOT_CREATE_TABLE, - 0, 0); - break; - } - } -#endif /* HAVE_NDB_BINLOG */ - - m_table= 0; - DBUG_RETURN(my_errno); -} - -int ha_ndbcluster::create_handler_files(const char *file, - const char *old_name, - int action_flag, - HA_CREATE_INFO *create_info) -{ - Ndb* ndb; - const NDBTAB *tab; - uchar *data= NULL, *pack_data= NULL; - size_t length, pack_length; - int error= 0; - - DBUG_ENTER("create_handler_files"); - - if (action_flag != CHF_INDEX_FLAG) - { - DBUG_RETURN(FALSE); - } - DBUG_PRINT("enter", ("file: %s", file)); - if (!(ndb= get_ndb())) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - - NDBDICT *dict= ndb->getDictionary(); - if (!create_info->frm_only) - DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create - - // TODO handle this - DBUG_ASSERT(m_table != 0); - - set_dbname(file); - set_tabname(file); - Ndb_table_guard ndbtab_g(dict, m_tabname); - DBUG_PRINT("info", ("m_dbname: %s, m_tabname: %s", m_dbname, m_tabname)); - if (!(tab= ndbtab_g.get_table())) - DBUG_RETURN(0); // Unkown table, must be temporary table - - DBUG_ASSERT(get_ndb_share_state(m_share) == NSS_ALTERED); - if (readfrm(file, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) - { - DBUG_PRINT("info", ("Missing frm for %s", m_tabname)); - my_free(data); - my_free(pack_data); - error= 1; - } - else - { - DBUG_PRINT("info", ("Table %s has changed, altering frm in ndb", - m_tabname)); - NdbDictionary::Table new_tab= *tab; - new_tab.setFrm(pack_data, pack_length); - if (dict->alterTableGlobal(*tab, new_tab)) - { - set_ndb_err(current_thd, dict->getNdbError()); - error= ndb_to_mysql_error(&dict->getNdbError()); - } - my_free(data); - my_free(pack_data); - } - - set_ndb_share_state(m_share, NSS_INITIAL); - /* ndb_share reference schema(?) free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog schema(?) 
free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); // Decrease ref_count - - DBUG_RETURN(error); -} - -int ha_ndbcluster::create_index(const char *name, KEY *key_info, - NDB_INDEX_TYPE idx_type, uint idx_no) -{ - int error= 0; - char unique_name[FN_LEN + 1]; - static const char* unique_suffix= "$unique"; - DBUG_ENTER("ha_ndbcluster::create_ordered_index"); - DBUG_PRINT("info", ("Creating index %u: %s", idx_no, name)); - - if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) - { - strxnmov(unique_name, FN_LEN, name, unique_suffix, NullS); - DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d", - unique_name, idx_no)); - } - - switch (idx_type){ - case PRIMARY_KEY_INDEX: - // Do nothing, already created - break; - case PRIMARY_KEY_ORDERED_INDEX: - error= create_ordered_index(name, key_info); - break; - case UNIQUE_ORDERED_INDEX: - if (!(error= create_ordered_index(name, key_info))) - error= create_unique_index(unique_name, key_info); - break; - case UNIQUE_INDEX: - if (check_index_fields_not_null(key_info)) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_NULL_COLUMN_IN_INDEX, - "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan"); - } - error= create_unique_index(unique_name, key_info); - break; - case ORDERED_INDEX: - if (key_info->algorithm == HA_KEY_ALG_HASH) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "Ndb does not support non-unique " - "hash based indexes"); - error= HA_ERR_UNSUPPORTED; - break; - } - error= create_ordered_index(name, key_info); - break; - default: - DBUG_ASSERT(FALSE); - break; - } - - DBUG_RETURN(error); -} - -int ha_ndbcluster::create_ordered_index(const char *name, - KEY *key_info) -{ - DBUG_ENTER("ha_ndbcluster::create_ordered_index"); - DBUG_RETURN(create_ndb_index(name, key_info, FALSE)); -} - -int ha_ndbcluster::create_unique_index(const char *name, - KEY *key_info) -{ - - DBUG_ENTER("ha_ndbcluster::create_unique_index"); - DBUG_RETURN(create_ndb_index(name, key_info, TRUE)); -} - - -/** - Create an index in NDB Cluster. 
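The create_index() dispatch above maps the handler's index classification onto one or two NDB-side index objects; a compact sketch of that mapping follows (the IndexPlan struct is an illustration, not part of the handler API).

    #include <cstdio>

    // Index classification used by the removed handler code.
    enum NDB_INDEX_TYPE {
      PRIMARY_KEY_INDEX,          // hash primary key only, nothing extra
      PRIMARY_KEY_ORDERED_INDEX,  // additional ordered index on the pk
      UNIQUE_INDEX,               // unique hash index ("$unique" suffix)
      UNIQUE_ORDERED_INDEX,       // ordered index plus unique hash index
      ORDERED_INDEX               // plain ordered index
    };

    struct IndexPlan { bool create_ordered; bool create_unique_hash; };

    // Which NDB objects each classification results in.
    static IndexPlan plan_for(NDB_INDEX_TYPE t)
    {
      switch (t) {
      case PRIMARY_KEY_INDEX:         return {false, false};
      case PRIMARY_KEY_ORDERED_INDEX: return {true,  false};
      case UNIQUE_INDEX:              return {false, true };
      case UNIQUE_ORDERED_INDEX:      return {true,  true };
      case ORDERED_INDEX:             return {true,  false};
      }
      return {false, false};
    }

    int main()
    {
      IndexPlan p = plan_for(UNIQUE_ORDERED_INDEX);
      std::printf("ordered=%d unique_hash=%d\n", p.create_ordered, p.create_unique_hash);
      return 0;
    }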
- - @todo - Only temporary ordered indexes supported -*/ - -int ha_ndbcluster::create_ndb_index(const char *name, - KEY *key_info, - bool unique) -{ - Ndb *ndb= get_ndb(); - NdbDictionary::Dictionary *dict= ndb->getDictionary(); - KEY_PART_INFO *key_part= key_info->key_part; - KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts; - - DBUG_ENTER("ha_ndbcluster::create_index"); - DBUG_PRINT("enter", ("name: %s ", name)); - - NdbDictionary::Index ndb_index(name); - if (unique) - ndb_index.setType(NdbDictionary::Index::UniqueHashIndex); - else - { - ndb_index.setType(NdbDictionary::Index::OrderedIndex); - // TODO Only temporary ordered indexes supported - ndb_index.setLogging(FALSE); - } - if (ndb_index.setTable(m_tabname)) - { - DBUG_RETURN(my_errno= errno); - } - - for (; key_part != end; key_part++) - { - Field *field= key_part->field; - DBUG_PRINT("info", ("attr: %s", field->field_name)); - if (ndb_index.addColumnName(field->field_name)) - { - DBUG_RETURN(my_errno= errno); - } - } - - if (dict->createIndex(ndb_index, *m_table)) - ERR_RETURN(dict->getNdbError()); - - // Success - DBUG_PRINT("info", ("Created index %s", name)); - DBUG_RETURN(0); -} - -/* - Prepare for an on-line alter table -*/ -void ha_ndbcluster::prepare_for_alter() -{ - /* ndb_share reference schema */ - ndbcluster_get_share(m_share); // Increase ref_count - DBUG_PRINT("NDB_SHARE", ("%s binlog schema use_count: %u", - m_share->key, m_share->use_count)); - set_ndb_share_state(m_share, NSS_ALTERED); -} - -/* - Add an index on-line to a table -*/ -int ha_ndbcluster::add_index(TABLE *table_arg, - KEY *key_info, uint num_of_keys) -{ - int error= 0; - uint idx; - DBUG_ENTER("ha_ndbcluster::add_index"); - DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str)); - DBUG_ASSERT(m_share->state == NSS_ALTERED); - - for (idx= 0; idx < num_of_keys; idx++) - { - KEY *key= key_info + idx; - KEY_PART_INFO *key_part= key->key_part; - KEY_PART_INFO *end= key_part + key->key_parts; - NDB_INDEX_TYPE idx_type= get_index_type_from_key(idx, key_info, false); - DBUG_PRINT("info", ("Adding index: '%s'", key_info[idx].name)); - // Add fields to key_part struct - for (; key_part != end; key_part++) - key_part->field= table->field[key_part->fieldnr]; - // Check index type - // Create index in ndb - if((error= create_index(key_info[idx].name, key, idx_type, idx))) - break; - } - if (error) - { - set_ndb_share_state(m_share, NSS_INITIAL); - /* ndb_share reference schema free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog schema free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); // Decrease ref_count - } - DBUG_RETURN(error); -} - -/* - Mark one or several indexes for deletion. 
and - renumber the remaining indexes -*/ -int ha_ndbcluster::prepare_drop_index(TABLE *table_arg, - uint *key_num, uint num_of_keys) -{ - DBUG_ENTER("ha_ndbcluster::prepare_drop_index"); - DBUG_ASSERT(m_share->state == NSS_ALTERED); - // Mark indexes for deletion - uint idx; - for (idx= 0; idx < num_of_keys; idx++) - { - DBUG_PRINT("info", ("ha_ndbcluster::prepare_drop_index %u", *key_num)); - m_index[*key_num++].status= TO_BE_DROPPED; - } - // Renumber indexes - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - renumber_indexes(ndb, table_arg); - DBUG_RETURN(0); -} - -/* - Really drop all indexes marked for deletion -*/ -int ha_ndbcluster::final_drop_index(TABLE *table_arg) -{ - int error; - DBUG_ENTER("ha_ndbcluster::final_drop_index"); - DBUG_PRINT("info", ("ha_ndbcluster::final_drop_index")); - // Really drop indexes - THD *thd= current_thd; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - Ndb *ndb= thd_ndb->ndb; - if((error= drop_indexes(ndb, table_arg))) - { - m_share->state= NSS_INITIAL; - /* ndb_share reference schema free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog schema free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); // Decrease ref_count - } - DBUG_RETURN(error); -} - -/** - Rename a table in NDB Cluster. -*/ - -int ha_ndbcluster::rename_table(const char *from, const char *to) -{ - NDBDICT *dict; - char old_dbname[FN_HEADLEN]; - char new_dbname[FN_HEADLEN]; - char new_tabname[FN_HEADLEN]; - const NDBTAB *orig_tab; - int result; - bool recreate_indexes= FALSE; - NDBDICT::List index_list; - - DBUG_ENTER("ha_ndbcluster::rename_table"); - DBUG_PRINT("info", ("Renaming %s to %s", from, to)); - set_dbname(from, old_dbname); - set_dbname(to, new_dbname); - set_tabname(from); - set_tabname(to, new_tabname); - - if (check_ndb_connection()) - DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); - - Ndb *ndb= get_ndb(); - ndb->setDatabaseName(old_dbname); - dict= ndb->getDictionary(); - Ndb_table_guard ndbtab_g(dict, m_tabname); - if (!(orig_tab= ndbtab_g.get_table())) - ERR_RETURN(dict->getNdbError()); - -#ifdef HAVE_NDB_BINLOG - int ndb_table_id= orig_tab->getObjectId(); - int ndb_table_version= orig_tab->getObjectVersion(); - - /* ndb_share reference temporary */ - NDB_SHARE *share= get_share(from, 0, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - int r __attribute__((unused))= rename_share(share, to); - DBUG_ASSERT(r == 0); - } -#endif - if (my_strcasecmp(system_charset_info, new_dbname, old_dbname)) - { - dict->listIndexes(index_list, *orig_tab); - recreate_indexes= TRUE; - } - // Change current database to that of target table - set_dbname(to); - if (ndb->setDatabaseName(m_dbname)) - { - ERR_RETURN(ndb->getNdbError()); - } - - NdbDictionary::Table new_tab= *orig_tab; - new_tab.setName(new_tabname); - if (dict->alterTableGlobal(*orig_tab, new_tab) != 0) - { - NdbError ndb_error= dict->getNdbError(); -#ifdef HAVE_NDB_BINLOG - if (share) - { - int ret __attribute__((unused))= rename_share(share, from); - DBUG_ASSERT(ret == 0); - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } -#endif - ERR_RETURN(ndb_error); - } - - // Rename .ndb file - if ((result= handler::rename_table(from, to))) - { - // ToDo in 4.1 should rollback alter table... 
-#ifdef HAVE_NDB_BINLOG - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - free_share(&share); - } -#endif - DBUG_RETURN(result); - } - -#ifdef HAVE_NDB_BINLOG - int is_old_table_tmpfile= 1; - if (share && share->op) - dict->forceGCPWait(); - - /* handle old table */ - if (!IS_TMP_PREFIX(m_tabname)) - { - is_old_table_tmpfile= 0; - String event_name(INJECTOR_EVENT_LEN); - ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0); - ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share, - "rename table"); - } - - if (!result && !IS_TMP_PREFIX(new_tabname)) - { - /* always create an event for the table */ - String event_name(INJECTOR_EVENT_LEN); - ndb_rep_event_name(&event_name, to + sizeof(share_prefix) - 1, 0); - Ndb_table_guard ndbtab_g2(dict, new_tabname); - const NDBTAB *ndbtab= ndbtab_g2.get_table(); - - if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share, - share && ndb_binlog_running ? 2 : 1/* push warning */)) - { - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: RENAME Event: %s", - event_name.c_ptr()); - if (share && - ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr())) - { - sql_print_error("NDB Binlog: FAILED create event operations " - "during RENAME. Event %s", event_name.c_ptr()); - /* a warning has been issued to the client */ - } - } - /* - warning has been issued if ndbcluster_create_event failed - and (share && ndb_binlog_running) - */ - if (!is_old_table_tmpfile) - ndbcluster_log_schema_op(current_thd, share, - current_thd->query(), - current_thd->query_length(), - old_dbname, m_tabname, - ndb_table_id, ndb_table_version, - SOT_RENAME_TABLE, - m_dbname, new_tabname); - } - - // If we are moving tables between databases, we need to recreate - // indexes - if (recreate_indexes) - { - for (unsigned i = 0; i < index_list.count; i++) - { - NDBDICT::List::Element& index_el = index_list.elements[i]; - // Recreate any indexes not stored in the system database - if (my_strcasecmp(system_charset_info, - index_el.database, NDB_SYSTEM_DATABASE)) - { - set_dbname(from); - ndb->setDatabaseName(m_dbname); - const NDBINDEX * index= dict->getIndexGlobal(index_el.name, new_tab); - DBUG_PRINT("info", ("Creating index %s/%s", - index_el.database, index->getName())); - dict->createIndex(*index, new_tab); - DBUG_PRINT("info", ("Dropping index %s/%s", - index_el.database, index->getName())); - set_dbname(from); - ndb->setDatabaseName(m_dbname); - dict->dropIndexGlobal(*index); - } - } - } - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } -#endif - - DBUG_RETURN(result); -} - - -/** - Delete table from NDB Cluster. 
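The drop logic below, like the create and rename paths above, retries dictionary operations for as long as the error is classified as temporary and the thread has not been killed; a stripped-down sketch of that retry shape follows (DictResult and the callbacks are placeholders, not the NdbDictionary API).

    #include <functional>

    // Placeholder result of a dictionary call: 0 = ok, otherwise an error
    // that is either temporary (retry) or permanent (give up).
    struct DictResult { int code; bool temporary; };

    // Generic shape of the "retry indefinitely on temporary errors" loops,
    // bounded only by the thread being killed.
    static int retry_dict_op(const std::function<DictResult()> &op,
                             const std::function<bool()> &thread_killed)
    {
      for (;;)
      {
        DictResult r = op();
        if (r.code == 0)
          return 0;                       // success
        if (r.temporary && !thread_killed())
          continue;                       // temporary error: retry
        return r.code;                    // permanent error, or KILL received
      }
    }

    int main()
    {
      int attempts = 0;
      int rc = retry_dict_op(
          [&]() -> DictResult {           // succeed on the third attempt
            return ++attempts < 3 ? DictResult{266, true} : DictResult{0, false};
          },
          []() { return false; });        // never killed in this example
      return rc;
    }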
-*/ - -/* static version which does not need a handler */ - -int -ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb, - const char *path, - const char *db, - const char *table_name) -{ - THD *thd= current_thd; - DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table"); - NDBDICT *dict= ndb->getDictionary(); - int ndb_table_id= 0; - int ndb_table_version= 0; -#ifdef HAVE_NDB_BINLOG - /* - Don't allow drop table unless - schema distribution table is setup - */ - if (!ndb_schema_share) - { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_ASSERT(ndb_schema_share); - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - /* ndb_share reference temporary */ - NDB_SHARE *share= get_share(path, 0, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - } -#endif - - /* Drop the table from NDB */ - - int res= 0; - if (h && h->m_table) - { -retry_temporary_error1: - if (dict->dropTableGlobal(*h->m_table) == 0) - { - ndb_table_id= h->m_table->getObjectId(); - ndb_table_version= h->m_table->getObjectVersion(); - DBUG_PRINT("info", ("success 1")); - } - else - { - switch (dict->getNdbError().status) - { - case NdbError::TemporaryError: - if (!thd->killed) - goto retry_temporary_error1; // retry indefinitly - break; - default: - break; - } - set_ndb_err(thd, dict->getNdbError()); - res= ndb_to_mysql_error(&dict->getNdbError()); - DBUG_PRINT("info", ("error(1) %u", res)); - } - h->release_metadata(thd, ndb); - } - else - { - ndb->setDatabaseName(db); - while (1) - { - Ndb_table_guard ndbtab_g(dict, table_name); - if (ndbtab_g.get_table()) - { - retry_temporary_error2: - if (dict->dropTableGlobal(*ndbtab_g.get_table()) == 0) - { - ndb_table_id= ndbtab_g.get_table()->getObjectId(); - ndb_table_version= ndbtab_g.get_table()->getObjectVersion(); - DBUG_PRINT("info", ("success 2")); - break; - } - else - { - switch (dict->getNdbError().status) - { - case NdbError::TemporaryError: - if (!thd->killed) - goto retry_temporary_error2; // retry indefinitly - break; - default: - if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT) - { - ndbtab_g.invalidate(); - continue; - } - break; - } - } - } - set_ndb_err(thd, dict->getNdbError()); - res= ndb_to_mysql_error(&dict->getNdbError()); - DBUG_PRINT("info", ("error(2) %u", res)); - break; - } - } - - if (res) - { -#ifdef HAVE_NDB_BINLOG - /* the drop table failed for some reason, drop the share anyways */ - if (share) - { - mysql_mutex_lock(&ndbcluster_mutex); - if (share->state != NSS_DROPPED) - { - /* - The share kept by the server has not been freed, free it - */ - share->state= NSS_DROPPED; - /* ndb_share reference create free */ - DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - } - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - mysql_mutex_unlock(&ndbcluster_mutex); - } -#endif - DBUG_RETURN(res); - } - -#ifdef HAVE_NDB_BINLOG - /* stop the logging of the dropped table, and cleanup */ - - /* - drop table is successful even if table does not exist in ndb - and in case table was actually not dropped, there is no need - to force a gcp, and setting the event_name to null will indicate - that there is no event to be dropped - */ - int table_dropped= dict->getNdbError().code != 709; - - if (!IS_TMP_PREFIX(table_name) && share && - current_thd->lex->sql_command != SQLCOM_TRUNCATE) - { - 
ndbcluster_log_schema_op(thd, share, - thd->query(), thd->query_length(), - share->db, share->table_name, - ndb_table_id, ndb_table_version, - SOT_DROP_TABLE, 0, 0); - } - else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op - will do a force GCP */ - dict->forceGCPWait(); - - if (!IS_TMP_PREFIX(table_name)) - { - String event_name(INJECTOR_EVENT_LEN); - ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0); - ndbcluster_handle_drop_table(ndb, - table_dropped ? event_name.c_ptr() : 0, - share, "delete table"); - } - - if (share) - { - mysql_mutex_lock(&ndbcluster_mutex); - if (share->state != NSS_DROPPED) - { - /* - The share kept by the server has not been freed, free it - */ - share->state= NSS_DROPPED; - /* ndb_share reference create free */ - DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - } - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - mysql_mutex_unlock(&ndbcluster_mutex); - } -#endif - DBUG_RETURN(0); -} - -int ha_ndbcluster::delete_table(const char *name) -{ - DBUG_ENTER("ha_ndbcluster::delete_table"); - DBUG_PRINT("enter", ("name: %s", name)); - set_dbname(name); - set_tabname(name); - -#ifdef HAVE_NDB_BINLOG - /* - Don't allow drop table unless - schema distribution table is setup - */ - if (!ndb_schema_share) - { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_ASSERT(ndb_schema_share); - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } -#endif - - if (check_ndb_connection()) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - - /* Call ancestor function to delete .ndb file */ - handler::delete_table(name); - - DBUG_RETURN(delete_table(this, get_ndb(),name, m_dbname, m_tabname)); -} - - -void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values) -{ - uint cache_size; - Uint64 auto_value; - THD *thd= current_thd; - DBUG_ENTER("get_auto_increment"); - DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); - Ndb *ndb= get_ndb(); - - if (m_rows_inserted > m_rows_to_insert) - { - /* We guessed too low */ - m_rows_to_insert+= m_autoincrement_prefetch; - } - uint remaining= m_rows_to_insert - m_rows_inserted; - ha_rows prefetch= THDVAR(thd, autoincrement_prefetch_sz); - uint min_prefetch= - (remaining < prefetch) ? prefetch : remaining; - cache_size= ((remaining < m_autoincrement_prefetch) ? - min_prefetch - : remaining); - uint retries= NDB_AUTO_INCREMENT_RETRIES; - int retry_sleep= 30; /* 30 milliseconds, transaction */ - for (;;) - { - Ndb_tuple_id_range_guard g(m_share); - if ((m_skip_auto_increment && - ndb->readAutoIncrementValue(m_table, g.range, auto_value)) || - ndb->getAutoIncrementValue(m_table, g.range, auto_value, cache_size, increment, offset)) - { - if (--retries && - ndb->getNdbError().status == NdbError::TemporaryError) - { - my_sleep(retry_sleep); - continue; - } - const NdbError err= ndb->getNdbError(); - sql_print_error("Error %lu in ::get_auto_increment(): %s", - (ulong) err.code, err.message); - *first_value= ~(ulonglong) 0; - DBUG_VOID_RETURN; - } - break; - } - *first_value= (longlong)auto_value; - /* From the point of view of MySQL, NDB reserves one row at a time */ - *nb_reserved_values= 1; - DBUG_VOID_RETURN; -} - - -/** - Constructor for the NDB Cluster table handler . 
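The cache_size arithmetic in get_auto_increment() above effectively reserves the larger of the rows still expected in the current insert and the session's prefetch setting; a simplified worked version follows (assuming the handler's cached prefetch equals the session variable, with THDVAR replaced by a plain parameter).

    #include <cstdio>

    // Reserve at least the configured prefetch, and at least as many values
    // as rows we still expect to insert in the current bulk operation.
    static unsigned autoinc_cache_size(unsigned rows_to_insert,
                                       unsigned rows_inserted,
                                       unsigned prefetch_sz)
    {
      unsigned remaining = rows_to_insert - rows_inserted;
      return remaining < prefetch_sz ? prefetch_sz : remaining;
    }

    int main()
    {
      std::printf("%u\n", autoinc_cache_size(1000, 0, 32));   // bulk insert: 1000
      std::printf("%u\n", autoinc_cache_size(1, 0, 32));      // single row: 32
      return 0;
    }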
-*/ - -/* - Normal flags for binlogging is that ndb has HA_HAS_OWN_BINLOGGING - and preferes HA_BINLOG_ROW_CAPABLE - Other flags are set under certain circumstaces in table_flags() -*/ -#define HA_NDBCLUSTER_TABLE_FLAGS \ - HA_REC_NOT_IN_SEQ | \ - HA_NULL_IN_KEY | \ - HA_AUTO_PART_KEY | \ - HA_NO_PREFIX_CHAR_KEYS | \ - HA_NEED_READ_RANGE_BUFFER | \ - HA_CAN_GEOMETRY | \ - HA_CAN_BIT_FIELD | \ - HA_PRIMARY_KEY_REQUIRED_FOR_POSITION | \ - HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | \ - HA_PARTIAL_COLUMN_READ | \ - HA_HAS_OWN_BINLOGGING | \ - HA_BINLOG_ROW_CAPABLE | \ - HA_HAS_RECORDS - -ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg): - handler(hton, table_arg), - m_active_trans(NULL), - m_active_cursor(NULL), - m_table(NULL), - m_table_info(NULL), - m_table_flags(HA_NDBCLUSTER_TABLE_FLAGS), - m_share(0), - m_part_info(NULL), - m_use_partition_function(FALSE), - m_sorted(FALSE), - m_use_write(FALSE), - m_ignore_dup_key(FALSE), - m_has_unique_index(FALSE), - m_primary_key_update(FALSE), - m_ignore_no_key(FALSE), - m_rows_to_insert((ha_rows) 1), - m_rows_inserted((ha_rows) 0), - m_bulk_insert_rows((ha_rows) 1024), - m_rows_changed((ha_rows) 0), - m_bulk_insert_not_flushed(FALSE), - m_delete_cannot_batch(FALSE), - m_update_cannot_batch(FALSE), - m_ops_pending(0), - m_skip_auto_increment(TRUE), - m_blobs_pending(0), - m_blobs_offset(0), - m_blobs_buffer(0), - m_blobs_buffer_size(0), - m_dupkey((uint) -1), - m_ha_not_exact_count(FALSE), - m_force_send(TRUE), - m_autoincrement_prefetch(DEFAULT_AUTO_PREFETCH), - m_transaction_on(TRUE), - m_cond(NULL), - m_multi_cursor(NULL) -{ - int i; - - DBUG_ENTER("ha_ndbcluster"); - - m_tabname[0]= '\0'; - m_dbname[0]= '\0'; - - stats.records= ~(ha_rows)0; // uninitialized - stats.block_size= 1024; - - for (i= 0; i < MAX_KEY; i++) - ndb_init_index(m_index[i]); - - DBUG_VOID_RETURN; -} - - -int ha_ndbcluster::ha_initialise() -{ - DBUG_ENTER("ha_ndbcluster::ha_initialise"); - if (check_ndb_in_thd(current_thd)) - { - DBUG_RETURN(FALSE); - } - DBUG_RETURN(TRUE); -} - -/** - Destructor for NDB Cluster table handler. -*/ - -ha_ndbcluster::~ha_ndbcluster() -{ - THD *thd= current_thd; - Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb; - DBUG_ENTER("~ha_ndbcluster"); - - if (m_share) - { - /* ndb_share reference handler free */ - DBUG_PRINT("NDB_SHARE", ("%s handler free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); - } - release_metadata(thd, ndb); - my_free(m_blobs_buffer); - m_blobs_buffer= 0; - - // Check for open cursor/transaction - if (m_active_cursor) { - } - DBUG_ASSERT(m_active_cursor == NULL); - if (m_active_trans) { - } - DBUG_ASSERT(m_active_trans == NULL); - - // Discard any generated condition - DBUG_PRINT("info", ("Deleting generated condition")); - if (m_cond) - { - delete m_cond; - m_cond= NULL; - } - - DBUG_VOID_RETURN; -} - - - -/** - Open a table for further use. 
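HA_NDBCLUSTER_TABLE_FLAGS is simply an OR of capability bits, and table_flags() later adjusts that mask per statement, for example under statement-based binlogging. A small sketch of the pattern with made-up flag names (not the real HA_* constants from handler.h):

#include <cstdint>

// Made-up capability bits standing in for the HA_* flags.
constexpr uint64_t F_NULL_IN_KEY         = 1ull << 0;
constexpr uint64_t F_OWN_BINLOGGING      = 1ull << 1;
constexpr uint64_t F_BINLOG_ROW_CAPABLE  = 1ull << 2;
constexpr uint64_t F_BINLOG_STMT_CAPABLE = 1ull << 3;

constexpr uint64_t DEFAULT_TABLE_FLAGS =
    F_NULL_IN_KEY | F_OWN_BINLOGGING | F_BINLOG_ROW_CAPABLE;

// Per-statement view of the flags: under statement-based binlogging the
// engine stops claiming its own binlogging and advertises statement capability.
uint64_t effective_table_flags(bool stmt_based_binlog)
{
  uint64_t f = DEFAULT_TABLE_FLAGS;
  if (stmt_based_binlog)
    f = (f | F_BINLOG_STMT_CAPABLE) & ~F_OWN_BINLOGGING;
  return f;
}
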
- - - fetch metadata for this table from NDB - - check that table exists - - @retval - 0 ok - @retval - < 0 Table has changed -*/ - -int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) -{ - int res; - KEY *key; - DBUG_ENTER("ha_ndbcluster::open"); - DBUG_PRINT("enter", ("name: %s mode: %d test_if_locked: %d", - name, mode, test_if_locked)); - - /* - Setup ref_length to make room for the whole - primary key to be written in the ref variable - */ - - if (table_share->primary_key != MAX_KEY) - { - key= table->key_info+table_share->primary_key; - ref_length= key->key_length; - } - else // (table_share->primary_key == MAX_KEY) - { - if (m_use_partition_function) - { - ref_length+= sizeof(m_part_id); - } - } - - DBUG_PRINT("info", ("ref_length: %d", ref_length)); - - // Init table lock structure - /* ndb_share reference handler */ - if (!(m_share=get_share(name, table))) - DBUG_RETURN(1); - DBUG_PRINT("NDB_SHARE", ("%s handler use_count: %u", - m_share->key, m_share->use_count)); - thr_lock_data_init(&m_share->lock,&m_lock,(void*) 0); - - set_dbname(name); - set_tabname(name); - - if ((res= check_ndb_connection()) || - (res= get_metadata(name))) - { - /* ndb_share reference handler free */ - DBUG_PRINT("NDB_SHARE", ("%s handler free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); - m_share= 0; - DBUG_RETURN(res); - } - while (1) - { - Ndb *ndb= get_ndb(); - if (ndb->setDatabaseName(m_dbname)) - { - set_ndb_err(current_thd, ndb->getNdbError()); - res= ndb_to_mysql_error(&ndb->getNdbError()); - break; - } - struct Ndb_statistics stat; - res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat); - stats.mean_rec_length= stat.row_size; - stats.data_file_length= stat.fragment_memory; - stats.records= stat.row_count; - if(!res) - res= info(HA_STATUS_CONST); - break; - } - if (res) - { - free_share(&m_share); - m_share= 0; - release_metadata(current_thd, get_ndb()); - DBUG_RETURN(res); - } -#ifdef HAVE_NDB_BINLOG - if (!ndb_binlog_tables_inited) - { - table->db_stat|= HA_READ_ONLY; - sql_print_information("table '%s' opened read only", name); - } -#endif - DBUG_RETURN(0); -} - -/* - Set partition info - - SYNOPSIS - set_part_info() - part_info - - RETURN VALUE - NONE - - DESCRIPTION - Set up partition info when handler object created -*/ - -void ha_ndbcluster::set_part_info(partition_info *part_info) -{ - m_part_info= part_info; - if (!(m_part_info->part_type == HASH_PARTITION && - m_part_info->list_of_part_fields && - !m_part_info->is_sub_partitioned())) - m_use_partition_function= TRUE; -} - -/** - Close the table; release resources setup by open(). -*/ - -int ha_ndbcluster::close(void) -{ - DBUG_ENTER("close"); - THD *thd= table->in_use; - Ndb *ndb= thd ? check_ndb_in_thd(thd) : g_ndb; - /* ndb_share reference handler free */ - DBUG_PRINT("NDB_SHARE", ("%s handler free use_count: %u", - m_share->key, m_share->use_count)); - free_share(&m_share); - m_share= 0; - release_metadata(thd, ndb); - DBUG_RETURN(0); -} - - -/** - @todo - - Alt.1 If init fails because to many allocated Ndb - wait on condition for a Ndb object to be released. 
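open() above sizes ref_length so that a row reference can be round-tripped through position()/rnd_pos(): the full primary key when one exists, otherwise the hidden-key length already set by the base handler, plus room for the partition id when a user-defined partitioning function is in play. The selection, as a sketch with hypothetical types:

#include <cstdint>

struct OpenInfo {
  bool     has_primary_key;          // table_share->primary_key != MAX_KEY
  unsigned pk_length;                // key_info->key_length of the primary key
  unsigned default_ref_len;          // hidden-key length set by the base handler
  bool     uses_partition_function;  // user-defined partitioning in use
};

unsigned compute_ref_length(const OpenInfo& t)
{
  unsigned len = t.has_primary_key ? t.pk_length : t.default_ref_len;
  if (!t.has_primary_key && t.uses_partition_function)
    len += sizeof(uint32_t);         // make room for the partition id
  return len;
}
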
- - Alt.2 Seize/release from pool, wait until next release -*/ -Thd_ndb* ha_ndbcluster::seize_thd_ndb() -{ - Thd_ndb *thd_ndb; - DBUG_ENTER("seize_thd_ndb"); - - thd_ndb= new Thd_ndb(); - if (thd_ndb == NULL) - { - my_errno= HA_ERR_OUT_OF_MEM; - return NULL; - } - if (thd_ndb->ndb->init(max_transactions) != 0) - { - ERR_PRINT(thd_ndb->ndb->getNdbError()); - /* - TODO - Alt.1 If init fails because to many allocated Ndb - wait on condition for a Ndb object to be released. - Alt.2 Seize/release from pool, wait until next release - */ - delete thd_ndb; - thd_ndb= NULL; - } - DBUG_RETURN(thd_ndb); -} - - -void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) -{ - DBUG_ENTER("release_thd_ndb"); - delete thd_ndb; - DBUG_VOID_RETURN; -} - - -/** - If this thread already has a Thd_ndb object allocated - in current THD, reuse it. Otherwise - seize a Thd_ndb object, assign it to current THD and use it. - -*/ - -Ndb* check_ndb_in_thd(THD* thd) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (!thd_ndb) - { - if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - return NULL; - set_thd_ndb(thd, thd_ndb); - } - return thd_ndb->ndb; -} - - - -int ha_ndbcluster::check_ndb_connection(THD* thd) -{ - Ndb *ndb; - DBUG_ENTER("check_ndb_connection"); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - if (ndb->setDatabaseName(m_dbname)) - { - ERR_RETURN(ndb->getNdbError()); - } - DBUG_RETURN(0); -} - - -static int ndbcluster_close_connection(handlerton *hton, THD *thd) -{ - Thd_ndb *thd_ndb= get_thd_ndb(thd); - DBUG_ENTER("ndbcluster_close_connection"); - if (thd_ndb) - { - ha_ndbcluster::release_thd_ndb(thd_ndb); - set_thd_ndb(thd, NULL); // not strictly required but does not hurt either - } - DBUG_RETURN(0); -} - - -/** - Try to discover one table from NDB. 
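check_ndb_in_thd()/seize_thd_ndb() implement a seize-on-first-use pattern: every connection thread lazily gets its own Thd_ndb, and close_connection releases it. The same idea as a standalone sketch using a thread-local slot (hypothetical names, not the real THD plumbing):

#include <memory>

struct SessionNdb {                  // stand-in for Thd_ndb
  bool init() { return true; }       // pretend connection setup
};

// One slot per server thread, created on first use and freed on disconnect.
thread_local std::unique_ptr<SessionNdb> tls_session_ndb;

SessionNdb* get_session_ndb()
{
  if (!tls_session_ndb)
  {
    auto fresh = std::make_unique<SessionNdb>();
    if (!fresh->init())
      return nullptr;                // could not seize a connection object
    tls_session_ndb = std::move(fresh);
  }
  return tls_session_ndb.get();
}

void release_session_ndb() { tls_session_ndb.reset(); }
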
-*/ - -int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, - const char *name, - uchar **frmblob, - size_t *frmlen) -{ - int error= 0; - NdbError ndb_error; - size_t len; - uchar* data= NULL; - Ndb* ndb; - char key[FN_REFLEN + 1]; - DBUG_ENTER("ndbcluster_discover"); - DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - if (ndb->setDatabaseName(db)) - { - ERR_RETURN(ndb->getNdbError()); - } - NDBDICT* dict= ndb->getDictionary(); - build_table_filename(key, sizeof(key) - 1, db, name, "", 0); - /* ndb_share reference temporary */ - NDB_SHARE *share= get_share(key, 0, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - } - if (share && get_ndb_share_state(share) == NSS_ALTERED) - { - // Frm has been altered on disk, but not yet written to ndb - if (readfrm(key, &data, &len)) - { - DBUG_PRINT("error", ("Could not read frm")); - error= 1; - goto err; - } - } - else - { - Ndb_table_guard ndbtab_g(dict, name); - const NDBTAB *tab= ndbtab_g.get_table(); - if (!tab) - { - const NdbError err= dict->getNdbError(); - if (err.code == 709 || err.code == 723) - { - error= -1; - DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); - } - else - { - error= -1; - ndb_error= err; - DBUG_PRINT("info", ("ndb_error.code: %u", ndb_error.code)); - } - goto err; - } - DBUG_PRINT("info", ("Found table %s", tab->getName())); - - len= tab->getFrmLength(); - if (len == 0 || tab->getFrmData() == NULL) - { - DBUG_PRINT("error", ("No frm data found.")); - error= 1; - goto err; - } - - if (unpackfrm(&data, &len, (uchar*) tab->getFrmData())) - { - DBUG_PRINT("error", ("Could not unpack table")); - error= 1; - goto err; - } - } - - *frmlen= len; - *frmblob= data; - - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } - - DBUG_RETURN(0); -err: - my_free(data); - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } - if (ndb_error.code) - { - ERR_RETURN(ndb_error); - } - DBUG_RETURN(error); -} - -/** - Check if a table exists in NDB. -*/ - -int ndbcluster_table_exists_in_engine(handlerton *hton, THD* thd, - const char *db, - const char *name) -{ - Ndb* ndb; - DBUG_ENTER("ndbcluster_table_exists_in_engine"); - DBUG_PRINT("enter", ("db: %s name: %s", db, name)); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - NDBDICT* dict= ndb->getDictionary(); - NdbDictionary::Dictionary::List list; - if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - ERR_RETURN(dict->getNdbError()); - for (uint i= 0 ; i < list.count ; i++) - { - NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; - if (my_strcasecmp(system_charset_info, elmt.database, db)) - continue; - if (my_strcasecmp(system_charset_info, elmt.name, name)) - continue; - DBUG_PRINT("info", ("Found table")); - DBUG_RETURN(HA_ERR_TABLE_EXIST); - } - DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); -} - - - -extern "C" uchar* tables_get_key(const char *entry, size_t *length, - my_bool not_used __attribute__((unused))) -{ - *length= strlen(entry); - return (uchar*) entry; -} - - -/** - Drop a database in NDB Cluster - - @note - add a dummy void function, since stupid handlerton is returning void instead of int... 
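ndbcluster_discover() has two possible sources for the table definition: when the local share is in the altered state, the .frm on disk is authoritative; otherwise the packed frm blob stored in the NDB dictionary is unpacked. A simplified sketch of that decision; the helper bodies are stand-ins for readfrm()/unpackfrm() and the dictionary lookup:

#include <optional>
#include <string>
#include <vector>

using Blob = std::vector<unsigned char>;

// Stand-ins for readfrm(), the dictionary lookup and unpackfrm().
std::optional<Blob> read_local_frm(const std::string&)                       { return Blob{0x01}; }
std::optional<Blob> fetch_packed_frm(const std::string&, const std::string&) { return Blob{0x02}; }
std::optional<Blob> unpack_frm(const Blob& packed)                           { return packed; }

std::optional<Blob> discover_table(const std::string& frm_path, const std::string& db,
                                   const std::string& name, bool locally_altered)
{
  if (locally_altered)               // the disk copy is newer than what NDB holds
    return read_local_frm(frm_path);
  auto packed = fetch_packed_frm(db, name);
  if (!packed || packed->empty())
    return std::nullopt;             // table unknown in NDB or it has no frm data
  return unpack_frm(*packed);
}
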
-*/ -int ndbcluster_drop_database_impl(const char *path) -{ - DBUG_ENTER("ndbcluster_drop_database"); - THD *thd= current_thd; - char dbname[FN_HEADLEN]; - Ndb* ndb; - NdbDictionary::Dictionary::List list; - uint i; - char *tabname; - List drop_list; - int ret= 0; - ha_ndbcluster::set_dbname(path, (char *)&dbname); - DBUG_PRINT("enter", ("db: %s", dbname)); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(-1); - - // List tables in NDB - NDBDICT *dict= ndb->getDictionary(); - if (dict->listObjects(list, - NdbDictionary::Object::UserTable) != 0) - DBUG_RETURN(-1); - for (i= 0 ; i < list.count ; i++) - { - NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; - DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name)); - - // Add only tables that belongs to db - if (my_strcasecmp(system_charset_info, elmt.database, dbname)) - continue; - DBUG_PRINT("info", ("%s must be dropped", elmt.name)); - drop_list.push_back(thd->strdup(elmt.name)); - } - // Drop any tables belonging to database - char full_path[FN_REFLEN + 1]; - char *tmp= full_path + - build_table_filename(full_path, sizeof(full_path) - 1, dbname, "", "", 0); - if (ndb->setDatabaseName(dbname)) - { - ERR_RETURN(ndb->getNdbError()); - } - List_iterator_fast it(drop_list); - while ((tabname=it++)) - { - tablename_to_filename(tabname, tmp, FN_REFLEN - (tmp - full_path)-1); - if (ha_ndbcluster::delete_table(0, ndb, full_path, dbname, tabname)) - { - const NdbError err= dict->getNdbError(); - if (err.code != 709 && err.code != 723) - { - set_ndb_err(thd, err); - ret= ndb_to_mysql_error(&err); - } - } - } - DBUG_RETURN(ret); -} - -static void ndbcluster_drop_database(handlerton *hton, char *path) -{ - DBUG_ENTER("ndbcluster_drop_database"); -#ifdef HAVE_NDB_BINLOG - /* - Don't allow drop database unless - schema distribution table is setup - */ - if (!ndb_schema_share) - { - DBUG_PRINT("info", ("Schema distribution table not setup")); - DBUG_ASSERT(ndb_schema_share); - DBUG_VOID_RETURN; - } -#endif - ndbcluster_drop_database_impl(path); -#ifdef HAVE_NDB_BINLOG - char db[FN_REFLEN]; - THD *thd= current_thd; - ha_ndbcluster::set_dbname(path, db); - ndbcluster_log_schema_op(thd, 0, - thd->query(), thd->query_length(), - db, "", 0, 0, SOT_DROP_DB, 0, 0); -#endif - DBUG_VOID_RETURN; -} - -int ndb_create_table_from_engine(THD *thd, const char *db, - const char *table_name) -{ - LEX *old_lex= thd->lex, newlex; - thd->lex= &newlex; - newlex.current_select= NULL; - int res= ha_create_table_from_engine(thd, db, table_name); - thd->lex= old_lex; - return res; -} - -/* - find all tables in ndb and discover those needed -*/ -int ndbcluster_find_all_files(THD *thd) -{ - Ndb* ndb; - char key[FN_REFLEN + 1]; - NDBDICT *dict; - int unhandled, retries= 5, skipped; - DBUG_ENTER("ndbcluster_find_all_files"); - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - - dict= ndb->getDictionary(); - - LINT_INIT(unhandled); - LINT_INIT(skipped); - do - { - NdbDictionary::Dictionary::List list; - if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - ERR_RETURN(dict->getNdbError()); - unhandled= 0; - skipped= 0; - retries--; - for (uint i= 0 ; i < list.count ; i++) - { - NDBDICT::List::Element& elmt= list.elements[i]; - if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name)) - { - DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); - continue; - } - DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name)); - if (elmt.state != NDBOBJ::StateOnline && - elmt.state != 
NDBOBJ::StateBackup && - elmt.state != NDBOBJ::StateBuilding) - { - sql_print_information("NDB: skipping setup table %s.%s, in state %d", - elmt.database, elmt.name, elmt.state); - skipped++; - continue; - } - - ndb->setDatabaseName(elmt.database); - Ndb_table_guard ndbtab_g(dict, elmt.name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - if (!ndbtab) - { - if (retries == 0) - sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s", - elmt.database, elmt.name, - dict->getNdbError().code, - dict->getNdbError().message); - unhandled++; - continue; - } - - if (ndbtab->getFrmLength() == 0) - continue; - - /* check if database exists */ - char *end= key + - build_table_filename(key, sizeof(key) - 1, elmt.database, "", "", 0); - if (my_access(key, F_OK)) - { - /* no such database defined, skip table */ - continue; - } - /* finalize construction of path */ - end+= tablename_to_filename(elmt.name, end, - sizeof(key)-(end-key)); - uchar *data= 0, *pack_data= 0; - size_t length, pack_length; - int discover= 0; - if (readfrm(key, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) - { - discover= 1; - sql_print_information("NDB: missing frm for %s.%s, discovering...", - elmt.database, elmt.name); - } - else if (cmp_frm(ndbtab, pack_data, pack_length)) - { - /* ndb_share reference temporary */ - NDB_SHARE *share= get_share(key, 0, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - } - if (!share || get_ndb_share_state(share) != NSS_ALTERED) - { - discover= 1; - sql_print_information("NDB: mismatch in frm for %s.%s, discovering...", - elmt.database, elmt.name); - } - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } - } - my_free(data); - my_free(pack_data); - - if (discover) - { - /* ToDo 4.1 database needs to be created if missing */ - if (ndb_create_table_from_engine(thd, elmt.database, elmt.name)) - { - /* ToDo 4.1 handle error */ - } - } -#ifdef HAVE_NDB_BINLOG - else - { - /* set up replication for this table */ - ndbcluster_create_binlog_setup(ndb, key, end-key, - elmt.database, elmt.name, - TRUE); - } -#endif - } - } - while (unhandled && retries); - - DBUG_RETURN(-(skipped + unhandled)); -} - -int ndbcluster_find_files(handlerton *hton, THD *thd, - const char *db, - const char *path, - const char *wild, bool dir, List *files) -{ - DBUG_ENTER("ndbcluster_find_files"); - DBUG_PRINT("enter", ("db: %s", db)); - { // extra bracket to avoid gcc 2.95.3 warning - uint i; - Ndb* ndb; - char name[FN_REFLEN + 1]; - HASH ndb_tables, ok_tables; - NDBDICT::List list; - - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); - - if (dir) - DBUG_RETURN(0); // Discover of databases not yet supported - - // List tables in NDB - NDBDICT *dict= ndb->getDictionary(); - if (dict->listObjects(list, - NdbDictionary::Object::UserTable) != 0) - ERR_RETURN(dict->getNdbError()); - - if (my_hash_init(&ndb_tables, system_charset_info,list.count,0,0, - (my_hash_get_key)tables_get_key,0,0)) - { - DBUG_PRINT("error", ("Failed to init HASH ndb_tables")); - DBUG_RETURN(-1); - } - - if (my_hash_init(&ok_tables, system_charset_info,32,0,0, - (my_hash_get_key)tables_get_key,0,0)) - { - DBUG_PRINT("error", ("Failed to init HASH ok_tables")); - my_hash_free(&ndb_tables); - DBUG_RETURN(-1); - } - - for (i= 0 ; i < list.count ; i++) - { - NDBDICT::List::Element& elmt= list.elements[i]; - if 
(IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name)) - { - DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); - continue; - } - DBUG_PRINT("info", ("Found %s/%s in NDB", elmt.database, elmt.name)); - - // Add only tables that belongs to db - if (my_strcasecmp(system_charset_info, elmt.database, db)) - continue; - - // Apply wildcard to list of tables in NDB - if (wild) - { - if (lower_case_table_names) - { - if (wild_case_compare(files_charset_info, elmt.name, wild)) - continue; - } - else if (wild_compare(elmt.name,wild,0)) - continue; - } - DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", elmt.name)); - my_hash_insert(&ndb_tables, (uchar*)thd->strdup(elmt.name)); - } - - LEX_STRING *file_name; - List_iterator it(*files); - List delete_list; - char *file_name_str; - while ((file_name=it++)) - { - bool file_on_disk= FALSE; - DBUG_PRINT("info", ("%s", file_name->str)); - if (my_hash_search(&ndb_tables, (uchar*) file_name->str, - file_name->length)) - { - build_table_filename(name, sizeof(name) - 1, db, - file_name->str, reg_ext, 0); - if (my_access(name, F_OK)) - { - DBUG_PRINT("info", ("Table %s listed and need discovery", - file_name->str)); - if (ndb_create_table_from_engine(thd, db, file_name->str)) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_TABLE_EXISTS_ERROR, - "Discover of table %s.%s failed", - db, file_name->str); - continue; - } - } - DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name->str)); - file_on_disk= TRUE; - } - - // Check for .ndb file with this name - build_table_filename(name, sizeof(name) - 1, db, - file_name->str, ha_ndb_ext, 0); - DBUG_PRINT("info", ("Check access for %s", name)); - if (my_access(name, F_OK)) - { - DBUG_PRINT("info", ("%s did not exist on disk", name)); - // .ndb file did not exist on disk, another table type - if (file_on_disk) - { - // Ignore this ndb table - uchar *record= my_hash_search(&ndb_tables, (uchar*) file_name->str, - file_name->length); - DBUG_ASSERT(record); - my_hash_delete(&ndb_tables, record); - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_TABLE_EXISTS_ERROR, - "Local table %s.%s shadows ndb table", - db, file_name->str); - } - continue; - } - if (file_on_disk) - { - // File existed in NDB and as frm file, put in ok_tables list - my_hash_insert(&ok_tables, (uchar*) file_name->str); - continue; - } - DBUG_PRINT("info", ("%s existed on disk", name)); - // The .ndb file exists on disk, but it's not in list of tables in ndb - // Verify that handler agrees table is gone. 
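ndbcluster_find_files() builds a hash of the tables NDB knows about, walks the files found on disk, and ends up with lists of tables to discover locally and local files to delete. The bookkeeping, reduced to standard containers and ignoring the shadow-table and .ndb-extension details (sketch):

#include <string>
#include <unordered_set>
#include <vector>

struct Reconciliation {
  std::vector<std::string> to_discover;  // known to NDB, no local .frm yet
  std::vector<std::string> to_delete;    // local files left over, gone from NDB
};

Reconciliation reconcile(const std::unordered_set<std::string>& in_ndb,
                         const std::unordered_set<std::string>& on_disk)
{
  Reconciliation r;
  for (const auto& t : in_ndb)
    if (!on_disk.count(t))
      r.to_discover.push_back(t);        // create the local .frm via discovery
  for (const auto& t : on_disk)
    if (!in_ndb.count(t))
      r.to_delete.push_back(t);          // drop the stale local files
  return r;
}
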
- if (ndbcluster_table_exists_in_engine(hton, thd, db, file_name->str) == - HA_ERR_NO_SUCH_TABLE) - { - DBUG_PRINT("info", ("NDB says %s does not exists", file_name->str)); - it.remove(); - // Put in list of tables to remove from disk - delete_list.push_back(thd->strdup(file_name->str)); - } - } - -#ifdef HAVE_NDB_BINLOG - /* setup logging to binlog for all discovered tables */ - { - char *end, *end1= name + - build_table_filename(name, sizeof(name) - 1, db, "", "", 0); - for (i= 0; i < ok_tables.records; i++) - { - file_name_str= (char*)my_hash_element(&ok_tables, i); - end= end1 + - tablename_to_filename(file_name_str, end1, sizeof(name) - (end1 - name)); - ndbcluster_create_binlog_setup(ndb, name, end-name, - db, file_name_str, TRUE); - } - } -#endif - - // Check for new files to discover - DBUG_PRINT("info", ("Checking for new files to discover")); - List create_list; - for (i= 0 ; i < ndb_tables.records ; i++) - { - file_name_str= (char*) my_hash_element(&ndb_tables, i); - if (!my_hash_search(&ok_tables, (uchar*) file_name_str, - strlen(file_name_str))) - { - build_table_filename(name, sizeof(name) - 1, - db, file_name_str, reg_ext, 0); - if (my_access(name, F_OK)) - { - DBUG_PRINT("info", ("%s must be discovered", file_name_str)); - // File is in list of ndb tables and not in ok_tables - // This table need to be created - create_list.push_back(thd->strdup(file_name_str)); - } - } - } - - /* - Delete old files. - - ndbcluster_find_files() may be called from I_S code and ndbcluster_binlog - thread in situations when some tables are already open. This means that - code below will try to obtain exclusive metadata lock on some table - while holding shared meta-data lock on other tables. This might lead to a - deadlock but such a deadlock should be detected by MDL deadlock detector. - - XXX: the scenario described above is not covered with any test. - */ - List_iterator_fast it3(delete_list); - while ((file_name_str= it3++)) - { - DBUG_PRINT("info", ("Remove table %s/%s", db, file_name_str)); - /* Delete the table and all related files. */ - TABLE_LIST table_list; - table_list.init_one_table(db, strlen(db), file_name_str, - strlen(file_name_str), file_name_str, - TL_WRITE); - table_list.mdl_request.set_type(MDL_EXCLUSIVE); - (void)mysql_rm_table_part2(thd, &table_list, - FALSE, /* if_exists */ - FALSE, /* drop_temporary */ - FALSE, /* drop_view */ - TRUE /* dont_log_query*/); - trans_commit_implicit(thd); /* Safety, should be unnecessary. */ - thd->mdl_context.release_transactional_locks(); - /* Clear error message that is returned when table is deleted */ - thd->clear_error(); - } - - /* Lock mutex before creating .FRM files. */ - /* Create new files. 
*/ - List_iterator_fast it2(create_list); - while ((file_name_str=it2++)) - { - DBUG_PRINT("info", ("Table %s need discovery", file_name_str)); - if (ndb_create_table_from_engine(thd, db, file_name_str) == 0) - { - LEX_STRING *tmp_file_name= 0; - tmp_file_name= thd->make_lex_string(tmp_file_name, file_name_str, - strlen(file_name_str), TRUE); - files->push_back(tmp_file_name); - } - } - - my_hash_free(&ok_tables); - my_hash_free(&ndb_tables); - - // Delete schema file from files - if (!strcmp(db, NDB_REP_DB)) - { - uint count = 0; - while (count++ < files->elements) - { - file_name = (LEX_STRING *)files->pop(); - if (!strcmp(file_name->str, NDB_SCHEMA_TABLE)) - { - DBUG_PRINT("info", ("skip %s.%s table, it should be hidden to user", - NDB_REP_DB, NDB_SCHEMA_TABLE)); - continue; - } - files->push_back(file_name); - } - } - } // extra bracket to avoid gcc 2.95.3 warning - DBUG_RETURN(0); -} - - -/* - Initialise all gloal variables before creating - a NDB Cluster table handler - */ - -/* Call back after cluster connect */ -static int connect_callback() -{ - mysql_mutex_lock(&LOCK_ndb_util_thread); - update_status_variables(g_ndb_cluster_connection); - - uint node_id, i= 0; - Ndb_cluster_connection_node_iter node_iter; - memset((void *)g_node_id_map, 0xFFFF, sizeof(g_node_id_map)); - while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter))) - g_node_id_map[node_id]= i++; - - mysql_cond_signal(&COND_ndb_util_thread); - mysql_mutex_unlock(&LOCK_ndb_util_thread); - return 0; -} - -extern int ndb_dictionary_is_mysqld; - -#ifdef HAVE_PSI_INTERFACE - -#ifdef HAVE_NDB_BINLOG -PSI_mutex_key key_injector_mutex, key_ndb_schema_share_mutex, - key_ndb_schema_object_mutex; -#endif /* HAVE_NDB_BINLOG */ - -PSI_mutex_key key_NDB_SHARE_mutex, key_ndbcluster_mutex, - key_LOCK_ndb_util_thread; - -static PSI_mutex_info all_ndbcluster_mutexes[]= -{ -#ifdef HAVE_NDB_BINLOG - {& key_injector_mutex, "injector_mutex", PSI_FLAG_GLOBAL}, - {& key_ndb_schema_share_mutex, "ndb_schema_share_mutex", PSI_FLAG_GLOBAL}, - {& key_ndb_schema_object_mutex, "ndb_schema_object_mutex", PSI_FLAG_GLOBAL}, -#endif /* HAVE_NDB_BINLOG */ - {& key_NDB_SHARE_mutex, "NDB_SHARE::mutex", PSI_FLAG_GLOBAL}, - {& key_ndbcluster_mutex, "ndbcluster_mutex", PSI_FLAG_GLOBAL}, - {& key_LOCK_ndb_util_thread, "LOCK_ndb_util_thread", PSI_FLAG_GLOBAL} -}; - -#ifdef HAVE_NDB_BINLOG -PSI_cond_key key_injector_cond; -#endif /* HAVE_NDB_BINLOG */ - -PSI_cond_key key_COND_ndb_util_thread, key_COND_ndb_util_ready; - -static PSI_cond_info all_ndbcluster_conds[]= -{ -#ifdef HAVE_NDB_BINLOG - {& key_injector_cond, "injector_cond", PSI_FLAG_GLOBAL}, -#endif /* HAVE_NDB_BINLOG */ - {& key_COND_ndb_util_thread, "COND_ndb_util_thread", PSI_FLAG_GLOBAL}, - {& key_COND_ndb_util_ready, "COND_ndb_util_ready", PSI_FLAG_GLOBAL} -}; - -#ifdef HAVE_NDB_BINLOG -PSI_thread_key key_thread_ndb_binlog; -#endif /* HAVE_NDB_BINLOG */ -PSI_thread_key key_thread_ndb_util; - -static PSI_thread_info all_ndbcluster_threads[]= -{ -#ifdef HAVE_NDB_BINLOG - { &key_thread_ndb_binlog, "ndb_binlog", PSI_FLAG_GLOBAL}, -#endif /* HAVE_NDB_BINLOG */ - { &key_thread_ndb_util, "ndb_util", PSI_FLAG_GLOBAL} -}; - -PSI_file_key key_file_ndb; - -static PSI_file_info all_ndbcluster_files[]= -{ - { &key_file_ndb, "ndb", 0} -}; - -void init_ndbcluster_psi_keys() -{ - const char* category= "ndbcluster"; - int count; - - if (PSI_server == NULL) - return; - - count= array_elements(all_ndbcluster_mutexes); - PSI_server->register_mutex(category, all_ndbcluster_mutexes, count); - - count= 
array_elements(all_ndbcluster_conds); - PSI_server->register_cond(category, all_ndbcluster_conds, count); - - count= array_elements(all_ndbcluster_threads); - PSI_server->register_thread(category, all_ndbcluster_threads, count); - - count= array_elements(all_ndbcluster_files); - PSI_server->register_file(category, all_ndbcluster_files, count); -} -#endif /* HAVE_PSI_INTERFACE */ - -static int ndbcluster_init(void *p) -{ - int res; - DBUG_ENTER("ndbcluster_init"); - - if (ndbcluster_inited) - DBUG_RETURN(FALSE); - -#ifdef HAVE_PSI_INTERFACE - init_ndbcluster_psi_keys(); -#endif - - mysql_mutex_init(key_ndbcluster_mutex, - &ndbcluster_mutex, MY_MUTEX_INIT_FAST); - mysql_mutex_init(key_LOCK_ndb_util_thread, - &LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST); - mysql_cond_init(key_COND_ndb_util_thread, &COND_ndb_util_thread, NULL); - mysql_cond_init(key_COND_ndb_util_ready, &COND_ndb_util_ready, NULL); - ndb_util_thread_running= -1; - ndbcluster_terminating= 0; - ndb_dictionary_is_mysqld= 1; - ndbcluster_hton= (handlerton *)p; - - { - handlerton *h= ndbcluster_hton; - h->state= SHOW_OPTION_YES; - h->db_type= DB_TYPE_NDBCLUSTER; - h->close_connection= ndbcluster_close_connection; - h->commit= ndbcluster_commit; - h->rollback= ndbcluster_rollback; - h->create= ndbcluster_create_handler; /* Create a new handler */ - h->drop_database= ndbcluster_drop_database; /* Drop a database */ - h->panic= ndbcluster_end; /* Panic call */ - h->show_status= ndbcluster_show_status; /* Show status */ - h->alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ - h->partition_flags= ndbcluster_partition_flags; /* Partition flags */ - h->alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */ - h->fill_is_table= ndbcluster_fill_is_table; -#ifdef HAVE_NDB_BINLOG - ndbcluster_binlog_init_handlerton(); -#endif - h->flags= HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED; - h->discover= ndbcluster_discover; - h->find_files= ndbcluster_find_files; - h->table_exists_in_engine= ndbcluster_table_exists_in_engine; - } - - // Format the connect string to be used for connecting to the cluster - int pos= 0; - char connectstring_buf[1024] = {0}; - if (opt_ndb_nodeid != 0) - pos+= my_snprintf(connectstring_buf, sizeof(connectstring_buf), - "nodeid=%u", opt_ndb_nodeid); - if (opt_ndb_mgmd_host) - pos+= my_snprintf(connectstring_buf+pos, sizeof(connectstring_buf)-pos, - "%s%s", pos ? "," : "", opt_ndb_mgmd_host); - if (opt_ndb_connectstring) - pos+= my_snprintf(connectstring_buf+pos, sizeof(connectstring_buf)-pos, - "%s%s", pos ? 
"," : "", opt_ndb_connectstring); - - - // Initialize ndb interface - ndb_init_internal(); - - // Set connectstring if specified - if (opt_ndb_connectstring != 0) - DBUG_PRINT("connectstring", ("%s", opt_ndb_connectstring)); - if ((g_ndb_cluster_connection= - new Ndb_cluster_connection(opt_ndb_connectstring)) == 0) - { - DBUG_PRINT("error",("Ndb_cluster_connection(%s)", - opt_ndb_connectstring)); - my_errno= HA_ERR_OUT_OF_MEM; - goto ndbcluster_init_error; - } - { - char buf[128]; - my_snprintf(buf, sizeof(buf), "mysqld --server-id=%lu", server_id); - g_ndb_cluster_connection->set_name(buf); - } - g_ndb_cluster_connection->set_optimized_node_selection - (THDVAR(0, optimized_node_selection)); - - // Create a Ndb object to open the connection to NDB - if ( (g_ndb= new Ndb(g_ndb_cluster_connection, "sys")) == 0 ) - { - DBUG_PRINT("error", ("failed to create global ndb object")); - my_errno= HA_ERR_OUT_OF_MEM; - goto ndbcluster_init_error; - } - if (g_ndb->init() != 0) - { - ERR_PRINT (g_ndb->getNdbError()); - goto ndbcluster_init_error; - } - - if ((res= g_ndb_cluster_connection->connect(0,0,0)) == 0) - { - connect_callback(); - DBUG_PRINT("info",("NDBCLUSTER storage engine at %s on port %d", - g_ndb_cluster_connection->get_connected_host(), - g_ndb_cluster_connection->get_connected_port())); - g_ndb_cluster_connection->wait_until_ready(10,3); - } - else if (res == 1) - { - if (g_ndb_cluster_connection->start_connect_thread(connect_callback)) - { - DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); - goto ndbcluster_init_error; - } -#ifndef DBUG_OFF - { - char buf[1024]; - DBUG_PRINT("info", - ("NDBCLUSTER storage engine not started, " - "will connect using %s", - g_ndb_cluster_connection-> - get_connectstring(buf,sizeof(buf)))); - } -#endif - } - else - { - DBUG_ASSERT(res == -1); - DBUG_PRINT("error", ("permanent error")); - goto ndbcluster_init_error; - } - - (void) my_hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, - (my_hash_get_key) ndbcluster_get_key,0,0); -#ifdef HAVE_NDB_BINLOG - /* start the ndb injector thread */ - if (ndbcluster_binlog_start()) - goto ndbcluster_init_error; -#endif /* HAVE_NDB_BINLOG */ - - // Create utility thread - pthread_t tmp; - if (mysql_thread_create(key_thread_ndb_util, - &tmp, &connection_attrib, ndb_util_thread_func, 0)) - { - DBUG_PRINT("error", ("Could not create ndb utility thread")); - my_hash_free(&ndbcluster_open_tables); - mysql_mutex_destroy(&ndbcluster_mutex); - mysql_mutex_destroy(&LOCK_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_ready); - goto ndbcluster_init_error; - } - - /* Wait for the util thread to start */ - mysql_mutex_lock(&LOCK_ndb_util_thread); - while (ndb_util_thread_running < 0) - mysql_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread); - mysql_mutex_unlock(&LOCK_ndb_util_thread); - - if (!ndb_util_thread_running) - { - DBUG_PRINT("error", ("ndb utility thread exited prematurely")); - my_hash_free(&ndbcluster_open_tables); - mysql_mutex_destroy(&ndbcluster_mutex); - mysql_mutex_destroy(&LOCK_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_ready); - goto ndbcluster_init_error; - } - - ndbcluster_inited= 1; - DBUG_RETURN(FALSE); - -ndbcluster_init_error: - if (g_ndb) - delete g_ndb; - g_ndb= NULL; - if (g_ndb_cluster_connection) - delete g_ndb_cluster_connection; - g_ndb_cluster_connection= NULL; - ndbcluster_hton->state= SHOW_OPTION_DISABLED; // If we couldn't use handler - - 
DBUG_RETURN(TRUE); -} - -/** - Used to fill in INFORMATION_SCHEMA* tables. - - @param hton handle to the handlerton structure - @param thd the thread/connection descriptor - @param[in,out] tables the information schema table that is filled up - @param cond used for conditional pushdown to storage engine - @param schema_table_idx the table id that distinguishes the type of table - - @return Operation status - */ -static int ndbcluster_fill_is_table(handlerton *hton, - THD *thd, - TABLE_LIST *tables, - COND *cond, - enum enum_schema_tables schema_table_idx) -{ - int ret= 0; - - if (schema_table_idx == SCH_FILES) - { - ret= ndbcluster_fill_files_table(hton, thd, tables, cond); - } - - return ret; -} - - -static int ndbcluster_end(handlerton *hton, ha_panic_function type) -{ - DBUG_ENTER("ndbcluster_end"); - - if (!ndbcluster_inited) - DBUG_RETURN(0); - ndbcluster_inited= 0; - - /* wait for util thread to finish */ - sql_print_information("Stopping Cluster Utility thread"); - mysql_mutex_lock(&LOCK_ndb_util_thread); - ndbcluster_terminating= 1; - mysql_cond_signal(&COND_ndb_util_thread); - while (ndb_util_thread_running > 0) - mysql_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread); - mysql_mutex_unlock(&LOCK_ndb_util_thread); - - -#ifdef HAVE_NDB_BINLOG - { - mysql_mutex_lock(&ndbcluster_mutex); - while (ndbcluster_open_tables.records) - { - NDB_SHARE *share= - (NDB_SHARE*) my_hash_element(&ndbcluster_open_tables, 0); -#ifndef DBUG_OFF - fprintf(stderr, "NDB: table share %s with use_count %d not freed\n", - share->key, share->use_count); -#endif - ndbcluster_real_free_share(&share); - } - mysql_mutex_unlock(&ndbcluster_mutex); - } -#endif - my_hash_free(&ndbcluster_open_tables); - - if (g_ndb) - { -#ifndef DBUG_OFF - Ndb::Free_list_usage tmp; - tmp.m_name= 0; - while (g_ndb->get_free_list_usage(&tmp)) - { - uint leaked= (uint) tmp.m_created - tmp.m_free; - if (leaked) - fprintf(stderr, "NDB: Found %u %s%s that %s not been released\n", - leaked, tmp.m_name, - (leaked == 1)?"":"'s", - (leaked == 1)?"has":"have"); - } -#endif - delete g_ndb; - g_ndb= NULL; - } - delete g_ndb_cluster_connection; - g_ndb_cluster_connection= NULL; - - // cleanup ndb interface - ndb_end_internal(); - - mysql_mutex_destroy(&ndbcluster_mutex); - mysql_mutex_destroy(&LOCK_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_thread); - mysql_cond_destroy(&COND_ndb_util_ready); - DBUG_RETURN(0); -} - -void ha_ndbcluster::print_error(int error, myf errflag) -{ - DBUG_ENTER("ha_ndbcluster::print_error"); - DBUG_PRINT("enter", ("error: %d", error)); - - if (error == HA_ERR_NO_PARTITION_FOUND) - m_part_info->print_no_partition_found(table); - else - handler::print_error(error, errflag); - DBUG_VOID_RETURN; -} - - -/** - Static error print function called from static handler method - ndbcluster_commit and ndbcluster_rollback. -*/ - -void ndbcluster_print_error(int error, const NdbOperation *error_op) -{ - DBUG_ENTER("ndbcluster_print_error"); - TABLE_SHARE share; - const char *tab_name= (error_op) ? error_op->getTableName() : ""; - share.db.str= (char*) ""; - share.db.length= 0; - share.table_name.str= (char *) tab_name; - share.table_name.length= strlen(tab_name); - ha_ndbcluster error_handler(ndbcluster_hton, &share); - error_handler.print_error(error, MYF(0)); - DBUG_VOID_RETURN; -} - -/** - Set a given location from full pathname to database name. 
-*/ - -void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) -{ - char *end, *ptr, *tmp_name; - char tmp_buff[FN_REFLEN + 1]; - - tmp_name= tmp_buff; - /* Scan name from the end */ - ptr= strend(path_name)-1; - while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { - ptr--; - } - ptr--; - end= ptr; - while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { - ptr--; - } - uint name_len= end - ptr; - memcpy(tmp_name, ptr + 1, name_len); - tmp_name[name_len]= '\0'; -#ifdef __WIN__ - /* Put to lower case */ - - ptr= tmp_name; - - while (*ptr != '\0') { - *ptr= tolower(*ptr); - ptr++; - } -#endif - filename_to_tablename(tmp_name, dbname, sizeof(tmp_buff) - 1); -} - -/** - Set m_dbname from full pathname to table file. -*/ - -void ha_ndbcluster::set_dbname(const char *path_name) -{ - set_dbname(path_name, m_dbname); -} - -/** - Set a given location from full pathname to table file. -*/ - -void -ha_ndbcluster::set_tabname(const char *path_name, char * tabname) -{ - char *end, *ptr, *tmp_name; - char tmp_buff[FN_REFLEN + 1]; - - tmp_name= tmp_buff; - /* Scan name from the end */ - end= strend(path_name)-1; - ptr= end; - while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { - ptr--; - } - uint name_len= end - ptr; - memcpy(tmp_name, ptr + 1, end - ptr); - tmp_name[name_len]= '\0'; -#ifdef __WIN__ - /* Put to lower case */ - ptr= tmp_name; - - while (*ptr != '\0') { - *ptr= tolower(*ptr); - ptr++; - } -#endif - filename_to_tablename(tmp_name, tabname, sizeof(tmp_buff) - 1); -} - -/** - Set m_tabname from full pathname to table file. -*/ - -void ha_ndbcluster::set_tabname(const char *path_name) -{ - set_tabname(path_name, m_tabname); -} - - -ha_rows -ha_ndbcluster::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - KEY *key_info= table->key_info + inx; - uint key_length= key_info->key_length; - NDB_INDEX_TYPE idx_type= get_index_type(inx); - - DBUG_ENTER("records_in_range"); - // Prevent partial read of hash indexes by returning HA_POS_ERROR - if ((idx_type == UNIQUE_INDEX || idx_type == PRIMARY_KEY_INDEX) && - ((min_key && min_key->length < key_length) || - (max_key && max_key->length < key_length))) - DBUG_RETURN(HA_POS_ERROR); - - // Read from hash index with full key - // This is a "const" table which returns only one record! 
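set_dbname() and set_tabname() above recover the schema and table names from a path of the form .../<db>/<table> by scanning backwards for the last two path separators (the lower-casing on Windows and the filename-to-tablename decoding are skipped here). A sketch assuming a well-formed path:

#include <string>
#include <utility>

// Split ".../db/table" into {db, table}; separators may be '/' or '\\'.
std::pair<std::string, std::string> split_db_and_table(const std::string& path)
{
  auto is_sep = [](char c) { return c == '/' || c == '\\'; };
  size_t end = path.find_last_not_of("/\\");     // ignore any trailing separator
  size_t tab_sep = std::string::npos, db_sep = std::string::npos;
  for (size_t i = end + 1; i-- > 0; )
  {
    if (!is_sep(path[i]))
      continue;
    if (tab_sep == std::string::npos)
      tab_sep = i;                               // separator before the table name
    else
    {
      db_sep = i;                                // separator before the database name
      break;
    }
  }
  std::string table = path.substr(tab_sep + 1, end - tab_sep);
  std::string db    = path.substr(db_sep + 1, tab_sep - db_sep - 1);
  return {db, table};
}

For example, split_db_and_table("./test/t1") yields {"test", "t1"}.
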
- if ((idx_type != ORDERED_INDEX) && - ((min_key && min_key->length == key_length) || - (max_key && max_key->length == key_length))) - DBUG_RETURN(1); - - if ((idx_type == PRIMARY_KEY_ORDERED_INDEX || - idx_type == UNIQUE_ORDERED_INDEX || - idx_type == ORDERED_INDEX) && - m_index[inx].index_stat != NULL) - { - NDB_INDEX_DATA& d=m_index[inx]; - const NDBINDEX* index= d.index; - Ndb* ndb=get_ndb(); - NdbTransaction* trans=NULL; - NdbIndexScanOperation* op=NULL; - int res=0; - Uint64 rows; - - do - { - // We must provide approx table rows - Uint64 table_rows=0; - Ndb_local_table_statistics *ndb_info= m_table_info; - if (ndb_info->records != ~(ha_rows)0 && ndb_info->records != 0) - { - table_rows = ndb_info->records; - DBUG_PRINT("info", ("use info->records: %lu", (ulong) table_rows)); - } - else - { - Ndb_statistics stat; - if ((res=ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat))) - break; - table_rows=stat.row_count; - DBUG_PRINT("info", ("use db row_count: %lu", (ulong) table_rows)); - if (table_rows == 0) { - // Problem if autocommit=0 -#ifdef ndb_get_table_statistics_uses_active_trans - rows=0; - break; -#endif - } - } - - // Define scan op for the range - if ((trans=m_active_trans) == NULL || - trans->commitStatus() != NdbTransaction::Started) - { - DBUG_PRINT("info", ("no active trans")); - if (! (trans=ndb->startTransaction())) - ERR_BREAK(ndb->getNdbError(), res); - } - if (! (op=trans->getNdbIndexScanOperation(index, (NDBTAB*)m_table))) - ERR_BREAK(trans->getNdbError(), res); - if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1) - ERR_BREAK(op->getNdbError(), res); - const key_range *keys[2]={ min_key, max_key }; - if ((res=set_bounds(op, inx, TRUE, keys)) != 0) - break; - - // Decide if db should be contacted - int flags=0; - if (d.index_stat_query_count < d.index_stat_cache_entries || - (d.index_stat_update_freq != 0 && - d.index_stat_query_count % d.index_stat_update_freq == 0)) - { - DBUG_PRINT("info", ("force stat from db")); - flags|=NdbIndexStat::RR_UseDb; - } - if (d.index_stat->records_in_range(index, op, table_rows, &rows, flags) == -1) - ERR_BREAK(d.index_stat->getNdbError(), res); - d.index_stat_query_count++; - } while (0); - - if (trans != m_active_trans && rows == 0) - rows = 1; - if (trans != m_active_trans && trans != NULL) - ndb->closeTransaction(trans); - if (res != 0) - DBUG_RETURN(HA_POS_ERROR); - DBUG_RETURN(rows); - } - - DBUG_RETURN(10); /* Good guess when you don't know anything */ -} - -ulonglong ha_ndbcluster::table_flags(void) const -{ - THD *thd= current_thd; - ulonglong f= m_table_flags; - if (m_ha_not_exact_count) - f= f & ~HA_STATS_RECORDS_IS_EXACT; - /* - To allow for logging of ndb tables during stmt based logging; - flag cabablity, but also turn off flag for OWN_BINLOGGING - */ - if (thd->variables.binlog_format == BINLOG_FORMAT_STMT) - f= (f | HA_BINLOG_STMT_CAPABLE) & ~HA_HAS_OWN_BINLOGGING; - return f; -} -const char * ha_ndbcluster::table_type() const -{ - return("NDBCLUSTER"); -} -uint ha_ndbcluster::max_supported_record_length() const -{ - return NDB_MAX_TUPLE_SIZE; -} -uint ha_ndbcluster::max_supported_keys() const -{ - return MAX_KEY; -} -uint ha_ndbcluster::max_supported_key_parts() const -{ - return NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY; -} -uint ha_ndbcluster::max_supported_key_length() const -{ - return NDB_MAX_KEY_SIZE; -} -uint ha_ndbcluster::max_supported_key_part_length() const -{ - return NDB_MAX_KEY_SIZE; -} -bool ha_ndbcluster::low_byte_first() const -{ -#ifdef WORDS_BIGENDIAN - return FALSE; -#else - return TRUE; 
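records_in_range() above only contacts the data nodes for fresh index statistics while the per-index cache is still warming up, and afterwards again on every index_stat_update_freq-th query; everything in between is answered from the cached sample. The decision in isolation (hypothetical names):

// True when the range estimate should be refreshed from the data nodes
// instead of being answered from the cached index statistics.
bool refresh_from_db(unsigned long query_count,    // lookups served so far
                     unsigned long cache_entries,  // warm-up threshold
                     unsigned long update_freq)    // 0 = never refresh again
{
  if (query_count < cache_entries)                 // cache still warming up
    return true;
  return update_freq != 0 && query_count % update_freq == 0;
}

With cache_entries = 32 and update_freq = 20, the first 32 lookups and then every 20th lookup afterwards go to the data nodes.
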
-#endif -} -const char* ha_ndbcluster::index_type(uint key_number) -{ - switch (get_index_type(key_number)) { - case ORDERED_INDEX: - case UNIQUE_ORDERED_INDEX: - case PRIMARY_KEY_ORDERED_INDEX: - return "BTREE"; - case UNIQUE_INDEX: - case PRIMARY_KEY_INDEX: - default: - return "HASH"; - } -} - -uint8 ha_ndbcluster::table_cache_type() -{ - DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); - DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); -} - - -/** - Retrieve the commit count for the table object. - - @param thd Thread context. - @param norm_name Normalized path to the table. - @param[out] commit_count Commit count for the table. - - @return 0 on success. - @return 1 if an error occured. -*/ - -uint ndb_get_commitcount(THD *thd, char *norm_name, - Uint64 *commit_count) -{ - char dbname[NAME_LEN + 1]; - NDB_SHARE *share; - DBUG_ENTER("ndb_get_commitcount"); - - DBUG_PRINT("enter", ("name: %s", norm_name)); - pthread_mutex_lock(&ndbcluster_mutex); - if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (const uchar*) norm_name, - strlen(norm_name)))) - { - pthread_mutex_unlock(&ndbcluster_mutex); - DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", - norm_name)); - DBUG_RETURN(1); - } - /* ndb_share reference temporary, free below */ - share->use_count++; - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - mysql_mutex_unlock(&ndbcluster_mutex); - - mysql_mutex_lock(&share->mutex); - if (opt_ndb_cache_check_time > 0) - { - if (share->commit_count != 0) - { - *commit_count= share->commit_count; -#ifndef DBUG_OFF - char buff[22]; -#endif - DBUG_PRINT("info", ("Getting commit_count: %s from share", - llstr(share->commit_count, buff))); - mysql_mutex_unlock(&share->mutex); - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - DBUG_RETURN(0); - } - } - DBUG_PRINT("info", ("Get commit_count from NDB")); - Ndb *ndb; - if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(1); - - ha_ndbcluster::set_dbname(norm_name, dbname); - if (ndb->setDatabaseName(dbname)) - { - ERR_RETURN(ndb->getNdbError()); - } - uint lock= share->commit_count_lock; - mysql_mutex_unlock(&share->mutex); - - struct Ndb_statistics stat; - { - char tblname[NAME_LEN + 1]; - ha_ndbcluster::set_tabname(norm_name, tblname); - Ndb_table_guard ndbtab_g(ndb->getDictionary(), tblname); - if (ndbtab_g.get_table() == 0 - || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat)) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - DBUG_RETURN(1); - } - } - - mysql_mutex_lock(&share->mutex); - if (share->commit_count_lock == lock) - { -#ifndef DBUG_OFF - char buff[22]; -#endif - DBUG_PRINT("info", ("Setting commit_count to %s", - llstr(stat.commit_count, buff))); - share->commit_count= stat.commit_count; - *commit_count= stat.commit_count; - } - else - { - DBUG_PRINT("info", ("Discarding commit_count, comit_count_lock changed")); - *commit_count= 0; - } - mysql_mutex_unlock(&share->mutex); - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - DBUG_RETURN(0); -} - - -/** - Check if a cached query can be used. - - This is done by comparing the supplied engine_data to commit_count of - the table. 
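ndb_get_commitcount() above caches the table's commit count in the NDB_SHARE, but it has to release the share mutex while it asks NDB for a fresh value; commit_count_lock is sampled before unlocking and re-checked afterwards, so a value that was updated concurrently is not clobbered with a stale one. The pattern, simplified (the opt_ndb_cache_check_time gate is omitted and the engine call is a stub):

#include <cstdint>
#include <mutex>

struct SharedStat {
  std::mutex mtx;
  uint64_t   commit_count      = 0;  // cached value, 0 = unknown/invalid
  unsigned   commit_count_lock = 0;  // bumped whenever commit_count changes
};

uint64_t slow_fetch_from_engine() { return 42; }   // stand-in for the NDB scan

uint64_t get_commit_count(SharedStat& s)
{
  unsigned seen;
  {
    std::lock_guard<std::mutex> g(s.mtx);
    if (s.commit_count != 0)
      return s.commit_count;         // served straight from the cache
    seen = s.commit_count_lock;      // remember the generation before unlocking
  }
  uint64_t fresh = slow_fetch_from_engine();       // slow call, mutex released
  std::lock_guard<std::mutex> g(s.mtx);
  if (s.commit_count_lock == seen)   // nobody updated it while we were away
    s.commit_count = fresh;
  else
    fresh = 0;                       // concurrent change, caller must not trust it
  return fresh;
}
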
- - The commit_count is either retrieved from the share for the table, where - it has been cached by the util thread. If the util thread is not started, - NDB has to be contacetd to retrieve the commit_count, this will introduce - a small delay while waiting for NDB to answer. - - - @param thd thread handle - @param full_name normalized path to the table in the canonical - format. - @param full_name_len length of the normalized path to the table. - @param engine_data parameter retrieved when query was first inserted into - the cache. If the value of engine_data is changed, - all queries for this table should be invalidated. - - @retval - TRUE Yes, use the query from cache - @retval - FALSE No, don't use the cached query, and if engine_data - has changed, all queries for this table should be invalidated - -*/ - -static my_bool -ndbcluster_cache_retrieval_allowed(THD *thd, - char *full_name, uint full_name_len, - ulonglong *engine_data) -{ - Uint64 commit_count; - char dbname[NAME_LEN + 1]; - char tabname[NAME_LEN + 1]; -#ifndef DBUG_OFF - char buff[22], buff2[22]; -#endif - - ha_ndbcluster::set_dbname(full_name, dbname); - ha_ndbcluster::set_tabname(full_name, tabname); - - DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); - DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname)); - - if (thd->in_multi_stmt_transaction_mode()) - { - DBUG_PRINT("exit", ("No, don't use cache in transaction")); - DBUG_RETURN(FALSE); - } - - if (ndb_get_commitcount(thd, full_name, &commit_count)) - { - *engine_data= 0; /* invalidate */ - DBUG_PRINT("exit", ("No, could not retrieve commit_count")); - DBUG_RETURN(FALSE); - } - DBUG_PRINT("info", ("*engine_data: %s, commit_count: %s", - llstr(*engine_data, buff), llstr(commit_count, buff2))); - if (commit_count == 0) - { - *engine_data= 0; /* invalidate */ - DBUG_PRINT("exit", ("No, local commit has been performed")); - DBUG_RETURN(FALSE); - } - else if (*engine_data != commit_count) - { - *engine_data= commit_count; /* invalidate */ - DBUG_PRINT("exit", ("No, commit_count has changed")); - DBUG_RETURN(FALSE); - } - - DBUG_PRINT("exit", ("OK to use cache, engine_data: %s", - llstr(*engine_data, buff))); - DBUG_RETURN(TRUE); -} - - -/** - Register a table for use in the query cache. - - Fetch the commit_count for the table and return it in engine_data, - this will later be used to check if the table has changed, before - the cached query is reused. - - @param thd thread handle - @param full_name normalized path to the table in the - canonical format. - @param full_name_len length of the normalized path to the table. 
- @param engine_callback function to be called before using cache on - this table - @param[out] engine_data commit_count for this table - - @retval - TRUE Yes, it's ok to cahce this query - @retval - FALSE No, don't cach the query -*/ - -my_bool -ha_ndbcluster::register_query_cache_table(THD *thd, - char *full_name, uint full_name_len, - qc_engine_callback *engine_callback, - ulonglong *engine_data) -{ - Uint64 commit_count; -#ifndef DBUG_OFF - char buff[22]; -#endif - DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); - DBUG_PRINT("enter",("dbname: %s, tabname: %s", m_dbname, m_tabname)); - - if (thd->in_multi_stmt_transaction_mode()) - { - DBUG_PRINT("exit", ("Can't register table during transaction")); - DBUG_RETURN(FALSE); - } - - if (ndb_get_commitcount(thd, full_name, &commit_count)) - { - *engine_data= 0; - DBUG_PRINT("exit", ("Error, could not get commitcount")); - DBUG_RETURN(FALSE); - } - *engine_data= commit_count; - *engine_callback= ndbcluster_cache_retrieval_allowed; - DBUG_PRINT("exit", ("commit_count: %s", llstr(commit_count, buff))); - DBUG_RETURN(commit_count > 0); -} - - -/** - Handling the shared NDB_SHARE structure that is needed to - provide table locking. - - It's also used for sharing data with other NDB handlers - in the same MySQL Server. There is currently not much - data we want to or can share. -*/ - -static uchar *ndbcluster_get_key(NDB_SHARE *share, size_t *length, - my_bool not_used __attribute__((unused))) -{ - *length= share->key_length; - return (uchar*) share->key; -} - - -#ifndef DBUG_OFF - -static void print_share(const char* where, NDB_SHARE* share) -{ - fprintf(DBUG_FILE, - "%s %s.%s: use_count: %u, commit_count: %lu\n", - where, share->db, share->table_name, share->use_count, - (ulong) share->commit_count); - fprintf(DBUG_FILE, - " - key: %s, key_length: %d\n", - share->key, share->key_length); - -#ifdef HAVE_NDB_BINLOG - if (share->table) - fprintf(DBUG_FILE, - " - share->table: %p %s.%s\n", - share->table, share->table->s->db.str, - share->table->s->table_name.str); -#endif -} - - -static void print_ndbcluster_open_tables() -{ - DBUG_LOCK_FILE; - fprintf(DBUG_FILE, ">ndbcluster_open_tables\n"); - for (uint i= 0; i < ndbcluster_open_tables.records; i++) - print_share("", - (NDB_SHARE*)my_hash_element(&ndbcluster_open_tables, i)); - fprintf(DBUG_FILE, "use_count; - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - mysql_mutex_unlock(&ndbcluster_mutex); - - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= share->db; - table_list.alias= table_list.table_name= share->table_name; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - - mysql_mutex_lock(&ndbcluster_mutex); - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - if (!--share->use_count) - { - if (opt_ndb_extra_logging) - sql_print_information("NDB_SHARE: trailing share " - "%s(connect_count: %u) " - "released by close_cached_tables at " - "connect_count: %u", - share->key, - share->connect_count, - g_ndb_cluster_connection->get_connect_count()); - ndbcluster_real_free_share(&share); - DBUG_RETURN(0); - } - - /* - share still exists, if share has not been dropped by server - release that share - */ - if (share->state != NSS_DROPPED) - { - share->state= NSS_DROPPED; - /* ndb_share reference create free */ - DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", - share->key, share->use_count)); - 
--share->use_count; - - if (share->use_count == 0) - { - if (opt_ndb_extra_logging) - sql_print_information("NDB_SHARE: trailing share " - "%s(connect_count: %u) " - "released after NSS_DROPPED check " - "at connect_count: %u", - share->key, - share->connect_count, - g_ndb_cluster_connection->get_connect_count()); - ndbcluster_real_free_share(&share); - DBUG_RETURN(0); - } - } - - sql_print_warning("NDB_SHARE: %s already exists use_count=%d." - " Moving away for safety, but possible memleak.", - share->key, share->use_count); - dbug_print_open_tables(); - - /* - Ndb share has not been released as it should - */ -#ifdef NOT_YET - DBUG_ASSERT(FALSE); -#endif - - /* - This is probably an error. We can however save the situation - at the cost of a possible mem leak, by "renaming" the share - - First remove from hash - */ - my_hash_delete(&ndbcluster_open_tables, (uchar*) share); - - /* - now give it a new name, just a running number - if space is not enough allocate some more - */ - { - const uint min_key_length= 10; - if (share->key_length < min_key_length) - { - share->key= (char*) alloc_root(&share->mem_root, min_key_length + 1); - share->key_length= min_key_length; - } - share->key_length= - my_snprintf(share->key, min_key_length + 1, "#leak%lu", - trailing_share_id++); - } - /* Keep it for possible the future trailing free */ - my_hash_insert(&ndbcluster_open_tables, (uchar*) share); - - DBUG_RETURN(0); -} - -/* - Rename share is used during rename table. -*/ -static int rename_share(NDB_SHARE *share, const char *new_key) -{ - NDB_SHARE *tmp; - mysql_mutex_lock(&ndbcluster_mutex); - uint new_length= (uint) strlen(new_key); - DBUG_PRINT("rename_share", ("old_key: %s old__length: %d", - share->key, share->key_length)); - if ((tmp= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (uchar*) new_key, new_length))) - handle_trailing_share(tmp); - - /* remove the share from hash */ - my_hash_delete(&ndbcluster_open_tables, (uchar*) share); - dbug_print_open_tables(); - - /* save old stuff if insert should fail */ - uint old_length= share->key_length; - char *old_key= share->key; - - /* - now allocate and set the new key, db etc - enough space for key, db, and table_name - */ - share->key= (char*) alloc_root(&share->mem_root, 2 * (new_length + 1)); - strmov(share->key, new_key); - share->key_length= new_length; - - if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share)) - { - // ToDo free the allocated stuff above? 
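rename_share() above re-keys the share in the ndbcluster_open_tables hash: remove it under the old key, swap in the new key, re-insert, and if the insert fails restore the old key so the share never drops out of the registry. The same dance against a standard map (sketch; the real my_hash_insert can also fail on out-of-memory):

#include <string>
#include <unordered_map>

struct Share { std::string key; /* ... */ };

// Move 'share' from its current key to new_key inside the registry; on failure
// the old entry is restored so the object never drops out of the map.
bool rename_in_registry(std::unordered_map<std::string, Share*>& registry,
                        Share* share, const std::string& new_key)
{
  std::string old_key = share->key;
  registry.erase(old_key);
  share->key = new_key;
  if (registry.emplace(new_key, share).second)
    return true;                     // renamed successfully
  share->key = old_key;              // roll back and re-insert under the old key
  registry.emplace(old_key, share);
  return false;
}
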
- DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed", - share->key)); - share->key= old_key; - share->key_length= old_length; - if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share)) - { - sql_print_error("rename_share: failed to recover %s", share->key); - DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed", - share->key)); - } - dbug_print_open_tables(); - mysql_mutex_unlock(&ndbcluster_mutex); - return -1; - } - dbug_print_open_tables(); - - share->db= share->key + new_length + 1; - ha_ndbcluster::set_dbname(new_key, share->db); - share->table_name= share->db + strlen(share->db) + 1; - ha_ndbcluster::set_tabname(new_key, share->table_name); - - dbug_print_share("rename_share:", share); - if (share->table) - { - if (share->op == 0) - { - share->table->s->db.str= share->db; - share->table->s->db.length= strlen(share->db); - share->table->s->table_name.str= share->table_name; - share->table->s->table_name.length= strlen(share->table_name); - } - } - /* else rename will be handled when the ALTER event comes */ - share->old_names= old_key; - // ToDo free old_names after ALTER EVENT - - mysql_mutex_unlock(&ndbcluster_mutex); - return 0; -} -#endif - -/* - Increase refcount on existing share. - Always returns share and cannot fail. -*/ -NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share) -{ - mysql_mutex_lock(&ndbcluster_mutex); - share->use_count++; - - dbug_print_open_tables(); - dbug_print_share("ndbcluster_get_share:", share); - mysql_mutex_unlock(&ndbcluster_mutex); - return share; -} - - -/* - Get a share object for key - - Returns share for key, and increases the refcount on the share. - - create_if_not_exists == TRUE: - creates share if it does not alreade exist - returns 0 only due to out of memory, and then sets my_error - - create_if_not_exists == FALSE: - returns 0 if share does not exist - - have_lock == TRUE, mysql_mutex_lock(&ndbcluster_mutex) already taken -*/ - -NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, - bool create_if_not_exists, - bool have_lock) -{ - NDB_SHARE *share; - uint length= (uint) strlen(key); - DBUG_ENTER("ndbcluster_get_share"); - DBUG_PRINT("enter", ("key: '%s'", key)); - - if (!have_lock) - mysql_mutex_lock(&ndbcluster_mutex); - if (!(share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (uchar*) key, - length))) - { - if (!create_if_not_exists) - { - DBUG_PRINT("error", ("get_share: %s does not exist", key)); - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); - } - if ((share= (NDB_SHARE*) my_malloc(sizeof(*share), - MYF(MY_WME | MY_ZEROFILL)))) - { - MEM_ROOT **root_ptr= - my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); - MEM_ROOT *old_root= *root_ptr; - init_sql_alloc(&share->mem_root, 1024, 0, MYF(0)); - *root_ptr= &share->mem_root; // remember to reset before return - share->state= NSS_INITIAL; - /* enough space for key, db, and table_name */ - share->key= (char*) alloc_root(*root_ptr, 2 * (length + 1)); - share->key_length= length; - strmov(share->key, key); - if (my_hash_insert(&ndbcluster_open_tables, (uchar*) share)) - { - free_root(&share->mem_root, MYF(0)); - my_free(share); - *root_ptr= old_root; - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); - } - thr_lock_init(&share->lock); - mysql_mutex_init(key_NDB_SHARE_mutex, &share->mutex, MY_MUTEX_INIT_FAST); - share->commit_count= 0; - share->commit_count_lock= 0; - share->db= share->key + length + 1; - ha_ndbcluster::set_dbname(key, share->db); - share->table_name= share->db + 
strlen(share->db) + 1; - ha_ndbcluster::set_tabname(key, share->table_name); -#ifdef HAVE_NDB_BINLOG - if (ndbcluster_binlog_init_share(share, table)) - { - DBUG_PRINT("error", ("get_share: %s could not init share", key)); - ndbcluster_real_free_share(&share); - *root_ptr= old_root; - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); - } -#endif - *root_ptr= old_root; - } - else - { - DBUG_PRINT("error", ("get_share: failed to alloc share")); - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - my_error(ER_OUTOFMEMORY, MYF(0), static_cast(sizeof(*share))); - DBUG_RETURN(0); - } - } - share->use_count++; - - dbug_print_open_tables(); - dbug_print_share("ndbcluster_get_share:", share); - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(share); -} - - -void ndbcluster_real_free_share(NDB_SHARE **share) -{ - DBUG_ENTER("ndbcluster_real_free_share"); - dbug_print_share("ndbcluster_real_free_share:", *share); - - my_hash_delete(&ndbcluster_open_tables, (uchar*) *share); - thr_lock_delete(&(*share)->lock); - mysql_mutex_destroy(&(*share)->mutex); - -#ifdef HAVE_NDB_BINLOG - if ((*share)->table) - { - // (*share)->table->mem_root is freed by closefrm - closefrm((*share)->table, 0); - // (*share)->table_share->mem_root is freed by free_table_share - free_table_share((*share)->table_share); -#ifndef DBUG_OFF - bzero((uchar*)(*share)->table_share, sizeof(*(*share)->table_share)); - bzero((uchar*)(*share)->table, sizeof(*(*share)->table)); - (*share)->table_share= 0; - (*share)->table= 0; -#endif - } -#endif - free_root(&(*share)->mem_root, MYF(0)); - my_free(*share); - *share= 0; - - dbug_print_open_tables(); - DBUG_VOID_RETURN; -} - - -void ndbcluster_free_share(NDB_SHARE **share, bool have_lock) -{ - if (!have_lock) - mysql_mutex_lock(&ndbcluster_mutex); - if ((*share)->util_lock == current_thd) - (*share)->util_lock= 0; - if (!--(*share)->use_count) - { - ndbcluster_real_free_share(share); - } - else - { - dbug_print_open_tables(); - dbug_print_share("ndbcluster_free_share:", *share); - } - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); -} - - -static -int -ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const NDBTAB *ndbtab, - struct Ndb_statistics * ndbstat) -{ - NdbTransaction* pTrans; - NdbError error; - int retries= 10; - int reterr= 0; - int retry_sleep= 30; /* 30 milliseconds, transaction */ -#ifndef DBUG_OFF - char buff[22], buff2[22], buff3[22], buff4[22]; -#endif - DBUG_ENTER("ndb_get_table_statistics"); - DBUG_PRINT("enter", ("table: %s", ndbtab->getName())); - - DBUG_ASSERT(ndbtab != 0); - - do - { - Uint64 rows, commits, fixed_mem, var_mem; - Uint32 size; - Uint32 count= 0; - Uint64 sum_rows= 0; - Uint64 sum_commits= 0; - Uint64 sum_row_size= 0; - Uint64 sum_mem= 0; - NdbScanOperation*pOp; - int check; - - if ((pTrans= ndb->startTransaction()) == NULL) - { - error= ndb->getNdbError(); - goto retry; - } - - if ((pOp= pTrans->getNdbScanOperation(ndbtab)) == NULL) - { - error= pTrans->getNdbError(); - goto retry; - } - - if (pOp->readTuples(NdbOperation::LM_CommittedRead)) - { - error= pOp->getNdbError(); - goto retry; - } - - if (pOp->interpret_exit_last_row() == -1) - { - error= pOp->getNdbError(); - goto retry; - } - - pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); - pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); - pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size); - pOp->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY, - 
(char*)&fixed_mem); - pOp->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY, - (char*)&var_mem); - - if (pTrans->execute(NdbTransaction::NoCommit, - NdbOperation::AbortOnError, - TRUE) == -1) - { - error= pTrans->getNdbError(); - goto retry; - } - - while ((check= pOp->nextResult(TRUE, TRUE)) == 0) - { - sum_rows+= rows; - sum_commits+= commits; - if (sum_row_size < size) - sum_row_size= size; - sum_mem+= fixed_mem + var_mem; - count++; - } - - if (check == -1) - { - error= pOp->getNdbError(); - goto retry; - } - - pOp->close(TRUE); - - ndb->closeTransaction(pTrans); - - ndbstat->row_count= sum_rows; - ndbstat->commit_count= sum_commits; - ndbstat->row_size= sum_row_size; - ndbstat->fragment_memory= sum_mem; - - DBUG_PRINT("exit", ("records: %s commits: %s " - "row_size: %s mem: %s count: %u", - llstr(sum_rows, buff), - llstr(sum_commits, buff2), - llstr(sum_row_size, buff3), - llstr(sum_mem, buff4), - count)); - - DBUG_RETURN(0); -retry: - if(report_error) - { - if (file && pTrans) - { - reterr= file->ndb_err(pTrans); - } - else - { - const NdbError& tmp= error; - ERR_PRINT(tmp); - reterr= ndb_to_mysql_error(&tmp); - } - } - else - reterr= error.code; - - if (pTrans) - { - ndb->closeTransaction(pTrans); - pTrans= NULL; - } - if (error.status == NdbError::TemporaryError && retries--) - { - my_sleep(retry_sleep); - continue; - } - set_ndb_err(current_thd, error); - break; - } while(1); - DBUG_PRINT("exit", ("failed, reterr: %u, NdbError %u(%s)", reterr, - error.code, error.message)); - DBUG_RETURN(reterr); -} - -/** - Create a .ndb file to serve as a placeholder indicating - that the table with this name is a ndb table. -*/ - -int ha_ndbcluster::write_ndb_file(const char *name) -{ - File file; - bool error=1; - char path[FN_REFLEN]; - - DBUG_ENTER("write_ndb_file"); - DBUG_PRINT("enter", ("name: %s", name)); - - (void)strxnmov(path, FN_REFLEN-1, - mysql_data_home,"/",name,ha_ndb_ext,NullS); - - if ((file= mysql_file_create(key_file_ndb, path, CREATE_MODE, - O_RDWR | O_TRUNC, MYF(MY_WME))) >= 0) - { - // It's an empty file - error=0; - mysql_file_close(file, MYF(0)); - } - DBUG_RETURN(error); -} - -void -ha_ndbcluster::release_completed_operations(NdbTransaction *trans, - bool force_release) -{ - if (trans->hasBlobOperation()) - { - /* We are reading/writing BLOB fields, - releasing operation records is unsafe - */ - return; - } - if (!force_release) - { - if (get_thd_ndb(current_thd)->query_state & NDB_QUERY_MULTI_READ_RANGE) - { - /* We are batching reads and have not consumed all fetched - rows yet, releasing operation records is unsafe - */ - return; - } - } - trans->releaseCompletedOperations(); -} - -bool -ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges, - KEY_MULTI_RANGE *end_range, - HANDLER_BUFFER *buffer) -{ - DBUG_ENTER("null_value_index_search"); - KEY* key_info= table->key_info + active_index; - KEY_MULTI_RANGE *range= ranges; - ulong reclength= table->s->reclength; - uchar *curr= (uchar*)buffer->buffer; - uchar *end_of_buffer= (uchar*)buffer->buffer_end; - - for (; rangestart_key.key; - uint key_len= range->start_key.length; - if (check_null_in_key(key_info, key, key_len)) - DBUG_RETURN(TRUE); - curr += reclength; - } - DBUG_RETURN(FALSE); -} - -#if 0 -/* MRR/NDB is disabled, for details see method declarations in ha_ndbcluster.h */ -int -ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, - KEY_MULTI_RANGE *ranges, - uint range_count, - bool sorted, - HANDLER_BUFFER *buffer) -{ - m_write_op= FALSE; - int res; - KEY* key_info= 
table->key_info + active_index; - NDB_INDEX_TYPE cur_index_type= get_index_type(active_index); - ulong reclength= table_share->reclength; - NdbOperation* op; - Thd_ndb *thd_ndb= get_thd_ndb(current_thd); - DBUG_ENTER("ha_ndbcluster::read_multi_range_first"); - - /** - * blobs and unique hash index with NULL can't be batched currently - */ - if (uses_blob_value() || - (cur_index_type == UNIQUE_INDEX && - has_null_in_unique_index(active_index) && - null_value_index_search(ranges, ranges+range_count, buffer)) - || m_delete_cannot_batch || m_update_cannot_batch) - { - m_disable_multi_read= TRUE; - DBUG_RETURN(handler::read_multi_range_first(found_range_p, - ranges, - range_count, - sorted, - buffer)); - } - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - thd_ndb->query_state|= NDB_QUERY_MULTI_READ_RANGE; - m_disable_multi_read= FALSE; - - /* - * Copy arguments into member variables - */ - m_multi_ranges= ranges; - multi_range_curr= ranges; - multi_range_end= ranges+range_count; - multi_range_sorted= sorted; - multi_range_buffer= buffer; - - /* - * read multi range will read ranges as follows (if not ordered) - * - * input read order - * ====== ========== - * pk-op 1 pk-op 1 - * pk-op 2 pk-op 2 - * range 3 range (3,5) NOTE result rows will be intermixed - * pk-op 4 pk-op 4 - * range 5 - * pk-op 6 pk-ok 6 - */ - - /* - * Variables for loop - */ - uchar *curr= (uchar*)buffer->buffer; - uchar *end_of_buffer= (uchar*)buffer->buffer_end; - NdbOperation::LockMode lm= - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - bool need_pk = (lm == NdbOperation::LM_Read); - const NDBTAB *tab= m_table; - const NDBINDEX *unique_idx= m_index[active_index].unique_index; - const NDBINDEX *idx= m_index[active_index].index; - const NdbOperation* lastOp= m_active_trans->getLastDefinedOperation(); - NdbIndexScanOperation* scanOp= 0; - for (; multi_range_currstart_key, - &part_spec); - DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u", - part_spec.start_part, part_spec.end_part)); - /* - If partition pruning has found no partition in set - we can skip this scan - */ - if (part_spec.start_part > part_spec.end_part) - { - /* - We can skip this partition since the key won't fit into any - partition - */ - curr += reclength; - multi_range_curr->range_flag |= SKIP_RANGE; - continue; - } - } - switch (cur_index_type) { - case PRIMARY_KEY_ORDERED_INDEX: - if (!(multi_range_curr->start_key.length == key_info->key_length && - multi_range_curr->start_key.flag == HA_READ_KEY_EXACT)) - goto range; - // else fall through - case PRIMARY_KEY_INDEX: - { - multi_range_curr->range_flag |= UNIQUE_RANGE; - if ((op= m_active_trans->getNdbOperation(tab)) && - !op->readTuple(lm) && - !set_primary_key(op, multi_range_curr->start_key.key) && - !define_read_attrs(curr, op) && - (!m_use_partition_function || - (op->setPartitionId(part_spec.start_part), TRUE))) - curr += reclength; - else - { - ERR_RETURN_PREPARE(res, - op ? 
op->getNdbError() : - m_active_trans->getNdbError()) - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - break; - } - break; - case UNIQUE_ORDERED_INDEX: - if (!(multi_range_curr->start_key.length == key_info->key_length && - multi_range_curr->start_key.flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, multi_range_curr->start_key.key, - multi_range_curr->start_key.length))) - goto range; - // else fall through - case UNIQUE_INDEX: - { - multi_range_curr->range_flag |= UNIQUE_RANGE; - if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) && - !op->readTuple(lm) && - !set_index_key(op, key_info, multi_range_curr->start_key.key) && - !define_read_attrs(curr, op)) - curr += reclength; - else - { - ERR_RETURN_PREPARE(res, - op ? op->getNdbError() : - m_active_trans->getNdbError()); - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - break; - } - case ORDERED_INDEX: { - range: - multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE; - if (scanOp == 0) - { - if (m_multi_cursor) - { - scanOp= m_multi_cursor; - DBUG_ASSERT(scanOp->getSorted() == sorted); - DBUG_ASSERT(scanOp->getLockMode() == - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); - if (scanOp->reset_bounds(m_force_send)) - { - res= ndb_err(m_active_trans); - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - - end_of_buffer -= reclength; - } - else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) - &&!scanOp->readTuples(lm, 0, parallelism, sorted, - FALSE, TRUE, need_pk, TRUE) - &&!(m_cond && m_cond->generate_scan_filter(scanOp)) - &&!define_read_attrs(end_of_buffer-reclength, scanOp)) - { - m_multi_cursor= scanOp; - m_multi_range_cursor_result_ptr= end_of_buffer-reclength; - } - else - { - ERR_RETURN_PREPARE(res, - scanOp ? scanOp->getNdbError() : - m_active_trans->getNdbError()); - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - } - - const key_range *keys[2]= { &multi_range_curr->start_key, - &multi_range_curr->end_key }; - if ((res= set_bounds(scanOp, active_index, FALSE, keys, - multi_range_curr-ranges))) - { - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - break; - } - case UNDEFINED_INDEX: - DBUG_ASSERT(FALSE); - MYSQL_INDEX_READ_ROW_DONE(1); - DBUG_RETURN(1); - break; - } - } - - if (multi_range_curr != multi_range_end) - { - /* - * Mark that we're using entire buffer (even if might not) as - * we haven't read all ranges for some reason - * This as we don't want mysqld to reuse the buffer when we read - * the remaining ranges - */ - buffer->end_of_used_area= (uchar*)buffer->buffer_end; - } - else - { - buffer->end_of_used_area= curr; - } - - /* - * Set first operation in multi range - */ - m_current_multi_operation= - lastOp ? 
lastOp->next() : m_active_trans->getFirstDefinedOperation(); - if (!(res= execute_no_commit_ie(this, m_active_trans,true))) - { - m_multi_range_defined= multi_range_curr; - multi_range_curr= ranges; - m_multi_range_result_ptr= (uchar*)buffer->buffer; - res= loc_read_multi_range_next(found_range_p); - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); - } - ERR_RETURN_PREPARE(res, m_active_trans->getNdbError()); - MYSQL_INDEX_READ_ROW_DONE(res); - DBUG_RETURN(res); -} - -#if 0 -#define DBUG_MULTI_RANGE(x) DBUG_PRINT("info", ("read_multi_range_next: case %d\n", x)); -#else -#define DBUG_MULTI_RANGE(x) -#endif - -int -ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) -{ - int rc; - DBUG_ENTER("ha_ndbcluster::read_multi_range_next"); - if (m_disable_multi_read) - { - DBUG_MULTI_RANGE(11); - DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p)); - } - MYSQL_INDEX_READ_ROW_START(table_share->db.str, table_share->table_name.str); - rc= loc_read_multi_range_next(multi_range_found_p); - MYSQL_INDEX_READ_ROW_DONE(rc); - DBUG_RETURN(rc); -} - -int ha_ndbcluster::loc_read_multi_range_next( - KEY_MULTI_RANGE **multi_range_found_p) -{ - int res; - int range_no; - ulong reclength= table_share->reclength; - const NdbOperation* op= m_current_multi_operation; - DBUG_ENTER("ha_ndbcluster::loc_read_multi_range_next"); - - for (;multi_range_curr < m_multi_range_defined; multi_range_curr++) - { - DBUG_MULTI_RANGE(12); - if (multi_range_curr->range_flag & SKIP_RANGE) - continue; - if (multi_range_curr->range_flag & UNIQUE_RANGE) - { - if (op->getNdbError().code == 0) - { - DBUG_MULTI_RANGE(13); - goto found_next; - } - - op= m_active_trans->getNextCompletedOperation(op); - m_multi_range_result_ptr += reclength; - continue; - } - else if (m_multi_cursor && !multi_range_sorted) - { - DBUG_MULTI_RANGE(1); - if ((res= fetch_next(m_multi_cursor)) == 0) - { - DBUG_MULTI_RANGE(2); - range_no= m_multi_cursor->get_range_no(); - goto found; - } - else - { - DBUG_MULTI_RANGE(14); - goto close_scan; - } - } - else if (m_multi_cursor && multi_range_sorted) - { - if (m_active_cursor && (res= fetch_next(m_multi_cursor))) - { - DBUG_MULTI_RANGE(3); - goto close_scan; - } - - range_no= m_multi_cursor->get_range_no(); - uint current_range_no= multi_range_curr - m_multi_ranges; - if ((uint) range_no == current_range_no) - { - DBUG_MULTI_RANGE(4); - // return current row - goto found; - } - else if (range_no > (int)current_range_no) - { - DBUG_MULTI_RANGE(5); - // wait with current row - m_active_cursor= 0; - continue; - } - else - { - DBUG_MULTI_RANGE(6); - // First fetch from cursor - DBUG_ASSERT(range_no == -1); - if ((res= m_multi_cursor->nextResult(TRUE))) - { - DBUG_MULTI_RANGE(15); - goto close_scan; - } - multi_range_curr--; // Will be increased in for-loop - continue; - } - } - else /* m_multi_cursor == 0 */ - { - DBUG_MULTI_RANGE(7); - /* - * Corresponds to range 5 in example in read_multi_range_first - */ - (void)1; - continue; - } - - DBUG_ASSERT(FALSE); // Should only get here via goto's -close_scan: - if (res == 1) - { - m_multi_cursor->close(FALSE, TRUE); - m_active_cursor= m_multi_cursor= 0; - DBUG_MULTI_RANGE(8); - continue; - } - else - { - DBUG_MULTI_RANGE(9); - DBUG_RETURN(ndb_err(m_active_trans)); - } - } - - if (multi_range_curr == multi_range_end) - { - DBUG_MULTI_RANGE(16); - Thd_ndb *thd_ndb= get_thd_ndb(current_thd); - thd_ndb->query_state&= NDB_QUERY_NORMAL; - DBUG_RETURN(HA_ERR_END_OF_FILE); - } - - /* - * Read remaining ranges - */ - MYSQL_INDEX_READ_ROW_DONE(1); - 
DBUG_RETURN(read_multi_range_first(multi_range_found_p, - multi_range_curr, - multi_range_end - multi_range_curr, - multi_range_sorted, - multi_range_buffer)); - -found: - /* - * Found a record belonging to a scan - */ - m_active_cursor= m_multi_cursor; - * multi_range_found_p= m_multi_ranges + range_no; - memcpy(table->record[0], m_multi_range_cursor_result_ptr, reclength); - setup_recattr(m_active_cursor->getFirstRecAttr()); - unpack_record(table->record[0]); - table->status= 0; - DBUG_RETURN(0); - -found_next: - /* - * Found a record belonging to a pk/index op, - * copy result and move to next to prepare for next call - */ - * multi_range_found_p= multi_range_curr; - memcpy(table->record[0], m_multi_range_result_ptr, reclength); - setup_recattr(op->getFirstRecAttr()); - unpack_record(table->record[0]); - table->status= 0; - - multi_range_curr++; - m_current_multi_operation= m_active_trans->getNextCompletedOperation(op); - m_multi_range_result_ptr += reclength; - DBUG_RETURN(0); -} -#endif - -int -ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) -{ - DBUG_ENTER("setup_recattr"); - - Field **field, **end; - NdbValue *value= m_value; - - end= table->field + table_share->fields; - - for (field= table->field; field < end; field++, value++) - { - if ((* value).ptr) - { - DBUG_ASSERT(curr != 0); - NdbValue* val= m_value + curr->getColumn()->getColumnNo(); - DBUG_ASSERT(val->ptr); - val->rec= curr; - curr= curr->next(); - } - } - - DBUG_RETURN(0); -} - -/** - @param[in] comment table comment defined by user - - @return - table comment + additional -*/ -char* -ha_ndbcluster::update_table_comment( - /* out: table comment + additional */ - const char* comment)/* in: table comment defined by user */ -{ - uint length= strlen(comment); - if (length > 64000 - 3) - { - return((char*)comment); /* string too long */ - } - - Ndb* ndb; - if (!(ndb= get_ndb())) - { - return((char*)comment); - } - - if (ndb->setDatabaseName(m_dbname)) - { - return((char*)comment); - } - const NDBTAB* tab= m_table; - DBUG_ASSERT(tab != NULL); - - char *str; - const char *fmt="%s%snumber_of_replicas: %d"; - const unsigned fmt_len_plus_extra= length + strlen(fmt); - if ((str= (char*) my_malloc(fmt_len_plus_extra, MYF(0))) == NULL) - { - sql_print_error("ha_ndbcluster::update_table_comment: " - "my_malloc(%u) failed", (unsigned int)fmt_len_plus_extra); - return (char*)comment; - } - - my_snprintf(str,fmt_len_plus_extra,fmt,comment, - length > 0 ? " ":"", - tab->getReplicaCount()); - return str; -} - - -/** - Utility thread main loop. 
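update_table_comment() above hands back the user comment with a "number_of_replicas" suffix, sizing the buffer from the comment plus the format string. The same append, restated with std::string purely for illustration:

#include <string>

/* Append the replica count to a user-supplied table comment,
   as update_table_comment() does with my_malloc/my_snprintf. */
std::string comment_with_replicas(const std::string &comment, int replicas)
{
  std::string out= comment;
  if (!comment.empty())
    out+= " ";
  out+= "number_of_replicas: " + std::to_string(replicas);
  return out;
}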
-*/ -pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) -{ - THD *thd; /* needs to be first for thread_stack */ - struct timespec abstime; - Thd_ndb *thd_ndb; - uint share_list_size= 0; - NDB_SHARE **share_list= NULL; - - my_thread_init(); - DBUG_ENTER("ndb_util_thread"); - DBUG_PRINT("enter", ("cache_check_time: %lu", opt_ndb_cache_check_time)); - - mysql_mutex_lock(&LOCK_ndb_util_thread); - - thd= new THD; /* note that contructor of THD uses DBUG_ */ - if (thd == NULL) - { - my_errno= HA_ERR_OUT_OF_MEM; - DBUG_RETURN(NULL); - } - THD_CHECK_SENTRY(thd); - pthread_detach_this_thread(); - ndb_util_thread= pthread_self(); - - thd->thread_stack= (char*)&thd; /* remember where our stack is */ - if (thd->store_globals()) - goto ndb_util_thread_fail; - thd->init_for_queries(); - thd->main_security_ctx.host_or_ip= ""; - thd->client_capabilities = 0; - my_net_init(&thd->net, 0, MYF(MY_THREAD_SPECIFIC)); - thd->main_security_ctx.master_access= ~0; - thd->main_security_ctx.priv_user[0] = 0; - /* Do not use user-supplied timeout value for system threads. */ - thd->variables.lock_wait_timeout= LONG_TIMEOUT; - - CHARSET_INFO *charset_connection; - charset_connection= get_charset_by_csname("utf8", - MY_CS_PRIMARY, MYF(MY_WME)); - thd->variables.character_set_client= charset_connection; - thd->variables.character_set_results= charset_connection; - thd->variables.collation_connection= charset_connection; - thd->update_charset(); - - /* Signal successful initialization */ - ndb_util_thread_running= 1; - mysql_cond_signal(&COND_ndb_util_ready); - mysql_mutex_unlock(&LOCK_ndb_util_thread); - - /* - wait for mysql server to start - */ - mysql_mutex_lock(&LOCK_server_started); - while (!mysqld_server_started) - { - set_timespec(abstime, 1); - mysql_cond_timedwait(&COND_server_started, &LOCK_server_started, - &abstime); - if (ndbcluster_terminating) - { - mysql_mutex_unlock(&LOCK_server_started); - mysql_mutex_lock(&LOCK_ndb_util_thread); - goto ndb_util_thread_end; - } - } - mysql_mutex_unlock(&LOCK_server_started); - - /* - Wait for cluster to start - */ - mysql_mutex_lock(&LOCK_ndb_util_thread); - while (!ndb_cluster_node_id && (ndbcluster_hton->slot != ~(uint)0)) - { - /* ndb not connected yet */ - mysql_cond_wait(&COND_ndb_util_thread, &LOCK_ndb_util_thread); - if (ndbcluster_terminating) - goto ndb_util_thread_end; - } - mysql_mutex_unlock(&LOCK_ndb_util_thread); - - /* Get thd_ndb for this thread */ - if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - { - sql_print_error("Could not allocate Thd_ndb object"); - mysql_mutex_lock(&LOCK_ndb_util_thread); - goto ndb_util_thread_end; - } - set_thd_ndb(thd, thd_ndb); - thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP; - -#ifdef HAVE_NDB_BINLOG - if (opt_ndb_extra_logging && ndb_binlog_running) - sql_print_information("NDB Binlog: Ndb tables initially read only."); - /* create tables needed by the replication */ - ndbcluster_setup_binlog_table_shares(thd); -#else - /* - Get all table definitions from the storage node - */ - ndbcluster_find_all_files(thd); -#endif - - set_timespec(abstime, 0); - for (;;) - { - mysql_mutex_lock(&LOCK_ndb_util_thread); - if (!ndbcluster_terminating) - mysql_cond_timedwait(&COND_ndb_util_thread, - &LOCK_ndb_util_thread, - &abstime); - if (ndbcluster_terminating) /* Shutting down server */ - goto ndb_util_thread_end; - mysql_mutex_unlock(&LOCK_ndb_util_thread); -#ifdef NDB_EXTRA_DEBUG_UTIL_THREAD - DBUG_PRINT("ndb_util_thread", ("Started, opt_ndb_cache_check_time: %lu", - opt_ndb_cache_check_time)); -#endif - -#ifdef 
HAVE_NDB_BINLOG - /* - Check that the ndb_apply_status_share and ndb_schema_share - have been created. - If not try to create it - */ - if (!ndb_binlog_tables_inited) - ndbcluster_setup_binlog_table_shares(thd); -#endif - - if (opt_ndb_cache_check_time == 0) - { - /* Wake up in 1 second to check if value has changed */ - set_timespec(abstime, 1); - continue; - } - - /* Lock mutex and fill list with pointers to all open tables */ - NDB_SHARE *share; - mysql_mutex_lock(&ndbcluster_mutex); - uint i, open_count, record_count= ndbcluster_open_tables.records; - if (share_list_size < record_count) - { - NDB_SHARE ** new_share_list= new NDB_SHARE * [record_count]; - if (!new_share_list) - { - sql_print_warning("ndb util thread: malloc failure, " - "query cache not maintained properly"); - mysql_mutex_unlock(&ndbcluster_mutex); - goto next; // At least do not crash - } - delete [] share_list; - share_list_size= record_count; - share_list= new_share_list; - } - for (i= 0, open_count= 0; i < record_count; i++) - { - share= (NDB_SHARE *)my_hash_element(&ndbcluster_open_tables, i); -#ifdef HAVE_NDB_BINLOG - if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0)) - <= 0) - continue; // injector thread is the only user, skip statistics - share->util_lock= current_thd; // Mark that util thread has lock -#endif /* HAVE_NDB_BINLOG */ - /* ndb_share reference temporary, free below */ - share->use_count++; /* Make sure the table can't be closed */ - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - DBUG_PRINT("ndb_util_thread", - ("Found open table[%d]: %s, use_count: %d", - i, share->table_name, share->use_count)); - - /* Store pointer to table */ - share_list[open_count++]= share; - } - mysql_mutex_unlock(&ndbcluster_mutex); - - /* Iterate through the open files list */ - for (i= 0; i < open_count; i++) - { - share= share_list[i]; -#ifdef HAVE_NDB_BINLOG - if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0)) - <= 1) - { - /* - Util thread and injector thread is the only user, skip statistics - */ - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - continue; - } -#endif /* HAVE_NDB_BINLOG */ - DBUG_PRINT("ndb_util_thread", - ("Fetching commit count for: %s", share->key)); - - struct Ndb_statistics stat; - uint lock; - mysql_mutex_lock(&share->mutex); - lock= share->commit_count_lock; - mysql_mutex_unlock(&share->mutex); - { - /* Contact NDB to get commit count for table */ - Ndb* ndb= thd_ndb->ndb; - if (ndb->setDatabaseName(share->db)) - { - goto loop_next; - } - Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name); - if (ndbtab_g.get_table() && - ndb_get_table_statistics(NULL, FALSE, ndb, - ndbtab_g.get_table(), &stat) == 0) - { -#ifndef DBUG_OFF - char buff[22], buff2[22]; -#endif - DBUG_PRINT("info", - ("Table: %s commit_count: %s rows: %s", - share->key, - llstr(stat.commit_count, buff), - llstr(stat.row_count, buff2))); - } - else - { - DBUG_PRINT("ndb_util_thread", - ("Error: Could not get commit count for table %s", - share->key)); - stat.commit_count= 0; - } - } - loop_next: - mysql_mutex_lock(&share->mutex); - if (share->commit_count_lock == lock) - share->commit_count= stat.commit_count; - mysql_mutex_unlock(&share->mutex); - - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } -next: - /* 
Calculate new time to wake up */ - int secs= 0; - int msecs= opt_ndb_cache_check_time; - - struct timeval tick_time; - gettimeofday(&tick_time, 0); - abstime.tv_sec= tick_time.tv_sec; - abstime.tv_nsec= tick_time.tv_usec * 1000; - - if (msecs >= 1000){ - secs= msecs / 1000; - msecs= msecs % 1000; - } - - abstime.tv_sec+= secs; - abstime.tv_nsec+= msecs * 1000000; - if (abstime.tv_nsec >= 1000000000) { - abstime.tv_sec+= 1; - abstime.tv_nsec-= 1000000000; - } - } - - mysql_mutex_lock(&LOCK_ndb_util_thread); - -ndb_util_thread_end: -ndb_util_thread_fail: - if (share_list) - delete [] share_list; - delete thd; - - /* signal termination */ - ndb_util_thread_running= 0; - mysql_cond_signal(&COND_ndb_util_ready); - mysql_mutex_unlock(&LOCK_ndb_util_thread); - DBUG_PRINT("exit", ("ndb_util_thread")); - - DBUG_LEAVE; // Must match DBUG_ENTER() - my_thread_end(); - pthread_exit(0); - return NULL; // Avoid compiler warnings -} - -/* - Condition pushdown -*/ -/** - Push a condition to ndbcluster storage engine for evaluation - during table and index scans. The conditions will be stored on a stack - for possibly storing several conditions. The stack can be popped - by calling cond_pop, handler::extra(HA_EXTRA_RESET) (handler::reset()) - will clear the stack. - The current implementation supports arbitrary AND/OR nested conditions - with comparisons between columns and constants (including constant - expressions and function calls) and the following comparison operators: - =, !=, >, >=, <, <=, "is null", and "is not null". - - @retval - NULL The condition was supported and will be evaluated for each - row found during the scan - @retval - cond The condition was not supported and all rows will be returned from - the scan for evaluation (and thus not saved on stack) -*/ -const -COND* -ha_ndbcluster::cond_push(const COND *cond) -{ - DBUG_ENTER("cond_push"); - if (!m_cond) - m_cond= new ha_ndbcluster_cond; - if (!m_cond) - { - my_errno= HA_ERR_OUT_OF_MEM; - DBUG_RETURN(NULL); - } - DBUG_EXECUTE("where",print_where((COND *)cond, m_tabname, QT_ORDINARY);); - DBUG_RETURN(m_cond->cond_push(cond, table, (NDBTAB *)m_table)); -} - -/** - Pop the top condition from the condition stack of the handler instance. -*/ -void -ha_ndbcluster::cond_pop() -{ - if (m_cond) - m_cond->cond_pop(); -} - - -/* - get table space info for SHOW CREATE TABLE -*/ -char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len) -{ - Ndb *ndb= check_ndb_in_thd(thd); - NDBDICT *ndbdict= ndb->getDictionary(); - NdbError ndberr; - Uint32 id; - ndb->setDatabaseName(m_dbname); - const NDBTAB *ndbtab= m_table; - DBUG_ASSERT(ndbtab != NULL); - if (!ndbtab->getTablespace(&id)) - { - return 0; - } - { - NdbDictionary::Tablespace ts= ndbdict->getTablespace(id); - ndberr= ndbdict->getNdbError(); - if(ndberr.classification != NdbError::NoError) - goto err; - DBUG_PRINT("info", ("Found tablespace '%s'", ts.getName())); - if (name) - { - strxnmov(name, name_len, ts.getName(), NullS); - return name; - } - else - return (my_strdup(ts.getName(), MYF(0))); - } -err: - if (ndberr.status == NdbError::TemporaryError) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG), - ndberr.code, ndberr.message, "NDB"); - else - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndberr.code, ndberr.message, "NDB"); - return 0; -} - -/* - Implements the SHOW NDB STATUS command. 
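The wake-up calculation in the utility thread above turns the millisecond check interval into an absolute timespec and renormalizes the nanosecond field. A self-contained version of the same arithmetic (clock_gettime in place of gettimeofday, otherwise the identical steps; a sketch, not the server code):

#include <ctime>

/* Absolute deadline "now + msecs", with tv_nsec kept in [0, 1e9). */
timespec deadline_after_ms(long msecs)
{
  timespec abstime;
  clock_gettime(CLOCK_REALTIME, &abstime);

  long secs= msecs / 1000;          // whole seconds
  msecs%= 1000;                     // remaining milliseconds

  abstime.tv_sec+= secs;
  abstime.tv_nsec+= msecs * 1000000L;
  if (abstime.tv_nsec >= 1000000000L)   // at most one carry is possible here
  {
    abstime.tv_sec+= 1;
    abstime.tv_nsec-= 1000000000L;
  }
  return abstime;
}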
-*/ -bool -ndbcluster_show_status(handlerton *hton, THD* thd, stat_print_fn *stat_print, - enum ha_stat_type stat_type) -{ - char buf[IO_SIZE]; - uint buflen; - DBUG_ENTER("ndbcluster_show_status"); - - if (stat_type != HA_ENGINE_STATUS) - { - DBUG_RETURN(FALSE); - } - - update_status_variables(g_ndb_cluster_connection); - buflen= - my_snprintf(buf, sizeof(buf), - "cluster_node_id=%ld, " - "connected_host=%s, " - "connected_port=%ld, " - "number_of_data_nodes=%ld, " - "number_of_ready_data_nodes=%ld, " - "connect_count=%ld", - ndb_cluster_node_id, - ndb_connected_host, - ndb_connected_port, - ndb_number_of_data_nodes, - ndb_number_of_ready_data_nodes, - ndb_connect_count); - if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, - STRING_WITH_LEN("connection"), buf, buflen)) - DBUG_RETURN(TRUE); - - if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb) - { - Ndb* ndb= (get_thd_ndb(thd))->ndb; - Ndb::Free_list_usage tmp; - tmp.m_name= 0; - while (ndb->get_free_list_usage(&tmp)) - { - buflen= - my_snprintf(buf, sizeof(buf), - "created=%u, free=%u, sizeof=%u", - tmp.m_created, tmp.m_free, tmp.m_sizeof); - if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, - tmp.m_name, strlen(tmp.m_name), buf, buflen)) - DBUG_RETURN(TRUE); - } - } -#ifdef HAVE_NDB_BINLOG - ndbcluster_show_status_binlog(thd, stat_print, stat_type); -#endif - - DBUG_RETURN(FALSE); -} - - -/* - Create a table in NDB Cluster - */ -static uint get_no_fragments(ulonglong max_rows) -{ -#if MYSQL_VERSION_ID >= 50000 - uint acc_row_size= 25 + /*safety margin*/ 2; -#else - uint acc_row_size= pk_length*4; - /* add acc overhead */ - if (pk_length <= 8) /* main page will set the limit */ - acc_row_size+= 25 + /*safety margin*/ 2; - else /* overflow page will set the limit */ - acc_row_size+= 4 + /*safety margin*/ 4; -#endif - ulonglong acc_fragment_size= 512*1024*1024; -#if MYSQL_VERSION_ID >= 50100 - return (max_rows*acc_row_size)/acc_fragment_size+1; -#else - return ((max_rows*acc_row_size)/acc_fragment_size+1 - +1/*correct rounding*/)/2; -#endif -} - - -/* - Routine to adjust default number of partitions to always be a multiple - of number of nodes and never more than 4 times the number of nodes. - -*/ -static bool adjusted_frag_count(uint no_fragments, uint no_nodes, - uint &reported_frags) -{ - uint i= 0; - reported_frags= no_nodes; - while (reported_frags < no_fragments && ++i < 4 && - (reported_frags + no_nodes) < MAX_PARTITIONS) - reported_frags+= no_nodes; - return (reported_frags < no_fragments); -} - -int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info) -{ - ha_rows max_rows, min_rows; - if (create_info) - { - max_rows= create_info->max_rows; - min_rows= create_info->min_rows; - } - else - { - max_rows= table_share->max_rows; - min_rows= table_share->min_rows; - } - uint reported_frags; - uint no_fragments= - get_no_fragments(max_rows >= min_rows ? max_rows : min_rows); - uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); - if (adjusted_frag_count(no_fragments, no_nodes, reported_frags)) - { - push_warning(current_thd, - Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, - "Ndb might have problems storing the max amount of rows specified"); - } - return (int)reported_frags; -} - - -/* - Set-up auto-partitioning for NDB Cluster - - SYNOPSIS - set_auto_partitions() - part_info Partition info struct to set-up - - RETURN VALUE - NONE - - DESCRIPTION - Set-up auto partitioning scheme for tables that didn't define any - partitioning. 
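get_no_fragments() and adjusted_frag_count() above derive the default partition count from MAX_ROWS (roughly 27 bytes of ACC memory per row, 512 MiB per fragment) and then round it to a multiple of the data-node count, capped at four times that count. The heuristic, restated as a standalone sketch with an illustrative worked call (the MAX_PARTITIONS bound is omitted here):

#include <cstdio>

/* Fragments needed to hold max_rows: ~27 bytes of ACC memory per row,
   512 MiB per fragment -- the MYSQL_VERSION_ID >= 50000 branch above. */
unsigned fragments_for_rows(unsigned long long max_rows)
{
  const unsigned long long acc_row_size= 25 + 2;               // + safety margin
  const unsigned long long acc_fragment_size= 512ULL * 1024 * 1024;
  return (unsigned) (max_rows * acc_row_size / acc_fragment_size + 1);
}

/* Round up to a multiple of the node count, at most 4 * nodes.
   Returns true when even that cap falls short (the caller warns). */
bool adjust_to_nodes(unsigned wanted, unsigned nodes, unsigned &reported)
{
  reported= nodes;
  for (unsigned i= 1; reported < wanted && i < 4; i++)
    reported+= nodes;                 // original also bounds by MAX_PARTITIONS
  return reported < wanted;
}

int main()
{
  unsigned reported;
  unsigned wanted= fragments_for_rows(100000000ULL);   // 100M rows -> 6 fragments
  bool warn= adjust_to_nodes(wanted, 4, reported);     // 4 nodes -> report 8
  std::printf("wanted=%u reported=%u warn=%d\n", wanted, reported, warn);
  return 0;
}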
We'll use PARTITION BY KEY() in this case which - translates into partition by primary key if a primary key exists - and partition by hidden key otherwise. -*/ - - -enum ndb_distribution_enum { ND_KEYHASH= 0, ND_LINHASH= 1 }; -static const char* distribution_names[]= { "KEYHASH", "LINHASH", NullS }; -static ulong default_ndb_distribution= ND_KEYHASH; -static TYPELIB distribution_typelib= { - array_elements(distribution_names) - 1, - "", - distribution_names, - NULL -}; -static MYSQL_SYSVAR_ENUM( - distribution, /* name */ - default_ndb_distribution, /* var */ - PLUGIN_VAR_RQCMDARG, - "Default distribution for new tables in ndb", - NULL, /* check func. */ - NULL, /* update func. */ - ND_KEYHASH, /* default */ - &distribution_typelib /* typelib */ -); - -void ha_ndbcluster::set_auto_partitions(partition_info *part_info) -{ - DBUG_ENTER("ha_ndbcluster::set_auto_partitions"); - part_info->list_of_part_fields= TRUE; - part_info->part_type= HASH_PARTITION; - switch (default_ndb_distribution) - { - case ND_KEYHASH: - part_info->linear_hash_ind= FALSE; - break; - case ND_LINHASH: - part_info->linear_hash_ind= TRUE; - break; - } - DBUG_VOID_RETURN; -} - - -int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info) -{ - NDBTAB *tab= (NDBTAB*)tab_ref; - int32 *range_data= (int32*)my_malloc(part_info->num_parts*sizeof(int32), - MYF(0)); - uint i; - int error= 0; - bool unsigned_flag= part_info->part_expr->unsigned_flag; - DBUG_ENTER("set_range_data"); - - if (!range_data) - { - mem_alloc_error(part_info->num_parts*sizeof(int32)); - DBUG_RETURN(1); - } - for (i= 0; i < part_info->num_parts; i++) - { - longlong range_val= part_info->range_int_array[i]; - if (unsigned_flag) - range_val-= 0x8000000000000000ULL; - if (range_val < INT_MIN32 || range_val >= INT_MAX32) - { - if ((i != part_info->num_parts - 1) || - (range_val != LONGLONG_MAX)) - { - my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); - error= 1; - goto error; - } - range_val= INT_MAX32; - } - range_data[i]= (int32)range_val; - } - tab->setRangeListData(range_data, sizeof(int32)*part_info->num_parts); -error: - my_free(range_data); - DBUG_RETURN(error); -} - -int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info) -{ - NDBTAB *tab= (NDBTAB*)tab_ref; - int32 *list_data= (int32*)my_malloc(part_info->num_list_values * 2 - * sizeof(int32), MYF(0)); - uint32 *part_id, i; - int error= 0; - bool unsigned_flag= part_info->part_expr->unsigned_flag; - DBUG_ENTER("set_list_data"); - - if (!list_data) - { - mem_alloc_error(part_info->num_list_values*2*sizeof(int32)); - DBUG_RETURN(1); - } - for (i= 0; i < part_info->num_list_values; i++) - { - LIST_PART_ENTRY *list_entry= &part_info->list_array[i]; - longlong list_val= list_entry->list_value; - if (unsigned_flag) - list_val-= 0x8000000000000000ULL; - if (list_val < INT_MIN32 || list_val > INT_MAX32) - { - my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); - error= 1; - goto error; - } - list_data[2*i]= (int32)list_val; - part_id= (uint32*)&list_data[2*i+1]; - *part_id= list_entry->partition_id; - } - tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->num_list_values); -error: - my_free(list_data); - DBUG_RETURN(error); -} - -/* - User defined partitioning set-up. We need to check how many fragments the - user wants defined and which node groups to put those into. Later we also - want to attach those partitions to a tablespace. - - All the functionality of the partition function, partition limits and so - forth are entirely handled by the MySQL Server. 
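set_range_data() above squeezes each RANGE boundary into a signed 32-bit value for NDB: unsigned partition expressions are shifted down by 2^63 so their ordering survives in signed space, and only the final MAXVALUE boundary may clamp rather than fail. A compact restatement of that mapping, with the error path reduced to a bool for illustration:

#include <cstdint>

/* Map one RANGE boundary to int32 the way set_range_data() does.
   'is_last' marks the final partition, whose LONGLONG_MAX (MAXVALUE)
   boundary is clamped instead of rejected. */
bool range_to_int32(int64_t range_val, bool unsigned_expr, bool is_last,
                    int32_t &out)
{
  if (unsigned_expr)   /* 0 .. 2^64-1 maps onto -2^63 .. 2^63-1, order kept */
    range_val= (int64_t) ((uint64_t) range_val - 0x8000000000000000ULL);
  if (range_val < INT32_MIN || range_val >= INT32_MAX)
  {
    if (!is_last || range_val != INT64_MAX)
      return false;                  // ER_LIMITED_PART_RANGE in the original
    range_val= INT32_MAX;
  }
  out= (int32_t) range_val;
  return true;
}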
There is one exception to - this rule for PARTITION BY KEY where NDB handles the hash function and - this type can thus be handled transparently also by NDB API program. - For RANGE, HASH and LIST and subpartitioning the NDB API programs must - implement the function to map to a partition. -*/ - -uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, - TABLE *table, - void *tab_par) -{ - uint16 frag_data[MAX_PARTITIONS]; - char *ts_names[MAX_PARTITIONS]; - ulong fd_index= 0, i, j; - NDBTAB *tab= (NDBTAB*)tab_par; - NDBTAB::FragmentType ftype= NDBTAB::UserDefined; - partition_element *part_elem; - bool first= TRUE; - uint tot_ts_name_len; - List_iterator part_it(part_info->partitions); - int error; - DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); - - if (part_info->part_type == HASH_PARTITION && - part_info->list_of_part_fields == TRUE) - { - Field **fields= part_info->part_field_array; - - if (part_info->linear_hash_ind) - ftype= NDBTAB::DistrKeyLin; - else - ftype= NDBTAB::DistrKeyHash; - - for (i= 0; i < part_info->part_field_list.elements; i++) - { - NDBCOL *col= tab->getColumn(fields[i]->field_index); - DBUG_PRINT("info",("setting dist key on %s", col->getName())); - col->setPartitionKey(TRUE); - } - } - else - { - if (!current_thd->variables.new_mode) - { - push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "LIST, RANGE and HASH partition disabled by default," - " use --new option to enable"); - DBUG_RETURN(HA_ERR_UNSUPPORTED); - } - /* - Create a shadow field for those tables that have user defined - partitioning. This field stores the value of the partition - function such that NDB can handle reorganisations of the data - even when the MySQL Server isn't available to assist with - calculation of the partition function value. 
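Further down, set_up_partition_info() flattens the partition list (or the subpartition lists, when present) into a single fragment/nodegroup array, defaulting an UNDEF nodegroup in the first partition to group 0. Roughly, with standard containers and without the tablespace-name bookkeeping; UNDEF_NODEGROUP here is a stand-in for the server constant:

#include <vector>

const unsigned UNDEF_NODEGROUP= 0xFFFF;   // illustrative stand-in

struct PartDef { unsigned nodegroup; std::vector<unsigned> sub_nodegroups; };

/* One fragment entry per partition, or per subpartition when present. */
std::vector<unsigned short> flatten_nodegroups(const std::vector<PartDef> &parts)
{
  std::vector<unsigned short> frag_data;
  bool first= true;
  for (const PartDef &p : parts)
  {
    if (p.sub_nodegroups.empty())
    {
      unsigned ng= p.nodegroup;
      if (first && ng == UNDEF_NODEGROUP)
        ng= 0;                               // first undefined group -> 0
      frag_data.push_back((unsigned short) ng);
    }
    else
    {
      for (unsigned ng : p.sub_nodegroups)
      {
        if (first && ng == UNDEF_NODEGROUP)
          ng= 0;
        frag_data.push_back((unsigned short) ng);
      }
    }
    first= false;
  }
  return frag_data;
}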
- */ - NDBCOL col; - DBUG_PRINT("info", ("Generating partition func value field")); - col.setName("$PART_FUNC_VALUE"); - col.setType(NdbDictionary::Column::Int); - col.setLength(1); - col.setNullable(FALSE); - col.setPrimaryKey(FALSE); - col.setAutoIncrement(FALSE); - tab->addColumn(col); - if (part_info->part_type == RANGE_PARTITION) - { - if ((error= set_range_data((void*)tab, part_info))) - { - DBUG_RETURN(error); - } - } - else if (part_info->part_type == LIST_PARTITION) - { - if ((error= set_list_data((void*)tab, part_info))) - { - DBUG_RETURN(error); - } - } - } - tab->setFragmentType(ftype); - i= 0; - tot_ts_name_len= 0; - do - { - uint ng; - part_elem= part_it++; - if (!part_info->is_sub_partitioned()) - { - ng= part_elem->nodegroup_id; - if (first && ng == UNDEF_NODEGROUP) - ng= 0; - ts_names[fd_index]= part_elem->tablespace_name; - frag_data[fd_index++]= ng; - } - else - { - List_iterator sub_it(part_elem->subpartitions); - j= 0; - do - { - part_elem= sub_it++; - ng= part_elem->nodegroup_id; - if (first && ng == UNDEF_NODEGROUP) - ng= 0; - ts_names[fd_index]= part_elem->tablespace_name; - frag_data[fd_index++]= ng; - } while (++j < part_info->num_subparts); - } - first= FALSE; - } while (++i < part_info->num_parts); - tab->setDefaultNoPartitionsFlag(part_info->use_default_num_partitions); - tab->setLinearFlag(part_info->linear_hash_ind); - { - ha_rows max_rows= table_share->max_rows; - ha_rows min_rows= table_share->min_rows; - if (max_rows < min_rows) - max_rows= min_rows; - if (max_rows != (ha_rows)0) /* default setting, don't set fragmentation */ - { - tab->setMaxRows(max_rows); - tab->setMinRows(min_rows); - } - } - tab->setTablespaceNames(ts_names, fd_index*sizeof(char*)); - tab->setFragmentCount(fd_index); - tab->setFragmentData(&frag_data, fd_index*2); - DBUG_RETURN(0); -} - - -bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, - uint table_changes) -{ - DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data"); - uint i; - const NDBTAB *tab= (const NDBTAB *) m_table; - - if (THDVAR(current_thd, use_copying_alter_table)) - { - DBUG_PRINT("info", ("On-line alter table disabled")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - - int pk= 0; - int ai= 0; - - if (create_info->tablespace) - create_info->storage_media = HA_SM_DISK; - else - create_info->storage_media = HA_SM_MEMORY; - - for (i= 0; i < table->s->fields; i++) - { - Field *field= table->field[i]; - const NDBCOL *col= tab->getColumn(i); - if ((col->getStorageType() == NDB_STORAGETYPE_MEMORY && create_info->storage_media != HA_SM_MEMORY) || - (col->getStorageType() == NDB_STORAGETYPE_DISK && create_info->storage_media != HA_SM_DISK)) - { - DBUG_PRINT("info", ("Column storage media is changed")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - - if (field->flags & FIELD_IS_RENAMED) - { - DBUG_PRINT("info", ("Field has been renamed, copy table")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - if ((field->flags & FIELD_IN_ADD_INDEX) && - col->getStorageType() == NdbDictionary::Column::StorageTypeDisk) - { - DBUG_PRINT("info", ("add/drop index not supported for disk stored column")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - - if (field->flags & PRI_KEY_FLAG) - pk=1; - if (field->flags & FIELD_IN_ADD_INDEX) - ai=1; - } - - char tablespace_name[FN_LEN + 1]; - if (get_tablespace_name(current_thd, tablespace_name, FN_LEN)) - { - if (create_info->tablespace) - { - if (strcmp(create_info->tablespace, tablespace_name)) - { - DBUG_PRINT("info", ("storage media is changed, old tablespace=%s, new tablespace=%s", - 
tablespace_name, create_info->tablespace)); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - } - else - { - DBUG_PRINT("info", ("storage media is changed, old is DISK and tablespace=%s, new is MEM", - tablespace_name)); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - } - else - { - if (create_info->storage_media != HA_SM_MEMORY) - { - DBUG_PRINT("info", ("storage media is changed, old is MEM, new is DISK and tablespace=%s", - create_info->tablespace)); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - } - - if (table_changes != IS_EQUAL_YES) - DBUG_RETURN(COMPATIBLE_DATA_NO); - - /* Check that auto_increment value was not changed */ - if ((create_info->used_fields & HA_CREATE_USED_AUTO) && - create_info->auto_increment_value != 0) - { - DBUG_PRINT("info", ("auto_increment value changed")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - - /* Check that row format didn't change */ - if ((create_info->used_fields & HA_CREATE_USED_AUTO) && - get_row_type() != create_info->row_type) - { - DBUG_PRINT("info", ("row format changed")); - DBUG_RETURN(COMPATIBLE_DATA_NO); - } - - DBUG_PRINT("info", ("new table seems compatible")); - DBUG_RETURN(COMPATIBLE_DATA_YES); -} - -bool set_up_tablespace(st_alter_tablespace *alter_info, - NdbDictionary::Tablespace *ndb_ts) -{ - ndb_ts->setName(alter_info->tablespace_name); - ndb_ts->setExtentSize(alter_info->extent_size); - ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name); - return FALSE; -} - -bool set_up_datafile(st_alter_tablespace *alter_info, - NdbDictionary::Datafile *ndb_df) -{ - if (alter_info->max_size > 0) - { - my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0)); - return TRUE; - } - ndb_df->setPath(alter_info->data_file_name); - ndb_df->setSize(alter_info->initial_size); - ndb_df->setTablespace(alter_info->tablespace_name); - return FALSE; -} - -bool set_up_logfile_group(st_alter_tablespace *alter_info, - NdbDictionary::LogfileGroup *ndb_lg) -{ - ndb_lg->setName(alter_info->logfile_group_name); - ndb_lg->setUndoBufferSize(alter_info->undo_buffer_size); - return FALSE; -} - -bool set_up_undofile(st_alter_tablespace *alter_info, - NdbDictionary::Undofile *ndb_uf) -{ - ndb_uf->setPath(alter_info->undo_file_name); - ndb_uf->setSize(alter_info->initial_size); - ndb_uf->setLogfileGroup(alter_info->logfile_group_name); - return FALSE; -} - -int ndbcluster_alter_tablespace(handlerton *hton, - THD* thd, st_alter_tablespace *alter_info) -{ - int is_tablespace= 0; - NdbError err; - NDBDICT *dict; - int error; - const char *errmsg; - Ndb *ndb; - DBUG_ENTER("ha_ndbcluster::alter_tablespace"); - LINT_INIT(errmsg); - - ndb= check_ndb_in_thd(thd); - if (ndb == NULL) - { - DBUG_RETURN(HA_ERR_NO_CONNECTION); - } - dict= ndb->getDictionary(); - - switch (alter_info->ts_cmd_type){ - case (CREATE_TABLESPACE): - { - error= ER_CREATE_FILEGROUP_FAILED; - - NdbDictionary::Tablespace ndb_ts; - NdbDictionary::Datafile ndb_df; - NdbDictionary::ObjectId objid; - if (set_up_tablespace(alter_info, &ndb_ts)) - { - DBUG_RETURN(1); - } - if (set_up_datafile(alter_info, &ndb_df)) - { - DBUG_RETURN(1); - } - errmsg= "TABLESPACE"; - if (dict->createTablespace(ndb_ts, &objid)) - { - DBUG_PRINT("error", ("createTablespace returned %d", error)); - goto ndberror; - } - DBUG_PRINT("alter_info", ("Successfully created Tablespace")); - errmsg= "DATAFILE"; - if (dict->createDatafile(ndb_df)) - { - err= dict->getNdbError(); - NdbDictionary::Tablespace tmp= dict->getTablespace(ndb_ts.getName()); - if (dict->getNdbError().code == 0 && - tmp.getObjectId() == objid.getObjectId() && - tmp.getObjectVersion() == 
objid.getObjectVersion()) - { - dict->dropTablespace(tmp); - } - - DBUG_PRINT("error", ("createDatafile returned %d", error)); - goto ndberror2; - } - is_tablespace= 1; - break; - } - case (ALTER_TABLESPACE): - { - error= ER_ALTER_FILEGROUP_FAILED; - if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE) - { - NdbDictionary::Datafile ndb_df; - if (set_up_datafile(alter_info, &ndb_df)) - { - DBUG_RETURN(1); - } - errmsg= " CREATE DATAFILE"; - if (dict->createDatafile(ndb_df)) - { - goto ndberror; - } - } - else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE) - { - NdbDictionary::Tablespace ts= dict->getTablespace(alter_info->tablespace_name); - NdbDictionary::Datafile df= dict->getDatafile(0, alter_info->data_file_name); - NdbDictionary::ObjectId objid; - df.getTablespaceId(&objid); - if (ts.getObjectId() == objid.getObjectId() && - strcmp(df.getPath(), alter_info->data_file_name) == 0) - { - errmsg= " DROP DATAFILE"; - if (dict->dropDatafile(df)) - { - goto ndberror; - } - } - else - { - DBUG_PRINT("error", ("No such datafile")); - my_error(ER_ALTER_FILEGROUP_FAILED, MYF(0), " NO SUCH FILE"); - DBUG_RETURN(1); - } - } - else - { - DBUG_PRINT("error", ("Unsupported alter tablespace: %d", - alter_info->ts_alter_tablespace_type)); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - is_tablespace= 1; - break; - } - case (CREATE_LOGFILE_GROUP): - { - error= ER_CREATE_FILEGROUP_FAILED; - NdbDictionary::LogfileGroup ndb_lg; - NdbDictionary::Undofile ndb_uf; - NdbDictionary::ObjectId objid; - if (alter_info->undo_file_name == NULL) - { - /* - REDO files in LOGFILE GROUP not supported yet - */ - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - if (set_up_logfile_group(alter_info, &ndb_lg)) - { - DBUG_RETURN(1); - } - errmsg= "LOGFILE GROUP"; - if (dict->createLogfileGroup(ndb_lg, &objid)) - { - goto ndberror; - } - DBUG_PRINT("alter_info", ("Successfully created Logfile Group")); - if (set_up_undofile(alter_info, &ndb_uf)) - { - DBUG_RETURN(1); - } - errmsg= "UNDOFILE"; - if (dict->createUndofile(ndb_uf)) - { - err= dict->getNdbError(); - NdbDictionary::LogfileGroup tmp= dict->getLogfileGroup(ndb_lg.getName()); - if (dict->getNdbError().code == 0 && - tmp.getObjectId() == objid.getObjectId() && - tmp.getObjectVersion() == objid.getObjectVersion()) - { - dict->dropLogfileGroup(tmp); - } - goto ndberror2; - } - break; - } - case (ALTER_LOGFILE_GROUP): - { - error= ER_ALTER_FILEGROUP_FAILED; - if (alter_info->undo_file_name == NULL) - { - /* - REDO files in LOGFILE GROUP not supported yet - */ - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - NdbDictionary::Undofile ndb_uf; - if (set_up_undofile(alter_info, &ndb_uf)) - { - DBUG_RETURN(1); - } - errmsg= "CREATE UNDOFILE"; - if (dict->createUndofile(ndb_uf)) - { - goto ndberror; - } - break; - } - case (DROP_TABLESPACE): - { - error= ER_DROP_FILEGROUP_FAILED; - errmsg= "TABLESPACE"; - if (dict->dropTablespace(dict->getTablespace(alter_info->tablespace_name))) - { - goto ndberror; - } - is_tablespace= 1; - break; - } - case (DROP_LOGFILE_GROUP): - { - error= ER_DROP_FILEGROUP_FAILED; - errmsg= "LOGFILE GROUP"; - if (dict->dropLogfileGroup(dict->getLogfileGroup(alter_info->logfile_group_name))) - { - goto ndberror; - } - break; - } - case (CHANGE_FILE_TABLESPACE): - { - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - case (ALTER_ACCESS_MODE_TABLESPACE): - { - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - default: - { - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); - } - } -#ifdef HAVE_NDB_BINLOG - if (is_tablespace) - 
ndbcluster_log_schema_op(thd, 0, - thd->query(), thd->query_length(), - "", alter_info->tablespace_name, - 0, 0, - SOT_TABLESPACE, 0, 0); - else - ndbcluster_log_schema_op(thd, 0, - thd->query(), thd->query_length(), - "", alter_info->logfile_group_name, - 0, 0, - SOT_LOGFILE_GROUP, 0, 0); -#endif - DBUG_RETURN(FALSE); - -ndberror: - err= dict->getNdbError(); -ndberror2: - set_ndb_err(thd, err); - ndb_to_mysql_error(&err); - - my_error(error, MYF(0), errmsg); - DBUG_RETURN(1); -} - - -bool ha_ndbcluster::get_no_parts(const char *name, uint *num_parts) -{ - Ndb *ndb; - NDBDICT *dict; - int err; - DBUG_ENTER("ha_ndbcluster::get_no_parts"); - LINT_INIT(err); - - set_dbname(name); - set_tabname(name); - for (;;) - { - if (check_ndb_connection()) - { - err= HA_ERR_NO_CONNECTION; - break; - } - ndb= get_ndb(); - ndb->setDatabaseName(m_dbname); - Ndb_table_guard ndbtab_g(dict= ndb->getDictionary(), m_tabname); - if (!ndbtab_g.get_table()) - ERR_BREAK(dict->getNdbError(), err); - *num_parts= ndbtab_g.get_table()->getFragmentCount(); - DBUG_RETURN(FALSE); - } - - print_error(err, MYF(0)); - DBUG_RETURN(TRUE); -} - -static int ndbcluster_fill_files_table(handlerton *hton, - THD *thd, - TABLE_LIST *tables, - COND *cond) -{ - TABLE* table= tables->table; - Ndb *ndb= check_ndb_in_thd(thd); - NdbDictionary::Dictionary* dict= ndb->getDictionary(); - NdbDictionary::Dictionary::List dflist; - NdbError ndberr; - uint i; - DBUG_ENTER("ndbcluster_fill_files_table"); - - dict->listObjects(dflist, NdbDictionary::Object::Datafile); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - ERR_RETURN(ndberr); - - for (i= 0; i < dflist.count; i++) - { - NdbDictionary::Dictionary::List::Element& elt = dflist.elements[i]; - Ndb_cluster_connection_node_iter iter; - uint id; - - g_ndb_cluster_connection->init_get_next_node(iter); - - while ((id= g_ndb_cluster_connection->get_next_node(iter))) - { - init_fill_schema_files_row(table); - NdbDictionary::Datafile df= dict->getDatafile(id, elt.name); - ndberr= dict->getNdbError(); - if(ndberr.classification != NdbError::NoError) - { - if (ndberr.classification == NdbError::SchemaError) - continue; - - if (ndberr.classification == NdbError::UnknownResultError) - continue; - - ERR_RETURN(ndberr); - } - NdbDictionary::Tablespace ts= dict->getTablespace(df.getTablespace()); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - { - if (ndberr.classification == NdbError::SchemaError) - continue; - ERR_RETURN(ndberr); - } - table->field[IS_FILES_TABLE_CATALOG]->store(STRING_WITH_LEN("def"), - system_charset_info); - table->field[IS_FILES_FILE_NAME]->set_notnull(); - table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name), - system_charset_info); - table->field[IS_FILES_FILE_TYPE]->set_notnull(); - table->field[IS_FILES_FILE_TYPE]->store("DATAFILE",8, - system_charset_info); - table->field[IS_FILES_TABLESPACE_NAME]->set_notnull(); - table->field[IS_FILES_TABLESPACE_NAME]->store(df.getTablespace(), - strlen(df.getTablespace()), - system_charset_info); - table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull(); - table->field[IS_FILES_LOGFILE_GROUP_NAME]-> - store(ts.getDefaultLogfileGroup(), - strlen(ts.getDefaultLogfileGroup()), - system_charset_info); - table->field[IS_FILES_ENGINE]->set_notnull(); - table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name, - ndbcluster_hton_name_length, - system_charset_info); - - table->field[IS_FILES_FREE_EXTENTS]->set_notnull(); - 
table->field[IS_FILES_FREE_EXTENTS]->store(df.getFree() - / ts.getExtentSize()); - table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull(); - table->field[IS_FILES_TOTAL_EXTENTS]->store(df.getSize() - / ts.getExtentSize()); - table->field[IS_FILES_EXTENT_SIZE]->set_notnull(); - table->field[IS_FILES_EXTENT_SIZE]->store(ts.getExtentSize()); - table->field[IS_FILES_INITIAL_SIZE]->set_notnull(); - table->field[IS_FILES_INITIAL_SIZE]->store(df.getSize()); - table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull(); - table->field[IS_FILES_MAXIMUM_SIZE]->store(df.getSize()); - table->field[IS_FILES_VERSION]->set_notnull(); - table->field[IS_FILES_VERSION]->store(df.getObjectVersion()); - - table->field[IS_FILES_ROW_FORMAT]->set_notnull(); - table->field[IS_FILES_ROW_FORMAT]->store("FIXED", 5, system_charset_info); - - char extra[30]; - int len= my_snprintf(extra, sizeof(extra), "CLUSTER_NODE=%u", id); - table->field[IS_FILES_EXTRA]->set_notnull(); - table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info); - schema_table_store_record(thd, table); - } - } - - NdbDictionary::Dictionary::List uflist; - dict->listObjects(uflist, NdbDictionary::Object::Undofile); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - ERR_RETURN(ndberr); - - for (i= 0; i < uflist.count; i++) - { - NdbDictionary::Dictionary::List::Element& elt= uflist.elements[i]; - Ndb_cluster_connection_node_iter iter; - unsigned id; - - g_ndb_cluster_connection->init_get_next_node(iter); - - while ((id= g_ndb_cluster_connection->get_next_node(iter))) - { - NdbDictionary::Undofile uf= dict->getUndofile(id, elt.name); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - { - if (ndberr.classification == NdbError::SchemaError) - continue; - if (ndberr.classification == NdbError::UnknownResultError) - continue; - ERR_RETURN(ndberr); - } - NdbDictionary::LogfileGroup lfg= - dict->getLogfileGroup(uf.getLogfileGroup()); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - { - if (ndberr.classification == NdbError::SchemaError) - continue; - ERR_RETURN(ndberr); - } - - init_fill_schema_files_row(table); - table->field[IS_FILES_FILE_NAME]->set_notnull(); - table->field[IS_FILES_FILE_NAME]->store(elt.name, strlen(elt.name), - system_charset_info); - table->field[IS_FILES_FILE_TYPE]->set_notnull(); - table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8, - system_charset_info); - NdbDictionary::ObjectId objid; - uf.getLogfileGroupId(&objid); - table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull(); - table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(uf.getLogfileGroup(), - strlen(uf.getLogfileGroup()), - system_charset_info); - table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull(); - table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(objid.getObjectId()); - table->field[IS_FILES_ENGINE]->set_notnull(); - table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name, - ndbcluster_hton_name_length, - system_charset_info); - - table->field[IS_FILES_TOTAL_EXTENTS]->set_notnull(); - table->field[IS_FILES_TOTAL_EXTENTS]->store(uf.getSize()/4); - table->field[IS_FILES_EXTENT_SIZE]->set_notnull(); - table->field[IS_FILES_EXTENT_SIZE]->store(4); - - table->field[IS_FILES_INITIAL_SIZE]->set_notnull(); - table->field[IS_FILES_INITIAL_SIZE]->store(uf.getSize()); - table->field[IS_FILES_MAXIMUM_SIZE]->set_notnull(); - table->field[IS_FILES_MAXIMUM_SIZE]->store(uf.getSize()); - - table->field[IS_FILES_VERSION]->set_notnull(); - 
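The INFORMATION_SCHEMA.FILES rows filled above report sizes in extents: data files divide the free and total byte counts by the tablespace extent size, while undo files use a fixed pseudo-extent of 4. The datafile arithmetic as a tiny standalone sketch (for example, a 128 MiB file with 1 MiB extents reports 128 total extents):

#include <cstdint>

struct ExtentCounts { uint64_t free_extents; uint64_t total_extents; };

/* Byte counts to the extent counts shown in I_S.FILES, as done above
   with df.getFree(), df.getSize() and ts.getExtentSize(). */
ExtentCounts datafile_extents(uint64_t free_bytes, uint64_t total_bytes,
                              uint64_t extent_size)
{
  ExtentCounts c;
  c.free_extents= free_bytes / extent_size;
  c.total_extents= total_bytes / extent_size;
  return c;
}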
table->field[IS_FILES_VERSION]->store(uf.getObjectVersion()); - - char extra[100]; - int len= my_snprintf(extra,sizeof(extra),"CLUSTER_NODE=%u;UNDO_BUFFER_SIZE=%lu", - id, (ulong) lfg.getUndoBufferSize()); - table->field[IS_FILES_EXTRA]->set_notnull(); - table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info); - schema_table_store_record(thd, table); - } - } - - // now for LFGs - NdbDictionary::Dictionary::List lfglist; - dict->listObjects(lfglist, NdbDictionary::Object::LogfileGroup); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - ERR_RETURN(ndberr); - - for (i= 0; i < lfglist.count; i++) - { - NdbDictionary::Dictionary::List::Element& elt= lfglist.elements[i]; - - NdbDictionary::LogfileGroup lfg= dict->getLogfileGroup(elt.name); - ndberr= dict->getNdbError(); - if (ndberr.classification != NdbError::NoError) - { - if (ndberr.classification == NdbError::SchemaError) - continue; - ERR_RETURN(ndberr); - } - - init_fill_schema_files_row(table); - table->field[IS_FILES_FILE_TYPE]->set_notnull(); - table->field[IS_FILES_FILE_TYPE]->store("UNDO LOG", 8, - system_charset_info); - - table->field[IS_FILES_LOGFILE_GROUP_NAME]->set_notnull(); - table->field[IS_FILES_LOGFILE_GROUP_NAME]->store(elt.name, - strlen(elt.name), - system_charset_info); - table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->set_notnull(); - table->field[IS_FILES_LOGFILE_GROUP_NUMBER]->store(lfg.getObjectId()); - table->field[IS_FILES_ENGINE]->set_notnull(); - table->field[IS_FILES_ENGINE]->store(ndbcluster_hton_name, - ndbcluster_hton_name_length, - system_charset_info); - - table->field[IS_FILES_FREE_EXTENTS]->set_notnull(); - table->field[IS_FILES_FREE_EXTENTS]->store(lfg.getUndoFreeWords()); - table->field[IS_FILES_EXTENT_SIZE]->set_notnull(); - table->field[IS_FILES_EXTENT_SIZE]->store(4); - - table->field[IS_FILES_VERSION]->set_notnull(); - table->field[IS_FILES_VERSION]->store(lfg.getObjectVersion()); - - char extra[100]; - int len= my_snprintf(extra,sizeof(extra), - "UNDO_BUFFER_SIZE=%lu", - (ulong) lfg.getUndoBufferSize()); - table->field[IS_FILES_EXTRA]->set_notnull(); - table->field[IS_FILES_EXTRA]->store(extra, len, system_charset_info); - schema_table_store_record(thd, table); - } - DBUG_RETURN(0); -} - -SHOW_VAR ndb_status_variables_export[]= { - {"Ndb", (char*) &ndb_status_variables, SHOW_ARRAY}, - {NullS, NullS, SHOW_LONG} -}; - -static MYSQL_SYSVAR_ULONG( - cache_check_time, /* name */ - opt_ndb_cache_check_time, /* var */ - PLUGIN_VAR_RQCMDARG, - "A dedicated thread is created to, at the given " - "millisecond interval, invalidate the query cache " - "if another MySQL server in the cluster has changed " - "the data in the database.", - NULL, /* check func. */ - NULL, /* update func. */ - 0, /* default */ - 0, /* min */ - ONE_YEAR_IN_SECONDS, /* max */ - 0 /* block */ -); - - -static MYSQL_SYSVAR_ULONG( - extra_logging, /* name */ - opt_ndb_extra_logging, /* var */ - PLUGIN_VAR_OPCMDARG, - "Turn on more logging in the error log.", - NULL, /* check func. */ - NULL, /* update func. */ - 1, /* default */ - 0, /* min */ - 0, /* max */ - 0 /* block */ -); - - -ulong opt_ndb_report_thresh_binlog_epoch_slip; -static MYSQL_SYSVAR_ULONG( - report_thresh_binlog_epoch_slip, /* name */ - opt_ndb_report_thresh_binlog_epoch_slip,/* var */ - PLUGIN_VAR_RQCMDARG, - "Threshold on number of epochs to be behind before reporting binlog " - "status. E.g. 
3 means that if the difference between what epoch has " - "been received from the storage nodes and what has been applied to " - "the binlog is 3 or more, a status message will be sent to the cluster " - "log.", - NULL, /* check func. */ - NULL, /* update func. */ - 3, /* default */ - 0, /* min */ - 256, /* max */ - 0 /* block */ -); - - -ulong opt_ndb_report_thresh_binlog_mem_usage; -static MYSQL_SYSVAR_ULONG( - report_thresh_binlog_mem_usage, /* name */ - opt_ndb_report_thresh_binlog_mem_usage,/* var */ - PLUGIN_VAR_RQCMDARG, - "Threshold on percentage of free memory before reporting binlog " - "status. E.g. 10 means that if amount of available memory for " - "receiving binlog data from the storage nodes goes below 10%, " - "a status message will be sent to the cluster log.", - NULL, /* check func. */ - NULL, /* update func. */ - 10, /* default */ - 0, /* min */ - 100, /* max */ - 0 /* block */ -); - - -static MYSQL_SYSVAR_STR( - connectstring, /* name */ - opt_ndb_connectstring, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Connect string for ndbcluster.", - NULL, /* check func. */ - NULL, /* update func. */ - NULL /* default */ -); - - -static MYSQL_SYSVAR_STR( - mgmd_host, /* name */ - opt_ndb_mgmd_host, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Set host and port for ndb_mgmd. Syntax: hostname[:port]", - NULL, /* check func. */ - NULL, /* update func. */ - NULL /* default */ -); - - -static MYSQL_SYSVAR_UINT( - nodeid, /* name */ - opt_ndb_nodeid, /* var */ - PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, - "Nodeid for this mysqld in the cluster.", - NULL, /* check func. */ - NULL, /* update func. */ - 0, /* default */ - 0, /* min */ - 255, /* max */ - 0 /* block */ -); - -static struct st_mysql_sys_var* system_variables[]= { - MYSQL_SYSVAR(cache_check_time), - MYSQL_SYSVAR(extra_logging), - MYSQL_SYSVAR(report_thresh_binlog_mem_usage), - MYSQL_SYSVAR(report_thresh_binlog_epoch_slip), - MYSQL_SYSVAR(distribution), - MYSQL_SYSVAR(autoincrement_prefetch_sz), - MYSQL_SYSVAR(force_send), - MYSQL_SYSVAR(use_exact_count), - MYSQL_SYSVAR(use_transactions), - MYSQL_SYSVAR(use_copying_alter_table), - MYSQL_SYSVAR(optimized_node_selection), - MYSQL_SYSVAR(index_stat_enable), - MYSQL_SYSVAR(index_stat_cache_entries), - MYSQL_SYSVAR(index_stat_update_freq), - MYSQL_SYSVAR(connectstring), - MYSQL_SYSVAR(mgmd_host), - MYSQL_SYSVAR(nodeid), - - NULL -}; - - -struct st_mysql_storage_engine ndbcluster_storage_engine= -{ MYSQL_HANDLERTON_INTERFACE_VERSION }; - -mysql_declare_plugin(ndbcluster) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, - &ndbcluster_storage_engine, - ndbcluster_hton_name, - "MySQL AB", - "Clustered, fault-tolerant tables", - PLUGIN_LICENSE_GPL, - ndbcluster_init, /* Plugin Init */ - NULL, /* Plugin Deinit */ - 0x0100 /* 1.0 */, - ndb_status_variables_export,/* status variables */ - system_variables, /* system variables */ - NULL, /* config options */ - 0, /* flags */ -} -mysql_declare_plugin_end; -maria_declare_plugin(ndbcluster) -{ - MYSQL_STORAGE_ENGINE_PLUGIN, - &ndbcluster_storage_engine, - ndbcluster_hton_name, - "MySQL AB", - "Clustered, fault-tolerant tables", - PLUGIN_LICENSE_GPL, - ndbcluster_init, /* Plugin Init */ - NULL, /* Plugin Deinit */ - 0x0100 /* 1.0 */, - ndb_status_variables_export,/* status variables */ - NULL, /* system variables */ - "1.0", /* string version */ - MariaDB_PLUGIN_MATURITY_GAMMA /* maturity */ -} -maria_declare_plugin_end; - -#else -int Sun_ar_require_a_symbol_here= 0; -#endif diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h 
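Aside on the option tables that close ha_ndbcluster.cc above: each MYSQL_SYSVAR_ULONG entry carries a default, a minimum and a maximum, and both ndb_status_variables_export[] and system_variables[] are terminated by a NULL/NullS sentinel so the server can walk them without a separate length. The following standalone C++ sketch shows only that shape; BoundedUlong, kOptions and apply_option are invented names, plain C++ stands in for the MYSQL_SYSVAR_* macros, and the real server's handling of out-of-range values (warnings, block-size rounding) is richer than this simple clamp.

#include <cstdio>
#include <cstring>

struct BoundedUlong {
  const char    *name;     // option name, e.g. "cache_check_time"
  unsigned long *target;   // variable updated by the option
  unsigned long  def, min, max;
};

static unsigned long opt_cache_check_time = 0;
static unsigned long opt_extra_logging    = 1;

// Sentinel-terminated table, mirroring system_variables[] above.
static const BoundedUlong kOptions[] = {
  { "cache_check_time", &opt_cache_check_time, 0, 0, 31536000UL }, // one year in seconds
  { "extra_logging",    &opt_extra_logging,    1, 0, 0 },
  { nullptr, nullptr, 0, 0, 0 }                    // terminator
};

// Set an option by name, clamping the value into [min, max].
static bool apply_option(const char *name, unsigned long value) {
  for (const BoundedUlong *o = kOptions; o->name; ++o) {
    if (std::strcmp(o->name, name) != 0) continue;
    if (o->max && value > o->max) value = o->max;  // max==0 means "no upper bound" in this sketch
    if (value < o->min)           value = o->min;
    *o->target = value;
    return true;
  }
  return false;                                    // unknown option
}

int main() {
  apply_option("cache_check_time", 999999999UL);   // clamped to the maximum
  std::printf("cache_check_time=%lu\n", opt_cache_check_time);
}
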
deleted file mode 100644 index 70e1e9dc7cf..00000000000 --- a/sql/ha_ndbcluster.h +++ /dev/null @@ -1,599 +0,0 @@ -#ifndef HA_NDBCLUSTER_INCLUDED -#define HA_NDBCLUSTER_INCLUDED - -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -/* - This file defines the NDB Cluster handler: the interface between MySQL and - NDB Cluster -*/ - -/* The class defining a handle to an NDB Cluster table */ - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -/* Blob tables and events are internal to NDB and must never be accessed */ -#define IS_NDB_BLOB_PREFIX(A) is_prefix(A, "NDB$BLOB") - -#include -#include - -#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 - -#ifdef HAVE_PSI_INTERFACE -extern PSI_file_key key_file_ndb; -#endif /* HAVE_PSI_INTERFACE */ - - -class Ndb; // Forward declaration -class NdbOperation; // Forward declaration -class NdbTransaction; // Forward declaration -class NdbRecAttr; // Forward declaration -class NdbScanOperation; -class NdbIndexScanOperation; -class NdbBlob; -class NdbIndexStat; -class NdbEventOperation; -class ha_ndbcluster_cond; - -#include "sql_partition.h" /* part_id_range */ - -// connectstring to cluster if given by mysqld -extern const char *ndbcluster_connectstring; - -typedef enum ndb_index_type { - UNDEFINED_INDEX = 0, - PRIMARY_KEY_INDEX = 1, - PRIMARY_KEY_ORDERED_INDEX = 2, - UNIQUE_INDEX = 3, - UNIQUE_ORDERED_INDEX = 4, - ORDERED_INDEX = 5 -} NDB_INDEX_TYPE; - -typedef enum ndb_index_status { - UNDEFINED = 0, - ACTIVE = 1, - TO_BE_DROPPED = 2 -} NDB_INDEX_STATUS; - -typedef struct ndb_index_data { - NDB_INDEX_TYPE type; - NDB_INDEX_STATUS status; - const NdbDictionary::Index *index; - const NdbDictionary::Index *unique_index; - unsigned char *unique_index_attrid_map; - bool null_in_unique_index; - // In this version stats are not shared between threads - NdbIndexStat* index_stat; - uint index_stat_cache_entries; - // Simple counter mechanism to decide when to connect to db - uint index_stat_update_freq; - uint index_stat_query_count; -} NDB_INDEX_DATA; - -typedef enum ndb_write_op { - NDB_INSERT = 0, - NDB_UPDATE = 1, - NDB_PK_UPDATE = 2 -} NDB_WRITE_OP; - -typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; - -int get_ndb_blobs_value(TABLE* table, NdbValue* value_array, - uchar*& buffer, uint& buffer_size, - my_ptrdiff_t ptrdiff); - -typedef enum { - NSS_INITIAL= 0, - NSS_DROPPED, - NSS_ALTERED -} NDB_SHARE_STATE; - -typedef struct st_ndbcluster_share { - NDB_SHARE_STATE state; - MEM_ROOT mem_root; - THR_LOCK lock; - mysql_mutex_t mutex; - char *key; - uint key_length; - THD *util_lock; - uint use_count; - uint commit_count_lock; - ulonglong commit_count; - char *db; - char *table_name; - Ndb::TupleIdRange tuple_id_range; -#ifdef HAVE_NDB_BINLOG - uint32 connect_count; - uint32 flags; - NdbEventOperation *op; - NdbEventOperation 
*op_old; // for rename table - char *old_names; // for rename table - TABLE_SHARE *table_share; - TABLE *table; - uchar *record[2]; // pointer to allocated records for receiving data - NdbValue *ndb_value[2]; - MY_BITMAP *subscriber_bitmap; -#endif -} NDB_SHARE; - -inline -NDB_SHARE_STATE -get_ndb_share_state(NDB_SHARE *share) -{ - NDB_SHARE_STATE state; - mysql_mutex_lock(&share->mutex); - state= share->state; - mysql_mutex_unlock(&share->mutex); - return state; -} - -inline -void -set_ndb_share_state(NDB_SHARE *share, NDB_SHARE_STATE state) -{ - mysql_mutex_lock(&share->mutex); - share->state= state; - mysql_mutex_unlock(&share->mutex); -} - -struct Ndb_tuple_id_range_guard { - Ndb_tuple_id_range_guard(NDB_SHARE* _share) : - share(_share), - range(share->tuple_id_range) { - mysql_mutex_lock(&share->mutex); - } - ~Ndb_tuple_id_range_guard() { - mysql_mutex_unlock(&share->mutex); - } - NDB_SHARE* share; - Ndb::TupleIdRange& range; -}; - -#ifdef HAVE_NDB_BINLOG -/* NDB_SHARE.flags */ -#define NSF_HIDDEN_PK 1 /* table has hidden primary key */ -#define NSF_BLOB_FLAG 2 /* table has blob attributes */ -#define NSF_NO_BINLOG 4 /* table should not be binlogged */ -#endif - -typedef enum ndb_query_state_bits { - NDB_QUERY_NORMAL = 0, - NDB_QUERY_MULTI_READ_RANGE = 1 -} NDB_QUERY_STATE_BITS; - -/* - Place holder for ha_ndbcluster thread specific data -*/ - -enum THD_NDB_OPTIONS -{ - TNO_NO_LOG_SCHEMA_OP= 1 << 0 -}; - -enum THD_NDB_TRANS_OPTIONS -{ - TNTO_INJECTED_APPLY_STATUS= 1 << 0 - ,TNTO_NO_LOGGING= 1 << 1 -}; - -struct Ndb_local_table_statistics { - int no_uncommitted_rows_count; - ulong last_count; - ha_rows records; -}; - -class Thd_ndb -{ - public: - Thd_ndb(); - ~Thd_ndb(); - - void init_open_tables(); - - Ndb *ndb; - ulong count; - uint lock_count; - uint start_stmt_count; - NdbTransaction *trans; - bool m_error; - bool m_slow_path; - int m_error_code; - uint32 m_query_id; /* query id whn m_error_code was set */ - uint32 options; - uint32 trans_options; - List changed_tables; - uint query_state; - HASH open_tables; -}; - -class ha_ndbcluster: public handler -{ - public: - ha_ndbcluster(handlerton *hton, TABLE_SHARE *table); - ~ha_ndbcluster(); - - int ha_initialise(); - int open(const char *name, int mode, uint test_if_locked); - int close(void); - - int write_row(uchar *buf); - int update_row(const uchar *old_data, uchar *new_data); - int delete_row(const uchar *buf); - int index_init(uint index, bool sorted); - int index_end(); - int index_read(uchar *buf, const uchar *key, uint key_len, - enum ha_rkey_function find_flag); - int index_next(uchar *buf); - int index_prev(uchar *buf); - int index_first(uchar *buf); - int index_last(uchar *buf); - int index_read_last(uchar * buf, const uchar * key, uint key_len); - int rnd_init(bool scan); - int rnd_end(); - int rnd_next(uchar *buf); - int rnd_pos(uchar *buf, uchar *pos); - void position(const uchar *record); - int read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted); - int read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted, - uchar* buf); - int read_range_next(); - int alter_tablespace(st_alter_tablespace *info); - - /** - * Multi range stuff - */ -#if 0 - /* - MRR/NDB is disabled in MariaDB. This is because in MariaDB, we've - backported - - the latest version of MRR interface (BKA needs this) - - the latest version of DS-MRR implementation - but didn't backport the latest version MRR/NDB implementation. 
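Aside on the share-state helpers just shown: get_ndb_share_state(), set_ndb_share_state() and Ndb_tuple_id_range_guard all funnel access to NDB_SHARE fields through share->mutex, and the guard keeps the mutex held for as long as the caller works with the tuple-id range. A compile-and-run sketch of the same pattern follows, with std::mutex standing in for mysql_mutex_t and the Share/RangeGuard names invented for the illustration.

#include <cstdio>
#include <mutex>

enum ShareState { NSS_INITIAL, NSS_DROPPED, NSS_ALTERED };

struct Share {
  std::mutex mutex;
  ShareState state = NSS_INITIAL;
  unsigned long long next_tuple_id = 1;   // stands in for Ndb::TupleIdRange
};

// Read the state under the lock, as get_ndb_share_state() does.
ShareState get_state(Share &s) {
  std::lock_guard<std::mutex> lock(s.mutex);
  return s.state;
}

void set_state(Share &s, ShareState st) {
  std::lock_guard<std::mutex> lock(s.mutex);
  s.state = st;
}

// RAII guard: the mutex stays locked while the caller uses the range,
// mirroring Ndb_tuple_id_range_guard.
struct RangeGuard {
  explicit RangeGuard(Share &s) : share(s), lock(s.mutex) {}
  unsigned long long take_id() { return share.next_tuple_id++; }
  Share &share;
  std::lock_guard<std::mutex> lock;
};

int main() {
  Share share;
  set_state(share, NSS_ALTERED);
  std::printf("state=%d\n", (int)get_state(share));
  RangeGuard g(share);                    // lock held until g goes out of scope
  std::printf("next id=%llu\n", g.take_id());
}
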
- - */ - int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, - KEY_MULTI_RANGE*ranges, uint range_count, - bool sorted, HANDLER_BUFFER *buffer); - int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); -#endif - bool null_value_index_search(KEY_MULTI_RANGE *ranges, - KEY_MULTI_RANGE *end_range, - HANDLER_BUFFER *buffer); - - bool get_error_message(int error, String *buf); - ha_rows records(); - ha_rows estimate_rows_upper_bound() - { return HA_POS_ERROR; } - int info(uint); - void get_dynamic_partition_info(PARTITION_STATS *stat_info, uint part_id); - int extra(enum ha_extra_function operation); - int extra_opt(enum ha_extra_function operation, ulong cache_size); - int reset(); - int external_lock(THD *thd, int lock_type); - void unlock_row(); - int start_stmt(THD *thd, thr_lock_type lock_type); - void print_error(int error, myf errflag); - const char * table_type() const; - const char ** bas_ext() const; - ulonglong table_flags(void) const; - void prepare_for_alter(); - int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); - int prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys); - int final_drop_index(TABLE *table_arg); - void set_part_info(partition_info *part_info); - ulong index_flags(uint idx, uint part, bool all_parts) const; - uint max_supported_record_length() const; - uint max_supported_keys() const; - uint max_supported_key_parts() const; - uint max_supported_key_length() const; - uint max_supported_key_part_length() const; - - int rename_table(const char *from, const char *to); - int delete_table(const char *name); - int create(const char *name, TABLE *form, HA_CREATE_INFO *info); - int create_handler_files(const char *file, const char *old_name, - int action_flag, HA_CREATE_INFO *info); - int get_default_no_partitions(HA_CREATE_INFO *info); - bool get_no_parts(const char *name, uint *no_parts); - void set_auto_partitions(partition_info *part_info); - virtual bool is_fatal_error(int error, uint flags) - { - if (!handler::is_fatal_error(error, flags) || - error == HA_ERR_NO_PARTITION_FOUND) - return FALSE; - return TRUE; - } - - THR_LOCK_DATA **store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type); - - bool low_byte_first() const; - - const char* index_type(uint key_number); - - double scan_time(); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); - void start_bulk_insert(ha_rows rows); - int end_bulk_insert(); - - static Thd_ndb* seize_thd_ndb(); - static void release_thd_ndb(Thd_ndb* thd_ndb); - -static void set_dbname(const char *pathname, char *dbname); -static void set_tabname(const char *pathname, char *tabname); - - /* - Condition pushdown - */ - - /* - Push condition down to the table handler. - SYNOPSIS - cond_push() - cond Condition to be pushed. The condition tree must not be - modified by the by the caller. - RETURN - The 'remainder' condition that caller must use to filter out records. - NULL means the handler will not return rows that do not match the - passed condition. - NOTES - The pushed conditions form a stack (from which one can remove the - last pushed condition using cond_pop). - The table handler filters out rows using (pushed_cond1 AND pushed_cond2 - AND ... AND pushed_condN) - or less restrictive condition, depending on handler's capabilities. - - handler->reset() call empties the condition stack. - Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the - condition stack. 
- The current implementation supports arbitrary AND/OR nested conditions - with comparisons between columns and constants (including constant - expressions and function calls) and the following comparison operators: - =, !=, >, >=, <, <=, like, "not like", "is null", and "is not null". - Negated conditions are supported by NOT which generate NAND/NOR groups. - */ - const COND *cond_push(const COND *cond); - /* - Pop the top condition from the condition stack of the handler instance. - SYNOPSIS - cond_pop() - Pops the top if condition stack, if stack is not empty - */ - void cond_pop(); - - uint8 table_cache_type(); - - /* - * Internal to ha_ndbcluster, used by C functions - */ - int ndb_err(NdbTransaction*); - - my_bool register_query_cache_table(THD *thd, char *table_key, - uint key_length, - qc_engine_callback *engine_callback, - ulonglong *engine_data); - - bool check_if_incompatible_data(HA_CREATE_INFO *info, - uint table_changes); - -private: - int loc_read_multi_range_next(KEY_MULTI_RANGE **found_range_p); - friend int ndbcluster_drop_database_impl(const char *path); - friend int ndb_handle_schema_change(THD *thd, - Ndb *ndb, NdbEventOperation *pOp, - NDB_SHARE *share); - - static int delete_table(ha_ndbcluster *h, Ndb *ndb, - const char *path, - const char *db, - const char *table_name); - int create_ndb_index(const char *name, KEY *key_info, bool unique); - int create_ordered_index(const char *name, KEY *key_info); - int create_unique_index(const char *name, KEY *key_info); - int create_index(const char *name, KEY *key_info, - NDB_INDEX_TYPE idx_type, uint idx_no); -// Index list management - int create_indexes(Ndb *ndb, TABLE *tab); - int open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error); - void renumber_indexes(Ndb *ndb, TABLE *tab); - int drop_indexes(Ndb *ndb, TABLE *tab); - int add_index_handle(THD *thd, NdbDictionary::Dictionary *dict, - KEY *key_info, const char *index_name, uint index_no); - int get_metadata(const char* path); - void release_metadata(THD *thd, Ndb *ndb); - NDB_INDEX_TYPE get_index_type(uint idx_no) const; - NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; - NDB_INDEX_TYPE get_index_type_from_key(uint index_no, KEY *key_info, - bool primary) const; - bool has_null_in_unique_index(uint idx_no) const; - bool check_index_fields_not_null(KEY *key_info); - - uint set_up_partition_info(partition_info *part_info, - TABLE *table, - void *tab); - char* get_tablespace_name(THD *thd, char *name, uint name_len); - int set_range_data(void *tab, partition_info* part_info); - int set_list_data(void *tab, partition_info* part_info); - int complemented_read(const uchar *old_data, uchar *new_data, - uint32 old_part_id); - int pk_read(const uchar *key, uint key_len, uchar *buf, uint32 part_id); - int ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, bool descending, uchar* buf, - part_id_range *part_spec); - int unique_index_read(const uchar *key, uint key_len, - uchar *buf); - int unique_index_scan(const KEY* key_info, - const uchar *key, - uint key_len, - uchar *buf); - int full_table_scan(uchar * buf); - - bool check_all_operations_for_error(NdbTransaction *trans, - const NdbOperation *first, - const NdbOperation *last, - uint errcode); - int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op); - int fetch_next(NdbScanOperation* op); - int set_auto_inc(Field *field); - int next_result(uchar *buf); - int define_read_attrs(uchar* buf, NdbOperation* op); - int filtered_scan(const uchar *key, uint key_len, - 
uchar *buf, - enum ha_rkey_function find_flag); - int close_scan(); - void unpack_record(uchar *buf); - int get_ndb_lock_type(enum thr_lock_type type); - - void set_dbname(const char *pathname); - void set_tabname(const char *pathname); - - bool set_hidden_key(NdbOperation*, - uint fieldnr, const uchar* field_ptr); - int set_ndb_key(NdbOperation*, Field *field, - uint fieldnr, const uchar* field_ptr); - int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, - int row_offset= 0, bool *set_blob_value= 0); - int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, uchar*); - int get_ndb_partition_id(NdbOperation *); - friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); - int set_primary_key(NdbOperation *op, const uchar *key); - int set_primary_key_from_record(NdbOperation *op, const uchar *record); - bool check_index_fields_in_write_set(uint keyno); - int set_index_key_from_record(NdbOperation *op, const uchar *record, - uint keyno); - int set_bounds(NdbIndexScanOperation*, uint inx, bool rir, - const key_range *keys[2], uint= 0); - int key_cmp(uint keynr, const uchar * old_row, const uchar * new_row); - int set_index_key(NdbOperation *, const KEY *key_info, const uchar *key_ptr); - void print_results(); - - virtual void get_auto_increment(ulonglong offset, ulonglong increment, - ulonglong nb_desired_values, - ulonglong *first_value, - ulonglong *nb_reserved_values); - bool uses_blob_value(); - - char *update_table_comment(const char * comment); - - int write_ndb_file(const char *name); - - int check_ndb_connection(THD* thd= current_thd); - - void set_rec_per_key(); - int records_update(); - void no_uncommitted_rows_execute_failure(); - void no_uncommitted_rows_update(int); - void no_uncommitted_rows_reset(THD *); - - void release_completed_operations(NdbTransaction*, bool); - - friend int execute_commit(ha_ndbcluster*, NdbTransaction*); - friend int execute_no_commit_ignore_no_key(ha_ndbcluster*, NdbTransaction*); - friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool); - friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool); - - void transaction_checks(THD *thd); - int start_statement(THD *thd, Thd_ndb *thd_ndb, Ndb* ndb); - int init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb); - - NdbTransaction *m_active_trans; - NdbScanOperation *m_active_cursor; - const NdbDictionary::Table *m_table; - struct Ndb_local_table_statistics *m_table_info; - struct Ndb_local_table_statistics m_table_info_instance; - char m_dbname[FN_HEADLEN]; - //char m_schemaname[FN_HEADLEN]; - char m_tabname[FN_HEADLEN]; - ulonglong m_table_flags; - THR_LOCK_DATA m_lock; - bool m_lock_tuple; - NDB_SHARE *m_share; - NDB_INDEX_DATA m_index[MAX_KEY]; - // NdbRecAttr has no reference to blob - NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE]; - uchar m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH]; - partition_info *m_part_info; - uint32 m_part_id; - uchar *m_rec0; - Field **m_part_field_array; - bool m_use_partition_function; - bool m_sorted; - bool m_use_write; - bool m_ignore_dup_key; - bool m_has_unique_index; - bool m_primary_key_update; - bool m_write_op; - bool m_ignore_no_key; - ha_rows m_rows_to_insert; // TODO: merge it with handler::estimation_rows_to_insert? 
- ha_rows m_rows_inserted; - ha_rows m_bulk_insert_rows; - ha_rows m_rows_changed; - bool m_bulk_insert_not_flushed; - bool m_delete_cannot_batch; - bool m_update_cannot_batch; - ha_rows m_ops_pending; - bool m_skip_auto_increment; - bool m_blobs_pending; - bool m_slow_path; - my_ptrdiff_t m_blobs_offset; - // memory for blobs in one tuple - uchar *m_blobs_buffer; - uint32 m_blobs_buffer_size; - uint m_dupkey; - // set from thread variables at external lock - bool m_ha_not_exact_count; - bool m_force_send; - ha_rows m_autoincrement_prefetch; - bool m_transaction_on; - - ha_ndbcluster_cond *m_cond; - bool m_disable_multi_read; - uchar *m_multi_range_result_ptr; - KEY_MULTI_RANGE *m_multi_ranges; - KEY_MULTI_RANGE *m_multi_range_defined; - const NdbOperation *m_current_multi_operation; - NdbIndexScanOperation *m_multi_cursor; - uchar *m_multi_range_cursor_result_ptr; - int setup_recattr(const NdbRecAttr*); - Ndb *get_ndb(); -}; - -extern SHOW_VAR ndb_status_variables[]; - -int ndbcluster_discover(THD* thd, const char* dbname, const char* name, - const void** frmblob, uint* frmlen); -int ndbcluster_find_files(THD *thd,const char *db,const char *path, - const char *wild, bool dir, List *files); -int ndbcluster_table_exists_in_engine(THD* thd, - const char *db, const char *name); -void ndbcluster_print_error(int error, const NdbOperation *error_op); - -static const char ndbcluster_hton_name[]= "ndbcluster"; -static const int ndbcluster_hton_name_length=sizeof(ndbcluster_hton_name)-1; -extern int ndbcluster_terminating; -extern int ndb_util_thread_running; -extern mysql_cond_t COND_ndb_util_ready; - -#endif /* HA_NDBCLUSTER_INCLUDED */ diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc deleted file mode 100644 index 531211eb175..00000000000 --- a/sql/ha_ndbcluster_binlog.cc +++ /dev/null @@ -1,4425 +0,0 @@ -/* Copyright (c) 2006, 2013, Oracle and/or its affiliates. - Copyright (c) 2012, 2013, Monty Proram Ab. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ - -#include "sql_priv.h" -#include "unireg.h" // REQUIRED: for other includes -#include "sql_show.h" -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -#include "ha_ndbcluster.h" - -#ifdef HAVE_NDB_BINLOG -#include "rpl_injector.h" -#include "rpl_filter.h" -#include "slave.h" -#include "ha_ndbcluster_binlog.h" -#include "NdbDictionary.hpp" -#include "ndb_cluster_connection.hpp" -#include - -#include "sql_base.h" // close_thread_tables -#include "sql_table.h" // build_table_filename -#include "table.h" // open_table_from_share -#include "discover.h" // readfrm, writefrm -#include "lock.h" // MYSQL_LOCK_IGNORE_FLUSH, - // mysql_unlock_tables -#include "sql_parse.h" // mysql_parse -#include "transaction.h" - -#ifdef ndb_dynamite -#undef assert -#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0) -#endif - -extern my_bool opt_ndb_log_binlog_index; -extern ulong opt_ndb_extra_logging; -/* - defines for cluster replication table names -*/ -#include "ha_ndbcluster_tables.h" -#define NDB_APPLY_TABLE_FILE "./" NDB_REP_DB "/" NDB_APPLY_TABLE -#define NDB_SCHEMA_TABLE_FILE "./" NDB_REP_DB "/" NDB_SCHEMA_TABLE - -/* - Timeout for syncing schema events between - mysql servers, and between mysql server and the binlog -*/ -static const int DEFAULT_SYNC_TIMEOUT= 120; - - -/* - Flag showing if the ndb injector thread is running, if so == 1 - -1 if it was started but later stopped for some reason - 0 if never started -*/ -static int ndb_binlog_thread_running= 0; - -/* - Flag showing if the ndb binlog should be created, if so == TRUE - FALSE if not -*/ -my_bool ndb_binlog_running= FALSE; -my_bool ndb_binlog_tables_inited= FALSE; - -/* - Global reference to the ndb injector thread THD oject - - Has one sole purpose, for setting the in_use table member variable - in get_share(...) -*/ -THD *injector_thd= 0; - -/* - Global reference to ndb injector thd object. - - Used mainly by the binlog index thread, but exposed to the client sql - thread for one reason; to setup the events operations for a table - to enable ndb injector thread receiving events. - - Must therefore always be used with a surrounding - mysql_mutex_lock(&injector_mutex), when doing create/dropEventOperation -*/ -static Ndb *injector_ndb= 0; -static Ndb *schema_ndb= 0; - -static int ndbcluster_binlog_inited= 0; -/* - Flag "ndbcluster_binlog_terminating" set when shutting down mysqld. - Server main loop should call handlerton function: - - ndbcluster_hton->binlog_func == - ndbcluster_binlog_func(...,BFN_BINLOG_END,...) == - ndbcluster_binlog_end - - at shutdown, which sets the flag. And then server needs to wait for it - to complete. Otherwise binlog will not be complete. 
- - ndbcluster_hton->panic == ndbcluster_end() will not return until - ndb binlog is completed -*/ -static int ndbcluster_binlog_terminating= 0; - -/* - Mutex and condition used for interacting between client sql thread - and injector thread -*/ -pthread_t ndb_binlog_thread; -mysql_mutex_t injector_mutex; -mysql_cond_t injector_cond; - -/* NDB Injector thread (used for binlog creation) */ -static ulonglong ndb_latest_applied_binlog_epoch= 0; -static ulonglong ndb_latest_handled_binlog_epoch= 0; -static ulonglong ndb_latest_received_binlog_epoch= 0; - -NDB_SHARE *ndb_apply_status_share= 0; -NDB_SHARE *ndb_schema_share= 0; -mysql_mutex_t ndb_schema_share_mutex; - -extern my_bool opt_log_slave_updates; -static my_bool g_ndb_log_slave_updates; - -/* Schema object distribution handling */ -HASH ndb_schema_objects; -typedef struct st_ndb_schema_object { - mysql_mutex_t mutex; - char *key; - uint key_length; - uint use_count; - MY_BITMAP slock_bitmap; - uint32 slock[256/32]; // 256 bits for lock status of table -} NDB_SCHEMA_OBJECT; -static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key, - my_bool create_if_not_exists, - my_bool have_lock); -static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object, - bool have_lock); - -static Uint64 *p_latest_trans_gci= 0; - -/* - Global variables for holding the ndb_binlog_index table reference -*/ -static TABLE *ndb_binlog_index= 0; -static TABLE_LIST binlog_tables; - -/* - Helper functions -*/ - -#ifndef DBUG_OFF -/* purecov: begin deadcode */ -static void print_records(TABLE *table, const uchar *record) -{ - for (uint j= 0; j < table->s->fields; j++) - { - char buf[40]; - int pos= 0; - Field *field= table->field[j]; - const uchar* field_ptr= field->ptr - table->record[0] + record; - int pack_len= field->pack_length(); - int n= pack_len < 10 ? pack_len : 10; - - for (int i= 0; i < n && pos < 20; i++) - { - pos+= sprintf(&buf[pos]," %x", (int) (uchar) field_ptr[i]); - } - buf[pos]= 0; - DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf)); - } -} -/* purecov: end */ -#else -#define print_records(a,b) -#endif - - -#ifndef DBUG_OFF -static void dbug_print_table(const char *info, TABLE *table) -{ - if (table == 0) - { - DBUG_PRINT("info",("%s: (null)", info)); - return; - } - DBUG_PRINT("info", - ("%s: %s.%s s->fields: %d " - "reclength: %lu rec_buff_length: %u record[0]: 0x%lx " - "record[1]: 0x%lx", - info, - table->s->db.str, - table->s->table_name.str, - table->s->fields, - table->s->reclength, - table->s->rec_buff_length, - (long) table->record[0], - (long) table->record[1])); - - for (unsigned int i= 0; i < table->s->fields; i++) - { - Field *f= table->field[i]; - DBUG_PRINT("info", - ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d pack_length: %d " - "ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]", - i, - f->field_name, - (long) f->flags, - (f->flags & PRI_KEY_FLAG) ? "pri" : "attr", - (f->flags & NOT_NULL_FLAG) ? "" : ",nullable", - (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed", - (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "", - (f->flags & BLOB_FLAG) ? ",blob" : "", - (f->flags & BINARY_FLAG) ? 
",binary" : "", - f->real_type(), - f->pack_length(), - (long) f->ptr, (int) (f->ptr - table->record[0]), - f->null_bit, - (long) f->null_ptr, - (int) ((uchar*) f->null_ptr - table->record[0]))); - if (f->type() == MYSQL_TYPE_BIT) - { - Field_bit *g= (Field_bit*) f; - DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] " - "bit_ofs: %d bit_len: %u", - g->field_length, (long) g->bit_ptr, - (int) ((uchar*) g->bit_ptr - - table->record[0]), - g->bit_ofs, g->bit_len)); - } - } -} -#else -#define dbug_print_table(a,b) -#endif - - -/* - Run a query through mysql_parse - - Used to: - - purging the ndb_binlog_index - - creating the ndb_apply_status table -*/ -static void run_query(THD *thd, char *buf, char *end, - const int *no_print_error, my_bool disable_binlog) -{ - ulong save_thd_query_length= thd->query_length(); - char *save_thd_query= thd->query(); - ulong save_thread_id= thd->variables.pseudo_thread_id; - struct system_status_var save_thd_status_var= thd->status_var; - THD_TRANS save_thd_transaction_all= thd->transaction.all; - THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt; - ulonglong save_thd_options= thd->variables.option_bits; - DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->variables.option_bits)); - NET save_thd_net= thd->net; - - bzero((char*) &thd->net, sizeof(NET)); - thd->set_query(buf, (uint) (end - buf)); - thd->variables.pseudo_thread_id= thread_id; - thd->transaction.stmt.modified_non_trans_table= FALSE; - if (disable_binlog) - thd->variables.option_bits&= ~OPTION_BIN_LOG; - - DBUG_PRINT("query", ("%s", thd->query())); - - DBUG_ASSERT(!thd->in_sub_stmt); - DBUG_ASSERT(!thd->locked_tables_mode); - - { - Parser_state parser_state; - if (!parser_state.init(thd, thd->query(), thd->query_length())) - mysql_parse(thd, thd->query(), thd->query_length(), &parser_state); - } - - if (no_print_error && thd->is_slave_error) - { - int i; - Thd_ndb *thd_ndb= get_thd_ndb(thd); - for (i= 0; no_print_error[i]; i++) - if ((thd_ndb->m_error_code == no_print_error[i]) || - (thd->get_stmt_da()->sql_errno() == (unsigned) no_print_error[i])) - break; - if (!no_print_error[i]) - sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d", - buf, - thd->get_stmt_da()->message(), - thd->get_stmt_da()->sql_errno(), - thd_ndb->m_error_code, - (int) thd->is_error(), thd->is_slave_error); - } - /* - XXX: this code is broken. mysql_parse()/mysql_reset_thd_for_next_command() - can not be called from within a statement, and - run_query() can be called from anywhere, including from within - a sub-statement. - This particular reset is a temporary hack to avoid an assert - for double assignment of the diagnostics area when run_query() - is called from ndbcluster_reset_logs(), which is called from - mysql_flush(). 
- */ - thd->get_stmt_da()->reset_diagnostics_area(); - - thd->variables.option_bits= save_thd_options; - thd->set_query(save_thd_query, save_thd_query_length); - thd->variables.pseudo_thread_id= save_thread_id; - thd->status_var= save_thd_status_var; - thd->transaction.all= save_thd_transaction_all; - thd->transaction.stmt= save_thd_transaction_stmt; - thd->net= save_thd_net; - thd->set_current_stmt_binlog_format_row(); - - if (thd == injector_thd) - { - /* - running the query will close all tables, including the ndb_binlog_index - used in injector_thd - */ - ndb_binlog_index= 0; - } -} - -static void -ndbcluster_binlog_close_table(THD *thd, NDB_SHARE *share) -{ - DBUG_ENTER("ndbcluster_binlog_close_table"); - if (share->table_share) - { - closefrm(share->table, 1); - share->table_share= 0; - share->table= 0; - } - DBUG_ASSERT(share->table == 0); - DBUG_VOID_RETURN; -} - - -/* - Creates a TABLE object for the ndb cluster table - - NOTES - This does not open the underlying table -*/ - -static int -ndbcluster_binlog_open_table(THD *thd, NDB_SHARE *share, - TABLE_SHARE *table_share, TABLE *table, - int reopen) -{ - int error; - DBUG_ENTER("ndbcluster_binlog_open_table"); - - init_tmp_table_share(thd, table_share, share->db, 0, share->table_name, - share->key); - if ((error= open_table_def(thd, table_share, 0))) - { - DBUG_PRINT("error", ("open_table_def failed: %d my_errno: %d", error, my_errno)); - free_table_share(table_share); - DBUG_RETURN(error); - } - if ((error= open_table_from_share(thd, table_share, "", 0 /* fon't allocate buffers */, - (uint) READ_ALL, 0, table, FALSE))) - { - DBUG_PRINT("error", ("open_table_from_share failed %d my_errno: %d", error, my_errno)); - free_table_share(table_share); - DBUG_RETURN(error); - } - tdc_assign_new_table_id(table_share); - - if (!reopen) - { - // allocate memory on ndb share so it can be reused after online alter table - (void)multi_alloc_root(&share->mem_root, - &(share->record[0]), table->s->rec_buff_length, - &(share->record[1]), table->s->rec_buff_length, - NULL); - } - { - my_ptrdiff_t row_offset= share->record[0] - table->record[0]; - Field **p_field; - for (p_field= table->field; *p_field; p_field++) - (*p_field)->move_field_offset(row_offset); - table->record[0]= share->record[0]; - table->record[1]= share->record[1]; - } - - table->in_use= injector_thd; - - table->s->db.str= share->db; - table->s->db.length= strlen(share->db); - table->s->table_name.str= share->table_name; - table->s->table_name.length= strlen(share->table_name); - - DBUG_ASSERT(share->table_share == 0); - share->table_share= table_share; - DBUG_ASSERT(share->table == 0); - share->table= table; - /* We can't use 'use_all_columns()' as the file object is not setup yet */ - table->column_bitmaps_set_no_signal(&table->s->all_set, &table->s->all_set); -#ifndef DBUG_OFF - dbug_print_table("table", table); -#endif - DBUG_RETURN(0); -} - - -/* - Initialize the binlog part of the NDB_SHARE -*/ -int ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) -{ - THD *thd= current_thd; - MEM_ROOT *mem_root= &share->mem_root; - int do_event_op= ndb_binlog_running; - int error= 0; - DBUG_ENTER("ndbcluster_binlog_init_share"); - - share->connect_count= g_ndb_cluster_connection->get_connect_count(); - - share->op= 0; - share->table= 0; - - if (!ndb_schema_share && - strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0) - do_event_op= 1; - else if (!ndb_apply_status_share && - strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, 
NDB_APPLY_TABLE) == 0) - do_event_op= 1; - - { - int i, no_nodes= g_ndb_cluster_connection->no_db_nodes(); - share->subscriber_bitmap= (MY_BITMAP*) - alloc_root(mem_root, no_nodes * sizeof(MY_BITMAP)); - for (i= 0; i < no_nodes; i++) - { - my_bitmap_init(&share->subscriber_bitmap[i], - (Uint32*)alloc_root(mem_root, max_ndb_nodes/8), - max_ndb_nodes, FALSE); - bitmap_clear_all(&share->subscriber_bitmap[i]); - } - } - - if (!do_event_op) - { - if (_table) - { - if (_table->s->primary_key == MAX_KEY) - share->flags|= NSF_HIDDEN_PK; - if (_table->s->blob_fields != 0) - share->flags|= NSF_BLOB_FLAG; - } - else - { - share->flags|= NSF_NO_BINLOG; - } - DBUG_RETURN(error); - } - while (1) - { - int error; - TABLE_SHARE *table_share= (TABLE_SHARE *) alloc_root(mem_root, sizeof(*table_share)); - TABLE *table= (TABLE*) alloc_root(mem_root, sizeof(*table)); - if ((error= ndbcluster_binlog_open_table(thd, share, table_share, table, 0))) - break; - /* - ! do not touch the contents of the table - it may be in use by the injector thread - */ - MEM_ROOT *mem_root= &share->mem_root; - share->ndb_value[0]= (NdbValue*) - alloc_root(mem_root, sizeof(NdbValue) * - (table->s->fields + 2 /*extra for hidden key and part key*/)); - share->ndb_value[1]= (NdbValue*) - alloc_root(mem_root, sizeof(NdbValue) * - (table->s->fields + 2 /*extra for hidden key and part key*/)); - - if (table->s->primary_key == MAX_KEY) - share->flags|= NSF_HIDDEN_PK; - if (table->s->blob_fields != 0) - share->flags|= NSF_BLOB_FLAG; - break; - } - DBUG_RETURN(error); -} - -/***************************************************************** - functions called from master sql client threads -****************************************************************/ - -/* - called in mysql_show_binlog_events and reset_logs to make sure we wait for - all events originating from this mysql server to arrive in the binlog - - Wait for the last epoch in which the last transaction is a part of. - - Wait a maximum of 30 seconds. -*/ -static void ndbcluster_binlog_wait(THD *thd) -{ - if (ndb_binlog_running) - { - DBUG_ENTER("ndbcluster_binlog_wait"); - const char *save_info= thd ? thd->proc_info : 0; - ulonglong wait_epoch= *p_latest_trans_gci; - int count= 30; - if (thd) - thd->proc_info= "Waiting for ndbcluster binlog update to " - "reach current position"; - while (count && ndb_binlog_running && - ndb_latest_handled_binlog_epoch < wait_epoch) - { - count--; - sleep(1); - } - if (thd) - thd->proc_info= save_info; - DBUG_VOID_RETURN; - } -} - -/* - Called from MYSQL_BIN_LOG::reset_logs in log.cc when binlog is emptied -*/ -static int ndbcluster_reset_logs(THD *thd) -{ - if (!ndb_binlog_running) - return 0; - - DBUG_ENTER("ndbcluster_reset_logs"); - - /* - Wait for all events orifinating from this mysql server has - reached the binlog before continuing to reset - */ - ndbcluster_binlog_wait(thd); - - char buf[1024]; - char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE); - - run_query(thd, buf, end, NULL, TRUE); - - DBUG_RETURN(0); -} - -/* - Called from MYSQL_BIN_LOG::purge_logs in log.cc when the binlog "file" - is removed -*/ - -static int -ndbcluster_binlog_index_purge_file(THD *thd, const char *file) -{ - if (!ndb_binlog_running || thd->slave_thread) - return 0; - - DBUG_ENTER("ndbcluster_binlog_index_purge_file"); - DBUG_PRINT("enter", ("file: %s", file)); - - char buf[1024]; - char *end= strmov(strmov(strmov(buf, - "DELETE FROM " - NDB_REP_DB "." 
NDB_REP_TABLE - " WHERE File='"), file), "'"); - - run_query(thd, buf, end, NULL, TRUE); - - DBUG_RETURN(0); -} - -static void -ndbcluster_binlog_log_query(handlerton *hton, THD *thd, enum_binlog_command binlog_command, - const char *query, uint query_length, - const char *db, const char *table_name) -{ - DBUG_ENTER("ndbcluster_binlog_log_query"); - DBUG_PRINT("enter", ("db: %s table_name: %s query: %s", - db, table_name, query)); - enum SCHEMA_OP_TYPE type; - int log= 0; - switch (binlog_command) - { - case LOGCOM_CREATE_TABLE: - type= SOT_CREATE_TABLE; - DBUG_ASSERT(FALSE); - break; - case LOGCOM_ALTER_TABLE: - type= SOT_ALTER_TABLE; - log= 1; - break; - case LOGCOM_RENAME_TABLE: - type= SOT_RENAME_TABLE; - DBUG_ASSERT(FALSE); - break; - case LOGCOM_DROP_TABLE: - type= SOT_DROP_TABLE; - DBUG_ASSERT(FALSE); - break; - case LOGCOM_CREATE_DB: - type= SOT_CREATE_DB; - log= 1; - break; - case LOGCOM_ALTER_DB: - type= SOT_ALTER_DB; - log= 1; - break; - case LOGCOM_DROP_DB: - type= SOT_DROP_DB; - DBUG_ASSERT(FALSE); - break; - } - if (log) - { - ndbcluster_log_schema_op(thd, 0, query, query_length, - db, table_name, 0, 0, type, - 0, 0); - } - DBUG_VOID_RETURN; -} - - -/* - End use of the NDB Cluster binlog - - wait for binlog thread to shutdown -*/ - -static int ndbcluster_binlog_end(THD *thd) -{ - DBUG_ENTER("ndbcluster_binlog_end"); - - if (!ndbcluster_binlog_inited) - DBUG_RETURN(0); - ndbcluster_binlog_inited= 0; - -#ifdef HAVE_NDB_BINLOG - if (ndb_util_thread_running > 0) - { - /* - Wait for util thread to die (as this uses the injector mutex) - There is a very small change that ndb_util_thread dies and the - following mutex is freed before it's accessed. This shouldn't - however be a likely case as the ndbcluster_binlog_end is supposed to - be called before ndb_cluster_end(). - */ - mysql_mutex_lock(&LOCK_ndb_util_thread); - /* Ensure mutex are not freed if ndb_cluster_end is running at same time */ - ndb_util_thread_running++; - ndbcluster_terminating= 1; - mysql_cond_signal(&COND_ndb_util_thread); - while (ndb_util_thread_running > 1) - mysql_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread); - ndb_util_thread_running--; - mysql_mutex_unlock(&LOCK_ndb_util_thread); - } - - /* wait for injector thread to finish */ - ndbcluster_binlog_terminating= 1; - mysql_mutex_lock(&injector_mutex); - mysql_cond_signal(&injector_cond); - while (ndb_binlog_thread_running > 0) - mysql_cond_wait(&injector_cond, &injector_mutex); - mysql_mutex_unlock(&injector_mutex); - - mysql_mutex_destroy(&injector_mutex); - mysql_cond_destroy(&injector_cond); - mysql_mutex_destroy(&ndb_schema_share_mutex); -#endif - - DBUG_RETURN(0); -} - -/***************************************************************** - functions called from slave sql client threads -****************************************************************/ -static void ndbcluster_reset_slave(THD *thd) -{ - if (!ndb_binlog_running) - return; - - DBUG_ENTER("ndbcluster_reset_slave"); - char buf[1024]; - char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_APPLY_TABLE); - run_query(thd, buf, end, NULL, TRUE); - DBUG_VOID_RETURN; -} - -/* - Initialize the binlog part of the ndb handlerton -*/ - -/** - Upon the sql command flush logs, we need to ensure that all outstanding - ndb data to be logged has made it to the binary log to get a deterministic - behavior on the rotation of the log. 
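Aside on ndbcluster_binlog_end() above: shutdown is a condition-variable handshake: set ndbcluster_binlog_terminating, signal injector_cond, then wait on the same condition variable until ndb_binlog_thread_running drops back to zero before destroying the mutex and condition. A minimal standalone sketch of that handshake is below, assuming std::thread and std::condition_variable in place of the mysys primitives; the worker body is an invented stand-in for the injector loop.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex              injector_mutex;
static std::condition_variable injector_cond;
static bool terminating    = false;
static int  thread_running = 0;

static void injector_thread() {
  std::unique_lock<std::mutex> lock(injector_mutex);
  thread_running = 1;
  injector_cond.notify_all();                 // tell the starter we are up
  while (!terminating)                        // stand-in for the injector event loop
    injector_cond.wait(lock);
  thread_running = 0;
  injector_cond.notify_all();                 // tell the shutdown path we are gone
}

int main() {
  std::thread t(injector_thread);
  {
    std::unique_lock<std::mutex> lock(injector_mutex);
    injector_cond.wait(lock, [] { return thread_running == 1; });
    // ... normal operation would happen here ...
    terminating = true;                       // analogue of ndbcluster_binlog_terminating= 1
    injector_cond.notify_all();
    injector_cond.wait(lock, [] { return thread_running == 0; });
  }
  t.join();
  std::printf("binlog thread stopped\n");
}
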
- */ -static bool ndbcluster_flush_logs(handlerton *hton) -{ - ndbcluster_binlog_wait(current_thd); - return FALSE; -} - -static int ndbcluster_binlog_func(handlerton *hton, THD *thd, - enum_binlog_func fn, - void *arg) -{ - switch(fn) - { - case BFN_RESET_LOGS: - ndbcluster_reset_logs(thd); - break; - case BFN_RESET_SLAVE: - ndbcluster_reset_slave(thd); - break; - case BFN_BINLOG_WAIT: - ndbcluster_binlog_wait(thd); - break; - case BFN_BINLOG_END: - ndbcluster_binlog_end(thd); - break; - case BFN_BINLOG_PURGE_FILE: - ndbcluster_binlog_index_purge_file(thd, (const char *)arg); - break; - } - return 0; -} - -void ndbcluster_binlog_init_handlerton() -{ - handlerton *h= ndbcluster_hton; - h->flush_logs= ndbcluster_flush_logs; - h->binlog_func= ndbcluster_binlog_func; - h->binlog_log_query= ndbcluster_binlog_log_query; -} - - - - - -/* - check the availability af the ndb_apply_status share - - return share, but do not increase refcount - - return 0 if there is no share -*/ -static NDB_SHARE *ndbcluster_check_ndb_apply_status_share() -{ - mysql_mutex_lock(&ndbcluster_mutex); - - void *share= my_hash_search(&ndbcluster_open_tables, - (uchar*) NDB_APPLY_TABLE_FILE, - sizeof(NDB_APPLY_TABLE_FILE) - 1); - DBUG_PRINT("info",("ndbcluster_check_ndb_apply_status_share %s 0x%lx", - NDB_APPLY_TABLE_FILE, (long) share)); - mysql_mutex_unlock(&ndbcluster_mutex); - return (NDB_SHARE*) share; -} - -/* - check the availability af the schema share - - return share, but do not increase refcount - - return 0 if there is no share -*/ -static NDB_SHARE *ndbcluster_check_ndb_schema_share() -{ - mysql_mutex_lock(&ndbcluster_mutex); - - void *share= my_hash_search(&ndbcluster_open_tables, - (uchar*) NDB_SCHEMA_TABLE_FILE, - sizeof(NDB_SCHEMA_TABLE_FILE) - 1); - DBUG_PRINT("info",("ndbcluster_check_ndb_schema_share %s 0x%lx", - NDB_SCHEMA_TABLE_FILE, (long) share)); - mysql_mutex_unlock(&ndbcluster_mutex); - return (NDB_SHARE*) share; -} - -/* - Create the ndb_apply_status table -*/ -static int ndbcluster_create_ndb_apply_status_table(THD *thd) -{ - DBUG_ENTER("ndbcluster_create_ndb_apply_status_table"); - - /* - Check if we already have the apply status table. - If so it should have been discovered at startup - and thus have a share - */ - - if (ndbcluster_check_ndb_apply_status_share()) - DBUG_RETURN(0); - - if (g_ndb_cluster_connection->get_no_ready() <= 0) - DBUG_RETURN(0); - - char buf[1024 + 1], *end; - - if (opt_ndb_extra_logging) - sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_APPLY_TABLE); - - /* - Check if apply status table exists in MySQL "dictionary" - if so, remove it since there is none in Ndb - */ - { - build_table_filename(buf, sizeof(buf) - 1, - NDB_REP_DB, NDB_APPLY_TABLE, reg_ext, 0); - mysql_file_delete(key_file_frm, buf, MYF(0)); - } - - /* - Note, updating this table schema must be reflected in ndb_restore - */ - end= strmov(buf, "CREATE TABLE IF NOT EXISTS " - NDB_REP_DB "." 
NDB_APPLY_TABLE - " ( server_id INT UNSIGNED NOT NULL," - " epoch BIGINT UNSIGNED NOT NULL, " - " log_name VARCHAR(255) BINARY NOT NULL, " - " start_pos BIGINT UNSIGNED NOT NULL, " - " end_pos BIGINT UNSIGNED NOT NULL, " - " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB CHARACTER SET latin1"); - - const int no_print_error[6]= {ER_TABLE_EXISTS_ERROR, - 701, - 702, - 721, // Table already exist - 4009, - 0}; // do not print error 701 etc - run_query(thd, buf, end, no_print_error, TRUE); - - DBUG_RETURN(0); -} - - -/* - Create the schema table -*/ -static int ndbcluster_create_schema_table(THD *thd) -{ - DBUG_ENTER("ndbcluster_create_schema_table"); - - /* - Check if we already have the schema table. - If so it should have been discovered at startup - and thus have a share - */ - - if (ndbcluster_check_ndb_schema_share()) - DBUG_RETURN(0); - - if (g_ndb_cluster_connection->get_no_ready() <= 0) - DBUG_RETURN(0); - - char buf[1024 + 1], *end; - - if (opt_ndb_extra_logging) - sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_SCHEMA_TABLE); - - /* - Check if schema table exists in MySQL "dictionary" - if so, remove it since there is none in Ndb - */ - { - build_table_filename(buf, sizeof(buf) - 1, - NDB_REP_DB, NDB_SCHEMA_TABLE, reg_ext, 0); - mysql_file_delete(key_file_frm, buf, MYF(0)); - } - - /* - Update the defines below to reflect the table schema - */ - end= strmov(buf, "CREATE TABLE IF NOT EXISTS " - NDB_REP_DB "." NDB_SCHEMA_TABLE - " ( db VARBINARY(63) NOT NULL," - " name VARBINARY(63) NOT NULL," - " slock BINARY(32) NOT NULL," - " query BLOB NOT NULL," - " node_id INT UNSIGNED NOT NULL," - " epoch BIGINT UNSIGNED NOT NULL," - " id INT UNSIGNED NOT NULL," - " version INT UNSIGNED NOT NULL," - " type INT UNSIGNED NOT NULL," - " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB CHARACTER SET latin1"); - - const int no_print_error[6]= {ER_TABLE_EXISTS_ERROR, - 701, - 702, - 721, // Table already exist - 4009, - 0}; // do not print error 701 etc - run_query(thd, buf, end, no_print_error, TRUE); - - DBUG_RETURN(0); -} - -int ndbcluster_setup_binlog_table_shares(THD *thd) -{ - if (!ndb_schema_share && - ndbcluster_check_ndb_schema_share() == 0) - { - ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_SCHEMA_TABLE); - if (!ndb_schema_share) - { - ndbcluster_create_schema_table(thd); - // always make sure we create the 'schema' first - if (!ndb_schema_share) - return 1; - } - } - if (!ndb_apply_status_share && - ndbcluster_check_ndb_apply_status_share() == 0) - { - ndb_create_table_from_engine(thd, NDB_REP_DB, NDB_APPLY_TABLE); - if (!ndb_apply_status_share) - { - ndbcluster_create_ndb_apply_status_table(thd); - if (!ndb_apply_status_share) - return 1; - } - } - if (!ndbcluster_find_all_files(thd)) - { - ndb_binlog_tables_inited= TRUE; - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: ndb tables writable"); - close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT); - /* Signal injector thread that all is setup */ - mysql_cond_signal(&injector_cond); - } - return 0; -} - -/* - Defines and struct for schema table. - Should reflect table definition above. 
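Aside on the ndb_schema table created above: its slock BINARY(32) column is a 256-bit acknowledgement bitmap, one bit per potential node id. Roughly, ndbcluster_log_schema_op() (further down) writes the row with the bits of the subscribed mysqld nodes set, each participant later clears its own bit via ndbcluster_update_slock(), and the schema operation counts as fully acknowledged once the bitmap is all zero. A toy sketch of that bookkeeping follows, with std::bitset standing in for MY_BITMAP and begin_schema_op/acknowledge as invented names.

#include <bitset>
#include <cstdio>
#include <initializer_list>

constexpr std::size_t kSlockBits = 256;       // matches BINARY(32) = 32 bytes

// Start a schema op: set one bit per mysqld that must acknowledge it.
std::bitset<kSlockBits> begin_schema_op(std::initializer_list<unsigned> subscribers) {
  std::bitset<kSlockBits> slock;
  for (unsigned node_id : subscribers) slock.set(node_id);
  return slock;
}

// A node acknowledges by clearing its own bit, as ndbcluster_update_slock() does.
void acknowledge(std::bitset<kSlockBits> &slock, unsigned node_id) {
  slock.reset(node_id);
}

int main() {
  auto slock = begin_schema_op({3, 4, 7});
  acknowledge(slock, 4);
  acknowledge(slock, 3);
  acknowledge(slock, 7);
  std::printf("all acknowledged: %s\n", slock.none() ? "yes" : "no");
}
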
-*/ -#define SCHEMA_DB_I 0u -#define SCHEMA_NAME_I 1u -#define SCHEMA_SLOCK_I 2u -#define SCHEMA_QUERY_I 3u -#define SCHEMA_NODE_ID_I 4u -#define SCHEMA_EPOCH_I 5u -#define SCHEMA_ID_I 6u -#define SCHEMA_VERSION_I 7u -#define SCHEMA_TYPE_I 8u -#define SCHEMA_SIZE 9u -#define SCHEMA_SLOCK_SIZE 32u - -struct Cluster_schema -{ - uchar db_length; - char db[64]; - uchar name_length; - char name[64]; - uchar slock_length; - uint32 slock[SCHEMA_SLOCK_SIZE/4]; - unsigned short query_length; - char *query; - Uint64 epoch; - uint32 node_id; - uint32 id; - uint32 version; - uint32 type; - uint32 any_value; -}; - -static void print_could_not_discover_error(THD *thd, - const Cluster_schema *schema) -{ - sql_print_error("NDB Binlog: Could not discover table '%s.%s' from " - "binlog schema event '%s' from node %d. " - "my_errno: %d", - schema->db, schema->name, schema->query, - schema->node_id, my_errno); - List_iterator_fast it(thd->warning_info->warn_list()); - Sql_condition *err; - while ((err= it++)) - sql_print_warning("NDB Binlog: (%d)%s", err->get_sql_errno(), - err->get_message_text()); -} - -/* - Transfer schema table data into corresponding struct -*/ -static void ndbcluster_get_schema(NDB_SHARE *share, - Cluster_schema *s) -{ - TABLE *table= share->table; - Field **field; - /* unpack blob values */ - uchar* blobs_buffer= 0; - uint blobs_buffer_size= 0; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); - { - ptrdiff_t ptrdiff= 0; - int ret= get_ndb_blobs_value(table, share->ndb_value[0], - blobs_buffer, blobs_buffer_size, - ptrdiff); - if (ret != 0) - { - my_free(blobs_buffer); - DBUG_PRINT("info", ("blob read error")); - DBUG_ASSERT(FALSE); - } - } - /* db varchar 1 length uchar */ - field= table->field; - s->db_length= *(uint8*)(*field)->ptr; - DBUG_ASSERT(s->db_length <= (*field)->field_length); - DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->db)); - memcpy(s->db, (*field)->ptr + 1, s->db_length); - s->db[s->db_length]= 0; - /* name varchar 1 length uchar */ - field++; - s->name_length= *(uint8*)(*field)->ptr; - DBUG_ASSERT(s->name_length <= (*field)->field_length); - DBUG_ASSERT((*field)->field_length + 1 == sizeof(s->name)); - memcpy(s->name, (*field)->ptr + 1, s->name_length); - s->name[s->name_length]= 0; - /* slock fixed length */ - field++; - s->slock_length= (*field)->field_length; - DBUG_ASSERT((*field)->field_length == sizeof(s->slock)); - memcpy(s->slock, (*field)->ptr, s->slock_length); - /* query blob */ - field++; - { - Field_blob *field_blob= (Field_blob*)(*field); - uint blob_len= field_blob->get_length((*field)->ptr); - uchar *blob_ptr= 0; - field_blob->get_ptr(&blob_ptr); - DBUG_ASSERT(blob_len == 0 || blob_ptr != 0); - s->query_length= blob_len; - s->query= sql_strmake((char*) blob_ptr, blob_len); - } - /* node_id */ - field++; - s->node_id= ((Field_long *)*field)->val_int(); - /* epoch */ - field++; - s->epoch= ((Field_long *)*field)->val_int(); - /* id */ - field++; - s->id= ((Field_long *)*field)->val_int(); - /* version */ - field++; - s->version= ((Field_long *)*field)->val_int(); - /* type */ - field++; - s->type= ((Field_long *)*field)->val_int(); - /* free blobs buffer */ - my_free(blobs_buffer); - dbug_tmp_restore_column_map(table->read_set, old_map); -} - -/* - helper function to pack a ndb varchar -*/ -char *ndb_pack_varchar(const NDBCOL *col, char *buf, - const char *str, int sz) -{ - switch (col->getArrayType()) - { - case NDBCOL::ArrayTypeFixed: - memcpy(buf, str, sz); - break; - case NDBCOL::ArrayTypeShortVar: - *(uchar*)buf= 
(uchar)sz; - memcpy(buf + 1, str, sz); - break; - case NDBCOL::ArrayTypeMediumVar: - int2store(buf, sz); - memcpy(buf + 2, str, sz); - break; - } - return buf; -} - -/* - acknowledge handling of schema operation -*/ -static int -ndbcluster_update_slock(THD *thd, - const char *db, - const char *table_name) -{ - DBUG_ENTER("ndbcluster_update_slock"); - if (!ndb_schema_share) - { - DBUG_RETURN(0); - } - - const NdbError *ndb_error= 0; - uint32 node_id= g_ndb_cluster_connection->node_id(); - Ndb *ndb= check_ndb_in_thd(thd); - char save_db[FN_HEADLEN]; - strcpy(save_db, ndb->getDatabaseName()); - - char tmp_buf[FN_REFLEN]; - NDBDICT *dict= ndb->getDictionary(); - ndb->setDatabaseName(NDB_REP_DB); - Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - NdbTransaction *trans= 0; - int retries= 100; - int retry_sleep= 10; /* 10 milliseconds, transaction */ - const NDBCOL *col[SCHEMA_SIZE]; - unsigned sz[SCHEMA_SIZE]; - - MY_BITMAP slock; - uint32 bitbuf[SCHEMA_SLOCK_SIZE/4]; - my_bitmap_init(&slock, bitbuf, sizeof(bitbuf)*8, false); - - if (ndbtab == 0) - { - abort(); - DBUG_RETURN(0); - } - - { - uint i; - for (i= 0; i < SCHEMA_SIZE; i++) - { - col[i]= ndbtab->getColumn(i); - if (i != SCHEMA_QUERY_I) - { - sz[i]= col[i]->getLength(); - DBUG_ASSERT(sz[i] <= sizeof(tmp_buf)); - } - } - } - - while (1) - { - if ((trans= ndb->startTransaction()) == 0) - goto err; - { - NdbOperation *op= 0; - int r= 0; - - /* read the bitmap exlusive */ - r|= (op= trans->getNdbOperation(ndbtab)) == 0; - DBUG_ASSERT(r == 0); - r|= op->readTupleExclusive(); - DBUG_ASSERT(r == 0); - - /* db */ - ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db)); - r|= op->equal(SCHEMA_DB_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* name */ - ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name, - strlen(table_name)); - r|= op->equal(SCHEMA_NAME_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* slock */ - r|= op->getValue(SCHEMA_SLOCK_I, (char*)slock.bitmap) == 0; - DBUG_ASSERT(r == 0); - } - if (trans->execute(NdbTransaction::NoCommit)) - goto err; - bitmap_clear_bit(&slock, node_id); - { - NdbOperation *op= 0; - int r= 0; - - /* now update the tuple */ - r|= (op= trans->getNdbOperation(ndbtab)) == 0; - DBUG_ASSERT(r == 0); - r|= op->updateTuple(); - DBUG_ASSERT(r == 0); - - /* db */ - ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, db, strlen(db)); - r|= op->equal(SCHEMA_DB_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* name */ - ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, table_name, - strlen(table_name)); - r|= op->equal(SCHEMA_NAME_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* slock */ - r|= op->setValue(SCHEMA_SLOCK_I, (char*)slock.bitmap); - DBUG_ASSERT(r == 0); - /* node_id */ - r|= op->setValue(SCHEMA_NODE_ID_I, node_id); - DBUG_ASSERT(r == 0); - /* type */ - r|= op->setValue(SCHEMA_TYPE_I, (uint32)SOT_CLEAR_SLOCK); - DBUG_ASSERT(r == 0); - } - if (trans->execute(NdbTransaction::Commit) == 0) - { - dict->forceGCPWait(); - DBUG_PRINT("info", ("node %d cleared lock on '%s.%s'", - node_id, db, table_name)); - break; - } - err: - const NdbError *this_error= trans ? 
- &trans->getNdbError() : &ndb->getNdbError(); - if (this_error->status == NdbError::TemporaryError) - { - if (retries--) - { - if (trans) - ndb->closeTransaction(trans); - my_sleep(retry_sleep); - continue; // retry - } - } - ndb_error= this_error; - break; - } - - if (ndb_error) - { - char buf[1024]; - my_snprintf(buf, sizeof(buf), "Could not release lock on '%s.%s'", - db, table_name); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndb_error->code, ndb_error->message, buf); - } - if (trans) - ndb->closeTransaction(trans); - ndb->setDatabaseName(save_db); - DBUG_RETURN(0); -} - -/* - log query in schema table -*/ -static void ndb_report_waiting(const char *key, - int the_time, - const char *op, - const char *obj) -{ - ulonglong ndb_latest_epoch= 0; - const char *proc_info= ""; - mysql_mutex_lock(&injector_mutex); - if (injector_ndb) - ndb_latest_epoch= injector_ndb->getLatestGCI(); - if (injector_thd) - proc_info= injector_thd->proc_info; - mysql_mutex_unlock(&injector_mutex); - sql_print_information("NDB %s:" - " waiting max %u sec for %s %s." - " epochs: (%u,%u,%u)" - " injector proc_info: %s" - ,key, the_time, op, obj - ,(uint)ndb_latest_handled_binlog_epoch - ,(uint)ndb_latest_received_binlog_epoch - ,(uint)ndb_latest_epoch - ,proc_info - ); -} - -int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, - const char *query, int query_length, - const char *db, const char *table_name, - uint32 ndb_table_id, - uint32 ndb_table_version, - enum SCHEMA_OP_TYPE type, - const char *new_db, const char *new_table_name) -{ - DBUG_ENTER("ndbcluster_log_schema_op"); - Thd_ndb *thd_ndb= get_thd_ndb(thd); - if (!thd_ndb) - { - if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - { - sql_print_error("Could not allocate Thd_ndb object"); - DBUG_RETURN(1); - } - set_thd_ndb(thd, thd_ndb); - } - - DBUG_PRINT("enter", - ("query: %s db: %s table_name: %s thd_ndb->options: %d", - query, db, table_name, thd_ndb->options)); - if (!ndb_schema_share || thd_ndb->options & TNO_NO_LOG_SCHEMA_OP) - { - DBUG_RETURN(0); - } - - char tmp_buf2_mem[FN_REFLEN]; - String tmp_buf2(tmp_buf2_mem, sizeof(tmp_buf2_mem), system_charset_info); - tmp_buf2.length(0); - const char *type_str; - switch (type) - { - case SOT_DROP_TABLE: - /* drop database command, do not log at drop table */ - if (thd->lex->sql_command == SQLCOM_DROP_DB) - DBUG_RETURN(0); - /* redo the drop table query as is may contain several tables */ - tmp_buf2.append(STRING_WITH_LEN("drop table ")); - append_identifier(thd, &tmp_buf2, table_name, strlen(table_name)); - query= tmp_buf2.c_ptr_safe(); - query_length= tmp_buf2.length(); - type_str= "drop table"; - break; - case SOT_RENAME_TABLE: - /* redo the rename table query as is may contain several tables */ - tmp_buf2.append(STRING_WITH_LEN("rename table ")); - append_identifier(thd, &tmp_buf2, db, strlen(db)); - tmp_buf2.append(STRING_WITH_LEN(".")); - append_identifier(thd, &tmp_buf2, table_name, strlen(table_name)); - tmp_buf2.append(STRING_WITH_LEN(" to ")); - append_identifier(thd, &tmp_buf2, new_db, strlen(new_db)); - tmp_buf2.append(STRING_WITH_LEN(".")); - append_identifier(thd, &tmp_buf2, new_table_name, strlen(new_table_name)); - query= tmp_buf2.c_ptr_safe(); - query_length= tmp_buf2.length(); - type_str= "rename table"; - break; - case SOT_CREATE_TABLE: - type_str= "create table"; - break; - case SOT_ALTER_TABLE: - type_str= "alter table"; - break; - case SOT_DROP_DB: - type_str= "drop db"; - break; - case SOT_CREATE_DB: - type_str= "create db"; - 
break; - case SOT_ALTER_DB: - type_str= "alter db"; - break; - case SOT_TABLESPACE: - type_str= "tablespace"; - break; - case SOT_LOGFILE_GROUP: - type_str= "logfile group"; - break; - case SOT_TRUNCATE_TABLE: - type_str= "truncate table"; - break; - default: - abort(); /* should not happen, programming error */ - } - - NDB_SCHEMA_OBJECT *ndb_schema_object; - { - char key[FN_REFLEN + 1]; - build_table_filename(key, sizeof(key) - 1, db, table_name, "", 0); - ndb_schema_object= ndb_get_schema_object(key, TRUE, FALSE); - } - - const NdbError *ndb_error= 0; - uint32 node_id= g_ndb_cluster_connection->node_id(); - Uint64 epoch= 0; - MY_BITMAP schema_subscribers; - uint32 bitbuf[sizeof(ndb_schema_object->slock)/4]; - char bitbuf_e[sizeof(bitbuf)]; - bzero(bitbuf_e, sizeof(bitbuf_e)); - { - int i, updated= 0; - int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes(); - my_bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE); - bitmap_set_all(&schema_subscribers); - - /* begin protect ndb_schema_share */ - mysql_mutex_lock(&ndb_schema_share_mutex); - if (ndb_schema_share == 0) - { - mysql_mutex_unlock(&ndb_schema_share_mutex); - if (ndb_schema_object) - ndb_free_schema_object(&ndb_schema_object, FALSE); - DBUG_RETURN(0); - } - mysql_mutex_lock(&ndb_schema_share->mutex); - for (i= 0; i < no_storage_nodes; i++) - { - MY_BITMAP *table_subscribers= &ndb_schema_share->subscriber_bitmap[i]; - if (!bitmap_is_clear_all(table_subscribers)) - { - bitmap_intersect(&schema_subscribers, - table_subscribers); - updated= 1; - } - } - mysql_mutex_unlock(&ndb_schema_share->mutex); - mysql_mutex_unlock(&ndb_schema_share_mutex); - /* end protect ndb_schema_share */ - - if (updated) - { - bitmap_clear_bit(&schema_subscribers, node_id); - /* - if setting own acknowledge bit it is important that - no other mysqld's are registred, as subsequent code - will cause the original event to be hidden (by blob - merge event code) - */ - if (bitmap_is_clear_all(&schema_subscribers)) - bitmap_set_bit(&schema_subscribers, node_id); - } - else - bitmap_clear_all(&schema_subscribers); - - if (ndb_schema_object) - { - mysql_mutex_lock(&ndb_schema_object->mutex); - memcpy(ndb_schema_object->slock, schema_subscribers.bitmap, - sizeof(ndb_schema_object->slock)); - mysql_mutex_unlock(&ndb_schema_object->mutex); - } - - DBUG_DUMP("schema_subscribers", (uchar*)schema_subscribers.bitmap, - no_bytes_in_map(&schema_subscribers)); - DBUG_PRINT("info", ("bitmap_is_clear_all(&schema_subscribers): %d", - bitmap_is_clear_all(&schema_subscribers))); - } - - Ndb *ndb= thd_ndb->ndb; - char save_db[FN_REFLEN]; - strcpy(save_db, ndb->getDatabaseName()); - - char tmp_buf[FN_REFLEN]; - NDBDICT *dict= ndb->getDictionary(); - ndb->setDatabaseName(NDB_REP_DB); - Ndb_table_guard ndbtab_g(dict, NDB_SCHEMA_TABLE); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - NdbTransaction *trans= 0; - int retries= 100; - int retry_sleep= 10; /* 10 milliseconds, transaction */ - const NDBCOL *col[SCHEMA_SIZE]; - unsigned sz[SCHEMA_SIZE]; - - if (ndbtab == 0) - { - if (strcmp(NDB_REP_DB, db) != 0 || - strcmp(NDB_SCHEMA_TABLE, table_name)) - { - ndb_error= &dict->getNdbError(); - } - goto end; - } - - { - uint i; - for (i= 0; i < SCHEMA_SIZE; i++) - { - col[i]= ndbtab->getColumn(i); - if (i != SCHEMA_QUERY_I) - { - sz[i]= col[i]->getLength(); - DBUG_ASSERT(sz[i] <= sizeof(tmp_buf)); - } - } - } - - while (1) - { - const char *log_db= db; - const char *log_tab= table_name; - const char *log_subscribers= (char*)schema_subscribers.bitmap; - uint32 log_type= 
(uint32)type; - if ((trans= ndb->startTransaction()) == 0) - goto err; - while (1) - { - NdbOperation *op= 0; - int r= 0; - r|= (op= trans->getNdbOperation(ndbtab)) == 0; - DBUG_ASSERT(r == 0); - r|= op->writeTuple(); - DBUG_ASSERT(r == 0); - - /* db */ - ndb_pack_varchar(col[SCHEMA_DB_I], tmp_buf, log_db, strlen(log_db)); - r|= op->equal(SCHEMA_DB_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* name */ - ndb_pack_varchar(col[SCHEMA_NAME_I], tmp_buf, log_tab, - strlen(log_tab)); - r|= op->equal(SCHEMA_NAME_I, tmp_buf); - DBUG_ASSERT(r == 0); - /* slock */ - DBUG_ASSERT(sz[SCHEMA_SLOCK_I] == sizeof(bitbuf)); - r|= op->setValue(SCHEMA_SLOCK_I, log_subscribers); - DBUG_ASSERT(r == 0); - /* query */ - { - NdbBlob *ndb_blob= op->getBlobHandle(SCHEMA_QUERY_I); - DBUG_ASSERT(ndb_blob != 0); - uint blob_len= query_length; - const char* blob_ptr= query; - r|= ndb_blob->setValue(blob_ptr, blob_len); - DBUG_ASSERT(r == 0); - } - /* node_id */ - r|= op->setValue(SCHEMA_NODE_ID_I, node_id); - DBUG_ASSERT(r == 0); - /* epoch */ - r|= op->setValue(SCHEMA_EPOCH_I, epoch); - DBUG_ASSERT(r == 0); - /* id */ - r|= op->setValue(SCHEMA_ID_I, ndb_table_id); - DBUG_ASSERT(r == 0); - /* version */ - r|= op->setValue(SCHEMA_VERSION_I, ndb_table_version); - DBUG_ASSERT(r == 0); - /* type */ - r|= op->setValue(SCHEMA_TYPE_I, log_type); - DBUG_ASSERT(r == 0); - /* any value */ - if (!(thd->variables.option_bits & OPTION_BIN_LOG)) - r|= op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING); - else - r|= op->setAnyValue(thd->server_id); - DBUG_ASSERT(r == 0); - if (log_db != new_db && new_db && new_table_name) - { - log_db= new_db; - log_tab= new_table_name; - log_subscribers= bitbuf_e; // no ack expected on this - log_type= (uint32)SOT_RENAME_TABLE_NEW; - continue; - } - break; - } - if (trans->execute(NdbTransaction::Commit) == 0) - { - DBUG_PRINT("info", ("logged: %s", query)); - break; - } -err: - const NdbError *this_error= trans ? 
- &trans->getNdbError() : &ndb->getNdbError(); - if (this_error->status == NdbError::TemporaryError) - { - if (retries--) - { - if (trans) - ndb->closeTransaction(trans); - my_sleep(retry_sleep); - continue; // retry - } - } - ndb_error= this_error; - break; - } -end: - if (ndb_error) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndb_error->code, - ndb_error->message, - "Could not log query '%s' on other mysqld's"); - - if (trans) - ndb->closeTransaction(trans); - ndb->setDatabaseName(save_db); - - /* - Wait for other mysqld's to acknowledge the table operation - */ - if (ndb_error == 0 && - !bitmap_is_clear_all(&schema_subscribers)) - { - /* - if own nodeid is set we are a single mysqld registred - as an optimization we update the slock directly - */ - if (bitmap_is_set(&schema_subscribers, node_id)) - ndbcluster_update_slock(thd, db, table_name); - else - dict->forceGCPWait(); - - int max_timeout= DEFAULT_SYNC_TIMEOUT; - mysql_mutex_lock(&ndb_schema_object->mutex); - while (1) - { - struct timespec abstime; - int i; - int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes(); - set_timespec(abstime, 1); - int ret= mysql_cond_timedwait(&injector_cond, - &ndb_schema_object->mutex, - &abstime); - if (thd->killed) - break; - - /* begin protect ndb_schema_share */ - mysql_mutex_lock(&ndb_schema_share_mutex); - if (ndb_schema_share == 0) - { - mysql_mutex_unlock(&ndb_schema_share_mutex); - break; - } - mysql_mutex_lock(&ndb_schema_share->mutex); - for (i= 0; i < no_storage_nodes; i++) - { - /* remove any unsubscribed from schema_subscribers */ - MY_BITMAP *tmp= &ndb_schema_share->subscriber_bitmap[i]; - if (!bitmap_is_clear_all(tmp)) - bitmap_intersect(&schema_subscribers, tmp); - } - mysql_mutex_unlock(&ndb_schema_share->mutex); - mysql_mutex_unlock(&ndb_schema_share_mutex); - /* end protect ndb_schema_share */ - - /* remove any unsubscribed from ndb_schema_object->slock */ - bitmap_intersect(&ndb_schema_object->slock_bitmap, &schema_subscribers); - - DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap", - (uchar*)ndb_schema_object->slock_bitmap.bitmap, - no_bytes_in_map(&ndb_schema_object->slock_bitmap)); - - if (bitmap_is_clear_all(&ndb_schema_object->slock_bitmap)) - break; - - if (ret) - { - max_timeout--; - if (max_timeout == 0) - { - sql_print_error("NDB %s: distributing %s timed out. 
Ignoring...", - type_str, ndb_schema_object->key); - break; - } - if (opt_ndb_extra_logging) - ndb_report_waiting(type_str, max_timeout, - "distributing", ndb_schema_object->key); - } - } - mysql_mutex_unlock(&ndb_schema_object->mutex); - } - - if (ndb_schema_object) - ndb_free_schema_object(&ndb_schema_object, FALSE); - - DBUG_RETURN(0); -} - -/* - Handle _non_ data events from the storage nodes -*/ -int -ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, - NDB_SHARE *share) -{ - DBUG_ENTER("ndb_handle_schema_change"); - TABLE* table= share->table; - TABLE_SHARE *table_share= share->table_share; - const char *dbname= table_share->db.str; - const char *tabname= table_share->table_name.str; - bool do_close_cached_tables= FALSE; - bool is_online_alter_table= FALSE; - bool is_rename_table= FALSE; - bool is_remote_change= - (uint) pOp->getReqNodeId() != g_ndb_cluster_connection->node_id(); - - if (pOp->getEventType() == NDBEVENT::TE_ALTER) - { - if (pOp->tableFrmChanged()) - { - DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: table frm changed")); - is_online_alter_table= TRUE; - } - else - { - DBUG_PRINT("info", ("NDBEVENT::TE_ALTER: name changed")); - DBUG_ASSERT(pOp->tableNameChanged()); - is_rename_table= TRUE; - } - } - - { - ndb->setDatabaseName(dbname); - Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname); - const NDBTAB *ev_tab= pOp->getTable(); - const NDBTAB *cache_tab= ndbtab_g.get_table(); - if (cache_tab && - cache_tab->getObjectId() == ev_tab->getObjectId() && - cache_tab->getObjectVersion() <= ev_tab->getObjectVersion()) - ndbtab_g.invalidate(); - } - - /* - Refresh local frm file and dictionary cache if - remote on-line alter table - */ - if (is_remote_change && is_online_alter_table) - { - const char *tabname= table_share->table_name.str; - char key[FN_REFLEN + 1]; - uchar *data= 0, *pack_data= 0; - size_t length, pack_length; - int error; - NDBDICT *dict= ndb->getDictionary(); - const NDBTAB *altered_table= pOp->getTable(); - - DBUG_PRINT("info", ("Detected frm change of table %s.%s", - dbname, tabname)); - build_table_filename(key, FN_LEN - 1, dbname, tabname, NullS, 0); - /* - If the there is no local table shadowing the altered table and - it has an frm that is different than the one on disk then - overwrite it with the new table definition - */ - if (!ndbcluster_check_if_local_table(dbname, tabname) && - readfrm(key, &data, &length) == 0 && - packfrm(data, length, &pack_data, &pack_length) == 0 && - cmp_frm(altered_table, pack_data, pack_length)) - { - DBUG_DUMP("frm", (uchar*) altered_table->getFrmData(), - altered_table->getFrmLength()); - Ndb_table_guard ndbtab_g(dict, tabname); - const NDBTAB *old= ndbtab_g.get_table(); - if (!old && - old->getObjectVersion() != altered_table->getObjectVersion()) - dict->putTable(altered_table); - - my_free(data); - data= NULL; - if ((error= unpackfrm(&data, &length, - (const uchar*) altered_table->getFrmData())) || - (error= writefrm(key, data, length))) - { - sql_print_information("NDB: Failed write frm for %s.%s, error %d", - dbname, tabname, error); - } - - // copy names as memory will be freed - NdbAutoPtr a1((char *)(dbname= strdup(dbname))); - NdbAutoPtr a2((char *)(tabname= strdup(tabname))); - ndbcluster_binlog_close_table(thd, share); - - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= (char *)dbname; - table_list.alias= table_list.table_name= (char *)tabname; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - - if ((error= 
ndbcluster_binlog_open_table(thd, share, - table_share, table, 1))) - sql_print_information("NDB: Failed to re-open table %s.%s", - dbname, tabname); - - table= share->table; - table_share= share->table_share; - dbname= table_share->db.str; - tabname= table_share->table_name.str; - } - my_free(data); - my_free(pack_data); - } - - // If only frm was changed continue replicating - if (is_online_alter_table) - { - /* Signal ha_ndbcluster::alter_table that drop is done */ - mysql_cond_signal(&injector_cond); - DBUG_RETURN(0); - } - - mysql_mutex_lock(&share->mutex); - if (is_rename_table && !is_remote_change) - { - DBUG_PRINT("info", ("Detected name change of table %s.%s", - share->db, share->table_name)); - /* ToDo: remove printout */ - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.", - share_prefix, share->table->s->db.str, - share->table->s->table_name.str, - share->key); - { - ndb->setDatabaseName(share->table->s->db.str); - Ndb_table_guard ndbtab_g(ndb->getDictionary(), - share->table->s->table_name.str); - const NDBTAB *ev_tab= pOp->getTable(); - const NDBTAB *cache_tab= ndbtab_g.get_table(); - if (cache_tab && - cache_tab->getObjectId() == ev_tab->getObjectId() && - cache_tab->getObjectVersion() <= ev_tab->getObjectVersion()) - ndbtab_g.invalidate(); - } - /* do the rename of the table in the share */ - share->table->s->db.str= share->db; - share->table->s->db.length= strlen(share->db); - share->table->s->table_name.str= share->table_name; - share->table->s->table_name.length= strlen(share->table_name); - } - DBUG_ASSERT(share->op == pOp || share->op_old == pOp); - if (share->op_old == pOp) - share->op_old= 0; - else - share->op= 0; - // either just us or drop table handling as well - - /* Signal ha_ndbcluster::delete/rename_table that drop is done */ - mysql_mutex_unlock(&share->mutex); - mysql_cond_signal(&injector_cond); - - mysql_mutex_lock(&ndbcluster_mutex); - /* ndb_share reference binlog free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - if (is_remote_change && share && share->state != NSS_DROPPED) - { - DBUG_PRINT("info", ("remote change")); - share->state= NSS_DROPPED; - if (share->use_count != 1) - { - /* open handler holding reference */ - /* wait with freeing create ndb_share to below */ - do_close_cached_tables= TRUE; - } - else - { - /* ndb_share reference create free */ - DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", - share->key, share->use_count)); - free_share(&share, TRUE); - share= 0; - } - } - else - share= 0; - mysql_mutex_unlock(&ndbcluster_mutex); - - pOp->setCustomData(0); - - mysql_mutex_lock(&injector_mutex); - ndb->dropEventOperation(pOp); - pOp= 0; - mysql_mutex_unlock(&injector_mutex); - - if (do_close_cached_tables) - { - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= (char *)dbname; - table_list.alias= table_list.table_name= (char *)tabname; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - /* ndb_share reference create free */ - DBUG_PRINT("NDB_SHARE", ("%s create free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } - DBUG_RETURN(0); -} - -static void ndb_binlog_query(THD *thd, Cluster_schema *schema) -{ - if (schema->any_value & NDB_ANYVALUE_RESERVED) - { - if (schema->any_value != NDB_ANYVALUE_FOR_NOLOGGING) - sql_print_warning("NDB: unknown value for binlog signalling 0x%X, " - "query not logged", - schema->any_value); - return; - } - 
uint32 thd_server_id_save= thd->server_id; - DBUG_ASSERT(sizeof(thd_server_id_save) == sizeof(thd->server_id)); - char *thd_db_save= thd->db; - if (schema->any_value == 0) - thd->server_id= ::server_id; - else - thd->server_id= schema->any_value; - thd->db= schema->db; - int errcode = query_error_code(thd, thd->killed == NOT_KILLED); - thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query, - schema->query_length, FALSE, TRUE, - schema->name[0] == 0 || thd->db[0] == 0, - errcode); - thd->server_id= thd_server_id_save; - thd->db= thd_db_save; -} - -static int -ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, - NdbEventOperation *pOp, - List - *post_epoch_log_list, - List - *post_epoch_unlock_list, - MEM_ROOT *mem_root) -{ - DBUG_ENTER("ndb_binlog_thread_handle_schema_event"); - NDB_SHARE *tmp_share= (NDB_SHARE *)pOp->getCustomData(); - if (tmp_share && ndb_schema_share == tmp_share) - { - NDBEVENT::TableEvent ev_type= pOp->getEventType(); - DBUG_PRINT("enter", ("%s.%s ev_type: %d", - tmp_share->db, tmp_share->table_name, ev_type)); - if (ev_type == NDBEVENT::TE_UPDATE || - ev_type == NDBEVENT::TE_INSERT) - { - Cluster_schema *schema= (Cluster_schema *) - sql_alloc(sizeof(Cluster_schema)); - MY_BITMAP slock; - my_bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE); - uint node_id= g_ndb_cluster_connection->node_id(); - { - ndbcluster_get_schema(tmp_share, schema); - schema->any_value= pOp->getAnyValue(); - } - enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type; - DBUG_PRINT("info", - ("%s.%s: log query_length: %d query: '%s' type: %d", - schema->db, schema->name, - schema->query_length, schema->query, - schema_type)); - if (schema_type == SOT_CLEAR_SLOCK) - { - /* - handle slock after epoch is completed to ensure that - schema events get inserted in the binlog after any data - events - */ - post_epoch_log_list->push_back(schema, mem_root); - DBUG_RETURN(0); - } - if (schema->node_id != node_id) - { - int log_query= 0, post_epoch_unlock= 0; - switch (schema_type) - { - case SOT_DROP_TABLE: - // fall through - case SOT_RENAME_TABLE: - // fall through - case SOT_RENAME_TABLE_NEW: - // fall through - case SOT_ALTER_TABLE: - post_epoch_log_list->push_back(schema, mem_root); - /* acknowledge this query _after_ epoch completion */ - post_epoch_unlock= 1; - break; - case SOT_TRUNCATE_TABLE: - { - char key[FN_REFLEN + 1]; - build_table_filename(key, sizeof(key) - 1, - schema->db, schema->name, "", 0); - /* ndb_share reference temporary, free below */ - NDB_SHARE *share= get_share(key, 0, FALSE, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - } - // invalidation already handled by binlog thread - if (!share || !share->op) - { - { - injector_ndb->setDatabaseName(schema->db); - Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(), - schema->name); - ndbtab_g.invalidate(); - } - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= schema->db; - table_list.alias= table_list.table_name= schema->name; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - } - /* ndb_share reference temporary free */ - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - } - } - // fall through - case SOT_CREATE_TABLE: - if (ndbcluster_check_if_local_table(schema->db, schema->name)) - { - DBUG_PRINT("info", ("NDB Binlog: Skipping locally defined table '%s.%s'", - schema->db, schema->name)); - 
sql_print_error("NDB Binlog: Skipping locally defined table '%s.%s' from " - "binlog schema event '%s' from node %d. ", - schema->db, schema->name, schema->query, - schema->node_id); - } - else if (ndb_create_table_from_engine(thd, schema->db, schema->name)) - { - print_could_not_discover_error(thd, schema); - } - log_query= 1; - break; - case SOT_DROP_DB: - /* Drop the database locally if it only contains ndb tables */ - if (! ndbcluster_check_if_local_tables_in_db(thd, schema->db)) - { - const int no_print_error[1]= {0}; - run_query(thd, schema->query, - schema->query + schema->query_length, - no_print_error, /* print error */ - TRUE); /* don't binlog the query */ - /* binlog dropping database after any table operations */ - post_epoch_log_list->push_back(schema, mem_root); - /* acknowledge this query _after_ epoch completion */ - post_epoch_unlock= 1; - } - else - { - /* Database contained local tables, leave it */ - sql_print_error("NDB Binlog: Skipping drop database '%s' since it contained local tables " - "binlog schema event '%s' from node %d. ", - schema->db, schema->query, - schema->node_id); - log_query= 1; - } - break; - case SOT_CREATE_DB: - /* fall through */ - case SOT_ALTER_DB: - { - const int no_print_error[1]= {0}; - run_query(thd, schema->query, - schema->query + schema->query_length, - no_print_error, /* print error */ - TRUE); /* don't binlog the query */ - log_query= 1; - break; - } - case SOT_TABLESPACE: - case SOT_LOGFILE_GROUP: - log_query= 1; - break; - case SOT_CLEAR_SLOCK: - abort(); - } - if (log_query && ndb_binlog_running) - ndb_binlog_query(thd, schema); - /* signal that schema operation has been handled */ - DBUG_DUMP("slock", (uchar*) schema->slock, schema->slock_length); - if (bitmap_is_set(&slock, node_id)) - { - if (post_epoch_unlock) - post_epoch_unlock_list->push_back(schema, mem_root); - else - ndbcluster_update_slock(thd, schema->db, schema->name); - } - } - DBUG_RETURN(0); - } - /* - the normal case of UPDATE/INSERT has already been handled - */ - switch (ev_type) - { - case NDBEVENT::TE_DELETE: - // skip - break; - case NDBEVENT::TE_CLUSTER_FAILURE: - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.", - ndb_schema_share->key, (unsigned) pOp->getGCI()); - // fall through - case NDBEVENT::TE_DROP: - if (opt_ndb_extra_logging && - ndb_binlog_tables_inited && ndb_binlog_running) - sql_print_information("NDB Binlog: ndb tables initially " - "read only on reconnect."); - - /* begin protect ndb_schema_share */ - mysql_mutex_lock(&ndb_schema_share_mutex); - /* ndb_share reference binlog extra free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u", - ndb_schema_share->key, - ndb_schema_share->use_count)); - free_share(&ndb_schema_share); - ndb_schema_share= 0; - ndb_binlog_tables_inited= 0; - mysql_mutex_unlock(&ndb_schema_share_mutex); - /* end protect ndb_schema_share */ - - close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT); - // fall through - case NDBEVENT::TE_ALTER: - ndb_handle_schema_change(thd, ndb, pOp, tmp_share); - break; - case NDBEVENT::TE_NODE_FAILURE: - { - uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; - DBUG_ASSERT(node_id != 0xFF); - mysql_mutex_lock(&tmp_share->mutex); - bitmap_clear_all(&tmp_share->subscriber_bitmap[node_id]); - DBUG_PRINT("info",("NODE_FAILURE UNSUBSCRIBE[%d]", node_id)); - if (opt_ndb_extra_logging) - { - sql_print_information("NDB Binlog: Node: %d, down," - " Subscriber bitmask %x%x", - pOp->getNdbdNodeId(), - 
tmp_share->subscriber_bitmap[node_id].bitmap[1], - tmp_share->subscriber_bitmap[node_id].bitmap[0]); - } - mysql_mutex_unlock(&tmp_share->mutex); - mysql_cond_signal(&injector_cond); - break; - } - case NDBEVENT::TE_SUBSCRIBE: - { - uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; - uint8 req_id= pOp->getReqNodeId(); - DBUG_ASSERT(req_id != 0 && node_id != 0xFF); - mysql_mutex_lock(&tmp_share->mutex); - bitmap_set_bit(&tmp_share->subscriber_bitmap[node_id], req_id); - DBUG_PRINT("info",("SUBSCRIBE[%d] %d", node_id, req_id)); - if (opt_ndb_extra_logging) - { - sql_print_information("NDB Binlog: Node: %d, subscribe from node %d," - " Subscriber bitmask %x%x", - pOp->getNdbdNodeId(), - req_id, - tmp_share->subscriber_bitmap[node_id].bitmap[1], - tmp_share->subscriber_bitmap[node_id].bitmap[0]); - } - mysql_mutex_unlock(&tmp_share->mutex); - mysql_cond_signal(&injector_cond); - break; - } - case NDBEVENT::TE_UNSUBSCRIBE: - { - uint8 node_id= g_node_id_map[pOp->getNdbdNodeId()]; - uint8 req_id= pOp->getReqNodeId(); - DBUG_ASSERT(req_id != 0 && node_id != 0xFF); - mysql_mutex_lock(&tmp_share->mutex); - bitmap_clear_bit(&tmp_share->subscriber_bitmap[node_id], req_id); - DBUG_PRINT("info",("UNSUBSCRIBE[%d] %d", node_id, req_id)); - if (opt_ndb_extra_logging) - { - sql_print_information("NDB Binlog: Node: %d, unsubscribe from node %d," - " Subscriber bitmask %x%x", - pOp->getNdbdNodeId(), - req_id, - tmp_share->subscriber_bitmap[node_id].bitmap[1], - tmp_share->subscriber_bitmap[node_id].bitmap[0]); - } - mysql_mutex_unlock(&tmp_share->mutex); - mysql_cond_signal(&injector_cond); - break; - } - default: - sql_print_error("NDB Binlog: unknown non data event %d for %s. " - "Ignoring...", (unsigned) ev_type, tmp_share->key); - } - } - DBUG_RETURN(0); -} - -/* - process any operations that should be done after - the epoch is complete -*/ -static void -ndb_binlog_thread_handle_schema_event_post_epoch(THD *thd, - List - *post_epoch_log_list, - List - *post_epoch_unlock_list) -{ - if (post_epoch_log_list->elements == 0) - return; - DBUG_ENTER("ndb_binlog_thread_handle_schema_event_post_epoch"); - Cluster_schema *schema; - while ((schema= post_epoch_log_list->pop())) - { - DBUG_PRINT("info", - ("%s.%s: log query_length: %d query: '%s' type: %d", - schema->db, schema->name, - schema->query_length, schema->query, - schema->type)); - int log_query= 0; - { - enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type; - char key[FN_REFLEN + 1]; - build_table_filename(key, sizeof(key) - 1, schema->db, schema->name, "", 0); - if (schema_type == SOT_CLEAR_SLOCK) - { - mysql_mutex_lock(&ndbcluster_mutex); - NDB_SCHEMA_OBJECT *ndb_schema_object= - (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects, - (uchar*) key, strlen(key)); - if (ndb_schema_object) - { - mysql_mutex_lock(&ndb_schema_object->mutex); - memcpy(ndb_schema_object->slock, schema->slock, - sizeof(ndb_schema_object->slock)); - DBUG_DUMP("ndb_schema_object->slock_bitmap.bitmap", - (uchar*)ndb_schema_object->slock_bitmap.bitmap, - no_bytes_in_map(&ndb_schema_object->slock_bitmap)); - mysql_mutex_unlock(&ndb_schema_object->mutex); - mysql_cond_signal(&injector_cond); - } - mysql_mutex_unlock(&ndbcluster_mutex); - continue; - } - /* ndb_share reference temporary, free below */ - NDB_SHARE *share= get_share(key, 0, FALSE, FALSE); - if (share) - { - DBUG_PRINT("NDB_SHARE", ("%s temporary use_count: %u", - share->key, share->use_count)); - } - switch (schema_type) - { - case SOT_DROP_DB: - log_query= 1; - break; - case SOT_DROP_TABLE: - 
log_query= 1; - // invalidation already handled by binlog thread - if (share && share->op) - { - break; - } - // fall through - case SOT_RENAME_TABLE: - // fall through - case SOT_ALTER_TABLE: - // invalidation already handled by binlog thread - if (!share || !share->op) - { - { - injector_ndb->setDatabaseName(schema->db); - Ndb_table_guard ndbtab_g(injector_ndb->getDictionary(), - schema->name); - ndbtab_g.invalidate(); - } - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= schema->db; - table_list.alias= table_list.table_name= schema->name; - close_cached_tables(thd, &table_list, FALSE, LONG_TIMEOUT); - } - if (schema_type != SOT_ALTER_TABLE) - break; - // fall through - case SOT_RENAME_TABLE_NEW: - log_query= 1; - if (ndb_binlog_running && (!share || !share->op)) - { - /* - we need to free any share here as command below - may need to call handle_trailing_share - */ - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - share= 0; - } - if (ndbcluster_check_if_local_table(schema->db, schema->name)) - { - DBUG_PRINT("info", ("NDB Binlog: Skipping locally defined table '%s.%s'", - schema->db, schema->name)); - sql_print_error("NDB Binlog: Skipping locally defined table '%s.%s' from " - "binlog schema event '%s' from node %d. ", - schema->db, schema->name, schema->query, - schema->node_id); - } - else if (ndb_create_table_from_engine(thd, schema->db, schema->name)) - { - print_could_not_discover_error(thd, schema); - } - } - break; - default: - DBUG_ASSERT(FALSE); - } - if (share) - { - /* ndb_share reference temporary free */ - DBUG_PRINT("NDB_SHARE", ("%s temporary free use_count: %u", - share->key, share->use_count)); - free_share(&share); - share= 0; - } - } - if (ndb_binlog_running && log_query) - ndb_binlog_query(thd, schema); - } - while ((schema= post_epoch_unlock_list->pop())) - { - ndbcluster_update_slock(thd, schema->db, schema->name); - } - DBUG_VOID_RETURN; -} - -/* - Timer class for doing performance measurements -*/ - -/********************************************************************* - Internal helper functions for handeling of the cluster replication tables - - ndb_binlog_index - - ndb_apply_status -*********************************************************************/ - -/* - struct to hold the data to be inserted into the - ndb_binlog_index table -*/ -struct ndb_binlog_index_row { - ulonglong gci; - const char *master_log_file; - ulonglong master_log_pos; - ulonglong n_inserts; - ulonglong n_updates; - ulonglong n_deletes; - ulonglong n_schemaops; -}; - -/* - Open the ndb_binlog_index table -*/ -static int open_ndb_binlog_index(THD *thd, TABLE **ndb_binlog_index) -{ - static char repdb[]= NDB_REP_DB; - static char reptable[]= NDB_REP_TABLE; - const char *save_proc_info= thd->proc_info; - TABLE_LIST *tables= &binlog_tables; - - tables->init_one_table(repdb, strlen(repdb), reptable, strlen(reptable), - reptable, TL_WRITE); - thd->proc_info= "Opening " NDB_REP_DB "." 
NDB_REP_TABLE; - - tables->required_type= FRMTYPE_TABLE; - thd->clear_error(); - if (open_and_lock_tables(thd, tables, FALSE, 0)) - { - if (thd->killed) - sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed"); - else - sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'", - thd->get_stmt_da()->sql_errno(), - thd->get_stmt_da()->message()); - thd->proc_info= save_proc_info; - return -1; - } - *ndb_binlog_index= tables->table; - thd->proc_info= save_proc_info; - (*ndb_binlog_index)->use_all_columns(); - return 0; -} - - -/* - Insert one row in the ndb_binlog_index -*/ - -int ndb_add_ndb_binlog_index(THD *thd, void *_row) -{ - ndb_binlog_index_row &row= *(ndb_binlog_index_row *) _row; - int error= 0; - /* - Turn of binlogging to prevent the table changes to be written to - the binary log. - */ - ulong saved_options= thd->variables.option_bits; - thd->variables.option_bits&= ~OPTION_BIN_LOG; - - if (!ndb_binlog_index && open_ndb_binlog_index(thd, &ndb_binlog_index)) - { - sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index"); - error= -1; - goto add_ndb_binlog_index_err; - } - - /* - Intialize ndb_binlog_index->record[0] - */ - empty_record(ndb_binlog_index); - - ndb_binlog_index->field[0]->store(row.master_log_pos); - ndb_binlog_index->field[1]->store(row.master_log_file, - strlen(row.master_log_file), - &my_charset_bin); - ndb_binlog_index->field[2]->store(row.gci); - ndb_binlog_index->field[3]->store(row.n_inserts); - ndb_binlog_index->field[4]->store(row.n_updates); - ndb_binlog_index->field[5]->store(row.n_deletes); - ndb_binlog_index->field[6]->store(row.n_schemaops); - - if ((error= ndb_binlog_index->file->ha_write_row(ndb_binlog_index->record[0]))) - { - sql_print_error("NDB Binlog: Writing row to ndb_binlog_index: %d", error); - error= -1; - goto add_ndb_binlog_index_err; - } - -add_ndb_binlog_index_err: - thd->get_stmt_da()->set_overwrite_status(true); - thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->get_stmt_da()->set_overwrite_status(false); - close_thread_tables(thd); - /* - There should be no need for rolling back transaction due to deadlock - (since ndb_binlog_index is non transactional). - */ - DBUG_ASSERT(! 
thd->transaction_rollback_request); - - thd->mdl_context.release_transactional_locks(); - ndb_binlog_index= 0; - thd->variables.option_bits= saved_options; - return error; -} - -/********************************************************************* - Functions for start, stop, wait for ndbcluster binlog thread -*********************************************************************/ - -enum Binlog_thread_state -{ - BCCC_running= 0, - BCCC_exit= 1, - BCCC_restart= 2 -}; - -static enum Binlog_thread_state do_ndbcluster_binlog_close_connection= BCCC_restart; - -int ndbcluster_binlog_start() -{ - DBUG_ENTER("ndbcluster_binlog_start"); - - if (::server_id == 0) - { - sql_print_warning("NDB: server id set to zero will cause any other mysqld " - "with bin log to log with wrong server id"); - } - else if (::server_id & 0x1 << 31) - { - sql_print_error("NDB: server id's with high bit set is reserved for internal " - "purposes"); - DBUG_RETURN(-1); - } - - mysql_mutex_init(key_injector_mutex, &injector_mutex, MY_MUTEX_INIT_FAST); - mysql_cond_init(key_injector_cond, &injector_cond, NULL); - mysql_mutex_init(key_ndb_schema_share_mutex, - &ndb_schema_share_mutex, MY_MUTEX_INIT_FAST); - - /* Create injector thread */ - if (mysql_thread_create(key_thread_ndb_binlog, - &ndb_binlog_thread, &connection_attrib, - ndb_binlog_thread_func, 0)) - { - DBUG_PRINT("error", ("Could not create ndb injector thread")); - mysql_cond_destroy(&injector_cond); - mysql_mutex_destroy(&injector_mutex); - DBUG_RETURN(-1); - } - - ndbcluster_binlog_inited= 1; - - /* Wait for the injector thread to start */ - mysql_mutex_lock(&injector_mutex); - while (!ndb_binlog_thread_running) - mysql_cond_wait(&injector_cond, &injector_mutex); - mysql_mutex_unlock(&injector_mutex); - - if (ndb_binlog_thread_running < 0) - DBUG_RETURN(-1); - - DBUG_RETURN(0); -} - - -/************************************************************** - Internal helper functions for creating/dropping ndb events - used by the client sql threads -**************************************************************/ -void -ndb_rep_event_name(String *event_name,const char *db, const char *tbl) -{ - event_name->set_ascii("REPL$", 5); - event_name->append(db); - if (tbl) - { - event_name->append('/'); - event_name->append(tbl); - } -} - -bool -ndbcluster_check_if_local_table(const char *dbname, const char *tabname) -{ - char key[FN_REFLEN + 1]; - char ndb_file[FN_REFLEN + 1]; - - DBUG_ENTER("ndbcluster_check_if_local_table"); - build_table_filename(key, FN_LEN-1, dbname, tabname, reg_ext, 0); - build_table_filename(ndb_file, FN_LEN-1, dbname, tabname, ha_ndb_ext, 0); - /* Check that any defined table is an ndb table */ - DBUG_PRINT("info", ("Looking for file %s and %s", key, ndb_file)); - if ((! 
my_access(key, F_OK)) && my_access(ndb_file, F_OK)) - { - DBUG_PRINT("info", ("table file %s not on disk, local table", ndb_file)); - - - DBUG_RETURN(true); - } - - DBUG_RETURN(false); -} - -bool -ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname) -{ - DBUG_ENTER("ndbcluster_check_if_local_tables_in_db"); - DBUG_PRINT("info", ("Looking for files in directory %s", dbname)); - LEX_STRING *tabname; - List files; - char path[FN_REFLEN + 1]; - - build_table_filename(path, sizeof(path) - 1, dbname, "", "", 0); - if (find_files(thd, &files, dbname, path, NullS, 0) != FIND_FILES_OK) - { - DBUG_PRINT("info", ("Failed to find files")); - DBUG_RETURN(true); - } - DBUG_PRINT("info",("found: %d files", files.elements)); - while ((tabname= files.pop())) - { - DBUG_PRINT("info", ("Found table %s", tabname->str)); - if (ndbcluster_check_if_local_table(dbname, tabname->str)) - DBUG_RETURN(true); - } - - DBUG_RETURN(false); -} - -/* - Common function for setting up everything for logging a table at - create/discover. -*/ -int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, - uint key_len, - const char *db, - const char *table_name, - my_bool share_may_exist) -{ - int do_event_op= ndb_binlog_running; - DBUG_ENTER("ndbcluster_create_binlog_setup"); - DBUG_PRINT("enter",("key: %s key_len: %d %s.%s share_may_exist: %d", - key, key_len, db, table_name, share_may_exist)); - DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(table_name)); - DBUG_ASSERT(strlen(key) == key_len); - - mysql_mutex_lock(&ndbcluster_mutex); - - /* Handle any trailing share */ - NDB_SHARE *share= (NDB_SHARE*) my_hash_search(&ndbcluster_open_tables, - (uchar*) key, key_len); - - if (share && share_may_exist) - { - if (share->flags & NSF_NO_BINLOG || - share->op != 0 || - share->op_old != 0) - { - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); // replication already setup, or should not - } - } - - if (share) - { - if (share->op || share->op_old) - { - my_errno= HA_ERR_TABLE_EXIST; - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(1); - } - if (!share_may_exist || share->connect_count != - g_ndb_cluster_connection->get_connect_count()) - { - handle_trailing_share(share); - share= NULL; - } - } - - /* Create share which is needed to hold replication information */ - if (share) - { - /* ndb_share reference create */ - ++share->use_count; - DBUG_PRINT("NDB_SHARE", ("%s create use_count: %u", - share->key, share->use_count)); - } - /* ndb_share reference create */ - else if (!(share= get_share(key, 0, TRUE, TRUE))) - { - sql_print_error("NDB Binlog: " - "allocating table share for %s failed", key); - } - else - { - DBUG_PRINT("NDB_SHARE", ("%s create use_count: %u", - share->key, share->use_count)); - } - - if (!ndb_schema_share && - strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0) - do_event_op= 1; - else if (!ndb_apply_status_share && - strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_APPLY_TABLE) == 0) - do_event_op= 1; - - if (!do_event_op) - { - share->flags|= NSF_NO_BINLOG; - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(0); - } - mysql_mutex_unlock(&ndbcluster_mutex); - - while (share && !IS_TMP_PREFIX(table_name)) - { - /* - ToDo make sanity check of share so that the table is actually the same - I.e. 
we need to do open file from frm in this case - Currently awaiting this to be fixed in the 4.1 tree in the general - case - */ - - /* Create the event in NDB */ - ndb->setDatabaseName(db); - - NDBDICT *dict= ndb->getDictionary(); - Ndb_table_guard ndbtab_g(dict, table_name); - const NDBTAB *ndbtab= ndbtab_g.get_table(); - if (ndbtab == 0) - { - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: Failed to get table %s from ndb: " - "%s, %d", key, dict->getNdbError().message, - dict->getNdbError().code); - break; // error - } - String event_name(INJECTOR_EVENT_LEN); - ndb_rep_event_name(&event_name, db, table_name); - /* - event should have been created by someone else, - but let's make sure, and create if it doesn't exist - */ - const NDBEVENT *ev= dict->getEvent(event_name.c_ptr()); - if (!ev) - { - if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share)) - { - sql_print_error("NDB Binlog: " - "FAILED CREATE (DISCOVER) TABLE Event: %s", - event_name.c_ptr()); - break; // error - } - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: " - "CREATE (DISCOVER) TABLE Event: %s", - event_name.c_ptr()); - } - else - { - delete ev; - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: DISCOVER TABLE Event: %s", - event_name.c_ptr()); - } - - /* - create the event operations for receiving logging events - */ - if (ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr())) - { - sql_print_error("NDB Binlog:" - "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s", - event_name.c_ptr()); - /* a warning has been issued to the client */ - DBUG_RETURN(0); - } - DBUG_RETURN(0); - } - DBUG_RETURN(-1); -} - -int -ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, - const char *event_name, NDB_SHARE *share, - int push_warning) -{ - THD *thd= current_thd; - DBUG_ENTER("ndbcluster_create_event"); - DBUG_PRINT("info", ("table=%s version=%d event=%s share=%s", - ndbtab->getName(), ndbtab->getObjectVersion(), - event_name, share ? share->key : "(nil)")); - DBUG_ASSERT(! 
IS_NDB_BLOB_PREFIX(ndbtab->getName())); - if (!share) - { - DBUG_PRINT("info", ("share == NULL")); - DBUG_RETURN(0); - } - if (share->flags & NSF_NO_BINLOG) - { - DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x %d", - share->flags, share->flags & NSF_NO_BINLOG)); - DBUG_RETURN(0); - } - - NDBDICT *dict= ndb->getDictionary(); - NDBEVENT my_event(event_name); - my_event.setTable(*ndbtab); - my_event.addTableEvent(NDBEVENT::TE_ALL); - if (share->flags & NSF_HIDDEN_PK) - { - if (share->flags & NSF_BLOB_FLAG) - { - sql_print_error("NDB Binlog: logging of table %s " - "with BLOB attribute and no PK is not supported", - share->key); - if (push_warning) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_ILLEGAL_HA_CREATE_OPTION, - ER(ER_ILLEGAL_HA_CREATE_OPTION), - ndbcluster_hton_name, - "Binlog of table with BLOB attribute and no PK"); - - share->flags|= NSF_NO_BINLOG; - DBUG_RETURN(-1); - } - /* No primary key, subscribe for all attributes */ - my_event.setReport(NDBEVENT::ER_ALL); - DBUG_PRINT("info", ("subscription all")); - } - else - { - if (ndb_schema_share || strcmp(share->db, NDB_REP_DB) || - strcmp(share->table_name, NDB_SCHEMA_TABLE)) - { - my_event.setReport(NDBEVENT::ER_UPDATED); - DBUG_PRINT("info", ("subscription only updated")); - } - else - { - my_event.setReport((NDBEVENT::EventReport) - (NDBEVENT::ER_ALL | NDBEVENT::ER_SUBSCRIBE)); - DBUG_PRINT("info", ("subscription all and subscribe")); - } - } - if (share->flags & NSF_BLOB_FLAG) - my_event.mergeEvents(TRUE); - - /* add all columns to the event */ - int n_cols= ndbtab->getNoOfColumns(); - for(int a= 0; a < n_cols; a++) - my_event.addEventColumn(a); - - if (dict->createEvent(my_event)) // Add event to database - { - if (dict->getNdbError().classification != NdbError::SchemaObjectExists) - { - /* - failed, print a warning - */ - if (push_warning > 1) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - dict->getNdbError().code, - dict->getNdbError().message, "NDB"); - sql_print_error("NDB Binlog: Unable to create event in database. " - "Event: %s Error Code: %d Message: %s", event_name, - dict->getNdbError().code, dict->getNdbError().message); - DBUG_RETURN(-1); - } - - /* - try retrieving the event, if table version/id matches, we will get - a valid event. Otherwise we have a trailing event from before - */ - const NDBEVENT *ev; - if ((ev= dict->getEvent(event_name))) - { - delete ev; - DBUG_RETURN(0); - } - - /* - trailing event from before; an error, but try to correct it - */ - if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT && - dict->dropEvent(my_event.getName())) - { - if (push_warning > 1) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - dict->getNdbError().code, - dict->getNdbError().message, "NDB"); - sql_print_error("NDB Binlog: Unable to create event in database. " - " Attempt to correct with drop failed. " - "Event: %s Error Code: %d Message: %s", - event_name, - dict->getNdbError().code, - dict->getNdbError().message); - DBUG_RETURN(-1); - } - - /* - try to add the event again - */ - if (dict->createEvent(my_event)) - { - if (push_warning > 1) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - dict->getNdbError().code, - dict->getNdbError().message, "NDB"); - sql_print_error("NDB Binlog: Unable to create event in database. " - " Attempt to correct with drop ok, but create failed. 
" - "Event: %s Error Code: %d Message: %s", - event_name, - dict->getNdbError().code, - dict->getNdbError().message); - DBUG_RETURN(-1); - } -#ifdef NDB_BINLOG_EXTRA_WARNINGS - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - 0, "NDB Binlog: Removed trailing event", - "NDB"); -#endif - } - - DBUG_RETURN(0); -} - -inline int is_ndb_compatible_type(Field *field) -{ - return - !(field->flags & BLOB_FLAG) && - field->type() != MYSQL_TYPE_BIT && - field->pack_length() != 0; -} - -/* - - create eventOperations for receiving log events - - setup ndb recattrs for reception of log event data - - "start" the event operation - - used at create/discover of tables -*/ -int -ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, - const char *event_name) -{ - THD *thd= current_thd; - /* - we are in either create table or rename table so table should be - locked, hence we can work with the share without locks - */ - - DBUG_ENTER("ndbcluster_create_event_ops"); - DBUG_PRINT("enter", ("table: %s event: %s", ndbtab->getName(), event_name)); - DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(ndbtab->getName())); - - DBUG_ASSERT(share != 0); - - if (share->flags & NSF_NO_BINLOG) - { - DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x", - share->flags)); - DBUG_RETURN(0); - } - - int do_ndb_schema_share= 0, do_ndb_apply_status_share= 0; - if (!ndb_schema_share && strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_SCHEMA_TABLE) == 0) - do_ndb_schema_share= 1; - else if (!ndb_apply_status_share && strcmp(share->db, NDB_REP_DB) == 0 && - strcmp(share->table_name, NDB_APPLY_TABLE) == 0) - do_ndb_apply_status_share= 1; - else if (!binlog_filter->db_ok(share->db) || !ndb_binlog_running) - { - share->flags|= NSF_NO_BINLOG; - DBUG_RETURN(0); - } - - if (share->op) - { - assert(share->op->getCustomData() == (void *) share); - - DBUG_ASSERT(share->use_count > 1); - sql_print_error("NDB Binlog: discover reusing old ev op"); - /* ndb_share reference ToDo free */ - DBUG_PRINT("NDB_SHARE", ("%s ToDo free use_count: %u", - share->key, share->use_count)); - free_share(&share); // old event op already has reference - DBUG_RETURN(0); - } - - TABLE *table= share->table; - - int retries= 100; - /* - 100 milliseconds, temporary error on schema operation can - take some time to be resolved - */ - int retry_sleep= 100; - while (1) - { - mysql_mutex_lock(&injector_mutex); - Ndb *ndb= injector_ndb; - if (do_ndb_schema_share) - ndb= schema_ndb; - - if (ndb == 0) - { - mysql_mutex_unlock(&injector_mutex); - DBUG_RETURN(-1); - } - - NdbEventOperation* op; - if (do_ndb_schema_share) - op= ndb->createEventOperation(event_name); - else - { - // set injector_ndb database/schema from table internal name - int ret= ndb->setDatabaseAndSchemaName(ndbtab); - assert(ret == 0); - op= ndb->createEventOperation(event_name); - // reset to catch errors - ndb->setDatabaseName(""); - } - if (!op) - { - sql_print_error("NDB Binlog: Creating NdbEventOperation failed for" - " %s",event_name); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - ndb->getNdbError().code, - ndb->getNdbError().message, - "NDB"); - mysql_mutex_unlock(&injector_mutex); - DBUG_RETURN(-1); - } - - if (share->flags & NSF_BLOB_FLAG) - op->mergeEvents(TRUE); // currently not inherited from event - - DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx", - (long) share->ndb_value[0], - (long) share->ndb_value[1])); - int n_columns= 
ndbtab->getNoOfColumns(); - int n_fields= table ? table->s->fields : 0; // XXX ??? - for (int j= 0; j < n_columns; j++) - { - const char *col_name= ndbtab->getColumn(j)->getName(); - NdbValue attr0, attr1; - if (j < n_fields) - { - Field *f= share->table->field[j]; - if (is_ndb_compatible_type(f)) - { - DBUG_PRINT("info", ("%s compatible", col_name)); - attr0.rec= op->getValue(col_name, (char*) f->ptr); - attr1.rec= op->getPreValue(col_name, - (f->ptr - share->table->record[0]) + - (char*) share->table->record[1]); - } - else if (! (f->flags & BLOB_FLAG)) - { - DBUG_PRINT("info", ("%s non compatible", col_name)); - attr0.rec= op->getValue(col_name); - attr1.rec= op->getPreValue(col_name); - } - else - { - DBUG_PRINT("info", ("%s blob", col_name)); - DBUG_ASSERT(share->flags & NSF_BLOB_FLAG); - attr0.blob= op->getBlobHandle(col_name); - attr1.blob= op->getPreBlobHandle(col_name); - if (attr0.blob == NULL || attr1.blob == NULL) - { - sql_print_error("NDB Binlog: Creating NdbEventOperation" - " blob field %u handles failed (code=%d) for %s", - j, op->getNdbError().code, event_name); - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - op->getNdbError().code, - op->getNdbError().message, - "NDB"); - ndb->dropEventOperation(op); - mysql_mutex_unlock(&injector_mutex); - DBUG_RETURN(-1); - } - } - } - else - { - DBUG_PRINT("info", ("%s hidden key", col_name)); - attr0.rec= op->getValue(col_name); - attr1.rec= op->getPreValue(col_name); - } - share->ndb_value[0][j].ptr= attr0.ptr; - share->ndb_value[1][j].ptr= attr1.ptr; - DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx " - "share->ndb_value[0][%d]: 0x%lx", - j, (long) &share->ndb_value[0][j], - j, (long) attr0.ptr)); - DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx " - "share->ndb_value[1][%d]: 0x%lx", - j, (long) &share->ndb_value[0][j], - j, (long) attr1.ptr)); - } - op->setCustomData((void *) share); // set before execute - share->op= op; // assign op in NDB_SHARE - if (op->execute()) - { - share->op= NULL; - retries--; - if (op->getNdbError().status != NdbError::TemporaryError && - op->getNdbError().code != 1407) - retries= 0; - if (retries == 0) - { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - op->getNdbError().code, op->getNdbError().message, - "NDB"); - sql_print_error("NDB Binlog: ndbevent->execute failed for %s; %d %s", - event_name, - op->getNdbError().code, op->getNdbError().message); - } - ndb->dropEventOperation(op); - mysql_mutex_unlock(&injector_mutex); - if (retries) - { - my_sleep(retry_sleep); - continue; - } - DBUG_RETURN(-1); - } - mysql_mutex_unlock(&injector_mutex); - break; - } - - /* ndb_share reference binlog */ - get_share(share); - DBUG_PRINT("NDB_SHARE", ("%s binlog use_count: %u", - share->key, share->use_count)); - if (do_ndb_apply_status_share) - { - /* ndb_share reference binlog extra */ - ndb_apply_status_share= get_share(share); - DBUG_PRINT("NDB_SHARE", ("%s binlog extra use_count: %u", - share->key, share->use_count)); - mysql_cond_signal(&injector_cond); - } - else if (do_ndb_schema_share) - { - /* ndb_share reference binlog extra */ - ndb_schema_share= get_share(share); - DBUG_PRINT("NDB_SHARE", ("%s binlog extra use_count: %u", - share->key, share->use_count)); - mysql_cond_signal(&injector_cond); - } - - DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u", - share->key, (long) share->op, share->use_count)); - - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: logging %s", 
share->key); - DBUG_RETURN(0); -} - -/* - when entering the calling thread should have a share lock id share != 0 - then the injector thread will have one as well, i.e. share->use_count == 0 - (unless it has already dropped... then share->op == 0) -*/ -int -ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name, - NDB_SHARE *share, const char *type_str) -{ - DBUG_ENTER("ndbcluster_handle_drop_table"); - THD *thd= current_thd; - - NDBDICT *dict= ndb->getDictionary(); - if (event_name && dict->dropEvent(event_name)) - { - if (dict->getNdbError().code != 4710) - { - /* drop event failed for some reason, issue a warning */ - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - dict->getNdbError().code, - dict->getNdbError().message, "NDB"); - /* error is not that the event did not exist */ - sql_print_error("NDB Binlog: Unable to drop event in database. " - "Event: %s Error Code: %d Message: %s", - event_name, - dict->getNdbError().code, - dict->getNdbError().message); - /* ToDo; handle error? */ - if (share && share->op && - share->op->getState() == NdbEventOperation::EO_EXECUTING && - dict->getNdbError().mysql_code != HA_ERR_NO_CONNECTION) - { - DBUG_ASSERT(FALSE); - DBUG_RETURN(-1); - } - } - } - - if (share == 0 || share->op == 0) - { - DBUG_RETURN(0); - } - -/* - Syncronized drop between client thread and injector thread is - neccessary in order to maintain ordering in the binlog, - such that the drop occurs _after_ any inserts/updates/deletes. - - The penalty for this is that the drop table becomes slow. - - This wait is however not strictly neccessary to produce a binlog - that is usable. However the slave does not currently handle - these out of order, thus we are keeping the SYNC_DROP_ defined - for now. -*/ - const char *save_proc_info= thd->proc_info; -#define SYNC_DROP_ -#ifdef SYNC_DROP_ - thd->proc_info= "Syncing ndb table schema operation and binlog"; - mysql_mutex_lock(&share->mutex); - int max_timeout= DEFAULT_SYNC_TIMEOUT; - while (share->op) - { - struct timespec abstime; - set_timespec(abstime, 1); - int ret= mysql_cond_timedwait(&injector_cond, - &share->mutex, - &abstime); - if (thd->killed || - share->op == 0) - break; - if (ret) - { - max_timeout--; - if (max_timeout == 0) - { - sql_print_error("NDB %s: %s timed out. Ignoring...", - type_str, share->key); - break; - } - if (opt_ndb_extra_logging) - ndb_report_waiting(type_str, max_timeout, - type_str, share->key); - } - } - mysql_mutex_unlock(&share->mutex); -#else - mysql_mutex_lock(&share->mutex); - share->op_old= share->op; - share->op= 0; - mysql_mutex_unlock(&share->mutex); -#endif - thd->proc_info= save_proc_info; - - DBUG_RETURN(0); -} - - -/******************************************************************** - Internal helper functions for differentd events from the stoarage nodes - used by the ndb injector thread -********************************************************************/ - -/* - Handle error states on events from the storage nodes -*/ -static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp, - ndb_binlog_index_row &row) -{ - NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData(); - DBUG_ENTER("ndb_binlog_thread_handle_error"); - - int overrun= pOp->isOverrun(); - if (overrun) - { - /* - ToDo: this error should rather clear the ndb_binlog_index... - and continue - */ - sql_print_error("NDB Binlog: Overrun in event buffer, " - "this means we have dropped events. 
Cannot " - "continue binlog for %s", share->key); - pOp->clearError(); - DBUG_RETURN(-1); - } - - if (!pOp->isConsistent()) - { - /* - ToDo: this error should rather clear the ndb_binlog_index... - and continue - */ - sql_print_error("NDB Binlog: Not Consistent. Cannot " - "continue binlog for %s. Error code: %d" - " Message: %s", share->key, - pOp->getNdbError().code, - pOp->getNdbError().message); - pOp->clearError(); - DBUG_RETURN(-1); - } - sql_print_error("NDB Binlog: unhandled error %d for table %s", - pOp->hasError(), share->key); - pOp->clearError(); - DBUG_RETURN(0); -} - -static int -ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb, - NdbEventOperation *pOp, - ndb_binlog_index_row &row) -{ - NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData(); - NDBEVENT::TableEvent type= pOp->getEventType(); - - switch (type) - { - case NDBEVENT::TE_CLUSTER_FAILURE: - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: cluster failure for %s at epoch %u.", - share->key, (unsigned) pOp->getGCI()); - if (ndb_apply_status_share == share) - { - if (opt_ndb_extra_logging && - ndb_binlog_tables_inited && ndb_binlog_running) - sql_print_information("NDB Binlog: ndb tables initially " - "read only on reconnect."); - /* ndb_share reference binlog extra free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u", - share->key, share->use_count)); - free_share(&ndb_apply_status_share); - ndb_apply_status_share= 0; - ndb_binlog_tables_inited= 0; - } - DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: " - "%s received share: 0x%lx op: 0x%lx share op: 0x%lx " - "op_old: 0x%lx", - share->key, (long) share, (long) pOp, - (long) share->op, (long) share->op_old)); - break; - case NDBEVENT::TE_DROP: - if (ndb_apply_status_share == share) - { - if (opt_ndb_extra_logging && - ndb_binlog_tables_inited && ndb_binlog_running) - sql_print_information("NDB Binlog: ndb tables initially " - "read only on reconnect."); - /* ndb_share reference binlog extra free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u", - share->key, share->use_count)); - free_share(&ndb_apply_status_share); - ndb_apply_status_share= 0; - ndb_binlog_tables_inited= 0; - } - /* ToDo: remove printout */ - if (opt_ndb_extra_logging) - sql_print_information("NDB Binlog: drop table %s.", share->key); - // fall through - case NDBEVENT::TE_ALTER: - row.n_schemaops++; - DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx " - "share op: 0x%lx op_old: 0x%lx", - type == NDBEVENT::TE_DROP ? "DROP" : "ALTER", - share->key, (long) share, (long) pOp, - (long) share->op, (long) share->op_old)); - break; - case NDBEVENT::TE_NODE_FAILURE: - /* fall through */ - case NDBEVENT::TE_SUBSCRIBE: - /* fall through */ - case NDBEVENT::TE_UNSUBSCRIBE: - /* ignore */ - return 0; - default: - sql_print_error("NDB Binlog: unknown non data event %d for %s. 
" - "Ignoring...", (unsigned) type, share->key); - return 0; - } - - ndb_handle_schema_change(thd, ndb, pOp, share); - return 0; -} - -/* - Handle data events from the storage nodes -*/ -static int -ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp, - ndb_binlog_index_row &row, - injector::transaction &trans) -{ - NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); - if (share == ndb_apply_status_share) - return 0; - - uint32 originating_server_id= pOp->getAnyValue(); - if (originating_server_id == 0) - originating_server_id= ::server_id; - else if (originating_server_id & NDB_ANYVALUE_RESERVED) - { - if (originating_server_id != NDB_ANYVALUE_FOR_NOLOGGING) - sql_print_warning("NDB: unknown value for binlog signalling 0x%X, " - "event not logged", - originating_server_id); - return 0; - } - else if (!g_ndb_log_slave_updates) - { - /* - This event comes from a slave applier since it has an originating - server id set. Since option to log slave updates is not set, skip it. - */ - return 0; - } - - TABLE *table= share->table; - DBUG_ASSERT(trans.good()); - DBUG_ASSERT(table != 0); - - dbug_print_table("table", table); - - TABLE_SHARE *table_s= table->s; - uint n_fields= table_s->fields; - MY_BITMAP b; - /* Potential buffer for the bitmap */ - uint32 bitbuf[128 / (sizeof(uint32) * 8)]; - my_bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL, - n_fields, FALSE); - bitmap_set_all(&b); - - /* - row data is already in table->record[0] - As we told the NdbEventOperation to do this - (saves moving data about many times) - */ - - /* - for now malloc/free blobs buffer each time - TODO if possible share single permanent buffer with handlers - */ - uchar* blobs_buffer[2] = { 0, 0 }; - uint blobs_buffer_size[2] = { 0, 0 }; - - switch(pOp->getEventType()) - { - case NDBEVENT::TE_INSERT: - row.n_inserts++; - DBUG_PRINT("info", ("INSERT INTO %s.%s", - table_s->db.str, table_s->table_name.str)); - { - if (share->flags & NSF_BLOB_FLAG) - { - my_ptrdiff_t ptrdiff= 0; - int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0], - blobs_buffer[0], - blobs_buffer_size[0], - ptrdiff); - DBUG_ASSERT(ret == 0); - } - ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]); - int ret __attribute__((unused))= trans.write_row(originating_server_id, - injector::transaction::table(table, - TRUE), - &b, n_fields, table->record[0]); - DBUG_ASSERT(ret == 0); - } - break; - case NDBEVENT::TE_DELETE: - row.n_deletes++; - DBUG_PRINT("info",("DELETE FROM %s.%s", - table_s->db.str, table_s->table_name.str)); - { - /* - table->record[0] contains only the primary key in this case - since we do not have an after image - */ - int n; - if (table->s->primary_key != MAX_KEY) - n= 0; /* - use the primary key only as it save time and space and - it is the only thing needed to log the delete - */ - else - n= 1; /* - we use the before values since we don't have a primary key - since the mysql server does not handle the hidden primary - key - */ - - if (share->flags & NSF_BLOB_FLAG) - { - my_ptrdiff_t ptrdiff= table->record[n] - table->record[0]; - int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[n], - blobs_buffer[n], - blobs_buffer_size[n], - ptrdiff); - DBUG_ASSERT(ret == 0); - } - ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]); - DBUG_EXECUTE("info", print_records(table, table->record[n]);); - int ret __attribute__((unused))= trans.delete_row(originating_server_id, - injector::transaction::table(table, - TRUE), - &b, 
n_fields, table->record[n]); - DBUG_ASSERT(ret == 0); - } - break; - case NDBEVENT::TE_UPDATE: - row.n_updates++; - DBUG_PRINT("info", ("UPDATE %s.%s", - table_s->db.str, table_s->table_name.str)); - { - if (share->flags & NSF_BLOB_FLAG) - { - my_ptrdiff_t ptrdiff= 0; - int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[0], - blobs_buffer[0], - blobs_buffer_size[0], - ptrdiff); - DBUG_ASSERT(ret == 0); - } - ndb_unpack_record(table, share->ndb_value[0], - &b, table->record[0]); - DBUG_EXECUTE("info", print_records(table, table->record[0]);); - if (table->s->primary_key != MAX_KEY) - { - /* - since table has a primary key, we can do a write - using only after values - */ - trans.write_row(originating_server_id, - injector::transaction::table(table, TRUE), - &b, n_fields, table->record[0]);// after values - } - else - { - /* - mysql server cannot handle the ndb hidden key and - therefore needs the before image as well - */ - if (share->flags & NSF_BLOB_FLAG) - { - my_ptrdiff_t ptrdiff= table->record[1] - table->record[0]; - int ret __attribute__((unused))= get_ndb_blobs_value(table, share->ndb_value[1], - blobs_buffer[1], - blobs_buffer_size[1], - ptrdiff); - DBUG_ASSERT(ret == 0); - } - ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]); - DBUG_EXECUTE("info", print_records(table, table->record[1]);); - int ret __attribute__((unused))= trans.update_row(originating_server_id, - injector::transaction::table(table, - TRUE), - &b, n_fields, - table->record[1], // before values - table->record[0]);// after values - DBUG_ASSERT(ret == 0); - } - } - break; - default: - /* We should REALLY never get here. */ - DBUG_PRINT("info", ("default - uh oh, a brain exploded.")); - break; - } - - if (share->flags & NSF_BLOB_FLAG) - { - my_free(blobs_buffer[0]); - my_free(blobs_buffer[1]); - } - - return 0; -} - -//#define RUN_NDB_BINLOG_TIMER -#ifdef RUN_NDB_BINLOG_TIMER -class Timer -{ -public: - Timer() { start(); } - void start() { gettimeofday(&m_start, 0); } - void stop() { gettimeofday(&m_stop, 0); } - ulong elapsed_ms() - { - return (ulong) - (((longlong) m_stop.tv_sec - (longlong) m_start.tv_sec) * 1000 + - ((longlong) m_stop.tv_usec - - (longlong) m_start.tv_usec + 999) / 1000); - } -private: - struct timeval m_start,m_stop; -}; -#endif - -/**************************************************************** - Injector thread main loop -****************************************************************/ - -static uchar * -ndb_schema_objects_get_key(NDB_SCHEMA_OBJECT *schema_object, - size_t *length, - my_bool not_used __attribute__((unused))) -{ - *length= schema_object->key_length; - return (uchar*) schema_object->key; -} - -static NDB_SCHEMA_OBJECT *ndb_get_schema_object(const char *key, - my_bool create_if_not_exists, - my_bool have_lock) -{ - NDB_SCHEMA_OBJECT *ndb_schema_object; - uint length= (uint) strlen(key); - DBUG_ENTER("ndb_get_schema_object"); - DBUG_PRINT("enter", ("key: '%s'", key)); - - if (!have_lock) - mysql_mutex_lock(&ndbcluster_mutex); - while (!(ndb_schema_object= - (NDB_SCHEMA_OBJECT*) my_hash_search(&ndb_schema_objects, - (uchar*) key, - length))) - { - if (!create_if_not_exists) - { - DBUG_PRINT("info", ("does not exist")); - break; - } - if (!(ndb_schema_object= - (NDB_SCHEMA_OBJECT*) my_malloc(sizeof(*ndb_schema_object) + length + 1, - MYF(MY_WME | MY_ZEROFILL)))) - { - DBUG_PRINT("info", ("malloc error")); - break; - } - ndb_schema_object->key= (char *)(ndb_schema_object+1); - memcpy(ndb_schema_object->key, key, length + 1); - 
ndb_schema_object->key_length= length; - if (my_hash_insert(&ndb_schema_objects, (uchar*) ndb_schema_object)) - { - my_free(ndb_schema_object); - break; - } - mysql_mutex_init(key_ndb_schema_object_mutex, &ndb_schema_object->mutex, MY_MUTEX_INIT_FAST); - my_bitmap_init(&ndb_schema_object->slock_bitmap, ndb_schema_object->slock, - sizeof(ndb_schema_object->slock)*8, FALSE); - bitmap_clear_all(&ndb_schema_object->slock_bitmap); - break; - } - if (ndb_schema_object) - { - ndb_schema_object->use_count++; - DBUG_PRINT("info", ("use_count: %d", ndb_schema_object->use_count)); - } - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_RETURN(ndb_schema_object); -} - - -static void ndb_free_schema_object(NDB_SCHEMA_OBJECT **ndb_schema_object, - bool have_lock) -{ - DBUG_ENTER("ndb_free_schema_object"); - DBUG_PRINT("enter", ("key: '%s'", (*ndb_schema_object)->key)); - if (!have_lock) - mysql_mutex_lock(&ndbcluster_mutex); - if (!--(*ndb_schema_object)->use_count) - { - DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count)); - my_hash_delete(&ndb_schema_objects, (uchar*) *ndb_schema_object); - mysql_mutex_destroy(&(*ndb_schema_object)->mutex); - my_free(*ndb_schema_object); - *ndb_schema_object= 0; - } - else - { - DBUG_PRINT("info", ("use_count: %d", (*ndb_schema_object)->use_count)); - } - if (!have_lock) - mysql_mutex_unlock(&ndbcluster_mutex); - DBUG_VOID_RETURN; -} - -extern ulong opt_ndb_report_thresh_binlog_epoch_slip; -extern ulong opt_ndb_report_thresh_binlog_mem_usage; - -pthread_handler_t ndb_binlog_thread_func(void *arg) -{ - THD *thd; /* needs to be first for thread_stack */ - Ndb *i_ndb= 0; - Ndb *s_ndb= 0; - Thd_ndb *thd_ndb=0; - int ndb_update_ndb_binlog_index= 1; - injector *inj= injector::instance(); - uint incident_id= 0; - -#ifdef RUN_NDB_BINLOG_TIMER - Timer main_timer; -#endif - - mysql_mutex_lock(&injector_mutex); - /* - Set up the Thread - */ - my_thread_init(); - DBUG_ENTER("ndb_binlog_thread"); - - thd= new THD; /* note that contructor of THD uses DBUG_ */ - THD_CHECK_SENTRY(thd); - thd->set_current_stmt_binlog_format_row(); - - /* We need to set thd->thread_id before thd->store_globals, or it will - set an invalid value for thd->variables.pseudo_thread_id. - */ - mysql_mutex_lock(&LOCK_thread_count); - thd->thread_id= thread_id++; - mysql_mutex_unlock(&LOCK_thread_count); - - mysql_thread_set_psi_id(thd->thread_id); - - thd->thread_stack= (char*) &thd; /* remember where our stack is */ - if (thd->store_globals()) - { - thd->cleanup(); - delete thd; - ndb_binlog_thread_running= -1; - mysql_mutex_unlock(&injector_mutex); - mysql_cond_signal(&injector_cond); - - DBUG_LEAVE; // Must match DBUG_ENTER() - my_thread_end(); - pthread_exit(0); - return NULL; // Avoid compiler warnings - } - - thd->init_for_queries(); - thd->command= COM_DAEMON; - thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG; - thd->main_security_ctx.host_or_ip= ""; - thd->client_capabilities= 0; - my_net_init(&thd->net, 0, MYF(MY_THREAD_SPECIFIC)); - thd->main_security_ctx.master_access= ~0; - thd->main_security_ctx.priv_user[0]= 0; - /* Do not use user-supplied timeout value for system threads. 
*/ - thd->variables.lock_wait_timeout= LONG_TIMEOUT; - - /* - Set up ndb binlog - */ - sql_print_information("Starting MySQL Cluster Binlog Thread"); - - pthread_detach_this_thread(); - thd->real_id= pthread_self(); - mysql_mutex_lock(&LOCK_thread_count); - threads.append(thd); - mysql_mutex_unlock(&LOCK_thread_count); - thd->lex->start_transaction_opt= 0; - - if (!(s_ndb= new Ndb(g_ndb_cluster_connection, "")) || - s_ndb->init()) - { - sql_print_error("NDB Binlog: Getting Schema Ndb object failed"); - ndb_binlog_thread_running= -1; - mysql_mutex_unlock(&injector_mutex); - mysql_cond_signal(&injector_cond); - goto err; - } - - // empty database - if (!(i_ndb= new Ndb(g_ndb_cluster_connection, "")) || - i_ndb->init()) - { - sql_print_error("NDB Binlog: Getting Ndb object failed"); - ndb_binlog_thread_running= -1; - mysql_mutex_unlock(&injector_mutex); - mysql_cond_signal(&injector_cond); - goto err; - } - - /* init hash for schema object distribution */ - (void) my_hash_init(&ndb_schema_objects, system_charset_info, 32, 0, 0, - (my_hash_get_key)ndb_schema_objects_get_key, 0, 0); - - /* - Expose global reference to our ndb object. - - Used by both sql client thread and binlog thread to interact - with the storage - mysql_mutex_lock(&injector_mutex); - */ - injector_thd= thd; - injector_ndb= i_ndb; - p_latest_trans_gci= - injector_ndb->get_ndb_cluster_connection().get_latest_trans_gci(); - schema_ndb= s_ndb; - - if (opt_bin_log) - { - ndb_binlog_running= TRUE; - } - - /* Thread start up completed */ - ndb_binlog_thread_running= 1; - mysql_mutex_unlock(&injector_mutex); - mysql_cond_signal(&injector_cond); - - /* - wait for mysql server to start (so that the binlog is started - and thus can receive the first GAP event) - */ - mysql_mutex_lock(&LOCK_server_started); - while (!mysqld_server_started) - { - struct timespec abstime; - set_timespec(abstime, 1); - mysql_cond_timedwait(&COND_server_started, &LOCK_server_started, - &abstime); - if (ndbcluster_terminating) - { - mysql_mutex_unlock(&LOCK_server_started); - goto err; - } - } - mysql_mutex_unlock(&LOCK_server_started); -restart: - /* - Main NDB Injector loop - */ - while (ndb_binlog_running) - { - /* - check if it is the first log, if so we do not insert a GAP event - as there is really no log to have a GAP in - */ - if (incident_id == 0) - { - LOG_INFO log_info; - mysql_bin_log.get_current_log(&log_info); - int len= strlen(log_info.log_file_name); - uint no= 0; - if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) && - no == 1) - { - /* this is the fist log, so skip GAP event */ - break; - } - } - - /* - Always insert a GAP event as we cannot know what has happened - in the cluster while not being connected. 
- */ - LEX_STRING const msg[2]= - { - { C_STRING_WITH_LEN("mysqld startup") }, - { C_STRING_WITH_LEN("cluster disconnect")} - }; - int error __attribute__((unused))= - inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]); - DBUG_ASSERT(!error); - break; - } - incident_id= 1; - { - thd->proc_info= "Waiting for ndbcluster to start"; - - mysql_mutex_lock(&injector_mutex); - while (!ndb_schema_share || - (ndb_binlog_running && !ndb_apply_status_share)) - { - /* ndb not connected yet */ - struct timespec abstime; - set_timespec(abstime, 1); - mysql_cond_timedwait(&injector_cond, &injector_mutex, &abstime); - if (ndbcluster_binlog_terminating) - { - mysql_mutex_unlock(&injector_mutex); - goto err; - } - } - mysql_mutex_unlock(&injector_mutex); - - if (thd_ndb == NULL) - { - DBUG_ASSERT(ndbcluster_hton->slot != ~(uint)0); - if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - { - sql_print_error("Could not allocate Thd_ndb object"); - goto err; - } - set_thd_ndb(thd, thd_ndb); - thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP; - thd->query_id= 0; // to keep valgrind quiet - } - } - - { - // wait for the first event - thd->proc_info= "Waiting for first event from ndbcluster"; - int schema_res, res; - Uint64 schema_gci; - do - { - DBUG_PRINT("info", ("Waiting for the first event")); - - if (ndbcluster_binlog_terminating) - goto err; - - schema_res= s_ndb->pollEvents(100, &schema_gci); - } while (schema_gci == 0 || ndb_latest_received_binlog_epoch == schema_gci); - if (ndb_binlog_running) - { - Uint64 gci= i_ndb->getLatestGCI(); - while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch) - { - if (ndbcluster_binlog_terminating) - goto err; - res= i_ndb->pollEvents(10, &gci); - } - if (gci > schema_gci) - { - schema_gci= gci; - } - } - // now check that we have epochs consistant with what we had before the restart - DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res, - (long) schema_gci)); - { - i_ndb->flushIncompleteEvents(schema_gci); - s_ndb->flushIncompleteEvents(schema_gci); - if (schema_gci < ndb_latest_handled_binlog_epoch) - { - sql_print_error("NDB Binlog: cluster has been restarted --initial or with older filesystem. " - "ndb_latest_handled_binlog_epoch: %u, while current epoch: %u. " - "RESET MASTER should be issued. Resetting ndb_latest_handled_binlog_epoch.", - (unsigned) ndb_latest_handled_binlog_epoch, (unsigned) schema_gci); - *p_latest_trans_gci= 0; - ndb_latest_handled_binlog_epoch= 0; - ndb_latest_applied_binlog_epoch= 0; - ndb_latest_received_binlog_epoch= 0; - } - else if (ndb_latest_applied_binlog_epoch > 0) - { - sql_print_warning("NDB Binlog: cluster has reconnected. 
" - "Changes to the database that occured while " - "disconnected will not be in the binlog"); - } - if (opt_ndb_extra_logging) - { - sql_print_information("NDB Binlog: starting log at epoch %u", - (unsigned)schema_gci); - } - } - } - { - static char db[]= ""; - thd->db= db; - } - do_ndbcluster_binlog_close_connection= BCCC_running; - for ( ; !((ndbcluster_binlog_terminating || - do_ndbcluster_binlog_close_connection) && - ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci) && - do_ndbcluster_binlog_close_connection != BCCC_restart; ) - { -#ifndef DBUG_OFF - if (do_ndbcluster_binlog_close_connection) - { - DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection: %d, " - "ndb_latest_handled_binlog_epoch: %lu, " - "*p_latest_trans_gci: %lu", - do_ndbcluster_binlog_close_connection, - (ulong) ndb_latest_handled_binlog_epoch, - (ulong) *p_latest_trans_gci)); - } -#endif -#ifdef RUN_NDB_BINLOG_TIMER - main_timer.stop(); - sql_print_information("main_timer %ld ms", main_timer.elapsed_ms()); - main_timer.start(); -#endif - - /* - now we don't want any events before next gci is complete - */ - thd->proc_info= "Waiting for event from ndbcluster"; - thd->set_time(); - - /* wait for event or 1000 ms */ - Uint64 gci= 0, schema_gci; - int res= 0, tot_poll_wait= 1000; - if (ndb_binlog_running) - { - res= i_ndb->pollEvents(tot_poll_wait, &gci); - tot_poll_wait= 0; - } - else - { - /* - Just consume any events, not used if no binlogging - e.g. node failure events - */ - Uint64 tmp_gci; - if (i_ndb->pollEvents(0, &tmp_gci)) - while (i_ndb->nextEvent()) - ; - } - int schema_res= s_ndb->pollEvents(tot_poll_wait, &schema_gci); - ndb_latest_received_binlog_epoch= gci; - - while (gci > schema_gci && schema_res >= 0) - { - static char buf[64]; - thd->proc_info= "Waiting for schema epoch"; - my_snprintf(buf, sizeof(buf), "%s %u(%u)", thd->proc_info, (unsigned) schema_gci, (unsigned) gci); - thd->proc_info= buf; - schema_res= s_ndb->pollEvents(10, &schema_gci); - } - - if ((ndbcluster_binlog_terminating || - do_ndbcluster_binlog_close_connection) && - (ndb_latest_handled_binlog_epoch >= *p_latest_trans_gci || - !ndb_binlog_running)) - break; /* Shutting down server */ - - if (ndb_binlog_index && ndb_binlog_index->s->has_old_version()) - { - if (ndb_binlog_index->s->has_old_version()) - { - trans_commit_stmt(thd); - close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); - ndb_binlog_index= 0; - } - } - - MEM_ROOT **root_ptr= - my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); - MEM_ROOT *old_root= *root_ptr; - MEM_ROOT mem_root; - init_sql_alloc(&mem_root, 4096, 0, MYF(0)); - List post_epoch_log_list; - List post_epoch_unlock_list; - *root_ptr= &mem_root; - - if (unlikely(schema_res > 0)) - { - thd->proc_info= "Processing events from schema table"; - s_ndb-> - setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip); - s_ndb-> - setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage); - NdbEventOperation *pOp= s_ndb->nextEvent(); - while (pOp != NULL) - { - if (!pOp->hasError()) - { - ndb_binlog_thread_handle_schema_event(thd, s_ndb, pOp, - &post_epoch_log_list, - &post_epoch_unlock_list, - &mem_root); - DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ? - s_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ? 
- i_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - if (i_ndb->getEventOperation() == NULL && - s_ndb->getEventOperation() == NULL && - do_ndbcluster_binlog_close_connection == BCCC_running) - { - DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart")); - do_ndbcluster_binlog_close_connection= BCCC_restart; - if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) - { - sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog " - "as latest received epoch is %lu", - (ulong) *p_latest_trans_gci, - (ulong) ndb_latest_received_binlog_epoch); - } - } - } - else - sql_print_error("NDB: error %lu (%s) on handling " - "binlog schema event", - (ulong) pOp->getNdbError().code, - pOp->getNdbError().message); - pOp= s_ndb->nextEvent(); - } - } - - if (res > 0) - { - DBUG_PRINT("info", ("pollEvents res: %d", res)); - thd->proc_info= "Processing events"; - NdbEventOperation *pOp= i_ndb->nextEvent(); - ndb_binlog_index_row row; - while (pOp != NULL) - { -#ifdef RUN_NDB_BINLOG_TIMER - Timer gci_timer, write_timer; - int event_count= 0; - gci_timer.start(); -#endif - gci= pOp->getGCI(); - DBUG_PRINT("info", ("Handling gci: %d", (unsigned)gci)); - // sometimes get TE_ALTER with invalid table - DBUG_ASSERT(pOp->getEventType() == NdbDictionary::Event::TE_ALTER || - ! IS_NDB_BLOB_PREFIX(pOp->getEvent()->getTable()->getName())); - DBUG_ASSERT(gci <= ndb_latest_received_binlog_epoch); - - /* initialize some variables for this epoch */ - g_ndb_log_slave_updates= opt_log_slave_updates; - i_ndb-> - setReportThreshEventGCISlip(opt_ndb_report_thresh_binlog_epoch_slip); - i_ndb->setReportThreshEventFreeMem(opt_ndb_report_thresh_binlog_mem_usage); - - bzero((char*) &row, sizeof(row)); - thd->variables.character_set_client= &my_charset_latin1; - injector::transaction trans; - // pass table map before epoch - { - Uint32 iter= 0; - const NdbEventOperation *gci_op; - Uint32 event_types; - while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types)) - != NULL) - { - NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData(); - DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x", - (long) gci_op, (long) share, event_types)); - // workaround for interface returning TE_STOP events - // which are normally filtered out below in the nextEvent loop - if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0) - { - DBUG_PRINT("info", ("Skipped TE_STOP on table %s", - gci_op->getEvent()->getTable()->getName())); - continue; - } - // this should not happen - if (share == NULL || share->table == NULL) - { - DBUG_PRINT("info", ("no share or table %s!", - gci_op->getEvent()->getTable()->getName())); - continue; - } - if (share == ndb_apply_status_share) - { - // skip this table, it is handled specially - continue; - } - TABLE *table= share->table; -#ifndef DBUG_OFF - const LEX_STRING &name= table->s->table_name; -#endif - if ((event_types & (NdbDictionary::Event::TE_INSERT | - NdbDictionary::Event::TE_UPDATE | - NdbDictionary::Event::TE_DELETE)) == 0) - { - DBUG_PRINT("info", ("skipping non data event table: %.*s", - (int) name.length, name.str)); - continue; - } - if (!trans.good()) - { - DBUG_PRINT("info", - ("Found new data event, initializing transaction")); - inj->new_trans(thd, &trans); - } - DBUG_PRINT("info", ("use_table: %.*s", - (int) name.length, name.str)); - injector::transaction::table tbl(table, TRUE); - int ret __attribute__((unused))= trans.use_table(::server_id, tbl); - DBUG_ASSERT(ret == 0); - } - } - if 
(trans.good()) - { - if (ndb_apply_status_share) - { - TABLE *table= ndb_apply_status_share->table; - -#ifndef DBUG_OFF - const LEX_STRING& name= table->s->table_name; - DBUG_PRINT("info", ("use_table: %.*s", - (int) name.length, name.str)); -#endif - injector::transaction::table tbl(table, TRUE); - int ret __attribute__((unused))= trans.use_table(::server_id, tbl); - DBUG_ASSERT(ret == 0); - - /* - Intialize table->record[0] - */ - empty_record(table); - - table->field[0]->store((longlong)::server_id); - table->field[1]->store((longlong)gci); - table->field[2]->store("", 0, &my_charset_bin); - table->field[3]->store((longlong)0); - table->field[4]->store((longlong)0); - trans.write_row(::server_id, - injector::transaction::table(table, TRUE), - &table->s->all_set, table->s->fields, - table->record[0]); - } - else - { - sql_print_error("NDB: Could not get apply status share"); - } - } -#ifdef RUN_NDB_BINLOG_TIMER - write_timer.start(); -#endif - do - { -#ifdef RUN_NDB_BINLOG_TIMER - event_count++; -#endif - if (pOp->hasError() && - ndb_binlog_thread_handle_error(i_ndb, pOp, row) < 0) - goto err; - -#ifndef DBUG_OFF - { - NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData(); - DBUG_PRINT("info", - ("EVENT TYPE: %d GCI: %ld last applied: %ld " - "share: 0x%lx (%s.%s)", pOp->getEventType(), - (long) gci, - (long) ndb_latest_applied_binlog_epoch, - (long) share, - share ? share->db : "'NULL'", - share ? share->table_name : "'NULL'")); - DBUG_ASSERT(share != 0); - } - // assert that there is consistancy between gci op list - // and event list - { - Uint32 iter= 0; - const NdbEventOperation *gci_op; - Uint32 event_types; - while ((gci_op= i_ndb->getGCIEventOperations(&iter, &event_types)) - != NULL) - { - if (gci_op == pOp) - break; - } - DBUG_ASSERT(gci_op == pOp); - DBUG_ASSERT((event_types & pOp->getEventType()) != 0); - } -#endif - if ((unsigned) pOp->getEventType() < - (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT) - ndb_binlog_thread_handle_data_event(i_ndb, pOp, row, trans); - else - { - // set injector_ndb database/schema from table internal name - int ret __attribute__((unused))= - i_ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable()); - DBUG_ASSERT(ret == 0); - ndb_binlog_thread_handle_non_data_event(thd, i_ndb, pOp, row); - // reset to catch errors - i_ndb->setDatabaseName(""); - DBUG_PRINT("info", ("s_ndb first: %s", s_ndb->getEventOperation() ? - s_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - DBUG_PRINT("info", ("i_ndb first: %s", i_ndb->getEventOperation() ? - i_ndb->getEventOperation()->getEvent()->getTable()->getName() : - "")); - if (i_ndb->getEventOperation() == NULL && - s_ndb->getEventOperation() == NULL && - do_ndbcluster_binlog_close_connection == BCCC_running) - { - DBUG_PRINT("info", ("do_ndbcluster_binlog_close_connection= BCCC_restart")); - do_ndbcluster_binlog_close_connection= BCCC_restart; - if (ndb_latest_received_binlog_epoch < *p_latest_trans_gci && ndb_binlog_running) - { - sql_print_error("NDB Binlog: latest transaction in epoch %lu not in binlog " - "as latest received epoch is %lu", - (ulong) *p_latest_trans_gci, - (ulong) ndb_latest_received_binlog_epoch); - } - } - } - - pOp= i_ndb->nextEvent(); - } while (pOp && pOp->getGCI() == gci); - - /* - note! 
pOp is not referring to an event in the next epoch - or is == 0 - */ -#ifdef RUN_NDB_BINLOG_TIMER - write_timer.stop(); -#endif - - if (trans.good()) - { - //DBUG_ASSERT(row.n_inserts || row.n_updates || row.n_deletes); - thd->proc_info= "Committing events to binlog"; - injector::transaction::binlog_pos start= trans.start_pos(); - if (int r= trans.commit()) - { - sql_print_error("NDB Binlog: " - "Error during COMMIT of GCI. Error: %d", - r); - /* TODO: Further handling? */ - } - row.gci= gci; - row.master_log_file= start.file_name(); - row.master_log_pos= start.file_pos(); - - DBUG_PRINT("info", ("COMMIT gci: %lu", (ulong) gci)); - if (ndb_update_ndb_binlog_index) - ndb_add_ndb_binlog_index(thd, &row); - ndb_latest_applied_binlog_epoch= gci; - } - ndb_latest_handled_binlog_epoch= gci; -#ifdef RUN_NDB_BINLOG_TIMER - gci_timer.stop(); - sql_print_information("gci %ld event_count %d write time " - "%ld(%d e/s), total time %ld(%d e/s)", - (ulong)gci, event_count, - write_timer.elapsed_ms(), - (1000*event_count) / write_timer.elapsed_ms(), - gci_timer.elapsed_ms(), - (1000*event_count) / gci_timer.elapsed_ms()); -#endif - } - } - - ndb_binlog_thread_handle_schema_event_post_epoch(thd, - &post_epoch_log_list, - &post_epoch_unlock_list); - free_root(&mem_root, MYF(0)); - *root_ptr= old_root; - ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch; - } - if (do_ndbcluster_binlog_close_connection == BCCC_restart) - { - ndb_binlog_tables_inited= FALSE; - trans_commit_stmt(thd); - close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); - ndb_binlog_index= 0; - goto restart; - } -err: - sql_print_information("Stopping Cluster Binlog"); - DBUG_PRINT("info",("Shutting down cluster binlog thread")); - thd->proc_info= "Shutting down"; - thd->get_stmt_da()->set_overwrite_status(true); - thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd); - thd->get_stmt_da()->set_overwrite_status(false); - close_thread_tables(thd); - thd->mdl_context.release_transactional_locks(); - mysql_mutex_lock(&injector_mutex); - /* don't mess with the injector_ndb anymore from other threads */ - injector_thd= 0; - injector_ndb= 0; - p_latest_trans_gci= 0; - schema_ndb= 0; - mysql_mutex_unlock(&injector_mutex); - thd->db= 0; // as not to try to free memory - - if (ndb_apply_status_share) - { - /* ndb_share reference binlog extra free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u", - ndb_apply_status_share->key, - ndb_apply_status_share->use_count)); - free_share(&ndb_apply_status_share); - ndb_apply_status_share= 0; - } - if (ndb_schema_share) - { - /* begin protect ndb_schema_share */ - mysql_mutex_lock(&ndb_schema_share_mutex); - /* ndb_share reference binlog extra free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog extra free use_count: %u", - ndb_schema_share->key, - ndb_schema_share->use_count)); - free_share(&ndb_schema_share); - ndb_schema_share= 0; - ndb_binlog_tables_inited= 0; - mysql_mutex_unlock(&ndb_schema_share_mutex); - /* end protect ndb_schema_share */ - } - - /* remove all event operations */ - if (s_ndb) - { - NdbEventOperation *op; - DBUG_PRINT("info",("removing all event operations")); - while ((op= s_ndb->getEventOperation())) - { - DBUG_ASSERT(! 
IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName())); - DBUG_PRINT("info",("removing event operation on %s", - op->getEvent()->getName())); - NDB_SHARE *share= (NDB_SHARE*) op->getCustomData(); - DBUG_ASSERT(share != 0); - DBUG_ASSERT(share->op == op || - share->op_old == op); - share->op= share->op_old= 0; - /* ndb_share reference binlog free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u", - share->key, share->use_count)); - free_share(&share); - s_ndb->dropEventOperation(op); - } - delete s_ndb; - s_ndb= 0; - } - if (i_ndb) - { - NdbEventOperation *op; - DBUG_PRINT("info",("removing all event operations")); - while ((op= i_ndb->getEventOperation())) - { - DBUG_ASSERT(! IS_NDB_BLOB_PREFIX(op->getEvent()->getTable()->getName())); - DBUG_PRINT("info",("removing event operation on %s", - op->getEvent()->getName())); - NDB_SHARE *share= (NDB_SHARE*) op->getCustomData(); - DBUG_ASSERT(share != 0); - DBUG_ASSERT(share->op == op || - share->op_old == op); - share->op= share->op_old= 0; - /* ndb_share reference binlog free */ - DBUG_PRINT("NDB_SHARE", ("%s binlog free use_count: %u", - share->key, share->use_count)); - free_share(&share); - i_ndb->dropEventOperation(op); - } - delete i_ndb; - i_ndb= 0; - } - - my_hash_free(&ndb_schema_objects); - - delete thd; - - ndb_binlog_thread_running= -1; - ndb_binlog_running= FALSE; - mysql_cond_signal(&injector_cond); - - DBUG_PRINT("exit", ("ndb_binlog_thread")); - - DBUG_LEAVE; // Must match DBUG_ENTER() - my_thread_end(); - pthread_exit(0); - return NULL; // Avoid compiler warnings -} - -bool -ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print, - enum ha_stat_type stat_type) -{ - char buf[IO_SIZE]; - uint buflen; - ulonglong ndb_latest_epoch= 0; - DBUG_ENTER("ndbcluster_show_status_binlog"); - - mysql_mutex_lock(&injector_mutex); - if (injector_ndb) - { - char buff1[22],buff2[22],buff3[22],buff4[22],buff5[22]; - ndb_latest_epoch= injector_ndb->getLatestGCI(); - mysql_mutex_unlock(&injector_mutex); - - buflen= - snprintf(buf, sizeof(buf), - "latest_epoch=%s, " - "latest_trans_epoch=%s, " - "latest_received_binlog_epoch=%s, " - "latest_handled_binlog_epoch=%s, " - "latest_applied_binlog_epoch=%s", - llstr(ndb_latest_epoch, buff1), - llstr(*p_latest_trans_gci, buff2), - llstr(ndb_latest_received_binlog_epoch, buff3), - llstr(ndb_latest_handled_binlog_epoch, buff4), - llstr(ndb_latest_applied_binlog_epoch, buff5)); - if (stat_print(thd, ndbcluster_hton_name, ndbcluster_hton_name_length, - "binlog", strlen("binlog"), - buf, buflen)) - DBUG_RETURN(TRUE); - } - else - mysql_mutex_unlock(&injector_mutex); - DBUG_RETURN(FALSE); -} - -#endif /* HAVE_NDB_BINLOG */ -#endif diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h deleted file mode 100644 index a02f687d76f..00000000000 --- a/sql/ha_ndbcluster_binlog.h +++ /dev/null @@ -1,239 +0,0 @@ -#ifndef HA_NDBCLUSTER_BINLOG_INCLUDED -#define HA_NDBCLUSTER_BINLOG_INCLUDED - -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ - -#include "sql_class.h" /* THD */ - -// Typedefs for long names -typedef NdbDictionary::Object NDBOBJ; -typedef NdbDictionary::Column NDBCOL; -typedef NdbDictionary::Table NDBTAB; -typedef NdbDictionary::Index NDBINDEX; -typedef NdbDictionary::Dictionary NDBDICT; -typedef NdbDictionary::Event NDBEVENT; - -#define IS_TMP_PREFIX(A) (is_prefix(A, tmp_file_prefix)) - -#define INJECTOR_EVENT_LEN 200 - -#define NDB_INVALID_SCHEMA_OBJECT 241 - -/* server id's with high bit set is reservered */ -#define NDB_ANYVALUE_FOR_NOLOGGING 0xFFFFFFFF -#define NDB_ANYVALUE_RESERVED 0x80000000 - -extern handlerton *ndbcluster_hton; - -/* - The numbers below must not change as they - are passed between mysql servers, and if changed - would break compatablility. Add new numbers to - the end. -*/ -enum SCHEMA_OP_TYPE -{ - SOT_DROP_TABLE= 0, - SOT_CREATE_TABLE= 1, - SOT_RENAME_TABLE_NEW= 2, - SOT_ALTER_TABLE= 3, - SOT_DROP_DB= 4, - SOT_CREATE_DB= 5, - SOT_ALTER_DB= 6, - SOT_CLEAR_SLOCK= 7, - SOT_TABLESPACE= 8, - SOT_LOGFILE_GROUP= 9, - SOT_RENAME_TABLE= 10, - SOT_TRUNCATE_TABLE= 11 -}; - -const uint max_ndb_nodes= 64; /* multiple of 32 */ - -static const char *ha_ndb_ext=".ndb"; -static const char share_prefix[]= "./"; - -class Ndb_table_guard -{ -public: - Ndb_table_guard(NDBDICT *dict, const char *tabname) - : m_dict(dict) - { - DBUG_ENTER("Ndb_table_guard"); - m_ndbtab= m_dict->getTableGlobal(tabname); - m_invalidate= 0; - DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab)); - DBUG_VOID_RETURN; - } - ~Ndb_table_guard() - { - DBUG_ENTER("~Ndb_table_guard"); - if (m_ndbtab) - { - DBUG_PRINT("info", ("m_ndbtab: %p m_invalidate: %d", - m_ndbtab, m_invalidate)); - m_dict->removeTableGlobal(*m_ndbtab, m_invalidate); - } - DBUG_VOID_RETURN; - } - const NDBTAB *get_table() { return m_ndbtab; } - void invalidate() { m_invalidate= 1; } - const NDBTAB *release() - { - DBUG_ENTER("Ndb_table_guard::release"); - const NDBTAB *tmp= m_ndbtab; - DBUG_PRINT("info", ("m_ndbtab: %p", m_ndbtab)); - m_ndbtab = 0; - DBUG_RETURN(tmp); - } -private: - const NDBTAB *m_ndbtab; - NDBDICT *m_dict; - int m_invalidate; -}; - -#ifdef HAVE_NDB_BINLOG - -#ifdef HAVE_PSI_INTERFACE -extern PSI_mutex_key key_injector_mutex, key_ndb_schema_share_mutex, - key_ndb_schema_object_mutex; -extern PSI_cond_key key_injector_cond; -extern PSI_thread_key key_thread_ndb_binlog; -#endif /* HAVE_PSI_INTERFACE */ - -extern pthread_t ndb_binlog_thread; -extern mysql_mutex_t injector_mutex; -extern mysql_cond_t injector_cond; - -extern unsigned char g_node_id_map[max_ndb_nodes]; -extern pthread_t ndb_util_thread; -extern mysql_mutex_t LOCK_ndb_util_thread; -extern mysql_cond_t COND_ndb_util_thread; -extern int ndbcluster_util_inited; -extern mysql_mutex_t ndbcluster_mutex; -extern HASH ndbcluster_open_tables; -extern Ndb_cluster_connection* g_ndb_cluster_connection; -extern long ndb_number_of_storage_nodes; - -/* - Initialize the binlog part of the ndb handlerton -*/ -void ndbcluster_binlog_init_handlerton(); -/* - Initialize the binlog part of the NDB_SHARE -*/ -int ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *table); - -bool ndbcluster_check_if_local_table(const char *dbname, const char *tabname); -bool ndbcluster_check_if_local_tables_in_db(THD *thd, const char *dbname); - -int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, - uint 
key_len, - const char *db, - const char *table_name, - my_bool share_may_exist); -int ndbcluster_create_event(Ndb *ndb, const NDBTAB *table, - const char *event_name, NDB_SHARE *share, - int push_warning= 0); -int ndbcluster_create_event_ops(NDB_SHARE *share, - const NDBTAB *ndbtab, - const char *event_name); -int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, - const char *query, int query_length, - const char *db, const char *table_name, - uint32 ndb_table_id, - uint32 ndb_table_version, - enum SCHEMA_OP_TYPE type, - const char *new_db, - const char *new_table_name); -int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name, - NDB_SHARE *share, - const char *type_str); -void ndb_rep_event_name(String *event_name, - const char *db, const char *tbl); -int ndb_create_table_from_engine(THD *thd, const char *db, - const char *table_name); -int ndbcluster_binlog_start(); -pthread_handler_t ndb_binlog_thread_func(void *arg); - -/* - table mysql.ndb_apply_status -*/ -int ndbcluster_setup_binlog_table_shares(THD *thd); -extern NDB_SHARE *ndb_apply_status_share; -extern NDB_SHARE *ndb_schema_share; - -extern THD *injector_thd; -extern my_bool ndb_binlog_running; -extern my_bool ndb_binlog_tables_inited; - -bool -ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print, - enum ha_stat_type stat_type); - -/* - prototypes for ndb handler utility function also needed by - the ndb binlog code -*/ -int cmp_frm(const NDBTAB *ndbtab, const void *pack_data, - uint pack_length); -int ndbcluster_find_all_files(THD *thd); -#endif /* HAVE_NDB_BINLOG */ - -void ndb_unpack_record(TABLE *table, NdbValue *value, - MY_BITMAP *defined, uchar *buf); -char *ndb_pack_varchar(const NDBCOL *col, char *buf, - const char *str, int sz); - -NDB_SHARE *ndbcluster_get_share(const char *key, - TABLE *table, - bool create_if_not_exists, - bool have_lock); -NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share); -void ndbcluster_free_share(NDB_SHARE **share, bool have_lock); -void ndbcluster_real_free_share(NDB_SHARE **share); -int handle_trailing_share(NDB_SHARE *share); -inline NDB_SHARE *get_share(const char *key, - TABLE *table, - bool create_if_not_exists= TRUE, - bool have_lock= FALSE) -{ - return ndbcluster_get_share(key, table, create_if_not_exists, have_lock); -} - -inline NDB_SHARE *get_share(NDB_SHARE *share) -{ - return ndbcluster_get_share(share); -} - -inline void free_share(NDB_SHARE **share, bool have_lock= FALSE) -{ - ndbcluster_free_share(share, have_lock); -} - -inline -Thd_ndb * -get_thd_ndb(THD *thd) -{ return (Thd_ndb *) thd_get_ha_data(thd, ndbcluster_hton); } - -inline -void -set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) -{ thd_set_ha_data(thd, ndbcluster_hton, thd_ndb); } - -Ndb* check_ndb_in_thd(THD* thd); - -#endif /* HA_NDBCLUSTER_BINLOG_INCLUDED */ diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc deleted file mode 100644 index fd80304d400..00000000000 --- a/sql/ha_ndbcluster_cond.cc +++ /dev/null @@ -1,1475 +0,0 @@ -/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA -*/ - -/* - This file defines the NDB Cluster handler engine_condition_pushdown -*/ - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "sql_priv.h" -#include "sql_class.h" // set_var.h: THD -#include "my_global.h" // WITH_* -#include "log.h" // sql_print_error - -#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE -#include -#include "ha_ndbcluster_cond.h" - -// Typedefs for long names -typedef NdbDictionary::Column NDBCOL; -typedef NdbDictionary::Table NDBTAB; - - -/** - Serialize a constant item into a Ndb_cond node. - - @param const_type item's result type - @param item item to be serialized - @param curr_cond Ndb_cond node the item to be serialized into - @param context Traverse context -*/ - -static void ndb_serialize_const(Item_result const_type, const Item *item, - Ndb_cond *curr_cond, - Ndb_cond_traverse_context *context) -{ - DBUG_ASSERT(item->const_item()); - switch (const_type) { - case STRING_RESULT: - { - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::STRING_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(STRING_RESULT); - context->expect_collation(item->collation.collation); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - // Check that string result have correct collation - if (!context->expecting_collation(item->collation.collation)) - { - DBUG_PRINT("info", ("Found non-matching collation %s", - item->collation.collation->name)); - context->supported= FALSE; - } - } - break; - } - case REAL_RESULT: - { - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::REAL_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(REAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - break; - } - case INT_RESULT: - { - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::INT_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(INT_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - break; - } - case DECIMAL_RESULT: - { - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::DECIMAL_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(DECIMAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - break; - } - default: - break; - } -} -/* - Serialize the item tree into a linked list represented by Ndb_cond - for fast generation of NbdScanFilter. 
Adds information such as - position of fields that is not directly available in the Item tree. - Also checks if condition is supported. -*/ -void ndb_serialize_cond(const Item *item, void *arg) -{ - Ndb_cond_traverse_context *context= (Ndb_cond_traverse_context *) arg; - DBUG_ENTER("ndb_serialize_cond"); - - // Check if we are skipping arguments to a function to be evaluated - if (context->skip) - { - if (!item) - { - DBUG_PRINT("info", ("Unexpected mismatch of found and expected number of function arguments %u", context->skip)); - sql_print_error("ndb_serialize_cond: Unexpected mismatch of found and " - "expected number of function arguments %u", context->skip); - context->skip= 0; - DBUG_VOID_RETURN; - } - DBUG_PRINT("info", ("Skiping argument %d", context->skip)); - context->skip--; - switch (item->type()) { - case Item::FUNC_ITEM: - { - Item_func *func_item= (Item_func *) item; - context->skip+= func_item->argument_count(); - break; - } - case Item::INT_ITEM: - case Item::REAL_ITEM: - case Item::STRING_ITEM: - case Item::VARBIN_ITEM: - case Item::DECIMAL_ITEM: - break; - default: - context->supported= FALSE; - break; - } - - DBUG_VOID_RETURN; - } - - if (context->supported) - { - Ndb_rewrite_context *rewrite_context2= context->rewrite_stack; - const Item_func *rewrite_func_item; - // Check if we are rewriting some unsupported function call - if (rewrite_context2 && - (rewrite_func_item= rewrite_context2->func_item) && - rewrite_context2->count++ == 0) - { - switch (rewrite_func_item->functype()) { - case Item_func::BETWEEN: - /* - Rewrite - | BETWEEN | AND | - to | > | AND - | < | - or actually in prefix format - BEGIN(AND) GT(|, |), - LT(|, |), END() - */ - case Item_func::IN_FUNC: - { - /* - Rewrite | IN(|, |,..) - to | = | OR - = | ... - or actually in prefix format - BEGIN(OR) EQ(|, ), - EQ(|, |), ... 
END() - Each part of the disjunction is added for each call - to ndb_serialize_cond and end of rewrite statement - is wrapped in end of ndb_serialize_cond - */ - if (context->expecting(item->type()) || item->const_item()) - { - // This is the | item, save it in the rewrite context - rewrite_context2->left_hand_item= item; - if (item->type() == Item::FUNC_ITEM) - { - Item_func *func_item= (Item_func *) item; - if ((func_item->functype() == Item_func::UNKNOWN_FUNC || - func_item->functype() == Item_func::NEG_FUNC) && - func_item->const_item()) - { - // Skip any arguments since we will evaluate function instead - DBUG_PRINT("info", ("Skip until end of arguments marker")); - context->skip= func_item->argument_count(); - } - else - { - DBUG_PRINT("info", ("Found unsupported functional expression in BETWEEN|IN")); - context->supported= FALSE; - DBUG_VOID_RETURN; - - } - } - } - else - { - // Non-supported BETWEEN|IN expression - DBUG_PRINT("info", ("Found unexpected item of type %u in BETWEEN|IN", - item->type())); - context->supported= FALSE; - DBUG_VOID_RETURN; - } - break; - } - default: - context->supported= FALSE; - break; - } - DBUG_VOID_RETURN; - } - else - { - Ndb_cond_stack *ndb_stack= context->stack_ptr; - Ndb_cond *prev_cond= context->cond_ptr; - Ndb_cond *curr_cond= context->cond_ptr= new Ndb_cond(); - if (!ndb_stack->ndb_cond) - ndb_stack->ndb_cond= curr_cond; - curr_cond->prev= prev_cond; - if (prev_cond) prev_cond->next= curr_cond; - // Check if we are rewriting some unsupported function call - if (context->rewrite_stack) - { - Ndb_rewrite_context *rewrite_context= context->rewrite_stack; - const Item_func *func_item= rewrite_context->func_item; - switch (func_item->functype()) { - case Item_func::BETWEEN: - { - /* - Rewrite - | BETWEEN | AND | - to | > | AND - | < | - or actually in prefix format - BEGIN(AND) GT(|, |), - LT(|, |), END() - */ - if (rewrite_context->count == 2) - { - // Lower limit of BETWEEN - DBUG_PRINT("info", ("GE_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::GE_FUNC, 2); - } - else if (rewrite_context->count == 3) - { - // Upper limit of BETWEEN - DBUG_PRINT("info", ("LE_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::LE_FUNC, 2); - } - else - { - // Illegal BETWEEN expression - DBUG_PRINT("info", ("Illegal BETWEEN expression")); - context->supported= FALSE; - DBUG_VOID_RETURN; - } - break; - } - case Item_func::IN_FUNC: - { - /* - Rewrite | IN(|, |,..) - to | = | OR - = | ... - or actually in prefix format - BEGIN(OR) EQ(|, ), - EQ(|, |), ... 
END() - Each part of the disjunction is added for each call - to ndb_serialize_cond and end of rewrite statement - is wrapped in end of ndb_serialize_cond - */ - DBUG_PRINT("info", ("EQ_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::EQ_FUNC, 2); - break; - } - default: - context->supported= FALSE; - } - // Handle left hand | - context->rewrite_stack= NULL; // Disable rewrite mode - context->expect_only(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - context->expect(Item::INT_ITEM); - context->expect(Item::STRING_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FUNC_ITEM); - ndb_serialize_cond(rewrite_context->left_hand_item, arg); - context->skip= 0; // Any FUNC_ITEM expression has already been parsed - context->rewrite_stack= rewrite_context; // Enable rewrite mode - if (!context->supported) - DBUG_VOID_RETURN; - - prev_cond= context->cond_ptr; - curr_cond= context->cond_ptr= new Ndb_cond(); - prev_cond->next= curr_cond; - } - - // Check for end of AND/OR expression - if (!item) - { - // End marker for condition group - DBUG_PRINT("info", ("End of condition group")); - curr_cond->ndb_item= new Ndb_item(NDB_END_COND); - } - else - { - switch (item->type()) { - case Item::FIELD_ITEM: - { - Item_field *field_item= (Item_field *) item; - Field *field= field_item->field; - enum_field_types type= field->type(); - /* - Check that the field is part of the table of the handler - instance and that we expect a field with of this result type. - */ - if (context->table->s == field->table->s) - { - const NDBTAB *tab= context->ndb_table; - DBUG_PRINT("info", ("FIELD_ITEM")); - DBUG_PRINT("info", ("table %s", tab->getName())); - DBUG_PRINT("info", ("column %s", field->field_name)); - DBUG_PRINT("info", ("type %d", field->type())); - DBUG_PRINT("info", ("result type %d", field->result_type())); - - // Check that we are expecting a field and with the correct - // result type - if (context->expecting(Item::FIELD_ITEM) && - context->expecting_field_type(field->type()) && - (context->expecting_field_result(field->result_type()) || - // Date and year can be written as string or int - ((type == MYSQL_TYPE_TIME || - type == MYSQL_TYPE_DATE || - type == MYSQL_TYPE_YEAR || - type == MYSQL_TYPE_DATETIME) - ? (context->expecting_field_result(STRING_RESULT) || - context->expecting_field_result(INT_RESULT)) - : TRUE)) && - // Bit fields no yet supported in scan filter - type != MYSQL_TYPE_BIT && - // No BLOB support in scan filter - type != MYSQL_TYPE_TINY_BLOB && - type != MYSQL_TYPE_MEDIUM_BLOB && - type != MYSQL_TYPE_LONG_BLOB && - type != MYSQL_TYPE_BLOB) - { - const NDBCOL *col= tab->getColumn(field->field_name); - DBUG_ASSERT(col); - curr_cond->ndb_item= new Ndb_item(field, col->getColumnNo()); - context->dont_expect(Item::FIELD_ITEM); - context->expect_no_field_result(); - if (! 
context->expecting_nothing()) - { - // We have not seen second argument yet - if (type == MYSQL_TYPE_TIME || - type == MYSQL_TYPE_DATE || - type == MYSQL_TYPE_YEAR || - type == MYSQL_TYPE_DATETIME) - { - context->expect_only(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - } - else - switch (field->result_type()) { - case STRING_RESULT: - // Expect char string or binary string - context->expect_only(Item::STRING_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect_collation(field_item->collation.collation); - break; - case REAL_RESULT: - context->expect_only(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::INT_ITEM); - break; - case INT_RESULT: - context->expect_only(Item::INT_ITEM); - context->expect(Item::VARBIN_ITEM); - break; - case DECIMAL_RESULT: - context->expect_only(Item::DECIMAL_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::INT_ITEM); - break; - default: - break; - } - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - // Check that field and string constant collations are the same - if ((field->result_type() == STRING_RESULT) && - !context->expecting_collation(item->collation.collation) - && type != MYSQL_TYPE_TIME - && type != MYSQL_TYPE_DATE - && type != MYSQL_TYPE_YEAR - && type != MYSQL_TYPE_DATETIME) - { - DBUG_PRINT("info", ("Found non-matching collation %s", - item->collation.collation->name)); - context->supported= FALSE; - } - } - break; - } - else - { - DBUG_PRINT("info", ("Was not expecting field of type %u(%u)", - field->result_type(), type)); - context->supported= FALSE; - } - } - else - { - DBUG_PRINT("info", ("Was not expecting field from table %s (%s)", - context->table->s->table_name.str, - field->table->s->table_name.str)); - context->supported= FALSE; - } - break; - } - case Item::FUNC_ITEM: - { - Item_func *func_item= (Item_func *) item; - // Check that we expect a function or functional expression here - if (context->expecting(Item::FUNC_ITEM) || - func_item->functype() == Item_func::UNKNOWN_FUNC || - func_item->functype() == Item_func::NEG_FUNC) - context->expect_nothing(); - else - { - // Did not expect function here - context->supported= FALSE; - break; - } - - switch (func_item->functype()) { - case Item_func::EQ_FUNC: - { - DBUG_PRINT("info", ("EQ_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::NE_FUNC: - { - DBUG_PRINT("info", ("NE_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::LT_FUNC: - { - DBUG_PRINT("info", ("LT_FUNC")); - curr_cond->ndb_item= new 
Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::LE_FUNC: - { - DBUG_PRINT("info", ("LE_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::GE_FUNC: - { - DBUG_PRINT("info", ("GE_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::GT_FUNC: - { - DBUG_PRINT("info", ("GT_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::REAL_ITEM); - context->expect(Item::DECIMAL_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::LIKE_FUNC: - { - DBUG_PRINT("info", ("LIKE_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::STRING_ITEM); - context->expect(Item::FIELD_ITEM); - context->expect_only_field_type(MYSQL_TYPE_STRING); - context->expect_field_type(MYSQL_TYPE_VAR_STRING); - context->expect_field_type(MYSQL_TYPE_VARCHAR); - context->expect_field_result(STRING_RESULT); - context->expect(Item::FUNC_ITEM); - break; - } - case Item_func::ISNULL_FUNC: - { - DBUG_PRINT("info", ("ISNULL_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::ISNOTNULL_FUNC: - { - DBUG_PRINT("info", ("ISNOTNULL_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - context->expect(Item::FIELD_ITEM); - context->expect_field_result(STRING_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(INT_RESULT); - context->expect_field_result(DECIMAL_RESULT); - break; - } - case Item_func::NOT_FUNC: - { - DBUG_PRINT("info", ("NOT_FUNC")); - curr_cond->ndb_item= new Ndb_item(func_item->functype(), - func_item); - 
context->expect(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - break; - } - case Item_func::BETWEEN: - { - DBUG_PRINT("info", ("BETWEEN, rewriting using AND")); - Item_func_between *between_func= (Item_func_between *) func_item; - Ndb_rewrite_context *rewrite_context= - new Ndb_rewrite_context(func_item); - rewrite_context->next= context->rewrite_stack; - context->rewrite_stack= rewrite_context; - if (between_func->negated) - { - DBUG_PRINT("info", ("NOT_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::NOT_FUNC, 1); - prev_cond= curr_cond; - curr_cond= context->cond_ptr= new Ndb_cond(); - curr_cond->prev= prev_cond; - prev_cond->next= curr_cond; - } - DBUG_PRINT("info", ("COND_AND_FUNC")); - curr_cond->ndb_item= - new Ndb_item(Item_func::COND_AND_FUNC, - func_item->argument_count() - 1); - context->expect_only(Item::FIELD_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::STRING_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FUNC_ITEM); - break; - } - case Item_func::IN_FUNC: - { - DBUG_PRINT("info", ("IN_FUNC, rewriting using OR")); - Item_func_in *in_func= (Item_func_in *) func_item; - Ndb_rewrite_context *rewrite_context= - new Ndb_rewrite_context(func_item); - rewrite_context->next= context->rewrite_stack; - context->rewrite_stack= rewrite_context; - if (in_func->negated) - { - DBUG_PRINT("info", ("NOT_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::NOT_FUNC, 1); - prev_cond= curr_cond; - curr_cond= context->cond_ptr= new Ndb_cond(); - curr_cond->prev= prev_cond; - prev_cond->next= curr_cond; - } - DBUG_PRINT("info", ("COND_OR_FUNC")); - curr_cond->ndb_item= new Ndb_item(Item_func::COND_OR_FUNC, - func_item->argument_count() - 1); - context->expect_only(Item::FIELD_ITEM); - context->expect(Item::INT_ITEM); - context->expect(Item::STRING_ITEM); - context->expect(Item::VARBIN_ITEM); - context->expect(Item::FUNC_ITEM); - break; - } - case Item_func::NEG_FUNC: - case Item_func::UNKNOWN_FUNC: - { - DBUG_PRINT("info", ("UNKNOWN_FUNC %s", - func_item->const_item()?"const":"")); - DBUG_PRINT("info", ("result type %d", func_item->result_type())); - if (func_item->const_item()) - { - ndb_serialize_const(func_item->result_type(), item, curr_cond, - context); - - // Skip any arguments since we will evaluate function instead - DBUG_PRINT("info", ("Skip until end of arguments marker")); - context->skip= func_item->argument_count(); - } - else - // Function does not return constant expression - context->supported= FALSE; - break; - } - default: - { - DBUG_PRINT("info", ("Found func_item of type %d", - func_item->functype())); - context->supported= FALSE; - } - } - break; - } - case Item::STRING_ITEM: - DBUG_PRINT("info", ("STRING_ITEM")); - if (context->expecting(Item::STRING_ITEM)) - { -#ifndef DBUG_OFF - char buff[256]; - String str(buff,(uint32) sizeof(buff), system_charset_info); - str.length(0); - Item_string *string_item= (Item_string *) item; - DBUG_PRINT("info", ("value \"%s\"", - string_item->val_str(&str)->ptr())); -#endif - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::STRING_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! 
context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(STRING_RESULT); - context->expect_collation(item->collation.collation); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - // Check that we are comparing with a field with same collation - if (!context->expecting_collation(item->collation.collation)) - { - DBUG_PRINT("info", ("Found non-matching collation %s", - item->collation.collation->name)); - context->supported= FALSE; - } - } - } - else - context->supported= FALSE; - break; - case Item::INT_ITEM: - DBUG_PRINT("info", ("INT_ITEM")); - if (context->expecting(Item::INT_ITEM)) - { - DBUG_PRINT("info", ("value %ld", - (long) ((Item_int*) item)->value)); - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::INT_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(INT_RESULT); - context->expect_field_result(REAL_RESULT); - context->expect_field_result(DECIMAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= FALSE; - break; - case Item::REAL_ITEM: - DBUG_PRINT("info", ("REAL_ITEM")); - if (context->expecting(Item::REAL_ITEM)) - { - DBUG_PRINT("info", ("value %f", ((Item_float*) item)->value)); - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::REAL_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(REAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= FALSE; - break; - case Item::VARBIN_ITEM: - DBUG_PRINT("info", ("VARBIN_ITEM")); - if (context->expecting(Item::VARBIN_ITEM)) - { - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::VARBIN_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(STRING_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= FALSE; - break; - case Item::DECIMAL_ITEM: - DBUG_PRINT("info", ("DECIMAL_ITEM")); - if (context->expecting(Item::DECIMAL_ITEM)) - { - DBUG_PRINT("info", ("value %f", - ((Item_decimal*) item)->val_real())); - NDB_ITEM_QUALIFICATION q; - q.value_type= Item::DECIMAL_ITEM; - curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); - if (! 
context->expecting_no_field_result()) - { - // We have not seen the field argument yet - context->expect_only(Item::FIELD_ITEM); - context->expect_only_field_result(REAL_RESULT); - context->expect_field_result(DECIMAL_RESULT); - } - else - { - // Expect another logical expression - context->expect_only(Item::FUNC_ITEM); - context->expect(Item::COND_ITEM); - } - } - else - context->supported= FALSE; - break; - case Item::COND_ITEM: - { - Item_cond *cond_item= (Item_cond *) item; - - if (context->expecting(Item::COND_ITEM)) - { - switch (cond_item->functype()) { - case Item_func::COND_AND_FUNC: - DBUG_PRINT("info", ("COND_AND_FUNC")); - curr_cond->ndb_item= new Ndb_item(cond_item->functype(), - cond_item); - break; - case Item_func::COND_OR_FUNC: - DBUG_PRINT("info", ("COND_OR_FUNC")); - curr_cond->ndb_item= new Ndb_item(cond_item->functype(), - cond_item); - break; - default: - DBUG_PRINT("info", ("COND_ITEM %d", cond_item->functype())); - context->supported= FALSE; - break; - } - } - else - { - /* Did not expect condition */ - context->supported= FALSE; - } - break; - } - case Item::CACHE_ITEM: - { - DBUG_PRINT("info", ("CACHE_ITEM")); - if (item->const_item()) - { - ndb_serialize_const(((Item_cache*)item)->result_type(), item, - curr_cond, context); - } - else - context->supported= FALSE; - - break; - } - default: - { - DBUG_PRINT("info", ("Found item of type %d", item->type())); - context->supported= FALSE; - } - } - } - if (context->supported && context->rewrite_stack) - { - Ndb_rewrite_context *rewrite_context= context->rewrite_stack; - if (rewrite_context->count == - rewrite_context->func_item->argument_count()) - { - // Rewrite is done, wrap an END() at the en - DBUG_PRINT("info", ("End of condition group")); - prev_cond= curr_cond; - curr_cond= context->cond_ptr= new Ndb_cond(); - curr_cond->prev= prev_cond; - prev_cond->next= curr_cond; - curr_cond->ndb_item= new Ndb_item(NDB_END_COND); - // Pop rewrite stack - context->rewrite_stack= rewrite_context->next; - rewrite_context->next= NULL; - delete(rewrite_context); - } - } - } - } - - DBUG_VOID_RETURN; -} - -/* - Push a condition - */ -const -COND* -ha_ndbcluster_cond::cond_push(const COND *cond, - TABLE *table, const NDBTAB *ndb_table) -{ - DBUG_ENTER("cond_push"); - Ndb_cond_stack *ndb_cond = new Ndb_cond_stack(); - if (ndb_cond == NULL) - { - my_errno= HA_ERR_OUT_OF_MEM; - DBUG_RETURN(NULL); - } - if (m_cond_stack) - ndb_cond->next= m_cond_stack; - else - ndb_cond->next= NULL; - m_cond_stack= ndb_cond; - - if (serialize_cond(cond, ndb_cond, table, ndb_table)) - { - DBUG_RETURN(NULL); - } - else - { - cond_pop(); - } - DBUG_RETURN(cond); -} - -/* - Pop the top condition from the condition stack -*/ -void -ha_ndbcluster_cond::cond_pop() -{ - Ndb_cond_stack *ndb_cond_stack= m_cond_stack; - if (ndb_cond_stack) - { - m_cond_stack= ndb_cond_stack->next; - ndb_cond_stack->next= NULL; - delete ndb_cond_stack; - } -} - -/* - Clear the condition stack -*/ -void -ha_ndbcluster_cond::cond_clear() -{ - DBUG_ENTER("cond_clear"); - while (m_cond_stack) - cond_pop(); - - DBUG_VOID_RETURN; -} - -bool -ha_ndbcluster_cond::serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond, - TABLE *table, const NDBTAB *ndb_table) -{ - DBUG_ENTER("serialize_cond"); - Item *item= (Item *) cond; - Ndb_cond_traverse_context context(table, ndb_table, ndb_cond); - // Expect a logical expression - context.expect(Item::FUNC_ITEM); - context.expect(Item::COND_ITEM); - item->traverse_cond(&ndb_serialize_cond, (void *) &context, Item::PREFIX); - DBUG_PRINT("info", 
("The pushed condition is %ssupported", (context.supported)?"":"not ")); - - DBUG_RETURN(context.supported); -} - -int -ha_ndbcluster_cond::build_scan_filter_predicate(Ndb_cond * &cond, - NdbScanFilter *filter, - bool negated) -{ - DBUG_ENTER("build_scan_filter_predicate"); - switch (cond->ndb_item->type) { - case NDB_FUNCTION: - { - if (!cond->next) - break; - Ndb_item *a= cond->next->ndb_item; - Ndb_item *b, *field, *value= NULL; - - switch (cond->ndb_item->argument_count()) { - case 1: - field= (a->type == NDB_FIELD)? a : NULL; - break; - case 2: - if (!cond->next->next) - { - field= NULL; - break; - } - b= cond->next->next->ndb_item; - value= ((a->type == NDB_VALUE) ? a : - (b->type == NDB_VALUE) ? b : - NULL); - field= ((a->type == NDB_FIELD) ? a : - (b->type == NDB_FIELD) ? b : - NULL); - break; - default: - field= NULL; //Keep compiler happy - DBUG_ASSERT(0); - break; - } - switch ((negated) ? - Ndb_item::negate(cond->ndb_item->qualification.function_type) - : cond->ndb_item->qualification.function_type) { - case NDB_EQ_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - DBUG_PRINT("info", ("Generating EQ filter")); - if (filter->cmp(NdbScanFilter::COND_EQ, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_NE_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - DBUG_PRINT("info", ("Generating NE filter")); - if (filter->cmp(NdbScanFilter::COND_NE, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_LT_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - if (a == field) - { - DBUG_PRINT("info", ("Generating LT filter")); - if (filter->cmp(NdbScanFilter::COND_LT, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", ("Generating GT filter")); - if (filter->cmp(NdbScanFilter::COND_GT, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_LE_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - if (a == field) - { - DBUG_PRINT("info", ("Generating LE filter")); - if (filter->cmp(NdbScanFilter::COND_LE, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", ("Generating GE filter")); - if (filter->cmp(NdbScanFilter::COND_GE, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_GE_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - if (a == field) - { - DBUG_PRINT("info", ("Generating GE filter")); - if (filter->cmp(NdbScanFilter::COND_GE, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", ("Generating LE filter")); - if (filter->cmp(NdbScanFilter::COND_LE, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - cond= 
cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_GT_FUNC: - { - if (!value || !field) break; - // Save value in right format for the field type - value->save_in_field(field); - if (a == field) - { - DBUG_PRINT("info", ("Generating GT filter")); - if (filter->cmp(NdbScanFilter::COND_GT, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", ("Generating LT filter")); - if (filter->cmp(NdbScanFilter::COND_LT, - field->get_field_no(), - field->get_val(), - field->pack_length()) == -1) - DBUG_RETURN(1); - } - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_LIKE_FUNC: - { - if (!value || !field) break; - if ((value->qualification.value_type != Item::STRING_ITEM) && - (value->qualification.value_type != Item::VARBIN_ITEM)) - break; - // Save value in right format for the field type - value->save_in_field(field); - DBUG_PRINT("info", ("Generating LIKE filter: like(%d,%s,%d)", - field->get_field_no(), value->get_val(), - value->pack_length())); - if (filter->cmp(NdbScanFilter::COND_LIKE, - field->get_field_no(), - value->get_val(), - value->pack_length()) == -1) - DBUG_RETURN(1); - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_NOTLIKE_FUNC: - { - if (!value || !field) break; - if ((value->qualification.value_type != Item::STRING_ITEM) && - (value->qualification.value_type != Item::VARBIN_ITEM)) - break; - // Save value in right format for the field type - value->save_in_field(field); - DBUG_PRINT("info", ("Generating NOTLIKE filter: notlike(%d,%s,%d)", - field->get_field_no(), value->get_val(), - value->pack_length())); - if (filter->cmp(NdbScanFilter::COND_NOT_LIKE, - field->get_field_no(), - value->get_val(), - value->pack_length()) == -1) - DBUG_RETURN(1); - cond= cond->next->next->next; - DBUG_RETURN(0); - } - case NDB_ISNULL_FUNC: - if (!field) - break; - DBUG_PRINT("info", ("Generating ISNULL filter")); - if (filter->isnull(field->get_field_no()) == -1) - DBUG_RETURN(1); - cond= cond->next->next; - DBUG_RETURN(0); - case NDB_ISNOTNULL_FUNC: - { - if (!field) - break; - DBUG_PRINT("info", ("Generating ISNOTNULL filter")); - if (filter->isnotnull(field->get_field_no()) == -1) - DBUG_RETURN(1); - cond= cond->next->next; - DBUG_RETURN(0); - } - default: - break; - } - break; - } - default: - break; - } - DBUG_PRINT("info", ("Found illegal condition")); - DBUG_RETURN(1); -} - - -int -ha_ndbcluster_cond::build_scan_filter_group(Ndb_cond* &cond, - NdbScanFilter *filter) -{ - uint level=0; - bool negated= FALSE; - DBUG_ENTER("build_scan_filter_group"); - - do - { - if (!cond) - DBUG_RETURN(1); - switch (cond->ndb_item->type) { - case NDB_FUNCTION: - { - switch (cond->ndb_item->qualification.function_type) { - case NDB_COND_AND_FUNC: - { - level++; - DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NAND":"AND", - level)); - if ((negated) ? filter->begin(NdbScanFilter::NAND) - : filter->begin(NdbScanFilter::AND) == -1) - DBUG_RETURN(1); - negated= FALSE; - cond= cond->next; - break; - } - case NDB_COND_OR_FUNC: - { - level++; - DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NOR":"OR", - level)); - if ((negated) ? 
filter->begin(NdbScanFilter::NOR) - : filter->begin(NdbScanFilter::OR) == -1) - DBUG_RETURN(1); - negated= FALSE; - cond= cond->next; - break; - } - case NDB_NOT_FUNC: - { - DBUG_PRINT("info", ("Generating negated query")); - cond= cond->next; - negated= TRUE; - break; - } - default: - if (build_scan_filter_predicate(cond, filter, negated)) - DBUG_RETURN(1); - negated= FALSE; - break; - } - break; - } - case NDB_END_COND: - DBUG_PRINT("info", ("End of group %u", level)); - level--; - if (cond) cond= cond->next; - if (filter->end() == -1) - DBUG_RETURN(1); - if (!negated) - break; - // else fall through (NOT END is an illegal condition) - default: - { - DBUG_PRINT("info", ("Illegal scan filter")); - } - } - } while (level > 0 || negated); - - DBUG_RETURN(0); -} - - -int -ha_ndbcluster_cond::build_scan_filter(Ndb_cond * &cond, NdbScanFilter *filter) -{ - bool simple_cond= TRUE; - DBUG_ENTER("build_scan_filter"); - - switch (cond->ndb_item->type) { - case NDB_FUNCTION: - switch (cond->ndb_item->qualification.function_type) { - case NDB_COND_AND_FUNC: - case NDB_COND_OR_FUNC: - simple_cond= FALSE; - break; - default: - break; - } - break; - default: - break; - } - if (simple_cond && filter->begin() == -1) - DBUG_RETURN(1); - if (build_scan_filter_group(cond, filter)) - DBUG_RETURN(1); - if (simple_cond && filter->end() == -1) - DBUG_RETURN(1); - - DBUG_RETURN(0); -} - -int -ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op) -{ - DBUG_ENTER("generate_scan_filter"); - - if (m_cond_stack) - { - NdbScanFilter filter(op, false); // don't abort on too large - - int ret=generate_scan_filter_from_cond(filter); - if (ret != 0) - { - const NdbError& err=filter.getNdbError(); - if (err.code == NdbScanFilter::FilterTooLarge) - { - // err.message has static storage - DBUG_PRINT("info", ("%s", err.message)); - push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, - err.code, err.message); - ret=0; - } - } - if (ret != 0) - DBUG_RETURN(ret); - } - else - { - DBUG_PRINT("info", ("Empty stack")); - } - - DBUG_RETURN(0); -} - - -int -ha_ndbcluster_cond::generate_scan_filter_from_cond(NdbScanFilter& filter) -{ - bool multiple_cond= FALSE; - DBUG_ENTER("generate_scan_filter_from_cond"); - - // Wrap an AND group around multiple conditions - if (m_cond_stack->next) - { - multiple_cond= TRUE; - if (filter.begin() == -1) - DBUG_RETURN(1); - } - for (Ndb_cond_stack *stack= m_cond_stack; - (stack); - stack= stack->next) - { - Ndb_cond *cond= stack->ndb_cond; - - if (build_scan_filter(cond, &filter)) - { - DBUG_PRINT("info", ("build_scan_filter failed")); - DBUG_RETURN(1); - } - } - if (multiple_cond && filter.end() == -1) - DBUG_RETURN(1); - - DBUG_RETURN(0); -} - - -int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op, - const KEY* key_info, - const uchar *key, - uint key_len, - uchar *buf) -{ - KEY_PART_INFO* key_part= key_info->key_part; - KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts; - NdbScanFilter filter(op, true); // abort on too large - int res; - DBUG_ENTER("generate_scan_filter_from_key"); - - filter.begin(NdbScanFilter::AND); - for (; key_part != end; key_part++) - { - Field* field= key_part->field; - uint32 pack_len= field->pack_length(); - const uchar* ptr= key; - DBUG_PRINT("info", ("Filtering value for %s", field->field_name)); - DBUG_DUMP("key", ptr, pack_len); - if (key_part->null_bit) - { - DBUG_PRINT("info", ("Generating ISNULL filter")); - if (filter.isnull(key_part->fieldnr-1) == -1) - DBUG_RETURN(1); - } - else - { - DBUG_PRINT("info", 
("Generating EQ filter")); - if (filter.cmp(NdbScanFilter::COND_EQ, - key_part->fieldnr-1, - ptr, - pack_len) == -1) - DBUG_RETURN(1); - } - key += key_part->store_length; - } - // Add any pushed condition - if (m_cond_stack && - (res= generate_scan_filter_from_cond(filter))) - DBUG_RETURN(res); - - if (filter.end() == -1) - DBUG_RETURN(1); - - DBUG_RETURN(0); -} - -#endif diff --git a/sql/ha_ndbcluster_cond.h b/sql/ha_ndbcluster_cond.h deleted file mode 100644 index 952b705bfc2..00000000000 --- a/sql/ha_ndbcluster_cond.h +++ /dev/null @@ -1,500 +0,0 @@ -#ifndef HA_NDBCLUSTER_COND_INCLUDED -#define HA_NDBCLUSTER_COND_INCLUDED - -/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/* - This file defines the data structures used by engine condition pushdown in - the NDB Cluster handler -*/ - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -/* - It is necessary to include set_var.h instead of item.h because there - are dependencies on include order for set_var.h and item.h. This - will be resolved later. -*/ -#include "set_var.h" /* Item, Item_field */ - -typedef enum ndb_item_type { - NDB_VALUE = 0, // Qualified more with Item::Type - NDB_FIELD = 1, // Qualified from table definition - NDB_FUNCTION = 2,// Qualified from Item_func::Functype - NDB_END_COND = 3 // End marker for condition group -} NDB_ITEM_TYPE; - -typedef enum ndb_func_type { - NDB_EQ_FUNC = 0, - NDB_NE_FUNC = 1, - NDB_LT_FUNC = 2, - NDB_LE_FUNC = 3, - NDB_GT_FUNC = 4, - NDB_GE_FUNC = 5, - NDB_ISNULL_FUNC = 6, - NDB_ISNOTNULL_FUNC = 7, - NDB_LIKE_FUNC = 8, - NDB_NOTLIKE_FUNC = 9, - NDB_NOT_FUNC = 10, - NDB_UNKNOWN_FUNC = 11, - NDB_COND_AND_FUNC = 12, - NDB_COND_OR_FUNC = 13, - NDB_UNSUPPORTED_FUNC = 14 -} NDB_FUNC_TYPE; - -typedef union ndb_item_qualification { - Item::Type value_type; - enum_field_types field_type; // Instead of Item::FIELD_ITEM - NDB_FUNC_TYPE function_type; // Instead of Item::FUNC_ITEM -} NDB_ITEM_QUALIFICATION; - -typedef struct ndb_item_field_value { - Field* field; - int column_no; -} NDB_ITEM_FIELD_VALUE; - -typedef union ndb_item_value { - const Item *item; - NDB_ITEM_FIELD_VALUE *field_value; - uint arg_count; -} NDB_ITEM_VALUE; - -struct negated_function_mapping -{ - NDB_FUNC_TYPE pos_fun; - NDB_FUNC_TYPE neg_fun; -}; - -/* - Define what functions can be negated in condition pushdown. 
- Note, these HAVE to be in the same order as in definition enum -*/ -static const negated_function_mapping neg_map[]= -{ - {NDB_EQ_FUNC, NDB_NE_FUNC}, - {NDB_NE_FUNC, NDB_EQ_FUNC}, - {NDB_LT_FUNC, NDB_GE_FUNC}, - {NDB_LE_FUNC, NDB_GT_FUNC}, - {NDB_GT_FUNC, NDB_LE_FUNC}, - {NDB_GE_FUNC, NDB_LT_FUNC}, - {NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC}, - {NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC}, - {NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC}, - {NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC}, - {NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_UNKNOWN_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC}, - {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC} -}; - -/* - This class is the construction element for serialization of Item tree - in condition pushdown. - An instance of Ndb_Item represents a constant, table field reference, - unary or binary comparison predicate, and start/end of AND/OR. - Instances of Ndb_Item are stored in a linked list implemented by Ndb_cond - class. - The order of elements produced by Ndb_cond::next corresponds to - breadth-first traversal of the Item (i.e. expression) tree in prefix order. - AND and OR have arbitrary arity, so the end of AND/OR group is marked with - Ndb_item with type == NDB_END_COND. - NOT items represent negated conditions and generate NAND/NOR groups. -*/ -class Ndb_item : public Sql_alloc -{ -public: - Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {}; - Ndb_item(NDB_ITEM_TYPE item_type, - NDB_ITEM_QUALIFICATION item_qualification, - const Item *item_value) - : type(item_type), qualification(item_qualification) - { - switch(item_type) { - case(NDB_VALUE): - value.item= item_value; - break; - case(NDB_FIELD): { - NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); - Item_field *field_item= (Item_field *) item_value; - field_value->field= field_item->field; - field_value->column_no= -1; // Will be fetched at scan filter generation - value.field_value= field_value; - break; - } - case(NDB_FUNCTION): - value.item= item_value; - value.arg_count= ((Item_func *) item_value)->argument_count(); - break; - case(NDB_END_COND): - break; - } - }; - Ndb_item(Field *field, int column_no) : type(NDB_FIELD) - { - NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); - qualification.field_type= field->type(); - field_value->field= field; - field_value->column_no= column_no; - value.field_value= field_value; - }; - Ndb_item(Item_func::Functype func_type, const Item *item_value) - : type(NDB_FUNCTION) - { - qualification.function_type= item_func_to_ndb_func(func_type); - value.item= item_value; - value.arg_count= ((Item_func *) item_value)->argument_count(); - }; - Ndb_item(Item_func::Functype func_type, uint no_args) - : type(NDB_FUNCTION) - { - qualification.function_type= item_func_to_ndb_func(func_type); - value.arg_count= no_args; - }; - ~Ndb_item() - { - if (type == NDB_FIELD) - { - delete value.field_value; - value.field_value= NULL; - } - }; - - uint32 pack_length() - { - switch(type) { - case(NDB_VALUE): - if(qualification.value_type == Item::STRING_ITEM) - return value.item->str_value.length(); - break; - case(NDB_FIELD): - return value.field_value->field->pack_length(); - default: - break; - } - - return 0; - }; - - Field * get_field() { return value.field_value->field; }; - - int get_field_no() { return value.field_value->column_no; }; - - int argument_count() - { - return value.arg_count; - }; - - const char* get_val() - { - switch(type) { - case(NDB_VALUE): - if(qualification.value_type == 
Item::STRING_ITEM) - return value.item->str_value.ptr(); - break; - case(NDB_FIELD): - return (char*) value.field_value->field->ptr; - default: - break; - } - - return NULL; - }; - - void save_in_field(Ndb_item *field_item) - { - Field *field = field_item->value.field_value->field; - const Item *item= value.item; - - if (item && field) - { - my_bitmap_map *old_map= - dbug_tmp_use_all_columns(field->table, field->table->write_set); - ((Item *)item)->save_in_field(field, FALSE); - dbug_tmp_restore_column_map(field->table->write_set, old_map); - } - }; - - static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun) - { - switch (fun) { - case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; } - case (Item_func::NE_FUNC): { return NDB_NE_FUNC; } - case (Item_func::LT_FUNC): { return NDB_LT_FUNC; } - case (Item_func::LE_FUNC): { return NDB_LE_FUNC; } - case (Item_func::GT_FUNC): { return NDB_GT_FUNC; } - case (Item_func::GE_FUNC): { return NDB_GE_FUNC; } - case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; } - case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; } - case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; } - case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; } - case (Item_func::NEG_FUNC): { return NDB_UNKNOWN_FUNC; } - case (Item_func::UNKNOWN_FUNC): { return NDB_UNKNOWN_FUNC; } - case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; } - case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; } - default: { return NDB_UNSUPPORTED_FUNC; } - } - }; - - static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun) - { - uint i= (uint) fun; - DBUG_ASSERT(fun == neg_map[i].pos_fun); - return neg_map[i].neg_fun; - }; - - NDB_ITEM_TYPE type; - NDB_ITEM_QUALIFICATION qualification; - private: - NDB_ITEM_VALUE value; -}; - -/* - This class implements a linked list used for storing a - serialization of the Item tree for condition pushdown. - */ -class Ndb_cond : public Sql_alloc -{ - public: - Ndb_cond() : ndb_item(NULL), next(NULL), prev(NULL) {}; - ~Ndb_cond() - { - if (ndb_item) delete ndb_item; - ndb_item= NULL; - /* - First item in the linked list deletes all in a loop - Note - doing it recursively causes stack issues for - big IN clauses - */ - Ndb_cond *n= next; - while (n) - { - Ndb_cond *tmp= n; - n= n->next; - tmp->next= NULL; - delete tmp; - } - next= prev= NULL; - }; - Ndb_item *ndb_item; - Ndb_cond *next; - Ndb_cond *prev; -}; - -/* - This class implements a stack for storing several conditions - for pushdown (represented as serialized Item trees using Ndb_cond). - The current implementation only pushes one condition, but is - prepared for handling several (C1 AND C2 ...) if the logic for - pushing conditions is extended in sql_select. -*/ -class Ndb_cond_stack : public Sql_alloc -{ - public: - Ndb_cond_stack() : ndb_cond(NULL), next(NULL) {}; - ~Ndb_cond_stack() - { - if (ndb_cond) delete ndb_cond; - ndb_cond= NULL; - if (next) delete next; - next= NULL; - }; - Ndb_cond *ndb_cond; - Ndb_cond_stack *next; -}; - -class Ndb_rewrite_context : public Sql_alloc -{ -public: - Ndb_rewrite_context(Item_func *func) - : func_item(func), left_hand_item(NULL), count(0) {}; - ~Ndb_rewrite_context() - { - if (next) delete next; - } - const Item_func *func_item; - const Item *left_hand_item; - uint count; - Ndb_rewrite_context *next; -}; - -/* - This class is used for storing the context when traversing - the Item tree. 
It stores a reference to the table the condition - is defined on, the serialized representation being generated, - if the condition found is supported, and information what is - expected next in the tree inorder for the condition to be supported. -*/ -class Ndb_cond_traverse_context : public Sql_alloc -{ - public: - Ndb_cond_traverse_context(TABLE *tab, const NdbDictionary::Table *ndb_tab, - Ndb_cond_stack* stack) - : table(tab), ndb_table(ndb_tab), - supported(TRUE), stack_ptr(stack), cond_ptr(NULL), - skip(0), collation(NULL), rewrite_stack(NULL) - { - // Allocate type checking bitmaps - my_bitmap_init(&expect_mask, 0, 512, FALSE); - my_bitmap_init(&expect_field_type_mask, 0, 512, FALSE); - my_bitmap_init(&expect_field_result_mask, 0, 512, FALSE); - - if (stack) - cond_ptr= stack->ndb_cond; - }; - ~Ndb_cond_traverse_context() - { - my_bitmap_free(&expect_mask); - my_bitmap_free(&expect_field_type_mask); - my_bitmap_free(&expect_field_result_mask); - if (rewrite_stack) delete rewrite_stack; - } - void expect(Item::Type type) - { - bitmap_set_bit(&expect_mask, (uint) type); - if (type == Item::FIELD_ITEM) expect_all_field_types(); - }; - void dont_expect(Item::Type type) - { - bitmap_clear_bit(&expect_mask, (uint) type); - }; - bool expecting(Item::Type type) - { - return bitmap_is_set(&expect_mask, (uint) type); - }; - void expect_nothing() - { - bitmap_clear_all(&expect_mask); - }; - bool expecting_nothing() - { - return bitmap_is_clear_all(&expect_mask); - } - void expect_only(Item::Type type) - { - expect_nothing(); - expect(type); - }; - - void expect_field_type(enum_field_types type) - { - bitmap_set_bit(&expect_field_type_mask, (uint) type); - }; - void expect_all_field_types() - { - bitmap_set_all(&expect_field_type_mask); - }; - bool expecting_field_type(enum_field_types type) - { - return bitmap_is_set(&expect_field_type_mask, (uint) type); - }; - void expect_no_field_type() - { - bitmap_clear_all(&expect_field_type_mask); - }; - bool expecting_no_field_type() - { - return bitmap_is_clear_all(&expect_field_type_mask); - } - void expect_only_field_type(enum_field_types result) - { - expect_no_field_type(); - expect_field_type(result); - }; - - void expect_field_result(Item_result result) - { - bitmap_set_bit(&expect_field_result_mask, (uint) result); - }; - bool expecting_field_result(Item_result result) - { - return bitmap_is_set(&expect_field_result_mask, (uint) result); - }; - void expect_no_field_result() - { - bitmap_clear_all(&expect_field_result_mask); - }; - bool expecting_no_field_result() - { - return bitmap_is_clear_all(&expect_field_result_mask); - } - void expect_only_field_result(Item_result result) - { - expect_no_field_result(); - expect_field_result(result); - }; - void expect_collation(CHARSET_INFO* col) - { - collation= col; - }; - bool expecting_collation(CHARSET_INFO* col) - { - bool matching= (!collation) ? 
true : (collation == col); - collation= NULL; - - return matching; - }; - - TABLE* table; - const NdbDictionary::Table *ndb_table; - bool supported; - Ndb_cond_stack* stack_ptr; - Ndb_cond* cond_ptr; - MY_BITMAP expect_mask; - MY_BITMAP expect_field_type_mask; - MY_BITMAP expect_field_result_mask; - uint skip; - CHARSET_INFO* collation; - Ndb_rewrite_context *rewrite_stack; -}; - -class ha_ndbcluster; - -class ha_ndbcluster_cond -{ -public: - ha_ndbcluster_cond() - : m_cond_stack(NULL) - {} - ~ha_ndbcluster_cond() - { if (m_cond_stack) delete m_cond_stack; } - const COND *cond_push(const COND *cond, - TABLE *table, const NdbDictionary::Table *ndb_table); - void cond_pop(); - void cond_clear(); - int generate_scan_filter(NdbScanOperation* op); - int generate_scan_filter_from_cond(NdbScanFilter& filter); - int generate_scan_filter_from_key(NdbScanOperation* op, - const KEY* key_info, - const uchar *key, - uint key_len, - uchar *buf); -private: - bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond, - TABLE *table, const NdbDictionary::Table *ndb_table); - int build_scan_filter_predicate(Ndb_cond* &cond, - NdbScanFilter* filter, - bool negated= false); - int build_scan_filter_group(Ndb_cond* &cond, - NdbScanFilter* filter); - int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter); - - Ndb_cond_stack *m_cond_stack; -}; - -#endif /* HA_NDBCLUSTER_COND_INCLUDED */ diff --git a/sql/ha_ndbcluster_tables.h b/sql/ha_ndbcluster_tables.h deleted file mode 100644 index 4d97ca2c254..00000000000 --- a/sql/ha_ndbcluster_tables.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef HA_NDBCLUSTER_TABLES_INCLUDED -#define HA_NDBCLUSTER_TABLES_INCLUDED - -/* Copyright (c) 2000-2003, 2006, 2007 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA -*/ - -#define NDB_REP_DB "mysql" -#define OLD_NDB_REP_DB "cluster" -#define NDB_REP_TABLE "ndb_binlog_index" -#define NDB_APPLY_TABLE "ndb_apply_status" -#define OLD_NDB_APPLY_TABLE "apply_status" -#define NDB_SCHEMA_TABLE "ndb_schema" -#define OLD_NDB_SCHEMA_TABLE "schema" - -#endif /* HA_NDBCLUSTER_TABLES_INCLUDED */ diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 07a0b0de145..3ea8d4a855d 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -424,7 +424,6 @@ public: will be handled by any underlying handlers implementing transactions. There is only one call to each handler type involved per transaction and these go directly to the handlers supporting transactions - currently InnoDB, BDB and NDB). ------------------------------------------------------------------------- */ virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to, @@ -799,14 +798,14 @@ public: the handler always has a primary key (hidden if not defined) and this index is used for scanning rather than a full table scan in all situations. 
- (InnoDB, BDB, Federated) + (InnoDB, Federated) HA_REC_NOT_IN_SEQ: This flag is set for handlers that cannot guarantee that the rows are returned accroding to incremental positions (0, 1, 2, 3...). This also means that rnd_next() should return HA_ERR_RECORD_DELETED if it finds a deleted row. - (MyISAM (not fixed length row), BDB, HEAP, NDB, InooDB) + (MyISAM (not fixed length row), HEAP, InnoDB) HA_CAN_GEOMETRY: Can the storage engine handle spatial data. @@ -819,13 +818,13 @@ public: finding a row by key as by position. This flag is used in a very special situation in conjunction with filesort's. For further explanation see intro to init_read_record. - (BDB, HEAP, InnoDB) + (HEAP, InnoDB) HA_NULL_IN_KEY: Is NULL values allowed in indexes. If this is not allowed then it is not possible to use an index on a NULLable field. - (BDB, HEAP, MyISAM, NDB, InnoDB) + (HEAP, MyISAM, InnoDB) HA_DUPLICATE_POS: Tells that we can the position for the conflicting duplicate key @@ -836,12 +835,12 @@ public: HA_CAN_INDEX_BLOBS: Is the storage engine capable of defining an index of a prefix on a BLOB attribute. - (BDB, Federated, MyISAM, InnoDB) + (Federated, MyISAM, InnoDB) HA_AUTO_PART_KEY: Auto increment fields can be part of a multi-part key. For second part auto-increment keys, the auto_incrementing is done in handler.cc - (BDB, Federated, MyISAM, NDB) + (Federated, MyISAM) HA_REQUIRE_PRIMARY_KEY: Can't define a table without primary key (and cannot handle a table @@ -871,7 +870,7 @@ public: HA_NO_PREFIX_CHAR_KEYS: Indexes on prefixes of character fields is not allowed. - (NDB) + (Federated) HA_CAN_FULLTEXT: Does the storage engine support fulltext indexes @@ -896,11 +895,11 @@ public: Should file names always be in lower case (used by engines that map table names to file names. Since partition handler has a local file this flag is set. - (BDB, Federated, MyISAM) + (Federated, MyISAM) HA_CAN_BIT_FIELD: Is the storage engine capable of handling bit fields? - (MyISAM, NDB) + (MyISAM) HA_NEED_READ_RANGE_BUFFER: Is Read Multi-Range supported => need multi read range buffer @@ -912,7 +911,7 @@ public: not handle this call. There are methods in handler.cc that will transfer those calls into index_read and other calls in the index scan module. - (NDB) + (No handler defines it) HA_PRIMARY_KEY_REQUIRED_FOR_POSITION: Does the storage engine need a PK for position? @@ -942,11 +941,11 @@ public: Does the index support read next, this is assumed in the server code and never checked so all indexes must support this. Note that the handler can be used even if it doesn't have any index. - (BDB, HEAP, MyISAM, Federated, NDB, InnoDB) + (HEAP, MyISAM, Federated, InnoDB) HA_READ_PREV: Can the index be used to scan backwards. - (BDB, HEAP, MyISAM, NDB, InnoDB) + (HEAP, MyISAM, InnoDB) HA_READ_ORDER: Can the index deliver its record in index order. Typically true for @@ -960,19 +959,19 @@ public: order all output started by index_read since most engines do this. With read_multi_range calls there is a specific flag setting order or not order so in those cases ordering of index output can be avoided. - (BDB, InnoDB, HEAP, MyISAM, NDB) + (InnoDB, HEAP, MyISAM) HA_READ_RANGE: Specify whether index can handle ranges, typically true for all ordered indexes and not true for hash indexes. Used by optimiser to check if ranges (as key >= 5) can be optimised by index. - (BDB, InnoDB, NDB, MyISAM, HEAP) + (InnoDB, MyISAM, HEAP) HA_ONLY_WHOLE_INDEX: Can't use part key searches. 
This is typically true for hash indexes and typically not true for ordered indexes. - (Federated, NDB, HEAP) + (Federated, HEAP) HA_KEYREAD_ONLY: Does the storage engine support index-only scans on this index. @@ -982,7 +981,7 @@ public: only have to fill in the columns the key covers. If HA_PRIMARY_KEY_IN_READ_INDEX is set then also the PRIMARY KEY columns must be updated in the row. - (BDB, InnoDB, MyISAM) + (InnoDB, MyISAM) */ virtual ulong index_flags(uint inx, uint part, bool all_parts) const { diff --git a/sql/handler.cc b/sql/handler.cc index 42f5f7ac442..a24f18f4863 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -77,7 +77,6 @@ ulong savepoint_alloc_size= 0; static const LEX_STRING sys_table_aliases[]= { { C_STRING_WITH_LEN("INNOBASE") }, { C_STRING_WITH_LEN("INNODB") }, - { C_STRING_WITH_LEN("NDB") }, { C_STRING_WITH_LEN("NDBCLUSTER") }, { C_STRING_WITH_LEN("HEAP") }, { C_STRING_WITH_LEN("MEMORY") }, { C_STRING_WITH_LEN("MERGE") }, { C_STRING_WITH_LEN("MRG_MYISAM") }, { C_STRING_WITH_LEN("Maria") }, { C_STRING_WITH_LEN("Aria") }, @@ -4411,10 +4410,10 @@ handler::ha_rename_partitions(const char *path) /** Tell the storage engine that it is allowed to "disable transaction" in the - handler. It is a hint that ACID is not required - it is used in NDB for + handler. It is a hint that ACID is not required - it was used in NDB for ALTER TABLE, for example, when data are copied to temporary table. A storage engine may treat this hint any way it likes. NDB for example - starts to commit every now and then automatically. + started to commit every now and then automatically. This hint can be safely ignored. */ int ha_enable_transaction(THD *thd, bool on) @@ -5227,145 +5226,6 @@ int ha_discover_table_names(THD *thd, LEX_STRING *db, MY_DIR *dirp, } -#ifdef HAVE_NDB_BINLOG -/* - TODO: change this into a dynamic struct - List does not work as - 1. binlog_end is called when MEM_ROOT is gone - 2. 
cannot work with thd MEM_ROOT as memory should be freed -*/ -#define MAX_HTON_LIST_ST 63 -struct hton_list_st -{ - handlerton *hton[MAX_HTON_LIST_ST]; - uint sz; -}; - -struct binlog_func_st -{ - enum_binlog_func fn; - void *arg; -}; - -/** @brief - Listing handlertons first to avoid recursive calls and deadlock -*/ -static my_bool binlog_func_list(THD *thd, plugin_ref plugin, void *arg) -{ - hton_list_st *hton_list= (hton_list_st *)arg; - handlerton *hton= plugin_hton(plugin); - if (hton->state == SHOW_OPTION_YES && hton->binlog_func) - { - uint sz= hton_list->sz; - if (sz == MAX_HTON_LIST_ST-1) - { - /* list full */ - return FALSE; - } - hton_list->hton[sz]= hton; - hton_list->sz= sz+1; - } - return FALSE; -} - -static my_bool binlog_func_foreach(THD *thd, binlog_func_st *bfn) -{ - hton_list_st hton_list; - uint i, sz; - - hton_list.sz= 0; - plugin_foreach(thd, binlog_func_list, - MYSQL_STORAGE_ENGINE_PLUGIN, &hton_list); - - for (i= 0, sz= hton_list.sz; i < sz ; i++) - hton_list.hton[i]->binlog_func(hton_list.hton[i], thd, bfn->fn, bfn->arg); - return FALSE; -} - -int ha_reset_logs(THD *thd) -{ - binlog_func_st bfn= {BFN_RESET_LOGS, 0}; - binlog_func_foreach(thd, &bfn); - return 0; -} - -void ha_reset_slave(THD* thd) -{ - binlog_func_st bfn= {BFN_RESET_SLAVE, 0}; - binlog_func_foreach(thd, &bfn); -} - -void ha_binlog_wait(THD* thd) -{ - binlog_func_st bfn= {BFN_BINLOG_WAIT, 0}; - binlog_func_foreach(thd, &bfn); -} - -int ha_binlog_end(THD* thd) -{ - binlog_func_st bfn= {BFN_BINLOG_END, 0}; - binlog_func_foreach(thd, &bfn); - return 0; -} - -int ha_binlog_index_purge_file(THD *thd, const char *file) -{ - binlog_func_st bfn= {BFN_BINLOG_PURGE_FILE, (void *)file}; - binlog_func_foreach(thd, &bfn); - return 0; -} - -struct binlog_log_query_st -{ - enum_binlog_command binlog_command; - const char *query; - uint query_length; - const char *db; - const char *table_name; -}; - -static my_bool binlog_log_query_handlerton2(THD *thd, - handlerton *hton, - void *args) -{ - struct binlog_log_query_st *b= (struct binlog_log_query_st*)args; - if (hton->state == SHOW_OPTION_YES && hton->binlog_log_query) - hton->binlog_log_query(hton, thd, - b->binlog_command, - b->query, - b->query_length, - b->db, - b->table_name); - return FALSE; -} - -static my_bool binlog_log_query_handlerton(THD *thd, - plugin_ref plugin, - void *args) -{ - return binlog_log_query_handlerton2(thd, plugin_hton(plugin), args); -} - -void ha_binlog_log_query(THD *thd, handlerton *hton, - enum_binlog_command binlog_command, - const char *query, uint query_length, - const char *db, const char *table_name) -{ - struct binlog_log_query_st b; - b.binlog_command= binlog_command; - b.query= query; - b.query_length= query_length; - b.db= db; - b.table_name= table_name; - if (hton == 0) - plugin_foreach(thd, binlog_log_query_handlerton, - MYSQL_STORAGE_ENGINE_PLUGIN, &b); - else - binlog_log_query_handlerton2(thd, hton, &b); -} -#endif - - /** Read first row between two ranges. Store ranges for future calls to read_range_next. diff --git a/sql/handler.h b/sql/handler.h index 69f7481e2c6..c91b26f768f 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -343,9 +343,6 @@ enum enum_alter_inplace_result { /* Note: the following includes binlog and closing 0. 
- so: innodb + bdb + ndb + binlog + myisam + myisammrg + archive + - example + csv + heap + blackhole + federated + 0 - (yes, the sum is deliberately inaccurate) TODO remove the limit, use dynarrays */ #define MAX_HA 64 @@ -423,7 +420,6 @@ enum legacy_db_type DB_TYPE_MYISAM=9, DB_TYPE_MRG_MYISAM=10, DB_TYPE_INNODB=12, - DB_TYPE_NDBCLUSTER=14, DB_TYPE_EXAMPLE_DB=15, DB_TYPE_ARCHIVE_DB=16, DB_TYPE_CSV_DB=17, @@ -4092,25 +4088,6 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht); #define trans_need_2pc(thd, all) ((total_ha_2pc > 1) && \ !((all ? &thd->transaction.all : &thd->transaction.stmt)->no_2pc)) -#ifdef HAVE_NDB_BINLOG -int ha_reset_logs(THD *thd); -int ha_binlog_index_purge_file(THD *thd, const char *file); -void ha_reset_slave(THD *thd); -void ha_binlog_log_query(THD *thd, handlerton *db_type, - enum_binlog_command binlog_command, - const char *query, uint query_length, - const char *db, const char *table_name); -void ha_binlog_wait(THD *thd); -int ha_binlog_end(THD *thd); -#else -#define ha_reset_logs(a) do {} while (0) -#define ha_binlog_index_purge_file(a,b) do {} while (0) -#define ha_reset_slave(a) do {} while (0) -#define ha_binlog_log_query(a,b,c,d,e,f,g) do {} while (0) -#define ha_binlog_wait(a) do {} while (0) -#define ha_binlog_end(a) do {} while (0) -#endif - const char *get_canonical_filename(handler *file, const char *path, char *tmp_path); bool mysql_xa_recover(THD *thd); diff --git a/sql/lex.h b/sql/lex.h index fe6298a2611..5ca188f99a0 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -394,8 +394,6 @@ static SYMBOL symbols[] = { { "NAMES", SYM(NAMES_SYM)}, { "NATIONAL", SYM(NATIONAL_SYM)}, { "NATURAL", SYM(NATURAL)}, - { "NDB", SYM(NDBCLUSTER_SYM)}, - { "NDBCLUSTER", SYM(NDBCLUSTER_SYM)}, { "NCHAR", SYM(NCHAR_SYM)}, { "NEW", SYM(NEW_SYM)}, { "NEXT", SYM(NEXT_SYM)}, diff --git a/sql/log.cc b/sql/log.cc index e0fd74b5e38..dcdf2bcc74d 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -3889,8 +3889,6 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log, mysql_mutex_unlock(&LOCK_xid_list); } - if (thd) - ha_reset_logs(thd); /* We need to get both locks to be sure that no one is trying to write to the index log file. @@ -4528,13 +4526,6 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space, } error= 0; - if (!need_mutex) - { - /* - This is to avoid triggering an error in NDB. - */ - ha_binlog_index_purge_file(current_thd, log_info.log_file_name); - } DBUG_PRINT("info",("purging %s",log_info.log_file_name)); if (!my_delete(log_info.log_file_name, MYF(0))) diff --git a/sql/log_event.cc b/sql/log_event.cc index e5183b208b2..600a98916a9 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -409,20 +409,6 @@ inline int idempotent_error_code(int err_code) inline int ignored_error_code(int err_code) { -#ifdef HAVE_NDB_BINLOG - /* - The following error codes are hard-coded and will always be ignored. - */ - switch (err_code) - { - case ER_DB_CREATE_EXISTS: - case ER_DB_DROP_EXISTS: - return 1; - default: - /* Nothing to do */ - break; - } -#endif return ((err_code == ER_SLAVE_IGNORED_TABLE) || (use_slave_mask && bitmap_is_set(&slave_error_mask, err_code))); } @@ -4085,36 +4071,8 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, clear_all_errors(thd, const_cast(rli)); current_stmt_is_commit= is_commit(); - if (current_stmt_is_commit && rgi->tables_to_lock) - { - /* - Cleaning-up the last statement context: - the terminal event of the current statement flagged with - STMT_END_F got filtered out in ndb circular replication. 
- */ - int error; - char llbuff[22]; - if ((error= rows_event_stmt_cleanup(rgi, thd))) - { - const_cast(rli)->report(ERROR_LEVEL, error, - "Error in cleaning up after an event preceding the commit; " - "the group log file/position: %s %s", - const_cast(rli)->group_master_log_name, - llstr(const_cast(rli)->group_master_log_pos, - llbuff)); - } - /* - Executing a part of rli->stmt_done() logics that does not deal - with group position change. The part is redundant now but is - future-change-proof addon, e.g if COMMIT handling will start checking - invariants like IN_STMT flag must be off at committing the transaction. - */ - rgi->inc_event_relay_log_pos(); - } - else - { - rgi->slave_close_thread_tables(thd); - } + DBUG_ASSERT(!current_stmt_is_commit || !rgi->tables_to_lock); + rgi->slave_close_thread_tables(thd); /* Note: We do not need to execute reset_one_shot_variables() if this @@ -11108,8 +11066,7 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability todo: to introduce a property for the event (handler?) which forces applying the event in the replace (idempotent) fashion. */ - if ((slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT) || - (m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER)) + if (slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT) { /* We are using REPLACE semantics and not INSERT IGNORE semantics @@ -11122,8 +11079,7 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability /* Pretend we're executing a REPLACE command: this is needed for - InnoDB and NDB Cluster since they are not (properly) checking the - lex->duplicates flag. + InnoDB since it is not (properly) checking the lex->duplicates flag. */ thd->lex->sql_command= SQLCOM_REPLACE; /* @@ -11131,23 +11087,10 @@ Write_rows_log_event::do_before_row_operations(const Slave_reporting_capability */ m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); /* - NDB specific: update from ndb master wrapped as Write_rows - so that the event should be applied to replace slave's row - - Also following is needed in case if we have AFTER DELETE triggers. + The following is needed in case if we have AFTER DELETE triggers. */ m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); - /* - NDB specific: if update from ndb master wrapped as Write_rows - does not find the row it's assumed idempotent binlog applying - is taking place; don't raise the error. - */ m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY); - /* - TODO: the cluster team (Tomas?) says that it's better if the engine knows - how many rows are going to be inserted, then it can allocate needed memory - from the start. 
- */ } if (slave_run_triggers_for_rbr && !master_had_triggers && m_table->triggers ) m_table->prepare_triggers_for_insert_stmt_or_event(); @@ -11206,8 +11149,7 @@ Write_rows_log_event::do_after_row_operations(const Slave_reporting_capability * } m_table->next_number_field=0; m_table->auto_increment_field_not_null= FALSE; - if ((slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT) || - m_table->s->db_type()->db_type == DB_TYPE_NDBCLUSTER) + if (slave_exec_mode == SLAVE_EXEC_MODE_IDEMPOTENT) { m_table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); m_table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); @@ -11333,8 +11275,7 @@ Rows_log_event::write_row(rpl_group_info *rgi, slave_run_triggers_for_rbr && !master_had_triggers && table->triggers; auto_afree_ptr key(NULL); - prepare_record(table, m_width, - table->file->ht->db_type != DB_TYPE_NDBCLUSTER); + prepare_record(table, m_width, true); /* unpack row into table->record[0] */ if ((error= unpack_current_row(rgi))) @@ -11618,53 +11559,7 @@ uint8 Write_rows_log_event::get_trg_event_map() */ static bool record_compare(TABLE *table) { - /* - Need to set the X bit and the filler bits in both records since - there are engines that do not set it correctly. - - In addition, since MyISAM checks that one hasn't tampered with the - record, it is necessary to restore the old bytes into the record - after doing the comparison. - - TODO[record format ndb]: Remove it once NDB returns correct - records. Check that the other engines also return correct records. - */ - - DBUG_DUMP("record[0]", table->record[0], table->s->reclength); - DBUG_DUMP("record[1]", table->record[1], table->s->reclength); - bool result= FALSE; - uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0}; - - if (table->s->null_bytes > 0) - { - for (int i = 0 ; i < 2 ; ++i) - { - /* - If we have an X bit then we need to take care of it. - */ - if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) - { - saved_x[i]= table->record[i][0]; - table->record[i][0]|= 1U; - } - - /* - If (last_null_bit_pos == 0 && null_bytes > 1), then: - - X bit (if any) + N nullable fields + M Field_bit fields = 8 bits - - Ie, the entire byte is used. - */ - if (table->s->last_null_bit_pos > 0) - { - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); - } - } - } - /** Compare full record only if: - there are no blob fields (otherwise we would also need @@ -11712,24 +11607,6 @@ static bool record_compare(TABLE *table) } record_compare_exit: - /* - Restore the saved bytes. - - TODO[record format ndb]: Remove this code once NDB returns the - correct record format. - */ - if (table->s->null_bytes > 0) - { - for (int i = 0 ; i < 2 ; ++i) - { - if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) - table->record[i][0]= saved_x[i]; - - if (table->s->last_null_bit_pos) - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; - } - } - return result; } @@ -12089,21 +11966,6 @@ int Rows_log_event::find_row(rpl_group_info *rgi) while (record_compare(table)) { - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - - TODO[record format ndb]: Remove this code once NDB returns the - correct record format. 
- */ - if (table->s->null_bytes > 0) - { - table->record[0][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); - } - while ((error= table->file->ha_index_next(table->record[0]))) { /* We just skip records that has already been deleted */ diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc index 0cb78686243..eaa882518f5 100644 --- a/sql/log_event_old.cc +++ b/sql/log_event_old.cc @@ -302,50 +302,7 @@ last_uniq_key(TABLE *table, uint keyno) */ static bool record_compare(TABLE *table) { - /* - Need to set the X bit and the filler bits in both records since - there are engines that do not set it correctly. - - In addition, since MyISAM checks that one hasn't tampered with the - record, it is necessary to restore the old bytes into the record - after doing the comparison. - - TODO[record format ndb]: Remove it once NDB returns correct - records. Check that the other engines also return correct records. - */ - bool result= FALSE; - uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0}; - - if (table->s->null_bytes > 0) - { - for (int i = 0 ; i < 2 ; ++i) - { - /* - If we have an X bit then we need to take care of it. - */ - if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) - { - saved_x[i]= table->record[i][0]; - table->record[i][0]|= 1U; - } - - /* - If (last_null_bit_pos == 0 && null_bytes > 1), then: - - X bit (if any) + N nullable fields + M Field_bit fields = 8 bits - - Ie, the entire byte is used. - */ - if (table->s->last_null_bit_pos > 0) - { - saved_filler[i]= table->record[i][table->s->null_bytes - 1]; - table->record[i][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); - } - } - } - if (table->s->blob_fields + table->s->varchar_fields == 0) { result= cmp_record(table,record[1]); @@ -372,24 +329,6 @@ static bool record_compare(TABLE *table) } record_compare_exit: - /* - Restore the saved bytes. - - TODO[record format ndb]: Remove this code once NDB returns the - correct record format. - */ - if (table->s->null_bytes > 0) - { - for (int i = 0 ; i < 2 ; ++i) - { - if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD)) - table->record[i][0]= saved_x[i]; - - if (table->s->last_null_bit_pos > 0) - table->record[i][table->s->null_bytes - 1]= saved_filler[i]; - } - } - return result; } @@ -780,21 +719,6 @@ static int find_and_fetch_row(TABLE *table, uchar *key) { int error; - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - - TODO[record format ndb]: Remove this code once NDB returns the - correct record format. - */ - if (table->s->null_bytes > 0) - { - table->record[1][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); - } - while ((error= table->file->ha_index_next(table->record[1]))) { /* We just skip records that has already been deleted */ @@ -889,34 +813,13 @@ int Write_rows_log_event_old::do_before_row_operations(TABLE *table) /* Tell the storage engine that we are using REPLACE semantics. */ thd->lex->duplicates= DUP_REPLACE; - /* - Pretend we're executing a REPLACE command: this is needed for - InnoDB and NDB Cluster since they are not (properly) checking the - lex->duplicates flag. 
- */ thd->lex->sql_command= SQLCOM_REPLACE; /* Do not raise the error flag in case of hitting to an unique attribute */ table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - /* - NDB specific: update from ndb master wrapped as Write_rows - */ - /* - so that the event should be applied to replace slave's row - */ table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); - /* - NDB specific: if update from ndb master wrapped as Write_rows - does not find the row it's assumed idempotent binlog applying - is taking place; don't raise the error. - */ table->file->extra(HA_EXTRA_IGNORE_NO_KEY); - /* - TODO: the cluster team (Tomas?) says that it's better if the engine knows - how many rows are going to be inserted, then it can allocate needed memory - from the start. - */ table->file->ha_start_bulk_insert(0); return error; } @@ -2375,21 +2278,6 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi) while (record_compare(table)) { - /* - We need to set the null bytes to ensure that the filler bit - are all set when returning. There are storage engines that - just set the necessary bits on the bytes and don't set the - filler bits correctly. - - TODO[record format ndb]: Remove this code once NDB returns the - correct record format. - */ - if (table->s->null_bytes > 0) - { - table->record[0][table->s->null_bytes - 1]|= - 256U - (1U << table->s->last_null_bit_pos); - } - while ((error= table->file->ha_index_next(table->record[0]))) { /* We just skip records that has already been deleted */ @@ -2529,34 +2417,13 @@ Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capabil /* Tell the storage engine that we are using REPLACE semantics. */ thd->lex->duplicates= DUP_REPLACE; - /* - Pretend we're executing a REPLACE command: this is needed for - InnoDB and NDB Cluster since they are not (properly) checking the - lex->duplicates flag. - */ thd->lex->sql_command= SQLCOM_REPLACE; /* Do not raise the error flag in case of hitting to an unique attribute */ m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - /* - NDB specific: update from ndb master wrapped as Write_rows - */ - /* - so that the event should be applied to replace slave's row - */ m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); - /* - NDB specific: if update from ndb master wrapped as Write_rows - does not find the row it's assumed idempotent binlog applying - is taking place; don't raise the error. - */ m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY); - /* - TODO: the cluster team (Tomas?) says that it's better if the engine knows - how many rows are going to be inserted, then it can allocate needed memory - from the start. - */ m_table->file->ha_start_bulk_insert(0); return error; } diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index b63db9ecea2..bb49cebb921 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -199,12 +199,6 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows, One must have called index_init() before calling this function. Several multi_range_read_init() calls may be made in course of one query. - Until WL#2623 is done (see its text, section 3.2), the following will - also hold: - The caller will guarantee that if "seq->init == mrr_ranges_array_init" - then seq_init_param is an array of n_ranges KEY_MULTI_RANGE structures. - This property will only be used by NDB handler until WL#2623 is done. 
- Buffer memory management is done according to the following scenario: The caller allocates the buffer and provides it to the callee by filling the members of HANDLER_BUFFER structure. diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 38f91967312..94503d507fe 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2061,12 +2061,6 @@ void clean_up(bool print_message) stop_handle_manager(); release_ddl_log(); - /* - make sure that handlers finish up - what they have that is dependent on the binlog - */ - ha_binlog_end(current_thd); - logger.cleanup_base(); injector::free_instance(); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 866e9a854b9..0dac6e56c1b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -11706,14 +11706,6 @@ int QUICK_RANGE_SELECT::reset() mrr_buf_desc->buffer= mrange_buff; mrr_buf_desc->buffer_end= mrange_buff + buf_size; mrr_buf_desc->end_of_used_area= mrange_buff; -#ifdef HAVE_valgrind - /* - We need this until ndb will use the buffer efficiently - (Now ndb stores complete row in here, instead of only the used fields - which gives us valgrind warnings in compare_record[]) - */ - bzero((char*) mrange_buff, buf_size); -#endif } if (!mrr_buf_desc) diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 98e796879ad..a753c5052bd 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -226,11 +226,6 @@ bool partition_info::set_partition_bitmaps(TABLE_LIST *table_list) { if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) { - /* - Don't allow PARTITION () clause on a NDB tables yet. - TODO: Add partition name handling to NDB/partition_info. - which is currently ha_partition specific. - */ my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0)); DBUG_RETURN(true); } @@ -286,7 +281,7 @@ bool partition_info::can_prune_insert(THD* thd, DBUG_ENTER("partition_info::can_prune_insert"); if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) - DBUG_RETURN(false); /* Should not insert prune NDB tables */ + DBUG_RETURN(false); /* If under LOCK TABLES pruning will skip start_stmt instead of external_lock @@ -1110,14 +1105,12 @@ static bool check_engine_condition(partition_element *p_elem, Current check verifies only that all handlers are the same. Later this check will be more sophisticated. 
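The multi_range_read_init() comment kept above describes the buffer hand-off: the caller allocates the memory and publishes it through three pointers, and the opt_range.cc hunk shows those pointers being filled (the valgrind-only bzero was needed only because NDB copied whole rows into the area). A self-contained sketch of that contract, with a simplified HandlerBuffer stand-in for the real HANDLER_BUFFER:

    #include <cstddef>

    // Simplified stand-in for HANDLER_BUFFER: caller-owned memory described by three pointers.
    struct HandlerBuffer {
      unsigned char *buffer;           // start of the caller-allocated area
      unsigned char *buffer_end;       // one past the end of the area
      unsigned char *end_of_used_area; // advanced by the MRR code as it consumes space
    };

    static void init_mrr_buffer(HandlerBuffer *buf, unsigned char *mem, size_t size)
    {
      buf->buffer= mem;
      buf->buffer_end= mem + size;
      buf->end_of_used_area= mem;      // nothing used yet
    }

    int main()
    {
      unsigned char mem[4096];
      HandlerBuffer buf;
      init_mrr_buffer(&buf, mem, sizeof(mem));
      return (size_t)(buf.buffer_end - buf.buffer) == sizeof(mem) ? 0 : 1;
    }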
(specified partition handler ) specified table handler - (NDB, NDB) NDB OK (MYISAM, MYISAM) - OK (MYISAM, -) - NOT OK (MYISAM, -) MYISAM OK (- , MYISAM) - NOT OK (- , -) MYISAM OK (-,-) - OK - (NDB, MYISAM) * NOT OK */ bool partition_info::check_engine_mix(handlerton *engine_type, diff --git a/sql/rpl_constants.h b/sql/rpl_constants.h index f83588ce321..cc6fb8145ad 100644 --- a/sql/rpl_constants.h +++ b/sql/rpl_constants.h @@ -36,8 +36,6 @@ enum Incident { Enumeration of the reserved formats of Binlog extra row information */ enum ExtraRowInfoFormat { - /** Ndb format */ - ERIF_NDB = 0, /** Reserved formats 0 -> 63 inclusive */ ERIF_LASTRESERVED = 63, diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 7042669a363..233bb835bd8 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -5920,9 +5920,8 @@ ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT eng "Cannot change the binary logging format inside a stored function or trigger" ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden" -ER_NDB_CANT_SWITCH_BINLOG_FORMAT - eng "The NDB cluster engine does not support changing the binlog format on the fly yet" - ger "Die Speicher-Engine NDB Cluster unterstützt das Ändern des Binärlog-Formats zur Laufzeit noch nicht" +ER_UNUSED_13 + eng "You should never see it" ER_PARTITION_NO_TEMPORARY eng "Cannot create temporary table with partitions" ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich" @@ -6139,9 +6138,8 @@ ER_SLAVE_HEARTBEAT_FAILURE ger "Unerwartete Daten vom Heartbeat des Masters: %s" ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds)." -ER_NDB_REPLICATION_SCHEMA_ERROR - eng "Bad schema for mysql.ndb_replication table. Message: %-.64s" - ger "Fehlerhaftes Schema für mysql.ndb_replication table. Meldung: %-.64s" +ER_UNUSED_14 + eng "You should never see it" ER_CONFLICT_FN_PARSE_ERROR eng "Error in parsing conflict function. Message: %-.64s" ger "Fehler beim Parsen einer Konflikt-Funktion. Meldung: %-.64s" diff --git a/sql/slave.cc b/sql/slave.cc index 3241f3fc117..ca29410cd1d 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3091,9 +3091,7 @@ static ulong read_event(MYSQL* mysql, Master_info *mi, bool* suppress_warnings) /* Check if the current error is of temporary nature of not. Some errors are temporary in nature, such as - ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT. Ndb also signals - that the error is temporary by pushing a warning with the error code - ER_GET_TEMPORARY_ERRMSG, if the originating error is temporary. + ER_LOCK_DEADLOCK and ER_LOCK_WAIT_TIMEOUT. 
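The has_temporary_error() comment above is trimmed to the two cases that still matter: after this patch only the statement's own error code decides whether the SQL thread retries, since the NDB habit of signalling a temporary failure through an ER_GET_TEMPORARY_ERRMSG warning is gone. A stand-alone illustration (the numbers are the usual mysqld_error.h values, shown here only as examples):

    #include <cstdio>

    enum { ER_LOCK_DEADLOCK= 1213, ER_LOCK_WAIT_TIMEOUT= 1205 };

    // Deadlocks and lock-wait timeouts are transient: the slave may simply retry the event.
    static bool is_temporary_error(int sql_errno)
    {
      return sql_errno == ER_LOCK_DEADLOCK || sql_errno == ER_LOCK_WAIT_TIMEOUT;
    }

    int main()
    {
      printf("deadlock: %d, other: %d\n", is_temporary_error(1213), is_temporary_error(1062));
      return 0;
    }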
*/ static int has_temporary_error(THD *thd) { @@ -3123,25 +3121,6 @@ static int has_temporary_error(THD *thd) thd->get_stmt_da()->sql_errno() == ER_LOCK_WAIT_TIMEOUT) DBUG_RETURN(1); -#ifdef HAVE_NDB_BINLOG - /* - currently temporary error set in ndbcluster - */ - List_iterator_fast it(thd->warning_info->warn_list()); - Sql_condition *err; - while ((err= it++)) - { - DBUG_PRINT("info", ("has condition %d %s", err->get_sql_errno(), - err->get_message_text())); - switch (err->get_sql_errno()) - { - case ER_GET_TEMPORARY_ERRMSG: - DBUG_RETURN(1); - default: - break; - } - } -#endif DBUG_RETURN(0); } diff --git a/sql/sql_class.h b/sql/sql_class.h index dfa6da5b7ce..d7bbfc3799d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1369,10 +1369,9 @@ enum enum_thread_type SYSTEM_THREAD_DELAYED_INSERT= 1, SYSTEM_THREAD_SLAVE_IO= 2, SYSTEM_THREAD_SLAVE_SQL= 4, - SYSTEM_THREAD_NDBCLUSTER_BINLOG= 8, - SYSTEM_THREAD_EVENT_SCHEDULER= 16, - SYSTEM_THREAD_EVENT_WORKER= 32, - SYSTEM_THREAD_BINLOG_BACKGROUND= 64 + SYSTEM_THREAD_EVENT_SCHEDULER= 8, + SYSTEM_THREAD_EVENT_WORKER= 16, + SYSTEM_THREAD_BINLOG_BACKGROUND= 32 }; inline char const * @@ -1385,7 +1384,6 @@ show_system_thread(enum_thread_type thread) RETURN_NAME_AS_STRING(SYSTEM_THREAD_DELAYED_INSERT); RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_IO); RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_SQL); - RETURN_NAME_AS_STRING(SYSTEM_THREAD_NDBCLUSTER_BINLOG); RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_SCHEDULER); RETURN_NAME_AS_STRING(SYSTEM_THREAD_EVENT_WORKER); default: diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 063b90a6780..a930cb0f12d 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -656,10 +656,6 @@ not_silent: query_length= thd->query_length(); DBUG_ASSERT(query); - ha_binlog_log_query(thd, 0, LOGCOM_CREATE_DB, - query, query_length, - db, ""); - if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, TRUE); @@ -735,10 +731,6 @@ bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) thd->variables.collation_database= thd->db_charset; } - ha_binlog_log_query(thd, 0, LOGCOM_ALTER_DB, - thd->query(), thd->query_length(), - db, ""); - if (mysql_bin_log.is_open()) { int errcode= query_error_code(thd, TRUE); @@ -883,11 +875,6 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) should be dropped while the database is being cleaned, but in the event that a change in the code to remove other objects is made, these drops should still not be logged. - - Notice that the binary log have to be enabled over the call to - ha_drop_database(), since NDB otherwise detects the binary log - as disabled and will not log the drop database statement on any - other connected server. */ ha_drop_database(path); diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h index b83ccf4b032..fa00e309623 100644 --- a/sql/sql_join_cache.h +++ b/sql/sql_join_cache.h @@ -84,7 +84,7 @@ class EXPLAIN_BKA_TYPE; For the third algorithm the accumulation of records allows to optimize fetching rows of the second operand from disk for some engines (MyISAM, InnoDB), or to minimize the number of round-trips between the Server and - the engine nodes (NDB Cluster). + the engine nodes. 
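The enum_thread_type hunk above works because the values are single-bit flags rather than an ordinal sequence, so dropping SYSTEM_THREAD_NDBCLUSTER_BINLOG lets the later members be re-packed into the freed bit positions without changing how the flags combine in masks. A small sketch of the same idea with hypothetical names:

    #include <cstdio>

    // Hypothetical mirror of the renumbered flags; each member is a distinct power of two.
    enum ThreadType {
      NON_SYSTEM_THREAD = 0,
      DELAYED_INSERT    = 1,
      SLAVE_IO          = 2,
      SLAVE_SQL         = 4,
      EVENT_SCHEDULER   = 8,   // was 16 before the NDB member was removed
      EVENT_WORKER      = 16,  // was 32
      BINLOG_BACKGROUND = 32   // was 64
    };

    int main()
    {
      unsigned mask= SLAVE_IO | SLAVE_SQL;          // sets of thread kinds combine with OR
      printf("%d\n", (mask & EVENT_WORKER) != 0);   // prints 0: not part of the set
      return 0;
    }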
*/ class JOIN_CACHE :public Sql_alloc diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 1ce952b9030..9bc8147c75f 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -6887,7 +6887,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, 1) Write the new frm, pack it and then delete it 2) Perform the change within the handler */ - if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW | WFRM_PACK_FRM) || + if (mysql_write_frm(lpt, WFRM_WRITE_SHADOW) || mysql_change_partitions(lpt)) { goto err; diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc index 9db8b1c136a..8755ec47c54 100644 --- a/sql/sql_partition_admin.cc +++ b/sql/sql_partition_admin.cc @@ -129,7 +129,7 @@ static bool check_exchange_partition(TABLE *table, TABLE *part_table) { /* Only allowed on partitioned tables throught the generic ha_partition - handler, i.e not yet for native partitioning (NDB). + handler, i.e not yet for native partitioning. */ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); DBUG_RETURN(TRUE); @@ -784,11 +784,6 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd) if (open_tables(thd, &first_table, &table_counter, 0)) DBUG_RETURN(true); - /* - TODO: Add support for TRUNCATE PARTITION for NDB and other - engines supporting native partitioning. - */ - if (!first_table->table || first_table->view || first_table->table->s->db_type() != partition_hton) { diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index f5e0fd102ed..9ae3d792744 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -175,7 +175,6 @@ static struct { "performance_schema", PLUGIN_FORCE }, /* we disable few other plugins by default */ - { "ndbcluster", PLUGIN_OFF }, { "feedback", PLUGIN_OFF } }; @@ -1137,7 +1136,7 @@ static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check) historical ndb behavior caused MySQL plugins to specify status var names in full, with the plugin name prefix. this was never fixed in MySQL. - MariaDB fixes that but support MySQL style too. + MariaDB fixes that but supports MySQL style too. */ SHOW_VAR *show_vars= plugin->plugin->status_vars; SHOW_VAR tmp_array[2]= { @@ -1169,10 +1168,6 @@ static void plugin_deinitialize(struct st_plugin_int *plugin, bool ref_check) } plugin->state= PLUGIN_IS_UNINITIALIZED; - /* - We do the check here because NDB has a worker THD which doesn't - exit until NDB is shut down. - */ if (ref_check && plugin->ref_count) sql_print_error("Plugin '%s' has ref_count=%d after deinitialization.", plugin->name.str, plugin->ref_count); @@ -1379,7 +1374,7 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin, historical ndb behavior caused MySQL plugins to specify status var names in full, with the plugin name prefix. this was never fixed in MySQL. - MariaDB fixes that, but supports MySQL style too. + MariaDB fixes that but supports MySQL style too. 
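The sql_plugin.cc comment corrected above ("supports MySQL style too") is about status variable naming: historically plugins had to hard-code the plugin-name prefix into every status variable, while MariaDB adds the prefix itself yet still accepts names that already carry it. The actual wrapping via the SHOW_VAR tmp_array is not reproduced here; the snippet below is only a rough, hypothetical illustration of the two naming styles:

    #include <cstdio>
    #include <cstring>

    // Minimal stand-in for SHOW_VAR: a named status value (the real struct also carries a type).
    struct ShowVar { const char *name; const char *value; };

    // Print each variable as "<plugin>_<name>", adding the prefix only when it is missing.
    static void print_status_vars(const char *plugin, const ShowVar *vars, int count)
    {
      size_t plen= strlen(plugin);
      for (int i= 0; i < count; i++)
      {
        if (strncmp(vars[i].name, plugin, plen) == 0 && vars[i].name[plen] == '_')
          printf("%s = %s\n", vars[i].name, vars[i].value);             // MySQL style: already prefixed
        else
          printf("%s_%s = %s\n", plugin, vars[i].name, vars[i].value);  // prefix supplied by the server
      }
    }

    int main()
    {
      ShowVar vars[]= { { "hits", "42" }, { "example_misses", "7" } };
      print_status_vars("example", vars, 2);
      return 0;
    }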
*/ SHOW_VAR *show_vars= plugin->plugin->status_vars; SHOW_VAR tmp_array[2]= { @@ -3590,12 +3585,6 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp, options+= 2; } - if (!my_strcasecmp(&my_charset_latin1, plugin_name_ptr, "NDBCLUSTER")) - { - plugin_name_ptr= const_cast("ndb"); // Use legacy "ndb" prefix - plugin_name_len= 3; - } - /* Two passes as the 2nd pass will take pointer addresses for use by my_getopt and register_var() in the first pass uses realloc @@ -3927,10 +3916,6 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp, my_afree(tmp_backup); } - /* - We adjust the default value to account for the hardcoded exceptions - we have set for the federated and ndbcluster storage engines. - */ if (tmp->load_option != PLUGIN_FORCE && tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT) opts[0].def_value= opts[1].def_value= plugin_load_option; diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index d8db5c55c3b..e91b3b0a2ed 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3055,8 +3055,6 @@ int reset_slave(THD *thd, Master_info* mi) DBUG_RETURN(ER_SLAVE_MUST_STOP); } - ha_reset_slave(thd); - // delete relay logs, clear relay log coordinates if ((error= purge_relay_logs(&mi->rli, thd, 1 /* just reset */, @@ -3619,13 +3617,6 @@ bool mysql_show_binlog_events(THD* thd) /* select wich binary log to use: binlog or relay */ if ( thd->lex->sql_command == SQLCOM_SHOW_BINLOG_EVENTS ) { - /* - Wait for handlers to insert any pending information - into the binlog. For e.g. ndb which updates the binlog asynchronously - this is needed so that the uses sees all its own commands in the binlog - */ - ha_binlog_wait(thd); - binary_log= &mysql_bin_log; } else /* showing relay log contents */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index ab09dbaef12..436a2f6ce5d 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -23634,15 +23634,6 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta, table_map prefix_tab pushed_cond) { eta->push_extra(ET_USING_WHERE_WITH_PUSHED_CONDITION); - /* - psergey-todo: what to do? This was useful with NDB only. - - if (explain_flags & DESCRIBE_EXTENDED) - { - extra.append(STRING_WITH_LEN(": ")); - ((COND *)pushed_cond)->print(&extra, QT_ORDINARY); - } - */ } else eta->push_extra(ET_USING_WHERE); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 19914f31c97..68c25438f0c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1853,27 +1853,6 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) goto end; } } - if (flags & WFRM_PACK_FRM) - { - /* - We need to pack the frm file and after packing it we delete the - frm file to ensure it doesn't get used. This is only used for - handlers that have the main version of the frm file stored in the - handler. 
- */ - const uchar *data; - size_t length; - if (readfrm(shadow_path, &data, &length) || - packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len)) - { - my_free(const_cast(data)); - my_free(lpt->pack_frm_data); - mem_alloc_error(length); - error= 1; - goto end; - } - error= mysql_file_delete(key_file_frm, shadow_frm_name, MYF(MY_WME)); - } if (flags & WFRM_INSTALL_SHADOW) { #ifdef WITH_PARTITION_STORAGE_ENGINE diff --git a/sql/sql_table.h b/sql/sql_table.h index 444626e0363..6a7fddb96ab 100644 --- a/sql/sql_table.h +++ b/sql/sql_table.h @@ -122,8 +122,7 @@ enum enum_explain_filename_mode #define WFRM_WRITE_SHADOW 1 #define WFRM_INSTALL_SHADOW 2 -#define WFRM_PACK_FRM 4 -#define WFRM_KEEP_SHARE 8 +#define WFRM_KEEP_SHARE 4 /* Flags for conversion functions. */ static const uint FN_FROM_IS_TMP= 1 << 0; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index edc8e47a2b5..e7fcdfbe596 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1297,7 +1297,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token NATURAL /* SQL-2003-R */ %token NCHAR_STRING %token NCHAR_SYM /* SQL-2003-R */ -%token NDBCLUSTER_SYM %token NE /* OPERATOR */ %token NEG %token NEW_SYM /* SQL-2003-R */ @@ -14261,7 +14260,6 @@ keyword_sp: | NAMES_SYM {} | NATIONAL_SYM {} | NCHAR_SYM {} - | NDBCLUSTER_SYM {} | NEXT_SYM {} | NEW_SYM {} | NO_WAIT_SYM {} diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index ca504acf64b..8d31ed88f7f 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -2885,7 +2885,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_default_stopword = /* general descriptive text (for SHOW PLUGINS) */ /* const char* */ - STRUCT_FLD(descr, "Default stopword list for InnDB Full Text Search"), + STRUCT_FLD(descr, "Default stopword list for InnoDB Full Text Search"), /* the plugin license (PLUGIN_LICENSE_XXX) */ /* int */ diff --git a/storage/ndb/MAINTAINERS b/storage/ndb/MAINTAINERS deleted file mode 100644 index 38f504cd1a1..00000000000 --- a/storage/ndb/MAINTAINERS +++ /dev/null @@ -1,165 +0,0 @@ -Copyright (c) 2007 MySQL AB - -MySQL Cluster MAINTAINERS -------------------------- - -This is a list of knowledgable people in parts of the NDB code. - -In changing that area of code, you probably want to talk to the -people who know a lot about it to look over the patch. - -When sending patches and queries, always CC the mailing list. - -If no list specified, assume internals@lists.mysql.com - -P: Person -M: Mail -L: Mailing list -W: Web page with status/info -C: Comment -SRC: Source directory (relative to this directory) -T: SCM tree type and location -S: Status, one of: - - Supported: Somebody is paid to maintain this. - Maintained: Not their primary job, but maintained. - Orphan: No current obvious maintainer. - Obsolete: Replaced by something else. 
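The mysql_write_frm() hunk above removes the WFRM_PACK_FRM branch: with no engine left that keeps its own master copy of the .frm, there is no reason to read the shadow .frm back, compress it into lpt->pack_frm_data and delete the on-disk file. The shape of that round trip, sketched with hypothetical read_frm()/pack_frm() helpers (not the server's readfrm()/packfrm()):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Hypothetical helpers: load a .frm image and produce a (pretend) compressed copy of it.
    static unsigned char *read_frm(const char *path, size_t *len)
    {
      (void) path;
      *len= 64;                                     // pretend the file is 64 bytes
      return (unsigned char *) calloc(1, *len);
    }

    static unsigned char *pack_frm(const unsigned char *data, size_t len, size_t *packed_len)
    {
      *packed_len= len / 2;                         // pretend compression halves it
      unsigned char *packed= (unsigned char *) malloc(*packed_len);
      if (packed)
        memcpy(packed, data, *packed_len);
      return packed;
    }

    int main()
    {
      size_t len, packed_len;
      unsigned char *data= read_frm("shadow.frm", &len);
      unsigned char *packed= data ? pack_frm(data, len, &packed_len) : NULL;
      if (!data || !packed)                         // the removed hunk freed both and bailed out
      {
        free(data);
        free(packed);
        return 1;
      }
      printf("packed %zu -> %zu bytes\n", len, packed_len);
      // The original then deleted the shadow .frm so only the packed copy survived.
      free(packed);
      free(data);
      return 0;
    }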
- -------------------------------------------------------------- - -Binlog Injector -SRC: ha_ndbcluster_binlog.cc -C: see also row based replication -P: Stewart Smith -M: stewart@mysql.com -C: Original author -P: Tomas Ulin -M: tomas@mysql.com -C: Lots of updates -P: Martin Skold -M: martin@mysql.com -C: Metadata ops -S: Supported - -BLOBs -SRC: ha_ndbcluster.cc -SRC: src/ndbapi/NdbBlob* -P: Pekka -M: pekka@mysql.com -S: Supported - -cpcd/cpcc -SRC: src/cw/cpcd -SRC: src/cw/cpcc -C: Maintained only as part of autotest -P: Jonas Orland -M: jonas@mysql.com -S: Maintained - -cpcc-win32 -SRC: src/cw/cpcc-win32 -S: Obsolete - -Handler -SRC: ha_ndbcluster.cc -P: Martin Skold -M: martin@mysql.com -S: Supported - -Management Server -SRC: src/mgmsrv/ -P: Stewart Smith -M: stewart@mysql.com -S: Supported - -Management Client -SRC: src/mgmclient/ -P: Stewart Smith -M: stewart@mysql.com -S: Supported - -Management API -SRC: src/mgmapi/ -P: Stewart Smith -M: stewart@mysql.com -S: Supported - -NDB API Examples -SRC: ndbapi-examples/ -P: Tomas Ulin -M: tomas@mysql.com -C: Originally by Lars -P: Lars Thalmann -M: lars@mysql.com -S: Maintained - -NDB API NdbRecord Examples -SRC: ndbapi-examples/ -P: Kristian Nielsen -M: knielsen@mysql.com -S: Maintained - -tsman -C: Disk Data (Table Space MANager) -SRC: src/kernel/blocks/tsman.cpp -SRC: src/kernel/blocks/tsman.hpp -P: Jonas Oreland -M: jonas@mysql.com -S: Supported - -lgman -C: Disk Data (LoG MANager) -SRC: src/kernel/blocks/lgman.cpp -SRC: src/kernel/blocks/lgman.hpp -P: Jonas Oreland -M: jonas@mysql.com -S: Supported - -pgman -C: Disk Data (PaGe MANager) -SRC: src/kernel/blocks/lgman.cpp -SRC: src/kernel/blocks/lgman.hpp -P: Jonas Oreland -M: jonas@mysql.com -S: Supported - -SUMA -C: SUbscription MAnager -C: Used for replication -SRC: src/kernel/blocks/suma/ -P: Tomas Ulin -P: tomas@mysql.com -P: Jonas Oreland -P: jonas@mysql.com -S: Supported - -TRIX -C: TRiggers and IndeXs (but only online Index build) -SRC: src/kernel/blocks/trix -P: Martin Skold -P: mskold@mysql.com -S: Supported - -QMGR -C: Cluster (with a Q) ManaGeR -C: Heartbeats etc -SRC: src/kernel/blocks/qmgr -S: Supported - -NDBFS -C: NDB FileSystem -C: File System abstraction -SRC: src/kernel/blocks/ndbfs -S: Supported - -TRIX -C: TRiggers and IndeXs (but only online Index build) -SRC: src/kernel/blocks/trix -S: Supported - -TRIX -C: TRiggers and IndeXs (but only online Index build) -SRC: src/kernel/blocks/trix -S: Supported - diff --git a/storage/ndb/Makefile.am b/storage/ndb/Makefile.am deleted file mode 100644 index 4c97046f353..00000000000 --- a/storage/ndb/Makefile.am +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = src tools . 
include @ndb_opt_subdirs@ -DIST_SUBDIRS = src tools include test docs -EXTRA_DIST = config ndbapi-examples plug.in -DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in - -include $(top_srcdir)/storage/ndb/config/common.mk.am - -dist-hook: - -rm -rf `find $(distdir) -type d -name SCCS` - -rm -rf `find $(distdir)/ndbapi-examples -name '*.o'` - list='$(SUBDIRS)'; for subdir in $$list; do \ - if test "$$subdir" != "." -a "$$subdir" != "include"; then \ - files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \ - for f in $$files; do \ - if test -d "$(distdir)/`dirname $$f`" -a ! -e "$(distdir)/$$f"; then \ - cp $$f $(distdir)/$$f; \ - fi; \ - done; \ - fi; \ - done - -windoze: - for i in `find . -name 'Makefile.am' -print`; \ - do make -C `dirname $$i` windoze-dsp; done - -windoze-dsp: - -all-windoze-dsp: windoze - find . -name '*.dsp' | grep -v SCCS | xargs unix2dos - $(top_srcdir)/storage/ndb/config/make-win-dsw.sh | unix2dos > ndb.dsw - tar cvfz ndb-win-dsp.tar.gz ndb.dsw `find . -name '*.dsp' | grep -v SCCS` diff --git a/storage/ndb/bin/.empty b/storage/ndb/bin/.empty deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/storage/ndb/bin/check-regression.sh b/storage/ndb/bin/check-regression.sh deleted file mode 100755 index 93a31ccb39c..00000000000 --- a/storage/ndb/bin/check-regression.sh +++ /dev/null @@ -1,180 +0,0 @@ -#!/bin/sh -# NAME -# check-regression.sh -# -# SYNOPSIS -# check-regression.sh -# -# DESCRIPTION -# -# This scrip must be run before any major cvs checkins are done. -# It will perform a number of regression tests to check that -# nothing is broken. -# -# OPTIONS -# -# EXAMPLES -# -# -# ENVIRONMENT -# NDB_PROJ_HOME Home dir for ndb -# verbose verbose printouts -# -# FILES -# $NDB_PROJ_HOME/lib/funcs.sh general shell script functions -# -# -# SEE ALSO -# -# DIAGNOSTICTS -# -# -# VERSION -# 1.0 -# -# AUTHOR -# -# - -. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff - -synopsis="check-regression.sh" -progname=`basename $0` - -numOfTestsOK=0 -numOfTestsFailed=0 - -LOG=check-regression.`date '+%Y-%m-%d'` - -executeTest() -{ - eval "$@" | tee -a $LOG - - if [ $? 
-eq 0 ] - then - echo "SUCCESS: $@" - numOfTestsOK=`expr $numOfTestsOK + 1` - else - echo "FAILED: $@" - numOfTestsFailed=`expr $numOfTestsFailed + 1` - fi -} - -# -# INFO -# -trace "Starting: `date`" -trace "NDB_PROJ_HOME = $NDB_PROJ_HOME" -trace "NDB_TOP = $NDB_TOP" - -# -# THE TESTS TO EXECUTE -# - -# Testsuite: testDataBuffers -# Number of tests: 1 -executeTest 'drop_tab ' TB00 TB01 TB02 TB03 TB04 TB05 TB06 TB07 TB08 TB09 TB10 TB11 TB12 TB13 TB14 TB15 -executeTest 'testDataBuffers' -executeTest 'drop_tab ' TB00 TB01 TB02 TB03 TB04 TB05 TB06 TB07 TB08 TB09 TB10 TB11 TB12 TB13 TB14 TB15 - -TABLES="T9 T13" - -# Testsuite: testBasic -# Number of tests: 16 -executeTest 'testBasic -n PkInsert' $TABLES -executeTest 'testBasic -n PkRead' $TABLES -executeTest 'testBasic -n PkUpdate' $TABLES -executeTest 'testBasic -n PkDelete' $TABLES -#executeTest 'testBasic -n UpdateAndRead' -#executeTest 'testBasic -n PkReadAndLocker' -#executeTest 'testBasic -n PkReadAndLocker2' -#executeTest 'testBasic -n PkReadUpdateAndLocker' -#executeTest 'testBasic -n ReadWithLocksAndInserts' -#executeTest 'testBasic -n ReadConsistency' -#executeTest 'testBasic -n PkInsertTwice' -#executeTest 'testBasic -n Fill' -#executeTest 'testBasic -n FillTwice' -#executeTest 'testBasic -n NoCommitSleep' -#executeTest 'testBasic -n NoCommit626' -#executeTest 'testBasic -n NoCommitAndClose' - -# Testsuite: testBasicAsynch -# Number of tests: 4 -executeTest 'testBasicAsynch -n PkInsertAsynch' $TABLES -executeTest 'testBasicAsynch -n PkReadAsynch' $TABLES -executeTest 'testBasicAsynch -n PkUpdateAsynch' $TABLES -executeTest 'testBasicAsynch -n PkDeleteAsynch' $TABLES - -# Testsuite: testDict -# Number of tests: 6 -#executeTest 'testDict -n CreateAndDrop' -#executeTest 'testDict -n CreateAndDropWithData' -#executeTest 'testDict -n CreateAndDropDuring' -#executeTest 'testDict -n CreateInvalidTables' -#executeTest 'testDict -n CreateTableWhenDbIsFull' -#executeTest 'testDict -n CreateMaxTables' - -# Testsuite: testScan -# Number of tests: 34 -#executeTest 'testScan -n ScanRead' -#executeTest 'testScan -n ScanRead16' -executeTest 'testScan -n ScanRead240' $TABLES -executeTest 'testScan -n ScanUpdate' $TABLES -executeTest 'testScan -n ScanUpdate2' $TABLES -executeTest 'testScan -n ScanDelete' $TABLES -executeTest 'testScan -n ScanDelete2' $TABLES -#executeTest 'testScan -n ScanUpdateAndScanRead' -#executeTest 'testScan -n ScanReadAndLocker' -#executeTest 'testScan -n ScanReadAndPkRead' -#executeTest 'testScan -n ScanRead488' -#executeTest 'testScan -n ScanWithLocksAndInserts' -#executeTest 'testScan -n ScanReadAbort' -#executeTest 'testScan -n ScanReadAbort15' -#executeTest 'testScan -n ScanReadAbort16' -#executeTest 'testScan -n ScanUpdateAbort16' -#executeTest 'testScan -n ScanReadAbort240' -#executeTest 'testScan -n ScanReadRestart' -#executeTest 'testScan -n ScanReadRestart16' -#executeTest 'testScan -n ScanReadRestart32' -#executeTest 'testScan -n ScanUpdateRestart' -#executeTest 'testScan -n ScanUpdateRestart16' -#executeTest 'testScan -n CheckGetValue' -#executeTest 'testScan -n CloseWithoutStop' -#executeTest 'testScan -n NextScanWhenNoMore' -#executeTest 'testScan -n ExecuteScanWithoutOpenScan' -#executeTest 'testScan -n OnlyOpenScanOnce' -#executeTest 'testScan -n OnlyOneOpInScanTrans' -#executeTest 'testScan -n OnlyOneOpBeforeOpenScan' -#executeTest 'testScan -n OnlyOneScanPerTrans' -#executeTest 'testScan -n NoCloseTransaction' -#executeTest 'testScan -n CheckInactivityTimeOut' -#executeTest 'testScan -n 
CheckInactivityBeforeClose' -#executeTest 'testScan -n CheckAfterTerror' - -# Testsuite: testScanInterpreter -# Number of tests: 1 -#executeTest 'testScanInterpreter -n ScanLessThan' - -TABLES="T6 T13" - -# Testsuite: testSystemRestart -# Number of tests: 4 -executeTest 'testSystemRestart -l 1 -n SR1' $TABLES -executeTest 'testSystemRestart -l 1 -n SR2' $TABLES -#executeTest 'testSystemRestart -n SR_UNDO' -#executeTest 'testSystemRestart -n SR_FULLDB' - -# TESTS FINISHED -trace "Finished: `date`" - -# -# TEST SUMMARY -# -if [ $numOfTestsFailed -eq 0 ] -then - echo "-- REGRESSION TEST SUCCESSFUL --" -else - echo "-- REGRESSION TEST FAILED!! --" -fi -echo "Number of successful tests: $numOfTestsOK" -echo "Number of failed tests : $numOfTestsFailed" diff --git a/storage/ndb/bin/makeTestPrograms_html.sh b/storage/ndb/bin/makeTestPrograms_html.sh deleted file mode 100755 index ac31c8a6267..00000000000 --- a/storage/ndb/bin/makeTestPrograms_html.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh -rm $1 -touch $1 -echo "" >> $1 -echo "" >> $1 -echo "" >> $1 -echo "" >> $1 -testBasic --print_html >> $1 -testBackup --print_html >> $1 -testBasicAsynch --print_html >> $1 -testDict --print_html >> $1 -testBank --print_html >> $1 -testIndex --print_html >> $1 -testNdbApi --print_html >> $1 -testNodeRestart --print_html >> $1 -testOperations --print_html >> $1 -testRestartGci --print_html >> $1 -testScan --print_html >> $1 -testScanInterpreter --print_html >> $1 -testSystemRestart --print_html >> $1 -echo "
Name Description
" >> $1 - diff --git a/storage/ndb/config/common.mk.am b/storage/ndb/config/common.mk.am deleted file mode 100644 index 40caeabd3b9..00000000000 --- a/storage/ndb/config/common.mk.am +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2004, 2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -ndbbindir = "$(libexecdir)" -ndbtoolsdir = "$(bindir)" -ndbtestdir = "$(bindir)" -ndblibdir = "$(pkglibdir)" -ndbincludedir = "$(pkgincludedir)/storage/ndb" -ndbapiincludedir = "$(pkgincludedir)/storage/ndb/ndbapi" -mgmapiincludedir = "$(pkgincludedir)/storage/ndb/mgmapi" - -INCLUDES = $(INCLUDES_LOC) -LDADD = $(LDADD_LOC) -DEFS = @DEFS@ @NDB_DEFS@ $(DEFS_LOC) $(NDB_EXTRA_FLAGS) -NDB_CXXFLAGS=@ndb_cxxflags_fix@ $(NDB_CXXFLAGS_LOC) -NDB_AM_CXXFLAGS:= $(AM_CXXFLAGS) -AM_CXXFLAGS=$(NDB_AM_CXXFLAGS) $(NDB_CXXFLAGS) diff --git a/storage/ndb/config/make-win-dsw.sh b/storage/ndb/config/make-win-dsw.sh deleted file mode 100755 index d56914b3450..00000000000 --- a/storage/ndb/config/make-win-dsw.sh +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -cat < - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -EOF -done - -cat< -{{{ -}}} - -Package=<3> -{{{ -}}} - -############################################################################### - -EOF diff --git a/storage/ndb/config/type_kernel.mk.am b/storage/ndb/config/type_kernel.mk.am deleted file mode 100644 index dbf63a76ec5..00000000000 --- a/storage/ndb/config/type_kernel.mk.am +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2004, 2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -INCLUDES += \ - -I$(srcdir) \ - -I$(top_builddir)/include \ - -I$(top_builddir)/storage/ndb/include \ - -I$(top_srcdir)/include \ - -I$(top_srcdir)/storage/ndb/include \ - -I$(top_srcdir)/storage/ndb/src/kernel/vm \ - -I$(top_srcdir)/storage/ndb/src/kernel/error \ - -I$(top_srcdir)/storage/ndb/src/kernel \ - -I$(top_srcdir)/storage/ndb/include/kernel \ - -I$(top_srcdir)/storage/ndb/include/transporter \ - -I$(top_srcdir)/storage/ndb/include/debugger \ - -I$(top_srcdir)/storage/ndb/include/mgmapi \ - -I$(top_srcdir)/storage/ndb/include/mgmcommon \ - -I$(top_srcdir)/storage/ndb/include/ndbapi \ - -I$(top_srcdir)/storage/ndb/include/util \ - -I$(top_srcdir)/storage/ndb/include/portlib \ - -I$(top_srcdir)/storage/ndb/include/logger - -#AM_LDFLAGS = @ndb_ldflags@ diff --git a/storage/ndb/config/type_mgmapiclient.mk.am b/storage/ndb/config/type_mgmapiclient.mk.am deleted file mode 100644 index 10021771055..00000000000 --- a/storage/ndb/config/type_mgmapiclient.mk.am +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -INCLUDES += -I$(top_srcdir)/storage/ndb/include/mgmapi diff --git a/storage/ndb/config/type_ndbapi.mk.am b/storage/ndb/config/type_ndbapi.mk.am deleted file mode 100644 index a54426053e8..00000000000 --- a/storage/ndb/config/type_ndbapi.mk.am +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -INCLUDES += \ - -I$(srcdir) \ - -I$(top_builddir)/include \ - -I$(top_builddir)/storage/ndb/include \ - -I$(top_srcdir)/include \ - -I$(top_srcdir)/mysys \ - -I$(top_srcdir)/storage/ndb/include \ - -I$(top_srcdir)/storage/ndb/include/kernel \ - -I$(top_srcdir)/storage/ndb/include/transporter \ - -I$(top_srcdir)/storage/ndb/include/debugger \ - -I$(top_srcdir)/storage/ndb/include/mgmapi \ - -I$(top_srcdir)/storage/ndb/include/mgmcommon \ - -I$(top_srcdir)/storage/ndb/include/ndbapi \ - -I$(top_srcdir)/storage/ndb/include/util \ - -I$(top_srcdir)/storage/ndb/include/portlib \ - -I$(top_srcdir)/storage/ndb/include/logger diff --git a/storage/ndb/config/type_ndbapiclient.mk.am b/storage/ndb/config/type_ndbapiclient.mk.am deleted file mode 100644 index 57cb913101f..00000000000 --- a/storage/ndb/config/type_ndbapiclient.mk.am +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -INCLUDES += -I$(top_srcdir)/storage/ndb/include/ndbapi diff --git a/storage/ndb/config/type_ndbapitest.mk.am b/storage/ndb/config/type_ndbapitest.mk.am deleted file mode 100644 index 7ca742e1ff1..00000000000 --- a/storage/ndb/config/type_ndbapitest.mk.am +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2004, 2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -LDADD += $(top_builddir)/storage/ndb/test/src/libNDBT.a \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ - -INCLUDES += -I$(top_srcdir) \ - -I$(top_builddir)/include \ - -I$(top_builddir)/storage/ndb/include \ - -I$(top_srcdir)/include \ - -I$(top_srcdir)/storage/ndb/include \ - -I$(top_srcdir)/storage/ndb/include/ndbapi \ - -I$(top_srcdir)/storage/ndb/include/util \ - -I$(top_srcdir)/storage/ndb/include/portlib \ - -I$(top_srcdir)/storage/ndb/test/include \ - -I$(top_srcdir)/storage/ndb/include/mgmapi diff --git a/storage/ndb/config/type_ndbapitools.mk.am b/storage/ndb/config/type_ndbapitools.mk.am deleted file mode 100644 index 130a23ec2ac..00000000000 --- a/storage/ndb/config/type_ndbapitools.mk.am +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2004, 2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -LDADD += \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ @ZLIB_LIBS@ - -INCLUDES += -I$(srcdir) \ - -I$(top_builddir)/include \ - -I$(top_builddir)/storage/ndb/include \ - -I$(top_srcdir)/include \ - -I$(top_srcdir)/storage/ndb/include \ - -I$(top_srcdir)/storage/ndb/include/ndbapi \ - -I$(top_srcdir)/storage/ndb/include/util \ - -I$(top_srcdir)/storage/ndb/include/portlib \ - -I$(top_srcdir)/storage/ndb/test/include \ - -I$(top_srcdir)/storage/ndb/include/mgmapi \ - -I$(top_srcdir)/storage/ndb/include/kernel diff --git a/storage/ndb/config/type_util.mk.am b/storage/ndb/config/type_util.mk.am deleted file mode 100644 index d696288c010..00000000000 --- a/storage/ndb/config/type_util.mk.am +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -INCLUDES += -I$(srcdir) \ - -I$(top_builddir)/include \ - -I$(top_builddir)/storage/ndb/include \ - -I$(top_srcdir)/include \ - -I$(top_srcdir)/mysys \ - -I$(top_srcdir)/storage/ndb/include \ - -I$(top_srcdir)/storage/ndb/include/util \ - -I$(top_srcdir)/storage/ndb/include/portlib \ - -I$(top_srcdir)/storage/ndb/include/logger diff --git a/storage/ndb/config/win-includes b/storage/ndb/config/win-includes deleted file mode 100755 index 5349c1117d9..00000000000 --- a/storage/ndb/config/win-includes +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -dst=$1 -shift - -out=`echo $* | sed 's/-I\([^ ]*\)/\/I "\1"/g'` -sed -e "s!@includes@!$out!g" $dst > /tmp/$dst.$$ -mv /tmp/$dst.$$ $dst diff --git a/storage/ndb/config/win-lib.am b/storage/ndb/config/win-lib.am deleted file mode 100644 index 2922cf6cfd2..00000000000 --- a/storage/ndb/config/win-lib.am +++ /dev/null @@ -1,116 +0,0 @@ -# Microsoft Developer Studio Project File - Name="@name@" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 - -# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Static Library" 0x0104 - -CFG=@name@ - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. 
For example: -!MESSAGE -!MESSAGE NMAKE /f "@name@.mak" CFG="@name@ - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@name@ - Win32 Release" (based on "Win32 (x86) Static Library") -!MESSAGE "@name@ - Win32 Debug" (based on "Win32 (x86) Static Library") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=xicl6.exe -RSC=rc.exe - -!IF "$(CFG)" == "@name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /G6 /MT /W3 /O2 /D "WIN32" /D "DBUG_OFF" /D "_WINDOWS" /D "NDEBUG" /FD /c -# ADD BASE CPP @includes@ -# ADD CPP @includes@ -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 -# ADD RSC /l 0x409 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=xilink6.exe -lib -# ADD BASE LIB32 /nologo -# ADD LIB32 /nologo /out:".\lib_release\@name@.lib" -@release_libs@ - -!ELSEIF "$(CFG)" == "@name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /FD /c -# ADD CPP /nologo /G6 /MTd /W3 /Z7 /Od /Gf /D "WIN32" /D "_DEBUG" /D "SAFE_MUTEX" /D "_WINDOWS" /FD /c -# ADD BASE CPP @includes@ -# ADD CPP @includes@ -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x409 -# ADD RSC /l 0x409 -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LIB32=xilink6.exe -lib -# ADD BASE LIB32 /nologo -# ADD LIB32 /nologo /out:".\lib_debug\@name@.lib" -@debug_libs@ - -!ENDIF - -# Begin Target - -# Name "@name@ - Win32 Release" -# Name "@name@ - Win32 Debug" - -# Begin Group "Source Files" -# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat" - -@sources@ - -# End Group - -# End Target -# End Project diff --git a/storage/ndb/config/win-libraries b/storage/ndb/config/win-libraries deleted file mode 100755 index 5e15c52e083..00000000000 --- a/storage/ndb/config/win-libraries +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -dst=$1 -shift - -type=$1 -shift - -add_lib(){ - echo `dirname $2`/$1/`basename $2 | sed "s/\.[l]*a/$3.lib/g"` -} - -out_rel= -out_deb= -out_tls_rel= -out_tls_deb= -for i in $* -do -# mysql VC++ project files have for some unknown reason -# choosen NOT to put libdbug.lib in $(topdir)./dbug but rather in $(topdir) -# the same goes for mysys and strings - lib=$i - case $i in - *libdbug.a | *libmysys.a | *libmystrings.a) - lib=`echo $i | sed s'!dbug\/lib!!' | sed 's!mysys\/lib!!' | sed 's!strings\/libmy!!'` - echo "Changing from $i to $lib" - ;; - esac - - if [ `echo $i | grep -c gcc` -eq 0 ] - then - out_rel="${out_rel} `add_lib lib_release $lib`" - out_deb="${out_deb} `add_lib lib_debug $lib`" - out_tls_rel="${out_tls_rel} `add_lib lib_release $lib _tls`" - out_tls_deb="${out_tls_deb} `add_lib lib_debug $lib _tls`" - fi -done - -fix(){ - echo "# ADD BASE ${type}32 $*\n# ADD ${type}32 $*\n" -} - -if [ "$out_rel" ] -then - out_rel=`fix $out_rel` - out_deb=`fix $out_deb` - out_tls_rel=`fix $out_tls_rel` - out_tls_deb=`fix $out_tls_deb` -fi - -sed -e "s!@release_libs@!$out_rel!g" \ - -e "s!@debug_libs@!$out_deb!g" \ - -e "s!@tls_release_libs@!$out_tls_rel!g" \ - -e "s!@tls_debug_libs@!$out_tls_deb!g" \ - $dst > !tmp!$dst.$$ -mv !tmp!$dst.$$ $dst diff --git a/storage/ndb/config/win-name b/storage/ndb/config/win-name deleted file mode 100755 index c6a2f49f343..00000000000 --- a/storage/ndb/config/win-name +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -dst=$1 -shift - -sed -e "s/@name@/`echo $1 | sed 's/\.[l]*a//g'`/g" $dst > /tmp/$dst.$$ -mv /tmp/$dst.$$ $dst diff --git a/storage/ndb/config/win-prg.am b/storage/ndb/config/win-prg.am deleted file mode 100644 index 838345566a2..00000000000 --- a/storage/ndb/config/win-prg.am +++ /dev/null @@ -1,114 +0,0 @@ -# Microsoft Developer Studio Project File - Name="mysqld" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 - -# Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Console Application" 0x0103 - -CFG=@name@ - Win32 Release -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "@name@.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "@name@.mak" CFG="@name@ - Win32 Release" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "@name@ - Win32 Release" (based on "Win32 (x86) Console Application") -!MESSAGE "@name@ - Win32 Debug" (based on "Win32 (x86) Console Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=xicl6.exe -RSC=rc.exe - -!IF "$(CFG)" == "@name@ - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "release" -# PROP Intermediate_Dir "release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "NDB_WIN32" /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /MT /W3 /O2 /D "NDB_WIN32" /I "../zlib" /I "../include" /I "../regex" /D "NDEBUG" /D "DBUG_OFF" /D "HAVE_INNOBASE_DB" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "HAVE_DLOPEN" /FD /c -# ADD BASE CPP @includes@ -# ADD CPP @includes@ -# SUBTRACT CPP /YX -# ADD BASE RSC /l 0x410 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -@release_libs@ -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib Wsock32.lib /nologo /subsystem:console /pdb:none /machine:I386 /out:"release/@name@-opt.exe" -# SUBTRACT LINK32 /debug - -!ELSEIF "$(CFG)" == "@name@ - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "debug" -# PROP Intermediate_Dir "debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "NDB_WIN32" /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /D "_MBCS" /YX /FD /c -# ADD CPP /nologo /G6 /MTd /W3 /Z7 /Od /D "NDB_WIN32" /I "../include" /I "../regex" /I "../zlib" /D "_DEBUG" /D "SAFE_MUTEX" /D "HAVE_INNOBASE_DB" /D "MYSQL_SERVER" /D "_WINDOWS" /D "_CONSOLE" /D "_MBCS" /D "HAVE_DLOPEN" /FD /c -# ADD BASE CPP @includes@ -# ADD CPP @includes@ -# SUBTRACT CPP /Fr /YX -# ADD BASE RSC /l 0x410 /d "_DEBUG" -# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=xilink6.exe -@debug_libs@ -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib 
oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib Wsock32.lib /nologo /subsystem:console /incremental:no /debug /machine:I386 /out:"debug/@name@.exe" /pdbtype:sept - -!ENDIF - -# Begin Target - -# Name "@name@ - Win32 Release" -# Name "@name@ - Win32 Debug" - -@sources@ - -# End Target -# End Project diff --git a/storage/ndb/config/win-sources b/storage/ndb/config/win-sources deleted file mode 100755 index 910189b086b..00000000000 --- a/storage/ndb/config/win-sources +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/sh - -# Copyright (c) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -dst=$1 -shift - -out=`echo $* | sed 's!\([^ ]*\)!# Begin Source File\\\nSOURCE=\1\\\n# End Source File\\\n!g'` -sed -e "s!@sources@!$out!g" $dst > /tmp/$dst.$$ -mv /tmp/$dst.$$ $dst diff --git a/storage/ndb/demos/1-node/1-api-3/Ndb.cfg b/storage/ndb/demos/1-node/1-api-3/Ndb.cfg deleted file mode 100644 index 61309af029e..00000000000 --- a/storage/ndb/demos/1-node/1-api-3/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 3 -127.0.0.1 10000 diff --git a/storage/ndb/demos/1-node/1-db-2/Ndb.cfg b/storage/ndb/demos/1-node/1-db-2/Ndb.cfg deleted file mode 100644 index 9315950b67a..00000000000 --- a/storage/ndb/demos/1-node/1-db-2/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 2 -127.0.0.1 10000 diff --git a/storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg b/storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg deleted file mode 100644 index 61d4c0ecc17..00000000000 --- a/storage/ndb/demos/1-node/1-mgm-1/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 1 -127.0.0.1 10000 diff --git a/storage/ndb/demos/1-node/1-mgm-1/template_config.ini b/storage/ndb/demos/1-node/1-mgm-1/template_config.ini deleted file mode 100644 index 76bb7867e3c..00000000000 --- a/storage/ndb/demos/1-node/1-mgm-1/template_config.ini +++ /dev/null @@ -1,70 +0,0 @@ -############################################################################### -# -# Initial system configuration file for MySQL Cluster v3.1.0 (Demo 1) -# -############################################################################### - -[DB DEFAULT] -NoOfReplicas: 1 -#LockPagesInMainMemory: Y -StopOnError: Y -#MaxNoOfConcurrentOperations: 1024 -#MaxNoOfConcurrentTransactions: 1024 -NoOfIndexPages: 1500 -NoOfDataPages: 5000 -#TimeBetweenLocalCheckpoints: 20 -#TimeBetweenGlobalCheckpoints: 1500 -#NoOfFragmentLogFiles: 8 -BackupMemory: 4M -BackupDataBufferSize: 2M -BackupLogBufferSize: 2M -BackupWriteSize: 32k - -[COMPUTER] -Id: 1 -ByteOrder: Little -HostName: localhost - -[MGM] -Id: 1 -ExecuteOnComputer: 1 -PortNumber: 10000 -PortNumberStats: 10001 - - -[DB] -Id: 2 -ExecuteOnComputer: 1 -FileSystemPath: 
WRITE_PATH_TO_FILESYSTEM_2_HERE - -[API] -Id: 3 -ExecuteOnComputer: 1 - -# Designated MySQL Server API node id -[API] -Id: 11 -ExecuteOnComputer: 1 - -[TCP DEFAULT] -SendSignalId: N -Compression: N -Checksum: N -SendBufferSize: 2000 -MaxReceiveSize: 2000 - -[TCP] -NodeId1: 1 -NodeId2: 2 -PortNumber: 10002 - -[TCP] -NodeId1: 2 -NodeId2: 3 -PortNumber: 10003 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 11 -PortNumber: 10011 diff --git a/storage/ndb/demos/2-node/2-api-4/Ndb.cfg b/storage/ndb/demos/2-node/2-api-4/Ndb.cfg deleted file mode 100644 index 1713a9b5893..00000000000 --- a/storage/ndb/demos/2-node/2-api-4/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 4 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-api-5/Ndb.cfg b/storage/ndb/demos/2-node/2-api-5/Ndb.cfg deleted file mode 100644 index faa2882eeea..00000000000 --- a/storage/ndb/demos/2-node/2-api-5/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 5 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-api-6/Ndb.cfg b/storage/ndb/demos/2-node/2-api-6/Ndb.cfg deleted file mode 100644 index bc2c4809453..00000000000 --- a/storage/ndb/demos/2-node/2-api-6/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 6 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-api-7/Ndb.cfg b/storage/ndb/demos/2-node/2-api-7/Ndb.cfg deleted file mode 100644 index 4107fdb6c5e..00000000000 --- a/storage/ndb/demos/2-node/2-api-7/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 7 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-db-2/Ndb.cfg b/storage/ndb/demos/2-node/2-db-2/Ndb.cfg deleted file mode 100644 index 9315950b67a..00000000000 --- a/storage/ndb/demos/2-node/2-db-2/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 2 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-db-3/Ndb.cfg b/storage/ndb/demos/2-node/2-db-3/Ndb.cfg deleted file mode 100644 index 61309af029e..00000000000 --- a/storage/ndb/demos/2-node/2-db-3/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 3 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg b/storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg deleted file mode 100644 index 61d4c0ecc17..00000000000 --- a/storage/ndb/demos/2-node/2-mgm-1/Ndb.cfg +++ /dev/null @@ -1,2 +0,0 @@ -OwnProcessId 1 -127.0.0.1 10000 diff --git a/storage/ndb/demos/2-node/2-mgm-1/template_config.ini b/storage/ndb/demos/2-node/2-mgm-1/template_config.ini deleted file mode 100644 index 3edb909609a..00000000000 --- a/storage/ndb/demos/2-node/2-mgm-1/template_config.ini +++ /dev/null @@ -1,157 +0,0 @@ -############################################################################### -# -# Initial system configuration file for MySQL Cluster v3.1.0 (Demo 2) -# -############################################################################### - -[COMPUTER] -Id: 1 -ByteOrder: Little -HostName: localhost - -[COMPUTER] -Id: 2 -ByteOrder: Little -HostName: localhost - -[MGM] -Id: 1 -ExecuteOnComputer: 1 -PortNumber: 10000 -PortNumberStats: 10001 -ArbitrationRank: 1 - -[DB DEFAULT] -NoOfReplicas: 2 -#LockPagesInMainMemory: N -StopOnError: N -#MaxNoOfConcurrentOperations: 1024 -#MaxNoOfConcurrentTransactions: 1024 -NoOfIndexPages: 200 -NoOfDataPages: 600 -#TimeBetweenLocalCheckpoints: 20 -#TimeBetweenGlobalCheckpoints: 1500 -#NoOfFragmentLogFiles: 8 -BackupMemory: 4M -BackupDataBufferSize: 2M -BackupLogBufferSize: 2M -BackupWriteSize: 32k - -[DB] -Id: 2 -ExecuteOnComputer: 1 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_2_HERE - -[DB] -Id: 3 -ExecuteOnComputer: 2 -FileSystemPath: 
WRITE_PATH_TO_FILESYSTEM_3_HERE - -[API DEFAULT] -ArbitrationRank: 1 - -[API] -Id: 4 -ExecuteOnComputer: 1 - -[API] -Id: 5 -ExecuteOnComputer: 1 - -[API] -Id: 6 -ExecuteOnComputer: 2 - -[API] -Id: 7 -ExecuteOnComputer: 2 - -# Designated MySQL Server API node id -[API] -Id: 11 -ExecuteOnComputer: 1 - -# Designated MySQL Server API node id -[API] -Id: 12 -ExecuteOnComputer: 2 - - -[TCP] -NodeId1: 1 -NodeId2: 2 -PortNumber: 10002 - -[TCP] -NodeId1: 1 -NodeId2: 3 -PortNumber: 10003 - -[TCP] -NodeId1: 2 -NodeId2: 3 -PortNumber: 10004 - -[TCP] -NodeId1: 2 -NodeId2: 4 -PortNumber: 10005 - -[TCP] -NodeId1: 2 -NodeId2: 5 -PortNumber: 10006 - -[TCP] -NodeId1: 2 -NodeId2: 6 -PortNumber: 10007 - -[TCP] -NodeId1: 2 -NodeId2: 7 -PortNumber: 10008 - -[TCP] -NodeId1: 3 -NodeId2: 4 -PortNumber: 10009 - -[TCP] -NodeId1: 3 -NodeId2: 5 -PortNumber: 10010 - -[TCP] -NodeId1: 3 -NodeId2: 6 -PortNumber: 10011 - -[TCP] -NodeId1: 3 -NodeId2: 7 -PortNumber: 10012 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 11 -PortNumber: 10013 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 3 -NodeId2: 11 -PortNumber: 10014 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 12 -PortNumber: 10015 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 3 -NodeId2: 12 -PortNumber: 10016 diff --git a/storage/ndb/demos/config-templates/config_template-1-REP.ini b/storage/ndb/demos/config-templates/config_template-1-REP.ini deleted file mode 100644 index 71be3f2f53f..00000000000 --- a/storage/ndb/demos/config-templates/config_template-1-REP.ini +++ /dev/null @@ -1,87 +0,0 @@ -############################################################################### -# -# Initial system configuration file for MySQL Cluster v3.1.0 (Demo 1) -# -############################################################################### - -[DB DEFAULT] -NoOfReplicas: 1 -StopOnError: Y -NoOfIndexPages: 1500 -NoOfDataPages: 5000 -BackupMemory: 4M -BackupDataBufferSize: 2M -BackupLogBufferSize: 2M -BackupWriteSize: 32k - -[COMPUTER] -Id: 1 -ByteOrder: Little -HostName: CHOOSE_HOSTNAME - -[EXTERNAL SYSTEM] -Name: External - -[MGM] -Id: 1 -ExecuteOnComputer: 1 -PortNumber: CHOOSE_PORT_BASE00 -PortNumberStats: CHOOSE_PORT_BASE01 - - -[DB] -Id: 2 -ExecuteOnComputer: 1 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_2_HERE - -[API] -Id: 3 -ExecuteOnComputer: 1 - -[REP] -Id: CHOOSE_REP_ID -ExecuteOnComputer: 1 - -[EXTERNAL REP] -Id: CHOOSE_EXTREP_ID -System: External - -# Designated MySQL Server API node id -[API] -Id: 11 -ExecuteOnComputer: 1 - -[TCP DEFAULT] -SendSignalId: N -Compression: N -Checksum: N -SendBufferSize: 2000 -MaxReceiveSize: 2000 - -[TCP] -NodeId1: 1 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE02 - -[TCP] -NodeId1: 2 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE03 - -[TCP] -NodeId1: 2 -NodeId2: CHOOSE_REP_ID -PortNumber: CHOOSE_PORT_BASE04 - -[TCP] -Hostname1: CHOOSE_HOSTNAME -Hostname2: CHOOSE_EXTHOSTNAME -NodeId1: CHOOSE_REP_ID -NodeId2: External.CHOOSE_EXTREP_ID -PortNumber: 10099 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE11 diff --git a/storage/ndb/demos/config-templates/config_template-4.ini b/storage/ndb/demos/config-templates/config_template-4.ini deleted file mode 100644 index e47c9037344..00000000000 --- a/storage/ndb/demos/config-templates/config_template-4.ini +++ /dev/null @@ -1,336 +0,0 @@ -############################################################################### -# -# 4-node system 
configuration file for MySQL Cluster -# -############################################################################### - -[DB DEFAULT] -NoOfReplicas: 1 -StopOnError: N -NoOfIndexPages: 1500 -NoOfDataPages: 5000 -BackupMemory: 4M -BackupDataBufferSize: 2M -BackupLogBufferSize: 2M -BackupWriteSize: 32k - -[COMPUTER] -Id: 1 -ByteOrder: Little -HostName: CHOOSE_HOSTNAME_1 - -[COMPUTER] -Id: 2 -ByteOrder: Little -HostName: CHOOSE_HOSTNAME_2 - -[COMPUTER] -Id: 3 -ByteOrder: Little -HostName: CHOOSE_HOSTNAME_3 - -[COMPUTER] -Id: 4 -ByteOrder: Little -HostName: CHOOSE_HOSTNAME_4 - -[MGM] -Id: 1 -ExecuteOnComputer: 1 -PortNumber: CHOOSE_PORT_BASE00 -PortNumberStats: CHOOSE_PORT_BASE01 - -[DB] -Id: 2 -ExecuteOnComputer: 1 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_1_HERE - -[DB] -Id: 3 -ExecuteOnComputer: 2 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_2_HERE - -[DB] -Id: 4 -ExecuteOnComputer: 3 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_3_HERE - -[DB] -Id: 5 -ExecuteOnComputer: 4 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_4_HERE - -[API] -Id: 6 -ExecuteOnComputer: 1 - -[API] -Id: 7 -ExecuteOnComputer: 2 - -[API] -Id: 8 -ExecuteOnComputer: 3 - -[API] -Id: 9 -ExecuteOnComputer: 4 - -# Designated MySQL Server API node id -[API] -Id: 11 -ExecuteOnComputer: 1 - -# Designated MySQL Server API node id -[API] -Id: 12 -ExecuteOnComputer: 2 - -# Designated MySQL Server API node id -[API] -Id: 13 -ExecuteOnComputer: 3 - -# Designated MySQL Server API node id -[API] -Id: 14 -ExecuteOnComputer: 4 - -[TCP DEFAULT] -SendSignalId: N -Compression: N -Checksum: N -SendBufferSize: 2000 -MaxReceiveSize: 2000 - -# Management server -[TCP] -NodeId1: 1 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE02 - -[TCP] -NodeId1: 1 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE03 - -[TCP] -NodeId1: 1 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE04 - -[TCP] -NodeId1: 1 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE05 - -# Database cluster -[TCP] -NodeId1: 2 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE06 - -[TCP] -NodeId1: 2 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE07 - -[TCP] -NodeId1: 2 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE08 - -[TCP] -NodeId1: 3 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE09 - -[TCP] -NodeId1: 3 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE10 - -[TCP] -NodeId1: 4 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE11 - -# API node 6 -[TCP] -NodeId1: 6 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE12 - -[TCP] -NodeId1: 6 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE13 - -[TCP] -NodeId1: 6 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE14 - -[TCP] -NodeId1: 6 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE15 - -# API node 7 -[TCP] -NodeId1: 7 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE16 - -[TCP] -NodeId1: 7 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE17 - -[TCP] -NodeId1: 7 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE18 - -[TCP] -NodeId1: 7 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE19 - -# API node 8 -[TCP] -NodeId1: 8 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE20 - -[TCP] -NodeId1: 8 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE21 - -[TCP] -NodeId1: 8 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE22 - -[TCP] -NodeId1: 8 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE23 - -# API node 9 -[TCP] -NodeId1: 9 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE24 - -[TCP] -NodeId1: 9 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE25 - -[TCP] -NodeId1: 9 -NodeId2: 4 -PortNumber: CHOOSE_PORT_BASE26 - -[TCP] -NodeId1: 9 -NodeId2: 5 -PortNumber: CHOOSE_PORT_BASE27 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE28 - -# Designated MySQL Server API node connection 
-[TCP] -NodeId1: 3 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE29 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 4 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE30 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 5 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE31 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 12 -PortNumber: CHOOSE_PORT_BASE32 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 3 -NodeId2: 12 -PortNumber: CHOOSE_PORT_BASE33 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 4 -NodeId2: 12 -PortNumber: CHOOSE_PORT_BASE34 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 5 -NodeId2: 12 -PortNumber: CHOOSE_PORT_BASE35 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 13 -PortNumber: CHOOSE_PORT_BASE36 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 3 -NodeId2: 13 -PortNumber: CHOOSE_PORT_BASE37 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 4 -NodeId2: 13 -PortNumber: CHOOSE_PORT_BASE38 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 5 -NodeId2: 13 -PortNumber: CHOOSE_PORT_BASE39 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 14 -PortNumber: CHOOSE_PORT_BASE40 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 3 -NodeId2: 14 -PortNumber: CHOOSE_PORT_BASE41 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 4 -NodeId2: 14 -PortNumber: CHOOSE_PORT_BASE42 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 5 -NodeId2: 14 -PortNumber: CHOOSE_PORT_BASE43 diff --git a/storage/ndb/demos/config-templates/config_template-install.ini b/storage/ndb/demos/config-templates/config_template-install.ini deleted file mode 100644 index e31906ba609..00000000000 --- a/storage/ndb/demos/config-templates/config_template-install.ini +++ /dev/null @@ -1,64 +0,0 @@ -############################################################################### -# -# Initial system configuration file for MySQL Cluster v3.1.0 (Demo 1) -# -############################################################################### - -[DB DEFAULT] -NoOfReplicas: 1 -StopOnError: N -NoOfIndexPages: 1500 -NoOfDataPages: 5000 -BackupMemory: 4M -BackupDataBufferSize: 2M -BackupLogBufferSize: 2M -BackupWriteSize: 32k - -[COMPUTER] -Id: 1 -ByteOrder: Little -HostName: localhost - -[MGM] -Id: 1 -ExecuteOnComputer: 1 -PortNumber: CHOOSE_PORT_BASE00 -PortNumberStats: CHOOSE_PORT_BASE01 - - -[DB] -Id: 2 -ExecuteOnComputer: 1 -FileSystemPath: WRITE_PATH_TO_FILESYSTEM_2_HERE - -[API] -Id: 3 -ExecuteOnComputer: 1 - -# Designated MySQL Server API node id -[API] -Id: 11 -ExecuteOnComputer: 1 - -[TCP DEFAULT] -SendSignalId: N -Compression: N -Checksum: N -SendBufferSize: 2000 -MaxReceiveSize: 2000 - -[TCP] -NodeId1: 1 -NodeId2: 2 -PortNumber: CHOOSE_PORT_BASE02 - -[TCP] -NodeId1: 2 -NodeId2: 3 -PortNumber: CHOOSE_PORT_BASE03 - -# Designated MySQL Server API node connection -[TCP] -NodeId1: 2 -NodeId2: 11 -PortNumber: CHOOSE_PORT_BASE11 diff --git a/storage/ndb/demos/run_demo1-PS-SS_common.sh b/storage/ndb/demos/run_demo1-PS-SS_common.sh deleted file mode 100644 index 625e9655087..00000000000 --- a/storage/ndb/demos/run_demo1-PS-SS_common.sh +++ /dev/null @@ -1,50 +0,0 @@ -echo $NDB_HOST $NDB_EXTHOST - -NDB_PORT=$NDB_PORT_BASE"00" -NDB_CONNECTSTRING_BASE="host=$NDB_HOST:$NDB_PORT;nodeid=" - -# Edit file system path - -cd $NDB_DEMO -sed -e 
s,"WRITE_PATH_TO_FILESYSTEM_2_HERE",$NDB_DEMO/filesystem,g \ - -e s,"CHOOSE_HOSTNAME",$NDB_HOST,g\ - -e s,"CHOOSE_EXTHOSTNAME",$NDB_EXTHOST,g\ - -e s,"CHOOSE_PORT_BASE",$NDB_PORT_BASE,g\ - -e s,"CHOOSE_REP_ID",$NDB_REP_ID,g\ - -e s,"CHOOSE_EXTREP_ID",$NDB_EXTREP_ID,g\ - < ../config-templates/config_template-1-REP.ini > config.ini - -# Start management server as deamon - -NDB_ID="1" -NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID -export NDB_CONNECTSTRING -if mgmtsrvr -d -c config.ini ; then :; else - echo "Unable to start mgmtsrvr" - exit 1 -fi - -# Start database node - -NDB_ID="2" -NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID -export NDB_CONNECTSTRING -xterm -T "$NDB_DEMO_NAME DB Node $NDB_ID" -geometry 80x10 -xrm *.hold:true -e ndb -i & - -# Start xterm for application programs - -NDB_ID="3" -NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID -export NDB_CONNECTSTRING -xterm -T "$NDB_DEMO_NAME API Node $NDB_ID" -geometry 80x10 & - -# Start xterm for rep node - -NDB_ID=$NDB_REP_ID -NDB_CONNECTSTRING=$NDB_CONNECTSTRING_BASE$NDB_ID -export NDB_CONNECTSTRING -xterm -T "$NDB_DEMO_NAME REP Node $NDB_ID" -geometry 80x10 -xrm *.hold:true -e ndb_rep & - -# Start management client - -xterm -T "$NDB_DEMO_NAME Mgmt Client" -geometry 80x10 -xrm *.hold:true -e mgmtclient $NDB_HOST $NDB_PORT & diff --git a/storage/ndb/demos/run_demo1-PS.sh b/storage/ndb/demos/run_demo1-PS.sh deleted file mode 100755 index 82cfdd5e65b..00000000000 --- a/storage/ndb/demos/run_demo1-PS.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh -if [ -z "$MYSQLCLUSTER_TOP" ]; then - echo "MYSQLCLUSTER_TOP not set" - exit 1 -fi -if [ -d "$MYSQLCLUSTER_TOP/ndb" ]; then :; else - echo "$MYSQLCLUSTER_TOP/ndb directory does not exist" - exit 1 -fi -NDB_CONNECTSTRING= -NDB_HOME= -NDB_DEMO=$MYSQLCLUSTER_TOP/ndb/demos/1-node-PS - -NDB_PORT_BASE="102" -NDB_REP_ID="5" -NDB_EXTREP_ID="4" - -NDB_DEMO_NAME="Demo 1-PS MySQL Cluster" -NDB_HOST1=$1 -NDB_HOST2=$2 -if [ -z "$NDB_HOST1" ]; then - NDB_HOST1=localhost -fi -if [ -z "$NDB_HOST2" ]; then - NDB_HOST2=localhost -fi -NDB_HOST=$NDB_HOST1 -NDB_EXTHOST=$NDB_HOST2 - -source $MYSQLCLUSTER_TOP/ndb/demos/run_demo1-PS-SS_common.sh diff --git a/storage/ndb/demos/run_demo1-SS.sh b/storage/ndb/demos/run_demo1-SS.sh deleted file mode 100755 index 5ede57c44c4..00000000000 --- a/storage/ndb/demos/run_demo1-SS.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh -if [ -z "$MYSQLCLUSTER_TOP" ]; then - echo "MYSQLCLUSTER_TOP not set" - exit 1 -fi -if [ -d "$MYSQLCLUSTER_TOP/ndb" ]; then :; else - echo "$MYSQLCLUSTER_TOP/ndb directory does not exist" - exit 1 -fi -NDB_CONNECTSTRING= -NDB_HOME= -NDB_DEMO=$MYSQLCLUSTER_TOP/ndb/demos/1-node-SS - -NDB_PORT_BASE="101" -NDB_REP_ID="4" -NDB_EXTREP_ID="5" - -NDB_DEMO_NAME="Demo 1-SS MySQL Cluster" -NDB_HOST1=$1 -NDB_HOST2=$2 -if [ -z "$NDB_HOST1" ]; then - NDB_HOST1=localhost -fi -if [ -z "$NDB_HOST2" ]; then - NDB_HOST2=localhost -fi -NDB_HOST=$NDB_HOST2 -NDB_EXTHOST=$NDB_HOST1 - -source $MYSQLCLUSTER_TOP/ndb/demos/run_demo1-PS-SS_common.sh diff --git a/storage/ndb/demos/run_demo1.sh b/storage/ndb/demos/run_demo1.sh deleted file mode 100755 index df6e3fc799d..00000000000 --- a/storage/ndb/demos/run_demo1.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -if [ -z "$MYSQLCLUSTER_TOP" ]; then - echo "MYSQLCLUSTER_TOP not set" - exit 1 -fi -if [ -d "$MYSQLCLUSTER_TOP/ndb" ]; then :; else - echo "$MYSQLCLUSTER_TOP/ndb directory does not exist" - exit 1 -fi -NDB_CONNECTSTRING= -NDB_HOME= -ndb_demo=$MYSQLCLUSTER_TOP/ndb/demos - -# Edit file system path - -cd $ndb_demo/1-node/1-mgm-1 
-sed -e s,"WRITE_PATH_TO_FILESYSTEM_2_HERE",$ndb_demo/1-node/1-db-2/filesystem,g \ - < template_config.ini > config.ini - -# Start management server as deamon - -cd $ndb_demo/1-node/1-mgm-1 -if mgmtsrvr -d -c config.ini ; then :; else - echo "Unable to start mgmtsrvr" - exit 1 -fi - -# Start database node - -cd $ndb_demo/1-node/1-db-2 -xterm -T "Demo 1 NDB Cluster DB Node 2" -geometry 80x10 -xrm *.hold:true -e ndb -i & - -# Start xterm for application programs - -cd $ndb_demo/1-node/1-api-3 -xterm -T "Demo 1 NDB Cluster API Node 3" -geometry 80x10 & - -# Start management client - -cd $ndb_demo -xterm -T "Demo 1 NDB Management Client" -geometry 80x10 -xrm *.hold:true -e mgmtclient localhost 10000 & diff --git a/storage/ndb/demos/run_demo2.sh b/storage/ndb/demos/run_demo2.sh deleted file mode 100755 index 9bae7517d5f..00000000000 --- a/storage/ndb/demos/run_demo2.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh -if [ -z "$MYSQLCLUSTER_TOP" ]; then - echo "MYSQLCLUSTER_TOP not set" - exit 1 -fi -if [ -d "$MYSQLCLUSTER_TOP/ndb" ]; then :; else - echo "$MYSQLCLUSTER_TOP/ndb directory does not exist" - exit 1 -fi -NDB_CONNECTSTRING= -NDB_HOME= -ndb_demo=$MYSQLCLUSTER_TOP/ndb/demos - -# Edit file system path - -cd $ndb_demo/2-node/2-mgm-1 -sed -e s,"WRITE_PATH_TO_FILESYSTEM_2_HERE",$ndb_demo/2-node/2-db-2/filesystem,g \ - -e s,"WRITE_PATH_TO_FILESYSTEM_3_HERE",$ndb_demo/2-node/2-db-3/filesystem,g \ - < template_config.ini > config.ini - -# Start management server as deamon - -cd $ndb_demo/2-node/2-mgm-1 -if mgmtsrvr -d -c config.ini ; then :; else - echo "Unable to start mgmtsrvr" - exit 1 -fi - -#xterm -T "Demo 2 NDB Management Server" -geometry 80x10 -xrm *.hold:true -e mgmtsrvr -c config.ini & - -# Start database node - -cd $ndb_demo/2-node/2-db-2 -xterm -T "Demo 2 NDB Cluster DB Node 2" -geometry 80x10 -xrm *.hold:true -e ndb -i & - -# Start database node - -cd $ndb_demo/2-node/2-db-3 -xterm -T "Demo 2 NDB Cluster DB Node 3" -geometry 80x10 -xrm *.hold:true -e ndb -i & - -# Start xterm for application programs - -cd $ndb_demo/2-node/2-api-4 -xterm -T "Demo 2 NDB Cluster API Node 4" -geometry 80x10 & - -# Start xterm for application programs - -cd $ndb_demo/2-node/2-api-5 -xterm -T "Demo 2 NDB Cluster API Node 5" -geometry 80x10 & - -# Start management client - -cd $ndb_demo -xterm -T "Demo 2 NDB Management Client" -geometry 80x10 -xrm *.hold:true -e mgmtclient localhost 10000 & diff --git a/storage/ndb/docs/Makefile.am b/storage/ndb/docs/Makefile.am deleted file mode 100644 index f0ecae1fe66..00000000000 --- a/storage/ndb/docs/Makefile.am +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -DOXYDIR = doxygen -noinst_HEADERS = $(DOXYDIR)/predoxy.pl $(DOXYDIR)/postdoxy.pl $(DOXYDIR)/Doxyfile.ndbapi $(DOXYDIR)/Doxyfile.mgmapi - -all-local: do-check-html ndbapidoc-html mgmapidoc-html -all-pdf: do-check-pdf ndbapidoc-pdf mgmapidoc-pdf - -DOXYTMP = .doxytmp -DOXYOUT = .doxyout - -NDB_RELEASE = @NDB_VERSION_MAJOR@.@NDB_VERSION_MINOR@.@NDB_VERSION_BUILD@-@NDB_VERSION_STATUS@ - -clean-local: - rm -rf ndbapi.pdf ndbapi.html mgmapi.pdf mgmapi.html - rm -rf $(DOXYTMP) $(DOXYOUT) - -do-check-html: - @set -x; \ - if test @PERL@ = no ; then \ - echo "Perl needed to make docs"; \ - exit 1; \ - fi; \ - if test @DOXYGEN@ = no ; then \ - echo "Doxygen needed to make docs"; \ - exit 1; \ - fi; - -do-check-pdf: do-check-html - if test @PDFLATEX@ = no ; then \ - echo "Pdflatex needed to make docs"; \ - exit 1; \ - fi; \ - if test @MAKEINDEX@ = no ; then \ - echo "Makeindex needed to make docs"; \ - exit 1; \ - fi; - -### -# -# NDB API Programmer's Guide -# -ndbapidoc-html: ndbapi.html -ndbapidoc-pdf: ndbapi.pdf - -ndbapi.html: $(noinst_HEADERS) - @set -x; \ - export NDB_RELEASE=$(NDB_RELEASE); \ - @RM@ -f ndbapi.pdf ndbapi.html; \ - @RM@ -rf $(DOXYTMP) $(DOXYOUT); \ - mkdir -p $(DOXYTMP) $(DOXYOUT); \ - @CP@ $(top_srcdir)/storage/ndb/include/ndbapi/* $(DOXYTMP); \ - @CP@ $(top_srcdir)/storage/ndb/ndbapi-examples/*/*.[ch]pp $(DOXYTMP); \ - @PERL@ $(DOXYDIR)/predoxy.pl; \ - mv footer.html $(DOXYTMP); \ - (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.ndbapi); \ - @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/ndbapi.latex "MySQL Cluster NDB API Programmer Guide"; \ - (cd $(DOXYOUT) && \ - find ndbapi.html -print | cpio -pdm ..); - -ndbapi.pdf: ndbapi.html - (cd $(DOXYOUT)/ndbapi.latex && \ - @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \ - cp -p refman.pdf ../../ndbapi.pdf); - -### -# -# MGM API Guide -# -mgmapidoc-html: mgmapi.html -mgmapidoc-pdf: mgmapi.pdf - -mgmapi.html: $(noinst_HEADERS) - @set -x; \ - export NDB_RELEASE=$(NDB_RELEASE); \ - @RM@ -f mgmapi.pdf mgmapi.html; \ - @RM@ -rf $(DOXYTMP) $(DOXYOUT); \ - mkdir -p $(DOXYTMP) $(DOXYOUT); \ - @CP@ $(top_srcdir)/storage/ndb/include/mgmapi/* $(DOXYTMP); \ - @PERL@ $(DOXYDIR)/predoxy.pl; \ - mv footer.html $(DOXYTMP); \ - (cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.mgmapi); \ - @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/mgmapi.latex "MySQL Cluster MGM API Guide"; \ - (cd $(DOXYOUT) && \ - find mgmapi.html -print | cpio -pdm ..); - -mgmapi.pdf: mgmapi.html - (cd $(DOXYOUT)/mgmapi.latex && \ - @PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \ - cp -p refman.pdf ../../mgmapi.pdf); - -### -# -# Complete Source Browser except for -# ndbapi odbc test tools win32 lib examples docs CVS config bin -# include/ndbapi -# include/newtonapi src/newtonapi -# include/mgmapi src/mgmapi -# src/client -ndbdoc: DUMMY - mkdir -p $(OUTDIR) - cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.ndb - -### -# -# odbcdoc - Complete Source Browser for NDB ODBC (src/client/odbc) - -odbcdoc: DUMMY - mkdir -p $(OUTDIR) - cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.odbc - -testdoc: DUMMY - mkdir -p $(OUTDIR) - cd $(top_srcdir)/storage/ndb ; $(DOXYGEN) $(DOXYDIR)/Doxyfile.test - -windoze-dsp: diff --git a/storage/ndb/docs/README b/storage/ndb/docs/README deleted file mode 100644 index 
262e9003aca..00000000000 --- a/storage/ndb/docs/README +++ /dev/null @@ -1,30 +0,0 @@ -Create MySQL Cluster user documentation from source code --------------------------------------------------------- -(All these require Doxygen.) - -* make clean - Remove all generated documentation and tmp files - -* make ndbapidoc - Makes the NDB API Programmer's Guide (in HTML) - -* make ndbapipdf - Makes the NDB API Programmer Guide (in PDF) - -* make mgmapidoc - Makes the MGM API Reference Manual (in HTML) - -* make mgmapipdf - Makes the MGM API Reference Manual (in PDF) - -* make ndbdoc - Makes source code browser for NDB Cluster (in HTML) - (Requires Graphviz.) - -Doxygen and Graphviz can be found at: - http://www.doxygen.org -or at (for Red Hat 9.0 RPMs): - http://dentrassi.de/download/doxygen/ - --- -lars@mysql.com diff --git a/storage/ndb/docs/doxygen/Doxyfile.mgmapi b/storage/ndb/docs/doxygen/Doxyfile.mgmapi deleted file mode 100644 index 48538735069..00000000000 --- a/storage/ndb/docs/doxygen/Doxyfile.mgmapi +++ /dev/null @@ -1,894 +0,0 @@ -# Copyright (C) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# Doxyfile 1.2.12 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# General configuration options -#--------------------------------------------------------------------------- -DETAILS_AT_TOP = yes -HIDE_FRIEND_COMPOUNDS = yes - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. 
-# The default language is English, other supported languages are: -# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, -# German, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, -# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish. - -OUTPUT_LANGUAGE = English - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these class will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = NO - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. - -STRIP_FROM_PATH = - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower case letters. 
If set to YES upper case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# users are adviced to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an -# explict @brief command for a brief description. - -JAVADOC_AUTOBRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# reimplements. - -INHERIT_DOCS = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 2 - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user defined paragraph with heading "Side Effects:". 
-# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consist of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. - -WARN_FORMAT = - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = . - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. 
If left -# blank file matching one of the following patterns are included: -# *.c *.cc *.cxx *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp -# *.h++ *.idl - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = NO - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. - -EXCLUDE_PATTERNS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = . - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. - -INPUT_FILTER = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse. - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities -# called/used by that function will be listed. 
- -REFERENCES_RELATION = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = NO - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = ../.doxyout/mgmapi.html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = footer.html - -# The HTML_STYLESHEET tag can be used to specify a user defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the Html help documentation and to the tree view. - -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. 
- -DISABLE_INDEX = YES - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, -# or Internet explorer 4.0+). Note that for large projects the tree generation -# can take a very long time. In such cases it is better to disable this feature. -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = YES - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = ../.doxyout/mgmapi.latex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. 
- -LATEX_BATCHMODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimised for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = ../mgmapi.rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assigments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. 
- -GENERATE_XML = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_PREDEFINED tags. - -EXPAND_ONLY_PREDEF = YES - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. - -PREDEFINED = DOXYGEN_FIX \ - DOXYGEN_SHOULD_SKIP_DEPRECATED \ - DOXYGEN_SHOULD_SKIP_INTERNAL \ - protected=private - -# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line and do not end with a semicolon. Such function macros are typically -# used for boiler-plate code, and will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tagfiles. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). 
- -PERL_PATH = - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in Html, RTF and LaTeX) for classes with base or -# super classes. Setting the tag to NO turns the diagrams off. Note that this -# option is superceded by the HAVE_DOT option below. This is only a fallback. It is -# recommended to install and use dot, since it yield more powerful graphs. - -CLASS_DIAGRAMS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = YES - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found on the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_WIDTH = 1024 - -# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height -# (in pixels) of the graphs generated by dot. 
If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_HEIGHT = 1024 - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermedate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. - -SEARCHENGINE = NO - -# The CGI_NAME tag should be the name of the CGI script that -# starts the search engine (doxysearch) with the correct parameters. -# A script with this name will be generated by doxygen. - -CGI_NAME = - -# The CGI_URL tag should be the absolute URL to the directory where the -# cgi binaries are located. See the documentation of your http daemon for -# details. - -CGI_URL = - -# The DOC_URL tag should be the absolute URL to the directory where the -# documentation is located. If left blank the absolute path to the -# documentation, with file:// prepended to it, will be used. - -DOC_URL = - -# The DOC_ABSPATH tag should be the absolute path to the directory where the -# documentation is located. If left blank the directory on the local machine -# will be used. - -DOC_ABSPATH = - -# The BIN_ABSPATH tag must point to the directory where the doxysearch binary -# is installed. - -BIN_ABSPATH = - -# The EXT_DOC_PATHS tag can be used to specify one or more paths to -# documentation generated for other projects. This allows doxysearch to search -# the documentation for these projects as well. - -EXT_DOC_PATHS = diff --git a/storage/ndb/docs/doxygen/Doxyfile.ndb b/storage/ndb/docs/doxygen/Doxyfile.ndb deleted file mode 100644 index fe21454bb2a..00000000000 --- a/storage/ndb/docs/doxygen/Doxyfile.ndb +++ /dev/null @@ -1,955 +0,0 @@ -# Copyright (C) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# Doxyfile 1.2.14 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] 
-# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# General configuration options -#--------------------------------------------------------------------------- - -DETAILS_AT_TOP = YES - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = "NDB Cluster" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, -# German, Greek, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, -# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish. - -OUTPUT_LANGUAGE = English - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these class will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. 
- -REPEAT_BRIEF = YES - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited -# members of a class in the documentation of that class as if those members were -# ordinary class members. Constructors, destructors and assignment operators of -# the base classes will not be shown. - -INLINE_INHERITED_MEMB = YES - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. - -STRIP_FROM_PATH = . - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = YES - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower case letters. If set to YES upper case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# users are adviced to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = NO - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an -# explict @brief command for a brief description. - -JAVADOC_AUTOBRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# reimplements. 
- -INHERIT_DOCS = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 2 - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consist of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. 
Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp -# *.h++ *.idl *.odl - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = test \ - tools \ - win32 \ - lib \ - examples \ - docs \ - CVS \ - SCCS \ - config \ - bin \ - include/ndbapi \ - include/newtonapi \ - src/newtonapi \ - include/mgmapi \ - src/mgmapi \ - src/client - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories -# that are symbolic links (a Unix filesystem feature) are excluded from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. - -EXCLUDE_PATTERNS = *CVS* \ - *SCCS* - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. 
- -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. - -INPUT_FILTER = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse. - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = YES - -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. 
If left blank `html' will be used as the default path. - -HTML_OUTPUT = ndb.html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the Html help documentation and to the tree view. - -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = YES - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, -# or Internet explorer 4.0+). Note that for large projects the tree generation -# can take a very long time. In such cases it is better to disable this feature. -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. 
- -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = ndb.latex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = NO - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = NO - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimised for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assigments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. 
-# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_XML = NO - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_PREDEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. 
- -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line and do not end with a semicolon. Such function macros are typically -# used for boiler-plate code, and will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tagfiles. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in Html, RTF and LaTeX) for classes with base or -# super classes. Setting the tag to NO turns the diagrams off. Note that this -# option is superceded by the HAVE_DOT option below. This is only a fallback. It is -# recommended to install and use dot, since it yield more powerful graphs. - -CLASS_DIAGRAMS = NO - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. 
- -CLASS_GRAPH = NO - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = NO - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are gif, jpg, and png -# If left blank gif will be used. - -DOT_IMAGE_FORMAT = gif - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found on the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_WIDTH = 1024 - -# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_HEIGHT = 1024 - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermedate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. 
- -SEARCHENGINE = NO - -# The CGI_NAME tag should be the name of the CGI script that -# starts the search engine (doxysearch) with the correct parameters. -# A script with this name will be generated by doxygen. - -CGI_NAME = search.cgi - -# The CGI_URL tag should be the absolute URL to the directory where the -# cgi binaries are located. See the documentation of your http daemon for -# details. - -CGI_URL = - -# The DOC_URL tag should be the absolute URL to the directory where the -# documentation is located. If left blank the absolute path to the -# documentation, with file:// prepended to it, will be used. - -DOC_URL = - -# The DOC_ABSPATH tag should be the absolute path to the directory where the -# documentation is located. If left blank the directory on the local machine -# will be used. - -DOC_ABSPATH = - -# The BIN_ABSPATH tag must point to the directory where the doxysearch binary -# is installed. - -BIN_ABSPATH = /usr/local/bin - -# The EXT_DOC_PATHS tag can be used to specify one or more paths to -# documentation generated for other projects. This allows doxysearch to search -# the documentation for these projects as well. - -EXT_DOC_PATHS = diff --git a/storage/ndb/docs/doxygen/Doxyfile.ndbapi b/storage/ndb/docs/doxygen/Doxyfile.ndbapi deleted file mode 100644 index 5316cb98cdc..00000000000 --- a/storage/ndb/docs/doxygen/Doxyfile.ndbapi +++ /dev/null @@ -1,893 +0,0 @@ -# Copyright (C) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# Doxyfile 1.2.12 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# General configuration options -#--------------------------------------------------------------------------- -DETAILS_AT_TOP = YES -HIDE_FRIEND_COMPOUNDS = YES - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. 
- -OUTPUT_DIRECTORY = - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, -# German, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, -# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish. - -OUTPUT_LANGUAGE = English - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = NO - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these class will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = NO - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. - -STRIP_FROM_PATH = - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. 
- -INTERNAL_DOCS = NO - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower case letters. If set to YES upper case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# users are adviced to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an -# explict @brief command for a brief description. - -JAVADOC_AUTOBRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# reimplements. - -INHERIT_DOCS = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 2 - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. 
- -GENERATE_BUGLIST = YES - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consist of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. - -WARN_FORMAT = - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = . 
- -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank file matching one of the following patterns are included: -# *.c *.cc *.cxx *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp -# *.h++ *.idl - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = NO - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. - -EXCLUDE_PATTERNS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = . - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. - -INPUT_FILTER = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse. - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. - -SOURCE_BROWSER = NO - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = NO - -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities -# called/used by that function will be listed. 
- -REFERENCES_RELATION = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = NO - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = ../.doxyout/ndbapi.html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = footer.html - -# The HTML_STYLESHEET tag can be used to specify a user defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the Html help documentation and to the tree view. - -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. 
- -DISABLE_INDEX = YES - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, -# or Internet explorer 4.0+). Note that for large projects the tree generation -# can take a very long time. In such cases it is better to disable this feature. -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = YES - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = ../.doxyout/ndbapi.latex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = YES - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = YES - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. 
- -LATEX_BATCHMODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimised for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = ../ndbapi.rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assigments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. 
- -GENERATE_XML = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = YES - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_PREDEFINED tags. - -EXPAND_ONLY_PREDEF = YES - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. - -PREDEFINED = DOXYGEN_SHOULD_SKIP_DEPRECATED \ - DOXYGEN_SHOULD_SKIP_INTERNAL \ - protected=private - -# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line and do not end with a semicolon. Such function macros are typically -# used for boiler-plate code, and will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tagfiles. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). 
- -PERL_PATH = - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in Html, RTF and LaTeX) for classes with base or -# super classes. Setting the tag to NO turns the diagrams off. Note that this -# option is superceded by the HAVE_DOT option below. This is only a fallback. It is -# recommended to install and use dot, since it yield more powerful graphs. - -CLASS_DIAGRAMS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = NO - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = YES - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found on the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_WIDTH = 1024 - -# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height -# (in pixels) of the graphs generated by dot. 
If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_HEIGHT = 1024 - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermedate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. - -SEARCHENGINE = NO - -# The CGI_NAME tag should be the name of the CGI script that -# starts the search engine (doxysearch) with the correct parameters. -# A script with this name will be generated by doxygen. - -CGI_NAME = - -# The CGI_URL tag should be the absolute URL to the directory where the -# cgi binaries are located. See the documentation of your http daemon for -# details. - -CGI_URL = - -# The DOC_URL tag should be the absolute URL to the directory where the -# documentation is located. If left blank the absolute path to the -# documentation, with file:// prepended to it, will be used. - -DOC_URL = - -# The DOC_ABSPATH tag should be the absolute path to the directory where the -# documentation is located. If left blank the directory on the local machine -# will be used. - -DOC_ABSPATH = - -# The BIN_ABSPATH tag must point to the directory where the doxysearch binary -# is installed. - -BIN_ABSPATH = - -# The EXT_DOC_PATHS tag can be used to specify one or more paths to -# documentation generated for other projects. This allows doxysearch to search -# the documentation for these projects as well. - -EXT_DOC_PATHS = diff --git a/storage/ndb/docs/doxygen/Doxyfile.odbc b/storage/ndb/docs/doxygen/Doxyfile.odbc deleted file mode 100644 index b3a8f9aa899..00000000000 --- a/storage/ndb/docs/doxygen/Doxyfile.odbc +++ /dev/null @@ -1,939 +0,0 @@ -# Copyright (C) 2004, 2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -# Doxyfile 1.2.14 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] 
-# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# General configuration options -#--------------------------------------------------------------------------- - -DETAILS_AT_TOP = YES - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = "NDB ODBC" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, -# German, Greek, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, -# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish. - -OUTPUT_LANGUAGE = English - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. -# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these class will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. 
- -REPEAT_BRIEF = YES - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited -# members of a class in the documentation of that class as if those members were -# ordinary class members. Constructors, destructors and assignment operators of -# the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. - -STRIP_FROM_PATH = . - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = YES - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. - -STRIP_CODE_COMMENTS = YES - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower case letters. If set to YES upper case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# users are adviced to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an -# explict @brief command for a brief description. - -JAVADOC_AUTOBRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# reimplements. 
- -INHERIT_DOCS = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 2 - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". -# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consist of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. 
Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = src/client/odbc - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp -# *.h++ *.idl *.odl - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories -# that are symbolic links (a Unix filesystem feature) are excluded from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. - -EXCLUDE_PATTERNS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. 
- -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. - -INPUT_FILTER = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse. - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = YES - -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities -# called/used by that function will be listed. - -REFERENCES_RELATION = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = ../.doxyout/odbc.html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. 
- -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the Html help documentation and to the tree view. - -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = YES - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, -# or Internet explorer 4.0+). Note that for large projects the tree generation -# can take a very long time. In such cases it is better to disable this feature. -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. 
- -LATEX_OUTPUT = ../.doxyout/odbc.latex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = NO - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = NO - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. - -LATEX_BATCHMODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimised for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assigments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. 
- -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. - -GENERATE_XML = NO - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_PREDEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. 
If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line and do not end with a semicolon. Such function macros are typically -# used for boiler-plate code, and will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tagfiles. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. - -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in Html, RTF and LaTeX) for classes with base or -# super classes. Setting the tag to NO turns the diagrams off. Note that this -# option is superceded by the HAVE_DOT option below. This is only a fallback. It is -# recommended to install and use dot, since it yield more powerful graphs. - -CLASS_DIAGRAMS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = YES - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. 
- -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = YES - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are gif, jpg, and png -# If left blank gif will be used. - -DOT_IMAGE_FORMAT = gif - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found on the path. - -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_WIDTH = 1024 - -# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_HEIGHT = 1024 - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermedate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. 
- -SEARCHENGINE = YES - -# The CGI_NAME tag should be the name of the CGI script that -# starts the search engine (doxysearch) with the correct parameters. -# A script with this name will be generated by doxygen. - -CGI_NAME = search.cgi - -# The CGI_URL tag should be the absolute URL to the directory where the -# cgi binaries are located. See the documentation of your http daemon for -# details. - -CGI_URL = - -# The DOC_URL tag should be the absolute URL to the directory where the -# documentation is located. If left blank the absolute path to the -# documentation, with file:// prepended to it, will be used. - -DOC_URL = - -# The DOC_ABSPATH tag should be the absolute path to the directory where the -# documentation is located. If left blank the directory on the local machine -# will be used. - -DOC_ABSPATH = - -# The BIN_ABSPATH tag must point to the directory where the doxysearch binary -# is installed. - -BIN_ABSPATH = /usr/local/bin/ - -# The EXT_DOC_PATHS tag can be used to specify one or more paths to -# documentation generated for other projects. This allows doxysearch to search -# the documentation for these projects as well. - -EXT_DOC_PATHS = diff --git a/storage/ndb/docs/doxygen/Doxyfile.test b/storage/ndb/docs/doxygen/Doxyfile.test deleted file mode 100644 index 801c82cf380..00000000000 --- a/storage/ndb/docs/doxygen/Doxyfile.test +++ /dev/null @@ -1,923 +0,0 @@ -# Doxyfile 1.2.14 - -# This file describes the settings to be used by the documentation system -# doxygen (www.doxygen.org) for a project -# -# All text after a hash (#) is considered a comment and will be ignored -# The format is: -# TAG = value [value, ...] -# For lists items can also be appended using: -# TAG += value [value, ...] -# Values that contain spaces should be placed between quotes (" ") - -#--------------------------------------------------------------------------- -# General configuration options -#--------------------------------------------------------------------------- - -DETAILS_AT_TOP = YES - -# The PROJECT_NAME tag is a single word (or a sequence of words surrounded -# by quotes) that should identify the project. - -PROJECT_NAME = "NDB Cluster Test Programs" - -# The PROJECT_NUMBER tag can be used to enter a project or revision number. -# This could be handy for archiving the generated documentation or -# if some version control system is used. - -PROJECT_NUMBER = - -# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) -# base path where the generated documentation will be put. -# If a relative path is entered, it will be relative to the location -# where doxygen was started. If left blank the current directory will be used. - -OUTPUT_DIRECTORY = - -# The OUTPUT_LANGUAGE tag is used to specify the language in which all -# documentation generated by doxygen is written. Doxygen will use this -# information to generate all constant output in the proper language. -# The default language is English, other supported languages are: -# Brazilian, Chinese, Croatian, Czech, Danish, Dutch, Finnish, French, -# German, Greek, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, -# Portuguese, Romanian, Russian, Slovak, Slovene, Spanish and Swedish. - -OUTPUT_LANGUAGE = English - -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in -# documentation are documented, even if no documentation was available. 
-# Private class members and static file members will be hidden unless -# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES - -EXTRACT_ALL = YES - -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class -# will be included in the documentation. - -EXTRACT_PRIVATE = NO - -# If the EXTRACT_STATIC tag is set to YES all static members of a file -# will be included in the documentation. - -EXTRACT_STATIC = YES - -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) -# defined locally in source files will be included in the documentation. -# If set to NO only classes defined in header files are included. - -EXTRACT_LOCAL_CLASSES = YES - -# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all -# undocumented members of documented classes, files or namespaces. -# If set to NO (the default) these members will be included in the -# various overviews, but no documentation section is generated. -# This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_MEMBERS = NO - -# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all -# undocumented classes that are normally visible in the class hierarchy. -# If set to NO (the default) these class will be included in the various -# overviews. This option has no effect if EXTRACT_ALL is enabled. - -HIDE_UNDOC_CLASSES = NO - -# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will -# include brief member descriptions after the members that are listed in -# the file and class documentation (similar to JavaDoc). -# Set to NO to disable this. - -BRIEF_MEMBER_DESC = YES - -# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend -# the brief description of a member or function before the detailed description. -# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the -# brief descriptions will be completely suppressed. - -REPEAT_BRIEF = YES - -# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then -# Doxygen will generate a detailed section even if there is only a brief -# description. - -ALWAYS_DETAILED_SEC = NO - -# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited -# members of a class in the documentation of that class as if those members were -# ordinary class members. Constructors, destructors and assignment operators of -# the base classes will not be shown. - -INLINE_INHERITED_MEMB = NO - -# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full -# path before files name in the file list and in the header files. If set -# to NO the shortest path that makes the file name unique will be used. - -FULL_PATH_NAMES = YES - -# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag -# can be used to strip a user defined part of the path. Stripping is -# only done if one of the specified strings matches the left-hand part of -# the path. It is allowed to use relative paths in the argument list. - -STRIP_FROM_PATH = . - -# The INTERNAL_DOCS tag determines if documentation -# that is typed after a \internal command is included. If the tag is set -# to NO (the default) then the documentation will be excluded. -# Set it to YES to include the internal documentation. - -INTERNAL_DOCS = YES - -# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct -# doxygen to hide any special comment blocks from generated source code -# fragments. Normal C and C++ comments will always remain visible. 
- -STRIP_CODE_COMMENTS = YES - -# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate -# file names in lower case letters. If set to YES upper case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# users are adviced to set this option to NO. - -CASE_SENSE_NAMES = YES - -# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter -# (but less readable) file names. This can be useful is your file systems -# doesn't support long names like on DOS, Mac, or CD-ROM. - -SHORT_NAMES = NO - -# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen -# will show members with their full class and namespace scopes in the -# documentation. If set to YES the scope will be hidden. - -HIDE_SCOPE_NAMES = NO - -# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen -# will generate a verbatim copy of the header file for each class for -# which an include is specified. Set to NO to disable this. - -VERBATIM_HEADERS = NO - -# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen -# will put list of the files that are included by a file in the documentation -# of that file. - -SHOW_INCLUDE_FILES = YES - -# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen -# will interpret the first line (until the first dot) of a JavaDoc-style -# comment as the brief description. If set to NO, the JavaDoc -# comments will behave just like the Qt-style comments (thus requiring an -# explict @brief command for a brief description. - -JAVADOC_AUTOBRIEF = NO - -# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented -# member inherits the documentation from any documented member that it -# reimplements. - -INHERIT_DOCS = YES - -# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] -# is inserted in the documentation for inline members. - -INLINE_INFO = YES - -# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen -# will sort the (detailed) documentation of file and class members -# alphabetically by member name. If set to NO the members will appear in -# declaration order. - -SORT_MEMBER_DOCS = YES - -# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first -# member in the group (if any) for the other members of the group. By default -# all members of a group must be documented explicitly. - -DISTRIBUTE_GROUP_DOC = NO - -# The TAB_SIZE tag can be used to set the number of spaces in a tab. -# Doxygen uses this value to replace tabs by spaces in code fragments. - -TAB_SIZE = 2 - -# The GENERATE_TODOLIST tag can be used to enable (YES) or -# disable (NO) the todo list. This list is created by putting \todo -# commands in the documentation. - -GENERATE_TODOLIST = YES - -# The GENERATE_TESTLIST tag can be used to enable (YES) or -# disable (NO) the test list. This list is created by putting \test -# commands in the documentation. - -GENERATE_TESTLIST = YES - -# The GENERATE_BUGLIST tag can be used to enable (YES) or -# disable (NO) the bug list. This list is created by putting \bug -# commands in the documentation. - -GENERATE_BUGLIST = YES - -# This tag can be used to specify a number of aliases that acts -# as commands in the documentation. An alias has the form "name=value". 
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to -# put the command \sideeffect (or @sideeffect) in the documentation, which -# will result in a user defined paragraph with heading "Side Effects:". -# You can put \n's in the value part of an alias to insert newlines. - -ALIASES = - -# The ENABLED_SECTIONS tag can be used to enable conditional -# documentation sections, marked by \if sectionname ... \endif. - -ENABLED_SECTIONS = - -# The MAX_INITIALIZER_LINES tag determines the maximum number of lines -# the initial value of a variable or define consist of for it to appear in -# the documentation. If the initializer consists of more lines than specified -# here it will be hidden. Use a value of 0 to hide initializers completely. -# The appearance of the initializer of individual variables and defines in the -# documentation can be controlled using \showinitializer or \hideinitializer -# command in the documentation regardless of this setting. - -MAX_INITIALIZER_LINES = 30 - -# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources -# only. Doxygen will then generate output that is more tailored for C. -# For instance some of the names that are used will be different. The list -# of all members will be omitted, etc. - -OPTIMIZE_OUTPUT_FOR_C = NO - -# Set the SHOW_USED_FILES tag to NO to disable the list of files generated -# at the bottom of the documentation of classes and structs. If set to YES the -# list will mention the files that were used to generate the documentation. - -SHOW_USED_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- - -# The QUIET tag can be used to turn on/off the messages that are generated -# by doxygen. Possible values are YES and NO. If left blank NO is used. - -QUIET = NO - -# The WARNINGS tag can be used to turn on/off the warning messages that are -# generated by doxygen. Possible values are YES and NO. If left blank -# NO is used. - -WARNINGS = YES - -# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings -# for undocumented members. If EXTRACT_ALL is set to YES then this flag will -# automatically be disabled. - -WARN_IF_UNDOCUMENTED = YES - -# The WARN_FORMAT tag determines the format of the warning messages that -# doxygen can produce. The string should contain the $file, $line, and $text -# tags, which will be replaced by the file and line number from which the -# warning originated and the warning text. - -WARN_FORMAT = "$file:$line: $text" - -# The WARN_LOGFILE tag can be used to specify a file to which warning -# and error messages should be written. If left blank the output is written -# to stderr. - -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- - -# The INPUT tag can be used to specify the files and/or directories that contain -# documented source files. You may enter file names like "myfile.cpp" or -# directories like "/usr/src/myproject". Separate the files or directories -# with spaces. - -INPUT = test - -# If the value of the INPUT tag contains directories, you can use the -# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. 
If left -# blank the following patterns are tested: -# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp -# *.h++ *.idl *.odl - -FILE_PATTERNS = - -# The RECURSIVE tag can be used to turn specify whether or not subdirectories -# should be searched for input files as well. Possible values are YES and NO. -# If left blank NO is used. - -RECURSIVE = YES - -# The EXCLUDE tag can be used to specify files and/or directories that should -# excluded from the INPUT source files. This way you can easily exclude a -# subdirectory from a directory tree whose root is specified with the INPUT tag. - -EXCLUDE = - -# The EXCLUDE_SYMLINKS tag can be used select whether or not files or directories -# that are symbolic links (a Unix filesystem feature) are excluded from the input. - -EXCLUDE_SYMLINKS = NO - -# If the value of the INPUT tag contains directories, you can use the -# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude -# certain files from those directories. - -EXCLUDE_PATTERNS = - -# The EXAMPLE_PATH tag can be used to specify one or more files or -# directories that contain example code fragments that are included (see -# the \include command). - -EXAMPLE_PATH = - -# If the value of the EXAMPLE_PATH tag contains directories, you can use the -# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp -# and *.h) to filter out the source-files in the directories. If left -# blank all files are included. - -EXAMPLE_PATTERNS = - -# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be -# searched for input files to be used with the \include or \dontinclude -# commands irrespective of the value of the RECURSIVE tag. -# Possible values are YES and NO. If left blank NO is used. - -EXAMPLE_RECURSIVE = NO - -# The IMAGE_PATH tag can be used to specify one or more files or -# directories that contain image that are included in the documentation (see -# the \image command). - -IMAGE_PATH = - -# The INPUT_FILTER tag can be used to specify a program that doxygen should -# invoke to filter for each input file. Doxygen will invoke the filter program -# by executing (via popen()) the command , where -# is the value of the INPUT_FILTER tag, and is the name of an -# input file. Doxygen will then use the output that the filter program writes -# to standard output. - -INPUT_FILTER = - -# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER) will be used to filter the input files when producing source -# files to browse. - -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- - -# If the SOURCE_BROWSER tag is set to YES then a list of source files will -# be generated. Documented entities will be cross-referenced with these sources. - -SOURCE_BROWSER = YES - -# Setting the INLINE_SOURCES tag to YES will include the body -# of functions and classes directly in the documentation. - -INLINE_SOURCES = YES - -# If the REFERENCED_BY_RELATION tag is set to YES (the default) -# then for each documented function all documented -# functions referencing it will be listed. - -REFERENCED_BY_RELATION = YES - -# If the REFERENCES_RELATION tag is set to YES (the default) -# then for each documented function all documented entities -# called/used by that function will be listed. 
- -REFERENCES_RELATION = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- - -# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index -# of all compounds will be generated. Enable this if the project -# contains a lot of classes, structs, unions or interfaces. - -ALPHABETICAL_INDEX = YES - -# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then -# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns -# in which this list will be split (can be a number in the range [1..20]) - -COLS_IN_ALPHA_INDEX = 5 - -# In case all classes in a project start with a common prefix, all -# classes will be put under the same header in the alphabetical index. -# The IGNORE_PREFIX tag can be used to specify one or more prefixes that -# should be ignored while generating the index headers. - -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- - -# If the GENERATE_HTML tag is set to YES (the default) Doxygen will -# generate HTML output. - -GENERATE_HTML = YES - -# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `html' will be used as the default path. - -HTML_OUTPUT = html - -# The HTML_FILE_EXTENSION tag can be used to specify the file extension for -# each generated HTML page (for example: .htm,.php,.asp). If it is left blank -# doxygen will generate files with .html extension. - -HTML_FILE_EXTENSION = .html - -# The HTML_HEADER tag can be used to specify a personal HTML header for -# each generated HTML page. If it is left blank doxygen will generate a -# standard header. - -HTML_HEADER = - -# The HTML_FOOTER tag can be used to specify a personal HTML footer for -# each generated HTML page. If it is left blank doxygen will generate a -# standard footer. - -HTML_FOOTER = - -# The HTML_STYLESHEET tag can be used to specify a user defined cascading -# style sheet that is used by each HTML page. It can be used to -# fine-tune the look of the HTML output. If the tag is left blank doxygen -# will generate a default style sheet - -HTML_STYLESHEET = - -# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, -# files or namespaces will be aligned in HTML using tables. If set to -# NO a bullet list will be used. - -HTML_ALIGN_MEMBERS = NO - -# If the GENERATE_HTMLHELP tag is set to YES, additional index files -# will be generated that can be used as input for tools like the -# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) -# of the generated HTML documentation. - -GENERATE_HTMLHELP = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag -# controls if a separate .chi index file is generated (YES) or that -# it should be included in the master .chm file (NO). - -GENERATE_CHI = NO - -# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag -# controls whether a binary table of contents is generated (YES) or a -# normal table of contents (NO) in the .chm file. - -BINARY_TOC = NO - -# The TOC_EXPAND flag can be set to YES to add extra items for group members -# to the contents of the Html help documentation and to the tree view. 
- -TOC_EXPAND = NO - -# The DISABLE_INDEX tag can be used to turn on/off the condensed index at -# top of each HTML page. The value NO (the default) enables the index and -# the value YES disables it. - -DISABLE_INDEX = YES - -# This tag can be used to set the number of enum values (range [1..20]) -# that doxygen will group on one line in the generated HTML documentation. - -ENUM_VALUES_PER_LINE = 4 - -# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be -# generated containing a tree-like index structure (just like the one that -# is generated for HTML Help). For this to work a browser that supports -# JavaScript and frames is required (for instance Mozilla, Netscape 4.0+, -# or Internet explorer 4.0+). Note that for large projects the tree generation -# can take a very long time. In such cases it is better to disable this feature. -# Windows users are probably better off using the HTML help feature. - -GENERATE_TREEVIEW = NO - -# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be -# used to set the initial width (in pixels) of the frame in which the tree -# is shown. - -TREEVIEW_WIDTH = 250 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- - -# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will -# generate Latex output. - -GENERATE_LATEX = NO - -# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `latex' will be used as the default path. - -LATEX_OUTPUT = latex - -# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact -# LaTeX documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_LATEX = NO - -# The PAPER_TYPE tag can be used to set the paper type that is used -# by the printer. Possible values are: a4, a4wide, letter, legal and -# executive. If left blank a4wide will be used. - -PAPER_TYPE = a4wide - -# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX -# packages that should be included in the LaTeX output. - -EXTRA_PACKAGES = - -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for -# the generated latex document. The header should contain everything until -# the first chapter. If it is left blank doxygen will generate a -# standard header. Notice: only use this tag if you know what you are doing! - -LATEX_HEADER = - -# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated -# is prepared for conversion to pdf (using ps2pdf). The pdf file will -# contain links (just like the HTML output) instead of page references -# This makes the output suitable for online browsing using a pdf viewer. - -PDF_HYPERLINKS = NO - -# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of -# plain latex in the generated Makefile. Set this option to YES to get a -# higher quality PDF documentation. - -USE_PDFLATEX = NO - -# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. -# command to the generated LaTeX files. This will instruct LaTeX to keep -# running if errors occur, instead of asking the user for help. -# This option is also used when generating formulas in HTML. 
- -LATEX_BATCHMODE = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- - -# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output -# The RTF output is optimised for Word 97 and may not look very pretty with -# other RTF readers or editors. - -GENERATE_RTF = NO - -# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `rtf' will be used as the default path. - -RTF_OUTPUT = rtf - -# If the COMPACT_RTF tag is set to YES Doxygen generates more compact -# RTF documents. This may be useful for small projects and may help to -# save some trees in general. - -COMPACT_RTF = NO - -# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated -# will contain hyperlink fields. The RTF file will -# contain links (just like the HTML output) instead of page references. -# This makes the output suitable for online browsing using WORD or other -# programs which support those fields. -# Note: wordpad (write) and others do not support links. - -RTF_HYPERLINKS = NO - -# Load stylesheet definitions from file. Syntax is similar to doxygen's -# config file, i.e. a series of assigments. You only have to provide -# replacements, missing definitions are set to their default value. - -RTF_STYLESHEET_FILE = - -# Set optional variables used in the generation of an rtf document. -# Syntax is similar to doxygen's config file. - -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- - -# If the GENERATE_MAN tag is set to YES (the default) Doxygen will -# generate man pages - -GENERATE_MAN = NO - -# The MAN_OUTPUT tag is used to specify where the man pages will be put. -# If a relative path is entered the value of OUTPUT_DIRECTORY will be -# put in front of it. If left blank `man' will be used as the default path. - -MAN_OUTPUT = man - -# The MAN_EXTENSION tag determines the extension that is added to -# the generated man pages (default is the subroutine's section .3) - -MAN_EXTENSION = .3 - -# If the MAN_LINKS tag is set to YES and Doxygen generates man output, -# then it will generate one additional man file for each entity -# documented in the real man page(s). These additional files -# only source the real man page, but without them the man command -# would be unable to find the correct page. The default is NO. - -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- - -# If the GENERATE_XML tag is set to YES Doxygen will -# generate an XML file that captures the structure of -# the code including all documentation. Note that this -# feature is still experimental and incomplete at the -# moment. 
- -GENERATE_XML = NO - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- - -# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will -# generate an AutoGen Definitions (see autogen.sf.net) file -# that captures the structure of the code including all -# documentation. Note that this feature is still experimental -# and incomplete at the moment. - -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- - -# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will -# evaluate all C-preprocessor directives found in the sources and include -# files. - -ENABLE_PREPROCESSING = YES - -# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro -# names in the source code. If set to NO (the default) only conditional -# compilation will be performed. Macro expansion can be done in a controlled -# way by setting EXPAND_ONLY_PREDEF to YES. - -MACRO_EXPANSION = NO - -# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES -# then the macro expansion is limited to the macros specified with the -# PREDEFINED and EXPAND_AS_PREDEFINED tags. - -EXPAND_ONLY_PREDEF = NO - -# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files -# in the INCLUDE_PATH (see below) will be search if a #include is found. - -SEARCH_INCLUDES = YES - -# The INCLUDE_PATH tag can be used to specify one or more directories that -# contain include files that are not input files but should be processed by -# the preprocessor. - -INCLUDE_PATH = - -# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard -# patterns (like *.h and *.hpp) to filter out the header-files in the -# directories. If left blank, the patterns specified with FILE_PATTERNS will -# be used. - -INCLUDE_FILE_PATTERNS = - -# The PREDEFINED tag can be used to specify one or more macro names that -# are defined before the preprocessor is started (similar to the -D option of -# gcc). The argument of the tag is a list of macros of the form: name -# or name=definition (no spaces). If the definition and the = are -# omitted =1 is assumed. - -PREDEFINED = - -# If the MACRO_EXPANSION and EXPAND_PREDEF_ONLY tags are set to YES then -# this tag can be used to specify a list of macro names that should be expanded. -# The macro definition that is found in the sources will be used. -# Use the PREDEFINED tag if you want to use a different macro definition. - -EXPAND_AS_DEFINED = - -# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then -# doxygen's preprocessor will remove all function-like macros that are alone -# on a line and do not end with a semicolon. Such function macros are typically -# used for boiler-plate code, and will confuse the parser if not removed. - -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to external references -#--------------------------------------------------------------------------- - -# The TAGFILES tag can be used to specify one or more tagfiles. - -TAGFILES = - -# When a file name is specified after GENERATE_TAGFILE, doxygen will create -# a tag file that is based on the input files it reads. 
- -GENERATE_TAGFILE = - -# If the ALLEXTERNALS tag is set to YES all external classes will be listed -# in the class index. If set to NO only the inherited external classes -# will be listed. - -ALLEXTERNALS = NO - -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed -# in the modules index. If set to NO, only the current project's groups will -# be listed. - -EXTERNAL_GROUPS = YES - -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of `which perl'). - -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- - -# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will -# generate a inheritance diagram (in Html, RTF and LaTeX) for classes with base or -# super classes. Setting the tag to NO turns the diagrams off. Note that this -# option is superceded by the HAVE_DOT option below. This is only a fallback. It is -# recommended to install and use dot, since it yield more powerful graphs. - -CLASS_DIAGRAMS = YES - -# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is -# available from the path. This tool is part of Graphviz, a graph visualization -# toolkit from AT&T and Lucent Bell Labs. The other options in this section -# have no effect if this option is set to NO (the default) - -HAVE_DOT = YES - -# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect inheritance relations. Setting this tag to YES will force the -# the CLASS_DIAGRAMS tag to NO. - -CLASS_GRAPH = YES - -# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen -# will generate a graph for each documented class showing the direct and -# indirect implementation dependencies (inheritance, containment, and -# class references variables) of the class with other documented classes. - -COLLABORATION_GRAPH = YES - -# If set to YES, the inheritance and collaboration graphs will show the -# relations between templates and their instances. - -TEMPLATE_RELATIONS = YES - -# If set to YES, the inheritance and collaboration graphs will hide -# inheritance and usage relations if the target is undocumented -# or is not a class. - -HIDE_UNDOC_RELATIONS = NO - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT -# tags are set to YES then doxygen will generate a graph for each documented -# file showing the direct and indirect include dependencies of the file with -# other documented files. - -INCLUDE_GRAPH = YES - -# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and -# HAVE_DOT tags are set to YES then doxygen will generate a graph for each -# documented header file showing the documented files that directly or -# indirectly include this file. - -INCLUDED_BY_GRAPH = YES - -# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen -# will graphical hierarchy of all classes instead of a textual one. - -GRAPHICAL_HIERARCHY = YES - -# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. Possible values are gif, jpg, and png -# If left blank gif will be used. - -DOT_IMAGE_FORMAT = gif - -# The tag DOT_PATH can be used to specify the path where the dot tool can be -# found. If left blank, it is assumed the dot tool can be found on the path. 
- -DOT_PATH = - -# The DOTFILE_DIRS tag can be used to specify one or more directories that -# contain dot files that are included in the documentation (see the -# \dotfile command). - -DOTFILE_DIRS = - -# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_WIDTH = 1024 - -# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height -# (in pixels) of the graphs generated by dot. If a graph becomes larger than -# this value, doxygen will try to truncate the graph, so that it fits within -# the specified constraint. Beware that most browsers cannot cope with very -# large images. - -MAX_DOT_GRAPH_HEIGHT = 1024 - -# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will -# generate a legend page explaining the meaning of the various boxes and -# arrows in the dot generated graphs. - -GENERATE_LEGEND = YES - -# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will -# remove the intermedate dot files that are used to generate -# the various graphs. - -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Configuration::addtions related to the search engine -#--------------------------------------------------------------------------- - -# The SEARCHENGINE tag specifies whether or not a search engine should be -# used. If set to NO the values of all tags below this one will be ignored. - -SEARCHENGINE = NO - -# The CGI_NAME tag should be the name of the CGI script that -# starts the search engine (doxysearch) with the correct parameters. -# A script with this name will be generated by doxygen. - -CGI_NAME = search.cgi - -# The CGI_URL tag should be the absolute URL to the directory where the -# cgi binaries are located. See the documentation of your http daemon for -# details. - -CGI_URL = - -# The DOC_URL tag should be the absolute URL to the directory where the -# documentation is located. If left blank the absolute path to the -# documentation, with file:// prepended to it, will be used. - -DOC_URL = - -# The DOC_ABSPATH tag should be the absolute path to the directory where the -# documentation is located. If left blank the directory on the local machine -# will be used. - -DOC_ABSPATH = - -# The BIN_ABSPATH tag must point to the directory where the doxysearch binary -# is installed. - -BIN_ABSPATH = /usr/local/bin - -# The EXT_DOC_PATHS tag can be used to specify one or more paths to -# documentation generated for other projects. This allows doxysearch to search -# the documentation for these projects as well. - -EXT_DOC_PATHS = diff --git a/storage/ndb/docs/doxygen/postdoxy.pl b/storage/ndb/docs/doxygen/postdoxy.pl deleted file mode 100755 index 64d80ef2102..00000000000 --- a/storage/ndb/docs/doxygen/postdoxy.pl +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-#
-# Written by Lars Thalmann, lars@mysql.com, 2003.
-#
-
-use strict;
-umask 000;
-
-# -----------------------------------------------------------------------------
-# Settings
-# -----------------------------------------------------------------------------
-
-$ENV{LD_LIBRARY_PATH} = "/usr/local/lib:/opt/as/local/lib";
-$ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} . ":/opt/as/forte6/SUNWspro/lib";
-$ENV{PATH} = $ENV{PATH} . ":/usr/local/bin:/opt/as/local/bin";
-$ENV{PATH} = $ENV{PATH} . ":/opt/as/local/teTeX/bin/sparc-sun-solaris2.8";
-
-my $destdir = @ARGV[0];
-my $title = ""; # $ARGV[1];
-
-my $release;
-if (defined $ENV{'NDB_RELEASE'}) {
-    $release = $ENV{'NDB_RELEASE'};
-    print "----------------------------------------------------------------\n";
-    print "Release = " . $release . "\n";
-    print "----------------------------------------------------------------\n";
-} else {
-    print "----------------------------------------------------------------\n";
-    print "NDB Documentation is being modified to standard format\n";
-    print "(If you want this automatic, use env variable NDB_RELEASE.)\n";
-    print "Enter release (Examples: \"1.43.0 (alpha)\" or \"2.1.0 (gamma)\"): ";
-    $release = <STDIN>;
-    print "----------------------------------------------------------------\n";
-}
-
-# -----------------------------------------------------------------------------
-# Change a little in refman.tex
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/refman.tex")
-    or die "Error opening ${destdir}/refman.tex.\n";
-open (OUTFILE, "> ${destdir}/refman.tex.new")
-    or die "Error opening ${destdir}/refman.tex.new.\n";
-
-while (<INFILE>)
-{
-    if (/(.*)(RELEASE)(.*)$/) {
-        print OUTFILE $1 . $release . $3;
-    } elsif (/(.*)(DATE)(.*)$/) {
-        print OUTFILE $1 . localtime() . $3;
-    } elsif (/\\chapter\{File Index\}/) {
-        # Erase
-    } elsif (/\\input\{files\}/) {
-        # Erase
-    } elsif (/\\chapter\{Hierarchical Index\}/) {
-        # Erase
-    } elsif (/\\input\{hierarchy\}/) {
-        # Erase
-    } elsif (/\\chapter\{Page Index\}/) {
-        # Erase
-    } elsif (/\\input\{pages\}/) {
-        # Erase
-    } else {
-        print OUTFILE;
-    }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/refman.tex.new ${destdir}/refman.tex");
-
-# -----------------------------------------------------------------------------
-# Change a little in doxygen.sty
-# -----------------------------------------------------------------------------
-
-open (INFILE, "< ${destdir}/doxygen.sty")
-    or die "Error opening INFILE.\n";
-open (OUTFILE, "> ${destdir}/doxygen.sty.new")
-    or die "Error opening OUTFILE.\n";
-
-while (<INFILE>)
-{
-    if (/\\rfoot/) {
-        print OUTFILE "\\rfoot[\\fancyplain{}{\\bfseries\\small \\copyright~Copyright 2003-2008 MySQL AB, 2008 Sun Microsystems, Inc.\\hfill support-cluster\@mysql.com}]{}\n";
-    } elsif (/\\lfoot/) {
-        print OUTFILE "\\lfoot[]{\\fancyplain{}{\\bfseries\\small support-cluster\@mysql.com\\hfill \\copyright~Copyright 2003-2008 MySQL AB, 2008 Sun Microsystems, Inc.}}\n";
-    } else {
-        print OUTFILE;
-    }
-}
-
-close INFILE;
-close OUTFILE;
-
-system("mv ${destdir}/doxygen.sty.new ${destdir}/doxygen.sty");
-
diff --git a/storage/ndb/docs/doxygen/predoxy.pl b/storage/ndb/docs/doxygen/predoxy.pl
deleted file mode 100755
index b26dbc67ae1..00000000000
--- a/storage/ndb/docs/doxygen/predoxy.pl
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (C) 2004 MySQL AB
-# Use is subject to license terms
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-#
-# Written by Lars Thalmann, lars@mysql.com, 2003.
-#
-
-use strict;
-umask 000;
-
-# -----------------------------------------------------------------------------
-# Fix HTML Footer
-# -----------------------------------------------------------------------------
-
-open (OUTFILE, "> footer.html");
-
-print OUTFILE<<EOT;
-EOT
-print OUTFILE "Documentation generated " . localtime() .
-              " from mysql source files.";
-print OUTFILE<<EOT;
-© 2003-2004
-MySQL AB
- - -EOT - -print "Preformat finished\n\n"; diff --git a/storage/ndb/docs/wl2077.txt b/storage/ndb/docs/wl2077.txt deleted file mode 100644 index f5b10bb702e..00000000000 --- a/storage/ndb/docs/wl2077.txt +++ /dev/null @@ -1,48 +0,0 @@ - -100' * (select 1 from T1 (1M rows) where key = rand()); -1 host, 1 ndbd, api co-hosted -results in 1000 rows / sec - - wo/reset bounds w/ rb -4.1-read committed a) 4.9 b) 7.4 -4.1-read hold lock c) 4.7 d) 6.7 - -wl2077-read committed 6.4 (+30%) 10.8 (+45%) -wl2077-read hold lock 4.6 (-1%) 6.7 (+ 0%) - -5.0-ndb batch read committed f) 50' (+680%) g) 50' (+360%) -5.0-ndb batch read hold lock h) 12' (+160%) i) 13' (+79%) - -shm-mem read committed (cmp. wl2077) a) 9.5' (+48%) b) 14' (+30%) - read hold lock c) 6.7' (+45%) d) 9.8' (+46%) - --- Comparision e) shm -serial pk: 10.9' 20' (+83%) -batched (1000): 59' 62' (+5%) -serial uniq index: 8.4' 14' (+66%) -batched (1000): 33' 36' (+9%) -index range (1000): 186' - ----- - -load) testScanPerf -c 1 -d 1 T1 -a) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 0 T1 -b) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 2 -q 1 T1 -c) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 0 T1 -d) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 1 -r 2 -q 1 T1 -e) testReadPerf -i 25 -c 0 -d 0 T1 -f) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 3 -q 0 -m 1000 -i 10 T1 -g) testScanPerf -s 100000 -c 0 -d 0 -a 1 -l 0 -r 3 -q 1 -m 1000 -i 10 T1 - ---- music join 1db-co 2db-co - -4.1 13s 14s -4.1 wo/ blobs 1.7s 3.2s - -wl2077 12s 14s -wl2077 wo/ blobs 1.2s (-30%) 2.5s (-22%) - -pekka-blob-fix 1.3s - -shm 1.2s 2.0s -shm wo/ blobs 1.1s 2.0s diff --git a/storage/ndb/include/Makefile.am b/storage/ndb/include/Makefile.am deleted file mode 100644 index 06b7145572e..00000000000 --- a/storage/ndb/include/Makefile.am +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -include $(top_srcdir)/storage/ndb/config/common.mk.am - -ndbinclude_HEADERS = \ -ndb_constants.h \ -ndb_init.h \ -ndb_types.h \ -ndb_version.h - -ndbapiinclude_HEADERS = \ -ndbapi/ndbapi_limits.h \ -ndbapi/ndb_opt_defaults.h \ -ndbapi/Ndb.hpp \ -ndbapi/NdbApi.hpp \ -ndbapi/NdbTransaction.hpp \ -ndbapi/NdbDictionary.hpp \ -ndbapi/NdbError.hpp \ -ndbapi/NdbEventOperation.hpp \ -ndbapi/NdbIndexOperation.hpp \ -ndbapi/NdbOperation.hpp \ -ndbapi/ndb_cluster_connection.hpp \ -ndbapi/NdbBlob.hpp \ -ndbapi/NdbPool.hpp \ -ndbapi/NdbRecAttr.hpp \ -ndbapi/NdbReceiver.hpp \ -ndbapi/NdbScanFilter.hpp \ -ndbapi/NdbScanOperation.hpp \ -ndbapi/NdbIndexScanOperation.hpp \ -ndbapi/NdbIndexStat.hpp \ -ndbapi/ndberror.h - -mgmapiinclude_HEADERS = \ -mgmapi/mgmapi.h \ -mgmapi/mgmapi_error.h \ -mgmapi/mgmapi_debug.h \ -mgmapi/mgmapi_config_parameters.h \ -mgmapi/mgmapi_config_parameters_debug.h \ -mgmapi/ndb_logevent.h \ -mgmapi/ndbd_exit_codes.h - -noinst_HEADERS = \ -ndb_global.h \ -ndb_net.h - -EXTRA_DIST = debugger editline kernel logger mgmcommon \ -portlib transporter util - -dist-hook: - -rm -rf `find $(distdir) -type d -name SCCS` - -windoze-dsp: diff --git a/storage/ndb/include/debugger/DebuggerNames.hpp b/storage/ndb/include/debugger/DebuggerNames.hpp deleted file mode 100644 index bf5c91401ef..00000000000 --- a/storage/ndb/include/debugger/DebuggerNames.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DEBUGGER_NAMES -#define DEBUGGER_NAMES - -#include -#include - -/** - * getSignalName - * - * NOTES: Very quick - * - * RETURNS: Signal name or 0 if none found - */ -const char * -getSignalName(GlobalSignalNumber gsn, const char * defualtValue = "Unknown"); - -/** - * getGsn - * - * NOTES: Very slow - * - * RETURNS: Gsn or 0 if none found - */ -GlobalSignalNumber -getGsn(const char * signalName); - -/** - * getBlockName - * - * NOTES: Very quick - * - * RETURNS: Block name or - * defValue if not a valid block number - */ -const char * -getBlockName(BlockNumber blockNo, const char * defValue = 0); - -/** - * getBlockNo - * - * NOTES: Very slow - * - * RETURNS: BlockNo or 0 if none found - */ -BlockNumber -getBlockNo(const char * blockName); - -/** - * Find a print function for a signal - * - * RETURNS: 0 if none found - */ -SignalDataPrintFunction findPrintFunction(GlobalSignalNumber); - -#endif diff --git a/storage/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp deleted file mode 100644 index 985b9f857ea..00000000000 --- a/storage/ndb/include/debugger/EventLogger.hpp +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef EVENTLOGGER_H -#define EVENTLOGGER_H - -#include -#include -#include -#include -#include - -class EventLoggerBase { -public: - virtual ~EventLoggerBase(); - - /** - * LogLevel settings - */ - LogLevel m_logLevel; - - /** - * This matrix defines which event should be printed when - * - * threshold - is in range [0-15] - * severity - DEBUG to ALERT (Type of log message) - */ - typedef void (* EventTextFunction)(char *,size_t,const Uint32*); - - struct EventRepLogLevelMatrix { - Ndb_logevent_type eventType; - LogLevel::EventCategory eventCategory; - Uint32 threshold; - Logger::LoggerLevel severity; - EventTextFunction textF; - }; - - static const EventRepLogLevelMatrix matrix[]; - static const Uint32 matrixSize; - static int event_lookup(int eventType, - LogLevel::EventCategory &cat, - Uint32 &threshold, - Logger::LoggerLevel &severity, - EventTextFunction &textF); -}; - -/** - * The EventLogger is primarily used for logging NDB events - * in the Management Server. It inherits all logging functionality of Logger. - * - * HOW TO USE - * - * 1) Create an EventLogger - * - * EventLogger myEventLogger = new EventLogger(); - * - * 2) Log NDB events and other log messages. 
- * - * myEventLogger->info("Changing log levels."); - * - * EventReport* report = (EventReport*)&theSignalData[0]; - * myEventLogger->log(eventReport->getEventType(), theSignalData, aNodeId); - * - * - * The following NDB event categories and log levels are enabled as default: - * - * EVENT-CATEGORY LOG-LEVEL - * - * Startup 4 - * Shutdown 1 - * Statistic 2 - * Checkpoint 5 - * NodeRestart 8 - * Connection 2 - * Error 15 - * Info 10 - * - * @see Logger - * @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $ - */ -class EventLogger : public EventLoggerBase, public Logger -{ -public: - /** - * Default constructor. Enables default log levels and - * sets the log category to 'EventLogger'. - */ - EventLogger(); - - /** - * Destructor. - */ - virtual ~EventLogger(); - - /** - * Opens/creates the eventlog with the specified filename. - * - * @param aFileName the eventlog filename. - * @param maxNoFiles the maximum no of archived eventlog files. - * @param maxFileSize the maximum eventlog file size. - * @param maxLogEntries the maximum number of log entries before - * checking time to archive. - * @return true if successful. - */ - bool open(const char* logFileName, - int maxNoFiles = FileLogHandler::MAX_NO_FILES, - long int maxFileSize = FileLogHandler::MAX_FILE_SIZE, - unsigned int maxLogEntries = FileLogHandler::MAX_LOG_ENTRIES); - - /** - * Closes the eventlog. - */ - void close(); - - /** - * Logs the NDB event. - * - * @param eventType the type of event. - * @param theData the event data. - * @param nodeId the node id of event origin. - */ - virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0); - - - /** - * Returns the event text for the specified event report type. - * - * @param textF print function for the event - * @param theData the event data. - * @param nodeId a node id. - * @return the event report text. - */ - static const char* getText(char * dst, size_t dst_len, - EventTextFunction textF, - const Uint32* theData, NodeId nodeId = 0); - - /** - * Returns the log level that is used to filter an event. The event will not - * be logged unless its event category's log level is <= levelFilter. - * - * @return the log level filter that is used for all event categories. - */ - int getFilterLevel() const; - - /** - * Sets log level filter. The event will be logged if - * the event category's log level is <= 'filterLevel'. - * - * @param level the log level to filter. - */ - void setFilterLevel(int filterLevel); - -private: - /** Prohibit */ - EventLogger(const EventLogger&); - EventLogger operator = (const EventLogger&); - bool operator == (const EventLogger&); - - Uint32 m_filterLevel; - - STATIC_CONST(MAX_TEXT_LENGTH = 256); -}; - -extern void getRestartAction(Uint32 action, BaseString &str); -#endif diff --git a/storage/ndb/include/debugger/GrepError.hpp b/storage/ndb/include/debugger/GrepError.hpp deleted file mode 100644 index 41f54fbf3a0..00000000000 --- a/storage/ndb/include/debugger/GrepError.hpp +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GREP_ERROR_H -#define GREP_ERROR_H - -#include - -/** - * - */ -class GrepError { -public: - enum GE_Code { - GE_NO_ERROR = 0, - SUBSCRIPTION_ID_NOMEM = 1, - SUBSCRIPTION_ID_NOT_FOUND = 2, - SUBSCRIPTION_ID_NOT_UNIQUE = 3, - SUBSCRIPTION_ID_SUMA_FAILED_CREATE = 4, - SUBSCRIPTION_ID_ALREADY_EXIST = 5, - COULD_NOT_ALLOCATE_MEM_FOR_SIGNAL = 6, - NULL_VALUE = 7, - SEQUENCE_ERROR = 8, - NOSPACE_IN_POOL= 9, - SUBSCRIPTION_NOT_FOUND = 10, - - NF_FakeErrorREF = 11, - - // Error that the user can get when issuing commands - SUBSCRIPTION_NOT_STARTED = 100, - START_OF_COMPONENT_IN_WRONG_STATE, - START_ALREADY_IN_PROGRESS, - ILLEGAL_STOP_EPOCH_ID, - WRONG_NO_OF_SECTIONS, - ILLEGAL_ACTION_WHEN_STOPPING, - ILLEGAL_USE_OF_COMMAND, - CHANNEL_NOT_STOPPABLE, - - // subscriber releated 20 - 30 - SUBSCRIBER_NOT_FOUND = 20, - - //SUMA specific 400 - 600 - SELECTED_TABLE_NOT_FOUND = 400, - SELECTED_TABLE_ALREADY_ADDED = 401, - - //REP ERRORS starts at 1000 - REP_NO_CONNECTED_NODES = 1001, - REP_DELETE_NEGATIVE_EPOCH = 1002, - REP_DELETE_NONEXISTING_EPOCH = 1003, - REP_APPLY_LOGRECORD_FAILED = 1012, - REP_APPLY_METARECORD_FAILED = 1013, - REP_APPLY_NONCOMPLETE_GCIBUFFER = 1004, - REP_APPLY_NULL_GCIBUFFER = 1005, - REP_APPLIER_START_TRANSACTION = 1006, - REP_APPLIER_NO_TABLE = 1007, - REP_APPLIER_NO_OPERATION = 1007, - REP_APPLIER_EXECUTE_TRANSACTION = 1008, - REP_APPLIER_CREATE_TABLE = 1009, - REP_APPLIER_PREPARE_TABLE = 1010, - REP_DISCONNECT = 1011, - REQUESTOR_ILLEGAL_STATE_FOR_SLOWSTOP = 1200, - REQUESTOR_ILLEGAL_STATE_FOR_FASTSTOP = 1201, - REP_NOT_PROPER_TABLE = 1202, - REP_TABLE_ALREADY_SELECTED = 1203, - REP_TABLE_NOT_FOUND = 1204, - - NOT_YET_IMPLEMENTED, - NO_OF_ERROR_CODES - }; - - struct ErrorDescription { - GE_Code errCode; - const char * name; - }; - static const ErrorDescription errorDescriptions[]; - static const Uint32 noOfErrorDescs; - static const char * getErrorDesc(GrepError::GE_Code err); - -}; - -#endif diff --git a/storage/ndb/include/debugger/SignalLoggerManager.hpp b/storage/ndb/include/debugger/SignalLoggerManager.hpp deleted file mode 100644 index cb47c6c5bc5..00000000000 --- a/storage/ndb/include/debugger/SignalLoggerManager.hpp +++ /dev/null @@ -1,174 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
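The GrepError class above only declares its error-code enum, the ErrorDescription entry type and getErrorDesc(); the table contents live in a source file outside this hunk. As a rough, self-contained sketch of the same enum-plus-lookup-table pattern (the description strings are invented for illustration; the non-zero codes do match the enum values shown above):

#include <cstdio>
#include <cstddef>

// Hypothetical standalone mirror of the GrepError pattern; not NDB code.
enum GE_Code { GE_NO_ERROR = 0, SUBSCRIPTION_ID_NOMEM = 1, NULL_VALUE = 7 };

struct ErrorDescription { GE_Code errCode; const char *name; };

static const ErrorDescription errorDescriptions[] = {
  { GE_NO_ERROR,           "no error" },
  { SUBSCRIPTION_ID_NOMEM, "out of memory allocating subscription id" },
  { NULL_VALUE,            "unexpected NULL value" },
};

static const char *getErrorDesc(GE_Code err)
{
  // Linear scan is fine: the table is small and only read on error paths.
  for (size_t i = 0; i < sizeof(errorDescriptions) / sizeof(errorDescriptions[0]); i++)
    if (errorDescriptions[i].errCode == err)
      return errorDescriptions[i].name;
  return 0; // unknown code
}

int main()
{
  printf("%s\n", getErrorDesc(NULL_VALUE));
  return 0;
}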
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//**************************************************************************** -// -// .NAME -// SignalLoggerManager - Handle signal loggers -// -//**************************************************************************** -#ifndef SignalLoggerManager_H -#define SignalLoggerManager_H - - -#include -#include -#include - -class SignalLoggerManager -{ -public: - SignalLoggerManager(); - virtual ~SignalLoggerManager(); - - /** - * Sets output - * @Returns old output stream - */ - FILE * setOutputStream(FILE * output); - - /** - * Gets current output - */ - FILE * getOutputStream() const; - - void flushSignalLog(); - - /** - * For direct signals - * @See also SimulatedBlock EXECUTE_DIRECT - */ - void executeDirect(const SignalHeader&, - Uint8 prio, const Uint32 * theData, Uint32 node); - - /** - * For input signals - */ - void executeSignal(const SignalHeader&, Uint8 prio, - const Uint32 * theData, Uint32 node, - const SegmentedSectionPtr ptr[3], Uint32 secs); - - void executeSignal(const SignalHeader&, Uint8 prio, - const Uint32 * theData, Uint32 node, - const LinearSectionPtr ptr[3], Uint32 secs); - - /** - * For output signals - */ - void sendSignal(const SignalHeader&, Uint8 prio, - const Uint32 * theData, Uint32 node, - const SegmentedSectionPtr ptr[3], Uint32 secs); - - void sendSignal(const SignalHeader&, Uint8 prio, - const Uint32 * theData, Uint32 node, - const LinearSectionPtr ptr[3], Uint32 secs); - - /** - * For output signals - */ - void sendSignalWithDelay(Uint32 delayInMilliSeconds, - const SignalHeader&, - Uint8 prio, const Uint32 * data, Uint32 node, - const SegmentedSectionPtr ptr[3], Uint32 secs); - - /** - * Generic messages in the signal log - */ - void log(BlockNumber bno, const char * msg, ...); - - /** - * LogModes - */ - enum LogMode { - LogOff = 0, - LogIn = 1, - LogOut = 2, - LogInOut = 3 - }; - - /** - * Returns no of loggers affected - */ - int log(LogMode logMode, const char * params); - int logOn(bool allBlocks, BlockNumber bno, LogMode logMode); - int logOff(bool allBlocks, BlockNumber bno, LogMode logMode); - int logToggle(bool allBlocks, BlockNumber bno, LogMode logMode); - - void setTrace(unsigned long trace); - unsigned long getTrace() const; - - void setOwnNodeId(int nodeId); - void setLogDistributed(bool val); - - /** - * Print header - */ - static void printSignalHeader(FILE * output, - const SignalHeader & sh, - Uint8 prio, - Uint32 node, - bool printReceiversSignalId); - - /** - * Function for printing the Signal Data - */ - static void printSignalData(FILE * out, - const SignalHeader & sh, const Uint32 *); - - /** - * Print linear section. - */ - static void printLinearSection(FILE * output, - const SignalHeader & sh, - const LinearSectionPtr ptr[3], - unsigned i); - - /** - * Print segmented section. - */ - static void printSegmentedSection(FILE * output, - const SignalHeader & sh, - const SegmentedSectionPtr ptr[3], - unsigned i); - - /** - * Print data word in hex. Adds line break before the word - * when pos > 0 && pos % 7 == 0. Increments pos. 
- */ - static void printDataWord(FILE * output, Uint32 & pos, const Uint32 data); - -private: - bool m_logDistributed; - Uint32 m_ownNodeId; - - FILE * outputStream; - int log(int cmd, BlockNumber bno, LogMode logMode); - - Uint32 traceId; - Uint8 logModes[NO_OF_BLOCKS]; - - inline bool - logMatch(BlockNumber bno, LogMode mask) - { - // avoid addressing outside logModes - return - bno < MIN_BLOCK_NO || bno > MAX_BLOCK_NO || - (logModes[bno-MIN_BLOCK_NO] & mask); - } -}; - -#endif // SignalLoggerManager_H - diff --git a/storage/ndb/include/editline/editline.h b/storage/ndb/include/editline/editline.h deleted file mode 100644 index 3907e57896f..00000000000 --- a/storage/ndb/include/editline/editline.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/* $Id: editline.h,v 1.1 2002/12/11 13:53:46 hin Exp $ */ - -/* - * Public include file for editline, to be included instead of readline.h - */ - -#ifndef __EDITLINE_H_INCLUDED__ -#define __EDITLINE_H_INCLUDED__ - -#ifdef __cplusplus -extern "C" { -#endif - -extern char *readline(const char *); -extern void add_history(char *); - -#ifdef __cplusplus -} -#endif - -#endif /* !__EDITLINE_H_INCLUDED__ */ - diff --git a/storage/ndb/include/kernel/AttributeDescriptor.hpp b/storage/ndb/include/kernel/AttributeDescriptor.hpp deleted file mode 100644 index 3927d8d447e..00000000000 --- a/storage/ndb/include/kernel/AttributeDescriptor.hpp +++ /dev/null @@ -1,236 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ATTRIBUTE_DESCRIPTOR_HPP -#define ATTRIBUTE_DESCRIPTOR_HPP - -class AttributeDescriptor { - friend class Dbdict; - friend class Dbtc; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dblqh; - friend class SimulatedBlock; - -public: - static void setType(Uint32 &, Uint32 type); - static void setSize(Uint32 &, Uint32 size); - static void setArrayType(Uint32 &, Uint32 arrayType); - static void setArraySize(Uint32 &, Uint32 arraySize); - static void setNullable(Uint32 &, Uint32 nullable); - static void setDKey(Uint32 &, Uint32 dkey); - static void setPrimaryKey(Uint32 &, Uint32 dkey); - static void setDynamic(Uint32 &, Uint32 dynamicInd); - static void setDiskBased(Uint32 &, Uint32 val); - - static Uint32 getType(const Uint32 &); - static Uint32 getSize(const Uint32 &); - static Uint32 getSizeInBytes(const Uint32 &); - static Uint32 getSizeInWords(const Uint32 &); - static Uint32 getArrayType(const Uint32 &); - static Uint32 getArraySize(const Uint32 &); - static Uint32 getNullable(const Uint32 &); - static Uint32 getDKey(const Uint32 &); - static Uint32 getPrimaryKey(const Uint32 &); - static Uint32 getDynamic(const Uint32 &); - static Uint32 getDiskBased(const Uint32 &); - - Uint32 m_data; -}; - -/** - * - * a = Array type - 2 Bits -> Max 3 (Bit 0-1) - * t = Attribute type - 5 Bits -> Max 31 (Bit 2-6) - * s = Attribute size - 3 Bits -> Max 7 (Bit 8-10) - * d = Disk based - 1 Bit 11 - * n = Nullable - 1 Bit 12 - * k = Distribution Key Ind - 1 Bit 13 - * p = Primary key attribute - 1 Bit 14 - * y = Dynamic attribute - 1 Bit 15 - * z = Array size - 16 Bits -> Max 65535 (Bit 16-31) - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * aattttt sssdnkpyzzzzzzzzzzzzzzzz - * aattsss n d k pyzzzzzzzzzzzzzzzz [ old format ] - * - */ - -#define AD_ARRAY_TYPE_SHIFT (0) -#define AD_ARRAY_TYPE_MASK (3) - -#define AD_TYPE_SHIFT (2) -#define AD_TYPE_MASK (31) - -#define AD_SIZE_SHIFT (8) -#define AD_SIZE_MASK (7) - -#define AD_SIZE_IN_BYTES_SHIFT (3) -#define AD_SIZE_IN_WORDS_OFFSET (31) -#define AD_SIZE_IN_WORDS_SHIFT (5) - -#define AD_DISK_SHIFT (11) -#define AD_NULLABLE_SHIFT (12) -#define AD_DISTR_KEY_SHIFT (13) -#define AD_PRIMARY_KEY (14) -#define AD_DYNAMIC (15) - -#define AD_ARRAY_SIZE_SHIFT (16) -#define AD_ARRAY_SIZE_MASK (65535) - -inline -void -AttributeDescriptor::setType(Uint32 & desc, Uint32 type){ - assert(type <= AD_TYPE_MASK); - desc |= (type << AD_TYPE_SHIFT); -} - -inline -void -AttributeDescriptor::setSize(Uint32 & desc, Uint32 size){ - assert(size <= AD_SIZE_MASK); - desc |= (size << AD_SIZE_SHIFT); -} - -inline -void -AttributeDescriptor::setArrayType(Uint32 & desc, Uint32 arrayType){ - assert(arrayType <= AD_ARRAY_TYPE_MASK); - desc |= (arrayType << AD_ARRAY_TYPE_SHIFT); -} - -inline -void -AttributeDescriptor::setArraySize(Uint32 & desc, Uint32 arraySize){ - assert(arraySize <= AD_ARRAY_SIZE_MASK); - desc |= (arraySize << AD_ARRAY_SIZE_SHIFT); -} - -inline -void -AttributeDescriptor::setNullable(Uint32 & desc, Uint32 nullable){ - assert(nullable <= 1); - desc |= (nullable << AD_NULLABLE_SHIFT); -} - -inline -void -AttributeDescriptor::setDKey(Uint32 & desc, Uint32 dkey){ - assert(dkey <= 1); - desc |= (dkey << AD_DISTR_KEY_SHIFT); -} - -inline -void -AttributeDescriptor::setPrimaryKey(Uint32 & desc, Uint32 
dkey){ - assert(dkey <= 1); - desc |= (dkey << AD_PRIMARY_KEY); -} - -inline -void -AttributeDescriptor::setDynamic(Uint32 & desc, Uint32 dynamic){ - assert(dynamic <= 1); - desc |= (dynamic << AD_DYNAMIC); -} - -inline -void -AttributeDescriptor::setDiskBased(Uint32 & desc, Uint32 val) -{ - assert(val <= 1); - desc |= (val << AD_DISK_SHIFT); -} - -/** - * Getters - */ -inline -Uint32 -AttributeDescriptor::getType(const Uint32 & desc){ - return (desc >> AD_TYPE_SHIFT) & AD_TYPE_MASK; -} - -inline -Uint32 -AttributeDescriptor::getSize(const Uint32 & desc){ - return (desc >> AD_SIZE_SHIFT) & AD_SIZE_MASK; -} - -inline -Uint32 -AttributeDescriptor::getSizeInBytes(const Uint32 & desc){ - return (getArraySize(desc) << getSize(desc)) - >> AD_SIZE_IN_BYTES_SHIFT; -} - -inline -Uint32 -AttributeDescriptor::getSizeInWords(const Uint32 & desc){ - return ((getArraySize(desc) << getSize(desc)) - + AD_SIZE_IN_WORDS_OFFSET) - >> AD_SIZE_IN_WORDS_SHIFT; -} - -inline -Uint32 -AttributeDescriptor::getArrayType(const Uint32 & desc){ - return (desc >> AD_ARRAY_TYPE_SHIFT) & AD_ARRAY_TYPE_MASK; -} - -inline -Uint32 -AttributeDescriptor::getArraySize(const Uint32 & desc){ - return (desc >> AD_ARRAY_SIZE_SHIFT) & AD_ARRAY_SIZE_MASK; -} - -inline -Uint32 -AttributeDescriptor::getNullable(const Uint32 & desc){ - return (desc >> AD_NULLABLE_SHIFT) & 1; -} - -inline -Uint32 -AttributeDescriptor::getDKey(const Uint32 & desc){ - return (desc >> AD_DISTR_KEY_SHIFT) & 1; -} - -inline -Uint32 -AttributeDescriptor::getPrimaryKey(const Uint32 & desc){ - return (desc >> AD_PRIMARY_KEY) & 1; -} - -inline -Uint32 -AttributeDescriptor::getDynamic(const Uint32 & desc){ - return (desc >> AD_DYNAMIC) & 1; -} - -inline -Uint32 -AttributeDescriptor::getDiskBased(const Uint32 & desc) -{ - return (desc >> AD_DISK_SHIFT) & 1; -} - -class NdbOut& -operator<<(class NdbOut&, const AttributeDescriptor&); - -#endif diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp deleted file mode 100644 index 9edc759f563..00000000000 --- a/storage/ndb/include/kernel/AttributeHeader.hpp +++ /dev/null @@ -1,247 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
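Taken together, the AD_* shift and mask constants above describe one packed 32-bit descriptor word per attribute. The sketch below reuses those bit positions outside the NDB class purely to show the packing and unpacking; the concrete values are made up for illustration (a size code of 5 corresponds to 2^5 = 32 bits per element, which is how getSizeInBytes() above interprets it):

#include <cassert>
#include <cstdio>
#include <stdint.h>

// Same bit positions as AttributeDescriptor.hpp above (illustration only).
enum {
  AD_ARRAY_TYPE_SHIFT = 0, AD_TYPE_SHIFT = 2, AD_SIZE_SHIFT = 8,
  AD_NULLABLE_SHIFT = 12, AD_ARRAY_SIZE_SHIFT = 16
};

int main()
{
  uint32_t desc = 0;

  // Pack: type 5, size code 5 (32-bit elements), nullable, array size 1.
  desc |= (5u << AD_TYPE_SHIFT);
  desc |= (5u << AD_SIZE_SHIFT);
  desc |= (1u << AD_NULLABLE_SHIFT);
  desc |= (1u << AD_ARRAY_SIZE_SHIFT);

  // Unpack with the matching masks from the header (31, 7, 1, 65535).
  uint32_t type      = (desc >> AD_TYPE_SHIFT) & 31;
  uint32_t sizeCode  = (desc >> AD_SIZE_SHIFT) & 7;
  uint32_t nullable  = (desc >> AD_NULLABLE_SHIFT) & 1;
  uint32_t arraySize = (desc >> AD_ARRAY_SIZE_SHIFT) & 65535;

  assert(type == 5 && sizeCode == 5 && nullable == 1 && arraySize == 1);
  printf("descriptor word = 0x%08x\n", desc);
  return 0;
}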
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ATTRIBUTE_HEADER -#define ATTRIBUTE_HEADER - -/** - * @class AttributeHeader - * @brief Header passed in front of every attribute value in AttrInfo signal - */ -class AttributeHeader { - friend class Dbtup; - friend class Backup; - friend class NdbOperation; - friend class DbUtil; - friend class Suma; - -public: - /** - * Pseudo columns - */ - STATIC_CONST( PSEUDO = 0x8000 ); - STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no - STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed) - STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count - STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges) - - STATIC_CONST( ROW_SIZE = 0xFFFA ); - STATIC_CONST( FRAGMENT_FIXED_MEMORY= 0xFFF9 ); - - STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 ); - STATIC_CONST( DISK_REF = 0xFFF7 ); - STATIC_CONST( ROWID = 0xFFF6 ); - STATIC_CONST( ROW_GCI = 0xFFF5 ); - STATIC_CONST( FRAGMENT_VARSIZED_MEMORY = 0xFFF4 ); - // 0xFFF3 to be used for read packed when merged - STATIC_CONST( ANY_VALUE = 0xFFF2 ); - STATIC_CONST( COPY_ROWID = 0xFFF1 ); - - // NOTE: in 5.1 ctors and init take size in bytes - - /** Initialize AttributeHeader at location aHeaderPtr */ - static void init(Uint32* aHeaderPtr, Uint32 anAttributeId, Uint32 aByteSize); - - /** Returns size of AttributeHeader (usually one or two words) */ - Uint32 getHeaderSize() const; // In 32-bit words - - /** Store AttributeHeader in location given as argument */ - void insertHeader(Uint32*); - - /** Get next attribute header (if there is one) */ - AttributeHeader* getNext() const; - - /** Get location of attribute value */ - Uint32* getDataPtr() const; - - /** Getters and Setters */ - Uint32 getAttributeId() const; - void setAttributeId(Uint32); - Uint32 getByteSize() const; - void setByteSize(Uint32); - Uint32 getDataSize() const; // In 32-bit words, rounded up - void setDataSize(Uint32); // Set size to multiple of word size - bool isNULL() const; - void setNULL(); - - /** Print **/ - //void print(NdbOut&); - void print(FILE*); - - static Uint32 getByteSize(Uint32); - static Uint32 getDataSize(Uint32); - -public: - AttributeHeader(Uint32 = 0); - AttributeHeader(Uint32 anAttributeId, Uint32 aByteSize); - ~AttributeHeader(); - - Uint32 m_value; -}; - -/** - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ssssssssssssssssiiiiiiiiiiiiiiii - * - * i = Attribute Id - * s = Size of current "chunk" in bytes - 16 bits. - * To allow round up to word, max value is 0xFFFC (not checked). - * e - [ obsolete future ] - * Element data/Blob, read element of array - * If == 0 next data word contains attribute value. - * If == 1 next data word contains: - * For Array of Fixed size Elements - * Start Index (16 bit), Stop Index(16 bit) - * For Blob - * Start offset (32 bit) (length is defined in previous word) - * - * An attribute value equal to "null" is represented by setting s == 0. 
- */ - -inline -void AttributeHeader::init(Uint32* aHeaderPtr, Uint32 anAttributeId, - Uint32 aByteSize) -{ - AttributeHeader ah(anAttributeId, aByteSize); - *aHeaderPtr = ah.m_value; -} - -inline -AttributeHeader::AttributeHeader(Uint32 aHeader) -{ - m_value = aHeader; -} - -inline -AttributeHeader::AttributeHeader(Uint32 anAttributeId, Uint32 aByteSize) -{ - m_value = 0; - this->setAttributeId(anAttributeId); - this->setByteSize(aByteSize); -} - -inline -AttributeHeader::~AttributeHeader() -{} - -inline -Uint32 AttributeHeader::getHeaderSize() const -{ - // Should check 'e' bit here - return 1; -} - -inline -Uint32 AttributeHeader::getAttributeId() const -{ - return (m_value & 0xFFFF0000) >> 16; -} - -inline -void AttributeHeader::setAttributeId(Uint32 anAttributeId) -{ - m_value &= 0x0000FFFF; // Clear attribute id - m_value |= (anAttributeId << 16); -} - -inline -Uint32 AttributeHeader::getByteSize() const -{ - return (m_value & 0xFFFF); -} - -inline -void AttributeHeader::setByteSize(Uint32 aByteSize) -{ - m_value &= (~0xFFFF); - m_value |= aByteSize; -} - -inline -Uint32 AttributeHeader::getDataSize() const -{ - return (((m_value & 0xFFFF) + 3) >> 2); -} - -inline -void AttributeHeader::setDataSize(Uint32 aDataSize) -{ - m_value &= (~0xFFFF); - m_value |= (aDataSize << 2); -} - -inline -bool AttributeHeader::isNULL() const -{ - return (getDataSize() == 0); -} - -inline -void AttributeHeader::setNULL() -{ - setDataSize(0); -} - -inline -Uint32* AttributeHeader::getDataPtr() const -{ - return (Uint32*)&m_value + getHeaderSize(); -} - -inline -void AttributeHeader::insertHeader(Uint32* target) -{ - *target = m_value; -} - -inline -AttributeHeader* -AttributeHeader::getNext() const { - return (AttributeHeader*)(getDataPtr() + getDataSize()); -} - -inline -void -//AttributeHeader::print(NdbOut& output) { -AttributeHeader::print(FILE* output) { - fprintf(output, "AttributeId: H\'%.8x (D\'%d), DataSize: H\'%.8x (D\'%d), " - "isNULL: %d\n", - getAttributeId(), getAttributeId(), - getDataSize(), getDataSize(), - isNULL()); -} - -inline -Uint32 -AttributeHeader::getByteSize(Uint32 m_value){ - return (m_value & 0xFFFF); -} - -inline -Uint32 -AttributeHeader::getDataSize(Uint32 m_value){ - return (((m_value & 0xFFFF) + 3) >> 2); -} - -#endif - - - - - - - diff --git a/storage/ndb/include/kernel/AttributeList.hpp b/storage/ndb/include/kernel/AttributeList.hpp deleted file mode 100644 index e0e7b45dfe5..00000000000 --- a/storage/ndb/include/kernel/AttributeList.hpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ATTRIBUTE_LIST_HPP -#define ATTRIBUTE_LIST_HPP - -#include "ndb_limits.h" - -/** - * Masks and lists used by index and trigger. Must be plain old Uint32 data. 
- * XXX depends on other headers XXX move to some common file - */ - -typedef Bitmask<MAXNROFATTRIBUTESINWORDS> AttributeMask; - -template <Uint32 SZ> -struct Id_array -{ - Uint32 sz; - Uint32 id[SZ]; -}; - -typedef Id_array<MAX_ATTRIBUTES_IN_INDEX> AttributeList; - -#endif diff --git a/storage/ndb/include/kernel/BlockNumbers.h b/storage/ndb/include/kernel/BlockNumbers.h deleted file mode 100644 index 1c02dee4f1f..00000000000 --- a/storage/ndb/include/kernel/BlockNumbers.h +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BLOCK_NUMBERS_H -#define BLOCK_NUMBERS_H - -#include <kernel_types.h> -#include <RefConvert.hpp> - -/* 240 */ -#define MIN_API_BLOCK_NO 0x8000 - -/* 2047 */ -#define API_PACKED 0x07ff - -/* 4002 */ -#define API_CLUSTERMGR 0x0FA2 - -#define BACKUP 0xF4 -#define DBTC 0xF5 -#define DBDIH 0xF6 -#define DBLQH 0xF7 -#define DBACC 0xF8 -#define DBTUP 0xF9 -#define DBDICT 0xFA -#define NDBCNTR 0xFB -#define CNTR 0xFB -#define QMGR 0xFC -#define NDBFS 0xFD -#define CMVMI 0xFE -#define TRIX 0xFF -#define DBUTIL 0x100 -#define SUMA 0x101 -#define DBTUX 0x102 -#define TSMAN 0x103 -#define LGMAN 0x104 -#define PGMAN 0x105 -#define RESTORE 0x106 - -const BlockReference BACKUP_REF = numberToRef(BACKUP, 0); -const BlockReference DBTC_REF = numberToRef(DBTC, 0); -const BlockReference DBDIH_REF = numberToRef(DBDIH, 0); -const BlockReference DBLQH_REF = numberToRef(DBLQH, 0); -const BlockReference DBACC_REF = numberToRef(DBACC, 0); -const BlockReference DBTUP_REF = numberToRef(DBTUP, 0); -const BlockReference DBDICT_REF = numberToRef(DBDICT, 0); -const BlockReference NDBCNTR_REF = numberToRef(NDBCNTR, 0); -const BlockReference QMGR_REF = numberToRef(QMGR, 0); -const BlockReference NDBFS_REF = numberToRef(NDBFS, 0); -const BlockReference CMVMI_REF = numberToRef(CMVMI, 0); -const BlockReference TRIX_REF = numberToRef(TRIX, 0); -const BlockReference DBUTIL_REF = numberToRef(DBUTIL, 0); -const BlockReference SUMA_REF = numberToRef(SUMA, 0); -const BlockReference DBTUX_REF = numberToRef(DBTUX, 0); -const BlockReference TSMAN_REF = numberToRef(TSMAN, 0); -const BlockReference LGMAN_REF = numberToRef(LGMAN, 0); -const BlockReference PGMAN_REF = numberToRef(PGMAN, 0); -const BlockReference RESTORE_REF = numberToRef(RESTORE, 0); - -const BlockNumber MIN_BLOCK_NO = BACKUP; -const BlockNumber MAX_BLOCK_NO = RESTORE; -const BlockNumber NO_OF_BLOCKS = (MAX_BLOCK_NO - MIN_BLOCK_NO + 1); - -/** - * Used for printing and stuff - */ -struct BlockName { - const char* name; - BlockNumber number; -}; - -extern const BlockName BlockNames[]; -extern const BlockNumber NO_OF_BLOCK_NAMES; - -#endif diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h deleted file mode 100644 index da14c27abdc..00000000000 --- a/storage/ndb/include/kernel/GlobalSignalNumbers.h +++ /dev/null @@ -1,990 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is
subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GLOBAL_SIGNAL_NUMBERS_H -#define GLOBAL_SIGNAL_NUMBERS_H - -#include -/** - * NOTE - * - * When adding a new signal, remember to update MAX_GSN and SignalNames.cpp - */ -const GlobalSignalNumber MAX_GSN = 730; - -struct GsnName { - GlobalSignalNumber gsn; - const char * name; -}; - -extern const GsnName SignalNames[]; -extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; - -/** - * These are used by API and kernel - */ -#define GSN_API_REGCONF 1 -#define GSN_API_REGREF 2 -#define GSN_API_REGREQ 3 - -#define GSN_ATTRINFO 4 -#define GSN_TRANSID_AI 5 -#define GSN_KEYINFO 6 -#define GSN_READCONF 7 - -#define GSN_TCKEY_FAILCONF 8 -#define GSN_TCKEY_FAILREF 9 -#define GSN_TCKEYCONF 10 -#define GSN_TCKEYREF 11 -#define GSN_TCKEYREQ 12 - -#define GSN_TCROLLBACKCONF 13 -#define GSN_TCROLLBACKREF 14 -#define GSN_TCROLLBACKREQ 15 -#define GSN_TCROLLBACKREP 16 - -#define GSN_TC_COMMITCONF 17 -#define GSN_TC_COMMITREF 18 -#define GSN_TC_COMMITREQ 19 -#define GSN_TC_HBREP 20 - -#define GSN_TRANSID_AI_R 21 -#define GSN_KEYINFO20_R 22 - -#define GSN_GET_TABINFOREF 23 -#define GSN_GET_TABINFOREQ 24 -#define GSN_GET_TABINFO_CONF 190 - -#define GSN_GET_TABLEID_REQ 683 -#define GSN_GET_TABLEID_REF 684 -#define GSN_GET_TABLEID_CONF 685 - -#define GSN_DIHNDBTAMPER 25 -#define GSN_NODE_FAILREP 26 -#define GSN_NF_COMPLETEREP 27 - -#define GSN_SCAN_NEXTREQ 28 -#define GSN_SCAN_TABCONF 29 -/* 30 unused */ -#define GSN_SCAN_TABREF 31 -#define GSN_SCAN_TABREQ 32 -#define GSN_KEYINFO20 33 - -#define GSN_TCRELEASECONF 34 -#define GSN_TCRELEASEREF 35 -#define GSN_TCRELEASEREQ 36 - -#define GSN_TCSEIZECONF 37 -#define GSN_TCSEIZEREF 38 -#define GSN_TCSEIZEREQ 39 - -/* 40 unused */ -/* 41 unused */ -/* 42 unused */ -/* 43 unused */ -/* 44 unused */ -/* 45 unused */ -/* 46 unused */ -/* 47 unused */ -/* 48 unused */ -/* 49 unused */ -/* 50 unused */ -/* 51 unused */ -/* 52 unused */ -/* 53 unused */ -/* 54 unused */ -/* 55 unused */ -/* 56 unused */ -/* 57 unused */ -/* 58 unused */ -/* 59 unused */ -#define GSN_ALLOC_NODEID_REQ 60 -#define GSN_ALLOC_NODEID_CONF 61 -#define GSN_ALLOC_NODEID_REF 62 -/* 63 unused */ -/* 64 unused */ -/* 65 unused */ -/* 66 unused */ - -/** - * These are used only by kernel - */ - -#define GSN_ACC_ABORTCONF 67 -/* 68 not unused */ -/* 69 not unused */ -#define GSN_UPDATE_FRAG_DIST_KEY_ORD 70 -#define GSN_ACC_ABORTREQ 71 -#define GSN_ACC_CHECK_SCAN 72 -#define GSN_ACC_COMMITCONF 73 -#define GSN_ACC_COMMITREQ 74 -/* 75 unused */ -/* 76 unused */ - -/* 79 unused */ -/* 78 unused */ -/* 77 unused */ - -/* 80 unused */ -#define GSN_ACC_OVER_REC 81 - -/* 83 unused */ -#define GSN_ACC_SCAN_INFO 84 -#define GSN_ACC_SCAN_INFO24 85 -#define GSN_ACC_SCANCONF 86 -#define GSN_ACC_SCANREF 87 -#define GSN_ACC_SCANREQ 88 - -#define GSN_RESTORE_LCP_REQ 91 -#define GSN_RESTORE_LCP_REF 90 -#define GSN_RESTORE_LCP_CONF 89 - 
-#define GSN_ACC_TO_CONF 92 -#define GSN_ACC_TO_REF 93 -#define GSN_ACC_TO_REQ 94 -#define GSN_ACCFRAGCONF 95 -#define GSN_ACCFRAGREF 96 -#define GSN_ACCFRAGREQ 97 -#define GSN_ACCKEYCONF 98 -#define GSN_ACCKEYREF 99 -#define GSN_ACCKEYREQ 100 -#define GSN_ACCMINUPDATE 101 -#define GSN_ACCSEIZECONF 103 -#define GSN_ACCSEIZEREF 104 -#define GSN_ACCSEIZEREQ 105 -#define GSN_ACCUPDATECONF 106 -#define GSN_ACCUPDATEKEY 107 -#define GSN_ACCUPDATEREF 108 - -#define GSN_ADD_FRAGCONF 109 -#define GSN_ADD_FRAGREF 110 -#define GSN_ADD_FRAGREQ 111 - -#define GSN_API_START_REP 120 -#define GSN_API_FAILCONF 113 -#define GSN_API_FAILREQ 114 -#define GSN_CNTR_START_REQ 115 -/* 116 not unused */ -#define GSN_CNTR_START_REF 117 -#define GSN_CNTR_START_CONF 118 -#define GSN_CNTR_START_REP 119 -/* 120 not unused */ -#define GSN_ROUTE_ORD 121 -#define GSN_NODE_VERSION_REP 122 -/* 123 unused */ -/* 124 unused */ -#define GSN_CHECK_LCP_STOP 125 -#define GSN_CLOSE_COMCONF 126 /* local */ -#define GSN_CLOSE_COMREQ 127 /* local */ -#define GSN_CM_ACKADD 128 /* distr. */ -/* 129 unused */ -#define GSN_CM_ADD 130 /* distr. */ -/* 131 unused */ -/* 132 not unused */ -/* 133 not unused */ -#define GSN_CM_HEARTBEAT 134 /* distr. */ - -#define GSN_PREPARE_COPY_FRAG_REQ 135 -#define GSN_PREPARE_COPY_FRAG_REF 136 -#define GSN_PREPARE_COPY_FRAG_CONF 137 - -#define GSN_CM_NODEINFOCONF 138 /* distr. */ -#define GSN_CM_NODEINFOREF 139 /* distr. */ -#define GSN_CM_NODEINFOREQ 140 /* distr. */ -#define GSN_CM_REGCONF 141 /* distr. */ -#define GSN_CM_REGREF 142 /* distr. */ -#define GSN_CM_REGREQ 143 /* distr. */ -/* 144 unused */ -/* 145 unused */ -/* 146 unused */ -#define GSN_CM_ADD_REP 147 /* local */ -/* 148 unused */ -/* 149 unused */ -/* 150 unused */ -#define GSN_CNTR_WAITREP 151 /* distr. 
*/ -#define GSN_COMMIT 152 -#define GSN_COMMIT_FAILCONF 153 -#define GSN_COMMIT_FAILREQ 154 -#define GSN_COMMITCONF 155 -#define GSN_COMMITREQ 156 -#define GSN_COMMITTED 157 -#define GSN_COMPLETE 159 -#define GSN_COMPLETECONF 160 -#define GSN_COMPLETED 161 -#define GSN_COMPLETEREQ 162 -#define GSN_CONNECT_REP 163 -#define GSN_CONTINUEB 164 -/* 165 not unused */ -#define GSN_COPY_ACTIVECONF 166 -#define GSN_COPY_ACTIVEREF 167 -#define GSN_COPY_ACTIVEREQ 168 -#define GSN_COPY_FRAGCONF 169 -#define GSN_COPY_FRAGREF 170 -#define GSN_COPY_FRAGREQ 171 -#define GSN_COPY_GCICONF 172 -#define GSN_COPY_GCIREQ 173 -#define GSN_COPY_STATECONF 174 -#define GSN_COPY_STATEREQ 175 -#define GSN_COPY_TABCONF 176 -#define GSN_COPY_TABREQ 177 -#define GSN_CREATE_FRAGCONF 178 -#define GSN_CREATE_FRAGREF 179 -#define GSN_CREATE_FRAGREQ 180 -#define GSN_DEBUG_SIG 181 -#define GSN_DI_FCOUNTCONF 182 -#define GSN_DI_FCOUNTREF 183 -#define GSN_DI_FCOUNTREQ 184 -#define GSN_DIADDTABCONF 185 -#define GSN_DIADDTABREF 186 -#define GSN_DIADDTABREQ 187 -/* 188 not unused */ -/* 189 not unused */ -/* 190 not unused */ -#define GSN_DICTSTARTCONF 191 -#define GSN_DICTSTARTREQ 192 - -#define GSN_LIST_TABLES_REQ 193 -#define GSN_LIST_TABLES_CONF 194 - -#define GSN_ABORT 195 -#define GSN_ABORTCONF 196 -#define GSN_ABORTED 197 -#define GSN_ABORTREQ 198 - -/****************************************** - * DROP TABLE - * - */ - -/** - * This is drop table's public interface - */ -#define GSN_DROP_TABLE_REQ 82 -#define GSN_DROP_TABLE_REF 102 -#define GSN_DROP_TABLE_CONF 112 - -/** - * This is used for implementing drop table - */ -#define GSN_PREP_DROP_TAB_REQ 199 -#define GSN_PREP_DROP_TAB_REF 200 -#define GSN_PREP_DROP_TAB_CONF 201 - -#define GSN_DROP_TAB_REQ 202 -#define GSN_DROP_TAB_REF 203 -#define GSN_DROP_TAB_CONF 204 - -#define GSN_WAIT_DROP_TAB_REQ 208 -#define GSN_WAIT_DROP_TAB_REF 209 -#define GSN_WAIT_DROP_TAB_CONF 216 - -/*****************************************/ - -#define GSN_UPDATE_TOCONF 205 -#define GSN_UPDATE_TOREF 206 -#define GSN_UPDATE_TOREQ 207 - -#define GSN_DIGETNODESCONF 210 -#define GSN_DIGETNODESREF 211 -#define GSN_DIGETNODESREQ 212 -#define GSN_DIGETPRIMCONF 213 -#define GSN_DIGETPRIMREF 214 -#define GSN_DIGETPRIMREQ 215 - -#define GSN_DIH_RESTARTCONF 217 -#define GSN_DIH_RESTARTREF 218 -#define GSN_DIH_RESTARTREQ 219 - -/* 220 not unused */ -/* 221 not unused */ -/* 222 not unused */ - -#define GSN_EMPTY_LCP_REQ 223 -#define GSN_EMPTY_LCP_CONF 224 - -#define GSN_SCHEMA_INFO 225 -#define GSN_SCHEMA_INFOCONF 226 - -#define GSN_MASTER_GCPCONF 227 -#define GSN_MASTER_GCPREF 228 -#define GSN_MASTER_GCPREQ 229 - -/* 230 not unused */ -/* 231 not unused */ - -#define GSN_DIRELEASECONF 232 -#define GSN_DIRELEASEREF 233 -#define GSN_DIRELEASEREQ 234 -#define GSN_DISCONNECT_REP 235 -#define GSN_DISEIZECONF 236 -#define GSN_DISEIZEREF 237 -#define GSN_DISEIZEREQ 238 -#define GSN_DIVERIFYCONF 239 -#define GSN_DIVERIFYREF 240 -#define GSN_DIVERIFYREQ 241 -#define GSN_ENABLE_COMORD 242 -#define GSN_END_LCPCONF 243 -#define GSN_END_LCP_CONF 243 -#define GSN_END_LCPREQ 244 -#define GSN_END_LCP_REQ 244 -#define GSN_END_TOCONF 245 -#define GSN_END_TOREQ 246 -#define GSN_EVENT_REP 247 -#define GSN_EXEC_FRAGCONF 248 -#define GSN_EXEC_FRAGREF 249 -#define GSN_EXEC_FRAGREQ 250 -#define GSN_EXEC_SRCONF 251 -#define GSN_EXEC_SRREQ 252 -#define GSN_EXPANDCHECK2 253 -#define GSN_FAIL_REP 254 -#define GSN_FSCLOSECONF 255 -#define GSN_FSCLOSEREF 256 -#define GSN_FSCLOSEREQ 257 -#define GSN_FSAPPENDCONF 258 -#define GSN_FSOPENCONF 
259 -#define GSN_FSOPENREF 260 -#define GSN_FSOPENREQ 261 -#define GSN_FSREADCONF 262 -#define GSN_FSREADREF 263 -#define GSN_FSREADREQ 264 -#define GSN_FSSYNCCONF 265 -#define GSN_FSSYNCREF 266 -#define GSN_FSSYNCREQ 267 -#define GSN_FSAPPENDREQ 268 -#define GSN_FSAPPENDREF 269 -#define GSN_FSWRITECONF 270 -#define GSN_FSWRITEREF 271 -#define GSN_FSWRITEREQ 272 -#define GSN_GCP_ABORT 273 -#define GSN_GCP_ABORTED 274 -#define GSN_GCP_COMMIT 275 -#define GSN_GCP_NODEFINISH 276 -#define GSN_GCP_NOMORETRANS 277 -#define GSN_GCP_PREPARE 278 -#define GSN_GCP_PREPARECONF 279 -#define GSN_GCP_PREPAREREF 280 -#define GSN_GCP_SAVECONF 281 -#define GSN_GCP_SAVEREF 282 -#define GSN_GCP_SAVEREQ 283 -#define GSN_GCP_TCFINISHED 284 - -/* 285 unused */ -/* 286 unused */ -/* 287 unused */ -#define GSN_GETGCICONF 288 -#define GSN_GETGCIREQ 289 -#define GSN_HOT_SPAREREP 290 -#define GSN_INCL_NODECONF 291 -#define GSN_INCL_NODEREF 292 -#define GSN_INCL_NODEREQ 293 - -#define GSN_LCP_PREPARE_REQ 296 -#define GSN_LCP_PREPARE_REF 295 -#define GSN_LCP_PREPARE_CONF 294 - -/* 297 unused */ -/* 298 unused */ -/* 299 unused */ -#define GSN_SHRINKCHECK2 301 -#define GSN_GET_SCHEMA_INFOREQ 302 -/* 303 not unused */ -/* 304 not unused */ -#define GSN_LQH_RESTART_OP 305 -#define GSN_LQH_TRANSCONF 306 -#define GSN_LQH_TRANSREQ 307 -#define GSN_LQHADDATTCONF 308 -#define GSN_LQHADDATTREF 309 -#define GSN_LQHADDATTREQ 310 -#define GSN_LQHFRAGCONF 311 -#define GSN_LQHFRAGREF 312 -#define GSN_LQHFRAGREQ 313 -#define GSN_LQHKEYCONF 314 -#define GSN_LQHKEYREF 315 -#define GSN_LQHKEYREQ 316 - -#define GSN_MASTER_LCPCONF 318 -#define GSN_MASTER_LCPREF 319 -#define GSN_MASTER_LCPREQ 320 - -#define GSN_MEMCHECKCONF 321 -#define GSN_MEMCHECKREQ 322 -#define GSN_NDB_FAILCONF 323 -#define GSN_NDB_STARTCONF 324 -#define GSN_NDB_STARTREF 325 -#define GSN_NDB_STARTREQ 326 -#define GSN_NDB_STTOR 327 -#define GSN_NDB_STTORRY 328 -#define GSN_NDB_TAMPER 329 -#define GSN_NEXT_SCANCONF 330 -#define GSN_NEXT_SCANREF 331 -#define GSN_NEXT_SCANREQ 332 -#define GSN_NEXTOPERATION 333 - -#define GSN_READ_CONFIG_REQ 334 /* new name for sizealt, local */ -#define GSN_READ_CONFIG_CONF 335 /* new name for sizealt, local */ - -/* 336 unused */ -/* 337 unused */ -/* 338 unused */ -#define GSN_OPEN_COMCONF 339 -#define GSN_OPEN_COMREF 340 -#define GSN_OPEN_COMREQ 341 -#define GSN_PACKED_SIGNAL 342 -#define GSN_PREP_FAILCONF 343 -#define GSN_PREP_FAILREF 344 -#define GSN_PREP_FAILREQ 345 -#define GSN_PRES_TOCONF 346 -#define GSN_PRES_TOREQ 347 -#define GSN_READ_NODESCONF 348 -#define GSN_READ_NODESREF 349 -#define GSN_READ_NODESREQ 350 -#define GSN_SCAN_FRAGCONF 351 -#define GSN_SCAN_FRAGREF 352 -#define GSN_SCAN_FRAGREQ 353 -#define GSN_SCAN_HBREP 354 -#define GSN_SCAN_PROCCONF 355 -#define GSN_SCAN_PROCREQ 356 -#define GSN_SEND_PACKED 357 -#define GSN_SET_LOGLEVELORD 358 - -#define GSN_LQH_ALLOCREQ 359 -#define GSN_TUP_ALLOCREQ 360 -#define GSN_TUP_DEALLOCREQ 361 - -/* 362 not unused */ - -#define GSN_TUP_WRITELOG_REQ 363 -#define GSN_LQH_WRITELOG_REQ 364 - -#define GSN_LCP_FRAG_REP 300 -#define GSN_LCP_FRAG_ORD 365 -#define GSN_LCP_COMPLETE_REP 158 - -#define GSN_START_LCP_REQ 317 -#define GSN_START_LCP_CONF 366 - -#define GSN_UNBLO_DICTCONF 367 -#define GSN_UNBLO_DICTREQ 368 -#define GSN_START_COPYCONF 369 -#define GSN_START_COPYREF 370 -#define GSN_START_COPYREQ 371 -#define GSN_START_EXEC_SR 372 -#define GSN_START_FRAGCONF 373 -#define GSN_START_FRAGREF 374 -#define GSN_START_FRAGREQ 375 -#define GSN_START_LCP_REF 376 -#define GSN_START_LCP_ROUND 
377 -#define GSN_START_MECONF 378 -#define GSN_START_MEREF 379 -#define GSN_START_MEREQ 380 -#define GSN_START_PERMCONF 381 -#define GSN_START_PERMREF 382 -#define GSN_START_PERMREQ 383 -#define GSN_START_RECCONF 384 -#define GSN_START_RECREF 385 -#define GSN_START_RECREQ 386 -#define GSN_START_TOCONF 387 -#define GSN_START_TOREQ 388 -#define GSN_STORED_PROCCONF 389 -#define GSN_STORED_PROCREF 390 -#define GSN_STORED_PROCREQ 391 -#define GSN_STTOR 392 -#define GSN_STTORRY 393 -#define GSN_BACKUP_TRIG_REQ 394 -#define GSN_SYSTEM_ERROR 395 -#define GSN_TAB_COMMITCONF 396 -#define GSN_TAB_COMMITREF 397 -#define GSN_TAB_COMMITREQ 398 -#define GSN_TAKE_OVERTCCONF 399 -#define GSN_TAKE_OVERTCREQ 400 -#define GSN_TC_CLOPSIZECONF 401 -#define GSN_TC_CLOPSIZEREQ 402 -#define GSN_TC_SCHVERCONF 403 -#define GSN_TC_SCHVERREQ 404 -#define GSN_TCGETOPSIZECONF 405 -#define GSN_TCGETOPSIZEREQ 406 -#define GSN_TEST_ORD 407 -#define GSN_TESTSIG 408 -#define GSN_TIME_SIGNAL 409 -#define GSN_TUP_ABORTREQ 414 -#define GSN_TUP_ADD_ATTCONF 415 -#define GSN_TUP_ADD_ATTRREF 416 -#define GSN_TUP_ADD_ATTRREQ 417 -#define GSN_TUP_ATTRINFO 418 -#define GSN_TUP_COMMITREQ 419 - -/* 421 unused */ -/* 422 unused */ -/* 423 unused */ - -/* 424 unused */ -/* 425 unused */ -/* 426 unused */ -/* 427 unused */ -/* 428 unused */ -/* 429 unused */ -/* 430 unused */ -#define GSN_TUPFRAGCONF 431 -#define GSN_TUPFRAGREF 432 -#define GSN_TUPFRAGREQ 433 -#define GSN_TUPKEYCONF 434 -#define GSN_TUPKEYREF 435 -#define GSN_TUPKEYREQ 436 -#define GSN_TUPRELEASECONF 437 -#define GSN_TUPRELEASEREF 438 -#define GSN_TUPRELEASEREQ 439 -#define GSN_TUPSEIZECONF 440 -#define GSN_TUPSEIZEREF 441 -#define GSN_TUPSEIZEREQ 442 - -#define GSN_ABORT_ALL_REQ 445 -#define GSN_ABORT_ALL_REF 446 -#define GSN_ABORT_ALL_CONF 447 - -/* 448 unused - formerly GSN_STATISTICS_REQ */ -#define GSN_STOP_ORD 449 -#define GSN_TAMPER_ORD 450 -/* 451 unused - formerly GSN_SET_VAR_REQ */ -/* 452 unused - formerly GSN_SET_VAR_CONF */ -/* 453 unused - formerly GSN_SET_VAR_REF */ -/* 454 unused - formerly GSN_STATISTICS_CONF */ - -#define GSN_START_ORD 455 -/* 457 unused */ - -#define GSN_EVENT_SUBSCRIBE_REQ 458 -#define GSN_EVENT_SUBSCRIBE_CONF 459 -#define GSN_EVENT_SUBSCRIBE_REF 460 -/* 461 unused */ -/* 462 unused */ -/* 463 unused */ -/* 464 unused */ - -#define GSN_DUMP_STATE_ORD 465 - -#define GSN_START_INFOREQ 466 -#define GSN_START_INFOREF 467 -#define GSN_START_INFOCONF 468 - -#define GSN_TC_COMMIT_ACK 469 -#define GSN_REMOVE_MARKER_ORD 470 - -#define GSN_CHECKNODEGROUPSREQ 471 -#define GSN_CHECKNODEGROUPSCONF 472 - -/* 473 unused */ -#define GSN_ARBIT_PREPREQ 474 -#define GSN_ARBIT_PREPCONF 475 -#define GSN_ARBIT_PREPREF 476 -#define GSN_ARBIT_STARTREQ 477 -#define GSN_ARBIT_STARTCONF 478 -#define GSN_ARBIT_STARTREF 479 -#define GSN_ARBIT_CHOOSEREQ 480 -#define GSN_ARBIT_CHOOSECONF 481 -#define GSN_ARBIT_CHOOSEREF 482 -#define GSN_ARBIT_STOPORD 483 -#define GSN_ARBIT_STOPREP 484 - -#define GSN_BLOCK_COMMIT_ORD 485 -#define GSN_UNBLOCK_COMMIT_ORD 486 - -#define GSN_NODE_START_REP 502 -#define GSN_NODE_STATE_REP 487 -#define GSN_CHANGE_NODE_STATE_REQ 488 -#define GSN_CHANGE_NODE_STATE_CONF 489 - -#define GSN_DIH_SWITCH_REPLICA_REQ 490 -#define GSN_DIH_SWITCH_REPLICA_CONF 491 -#define GSN_DIH_SWITCH_REPLICA_REF 492 - -#define GSN_STOP_PERM_REQ 493 -#define GSN_STOP_PERM_REF 494 -#define GSN_STOP_PERM_CONF 495 - -#define GSN_STOP_ME_REQ 496 -#define GSN_STOP_ME_REF 497 -#define GSN_STOP_ME_CONF 498 - -#define GSN_WAIT_GCP_REQ 499 -#define GSN_WAIT_GCP_REF 500 
-#define GSN_WAIT_GCP_CONF 501 - -/* 502 used */ - -/** - * Trigger and index signals - */ - -/** - * These are used by API and kernel - */ -#define GSN_TRIG_ATTRINFO 503 -#define GSN_CREATE_TRIG_REQ 504 -#define GSN_CREATE_TRIG_CONF 505 -#define GSN_CREATE_TRIG_REF 506 -#define GSN_ALTER_TRIG_REQ 507 -#define GSN_ALTER_TRIG_CONF 508 -#define GSN_ALTER_TRIG_REF 509 -#define GSN_CREATE_INDX_REQ 510 -#define GSN_CREATE_INDX_CONF 511 -#define GSN_CREATE_INDX_REF 512 -#define GSN_DROP_TRIG_REQ 513 -#define GSN_DROP_TRIG_CONF 514 -#define GSN_DROP_TRIG_REF 515 -#define GSN_DROP_INDX_REQ 516 -#define GSN_DROP_INDX_CONF 517 -#define GSN_DROP_INDX_REF 518 -#define GSN_TCINDXREQ 519 -#define GSN_TCINDXCONF 520 -#define GSN_TCINDXREF 521 -#define GSN_INDXKEYINFO 522 -#define GSN_INDXATTRINFO 523 -#define GSN_TCINDXNEXTREQ 524 -#define GSN_TCINDXNEXTCONF 525 -#define GSN_TCINDXNEXREF 526 -#define GSN_FIRE_TRIG_ORD 527 - -/** - * These are used only by kernel - */ -#define GSN_BUILDINDXREQ 528 -#define GSN_BUILDINDXCONF 529 -#define GSN_BUILDINDXREF 530 - -/** - * Backup interface - */ -#define GSN_BACKUP_REQ 531 -#define GSN_BACKUP_DATA 532 -#define GSN_BACKUP_REF 533 -#define GSN_BACKUP_CONF 534 - -#define GSN_ABORT_BACKUP_ORD 535 - -#define GSN_BACKUP_ABORT_REP 536 -#define GSN_BACKUP_COMPLETE_REP 537 -#define GSN_BACKUP_NF_COMPLETE_REP 538 - -/** - * Internal backup signals - */ -#define GSN_DEFINE_BACKUP_REQ 539 -#define GSN_DEFINE_BACKUP_REF 540 -#define GSN_DEFINE_BACKUP_CONF 541 - -#define GSN_START_BACKUP_REQ 542 -#define GSN_START_BACKUP_REF 543 -#define GSN_START_BACKUP_CONF 544 - -#define GSN_BACKUP_FRAGMENT_REQ 545 -#define GSN_BACKUP_FRAGMENT_REF 546 -#define GSN_BACKUP_FRAGMENT_CONF 547 - -#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575 - -#define GSN_STOP_BACKUP_REQ 548 -#define GSN_STOP_BACKUP_REF 549 -#define GSN_STOP_BACKUP_CONF 550 - -/** - * Used for master take-over / API status request - */ -#define GSN_BACKUP_STATUS_REQ 551 -#define GSN_BACKUP_STATUS_REF 116 -#define GSN_BACKUP_STATUS_CONF 165 - -/** - * Db sequence signals - */ -#define GSN_UTIL_SEQUENCE_REQ 552 -#define GSN_UTIL_SEQUENCE_REF 553 -#define GSN_UTIL_SEQUENCE_CONF 554 - -#define GSN_FSREMOVEREQ 555 -#define GSN_FSREMOVEREF 556 -#define GSN_FSREMOVECONF 557 - -#define GSN_UTIL_PREPARE_REQ 558 -#define GSN_UTIL_PREPARE_CONF 559 -#define GSN_UTIL_PREPARE_REF 560 - -#define GSN_UTIL_EXECUTE_REQ 561 -#define GSN_UTIL_EXECUTE_CONF 562 -#define GSN_UTIL_EXECUTE_REF 563 - -#define GSN_UTIL_RELEASE_REQ 564 -#define GSN_UTIL_RELEASE_CONF 565 -#define GSN_UTIL_RELEASE_REF 566 - -/** - * When dropping a long signal due to lack of memory resources - */ -#define GSN_SIGNAL_DROPPED_REP 567 -#define GSN_CONTINUE_FRAGMENTED 568 - -/** - * Suma participant interface - */ -#define GSN_SUB_REMOVE_REQ 569 -#define GSN_SUB_REMOVE_REF 570 -#define GSN_SUB_REMOVE_CONF 571 -#define GSN_SUB_STOP_REQ 572 -#define GSN_SUB_STOP_REF 573 -#define GSN_SUB_STOP_CONF 574 -/* 575 used */ -#define GSN_SUB_CREATE_REQ 576 -#define GSN_SUB_CREATE_REF 577 -#define GSN_SUB_CREATE_CONF 578 -#define GSN_SUB_START_REQ 579 -#define GSN_SUB_START_REF 580 -#define GSN_SUB_START_CONF 581 -#define GSN_SUB_SYNC_REQ 582 -#define GSN_SUB_SYNC_REF 583 -#define GSN_SUB_SYNC_CONF 584 -/* 585 unused */ -#define GSN_SUB_TABLE_DATA 586 - -#define GSN_CREATE_TABLE_REQ 587 -#define GSN_CREATE_TABLE_REF 588 -#define GSN_CREATE_TABLE_CONF 589 - -#define GSN_ALTER_TABLE_REQ 624 -#define GSN_ALTER_TABLE_REF 625 -#define GSN_ALTER_TABLE_CONF 626 - -#define 
GSN_SUB_SYNC_CONTINUE_REQ 590 -#define GSN_SUB_SYNC_CONTINUE_REF 591 -#define GSN_SUB_SYNC_CONTINUE_CONF 592 -#define GSN_SUB_GCP_COMPLETE_REP 593 - -#define GSN_CREATE_FRAGMENTATION_REQ 594 -#define GSN_CREATE_FRAGMENTATION_REF 595 -#define GSN_CREATE_FRAGMENTATION_CONF 596 - -#define GSN_CREATE_TAB_REQ 597 -#define GSN_CREATE_TAB_REF 598 -#define GSN_CREATE_TAB_CONF 599 - -#define GSN_ALTER_TAB_REQ 600 -#define GSN_ALTER_TAB_REF 601 -#define GSN_ALTER_TAB_CONF 602 - -#define GSN_ALTER_INDX_REQ 603 -#define GSN_ALTER_INDX_REF 604 -#define GSN_ALTER_INDX_CONF 605 - -#define GSN_ALTER_TABLE_REP 606 -#define GSN_API_BROADCAST_REP 607 -#define GSN_608 -#define GSN_609 -#define GSN_610 -#define GSN_611 - -#define GSN_612 -#define GSN_613 -#define GSN_614 -#define GSN_615 -#define GSN_616 -#define GSN_617 - -#define GSN_618 -#define GSN_619 -#define GSN_620 -#define GSN_621 -#define GSN_622 -#define GSN_623 - -#define GSN_627 -#define GSN_628 -#define GSN_629 -#define GSN_630 -#define GSN_631 -#define GSN_632 -#define GSN_633 -#define GSN_634 -#define GSN_635 -#define GSN_636 -#define GSN_637 -#define GSN_638 -#define GSN_639 -#define GSN_640 -#define GSN_641 -#define GSN_642 -#define GSN_643 -#define GSN_644 -#define GSN_645 -#define GSN_646 -#define GSN_647 -#define GSN_648 -#define GSN_649 - -#define GSN_650 -#define GSN_651 -#define GSN_652 -#define GSN_653 -#define GSN_654 -#define GSN_655 - -#define GSN_656 - -#define GSN_UTIL_CREATE_LOCK_REQ 132 -#define GSN_UTIL_CREATE_LOCK_REF 133 -#define GSN_UTIL_CREATE_LOCK_CONF 188 - -#define GSN_UTIL_DESTROY_LOCK_REQ 189 -#define GSN_UTIL_DESTROY_LOCK_REF 220 -#define GSN_UTIL_DESTROY_LOCK_CONF 221 - -#define GSN_UTIL_LOCK_REQ 222 -#define GSN_UTIL_LOCK_REF 230 -#define GSN_UTIL_LOCK_CONF 231 - -#define GSN_UTIL_UNLOCK_REQ 303 -#define GSN_UTIL_UNLOCK_REF 304 -#define GSN_UTIL_UNLOCK_CONF 362 - -/* SUMA */ -#define GSN_CREATE_SUBID_REQ 661 -#define GSN_CREATE_SUBID_REF 662 -#define GSN_CREATE_SUBID_CONF 663 - -/* used 664 */ -/* used 665 */ -/* used 666 */ -/* used 667 */ -/* used 668 */ -/* used 669 */ - -/* - * TUX - */ -#define GSN_TUXFRAGREQ 670 -#define GSN_TUXFRAGCONF 671 -#define GSN_TUXFRAGREF 672 -#define GSN_TUX_ADD_ATTRREQ 673 -#define GSN_TUX_ADD_ATTRCONF 674 -#define GSN_TUX_ADD_ATTRREF 675 - -/* - * REP - */ -#define GSN_REP_DISCONNECT_REP 676 - -#define GSN_TUX_MAINT_REQ 677 -#define GSN_TUX_MAINT_CONF 678 -#define GSN_TUX_MAINT_REF 679 - -/* not used 680 */ -/* not used 681 */ - -/** - * from mgmtsrvr to NDBCNTR - */ -#define GSN_RESUME_REQ 682 -#define GSN_STOP_REQ 443 -#define GSN_STOP_REF 444 -#define GSN_STOP_CONF 456 -#define GSN_API_VERSION_REQ 697 -#define GSN_API_VERSION_CONF 698 - -/* not used 686 */ -/* not used 687 */ -/* not used 689 */ -/* not used 690 */ - -/** - * SUMA restart protocol - */ -#define GSN_SUMA_START_ME_REQ 691 -#define GSN_SUMA_START_ME_REF 694 -#define GSN_SUMA_START_ME_CONF 695 -#define GSN_SUMA_HANDOVER_REQ 692 -#define GSN_SUMA_HANDOVER_REF 696 -#define GSN_SUMA_HANDOVER_CONF 693 - -/* used 694 */ -/* used 695 */ -/* used 696 */ - -#define GSN_706 -#define GSN_707 -#define GSN_708 -#define GSN_709 - - -/* - * EVENT Signals - */ -#define GSN_SUB_GCP_COMPLETE_ACK 699 - -#define GSN_CREATE_EVNT_REQ 700 -#define GSN_CREATE_EVNT_CONF 701 -#define GSN_CREATE_EVNT_REF 702 - -#define GSN_DROP_EVNT_REQ 703 -#define GSN_DROP_EVNT_CONF 704 -#define GSN_DROP_EVNT_REF 705 - -#define GSN_TUX_BOUND_INFO 710 - -#define GSN_ACC_LOCKREQ 711 -#define GSN_READ_PSEUDO_REQ 712 - -/** - * Filegroup - */ -#define 
GSN_CREATE_FILEGROUP_REQ 713 -#define GSN_CREATE_FILEGROUP_REF 714 -#define GSN_CREATE_FILEGROUP_CONF 715 - -#define GSN_CREATE_FILE_REQ 716 -#define GSN_CREATE_FILE_REF 717 -#define GSN_CREATE_FILE_CONF 718 - -#define GSN_DROP_FILEGROUP_REQ 719 -#define GSN_DROP_FILEGROUP_REF 720 -#define GSN_DROP_FILEGROUP_CONF 721 - -#define GSN_DROP_FILE_REQ 722 -#define GSN_DROP_FILE_REF 723 -#define GSN_DROP_FILE_CONF 724 - -#define GSN_CREATE_OBJ_REQ 725 -#define GSN_CREATE_OBJ_REF 726 -#define GSN_CREATE_OBJ_CONF 727 - -#define GSN_DROP_OBJ_REQ 728 -#define GSN_DROP_OBJ_REF 729 -#define GSN_DROP_OBJ_CONF 730 - -#define GSN_ALLOC_EXTENT_REQ 68 -#define GSN_FREE_EXTENT_REQ 69 - -#define GSN_DICT_COMMIT_REQ 664 -#define GSN_DICT_COMMIT_REF 665 -#define GSN_DICT_COMMIT_CONF 666 - -#define GSN_DICT_ABORT_REQ 667 -#define GSN_DICT_ABORT_REF 668 -#define GSN_DICT_ABORT_CONF 669 - -/* DICT LOCK signals */ -#define GSN_DICT_LOCK_REQ 410 -#define GSN_DICT_LOCK_CONF 411 -#define GSN_DICT_LOCK_REF 412 -#define GSN_DICT_UNLOCK_ORD 420 - -#endif diff --git a/storage/ndb/include/kernel/GrepEvent.hpp b/storage/ndb/include/kernel/GrepEvent.hpp deleted file mode 100644 index fbef9560028..00000000000 --- a/storage/ndb/include/kernel/GrepEvent.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GREP_EVENT_H -#define GREP_EVENT_H - -class GrepEvent { -public: - enum Subscription { - GrepSS_CreateSubIdConf = 1, - GrepSS_SubCreateConf = 2, - GrepSS_SubStartMetaConf = 3, - GrepSS_SubStartDataConf = 4, - GrepSS_SubSyncDataConf = 5, - GrepSS_SubSyncMetaConf = 6, - GrepSS_SubRemoveConf = 7, - - GrepPS_CreateSubIdConf = 8, - GrepPS_SubCreateConf = 9, - GrepPS_SubStartMetaConf = 10, - GrepPS_SubStartDataConf = 11, - GrepPS_SubSyncMetaConf = 12, - GrepPS_SubSyncDataConf = 13, - GrepPS_SubRemoveConf = 14, - - GrepPS_CreateSubIdRef = 15, - GrepPS_SubCreateRef = 16, - GrepPS_SubStartMetaRef = 17, - GrepPS_SubStartDataRef = 18, - GrepPS_SubSyncMetaRef = 19, - GrepPS_SubSyncDataRef = 20, - GrepPS_SubRemoveRef = 21, - - GrepSS_CreateSubIdRef = 22, - GrepSS_SubCreateRef = 23, - GrepSS_SubStartMetaRef = 24, - GrepSS_SubStartDataRef = 25, - GrepSS_SubSyncMetaRef = 26, - GrepSS_SubSyncDataRef = 27, - GrepSS_SubRemoveRef = 28, - - Rep_Disconnect = 29 - - }; -}; -#endif diff --git a/storage/ndb/include/kernel/Interpreter.hpp b/storage/ndb/include/kernel/Interpreter.hpp deleted file mode 100644 index 8e9a6c01ccc..00000000000 --- a/storage/ndb/include/kernel/Interpreter.hpp +++ /dev/null @@ -1,284 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_INTERPRETER_HPP -#define NDB_INTERPRETER_HPP - -#include - -class Interpreter { -public: - - inline static Uint32 mod4(Uint32 len){ - return len + ((4 - (len & 3)) & 3); - } - - - /** - * General Mnemonic format - * - * i = Instruction - 5 Bits ( 0 - 5 ) max 63 - * x = Register 1 - 3 Bits ( 6 - 8 ) max 7 - * y = Register 2 - 3 Bits ( 9 -11 ) max 7 - * b = Branch offset (only branches) - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * iiiiiixxxyyy bbbbbbbbbbbbbbbb - * - * - */ - - /** - * Instructions - */ - STATIC_CONST( READ_ATTR_INTO_REG = 1 ); - STATIC_CONST( WRITE_ATTR_FROM_REG = 2 ); - STATIC_CONST( LOAD_CONST_NULL = 3 ); - STATIC_CONST( LOAD_CONST16 = 4 ); - STATIC_CONST( LOAD_CONST32 = 5 ); - STATIC_CONST( LOAD_CONST64 = 6 ); - STATIC_CONST( ADD_REG_REG = 7 ); - STATIC_CONST( SUB_REG_REG = 8 ); - STATIC_CONST( BRANCH = 9 ); - STATIC_CONST( BRANCH_REG_EQ_NULL = 10 ); - STATIC_CONST( BRANCH_REG_NE_NULL = 11 ); - STATIC_CONST( BRANCH_EQ_REG_REG = 12 ); - STATIC_CONST( BRANCH_NE_REG_REG = 13 ); - STATIC_CONST( BRANCH_LT_REG_REG = 14 ); - STATIC_CONST( BRANCH_LE_REG_REG = 15 ); - STATIC_CONST( BRANCH_GT_REG_REG = 16 ); - STATIC_CONST( BRANCH_GE_REG_REG = 17 ); - STATIC_CONST( EXIT_OK = 18 ); - STATIC_CONST( EXIT_REFUSE = 19 ); - STATIC_CONST( CALL = 20 ); - STATIC_CONST( RETURN = 21 ); - STATIC_CONST( EXIT_OK_LAST = 22 ); - STATIC_CONST( BRANCH_ATTR_OP_ARG = 23 ); - STATIC_CONST( BRANCH_ATTR_EQ_NULL = 24 ); - STATIC_CONST( BRANCH_ATTR_NE_NULL = 25 ); - - /** - * Macros for creating code - */ - static Uint32 Read(Uint32 AttrId, Uint32 Register); - static Uint32 Write(Uint32 AttrId, Uint32 Register); - - static Uint32 LoadNull(Uint32 Register); - static Uint32 LoadConst16(Uint32 Register, Uint32 Value); - static Uint32 LoadConst32(Uint32 Register); // Value in next word - static Uint32 LoadConst64(Uint32 Register); // Value in next 2 words - static Uint32 Add(Uint32 DstReg, Uint32 SrcReg1, Uint32 SrcReg2); - static Uint32 Sub(Uint32 DstReg, Uint32 SrcReg1, Uint32 SrcReg2); - static Uint32 Branch(Uint32 Inst, Uint32 Reg1, Uint32 Reg2); - static Uint32 ExitOK(); - - /** - * Branch string - * - * i = Instruction - 5 Bits ( 0 - 5 ) max 63 - * a = Attribute id - * l = Length of string - * b = Branch offset - * t = branch type - * d = Array length diff - * v = Varchar flag - * p = No-blank-padding flag for char compare - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * iiiiii ddvtttpbbbbbbbbbbbbbbbb - * aaaaaaaaaaaaaaaallllllllllllllll - * -string.... 
- - */ - enum UnaryCondition { - IS_NULL = 0, - IS_NOT_NULL = 1 - }; - - enum BinaryCondition { - EQ = 0, - NE = 1, - LT = 2, - LE = 3, - GT = 4, - GE = 5, - LIKE = 6, - NOT_LIKE = 7 - }; - static Uint32 BranchCol(BinaryCondition cond, - Uint32 arrayLengthDiff, Uint32 varchar, bool nopad); - static Uint32 BranchCol_2(Uint32 AttrId); - static Uint32 BranchCol_2(Uint32 AttrId, Uint32 Len); - - static Uint32 getBinaryCondition(Uint32 op1); - static Uint32 getArrayLengthDiff(Uint32 op1); - static Uint32 isVarchar(Uint32 op1); - static Uint32 isNopad(Uint32 op1); - static Uint32 getBranchCol_AttrId(Uint32 op2); - static Uint32 getBranchCol_Len(Uint32 op2); - - /** - * Macros for decoding code - */ - static Uint32 getOpCode(Uint32 op); - static Uint32 getReg1(Uint32 op); - static Uint32 getReg2(Uint32 op); - static Uint32 getReg3(Uint32 op); -}; - -inline -Uint32 -Interpreter::Read(Uint32 AttrId, Uint32 Register){ - return (AttrId << 16) + (Register << 6) + READ_ATTR_INTO_REG; -} - -inline -Uint32 -Interpreter::Write(Uint32 AttrId, Uint32 Register){ - return (AttrId << 16) + (Register << 6) + WRITE_ATTR_FROM_REG; -} - -inline -Uint32 -Interpreter::LoadConst16(Uint32 Register, Uint32 Value){ - return (Value << 16) + (Register << 6) + LOAD_CONST16; -} - -inline -Uint32 -Interpreter::LoadConst32(Uint32 Register){ - return (Register << 6) + LOAD_CONST32; -} - -inline -Uint32 -Interpreter::LoadConst64(Uint32 Register){ - return (Register << 6) + LOAD_CONST64; -} - -inline -Uint32 -Interpreter::Add(Uint32 Dcoleg, Uint32 SrcReg1, Uint32 SrcReg2){ - return (SrcReg1 << 6) + (SrcReg2 << 9) + (Dcoleg << 16) + ADD_REG_REG; -} - -inline -Uint32 -Interpreter::Sub(Uint32 Dcoleg, Uint32 SrcReg1, Uint32 SrcReg2){ - return (SrcReg1 << 6) + (SrcReg2 << 9) + (Dcoleg << 16) + SUB_REG_REG; -} - -inline -Uint32 -Interpreter::Branch(Uint32 Inst, Uint32 Reg1, Uint32 Reg2){ - return (Reg1 << 9) + (Reg2 << 6) + Inst; -} - -inline -Uint32 -Interpreter::BranchCol(BinaryCondition cond, - Uint32 arrayLengthDiff, - Uint32 varchar, bool nopad){ - //ndbout_c("BranchCol: cond=%d diff=%u varchar=%u nopad=%d", - //cond, arrayLengthDiff, varchar, nopad); - return - BRANCH_ATTR_OP_ARG + - (arrayLengthDiff << 9) + - (varchar << 11) + - (cond << 12) + - (nopad << 15); -} - -inline -Uint32 -Interpreter::BranchCol_2(Uint32 AttrId, Uint32 Len){ - return (AttrId << 16) + Len; -} - -inline -Uint32 -Interpreter::BranchCol_2(Uint32 AttrId){ - return (AttrId << 16); -} - -inline -Uint32 -Interpreter::getBinaryCondition(Uint32 op){ - return (op >> 12) & 0x7; -} - -inline -Uint32 -Interpreter::getArrayLengthDiff(Uint32 op){ - return (op >> 9) & 0x3; -} - -inline -Uint32 -Interpreter::isVarchar(Uint32 op){ - return (op >> 11) & 1; -} - -inline -Uint32 -Interpreter::isNopad(Uint32 op){ - return (op >> 15) & 1; -} - -inline -Uint32 -Interpreter::getBranchCol_AttrId(Uint32 op){ - return (op >> 16) & 0xFFFF; -} - -inline -Uint32 -Interpreter::getBranchCol_Len(Uint32 op){ - return op & 0xFFFF; -} - -inline -Uint32 -Interpreter::ExitOK(){ - return EXIT_OK; -} - -inline -Uint32 -Interpreter::getOpCode(Uint32 op){ - return op & 0x3f; -} - -inline -Uint32 -Interpreter::getReg1(Uint32 op){ - return (op >> 6) & 0x7; -} - -inline -Uint32 -Interpreter::getReg2(Uint32 op){ - return (op >> 9) & 0x7; -} - -inline -Uint32 -Interpreter::getReg3(Uint32 op){ - return (op >> 16) & 0x7; -} - -#endif diff --git a/storage/ndb/include/kernel/LogLevel.hpp b/storage/ndb/include/kernel/LogLevel.hpp deleted file mode 100644 index 138f299187c..00000000000 --- 
a/storage/ndb/include/kernel/LogLevel.hpp +++ /dev/null @@ -1,163 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef _LOG_LEVEL_HPP -#define _LOG_LEVEL_HPP - -#include -#include - -/** - * - */ -class LogLevel { - friend class Config; -public: - /** - * Constructor - */ - LogLevel(); - - /** - * Howto add a new event category: - * 1. Add the new event category to EventCategory below - * 2. Update #define _LOGLEVEL_CATEGORIES (found below) with the number of - * items in EventCategory - * 3. Update LogLevelCategoryName in LogLevel.cpp - * 4. Add the event in EventLogger - */ - - - /** - * Copy operator - */ - LogLevel & operator= (const LogLevel &); - - enum EventCategory { - llInvalid = -1, - llStartUp = CFG_LOGLEVEL_STARTUP - CFG_MIN_LOGLEVEL, - llShutdown = CFG_LOGLEVEL_SHUTDOWN - CFG_MIN_LOGLEVEL, - llStatistic = CFG_LOGLEVEL_STATISTICS - CFG_MIN_LOGLEVEL, - llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - CFG_MIN_LOGLEVEL, - llNodeRestart = CFG_LOGLEVEL_NODERESTART - CFG_MIN_LOGLEVEL, - llConnection = CFG_LOGLEVEL_CONNECTION - CFG_MIN_LOGLEVEL, - llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL, - llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL, - llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL, - llCongestion = CFG_LOGLEVEL_CONGESTION - CFG_MIN_LOGLEVEL, - llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL - ,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL - }; - - /** - * No of categories - */ -#define _LOGLEVEL_CATEGORIES (CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1) - STATIC_CONST( LOGLEVEL_CATEGORIES = _LOGLEVEL_CATEGORIES ); - - void clear(); - - /** - * Note level is valid as 0-15 - */ - int setLogLevel(EventCategory ec, Uint32 level = 7); - - /** - * Get the loglevel (0-15) for a category - */ - Uint32 getLogLevel(EventCategory ec) const; - - /** - * Set this= max(this, ll) per category - */ - LogLevel& set_max(const LogLevel& ll); - - bool operator==(const LogLevel& l) const { - return memcmp(this, &l, sizeof(* this)) == 0; - } - - LogLevel& operator=(const struct EventSubscribeReq & req); - -private: - /** - * The actual data - */ - Uint8 logLevelData[LOGLEVEL_CATEGORIES]; -}; - -inline -LogLevel::LogLevel(){ - clear(); -} - -inline -LogLevel & -LogLevel::operator= (const LogLevel & org){ - memcpy(logLevelData, org.logLevelData, sizeof(logLevelData)); - return * this; -} - -inline -void -LogLevel::clear(){ - for(Uint32 i = 0; i= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES) - { - logLevelData[ec] = (Uint8)level; - return 0; - } - return 1; -} - -inline -Uint32 -LogLevel::getLogLevel(EventCategory ec) const{ - assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES); - - return (Uint32)logLevelData[ec]; -} - -inline -LogLevel & -LogLevel::set_max(const LogLevel & org){ - for(Uint32 i = 0; i> 16)] = req.theData[i] & 0xFFFF; - } - return * this; -} - -#endif diff --git a/storage/ndb/include/kernel/NodeBitmask.hpp 
b/storage/ndb/include/kernel/NodeBitmask.hpp deleted file mode 100644 index dac2d503fdf..00000000000 --- a/storage/ndb/include/kernel/NodeBitmask.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NODE_BITMASK_HPP -#define NODE_BITMASK_HPP - -#include -#include -#include - -/** - * No of 32 bits words needed to store a node bitmask - * containing all the nodes in the system - * Both NDB nodes and API, MGM... nodes - * - * Note that this is used in a lot of signals - */ -#define _NODE_BITMASK_SIZE 2 - -/** - * No of 32 bits words needed to store a node bitmask - * containing all the ndb nodes in the system - * - * Note that this is used in a lot of signals - */ -#define _NDB_NODE_BITMASK_SIZE 2 - -/** - * No of 32 bits word needed to store B bits for N nodes - */ -#define NODE_ARRAY_SIZE(N, B) (((N)*(B)+31) >> 5) - -typedef Bitmask<(unsigned int)_NODE_BITMASK_SIZE> NodeBitmask; - -typedef Bitmask<(unsigned int)_NDB_NODE_BITMASK_SIZE> NdbNodeBitmask; - -#define __NBM_SZ ((MAX_NODES >> 5) + ((MAX_NODES & 31) != 0)) -#define __NNBM_SZ ((MAX_NDB_NODES >> 5) + ((MAX_NDB_NODES & 31) != 0)) - -#if ( __NBM_SZ > _NODE_BITMASK_SIZE) -#error "MAX_NODES can not fit into NODE_BITMASK_SIZE" -#endif - -#if ( __NNBM_SZ > _NDB_NODE_BITMASK_SIZE) -#error "MAX_NDB_NODES can not fit into NDB_NODE_BITMASK_SIZE" -#endif - -/** - * General B Bits operations - * - * Get(x, A[], B) - * w = x >> S1 - * s = (x & S2) << S3 - * return (A[w] >> s) & S4 - * - * Set(x, A[], v, B) - * w = x >> S1 - * s = (x & S2) << S3 - * m = ~(S4 << s) - * t = A[w] & m; - * A[w] = t | ((v & S4) << s) - * - * B(Bits) S1 S2 S3 S4 - * 1 5 31 0 1 - * 2 4 15 1 3 - * 4 3 7 2 15 - * 8 2 3 3 255 - * 16 1 1 4 65535 - * - * S1 = 5 - 2log(B) - * S2 = 2^S1 - 1 - * S3 = 2log(B) - * S4 = 2^B - 1 - */ - -#endif diff --git a/storage/ndb/include/kernel/NodeInfo.hpp b/storage/ndb/include/kernel/NodeInfo.hpp deleted file mode 100644 index 7d0b196ee32..00000000000 --- a/storage/ndb/include/kernel/NodeInfo.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NODE_INFO_HPP -#define NODE_INFO_HPP - -#include -#include - -class NodeInfo { -public: - NodeInfo(); - - /** - * NodeType - */ - enum NodeType { - DB = NODE_TYPE_DB, ///< Database node - API = NODE_TYPE_API, ///< NDB API node - MGM = NODE_TYPE_MGM, ///< Management node (incl. NDB API) - INVALID = 255 ///< Invalid type - }; - NodeType getType() const; - - Uint32 m_version; ///< Node version - Uint32 m_signalVersion; ///< Signal version - Uint32 m_type; ///< Node type - Uint32 m_connectCount; ///< No of times connected - bool m_connected; ///< Node is connected - Uint32 m_heartbeat_cnt; ///< Missed heartbeats - - friend NdbOut & operator<<(NdbOut&, const NodeInfo&); -}; - - -inline -NodeInfo::NodeInfo(){ - m_version = 0; - m_signalVersion = 0; - m_type = INVALID; - m_connectCount = 0; - m_heartbeat_cnt= 0; -} - -inline -NodeInfo::NodeType -NodeInfo::getType() const { - return (NodeType)m_type; -} - -inline -NdbOut & -operator<<(NdbOut& ndbout, const NodeInfo & info){ - ndbout << "[NodeInfo: "; - switch(info.m_type){ - case NodeInfo::DB: - ndbout << "DB"; - break; - case NodeInfo::API: - ndbout << "API"; - break; - case NodeInfo::MGM: - ndbout << "MGM"; - break; - case NodeInfo::INVALID: - ndbout << "INVALID"; - break; - default: - ndbout << ""; - break; - } - - ndbout << " version: " << info.m_version - << " sig. version; " << info.m_signalVersion - << " connect count: " << info.m_connectCount - << "]"; - return ndbout; -} - -struct NodeVersionInfo -{ - STATIC_CONST( DataLength = 6 ); - struct - { - Uint32 m_min_version; - Uint32 m_max_version; - } m_type [3]; // Indexed as NodeInfo::Type -}; - -#endif diff --git a/storage/ndb/include/kernel/NodeState.hpp b/storage/ndb/include/kernel/NodeState.hpp deleted file mode 100644 index 7f6dc1a17a4..00000000000 --- a/storage/ndb/include/kernel/NodeState.hpp +++ /dev/null @@ -1,319 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NODE_STATE_HPP -#define NODE_STATE_HPP - -#include -#include - -class NodeState { -public: - enum StartLevel { - /** - * SL_NOTHING - * Nothing is started - */ - SL_NOTHING = 0, - - /** - * SL_CMVMI - * CMVMI is started - * Listening to management server - * Qmgr knows nothing... 
- */ - SL_CMVMI = 1, - - /** - * SL_STARTING - * All blocks are starting - * Initial or restart - * During this phase is startPhase valid - */ - SL_STARTING = 2, - - /** - * The database is started open for connections - */ - SL_STARTED = 3, - - SL_SINGLEUSER = 4, - - /** - * SL_STOPPING_1 - Inform API - * API is informed not to start transactions on node - * The database is about to close - * - * New TcSeize(s) are refused (TcSeizeRef) - */ - SL_STOPPING_1 = 5, - - /** - * SL_STOPPING_2 - Close TC - * New transactions(TC) are refused - */ - SL_STOPPING_2 = 6, - - - - - /** - * SL_STOPPING_3 - Wait for reads in LQH - * No transactions are running in TC - * New scans(s) and read(s) are refused in LQH - * NS: The node is not Primary for any fragment - * NS: No node is allow to start - */ - SL_STOPPING_3 = 7, - - /** - * SL_STOPPING_4 - Close LQH - * Node is out of DIGETNODES - * Insert/Update/Delete can still be running in LQH - * GCP is refused - * Node is not startable w.o Node Recovery - */ - SL_STOPPING_4 = 8 - }; - - enum StartType { - ST_INITIAL_START = 0, - ST_SYSTEM_RESTART = 1, - ST_NODE_RESTART = 2, - ST_INITIAL_NODE_RESTART = 3, - ST_ILLEGAL_TYPE = 4 - }; - - /** - * Length in 32-bit words - */ - STATIC_CONST( DataLength = 8 + NdbNodeBitmask::Size ); - - /** - * Constructor(s) - */ - NodeState(); - NodeState(StartLevel); - NodeState(StartLevel, bool systemShutdown); - NodeState(StartLevel, Uint32 startPhase, StartType); - void init(); - - /** - * Current start level - */ - Uint32 startLevel; - - /** - * Node group - */ - Uint32 nodeGroup; // valid when startLevel == SL_STARTING - - /** - * Dynamic id - */ - union { - Uint32 dynamicId; // valid when startLevel == SL_STARTING to API - Uint32 masterNodeId; // When from cntr - }; - - /** - * - */ - union { - struct { - Uint32 startPhase; // valid when startLevel == SL_STARTING - Uint32 restartType; // valid when startLevel == SL_STARTING - } starting; - struct { - Uint32 systemShutdown; // valid when startLevel == SL_STOPPING_{X} - Uint32 timeout; - Uint32 alarmTime; - } stopping; - - - }; - Uint32 singleUserMode; - Uint32 singleUserApi; //the single user node - - BitmaskPOD m_connected_nodes; - - void setDynamicId(Uint32 dynamic); - void setNodeGroup(Uint32 group); - void setSingleUser(Uint32 s); - void setSingleUserApi(Uint32 n); - - - /** - * Is a node restart in progress (ordinary or initial) - */ - bool getNodeRestartInProgress() const; - - /** - * Is a system restart ongoing - */ - bool getSystemRestartInProgress() const; - - /** - * Is in single user mode? 
- */ - bool getSingleUserMode() const; - - /** - * Is in single user mode - */ - Uint32 getSingleUserApi() const; - - friend NdbOut & operator<<(NdbOut&, const NodeState&); -}; - -inline -NodeState::NodeState(){ - init(); -} - -inline -void -NodeState::init(){ - startLevel = SL_CMVMI; - nodeGroup = 0xFFFFFFFF; - dynamicId = 0xFFFFFFFF; - singleUserMode = 0; - singleUserApi = 0xFFFFFFFF; - m_connected_nodes.clear(); -} - -inline -NodeState::NodeState(StartLevel sl){ - init(); - startLevel = sl; - singleUserMode = 0; - singleUserApi = 0xFFFFFFFF; -} - -inline -NodeState::NodeState(StartLevel sl, Uint32 sp, StartType typeOfStart){ - init(); - startLevel = sl; - starting.startPhase = sp; - starting.restartType = typeOfStart; - singleUserMode = 0; - singleUserApi = 0xFFFFFFFF; -} - -inline -NodeState::NodeState(StartLevel sl, bool sys){ - init(); - startLevel = sl; - stopping.systemShutdown = sys; - singleUserMode = 0; - singleUserApi = 0xFFFFFFFF; -} - -inline -void NodeState::setDynamicId(Uint32 dynamic){ - dynamicId = dynamic; -} - -inline -void NodeState::setNodeGroup(Uint32 group){ - nodeGroup = group; -} - -inline -void NodeState::setSingleUser(Uint32 s) { - singleUserMode = s; -} - -inline -void NodeState::setSingleUserApi(Uint32 n) { - singleUserApi = n; -} -inline -bool NodeState::getNodeRestartInProgress() const { - return startLevel == SL_STARTING && - (starting.restartType == ST_NODE_RESTART || - starting.restartType == ST_INITIAL_NODE_RESTART); -} - -inline -bool NodeState::getSingleUserMode() const { - return singleUserMode; -} - -inline -Uint32 NodeState::getSingleUserApi() const { - return singleUserApi; -} - -inline -bool NodeState::getSystemRestartInProgress() const { - return startLevel == SL_STARTING && starting.restartType == ST_SYSTEM_RESTART; -} - -inline -NdbOut & -operator<<(NdbOut& ndbout, const NodeState & state){ - ndbout << "[NodeState: startLevel: "; - switch(state.startLevel){ - case NodeState::SL_NOTHING: - ndbout << " ]"; - break; - case NodeState::SL_CMVMI: - ndbout << " ]"; - break; - case NodeState::SL_STARTING: - ndbout << " ]"; - break; - case NodeState::SL_STARTED: - ndbout << " ]"; - break; - case NodeState::SL_STOPPING_1: - ndbout << " ]"; - break; - case NodeState::SL_STOPPING_2: - ndbout << " ]"; - break; - case NodeState::SL_STOPPING_3: - ndbout << " ]"; - break; - case NodeState::SL_STOPPING_4: - ndbout << " ]"; - break; - default: - ndbout << " ]"; - } - return ndbout; -} - -#endif diff --git a/storage/ndb/include/kernel/RefConvert.hpp b/storage/ndb/include/kernel/RefConvert.hpp deleted file mode 100644 index 2dcc67983a8..00000000000 --- a/storage/ndb/include/kernel/RefConvert.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef REFCONVERT_H -#define REFCONVERT_H - -#include "kernel_types.h" - -/** - * Convert BlockReference to BlockNumber - */ -inline -BlockNumber refToBlock(BlockReference ref){ - return (BlockNumber)(ref >> 16); -} - -/** - * Convert BlockReference to NodeId - */ -inline -NodeId refToNode(BlockReference ref){ - return (NodeId)(ref & 0xFFFF); -} - -/** - * Convert NodeId and BlockNumber to BlockReference - */ -inline -BlockReference numberToRef(BlockNumber bnr, NodeId proc){ - return (((Uint32)bnr) << 16) + proc; -} - -#endif - diff --git a/storage/ndb/include/kernel/kernel_config_parameters.h b/storage/ndb/include/kernel/kernel_config_parameters.h deleted file mode 100644 index 0da5df00a79..00000000000 --- a/storage/ndb/include/kernel/kernel_config_parameters.h +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef DB_CONFIG_PARAMTERS_H -#define DB_CONFIG_PARAMTERS_H - -#define PRIVATE_BASE 14000 - -#define CFG_ACC_DIR_RANGE (PRIVATE_BASE + 1) -#define CFG_ACC_DIR_ARRAY (PRIVATE_BASE + 2) -#define CFG_ACC_FRAGMENT (PRIVATE_BASE + 3) -#define CFG_ACC_OP_RECS (PRIVATE_BASE + 4) -#define CFG_ACC_OVERFLOW_RECS (PRIVATE_BASE + 5) -#define CFG_ACC_PAGE8 (PRIVATE_BASE + 6) -#define CFG_ACC_ROOT_FRAG (PRIVATE_BASE + 7) -#define CFG_ACC_TABLE (PRIVATE_BASE + 8) -#define CFG_ACC_SCAN (PRIVATE_BASE + 9) - -#define CFG_DICT_ATTRIBUTE (PRIVATE_BASE + 10) -#define CFG_DICT_TABLE (PRIVATE_BASE + 13) - -#define CFG_DIH_API_CONNECT (PRIVATE_BASE + 15) -#define CFG_DIH_CONNECT (PRIVATE_BASE + 16) -#define CFG_DIH_FRAG_CONNECT (PRIVATE_BASE + 17) -#define CFG_DIH_MORE_NODES (PRIVATE_BASE + 18) -#define CFG_DIH_REPLICAS (PRIVATE_BASE + 19) -#define CFG_DIH_TABLE (PRIVATE_BASE + 20) - -#define CFG_LQH_FRAG (PRIVATE_BASE + 21) -#define CFG_LQH_TABLE (PRIVATE_BASE + 23) -#define CFG_LQH_TC_CONNECT (PRIVATE_BASE + 24) -#define CFG_LQH_LOG_FILES (PRIVATE_BASE + 26) -#define CFG_LQH_SCAN (PRIVATE_BASE + 27) - -#define CFG_TC_API_CONNECT (PRIVATE_BASE + 28) -#define CFG_TC_TC_CONNECT (PRIVATE_BASE + 29) -#define CFG_TC_TABLE (PRIVATE_BASE + 30) -#define CFG_TC_SCAN (PRIVATE_BASE + 31) -#define CFG_TC_LOCAL_SCAN (PRIVATE_BASE + 32) - -#define CFG_TUP_FRAG (PRIVATE_BASE + 33) -#define CFG_TUP_OP_RECS (PRIVATE_BASE + 34) -#define CFG_TUP_PAGE (PRIVATE_BASE + 35) -#define CFG_TUP_PAGE_RANGE (PRIVATE_BASE + 36) -#define CFG_TUP_TABLE (PRIVATE_BASE + 37) -#define CFG_TUP_TABLE_DESC (PRIVATE_BASE + 38) -#define CFG_TUP_STORED_PROC (PRIVATE_BASE + 39) - -#define CFG_TUX_INDEX (PRIVATE_BASE + 40) -#define CFG_TUX_FRAGMENT (PRIVATE_BASE + 41) -#define CFG_TUX_ATTRIBUTE (PRIVATE_BASE + 42) -#define CFG_TUX_SCAN_OP 
(PRIVATE_BASE + 43) - -#endif diff --git a/storage/ndb/include/kernel/kernel_types.h b/storage/ndb/include/kernel/kernel_types.h deleted file mode 100644 index fc3d8597c78..00000000000 --- a/storage/ndb/include/kernel/kernel_types.h +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_KERNEL_TYPES_H -#define NDB_KERNEL_TYPES_H - -#include -#include -#include "ndb_limits.h" - -typedef Uint16 NodeId; -typedef Uint16 BlockNumber; -typedef Uint32 BlockReference; -typedef Uint16 GlobalSignalNumber; - -enum Operation_t { - ZREAD = 0 - ,ZUPDATE = 1 - ,ZINSERT = 2 - ,ZDELETE = 3 - ,ZWRITE = 4 - ,ZREAD_EX = 5 -#if 0 - ,ZREAD_CONSISTENT = 6 -#endif -}; - -/** - * 32k page - */ -struct GlobalPage { - union { - Uint32 data[GLOBAL_PAGE_SIZE/sizeof(Uint32)]; - Uint32 nextPool; - }; -}; - -struct Local_key -{ - Uint32 m_page_no; - Uint16 m_page_idx; - Uint16 m_file_no; - - bool isNull() const { return m_page_no == RNIL; } - void setNull() { m_page_no= RNIL; m_file_no= m_page_idx= ~0;} - - Uint32 ref() const { return (m_page_no << MAX_TUPLES_BITS) | m_page_idx ;} - - Local_key& assref (Uint32 ref) { - m_page_no =ref >> MAX_TUPLES_BITS; - m_page_idx = ref & MAX_TUPLES_PER_PAGE; - return *this; - } -}; - -class NdbOut& -operator<<(class NdbOut&, const struct Local_key&); - -inline -Uint32 -table_version_major(Uint32 ver) -{ - return ver & 0x00FFFFFF; -} - -#endif - - - - diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h deleted file mode 100644 index 3e9ade05d61..00000000000 --- a/storage/ndb/include/kernel/ndb_limits.h +++ /dev/null @@ -1,165 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_LIMITS_H -#define NDB_LIMITS_H - -#include - -#define RNIL 0xffffff00 - -/** - * Note that actual value = MAX_NODES - 1, - * since NodeId = 0 can not be used - */ -#define MAX_NDB_NODES 49 -#define MAX_NODES 64 -#define UNDEF_NODEGROUP 0xFFFF - -/************************************************************************** - * IT SHOULD BE (MAX_NDB_NODES - 1). 
- * WHEN MAX_NDB_NODE IS CHANGED, IT SHOULD BE CHANGED ALSO - **************************************************************************/ -#define MAX_DATA_NODE_ID 48 -/************************************************************************** - * IT SHOULD BE (MAX_NODES - 1). - * WHEN MAX_NODES IS CHANGED, IT SHOULD BE CHANGED ALSO - **************************************************************************/ -#define MAX_NODES_ID 63 - -/** - * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use - */ - -/** - * The maximum number of replicas in the system - */ -#define MAX_REPLICAS 4 - -/** - * The maximum number of local checkpoints stored at a time - */ -#define MAX_LCP_STORED 3 - -/** - * The maximum number of log execution rounds at system restart - */ -#define MAX_LOG_EXEC 4 - -/** - * The maximum number of tuples per page - **/ -#define MAX_TUPLES_PER_PAGE 8191 -#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */ -#define MAX_TABLES 20320 /* SchemaFile.hpp */ -#define MAX_TAB_NAME_SIZE 128 -#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */ -#define MAX_ATTR_DEFAULT_VALUE_SIZE 128 -#define MAX_ATTRIBUTES_IN_TABLE 128 -#define MAX_ATTRIBUTES_IN_INDEX 32 -#define MAX_TUPLE_SIZE_IN_WORDS 2013 -#define MAX_KEY_SIZE_IN_WORDS 1023 -#define MAX_FRM_DATA_SIZE 6000 -#define MAX_NULL_BITS 4096 -#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES)) -#define MAX_NDB_PARTITIONS 1024 -#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data - -#define MAX_WORDS_META_FILE 24576 - -#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1) -/* - * Max Number of Records to fetch per SCAN_NEXTREQ in a scan in LQH. The - * API can order a multiple of this number of records at a time since - * fragments can be scanned in parallel. - */ -#define MAX_PARALLEL_OP_PER_SCAN 992 -/* -* The default batch size. Configurable parameter. -*/ -#define DEF_BATCH_SIZE 64 -/* -* When calculating the number of records sent from LQH in each batch -* one uses SCAN_BATCH_SIZE divided by the expected size of signals -* per row. This gives the batch size used for the scan. The NDB API -* will receive one batch from each node at a time so there has to be -* some care taken also so that the NDB API is not overloaded with -* signals. -* This parameter is configurable, this is the default value. -*/ -#define SCAN_BATCH_SIZE 32768 -/* -* To protect the NDB API from overload we also define a maximum total -* batch size from all nodes. This parameter should most likely be -* configurable, or dependent on sendBufferSize. -* This parameter is configurable, this is the default value. -*/ -#define MAX_SCAN_BATCH_SIZE 262144 -/* - * Maximum number of Parallel Scan queries on one hash index fragment - */ -#define MAX_PARALLEL_SCANS_PER_FRAG 12 -/* - * Maximum parallel ordered index scans per primary table fragment. - * Implementation limit is (256 minus 12). - */ -#define MAX_PARALLEL_INDEX_SCANS_PER_FRAG 32 - -/** - * Computed defines - */ -#define MAXNROFATTRIBUTESINWORDS (MAX_ATTRIBUTES_IN_TABLE / 32) - -/* - * Ordered index constants. Make configurable per index later. - */ -#define MAX_TTREE_NODE_SIZE 64 /* total words in node */ -#define MAX_TTREE_PREF_SIZE 4 /* words in min prefix */ -#define MAX_TTREE_NODE_SLACK 2 /* diff between max and min occupancy */ - -/* - * Blobs. - */ -#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */ - -/* - * Character sets. 
- */ -#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */ - -/** - * Disk data - */ -#define MAX_FILES_PER_FILEGROUP 1024 - -/** - * Page size in global page pool - */ -#define GLOBAL_PAGE_SIZE 32768 -#define GLOBAL_PAGE_SIZE_WORDS 8192 - -/* - * Long signals - */ -#define NDB_SECTION_SEGMENT_SZ 60 - -/* - * Restore Buffer in pages - * 4M - */ -#define LCP_RESTORE_BUFFER (4*32) - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AbortAll.hpp b/storage/ndb/include/kernel/signaldata/AbortAll.hpp deleted file mode 100644 index 98734dc770f..00000000000 --- a/storage/ndb/include/kernel/signaldata/AbortAll.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ABORT_ALL_REQ_HPP -#define ABORT_ALL_REQ_HPP - -#include "SignalData.hpp" - -class AbortAllReq { - - /** - * Reciver(s) - */ - friend class Dbtc; - - /** - * Sender - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); - -public: - - Uint32 senderRef; - Uint32 senderData; -}; - -class AbortAllConf { - - /** - * Reciver(s) - */ - friend class Ndbcntr; - - /** - * Sender - */ - friend class Dbtc; - -public: - STATIC_CONST( SignalLength = 1 ); - -public: - Uint32 senderData; -}; - -class AbortAllRef { - - /** - * Reciver(s) - */ - friend class Ndbcntr; - - /** - * Sender - */ - friend class Dbtc; - -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrorCode { - InvalidState = 1, - AbortAlreadyInProgress = 2, - FunctionNotImplemented = 3 - }; -public: - Uint32 senderData; - Uint32 errorCode; -}; - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/AccFrag.hpp b/storage/ndb/include/kernel/signaldata/AccFrag.hpp deleted file mode 100644 index 906b29042a6..00000000000 --- a/storage/ndb/include/kernel/signaldata/AccFrag.hpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ACC_FRAG_HPP -#define ACC_FRAG_HPP - -#include "SignalData.hpp" - -class AccFragReq { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbacc; -public: - STATIC_CONST( SignalLength = 12 ); - -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 tableId; - Uint32 reqInfo; - Uint32 fragId; - Uint32 localKeyLen; - Uint32 maxLoadFactor; - Uint32 minLoadFactor; - Uint32 kValue; - Uint32 lhFragBits; - Uint32 lhDirBits; - Uint32 keyLength; -}; - -class AccFragConf { - /** - * Sender(s) - */ - friend class Dbacc; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 userPtr; - Uint32 rootFragPtr; - Uint32 fragId[2]; - Uint32 fragPtr[2]; - Uint32 rootHashCheck; -}; - -class AccFragRef { - /** - * Sender(s) - */ - friend class Dbacc; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 userPtr; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AccLock.hpp b/storage/ndb/include/kernel/signaldata/AccLock.hpp deleted file mode 100644 index 525d9291f24..00000000000 --- a/storage/ndb/include/kernel/signaldata/AccLock.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ACC_LOCK_HPP -#define ACC_LOCK_HPP - -#include "SignalData.hpp" - -/* - * Lock or unlock tuple. If lock request is queued, the reply is later - * via ACCKEYCONF. 
- */ -class AccLockReq { - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend bool printACC_LOCKREQ(FILE *, const Uint32*, Uint32, Uint16); -public: - enum RequestType { // first byte - LockShared = 1, - LockExclusive = 2, - Unlock = 3, - Abort = 4, - AbortWithConf = 5 - }; - enum RequestFlag { // second byte - }; - enum ReturnCode { - Success = 0, - IsBlocked = 1, // was put in lock queue - WouldBlock = 2, // if we add non-blocking option - Refused = 3, - NoFreeOp = 4 - }; - STATIC_CONST( LockSignalLength = 12 ); - STATIC_CONST( UndoSignalLength = 3 ); -private: - Uint32 returnCode; - Uint32 requestInfo; - Uint32 accOpPtr; - // rest only if lock request - Uint32 userPtr; - Uint32 userRef; - Uint32 tableId; - Uint32 fragId; - Uint32 fragPtrI; - Uint32 hashValue; - Uint32 tupAddr; - Uint32 transId1; - Uint32 transId2; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AccScan.hpp b/storage/ndb/include/kernel/signaldata/AccScan.hpp deleted file mode 100644 index c5defddb86d..00000000000 --- a/storage/ndb/include/kernel/signaldata/AccScan.hpp +++ /dev/null @@ -1,224 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ACC_SCAN_HPP -#define ACC_SCAN_HPP - -#include "SignalData.hpp" - -/* - * Used by ACC and TUX scan. 
- */ - -class AccScanReq { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Dbacc; - friend class Dbtux; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 8 ); - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 fragmentNo; - Uint32 requestInfo; - Uint32 transId1; - Uint32 transId2; - union { - Uint32 savePointId; - Uint32 gci; - }; - Uint32 maxPage; - - /** - * Previously there where also a scan type - */ - static Uint32 getLockMode(const Uint32 & requestInfo); - static Uint32 getReadCommittedFlag(const Uint32 & requestInfo); - static Uint32 getDescendingFlag(const Uint32 & requestInfo); - - static void setLockMode(Uint32 & requestInfo, Uint32 lockMode); - static void setReadCommittedFlag(Uint32 & requestInfo, Uint32 readCommitted); - static void setDescendingFlag(Uint32 & requestInfo, Uint32 descending); - - static Uint32 getNoDiskScanFlag(const Uint32 & requestInfo); - static void setNoDiskScanFlag(Uint32 & requestInfo, Uint32 nodisk); - - static Uint32 getNRScanFlag(const Uint32 & requestInfo); - static void setNRScanFlag(Uint32 & requestInfo, Uint32 nr); - - static Uint32 getLcpScanFlag(const Uint32 & requestInfo); - static void setLcpScanFlag(Uint32 & requestInfo, Uint32 nr); -}; - -/** - * Request Info - * - * l = Lock Mode - 1 Bit 2 - * h = Read Committed - 1 Bit 5 - * z = Descending (TUX) - 1 Bit 6 - * d = No disk scan - 1 Bit 7 - * n = Node recovery scan - 1 Bit 8 - * c = LCP scan - 1 Bit 9 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * l hzdn - */ -#define AS_LOCK_MODE_SHIFT (2) -#define AS_LOCK_MODE_MASK (1) -#define AS_READ_COMMITTED_SHIFT (5) -#define AS_DESCENDING_SHIFT (6) -#define AS_NO_DISK_SCAN (7) -#define AS_NR_SCAN (8) -#define AS_LCP_SCAN (9) - -inline -Uint32 -AccScanReq::getLockMode(const Uint32 & requestInfo){ - return (requestInfo >> AS_LOCK_MODE_SHIFT) & AS_LOCK_MODE_MASK; -} - -inline -Uint32 -AccScanReq::getReadCommittedFlag(const Uint32 & requestInfo){ - return (requestInfo >> AS_READ_COMMITTED_SHIFT) & 1; -} - -inline -Uint32 -AccScanReq::getDescendingFlag(const Uint32 & requestInfo){ - return (requestInfo >> AS_DESCENDING_SHIFT) & 1; -} - -inline -void -AccScanReq::setLockMode(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, AS_LOCK_MODE_MASK, "AccScanReq::setLockMode"); - requestInfo |= (val << AS_LOCK_MODE_SHIFT); -} - -inline -void -AccScanReq::setReadCommittedFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "AccScanReq::setReadCommittedFlag"); - requestInfo |= (val << AS_READ_COMMITTED_SHIFT); -} - -inline -void -AccScanReq::setDescendingFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "AccScanReq::setDescendingFlag"); - requestInfo |= (val << AS_DESCENDING_SHIFT); -} - -inline -Uint32 -AccScanReq::getNoDiskScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> AS_NO_DISK_SCAN) & 1; -} - -inline -void -AccScanReq::setNoDiskScanFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "AccScanReq::setNoDiskScanFlag"); - requestInfo |= (val << AS_NO_DISK_SCAN); -} - -inline -Uint32 -AccScanReq::getNRScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> AS_NR_SCAN) & 1; -} - -inline -void -AccScanReq::setNRScanFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "AccScanReq::setNoDiskScanFlag"); - requestInfo |= (val << AS_NR_SCAN); -} - -inline -Uint32 -AccScanReq::getLcpScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> AS_LCP_SCAN) & 1; -} - -inline -void -AccScanReq::setLcpScanFlag(UintR & 
requestInfo, UintR val){ - ASSERT_BOOL(val, "AccScanReq::setNoDiskScanFlag"); - requestInfo |= (val << AS_LCP_SCAN); -} - -class AccScanConf { - /** - * Sender(s) - */ - friend class Dbacc; - friend class Dbtux; - friend class Dbtup; - - /** - * Reciver(s) - */ - friend class Dblqh; - - enum { - ZEMPTY_FRAGMENT = 0, - ZNOT_EMPTY_FRAGMENT = 1 - }; - -public: - STATIC_CONST( SignalLength = 8 ); - -private: - Uint32 scanPtr; - Uint32 accPtr; - Uint32 unused1; - Uint32 unused2; - Uint32 unused3; - Uint32 unused4; - Uint32 unused5; - Uint32 flag; -}; - -class AccCheckScan { - friend class Dbacc; - friend class Dbtux; - friend class Dbtup; - friend class Dblqh; - enum { - ZCHECK_LCP_STOP = 0, - ZNOT_CHECK_LCP_STOP = 1 - }; -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 accPtr; // scanptr.i in ACC or TUX - Uint32 checkLcpStop; // from enum -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp deleted file mode 100644 index a8768c52a22..00000000000 --- a/storage/ndb/include/kernel/signaldata/AccSizeAltReq.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ACC_SIZE_ALT_REQ_H -#define ACC_SIZE_ALT_REQ_H - -#include "SignalData.hpp" - -class AccSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dbacc; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_DIR_RANGE = 1 ); - STATIC_CONST( IND_DIR_ARRAY = 2 ); - STATIC_CONST( IND_FRAGMENT = 3 ); - STATIC_CONST( IND_OP_RECS = 4 ); - STATIC_CONST( IND_OVERFLOW_RECS = 5 ); - STATIC_CONST( IND_PAGE8 = 6 ); - STATIC_CONST( IND_ROOT_FRAG = 7 ); - STATIC_CONST( IND_TABLE = 8 ); - STATIC_CONST( IND_SCAN = 9 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[10]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AllocNodeId.hpp b/storage/ndb/include/kernel/signaldata/AllocNodeId.hpp deleted file mode 100644 index c9efe18bdd8..00000000000 --- a/storage/ndb/include/kernel/signaldata/AllocNodeId.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright (c) 2003, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ALLOC_NODE_ID_HPP -#define ALLOC_NODE_ID_HPP - -#include "SignalData.hpp" -#include - -/** - * Request to allocate node id - */ -class AllocNodeIdReq { -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 nodeId; - Uint32 nodeType; -}; - -class AllocNodeIdConf { -public: - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 nodeId; -}; - -class AllocNodeIdRef { -public: - STATIC_CONST( SignalLength = 5 ); - - enum ErrorCodes { - NoError = 0, - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701, - NotMaster = 702, - NodeReserved = 1701, - NodeConnected = 1702, - NodeFailureHandlingNotCompleted = 1703, - NodeTypeMismatch = 1704 - }; - - Uint32 senderRef; - Uint32 senderData; - Uint32 nodeId; - Uint32 errorCode; - Uint32 masterRef; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/AlterIndx.hpp b/storage/ndb/include/kernel/signaldata/AlterIndx.hpp deleted file mode 100644 index 8f3db3f9992..00000000000 --- a/storage/ndb/include/kernel/signaldata/AlterIndx.hpp +++ /dev/null @@ -1,271 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ALTER_INDX_HPP -#define ALTER_INDX_HPP - -#include "SignalData.hpp" -#include -#include - -/** - * AlterIndxReq. 
- */ -class AlterIndxReq { - friend bool printALTER_INDX_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_CREATE_INDEX = 2, - RT_DROP_INDEX = 3, - RT_SYSTEMRESTART = 4, - RT_NODERESTART = 5, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_TC = 5 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TC = 5 << 8, - RT_TUX = 8 << 8 - }; - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; // only set by DICT - Uint32 m_indexVersion; - Uint32 m_online; // new state 0-offline 1-online - // extra - Uint32 m_opKey; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - AlterIndxReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (AlterIndxReq::RequestType)val; - } - void setRequestType(AlterIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } - Uint32 getOnline() const { - return m_online; - } - void setOnline(Uint32 val) { - m_online = val; - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * AlterIndxConf. - */ -class AlterIndxConf { - friend bool printALTER_INDX_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 6 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_indexVersion; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - AlterIndxReq::RequestType getRequestType() const { - return (AlterIndxReq::RequestType)m_requestInfo; - } - void setRequestType(AlterIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } -}; - -/** - * AlterIndxRef. 
- */ -class AlterIndxRef { - friend bool printALTER_INDX_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - IndexNotFound = 4243, - IndexExists = 4244, - BadRequestType = 4247, - NotAnIndex = 4254, - BadState = 4347, - Inconsistency = 4348 - }; - STATIC_CONST( SignalLength = AlterIndxConf::SignalLength + 3 ); - -private: - AlterIndxConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexId; - //Uint32 m_indexVersion; - Uint32 m_errorCode; - Uint32 m_errorLine; - union { - Uint32 m_errorNode; - Uint32 masterNodeId; // if NotMaster - }; -public: - AlterIndxConf* getConf() { - return &m_conf; - } - const AlterIndxConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - AlterIndxReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(AlterIndxReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - Uint32 getIndexVersion() const { - return m_conf.getIndexVersion(); - } - void setIndexVersion(Uint32 val) { - m_conf.setIndexVersion(val); - } - AlterIndxRef::ErrorCode getErrorCode() const { - return (AlterIndxRef::ErrorCode)m_errorCode; - } - void setErrorCode(AlterIndxRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AlterTab.hpp b/storage/ndb/include/kernel/signaldata/AlterTab.hpp deleted file mode 100644 index c91e068326b..00000000000 --- a/storage/ndb/include/kernel/signaldata/AlterTab.hpp +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ALTER_TAB_HPP -#define ALTER_TAB_HPP - -#include "SignalData.hpp" -#include "GlobalSignalNumbers.h" - -/** - * AlterTab - * - * Implemenatation of AlterTable - */ -class AlterTabReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Dbdih; - friend class Dbtc; - friend class Dblqh; - friend class Suma; - - /** - * For printing - */ - friend bool printALTER_TAB_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 9 ); - - enum RequestType { - AlterTablePrepare = 0, // Prepare alter table - AlterTableCommit = 1, // Commit alter table - AlterTableRevert = 2 // Prepare failed, revert instead - }; -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 clientRef; - Uint32 clientData; - - Uint32 changeMask; - Uint32 tableId; - Uint32 tableVersion; - Uint32 gci; - Uint32 requestType; - - SECTION( DICT_TAB_INFO = 0 ); -}; - -struct AlterTabRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Dbdih; - friend class Dbtc; - friend class Dblqh; - friend class Dbtup; - friend class SafeCounter; - - /** - * For printing - */ - friend bool printALTER_TAB_REF(FILE *, const Uint32 *, Uint32, Uint16); - - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( GSN = GSN_ALTER_TAB_REF ); - - enum ErrorCode { - NF_FakeErrorREF = 255 - }; - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 errorStatus; - Uint32 requestType; -}; - -class AlterTabConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Dbdih; - friend class Dbtc; - friend class Dblqh; - friend class Dbtup; - - /** - * For printing - */ - friend bool printALTER_TAB_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 changeMask; - Uint32 tableId; - Uint32 tableVersion; - Uint32 gci; - Uint32 requestType; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AlterTable.hpp b/storage/ndb/include/kernel/signaldata/AlterTable.hpp deleted file mode 100644 index 7dae3d0358f..00000000000 --- a/storage/ndb/include/kernel/signaldata/AlterTable.hpp +++ /dev/null @@ -1,287 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ALTER_TABLE_HPP -#define ALTER_TABLE_HPP - -#include "SignalData.hpp" - -/** - * AlterTable - * - * This signal is sent by API to DICT/TRIX - * as a request to alter a secondary index - * and then from TRIX to TRIX(n) and TRIX to TC. 
- */ -class AlterTableReq { - /** - * Sender(s) - */ - // API - - /** - * Sender(s) / Reciver(s) - */ - friend class NdbTableImpl; - friend class NdbEventOperationImpl; - friend class NdbDictInterface; - friend class Dbdict; - friend class Suma; - - /** - * For printing - */ - friend bool printALTER_TABLE_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 changeMask; - Uint32 tableId; - Uint32 tableVersion; - - SECTION( DICT_TAB_INFO = 0 ); - -/** - * ChangeMask - */ - -/* - n = Changed name - f = Changed frm - d = Changed fragment data - r = Changed range or list array - t = Changed tablespace name array - s = Changed tablespace id array - - 1111111111222222222233 - 01234567890123456789012345678901 - nf------------------------------ -*/ -#define NAME_SHIFT (0) -#define FRM_SHIFT (1) -#define FRAG_DATA_SHIFT (2) -#define RANGE_LIST_SHIFT (3) -#define TS_NAME_SHIFT (4) -#define TS_SHIFT (5) - - /** - * Getters and setters - */ - static Uint8 getNameFlag(const UintR & changeMask); - static void setNameFlag(UintR & changeMask, Uint32 nameFlg); - static Uint8 getFrmFlag(const UintR & changeMask); - static void setFrmFlag(UintR & changeMask, Uint32 frmFlg); - static Uint8 getFragDataFlag(const UintR & changeMask); - static void setFragDataFlag(UintR & changeMask, Uint32 fragFlg); - static Uint8 getRangeListFlag(const UintR & changeMask); - static void setRangeListFlag(UintR & changeMask, Uint32 rangeFlg); - static Uint8 getTsNameFlag(const UintR & changeMask); - static void setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg); - static Uint8 getTsFlag(const UintR & changeMask); - static void setTsFlag(UintR & changeMask, Uint32 tsFlg); -}; - -inline -Uint8 -AlterTableReq::getTsFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> TS_SHIFT) & 1); -} - -inline -void -AlterTableReq::setTsFlag(UintR & changeMask, Uint32 tsFlg){ - changeMask |= (tsFlg << TS_SHIFT); -} - -inline -Uint8 -AlterTableReq::getNameFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> NAME_SHIFT) & 1); -} - -inline -void -AlterTableReq::setNameFlag(UintR & changeMask, Uint32 nameFlg){ - changeMask |= (nameFlg << NAME_SHIFT); -} - -inline -Uint8 -AlterTableReq::getFrmFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> FRM_SHIFT) & 1); -} - -inline -void -AlterTableReq::setFrmFlag(UintR & changeMask, Uint32 frmFlg){ - changeMask |= (frmFlg << FRM_SHIFT); -} - -inline -Uint8 -AlterTableReq::getFragDataFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> FRAG_DATA_SHIFT) & 1); -} - -inline -void -AlterTableReq::setFragDataFlag(UintR & changeMask, Uint32 fragDataFlg){ - changeMask |= (fragDataFlg << FRAG_DATA_SHIFT); -} - -inline -Uint8 -AlterTableReq::getRangeListFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> RANGE_LIST_SHIFT) & 1); -} - -inline -void -AlterTableReq::setRangeListFlag(UintR & changeMask, Uint32 rangeFlg){ - changeMask |= (rangeFlg << RANGE_LIST_SHIFT); -} - -inline -Uint8 -AlterTableReq::getTsNameFlag(const UintR & changeMask){ - return (Uint8)((changeMask >> TS_NAME_SHIFT) & 1); -} - -inline -void -AlterTableReq::setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg){ - changeMask |= (tsNameFlg << TS_NAME_SHIFT); -} - - -class AlterTableRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printALTER_TABLE_REF(FILE *, 
const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - - enum ErrorCode { - NoError = 0, - InvalidTableVersion = 241, - DropInProgress = 283, - Busy = 701, - BusyWithNR = 711, - NotMaster = 702, - InvalidFormat = 703, - AttributeNameTooLong = 704, - TableNameTooLong = 705, - Inconsistency = 706, - NoMoreTableRecords = 707, - NoMoreAttributeRecords = 708, - NoSuchTable = 709, - AttributeNameTwice = 720, - TableAlreadyExist = 721, - ArraySizeTooBig = 737, - RecordTooBig = 738, - InvalidPrimaryKeySize = 739, - NullablePrimaryKey = 740, - UnsupportedChange = 741, - BackupInProgress = 762, - IncompatibleVersions = 763, - SingleUser = 299 - }; - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 status; - -public: - Uint32 getErrorCode() const { - return errorCode; - } - Uint32 getErrorLine() const { - return errorLine; - } -}; - -class AlterTableConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printALTER_TABLE_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 tableVersion; -}; - -/** - * Inform API about change of table definition - */ -struct AlterTableRep -{ - friend bool printALTER_TABLE_REP(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 3 ); - - enum Change_type - { - CT_ALTERED = 0x1, - CT_DROPPED = 0x2 - }; - - Uint32 tableId; - Uint32 tableVersion; - Uint32 changeType; - - SECTION( TABLE_NAME = 0 ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AlterTrig.hpp b/storage/ndb/include/kernel/signaldata/AlterTrig.hpp deleted file mode 100644 index 41f9d88de9e..00000000000 --- a/storage/ndb/include/kernel/signaldata/AlterTrig.hpp +++ /dev/null @@ -1,288 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ALTER_TRIG_HPP -#define ALTER_TRIG_HPP - -#include "SignalData.hpp" -#include -#include - -/** - * AlterTrigReq. 
- */ -class AlterTrigReq { - friend bool printALTER_TRIG_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_CREATE_TRIGGER = 2, - RT_DROP_TRIGGER = 3, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_TC = 5 << 4, - RT_DICT_LQH = 6 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4 - }; - STATIC_CONST( SignalLength = 8 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_triggerId; - Uint32 m_triggerInfo; - Uint32 m_online; // new state 0-offline 1-online - Uint32 m_receiverRef; // receiver for subscription trigger - // extra - Uint32 m_opKey; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - AlterTrigReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (AlterTrigReq::RequestType)val; - } - void setRequestType(AlterTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } - Uint32 getTriggerInfo() const { - return m_triggerInfo; - } - void setTriggerInfo(Uint32 val) { - m_triggerInfo = val; - } - TriggerType::Value getTriggerType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 0, 8); - return (TriggerType::Value)val; - } - void setTriggerType(TriggerType::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 0, 8, (Uint32)val); - } - TriggerActionTime::Value getTriggerActionTime() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 8, 8); - return (TriggerActionTime::Value)val; - } - void setTriggerActionTime(TriggerActionTime::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 8, 8, (Uint32)val); - } - TriggerEvent::Value getTriggerEvent() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 16, 8); - return (TriggerEvent::Value)val; - } - void setTriggerEvent(TriggerEvent::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 16, 8, (Uint32)val); - } - bool getMonitorReplicas() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 24, 1); - } - void setMonitorReplicas(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 24, 1, val); - } - bool getMonitorAllAttributes() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 25, 1); - } - void setMonitorAllAttributes(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val); - } - Uint32 getOnline() const { - return m_online; - } - void setOnline(Uint32 val) { - m_online = val; - } - Uint32 getReceiverRef() const { - return m_receiverRef; - } - void setReceiverRef(Uint32 val) { - m_receiverRef = val; - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * AlterTrigConf. 
- */ -class AlterTrigConf { - friend bool printALTER_TRIG_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_triggerId; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - AlterTrigReq::RequestType getRequestType() const { - return (AlterTrigReq::RequestType)m_requestInfo; - } - void setRequestType(AlterTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } -}; - -/** - * AlterTrigRef. - */ -class AlterTrigRef { - friend bool printALTER_TRIG_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - Busy = 701, - TriggerNotFound = 4238, - TriggerExists = 4239, - BadRequestType = 4247 - }; - STATIC_CONST( SignalLength = AlterTrigConf::SignalLength + 3 ); - -private: - AlterTrigConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_triggerId; - Uint32 m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - -public: - AlterTrigConf* getConf() { - return &m_conf; - } - const AlterTrigConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - AlterTrigReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(AlterTrigReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getTriggerId() const { - return m_conf.getTriggerId(); - } - void setTriggerId(Uint32 val) { - m_conf.setTriggerId(val); - } - ErrorCode getErrorCode() const { - return (ErrorCode)m_errorCode; - } - void setErrorCode(ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ApiBroadcast.hpp b/storage/ndb/include/kernel/signaldata/ApiBroadcast.hpp deleted file mode 100644 index 5674d1dafcc..00000000000 --- a/storage/ndb/include/kernel/signaldata/ApiBroadcast.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef API_BROADCAST_HPP -#define API_BROADCAST_HPP - -#include "SignalData.hpp" - -struct ApiBroadcastRep -{ - STATIC_CONST( SignalLength = 2 ); - - Uint32 gsn; - Uint32 minVersion; - Uint32 theData[1]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp b/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp deleted file mode 100644 index 5fca04f7b74..00000000000 --- a/storage/ndb/include/kernel/signaldata/ApiRegSignalData.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef API_REGCONF_HPP -#define API_REGCONF_HPP - -#include - -class ApiRegReq { - /** - * Sender(s) - */ - friend class ClusterMgr; - - /** - * Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 ref; - Uint32 version; // Version of API node -}; - -/** - * - */ -class ApiRegRef { - /** - * Sender(s) - */ - friend class Qmgr; - - /** - * Reciver(s) - */ - friend class ClusterMgr; - -public: - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - WrongType = 1, - UnsupportedVersion = 2 - }; -private: - Uint32 ref; // Qmgr ref - Uint32 version; // Version of NDB node - Uint32 errorCode; -}; - -/** - * - */ -class ApiRegConf { - /** - * Sender(s) - */ - friend class Qmgr; - - /** - * Reciver(s) - */ - friend class ClusterMgr; - -public: - STATIC_CONST( SignalLength = 4 + NodeState::DataLength ); -private: - - Uint32 qmgrRef; - Uint32 version; // Version of NDB node - Uint32 apiHeartbeatFrequency; - Uint32 minDbVersion; - NodeState nodeState; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ApiVersion.hpp b/storage/ndb/include/kernel/signaldata/ApiVersion.hpp deleted file mode 100644 index 829cbfedb78..00000000000 --- a/storage/ndb/include/kernel/signaldata/ApiVersion.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef API_VERSION_HPP -#define API_VERSION_HPP - -class ApiVersionReq { -/** - * Sender(s) - */ - friend class MgmtSrv; - - /** - * Reciver(s) - */ - friend class Qmgr; -public: - STATIC_CONST( SignalLength = 3 ); - Uint32 senderRef; - Uint32 nodeId; //api node id - Uint32 version; // Version of API node - - -}; - - - -class ApiVersionConf { -/** - * Sender(s) - */ - friend class Qmgr; - - /** - * Reciver(s) - */ - friend class MgmtSrv; -public: - STATIC_CONST( SignalLength = 4 ); - Uint32 senderRef; - Uint32 nodeId; //api node id - Uint32 version; // Version of API node - Uint32 inet_addr; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp deleted file mode 100644 index 8230064dd7e..00000000000 --- a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ARBIT_SIGNAL_DATA_H -#define ARBIT_SIGNAL_DATA_H - -#include -#include -#include -#include -#include "SignalData.hpp" -#include "SignalDataPrint.hpp" - -/** - * The ticket. - */ -class ArbitTicket { -private: - Uint32 data[2]; - -public: - ArbitTicket() {} - STATIC_CONST( DataLength = 2 ); - STATIC_CONST( TextLength = DataLength * 8 ); // hex digits - - inline void clear() { - data[0] = 0; - data[1] = 0; - } - - inline void update() { - Uint16 cnt = data[0] & 0xFFFF; // previous count - Uint16 pid = NdbHost_GetProcessId(); - data[0] = (pid << 16) | (cnt + 1); - data[1] = NdbTick_CurrentMillisecond(); - } - - inline bool match(ArbitTicket& aTicket) const { - return - data[0] == aTicket.data[0] && - data[1] == aTicket.data[1]; - } - - inline void getText(char *buf, size_t buf_len) const { - BaseString::snprintf(buf, buf_len, "%08x%08x", data[0], data[1]); - } - -/* inline char* getText() const { - static char buf[TextLength + 1]; - getText(buf, sizeof(buf)); - return buf; - } */ -}; - -/** - * Result codes. Part of signal data. Each signal uses only - * a subset but a common namespace is convenient. 
- */ -class ArbitCode { -public: - STATIC_CONST( ErrTextLength = 80 ); - - enum { - NoInfo = 0, - - // CFG signals - CfgRank1 = 1, // these have to be 1 and 2 - CfgRank2 = 2, - - // QMGR continueB thread state - ThreadStart = 11, // continueB thread started - - // PREP signals - PrepPart1 = 21, // zero old ticket - PrepPart2 = 22, // get new ticket - PrepAtrun = 23, // late joiner gets ticket at RUN time - - // arbitrator state - ApiStart = 31, // arbitrator thread started - ApiFail = 32, // arbitrator died - ApiExit = 33, // arbitrator reported it will exit - - // arbitration result - LoseNodes = 41, // lose on ndb node count - WinNodes = 42, // win on ndb node count - WinGroups = 43, // we win, no need for arbitration - LoseGroups = 44, // we lose, missing node group - Partitioning = 45, // possible network partitioning - WinChoose = 46, // positive reply - LoseChoose = 47, // negative reply - LoseNorun = 48, // arbitrator required but not running - LoseNocfg = 49, // arbitrator required but none configured - - // general error codes - ErrTicket = 91, // invalid arbitrator-ticket - ErrToomany = 92, // too many requests - ErrState = 93, // invalid state - ErrTimeout = 94, // timeout waiting for signals - ErrUnknown = 95 // unknown error - }; - - static inline void getErrText(Uint32 code, char* buf, size_t buf_len) { - switch (code) { - case ErrTicket: - BaseString::snprintf(buf, buf_len, "invalid arbitrator-ticket"); - break; - case ErrToomany: - BaseString::snprintf(buf, buf_len, "too many requests"); - break; - case ErrState: - BaseString::snprintf(buf, buf_len, "invalid state"); - break; - case ErrTimeout: - BaseString::snprintf(buf, buf_len, "timeout"); - break; - default: - BaseString::snprintf(buf, buf_len, "unknown error [code=%u]", code); - break; - } - } -}; - -/** - * Common class for arbitration signal data. - */ -class ArbitSignalData { -public: - Uint32 sender; // sender's node id (must be word 0) - Uint32 code; // result code or other info - Uint32 node; // arbitrator node id - ArbitTicket ticket; // ticket - NodeBitmask mask; // set of nodes - - ArbitSignalData() {} - STATIC_CONST( SignalLength = 3 + ArbitTicket::DataLength + NodeBitmask::Size ); - - inline bool match(ArbitSignalData& aData) const { - return - node == aData.node && - ticket.match(aData.ticket); - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/AttrInfo.hpp b/storage/ndb/include/kernel/signaldata/AttrInfo.hpp deleted file mode 100644 index a044ce79ace..00000000000 --- a/storage/ndb/include/kernel/signaldata/AttrInfo.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ATTRINFO_HPP -#define ATTRINFO_HPP - -#include "SignalData.hpp" - -class AttrInfo { - /** - * Sender(s) - */ - friend class DbUtil; - - /** - * Receiver(s) - */ - friend class Dbtup; - - /** - * Sender(s) / Receiver(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class NdbScanOperation; - friend class Restore; - - friend bool printATTRINFO(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 22 ); - STATIC_CONST( MaxSignalLength = HeaderLength + DataLength ); - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 attrData[DataLength]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp b/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp deleted file mode 100644 index af70e7f69d1..00000000000 --- a/storage/ndb/include/kernel/signaldata/BackupContinueB.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BACKUP_CONTINUEB_H -#define BACKUP_CONTINUEB_H - -#include "SignalData.hpp" - -class BackupContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Backup; - friend bool printCONTINUEB_BACKUP(FILE * output, const Uint32 * theData, Uint32 len); -private: - enum { - START_FILE_THREAD = 0, - BUFFER_UNDERFLOW = 1, - BUFFER_FULL_SCAN = 2, - BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4, - BACKUP_FRAGMENT_INFO = 5, - RESET_DISK_SPEED_COUNTER = 6, - ZDELAY_SCAN_NEXT = 7 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp deleted file mode 100644 index a7623e07b32..00000000000 --- a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp +++ /dev/null @@ -1,385 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BACKUP_IMPL_HPP -#define BACKUP_IMPL_HPP - -#include "SignalData.hpp" -#include - -class DefineBackupReq { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - friend class Dblqh; - - friend bool printDEFINE_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 9 + NdbNodeBitmask::Size); - -private: - /** - * i - value of backup object - */ - Uint32 backupPtr; - - Uint32 backupId; - Uint32 clientRef; - Uint32 clientData; - Uint32 senderRef; - - /** - * Which node(s) is participating in the backup - */ - NdbNodeBitmask nodes; - - /** - * Generated random number - */ - Uint32 backupKey[2]; - - /** - * Length of backup data - */ - Uint32 backupDataLen; - - /** - * Backup flags - */ - /* & 0x3 - waitCompleted - */ - Uint32 flags; -}; - -class DefineBackupRef { - /** - * Sender(s) - */ - friend class Backup; - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printDEFINE_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - Undefined = 1340, - FailedToAllocateBuffers = 1342, - FailedToSetupFsBuffers = 1343, - FailedToAllocateTables = 1344, - FailedInsertFileHeader = 1345, - FailedInsertTableList = 1346, - FailedAllocateTableMem = 1347, - FailedToAllocateFileRecord = 1348, - FailedToAllocateAttributeRecord = 1349 - }; -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 errorCode; - Uint32 nodeId; -}; - -class DefineBackupConf { - /** - * Sender(s) - */ - friend class Backup; - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printDEFINE_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 backupId; - Uint32 backupPtr; -}; - -class StartBackupReq { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - - friend bool printSTART_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 backupId; - Uint32 backupPtr; -}; - -class StartBackupRef { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - FailedToAllocateTriggerRecord = 1 - }; -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 errorCode; - Uint32 nodeId; -}; - -class StartBackupConf { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printSTART_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 backupId; - Uint32 backupPtr; -}; - -class BackupFragmentReq { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - friend class Dblqh; - - friend bool printBACKUP_FRAGMENT_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 tableId; - Uint32 fragmentNo; - Uint32 count; -}; - -class BackupFragmentRef { - /** - * 
Sender(s) - */ - friend class Backup; - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printBACKUP_FRAGMENT_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 errorCode; - Uint32 nodeId; -}; - -class BackupFragmentConf { - /** - * Sender(s) - */ - friend class Backup; - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 8 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 tableId; - Uint32 fragmentNo; - Uint32 noOfRecordsLow; - Uint32 noOfBytesLow; - Uint32 noOfRecordsHigh; - Uint32 noOfBytesHigh; -}; - -class BackupFragmentCompleteRep { -public: - STATIC_CONST( SignalLength = 8 ); - - Uint32 backupId; - Uint32 backupPtr; - Uint32 tableId; - Uint32 fragmentNo; - Uint32 noOfTableRowsLow; - Uint32 noOfFragmentRowsLow; - Uint32 noOfTableRowsHigh; - Uint32 noOfFragmentRowsHigh; -}; - -class StopBackupReq { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - - friend bool printSTOP_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 startGCP; - Uint32 stopGCP; -}; - -class StopBackupRef { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printSTOP_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 errorCode; - Uint32 nodeId; -}; - -class StopBackupConf { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printSTOP_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 backupId; - Uint32 backupPtr; - Uint32 noOfLogBytes; - Uint32 noOfLogRecords; -}; - -class BackupStatusReq { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - - friend bool printBACKUP_STATUS_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); - -private: -}; - -class BackupStatusConf { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class BackupMaster; - - friend bool printBACKUP_STATUS_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); - -private: -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp b/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp deleted file mode 100644 index ae5c3114623..00000000000 --- a/storage/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ /dev/null @@ -1,261 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BACKUP_HPP -#define BACKUP_HPP - -#include "SignalData.hpp" -#include - -/** - * Request to start a backup - */ -class BackupReq { - /** - * Sender(s) - */ - friend class MgmtSrvr; - - /** - * Reciver(s) - */ - friend class Backup; - - friend bool printBACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderData; - Uint32 backupDataLen; - /* & 0x3 - waitCompleted - */ - Uint32 flags; -}; - -class BackupData { - /** - * Sender(s) - */ - friend class BackupMaster; - - /** - * Reciver(s) - */ - friend class Backup; - - friend bool printBACKUP_DATA(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 25 ); - - enum KeyValues { - /** - * Buffer(s) and stuff - */ - BufferSize = 1, // In MB - BlockSize = 2, // Write in chunks of this (in bytes) - MinWrite = 3, // Minimum write as multiple of blocksize - MaxWrite = 4, // Maximum write as multiple of blocksize - - // Max throughput - // Parallell files - - NoOfTables = 1000, - TableName = 1001 // char* - }; -private: - enum RequestType { - ClientToMaster = 1, - MasterToSlave = 2 - }; - Uint32 requestType; - - union { - Uint32 backupPtr; - Uint32 senderData; - }; - Uint32 backupId; - - /** - * totalLen = totalLen_offset >> 16 - * offset = totalLen_offset & 0xFFFF - */ - Uint32 totalLen_offset; - - /** - * Length in this = signal->length() - 3 - * Sender block ref = signal->senderBlockRef() - */ - Uint32 backupData[21]; -}; - -/** - * The request to start a backup was refused - */ -class BackupRef { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - friend bool printBACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - enum ErrorCodes { - Undefined = 1300, - IAmNotMaster = 1301, - OutOfBackupRecord = 1302, - OutOfResources = 1303, - SequenceFailure = 1304, - BackupDefinitionNotImplemented = 1305, - CannotBackupDiskless = 1306 - }; - Uint32 senderData; - Uint32 errorCode; - union { - Uint32 masterRef; - }; -}; - -/** - * The backup has started - */ -class BackupConf { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - friend bool printBACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 + NdbNodeBitmask::Size ); - -private: - Uint32 senderData; - Uint32 backupId; - NdbNodeBitmask nodes; -}; - -/** - * A backup has been aborted - */ -class BackupAbortRep { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - friend bool printBACKUP_ABORT_REP(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderData; - Uint32 backupId; - Uint32 reason; -}; - -/** - * A backup has been completed - */ -class BackupCompleteRep { - /** - * Sender(s) - */ - friend class Backup; - - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size ); -private: - Uint32 senderData; - Uint32 backupId; - Uint32 startGCP; - Uint32 stopGCP; - Uint32 noOfBytesLow; - Uint32 noOfRecordsLow; - Uint32 noOfLogBytes; - Uint32 
noOfLogRecords; - NdbNodeBitmask nodes; - Uint32 noOfBytesHigh; - Uint32 noOfRecordsHigh; -}; - -/** - * A master has finished taking-over backup responsiblility - */ -class BackupNFCompleteRep { - friend bool printBACKUP_NF_COMPLETE_REP(FILE*, const Uint32*, Uint32, Uint16); -}; - -/** - * Abort of backup - */ -class AbortBackupOrd { - /** - * Sender / Reciver - */ - friend class Backup; - friend class MgmtSrvr; - - friend bool printABORT_BACKUP_ORD(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - - enum RequestType { - ClientAbort = 1321, - BackupComplete = 1322, - BackupFailure = 1323, // General backup failure coordinator -> slave - LogBufferFull = 1324, // slave -> coordinator - FileOrScanError = 1325, // slave -> coordinator - BackupFailureDueToNodeFail = 1326, // slave -> slave - OkToClean = 1327 // master -> slave - - ,AbortScan = 1328 - ,IncompatibleVersions = 1329 - }; -private: - Uint32 requestType; - Uint32 backupId; - union { - Uint32 backupPtr; - Uint32 senderData; - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp b/storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp deleted file mode 100644 index 18ddefa70b7..00000000000 --- a/storage/ndb/include/kernel/signaldata/BlockCommitOrd.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BLOCK_COMMIT_ORD_HPP -#define BLOCK_COMMIT_ORD_HPP - -/** - * These two signals are sent via EXECUTE_DIRECT - * to DBDIH from QMGR - * - * Block make sure that no commit is performed - * Unblock turns on commit again - */ - -class BlockCommitOrd { - /** - * Sender(s) - */ - friend class Qmgr; - - /** - * Reciver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 failNo; // As used by Qmgr -}; - -class UnblockCommitOrd { - /** - * Sender(s) - */ - friend class Qmgr; - - /** - * Reciver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 failNo; // As used by Qmgr -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/BuildIndx.hpp b/storage/ndb/include/kernel/signaldata/BuildIndx.hpp deleted file mode 100644 index e73bcb2c7f4..00000000000 --- a/storage/ndb/include/kernel/signaldata/BuildIndx.hpp +++ /dev/null @@ -1,308 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BUILD_INDX_HPP -#define BUILD_INDX_HPP - -#include "SignalData.hpp" -#include -#include - -/** - * BuildIndxReq - * - * This signal is sent by DICT to TRIX(n) - * as a request to build a secondary index - */ -class BuildIndxReq { - friend bool printBUILD_INDX_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_ALTER_INDEX = 2, - RT_SYSTEMRESTART = 3, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_TC = 5 << 4, - RT_DICT_TRIX = 7 << 4, - RT_DICT_TUX = 8 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TRIX = 7 << 8 - }; - STATIC_CONST( SignalLength = 9 ); - STATIC_CONST( INDEX_COLUMNS = 0 ); - STATIC_CONST( KEY_COLUMNS = 1 ); - STATIC_CONST( NoOfSections = 2 ); - -private: - Uint32 m_userRef; // user block reference - Uint32 m_connectionPtr; // user "schema connection" - Uint32 m_requestInfo; - Uint32 m_buildId; // Suma subscription id - Uint32 m_buildKey; // Suma subscription key - Uint32 m_tableId; // table being indexed - Uint32 m_indexType; // from DictTabInfo::TableType - Uint32 m_indexId; // table storing index - Uint32 m_parallelism; // number of parallel insert transactions - // extra - Uint32 m_opKey; - // Sent data ends here - Uint32 m_slack[25 - SignalLength - 1]; - Uint32 m_sectionBuffer[MAX_ATTRIBUTES_IN_TABLE * 2]; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - BuildIndxReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (BuildIndxReq::RequestType)val; - } - void setRequestType(BuildIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - return (BuildIndxReq::RequestType)val; - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getBuildId() const { - return m_buildId; - } - void setBuildId(Uint32 val) { - m_buildId = val; - } - Uint32 getBuildKey() const { - return m_buildKey; - } - void setBuildKey(Uint32 val) { - m_buildKey = val; - } - Uint32 getIndexType() const { - return m_indexType; - } - void setIndexType(Uint32 val) { - m_indexType = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getParallelism() const { - return m_parallelism; - } - void setParallelism(Uint32 val) { - m_parallelism = val; - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } - // Column order - void setColumnOrder(Uint32* indexBuf, Uint32 indexLen, - Uint32* keyBuf, Uint32 keyLen, - struct LinearSectionPtr orderPtr[]); -}; - -inline -void BuildIndxReq::setColumnOrder(Uint32* indexBuf, Uint32 indexLen, - Uint32* keyBuf, Uint32 keyLen, - struct LinearSectionPtr orderPtr[]) - -{ - printf("BuildIndxReq::setColumnOrder: indexLen %u, keyLen 
%u\n", indexLen, keyLen); - // Copy buffers - MEMCOPY_NO_WORDS(m_sectionBuffer, indexBuf, indexLen); - MEMCOPY_NO_WORDS(m_sectionBuffer + indexLen, keyBuf, keyLen); - orderPtr[INDEX_COLUMNS].p = m_sectionBuffer; - orderPtr[INDEX_COLUMNS].sz = indexLen; - orderPtr[KEY_COLUMNS].p = m_sectionBuffer + indexLen; - orderPtr[KEY_COLUMNS].sz = keyLen; -} - -/** - * BuildIndxConf - * - * This signal is sent back to DICT from TRIX - * as confirmation of succesfull index build - * (BuildIndxReq). - */ -class BuildIndxConf { - friend bool printBUILD_INDX_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 6 ); - -private: - friend class BuildIndxRef; - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexType; - Uint32 m_indexId; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - BuildIndxReq::RequestType getRequestType() const { - return (BuildIndxReq::RequestType)m_requestInfo; - } - void setRequestType(BuildIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexType() const { - return m_indexType; - } - void setIndexType(Uint32 val) { - m_indexType = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } -}; - -/** - * BuildIndxRef - * - * This signal is sent back to API from DICT/TRIX - * as refusal of a failed index creation - * (BuildIndxReq). It is also sent as refusal - * from TC to TRIX and TRIX to DICT. 
- */ -class BuildIndxRef { - friend bool printBUILD_INDX_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - BadRequestType = 4247, - InvalidPrimaryTable = 4249, - InvalidIndexType = 4250, - IndexNotUnique = 4251, - AllocationFailure = 4252, - InternalError = 4346 - }; - STATIC_CONST( SignalLength = BuildIndxConf::SignalLength + 2 ); - - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexType; - //Uint32 m_indexId; - BuildIndxConf m_conf; - Uint32 m_errorCode; - Uint32 masterNodeId; - -public: - BuildIndxConf* getConf() { - return &m_conf; - } - const BuildIndxConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - BuildIndxReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(BuildIndxReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getIndexType() const { - return m_conf.getIndexType(); - } - void setIndexType(Uint32 val) { - m_conf.setIndexType(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - BuildIndxRef::ErrorCode getErrorCode() const { - return (BuildIndxRef::ErrorCode)m_errorCode; - } - void setErrorCode(BuildIndxRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp b/storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp deleted file mode 100644 index 5047e4ab4d2..00000000000 --- a/storage/ndb/include/kernel/signaldata/CheckNodeGroups.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CHECKNODEGROUPS_H -#define CHECKNODEGROUPS_H - -#include -#include -#include "SignalData.hpp" -#include "SignalDataPrint.hpp" - -/** - * Ask DIH to check if a node set can survive i.e. if it - * has at least one node in every node group. Returns one - * of Win, Lose, Partitioning. - * - * Same class is used for REQ and CONF. The REQ can also - * be executed as a direct signal. 
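The Win/Lose/Partitioning outcomes described above can be read off the node-group layout: a node set loses if some node group has no member in the set, wins if it can survive while the complementary set cannot, and otherwise both halves are viable and arbitration is needed. A rough standalone sketch of that rule, with a made-up two-group layout (this is an inference from the comments and Output values above, not the DIH implementation):

#include <cstdio>
#include <set>
#include <vector>

enum Outcome { Lose = 1, Win = 2, Partitioning = 3 };   // values as in CheckNodeGroups::Output

// groups: each inner vector lists the node ids of one node group.
// alive:  the node set being checked; "they" is the complementary set.
Outcome checkSurvival(const std::vector<std::vector<int>>& groups,
                      const std::set<int>& alive) {
    bool weSurvive = true, theySurvive = true;
    for (const auto& group : groups) {
        int ours = 0;
        for (int node : group)
            if (alive.count(node)) ++ours;
        if (ours == 0) weSurvive = false;                                // a group with none of our nodes
        if (ours == static_cast<int>(group.size())) theySurvive = false; // a group with none of theirs
    }
    if (!weSurvive) return Lose;
    return theySurvive ? Partitioning : Win;   // both sides viable means arbitration is needed
}

int main() {
    std::vector<std::vector<int>> groups = {{1, 2}, {3, 4}};    // two replicas per group
    std::printf("%d\n", checkSurvival(groups, {1, 3}));         // one node per group on each side: Partitioning
    std::printf("%d\n", checkSurvival(groups, {1, 2, 3, 4}));   // the other side holds nothing: Win
    std::printf("%d\n", checkSurvival(groups, {1, 2}));         // group {3,4} entirely missing: Lose
    return 0;
}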
- */ -class CheckNodeGroups { -public: - Uint32 blockRef; // sender's node id - union { - Uint32 requestType; // direct flag, output code - Uint32 output; - }; - - Uint32 nodeId; // nodeId input for GetNodeGroupMembers - NodeBitmask mask; /* set of NDB nodes, input for ArbitCheck, - * output for GetNodeGroupMembers - */ - enum RequestType { - Direct = 0x1, - ArbitCheck = 0x2, - GetNodeGroup = 0x4, - GetNodeGroupMembers = 0x8 - }; - - enum Output { - Lose = 1, // we cannot survive - Win = 2, // we and only we can survive - Partitioning = 3 // possible network partitioning - }; - - STATIC_CONST( SignalLength = 3 + NodeBitmask::Size ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp b/storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp deleted file mode 100644 index 4fcd938efb8..00000000000 --- a/storage/ndb/include/kernel/signaldata/CloseComReqConf.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CLOSE_COMREQCONF_HPP -#define CLOSE_COMREQCONF_HPP - -#include "SignalData.hpp" -#include - -/** - * The Req signal is sent by Qmgr to Cmvmi - * and the Conf signal is sent back - * - * NOTE that the signals are identical - */ -class CloseComReqConf { - - /** - * Sender(s) / Reciver(s) - */ - friend class Qmgr; - friend class Cmvmi; - - /** - * For printing - */ - friend bool printCLOSECOMREQCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 3 + NodeBitmask::Size ); -private: - - Uint32 xxxBlockRef; - Uint32 failNo; - - Uint32 noOfNodes; - Uint32 theNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CmInit.hpp b/storage/ndb/include/kernel/signaldata/CmInit.hpp deleted file mode 100644 index 17ad4df1def..00000000000 --- a/storage/ndb/include/kernel/signaldata/CmInit.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CM_INIT_HPP -#define CM_INIT_HPP - -#include - -/** - * - */ -class CmInit { - /** - * Sender(s) - */ - friend class Cmvmi; - - /** - * Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 4 + NodeBitmask::Size ); -private: - - Uint32 heartbeatDbDb; - Uint32 heartbeatDbApi; - Uint32 inactiveTransactionCheck; - Uint32 arbitTimeout; - - Uint32 allNdbNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp b/storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp deleted file mode 100644 index 8203d6e5901..00000000000 --- a/storage/ndb/include/kernel/signaldata/CmRegSignalData.hpp +++ /dev/null @@ -1,213 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CM_REG_HPP -#define CM_REG_HPP - -#include - -/** - * This is the first distributed signal - * (the node tries to register in the cluster) - */ -class CmRegReq { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 5 + NdbNodeBitmask::Size ); -private: - - Uint32 blockRef; - Uint32 nodeId; - Uint32 version; // See ndb_version.h - - Uint32 start_type; // As specified by cmd-line or mgm, NodeState::StartType - Uint32 latest_gci; // 0 means no fs - Uint32 skip_nodes[NdbNodeBitmask::Size]; // Nodes that does not _need_ - // to be part of restart -}; - -/** - * The node receving this signal has been accepted into the cluster - */ -class CmRegConf { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 4 + NdbNodeBitmask::Size ); -private: - - Uint32 presidentBlockRef; - Uint32 presidentNodeId; - Uint32 presidentVersion; - - /** - * The dynamic id that the node reciving this signal has - */ - Uint32 dynamicId; - Uint32 allNdbNodes[NdbNodeBitmask::Size]; -}; - -/** - * - */ -class CmRegRef { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 7 + NdbNodeBitmask::Size ); - - enum ErrorCode { - ZBUSY = 0, /* Only the president can send this */ - ZBUSY_PRESIDENT = 1,/* Only the president can send this */ - ZBUSY_TO_PRES = 2, /* Only the president can send this */ - ZNOT_IN_CFG = 3, /* Only the president can send this */ - ZELECTION = 4, /* Receiver is definitely not president, - * but we are not sure if sender ends up - * as president. 
*/ - ZNOT_PRESIDENT = 5, /* We are not president */ - ZNOT_DEAD = 6, /* We are not dead when we are starting */ - ZINCOMPATIBLE_VERSION = 7, - ZINCOMPATIBLE_START_TYPE = 8, - ZSINGLE_USER_MODE = 9, /* The cluster is in single user mode, - * data node is not allowed to get added - * in the cluster while in single user mode */ - ZGENERIC = 100 /* The generic error code */ - }; -private: - - Uint32 blockRef; - Uint32 nodeId; - Uint32 errorCode; - /** - * Applicable if ZELECTION - */ - Uint32 presidentCandidate; - Uint32 candidate_latest_gci; // 0 means non - - /** - * Data for sending node sending node - */ - Uint32 latest_gci; - Uint32 start_type; - Uint32 skip_nodes[NdbNodeBitmask::Size]; // Nodes that does not _need_ - // to be part of restart -}; - -class CmAdd { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 3 ); - -private: - enum RequestType { - Prepare = 0, - AddCommit = 1, - CommitNew = 2 - }; - - Uint32 requestType; - Uint32 startingNodeId; - Uint32 startingVersion; -}; - -class CmAckAdd { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderNodeId; - Uint32 requestType; // see CmAdd::RequestType - Uint32 startingNodeId; -}; - -class CmNodeInfoReq { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 3 ); - -private: - /** - * This is information for sending node (starting node) - */ - Uint32 nodeId; - Uint32 dynamicId; - Uint32 version; -}; - -class CmNodeInfoRef { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - NotRunning = 1 - }; - -private: - Uint32 nodeId; - Uint32 errorCode; -}; - -class CmNodeInfoConf { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 nodeId; - Uint32 dynamicId; - Uint32 version; -}; - -#endif - - - - - - - diff --git a/storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp b/storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp deleted file mode 100644 index 2d79a63c7ea..00000000000 --- a/storage/ndb/include/kernel/signaldata/CmvmiCfgConf.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CMVMI_CFGCONF_H -#define CMVMI_CFGCONF_H - -#include "SignalData.hpp" - -/** - * This signal is used for transfering the - * ISP_X Data - * - * I.e. 
Configuration data which is sent in a specific start phase - * - */ -class CmvmiCfgConf { - /** - * Sender(s) - */ - friend class Cmvmi; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - -public: - STATIC_CONST( NO_OF_WORDS = 16 ); - STATIC_CONST( LENGTH = 17 ); -private: - - Uint32 startPhase; - Uint32 theData[NO_OF_WORDS]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp b/storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp deleted file mode 100644 index 7b1496fe279..00000000000 --- a/storage/ndb/include/kernel/signaldata/CntrMasterConf.hpp +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CNTR_MASTERCONF_HPP -#define CNTR_MASTERCONF_HPP - -#include - -/** - * This signals is sent by NdbCntr-Master to NdbCntr - */ -class CntrMasterConf { - /** - * Sender(s) - */ - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - - /** - * Reciver(s) - */ - -public: - STATIC_CONST( SignalLength = 1 + NodeBitmask::Size ); -private: - - Uint32 noStartNodes; - Uint32 theNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp b/storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp deleted file mode 100644 index 136a7e8e33d..00000000000 --- a/storage/ndb/include/kernel/signaldata/CntrMasterReq.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CNTR_MASTERREQ_HPP -#define CNTR_MASTERREQ_HPP - -#include - -/** - * This signals is sent by NdbCntr-Master to NdbCntr - */ -class CntrMasterReq { - /** - * Sender(s) - */ - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - - /** - * Reciver(s) - */ - -public: - STATIC_CONST( SignalLength = 4 + NodeBitmask::Size ); -private: - - Uint32 userBlockRef; - Uint32 userNodeId; - Uint32 typeOfStart; - Uint32 noRestartNodes; - Uint32 theNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CntrStart.hpp b/storage/ndb/include/kernel/signaldata/CntrStart.hpp deleted file mode 100644 index 0423c55c641..00000000000 --- a/storage/ndb/include/kernel/signaldata/CntrStart.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef CNTR_START_HPP -#define CNTR_START_HPP - -#include - -/** - * - */ -class CntrStartReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - - friend bool printCNTR_START_REQ(FILE*, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - Uint32 nodeId; - Uint32 startType; - Uint32 lastGci; -}; - -class CntrStartRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - - friend bool printCNTR_START_REF(FILE*, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrorCode { - OK = 0, - NotMaster = 1, - StopInProgress = 2 - }; -private: - - Uint32 errorCode; - Uint32 masterNodeId; -}; - -class CntrStartConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend struct UpgradeStartup; - - friend bool printCNTR_START_CONF(FILE*, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 + 2 * NdbNodeBitmask::Size ); - -private: - - Uint32 startType; - Uint32 startGci; - Uint32 masterNodeId; - Uint32 noStartNodes; - Uint32 startedNodes[NdbNodeBitmask::Size]; - Uint32 startingNodes[NdbNodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ConfigParamId.hpp b/storage/ndb/include/kernel/signaldata/ConfigParamId.hpp deleted file mode 100644 index 24517420ec3..00000000000 --- a/storage/ndb/include/kernel/signaldata/ConfigParamId.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ConfigParamId_H -#define ConfigParamId_H - - enum ConfigParamId { - - Id, - ExecuteOnComputer, - MaxNoOfSavedMessages, - ShmKey, - - LockPagesInMainMemory, - TimeBetweenWatchDogCheck, - StopOnError, - - MaxNoOfConcurrentOperations, - MaxNoOfConcurrentTransactions, - MemorySpaceIndexes, - MemorySpaceTuples, - MemoryDiskPages, - NoOfFreeDiskClusters, - NoOfDiskClusters, - - TimeToWaitAlive, - HeartbeatIntervalDbDb, - HeartbeatIntervalDbApi, - ArbitTimeout, - - TimeBetweenLocalCheckpoints, - TimeBetweenGlobalCheckpoints, - NoOfFragmentLogFiles, - NoOfConcurrentCheckpointsDuringRestart, - TransactionDeadlockDetectionTimeout, - TransactionInactiveTime, - NoOfConcurrentProcessesHandleTakeover, - - NoOfConcurrentCheckpointsAfterRestart, - - NoOfDiskPagesToDiskDuringRestartTUP, - NoOfDiskPagesToDiskAfterRestartTUP, - NoOfDiskPagesToDiskDuringRestartACC, - NoOfDiskPagesToDiskAfterRestartACC, - - NoOfDiskClustersPerDiskFile, - NoOfDiskFiles, - - MaxNoOfSavedEvents - }; - -#endif // ConfigParamId_H - - - - - - diff --git a/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp b/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp deleted file mode 100644 index 4d0e88246d8..00000000000 --- a/storage/ndb/include/kernel/signaldata/ContinueFragmented.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CONTINUE_FRAGMENTED_HPP -#define CONTINUE_FRAGMENTED_HPP - -#include "SignalData.hpp" - -class ContinueFragmented { - - /** - * Sender/Reciver(s) - */ - friend class SimulatedBlock; - - friend bool printCONTINUE_FRAGMENTED(FILE *,const Uint32 *, Uint32, Uint16); -public: - -private: - Uint32 line; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CopyActive.hpp b/storage/ndb/include/kernel/signaldata/CopyActive.hpp deleted file mode 100644 index 2328873eb96..00000000000 --- a/storage/ndb/include/kernel/signaldata/CopyActive.hpp +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef COPY_ACTIVE_HPP -#define COPY_ACTIVE_HPP - -#include "SignalData.hpp" - -class CopyActiveReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 tableId; - Uint32 fragId; - Uint32 distributionKey; -}; - -class CopyActiveConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 userPtr; - Uint32 startingNodeId; - Uint32 tableId; - Uint32 fragId; - Uint32 startGci; -}; -class CopyActiveRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 userPtr; - Uint32 startingNodeId; - Uint32 tableId; - Uint32 fragId; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CopyFrag.hpp b/storage/ndb/include/kernel/signaldata/CopyFrag.hpp deleted file mode 100644 index c9ff4e66bf6..00000000000 --- a/storage/ndb/include/kernel/signaldata/CopyFrag.hpp +++ /dev/null @@ -1,138 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef COPY_FRAG_HPP -#define COPY_FRAG_HPP - -#include "SignalData.hpp" - -class CopyFragReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 10 ); - -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 tableId; - Uint32 fragId; - Uint32 nodeId; - Uint32 schemaVersion; - Uint32 distributionKey; - Uint32 gci; - Uint32 nodeCount; - Uint32 nodeList[1]; - //Uint32 maxPage; is stored in nodeList[nodeCount] -}; - -class CopyFragConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 userPtr; - Uint32 sendingNodeId; - Uint32 startingNodeId; - Uint32 tableId; - Uint32 fragId; -}; -class CopyFragRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 6 ); - -private: - Uint32 userPtr; - Uint32 sendingNodeId; - Uint32 startingNodeId; - Uint32 tableId; - Uint32 fragId; - Uint32 errorCode; -}; - -struct UpdateFragDistKeyOrd -{ - Uint32 tableId; - Uint32 fragId; - Uint32 fragDistributionKey; - - STATIC_CONST( SignalLength = 3 ); -}; - -struct PrepareCopyFragReq -{ - STATIC_CONST( SignalLength = 6 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 fragId; - Uint32 copyNodeId; - Uint32 startingNodeId; -}; - -struct PrepareCopyFragRef -{ - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 fragId; - Uint32 copyNodeId; - Uint32 startingNodeId; - Uint32 errorCode; - - STATIC_CONST( SignalLength = 7 ); -}; - -struct PrepareCopyFragConf -{ - STATIC_CONST( SignalLength = 7 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 fragId; - Uint32 copyNodeId; - Uint32 startingNodeId; - Uint32 maxPageNo; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp b/storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp deleted file mode 100644 index 0261bcc3c40..00000000000 --- a/storage/ndb/include/kernel/signaldata/CopyGCIReq.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef COPY_GCI_REQ_HPP -#define COPY_GCI_REQ_HPP - -#include "SignalData.hpp" - -/** - * This signal is used for transfering the sysfile - * between Dih on different nodes. 
- * - * The master will distributes the file to the other nodes - * - * Since the Sysfile can be larger than on StartMeConf signal, - * there might be more than on of these signals sent before - * the entire sysfile is transfered - - */ -class CopyGCIReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - friend bool printCOPY_GCI_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - enum CopyReason { - IDLE = 0, - LOCAL_CHECKPOINT = 1, - RESTART = 2, - GLOBAL_CHECKPOINT = 3, - INITIAL_START_COMPLETED = 4 - }; - -private: - - Uint32 anyData; - Uint32 copyReason; - Uint32 startWord; - - /** - * No of free words to carry data - */ - STATIC_CONST( DATA_SIZE = 22 ); - - Uint32 data[DATA_SIZE]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp deleted file mode 100644 index 22a31e4d5bf..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp +++ /dev/null @@ -1,485 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_EVNT_HPP -#define CREATE_EVNT_HPP - -#include -#include "SignalData.hpp" -#include -#include - -/** - * DropEvntReq. - */ -class DropEvntReq { - friend bool printDROP_EVNT_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - SECTION( EVENT_NAME_SECTION = 0 ); - - union { // user block reference - Uint32 senderRef; - Uint32 m_userRef; - }; - union { - Uint32 senderData; - Uint32 m_userData; // user - }; - - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } -}; - -/** - * DropEvntConf. - */ -class DropEvntConf { - friend bool printDROP_EVNT_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - - union { // user block reference - Uint32 senderRef; - Uint32 m_userRef; - }; - union { - Uint32 senderData; - Uint32 m_userData; // user - }; - - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } -}; - -/** - * DropEvntRef. 
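Because the Sysfile can exceed one signal, CopyGCIReq above carries a startWord offset plus at most DATA_SIZE (22) payload words, and the master sends as many signals as needed. A minimal sketch of how a sysfile-sized word array breaks down into such chunks (the vector input and the print statement are stand-ins for the real signal send):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative chunk size matching CopyGCIReq::DATA_SIZE (22 data words per signal).
constexpr std::size_t kDataSize = 22;

// Model of sending a word array in CopyGCIReq-sized pieces: each "signal"
// carries a startWord offset plus up to kDataSize words of payload.
void sendSysfile(const std::vector<uint32_t>& sysfile) {
    for (std::size_t start = 0; start < sysfile.size(); start += kDataSize) {
        std::size_t words = std::min(kDataSize, sysfile.size() - start);
        std::printf("signal: startWord=%zu words=%zu\n", start, words);
        // a real sender would copy sysfile[start .. start+words) into the
        // signal's data[] array and send one signal per chunk
    }
}

int main() {
    std::vector<uint32_t> sysfile(90);   // made-up sysfile length: 90 words means 5 signals
    sendSysfile(sysfile);
    return 0;
}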
- */ -class DropEvntRef { - friend bool printDROP_EVNT_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701, - NotMaster = 702 - }; - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( SignalLength2 = SignalLength+1 ); - - union { // user block reference - Uint32 senderRef; - Uint32 m_userRef; - }; - union { - Uint32 senderData; - Uint32 m_userData; // user - }; - union { - Uint32 errorCode; - Uint32 m_errorCode; - }; - Uint32 m_errorLine; - Uint32 m_errorNode; - // with SignalLength2 - Uint32 m_masterNodeId; - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } - Uint32 getErrorCode() const { - return m_errorCode; - } - void setErrorCode(Uint32 val) { - m_errorCode = val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } - Uint32 getMasterNode() const { - return m_masterNodeId; - } - void setMasterNode(Uint32 val) { - m_masterNodeId = val; - } -}; - -/** - * CreateEvntReq. - */ -struct CreateEvntReq { - friend bool printCREATE_EVNT_REQ(FILE*, const Uint32*, Uint32, Uint16); - - enum RequestType { - RT_UNDEFINED = 0, - RT_USER_CREATE = 1, - RT_USER_GET = 2, - - RT_DICT_AFTER_GET = 0x1 << 4 - // RT_DICT_MASTER = 0x2 << 4, - - // RT_DICT_COMMIT = 0xC << 4, - // RT_DICT_ABORT = 0xF << 4, - // RT_TC = 5 << 8 - }; - enum EventFlags { - EF_REPORT_ALL = 0x1 << 16, - EF_REPORT_SUBSCRIBE = 0x2 << 16, - EF_ALL = 0xFFFF << 16 - }; - STATIC_CONST( SignalLengthGet = 3 ); - STATIC_CONST( SignalLengthCreate = 6+MAXNROFATTRIBUTESINWORDS ); - STATIC_CONST( SignalLength = 8+MAXNROFATTRIBUTESINWORDS ); - // SECTION( ATTRIBUTE_LIST_SECTION = 0 ); - SECTION( EVENT_NAME_SECTION = 0 ); - - union { - Uint32 m_userRef; // user block reference - Uint32 senderRef; // user block reference - }; - union { - Uint32 m_userData; // user - Uint32 senderData; // user - }; - Uint32 m_requestInfo; - Uint32 m_tableId; // table to event - Uint32 m_tableVersion; // table version - AttributeMask::Data m_attrListBitmask; - Uint32 m_eventType; // EventFlags (16 bits) + from DictTabInfo::TableType (16 bits) - Uint32 m_eventId; // event table id set by DICT/SUMA - Uint32 m_eventKey; // event table key set by DICT/SUMA - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } - CreateEvntReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (CreateEvntReq::RequestType)val; - } - void setRequestType(CreateEvntReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getTableVersion() const { - return m_tableVersion; - } - void setTableVersion(Uint32 val) { - m_tableVersion = val; - } - AttributeMask 
getAttrListBitmask() const { - AttributeMask tmp; - tmp.assign(m_attrListBitmask); - return tmp; - } - void setAttrListBitmask(const AttributeMask & val) { - AttributeMask::assign(m_attrListBitmask.data, val); - } - Uint32 getEventType() const { - return m_eventType & ~EF_ALL; - } - void setEventType(Uint32 val) { - m_eventType = (m_eventType & EF_ALL) | (~EF_ALL & (Uint32)val); - } - Uint32 getEventId() const { - return m_eventId; - } - void setEventId(Uint32 val) { - m_eventId = val; - } - Uint32 getEventKey() const { - return m_eventKey; - } - void setEventKey(Uint32 val) { - m_eventKey = val; - } - void clearFlags() { - m_eventType&= ~EF_ALL; - } - Uint32 getReportFlags() const { - return m_eventType & EF_ALL; - } - void setReportFlags(Uint32 val) { - m_eventType = (val & EF_ALL) | (m_eventType & ~EF_ALL); - } - Uint32 getReportAll() const { - return m_eventType & EF_REPORT_ALL ; - } - void setReportAll() { - m_eventType|= EF_REPORT_ALL; - } - Uint32 getReportSubscribe() const { - return m_eventType & EF_REPORT_SUBSCRIBE ; - } - void setReportSubscribe() { - m_eventType|= EF_REPORT_SUBSCRIBE; - } -}; - -/** - * CreateEvntConf. - */ -class CreateEvntConf { - friend bool printCREATE_EVNT_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - // STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 8+MAXNROFATTRIBUTESINWORDS ); - - union { - Uint32 m_userRef; // user block reference - Uint32 senderRef; // user block reference - }; - union { - Uint32 m_userData; // user - Uint32 senderData; // user - }; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_tableVersion; // table version - AttributeMask m_attrListBitmask; - Uint32 m_eventType; - Uint32 m_eventId; - Uint32 m_eventKey; - - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } - CreateEvntReq::RequestType getRequestType() const { - return (CreateEvntReq::RequestType)m_requestInfo; - } - void setRequestType(CreateEvntReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getTableVersion() const { - return m_tableVersion; - } - void setTableVersion(Uint32 val) { - m_tableVersion = val; - } - AttributeMask getAttrListBitmask() const { - return m_attrListBitmask; - } - void setAttrListBitmask(const AttributeMask & val) { - m_attrListBitmask = val; - } - Uint32 getEventType() const { - return m_eventType; - } - void setEventType(Uint32 val) { - m_eventType = (Uint32)val; - } - Uint32 getEventId() const { - return m_eventId; - } - void setEventId(Uint32 val) { - m_eventId = val; - } - Uint32 getEventKey() const { - return m_eventKey; - } - void setEventKey(Uint32 val) { - m_eventKey = val; - } -}; - -/** - * CreateEvntRef. 
- */ -struct CreateEvntRef { - friend class SafeCounter; - friend bool printCREATE_EVNT_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 11 ); - STATIC_CONST( SignalLength2 = SignalLength + 1 ); - enum ErrorCode { - NoError = 0, - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701, - NotMaster = 702 - }; - union { - Uint32 m_userRef; // user block reference - Uint32 senderRef; // user block reference - }; - union { - Uint32 m_userData; // user - Uint32 senderData; // user - }; - - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_tableVersion; // table version - Uint32 m_eventType; - Uint32 m_eventId; - Uint32 m_eventKey; - Uint32 errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // with SignalLength2 - Uint32 m_masterNodeId; - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getUserData() const { - return m_userData; - } - void setUserData(Uint32 val) { - m_userData = val; - } - CreateEvntReq::RequestType getRequestType() const { - return (CreateEvntReq::RequestType)m_requestInfo; - } - void setRequestType(CreateEvntReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getTableVersion() const { - return m_tableVersion; - } - void setTableVersion(Uint32 val) { - m_tableVersion = val; - } - - Uint32 getEventType() const { - return m_eventType; - } - void setEventType(Uint32 val) { - m_eventType = (Uint32)val; - } - Uint32 getEventId() const { - return m_eventId; - } - void setEventId(Uint32 val) { - m_eventId = val; - } - Uint32 getEventKey() const { - return m_eventKey; - } - void setEventKey(Uint32 val) { - m_eventKey = val; - } - - Uint32 getErrorCode() const { - return errorCode; - } - void setErrorCode(Uint32 val) { - errorCode = val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } - Uint32 getMasterNode() const { - return m_masterNodeId; - } - void setMasterNode(Uint32 val) { - m_masterNodeId = val; - } -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp b/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp deleted file mode 100644 index 38b2b9020e0..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateFilegroup.hpp +++ /dev/null @@ -1,202 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_FILEGROUP_HPP -#define CREATE_FILEGROUP_HPP - -#include "SignalData.hpp" - -struct CreateFilegroupReq { - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - friend class Dbdict; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 objType; - SECTION( FILEGROUP_INFO = 0 ); -}; - -struct CreateFilegroupRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 7 ); - - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - NoMoreObjectRecords = 710, - InvalidFormat = 740, - OutOfFilegroupRecords = 765, - InvalidExtentSize = 764, - InvalidUndoBufferSize = 779, - NoSuchLogfileGroup = 767, - InvalidFilegroupVersion = 768, - SingleUser = 299 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 status; -}; - -struct CreateFilegroupConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 filegroupId; - Uint32 filegroupVersion; -}; - -struct CreateFileReq { - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - friend class Dbdict; - friend class Tsman; - - /** - * For printing - */ - friend bool printCREATE_FILE_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 objType; - Uint32 requestInfo; - - enum RequstInfo - { - ForceCreateFile = 0x1 - }; - - SECTION( FILE_INFO = 0 ); -}; - -struct CreateFileRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_FILE_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 7 ); - - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - NoMoreObjectRecords = 710, - InvalidFormat = 752, - NoSuchFilegroup = 753, - InvalidFilegroupVersion = 754, - FilenameAlreadyExists = 760, - OutOfFileRecords = 751, - InvalidFileType = 750, - NotSupportedWhenDiskless = 775, - SingleUser = 299 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 status; -}; - -struct CreateFileConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_FILE_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 fileId; - Uint32 fileVersion; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp 
b/storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp deleted file mode 100644 index c301c93c63d..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateFilegroupImpl.hpp +++ /dev/null @@ -1,195 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_FILEGROUP_IMPL_HPP -#define CREATE_FILEGROUP_IMPL_HPP - -#include "SignalData.hpp" - -struct CreateFilegroupImplReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_IMPL_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( TablespaceLength = 6 ); - STATIC_CONST( LogfileGroupLength = 5 ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 filegroup_id; - Uint32 filegroup_version; - - union { - struct { - Uint32 extent_size; - Uint32 logfile_group_id; - } tablespace; - struct { - Uint32 buffer_size; // In pages - } logfile_group; - }; -}; - -struct CreateFilegroupImplRef { - /** - * Sender(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_IMPL_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - NoError = 0, - FilegroupAlreadyExists = 1502, - OutOfFilegroupRecords = 1503, - OutOfLogBufferMemory = 1504, - OneLogfileGroupLimit = 1514 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; -}; - -struct CreateFilegroupImplConf { - /** - * Sender(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printCREATE_FILEGROUP_IMPL_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderData; - Uint32 senderRef; -}; - -struct CreateFileImplReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printCREATE_FILE_IMPL_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( DatafileLength = 9 ); - STATIC_CONST( UndofileLength = 8 ); - STATIC_CONST( CommitLength = 6 ); - STATIC_CONST( AbortLength = 6 ); - SECTION( FILENAME = 0 ); - - enum RequestInfo { - Create = 0x1, - CreateForce = 0x2, - Open = 0x4, - Commit = 0x8, - Abort = 0x10 - }; - - Uint32 senderData; - Uint32 senderRef; - - Uint32 requestInfo; - Uint32 file_id; - Uint32 filegroup_id; - Uint32 filegroup_version; - Uint32 file_size_hi; - Uint32 file_size_lo; - - union { - struct { - Uint32 extent_size; - } tablespace; - }; -}; - -struct CreateFileImplRef { - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printCREATE_FILE_IMPL_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 5 ); - - enum ErrorCode { - NoError = 0, 
- InvalidFilegroup = 1505, - InvalidFilegroupVersion = 1506, - FileNoAlreadyExists = 1507, - OutOfFileRecords = 1508, - FileError = 1509, - InvalidFileMetadata = 1510, - OutOfMemory = 1511, - FileReadError = 1512, - FilegroupNotOnline = 1513, - FileSizeTooLarge = 1515 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - Uint32 fsErrCode; - Uint32 osErrCode; -}; - -struct CreateFileImplConf { - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - - /** - * For printing - */ - friend bool printCREATE_FILE_IMPL_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 senderRef; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateFrag.hpp b/storage/ndb/include/kernel/signaldata/CreateFrag.hpp deleted file mode 100644 index 8a3a820ae32..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateFrag.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_FRAG_HPP -#define CREATE_FRAG_HPP - -class CreateFragReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 8 ); - - enum ReplicaType { - STORED = 7, - COMMIT_STORED = 9 - }; -private: - - Uint32 userPtr; - BlockReference userRef; - Uint32 tableId; - Uint32 fragId; - Uint32 startingNodeId; - Uint32 copyNodeId; - Uint32 startGci; - Uint32 replicaType; -}; - -class CreateFragConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 5 ); -private: - - Uint32 userPtr; - Uint32 tableId; - Uint32 fragId; - Uint32 sendingNodeId; - Uint32 startingNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp b/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp deleted file mode 100644 index 9083ab350b1..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_FRAGMENTATION_REQ_HPP -#define CREATE_FRAGMENTATION_REQ_HPP - -#include "SignalData.hpp" - -class CreateFragmentationReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; - - friend bool printCREATE_FRAGMENTATION_REQ(FILE *, - const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 fragmentationType; - Uint32 noOfFragments; - Uint32 primaryTableId; // use same fragmentation as this table if not RNIL -}; - -class CreateFragmentationRef { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printCREATE_FRAGMENTATION_REF(FILE *, - const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - OK = 0 - ,InvalidNodeGroup = 771 - ,InvalidFragmentationType = 772 - ,InvalidPrimaryTable = 749 - }; - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; -}; - -class CreateFragmentationConf { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printCREATE_FRAGMENTATION_CONF(FILE *, - const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - SECTION( FRAGMENTS = 0 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 noOfReplicas; - Uint32 noOfFragments; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateIndx.hpp b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp deleted file mode 100644 index 6f77a9e6e42..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateIndx.hpp +++ /dev/null @@ -1,301 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_INDX_HPP -#define CREATE_INDX_HPP - -#include "SignalData.hpp" -#include -#include - -/** - * CreateIndxReq. 
- */ -class CreateIndxReq { - friend bool printCREATE_INDX_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TC = 5 << 8 - }; - STATIC_CONST( SignalLength = 8 ); - SECTION( ATTRIBUTE_LIST_SECTION = 0 ); - SECTION( INDEX_NAME_SECTION = 1 ); - -private: - Uint32 m_connectionPtr; // user "schema connection" - Uint32 m_userRef; // user block reference - Uint32 m_requestInfo; - Uint32 m_tableId; // table to index - Uint32 m_indexType; // from DictTabInfo::TableType - Uint32 m_indexId; // index table id set by DICT - Uint32 m_indexVersion; // index table version set by DICT - Uint32 m_online; // alter online - // extra - Uint32 m_opKey; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - CreateIndxReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (CreateIndxReq::RequestType)val; - } - void setRequestType(CreateIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - DictTabInfo::TableType getIndexType() const { - return (DictTabInfo::TableType)m_indexType; - } - void setIndexType(DictTabInfo::TableType val) { - m_indexType = (Uint32)val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getOnline() const { - return m_online; - } - void setOnline(Uint32 val) { - m_online = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * CreateIndxConf. 
- */ -class CreateIndxConf { - friend bool printCREATE_INDX_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 m_connectionPtr; - Uint32 m_userRef; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexType; - Uint32 m_indexId; - Uint32 m_indexVersion; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - CreateIndxReq::RequestType getRequestType() const { - return (CreateIndxReq::RequestType)m_requestInfo; - } - void setRequestType(CreateIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - DictTabInfo::TableType getIndexType() const { - return (DictTabInfo::TableType)m_indexType; - } - void setIndexType(DictTabInfo::TableType val) { - m_indexType = (Uint32)val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } -}; - -/** - * CreateIndxRef. - */ -struct CreateIndxRef { - friend bool printCREATE_INDX_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = CreateIndxReq::SignalLength + 3 ); - enum ErrorCode { - NoError = 0, - Busy = 701, - BusyWithNR = 711, - NotMaster = 702, - IndexOnDiskAttributeError = 756, - TriggerNotFound = 4238, - TriggerExists = 4239, - IndexNameTooLong = 4241, - TooManyIndexes = 4242, - IndexExists = 4244, - AttributeNullable = 4246, - BadRequestType = 4247, - InvalidName = 4248, - InvalidPrimaryTable = 4249, - InvalidIndexType = 4250, - NotUnique = 4251, - AllocationError = 4252, - CreateIndexTableFailed = 4253, - DuplicateAttributes = 4258, - SingleUser = 299, - TableIsTemporary = 776, - TableIsNotTemporary = 777, - NoLoggingTemporaryIndex = 778 - }; - - CreateIndxConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexType; - //Uint32 m_indexId; - //Uint32 m_indexVersion; - Uint32 m_errorCode; - Uint32 m_errorLine; - union { - Uint32 m_errorNode; - Uint32 masterNodeId; // If NotMaster - }; -public: - CreateIndxConf* getConf() { - return &m_conf; - } - const CreateIndxConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - CreateIndxReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(CreateIndxReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - DictTabInfo::TableType getIndexType() const { - return m_conf.getIndexType(); - } - void setIndexType(DictTabInfo::TableType val) { - m_conf.setIndexType(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - Uint32 getIndexVersion() const { - return m_conf.getIndexVersion(); - } - void 
setIndexVersion(Uint32 val) { - m_conf.setIndexVersion(val); - } - CreateIndxRef::ErrorCode getErrorCode() const { - return (CreateIndxRef::ErrorCode)m_errorCode; - } - void setErrorCode(CreateIndxRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateObj.hpp b/storage/ndb/include/kernel/signaldata/CreateObj.hpp deleted file mode 100644 index fcab3949e48..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateObj.hpp +++ /dev/null @@ -1,107 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_OBJ_HPP -#define CREATE_OBJ_HPP - -#include "DictObjOp.hpp" -#include "SignalData.hpp" - -/** - * CreateObj - * - * Implemenatation of CreateObj - */ -struct CreateObjReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - - /** - * For printing - */ - friend bool printCREATE_OBJ_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 10 ); - STATIC_CONST( GSN = GSN_CREATE_OBJ_REQ ); - -private: - Uint32 op_key; - Uint32 senderRef; - Uint32 senderData; - Uint32 requestInfo; - - Uint32 clientRef; - Uint32 clientData; - - Uint32 objId; - Uint32 objType; - Uint32 objVersion; - Uint32 gci; - - SECTION( DICT_OBJ_INFO = 0 ); -}; - -struct CreateObjRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class SafeCounter; - - /** - * For printing - */ - friend bool printCREATE_OBJ_REF(FILE *, const Uint32 *, Uint32, Uint16); - - STATIC_CONST( SignalLength = 6 ); - STATIC_CONST( GSN = GSN_CREATE_OBJ_REF ); - - enum ErrorCode { - NF_FakeErrorREF = 255 - }; - - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 errorStatus; -}; - -struct CreateObjConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - - /** - * For printing - */ - friend bool printCREATE_OBJ_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderRef; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateTab.hpp b/storage/ndb/include/kernel/signaldata/CreateTab.hpp deleted file mode 100644 index 9a67f73cbb6..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateTab.hpp +++ /dev/null @@ -1,109 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_TAB_HPP -#define CREATE_TAB_HPP - -#include "SignalData.hpp" - -/** - * CreateTab - * - * Implemenatation of CreateTable - */ -class CreateTabReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - - /** - * For printing - */ - friend bool printCREATE_TAB_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 8 ); - - enum RequestType { - CreateTablePrepare = 0, // Prepare create table - CreateTableCommit = 1, // Commit create table - CreateTableDrop = 2 // Prepare failed, drop instead - }; -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 clientRef; - Uint32 clientData; - - Uint32 tableId; - Uint32 tableVersion; - Uint32 gci; - Uint32 requestType; - - SECTION( DICT_TAB_INFO = 0 ); - SECTION( FRAGMENTATION = 1 ); -}; - -struct CreateTabRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class SafeCounter; - - /** - * For printing - */ - friend bool printCREATE_TAB_REF(FILE *, const Uint32 *, Uint32, Uint16); - - STATIC_CONST( SignalLength = 6 ); - STATIC_CONST( GSN = GSN_CREATE_TAB_REF ); - - enum ErrorCode { - NF_FakeErrorREF = 255 - }; - - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 errorStatus; -}; - -class CreateTabConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Suma; - - /** - * For printing - */ - friend bool printCREATE_TAB_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderRef; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateTable.hpp b/storage/ndb/include/kernel/signaldata/CreateTable.hpp deleted file mode 100644 index 59a7f410f62..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_TABLE_HPP -#define CREATE_TABLE_HPP - -#include "SignalData.hpp" - -/** - * CreateTable - * - * This signal is sent by API to DICT/TRIX - * as a request to create a secondary index - * and then from TRIX to TRIX(n) and TRIX to TC. 
- */ -class CreateTableReq { - /** - * Sender(s) - */ - // API - - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - friend class Dbdict; - friend class Ndbcntr; - - /** - * For printing - */ - friend bool printCREATE_TABLE_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; - Uint32 senderRef; - - SECTION( DICT_TAB_INFO = 0 ); -}; - -class CreateTableRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_TABLE_REF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - - enum ErrorCode { - NoError = 0, - Busy = 701, - BusyWithNR = 711, - NotMaster = 702, - InvalidFormat = 703, - AttributeNameTooLong = 704, - TableNameTooLong = 705, - Inconsistency = 706, - NoMoreTableRecords = 707, - NoMoreAttributeRecords = 708, - AttributeNameTwice = 720, - TableAlreadyExist = 721, - InvalidArraySize = 736, - ArraySizeTooBig = 737, - RecordTooBig = 738, - InvalidPrimaryKeySize = 739, - NullablePrimaryKey = 740, - InvalidCharset = 743, - SingleUser = 299, - InvalidTablespace = 755, - VarsizeBitfieldNotSupported = 757, - NotATablespace = 758, - InvalidTablespaceVersion = 759, - OutOfStringBuffer = 773, - NoLoggingTemporaryTable = 778 - }; - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - Uint32 status; - -public: - Uint32 getErrorCode() const { - return errorCode; - } - Uint32 getErrorLine() const { - return errorLine; - } -}; - -class CreateTableConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printCREATE_TABLE_REF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 tableVersion; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/CreateTrig.hpp b/storage/ndb/include/kernel/signaldata/CreateTrig.hpp deleted file mode 100644 index e54f37fc62a..00000000000 --- a/storage/ndb/include/kernel/signaldata/CreateTrig.hpp +++ /dev/null @@ -1,423 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CREATE_TRIG_HPP -#define CREATE_TRIG_HPP - -#include "SignalData.hpp" -#include -#include -#include - -/** - * CreateTrigReq. 
- */ -class CreateTrigReq { - friend bool printCREATE_TRIG_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_ALTER_INDEX = 2, - RT_BUILD_INDEX = 3, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_CREATE = 2 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TC = 5 << 8, - RT_LQH = 6 << 8 - }; - STATIC_CONST( SignalLength = 9 + MAXNROFATTRIBUTESINWORDS); - SECTION( TRIGGER_NAME_SECTION = 0 ); - SECTION( ATTRIBUTE_MASK_SECTION = 1 ); // not yet in use - enum KeyValues { - TriggerNameKey = 0xa1 - }; - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; // only for index trigger - Uint32 m_triggerId; // only set by DICT - Uint32 m_triggerInfo; // flags | event | timing | type - Uint32 m_online; // alter online (not normally for subscription) - Uint32 m_receiverRef; // receiver for subscription trigger - AttributeMask m_attributeMask; - // extra - Uint32 m_opKey; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - CreateTrigReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (CreateTrigReq::RequestType)val; - } - void setRequestType(CreateTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } - Uint32 getTriggerInfo() const { - return m_triggerInfo; - } - void setTriggerInfo(Uint32 val) { - m_triggerInfo = val; - } - TriggerType::Value getTriggerType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 0, 8); - return (TriggerType::Value)val; - } - void setTriggerType(TriggerType::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 0, 8, (Uint32)val); - } - TriggerActionTime::Value getTriggerActionTime() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 8, 8); - return (TriggerActionTime::Value)val; - } - void setTriggerActionTime(TriggerActionTime::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 8, 8, (Uint32)val); - } - TriggerEvent::Value getTriggerEvent() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 16, 8); - return (TriggerEvent::Value)val; - } - void setTriggerEvent(TriggerEvent::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 16, 8, (Uint32)val); - } - bool getMonitorReplicas() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 24, 1); - } - void setMonitorReplicas(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 24, 1, val); - } - bool getMonitorAllAttributes() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 25, 1); - } - void setMonitorAllAttributes(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val); - } - bool getReportAllMonitoredAttributes() const { - return 
BitmaskImpl::getField(1, &m_triggerInfo, 26, 1); - } - void setReportAllMonitoredAttributes(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 26, 1, val); - } - Uint32 getOnline() const { - return m_online; - } - void setOnline(Uint32 val) { - m_online = val; - } - Uint32 getReceiverRef() const { - return m_receiverRef; - } - void setReceiverRef(Uint32 val) { - m_receiverRef = val; - } - AttributeMask& getAttributeMask() { - return m_attributeMask; - } - const AttributeMask& getAttributeMask() const { - return m_attributeMask; - } - void clearAttributeMask() { - m_attributeMask.clear(); - } - void setAttributeMask(const AttributeMask& val) { - m_attributeMask = val; - } - void setAttributeMask(Uint16 val) { - m_attributeMask.set(val); - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * CreateTrigConf. - */ -class CreateTrigConf { - friend bool printCREATE_TRIG_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_triggerId; - Uint32 m_triggerInfo; // BACKUP wants this - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - CreateTrigReq::RequestType getRequestType() const { - return (CreateTrigReq::RequestType)m_requestInfo; - } - void setRequestType(CreateTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } - Uint32 getTriggerInfo() const { - return m_triggerInfo; - } - void setTriggerInfo(Uint32 val) { - m_triggerInfo = val; - } - TriggerType::Value getTriggerType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 0, 8); - return (TriggerType::Value)val; - } - void setTriggerType(TriggerType::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 0, 8, (Uint32)val); - } - TriggerActionTime::Value getTriggerActionTime() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 8, 8); - return (TriggerActionTime::Value)val; - } - void setTriggerActionTime(TriggerActionTime::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 8, 8, (Uint32)val); - } - TriggerEvent::Value getTriggerEvent() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 16, 8); - return (TriggerEvent::Value)val; - } - void setTriggerEvent(TriggerEvent::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 16, 8, (Uint32)val); - } - bool getMonitorReplicas() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 24, 1); - } - void setMonitorReplicas(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 24, 1, val); - } - bool getMonitorAllAttributes() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 25, 1); - } - void setMonitorAllAttributes(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val); - } -}; - -/** - * CreateTrigRef. 
- */ -class CreateTrigRef { - friend bool printCREATE_TRIG_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - TriggerNameTooLong = 4236, - TooManyTriggers = 4237, - TriggerNotFound = 4238, - TriggerExists = 4239, - UnsupportedTriggerType = 4240, - BadRequestType = 4247, - InvalidName = 4248, - InvalidTable = 4249 - }; - STATIC_CONST( SignalLength = CreateTrigConf::SignalLength + 3 ); - -private: - CreateTrigConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexId; - //Uint32 m_triggerId; - //Uint32 m_triggerInfo; - Uint32 m_errorCode; - Uint32 m_errorLine; - union { - Uint32 m_errorNode; - Uint32 masterNodeId; // When NotMaster - }; -public: - CreateTrigConf* getConf() { - return &m_conf; - } - const CreateTrigConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - CreateTrigReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(CreateTrigReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - Uint32 getTriggerId() const { - return m_conf.getTriggerId(); - } - void setTriggerId(Uint32 val) { - m_conf.setTriggerId(val); - } - Uint32 getTriggerInfo() const { - return m_conf.getTriggerInfo(); - } - void setTriggerInfo(Uint32 val) { - m_conf.setTriggerInfo(val); - } - TriggerType::Value getTriggerType() const { - return m_conf.getTriggerType(); - } - void setTriggerType(TriggerType::Value val) { - m_conf.setTriggerType(val); - } - TriggerActionTime::Value getTriggerActionTime() const { - return m_conf.getTriggerActionTime(); - } - void setTriggerActionTime(TriggerActionTime::Value val) { - m_conf.setTriggerActionTime(val); - } - TriggerEvent::Value getTriggerEvent() const { - return m_conf.getTriggerEvent(); - } - void setTriggerEvent(TriggerEvent::Value val) { - m_conf.setTriggerEvent(val); - } - bool getMonitorReplicas() const { - return m_conf.getMonitorReplicas(); - } - void setMonitorReplicas(bool val) { - m_conf.setMonitorReplicas(val); - } - bool getMonitorAllAttributes() const { - return m_conf.getMonitorAllAttributes(); - } - void setMonitorAllAttributes(bool val) { - m_conf.setMonitorAllAttributes(val); - } - CreateTrigRef::ErrorCode getErrorCode() const { - return (CreateTrigRef::ErrorCode)m_errorCode; - } - void setErrorCode(CreateTrigRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp deleted file mode 100644 index 4c7ab5b973b..00000000000 --- a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp +++ /dev/null @@ -1,92 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This 
program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIADDTABREQ_HPP -#define DIADDTABREQ_HPP - -#include "SignalData.hpp" - -class DiAddTabReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 10 ); - SECTION( FRAGMENTATION = 0 ); - SECTION( TS_RANGE = 0 ); - -private: - Uint32 connectPtr; - Uint32 tableId; - Uint32 fragType; - Uint32 kValue; - Uint32 noOfReplicas; //Currently not used - Uint32 loggedTable; - Uint32 tableType; - Uint32 schemaVersion; - Uint32 primaryTableId; - Uint32 temporaryTable; -}; - -class DiAddTabRef { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; -public: - STATIC_CONST( SignalLength = 2 ); - -private: - union { - Uint32 connectPtr; - Uint32 senderData; - }; - Uint32 errorCode; -}; - -class DiAddTabConf { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; -public: - STATIC_CONST( SignalLength = 1 ); - -private: - union { - Uint32 connectPtr; - Uint32 senderData; - }; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp b/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp deleted file mode 100644 index 26ebd26059d..00000000000 --- a/storage/ndb/include/kernel/signaldata/DiGetNodes.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIGETNODES_HPP -#define DIGETNODES_HPP - -#include -#include - -/** - * - */ -class DiGetNodesConf { - /** - * Receiver(s) - */ - friend class Dbtc; - /** - * Sender(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 3 + MAX_REPLICAS ); -private: - Uint32 zero; - Uint32 fragId; - Uint32 reqinfo; - Uint32 nodes[MAX_REPLICAS]; -}; -/** - * - */ -class DiGetNodesReq { - /** - * Sender(s) - */ - friend class Dbtc; - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 3 ); -private: - Uint32 notUsed; - Uint32 tableId; - Uint32 hashValue; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictLock.hpp b/storage/ndb/include/kernel/signaldata/DictLock.hpp deleted file mode 100644 index 485ffedfd1a..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictLock.hpp +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2003, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_LOCK_HPP -#define DICT_LOCK_HPP - -#include "SignalData.hpp" - -// see comments in Dbdict.hpp - -class DictLockReq { - friend class Dbdict; - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 3 ); - enum LockType { - NoLock = 0, - NodeRestartLock = 1 - }; -private: - Uint32 userPtr; - Uint32 lockType; - Uint32 userRef; -}; - -class DictLockConf { - friend class Dbdict; - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 3 ); -private: - Uint32 userPtr; - Uint32 lockType; - Uint32 lockPtr; -}; - -class DictLockRef { - friend class Dbdict; - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 3 ); - enum ErrorCode { - NotMaster = 1, - InvalidLockType = 2, - BadUserRef = 3, - TooLate = 4, - TooManyRequests = 5 - }; -private: - Uint32 userPtr; - Uint32 lockType; - Uint32 errorCode; -}; - -class DictUnlockOrd { - friend class Dbdict; - friend class Dbdih; -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 lockPtr; - Uint32 lockType; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictObjOp.hpp b/storage/ndb/include/kernel/signaldata/DictObjOp.hpp deleted file mode 100644 index 936aa42cead..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictObjOp.hpp +++ /dev/null @@ -1,104 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_OBJ_OP_HPP -#define DICT_OBJ_OP_HPP - -struct DictObjOp { - - enum RequestType { - Prepare = 0, // Prepare create obj - Commit = 1, // Commit create obj - Abort = 2 // Prepare failed, drop instead - }; - - enum State { - Defined = 0, - Preparing = 1, - Prepared = 2, - Committing = 3, - Committed = 4, - Aborting = 5, - Aborted = 6 - }; -}; - -struct DictCommitReq -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 op_key; - - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( GSN = GSN_DICT_COMMIT_REQ ); -}; - -struct DictCommitRef -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - enum ErrorCode - { - NF_FakeErrorREF = 1 - }; - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( GSN = GSN_DICT_COMMIT_REF ); -}; - -struct DictCommitConf -{ - Uint32 senderData; - Uint32 senderRef; - - STATIC_CONST( SignalLength = 2 ); - STATIC_CONST( GSN = GSN_DICT_COMMIT_CONF ); -}; - -struct DictAbortReq -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 op_key; - - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( GSN = GSN_DICT_ABORT_REQ ); -}; - -struct DictAbortRef -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - enum ErrorCode - { - NF_FakeErrorREF = 1 - }; - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( GSN = GSN_DICT_ABORT_REF ); -}; - -struct DictAbortConf -{ - Uint32 senderData; - Uint32 senderRef; - - STATIC_CONST( SignalLength = 2 ); - STATIC_CONST( GSN = GSN_DICT_ABORT_CONF ); -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp b/storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp deleted file mode 100644 index be07fe5bb5b..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictSchemaInfo.hpp +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_SCHEMA_INFO_HPP -#define DICT_SCHEMA_INFO_HPP - -#include "SignalData.hpp" - -class DictSchemaInfo { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 22 ); - -private: - Uint32 senderRef; - Uint32 offset; - Uint32 totalLen; - - /** - * Length in this = signal->length() - 3 - * Sender block ref = signal->senderBlockRef() - */ - - Uint32 schemaInfoData[22]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp deleted file mode 100644 index f68f9a969de..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictSizeAltReq.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_SIZE_ALT_REQ_H -#define DICT_SIZE_ALT_REQ_H - - - -#include "SignalData.hpp" - -class DictSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dbdict; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_ATTRIBUTE = 1 ); - STATIC_CONST( IND_CONNECT = 2 ); - STATIC_CONST( IND_FRAG_CONNECT = 3 ); - STATIC_CONST( IND_TABLE = 4 ); - STATIC_CONST( IND_TC_CONNECT = 5 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[6]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictStart.hpp b/storage/ndb/include/kernel/signaldata/DictStart.hpp deleted file mode 100644 index fcb3af5467a..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictStart.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_START_HPP -#define DICT_START_HPP - -class DictStartReq { - /** - * Sender(s) - */ - friend class Dbdih; - /** - * Receiver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - - Uint32 restartGci; - Uint32 senderRef; -}; - -class DictStartConf { - /** - * Sender(s) - */ - friend class Dbdict; - /** - * Receiver(s) - */ - friend class Dbdih; - -public: -private: - - Uint32 startingNodeId; - Uint32 startWord; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp deleted file mode 100644 index a858f19690a..00000000000 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ /dev/null @@ -1,727 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DICT_TAB_INFO_HPP -#define DICT_TAB_INFO_HPP - -#include "SignalData.hpp" -#include -#include -#include -#include -#include - -#ifndef my_decimal_h - -// sql/my_decimal.h requires many more sql/*.h new to ndb -// for now, copy the bit we need TODO proper fix - -#define DECIMAL_MAX_LENGTH ((8 * 9) - 8) - -C_MODE_START -extern int decimal_bin_size(int, int); -C_MODE_END - -inline int my_decimal_get_binary_size(uint precision, uint scale) -{ - return decimal_bin_size((int)precision, (int)scale); -} - -#endif - -#define DTIMAP(x, y, z) \ - { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } - -#define DTIMAP2(x, y, z, u, v) \ - { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } - -#define DTIMAPS(x, y, z, u, v) \ - { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } - -#define DTIMAPB(x, y, z, u, v, l) \ - { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - my_offsetof(x, l) } - -#define DTIBREAK(x) \ - { DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } - -class DictTabInfo { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Backup; - friend class Dbdict; - friend class Ndbcntr; - friend class Trix; - friend class DbUtil; - // API - friend class NdbSchemaOp; - - /** - * For printing - */ - friend bool printDICTTABINFO(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - -public: - enum RequestType { - CreateTableFromAPI = 1, - AddTableFromDict = 2, // Between DICT's - CopyTable = 3, // Between DICT's - ReadTableFromDiskSR = 4, // Local in DICT - GetTabInfoConf = 5, - AlterTableFromAPI = 6 - }; - - enum KeyValues { - TableName = 1, // String, Mandatory - TableId = 2, //Mandatory between DICT's otherwise not allowed - TableVersion = 3, //Mandatory between 
DICT's otherwise not allowed - TableLoggedFlag = 4, //Default Logged - NoOfKeyAttr = 5, //Default 1 - NoOfAttributes = 6, //Mandatory - NoOfNullable = 7, //Deafult 0 - NoOfVariable = 8, //Default 0 - TableKValue = 9, //Default 6 - MinLoadFactor = 10, //Default 70 - MaxLoadFactor = 11, //Default 80 - KeyLength = 12, //Default 1 (No of words in primary key) - FragmentTypeVal = 13, //Default AllNodesSmallTable - TableTypeVal = 18, //Default TableType::UserTable - PrimaryTable = 19, //Mandatory for index otherwise RNIL - PrimaryTableId = 20, //ditto - IndexState = 21, - InsertTriggerId = 22, - UpdateTriggerId = 23, - DeleteTriggerId = 24, - CustomTriggerId = 25, - FrmLen = 26, - FrmData = 27, - - TableTemporaryFlag = 28, //Default not Temporary - ForceVarPartFlag = 29, - - FragmentCount = 128, // No of fragments in table (!fragment replicas) - FragmentDataLen = 129, - FragmentData = 130, // CREATE_FRAGMENTATION reply - TablespaceId = 131, - TablespaceVersion = 132, - TablespaceDataLen = 133, - TablespaceData = 134, - RangeListDataLen = 135, - RangeListData = 136, - ReplicaDataLen = 137, - ReplicaData = 138, - MaxRowsLow = 139, - MaxRowsHigh = 140, - DefaultNoPartFlag = 141, - LinearHashFlag = 142, - MinRowsLow = 143, - MinRowsHigh = 144, - - RowGCIFlag = 150, - RowChecksumFlag = 151, - - SingleUserMode = 152, - - TableEnd = 999, - - AttributeName = 1000, // String, Mandatory - AttributeId = 1001, //Mandatory between DICT's otherwise not allowed - AttributeType = 1002, //for osu 4.1->5.0.x - AttributeSize = 1003, //Default DictTabInfo::a32Bit - AttributeArraySize = 1005, //Default 1 - AttributeKeyFlag = 1006, //Default noKey - AttributeStorageType = 1007, //Default NDB_STORAGETYPE_MEMORY - AttributeNullableFlag = 1008, //Default NotNullable - AttributeDKey = 1010, //Default NotDKey - AttributeExtType = 1013, //Default ExtUnsigned - AttributeExtPrecision = 1014, //Default 0 - AttributeExtScale = 1015, //Default 0 - AttributeExtLength = 1016, //Default 0 - AttributeAutoIncrement = 1017, //Default false - AttributeDefaultValue = 1018, //Default value (printable string), - AttributeArrayType = 1019, //Default NDB_ARRAYTYPE_FIXED - AttributeEnd = 1999 // - }; - // ---------------------------------------------------------------------- - // Part of the protocol is that we only transfer parameters which do not - // have a default value. Thus the default values are part of the protocol. 
- // ---------------------------------------------------------------------- - - - - // FragmentType constants - enum FragmentType { - AllNodesSmallTable = 0, - AllNodesMediumTable = 1, - AllNodesLargeTable = 2, - SingleFragment = 3, - DistrKeyHash = 4, - DistrKeyLin = 5, - UserDefined = 6, - DistrKeyUniqueHashIndex = 7, - DistrKeyOrderedIndex = 8 - }; - - // TableType constants + objects - enum TableType { - UndefTableType = 0, - SystemTable = 1, - UserTable = 2, - UniqueHashIndex = 3, - HashIndex = 4, - UniqueOrderedIndex = 5, - OrderedIndex = 6, - // constant 10 hardcoded in Dbdict.cpp - HashIndexTrigger = 11, - SubscriptionTrigger = 16, - ReadOnlyConstraint = 17, - IndexTrigger = 18, - - Tablespace = 20, ///< Tablespace - LogfileGroup = 21, ///< Logfile group - Datafile = 22, ///< Datafile - Undofile = 23 ///< Undofile - }; - - // used 1) until type BlobTable added 2) in upgrade code - static bool - isBlobTableName(const char* name, Uint32* ptab_id = 0, Uint32* pcol_no = 0); - - static inline bool - isTable(int tableType) { - return - tableType == SystemTable || - tableType == UserTable; - } - static inline bool - isIndex(int tableType) { - return - tableType == UniqueHashIndex || - tableType == HashIndex || - tableType == UniqueOrderedIndex || - tableType == OrderedIndex; - } - static inline bool - isUniqueIndex(int tableType) { - return - tableType == UniqueHashIndex || - tableType == UniqueOrderedIndex; - } - static inline bool - isNonUniqueIndex(int tableType) { - return - tableType == HashIndex || - tableType == OrderedIndex; - } - static inline bool - isHashIndex(int tableType) { - return - tableType == UniqueHashIndex || - tableType == HashIndex; - } - static inline bool - isOrderedIndex(int tableType) { - return - tableType == UniqueOrderedIndex || - tableType == OrderedIndex; - } - static inline bool - isTrigger(int tableType) { - return - tableType == HashIndexTrigger || - tableType == SubscriptionTrigger || - tableType == ReadOnlyConstraint || - tableType == IndexTrigger; - } - static inline bool - isFilegroup(int tableType) { - return - tableType == Tablespace || - tableType == LogfileGroup; - } - - static inline bool - isFile(int tableType) { - return - tableType == Datafile|| - tableType == Undofile; - } - - // Object state for translating from/to API - enum ObjectState { - StateUndefined = 0, - StateOffline = 1, - StateBuilding = 2, - StateDropping = 3, - StateOnline = 4, - StateBackup = 5, - StateBroken = 9 - }; - - // Object store for translating from/to API - enum ObjectStore { - StoreUndefined = 0, - StoreNotLogged = 1, - StorePermanent = 2 - }; - - // AttributeSize constants - STATIC_CONST( aBit = 0 ); - STATIC_CONST( an8Bit = 3 ); - STATIC_CONST( a16Bit = 4 ); - STATIC_CONST( a32Bit = 5 ); - STATIC_CONST( a64Bit = 6 ); - STATIC_CONST( a128Bit = 7 ); - - // Table data interpretation - struct Table { - char TableName[MAX_TAB_NAME_SIZE]; - Uint32 TableId; - char PrimaryTable[MAX_TAB_NAME_SIZE]; // Only used when "index" - Uint32 PrimaryTableId; - Uint32 TableLoggedFlag; - Uint32 TableTemporaryFlag; - Uint32 ForceVarPartFlag; - Uint32 NoOfKeyAttr; - Uint32 NoOfAttributes; - Uint32 NoOfNullable; - Uint32 NoOfVariable; - Uint32 TableKValue; - Uint32 MinLoadFactor; - Uint32 MaxLoadFactor; - Uint32 KeyLength; - Uint32 FragmentType; - Uint32 TableType; - Uint32 TableVersion; - Uint32 IndexState; - Uint32 InsertTriggerId; - Uint32 UpdateTriggerId; - Uint32 DeleteTriggerId; - Uint32 CustomTriggerId; - Uint32 TablespaceId; - Uint32 TablespaceVersion; - Uint32 DefaultNoPartFlag; 
- Uint32 LinearHashFlag; - /* - TODO RONM: - We need to replace FRM, Fragment Data, Tablespace Data and in - very particular RangeListData with dynamic arrays - */ - Uint32 FrmLen; - char FrmData[MAX_FRM_DATA_SIZE]; - Uint32 FragmentCount; - Uint32 ReplicaDataLen; - Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES]; - Uint32 FragmentDataLen; - Uint16 FragmentData[3*MAX_NDB_PARTITIONS]; - - Uint32 MaxRowsLow; - Uint32 MaxRowsHigh; - Uint32 MinRowsLow; - Uint32 MinRowsHigh; - - Uint32 TablespaceDataLen; - Uint32 TablespaceData[2*MAX_NDB_PARTITIONS]; - Uint32 RangeListDataLen; - char RangeListData[4*2*MAX_NDB_PARTITIONS*2]; - - Uint32 RowGCIFlag; - Uint32 RowChecksumFlag; - - Uint32 SingleUserMode; - - Table() {} - void init(); - }; - - static const - SimpleProperties::SP2StructMapping TableMapping[]; - - static const Uint32 TableMappingSize; - - // AttributeExtType values - enum ExtType { - ExtUndefined = NdbSqlUtil::Type::Undefined, - ExtTinyint = NdbSqlUtil::Type::Tinyint, - ExtTinyunsigned = NdbSqlUtil::Type::Tinyunsigned, - ExtSmallint = NdbSqlUtil::Type::Smallint, - ExtSmallunsigned = NdbSqlUtil::Type::Smallunsigned, - ExtMediumint = NdbSqlUtil::Type::Mediumint, - ExtMediumunsigned = NdbSqlUtil::Type::Mediumunsigned, - ExtInt = NdbSqlUtil::Type::Int, - ExtUnsigned = NdbSqlUtil::Type::Unsigned, - ExtBigint = NdbSqlUtil::Type::Bigint, - ExtBigunsigned = NdbSqlUtil::Type::Bigunsigned, - ExtFloat = NdbSqlUtil::Type::Float, - ExtDouble = NdbSqlUtil::Type::Double, - ExtOlddecimal = NdbSqlUtil::Type::Olddecimal, - ExtOlddecimalunsigned = NdbSqlUtil::Type::Olddecimalunsigned, - ExtDecimal = NdbSqlUtil::Type::Decimal, - ExtDecimalunsigned = NdbSqlUtil::Type::Decimalunsigned, - ExtChar = NdbSqlUtil::Type::Char, - ExtVarchar = NdbSqlUtil::Type::Varchar, - ExtBinary = NdbSqlUtil::Type::Binary, - ExtVarbinary = NdbSqlUtil::Type::Varbinary, - ExtDatetime = NdbSqlUtil::Type::Datetime, - ExtDate = NdbSqlUtil::Type::Date, - ExtBlob = NdbSqlUtil::Type::Blob, - ExtText = NdbSqlUtil::Type::Text, - ExtBit = NdbSqlUtil::Type::Bit, - ExtLongvarchar = NdbSqlUtil::Type::Longvarchar, - ExtLongvarbinary = NdbSqlUtil::Type::Longvarbinary, - ExtTime = NdbSqlUtil::Type::Time, - ExtYear = NdbSqlUtil::Type::Year, - ExtTimestamp = NdbSqlUtil::Type::Timestamp - }; - - // Attribute data interpretation - struct Attribute { - char AttributeName[MAX_TAB_NAME_SIZE]; - Uint32 AttributeId; - Uint32 AttributeType; // for osu 4.1->5.0.x - Uint32 AttributeSize; - Uint32 AttributeArraySize; - Uint32 AttributeArrayType; - Uint32 AttributeKeyFlag; - Uint32 AttributeNullableFlag; - Uint32 AttributeDKey; - Uint32 AttributeExtType; - Uint32 AttributeExtPrecision; - Uint32 AttributeExtScale; - Uint32 AttributeExtLength; - Uint32 AttributeAutoIncrement; - Uint32 AttributeStorageType; - char AttributeDefaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE]; - - Attribute() {} - void init(); - - inline - Uint32 sizeInWords() - { - return ((1 << AttributeSize) * AttributeArraySize + 31) >> 5; - } - - // compute old-sty|e attribute size and array size - inline bool - translateExtType() { - switch (AttributeExtType) { - case DictTabInfo::ExtUndefined: - return false; - case DictTabInfo::ExtTinyint: - case DictTabInfo::ExtTinyunsigned: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtSmallint: - case DictTabInfo::ExtSmallunsigned: - AttributeSize = DictTabInfo::a16Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtMediumint: - case DictTabInfo::ExtMediumunsigned: - 
AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 3 * AttributeExtLength; - break; - case DictTabInfo::ExtInt: - case DictTabInfo::ExtUnsigned: - AttributeSize = DictTabInfo::a32Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtBigint: - case DictTabInfo::ExtBigunsigned: - AttributeSize = DictTabInfo::a64Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtFloat: - AttributeSize = DictTabInfo::a32Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtDouble: - AttributeSize = DictTabInfo::a64Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtOlddecimal: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = - (1 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) * - AttributeExtLength; - break; - case DictTabInfo::ExtOlddecimalunsigned: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = - (0 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) * - AttributeExtLength; - break; - case DictTabInfo::ExtDecimal: - case DictTabInfo::ExtDecimalunsigned: - { - // copy from Field_new_decimal ctor - uint precision = AttributeExtPrecision; - uint scale = AttributeExtScale; - if (precision > DECIMAL_MAX_LENGTH || scale >= NOT_FIXED_DEC) - precision = DECIMAL_MAX_LENGTH; - uint bin_size = my_decimal_get_binary_size(precision, scale); - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = bin_size * AttributeExtLength; - } - break; - case DictTabInfo::ExtChar: - case DictTabInfo::ExtBinary: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtVarchar: - case DictTabInfo::ExtVarbinary: - if (AttributeExtLength > 0xff) - return false; - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength + 1; - break; - case DictTabInfo::ExtDatetime: - // to fix - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 8 * AttributeExtLength; - break; - case DictTabInfo::ExtDate: - // to fix - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 3 * AttributeExtLength; - break; - case DictTabInfo::ExtBlob: - case DictTabInfo::ExtText: - AttributeSize = DictTabInfo::an8Bit; - // head + inline part (length in precision lower half) - AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF); - break; - case DictTabInfo::ExtBit: - AttributeSize = DictTabInfo::aBit; - AttributeArraySize = AttributeExtLength; - break; - case DictTabInfo::ExtLongvarchar: - case DictTabInfo::ExtLongvarbinary: - if (AttributeExtLength > 0xffff) - return false; - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength + 2; - break; - case DictTabInfo::ExtTime: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 3 * AttributeExtLength; - break; - case DictTabInfo::ExtYear: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 1 * AttributeExtLength; - break; - case DictTabInfo::ExtTimestamp: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 4 * AttributeExtLength; - break; - default: - return false; - }; - return true; - } - - inline void print(FILE *out) { - fprintf(out, "AttributeId = %d\n", AttributeId); - fprintf(out, "AttributeType = %d\n", AttributeType); - fprintf(out, "AttributeSize = %d\n", AttributeSize); - fprintf(out, "AttributeArraySize = %d\n", AttributeArraySize); - fprintf(out, "AttributeArrayType = %d\n", AttributeArrayType); - fprintf(out, "AttributeKeyFlag = %d\n", AttributeKeyFlag); - fprintf(out, 
"AttributeStorageType = %d\n", AttributeStorageType); - fprintf(out, "AttributeNullableFlag = %d\n", AttributeNullableFlag); - fprintf(out, "AttributeDKey = %d\n", AttributeDKey); - fprintf(out, "AttributeGroup = %d\n", AttributeGroup); - fprintf(out, "AttributeAutoIncrement = %d\n", AttributeAutoIncrement); - fprintf(out, "AttributeExtType = %d\n", AttributeExtType); - fprintf(out, "AttributeExtPrecision = %d\n", AttributeExtPrecision); - fprintf(out, "AttributeExtScale = %d\n", AttributeExtScale); - fprintf(out, "AttributeExtLength = %d\n", AttributeExtLength); - fprintf(out, "AttributeDefaultValue = \"%s\"\n", - AttributeDefaultValue ? AttributeDefaultValue : ""); - } - }; - - static const - SimpleProperties::SP2StructMapping AttributeMapping[]; - - static const Uint32 AttributeMappingSize; - - // Signal constants - STATIC_CONST( DataLength = 20 ); - STATIC_CONST( HeaderLength = 5 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 requestType; - Uint32 totalLen; - Uint32 offset; - - /** - * Length of this data = signal->length() - HeaderLength - * Sender block ref = signal->senderBlockRef() - */ - - Uint32 tabInfoData[DataLength]; - -public: - enum Depricated - { - AttributeDGroup = 1009, //Default NotDGroup - AttributeStoredInd = 1011, //Default NotStored - TableStorageVal = 14, //Disk storage specified per attribute - SecondTableId = 17, //Mandatory between DICT's otherwise not allowed - FragmentKeyTypeVal = 16 //Default PrimaryKey - }; - - enum Unimplemented - { - ScanOptimised = 15, //Default updateOptimised - AttributeGroup = 1012, //Default 0 - FileNo = 102 - }; -}; - -#define DFGIMAP(x, y, z) \ - { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 } - -#define DFGIMAP2(x, y, z, u, v) \ - { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 } - -#define DFGIMAPS(x, y, z, u, v) \ - { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 } - -#define DFGIMAPB(x, y, z, u, v, l) \ - { DictFilegroupInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \ - my_offsetof(x, l) } - -#define DFGIBREAK(x) \ - { DictFilegroupInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 } - -struct DictFilegroupInfo { - enum KeyValues { - FilegroupName = 1, - FilegroupType = 2, - FilegroupId = 3, - FilegroupVersion = 4, - - /** - * File parameters - */ - FileName = 100, - FileType = 101, - FileId = 103, - FileFGroupId = 104, - FileFGroupVersion = 105, - FileSizeHi = 106, - FileSizeLo = 107, - FileFreeExtents = 108, - FileVersion = 109, - FileEnd = 199, // - - /** - * Tablespace parameters - */ - TS_ExtentSize = 1000, // specified in bytes - TS_LogfileGroupId = 1001, - TS_LogfileGroupVersion = 1002, - TS_GrowLimit = 1003, // In bytes - TS_GrowSizeHi = 1004, - TS_GrowSizeLo = 1005, - TS_GrowPattern = 1006, - TS_GrowMaxSize = 1007, - - /** - * Logfile group parameters - */ - LF_UndoBufferSize = 2005, // In bytes - LF_UndoGrowLimit = 2000, // In bytes - LF_UndoGrowSizeHi = 2001, - LF_UndoGrowSizeLo = 2002, - LF_UndoGrowPattern = 2003, - LF_UndoGrowMaxSize = 2004, - LF_UndoFreeWordsHi = 2006, - LF_UndoFreeWordsLo = 2007 - }; - - // FragmentType constants - enum FileTypeValues { - Datafile = 0, - Undofile = 1 - //, Redofile - }; - - struct GrowSpec { - Uint32 GrowLimit; - Uint32 GrowSizeHi; - Uint32 GrowSizeLo; - char GrowPattern[PATH_MAX]; - Uint32 GrowMaxSize; - }; - - // Table data interpretation - struct Filegroup { - char FilegroupName[MAX_TAB_NAME_SIZE]; - Uint32 FilegroupType; // ObjType - 
Uint32 FilegroupId; - Uint32 FilegroupVersion; - - union { - Uint32 TS_ExtentSize; - Uint32 LF_UndoBufferSize; - }; - Uint32 TS_LogfileGroupId; - Uint32 TS_LogfileGroupVersion; - union { - GrowSpec TS_DataGrow; - GrowSpec LF_UndoGrow; - }; - //GrowSpec LF_RedoGrow; - Uint32 LF_UndoFreeWordsHi; - Uint32 LF_UndoFreeWordsLo; - Filegroup() {} - void init(); - }; - static const Uint32 MappingSize; - static const SimpleProperties::SP2StructMapping Mapping[]; - - struct File { - char FileName[PATH_MAX]; - Uint32 FileType; - Uint32 FileId; - Uint32 FileVersion; - Uint32 FilegroupId; - Uint32 FilegroupVersion; - Uint32 FileSizeHi; - Uint32 FileSizeLo; - Uint32 FileFreeExtents; - - File() {} - void init(); - }; - static const Uint32 FileMappingSize; - static const SimpleProperties::SP2StructMapping FileMapping[]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihAddFrag.hpp b/storage/ndb/include/kernel/signaldata/DihAddFrag.hpp deleted file mode 100644 index 5757d9b4d78..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihAddFrag.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIHADDFRAG_HPP -#define DIHADDFRAG_HPP - -#include -#include - -/** - * - */ -class DihAddFragConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 senderNodeId; - Uint32 tableId; -}; -/** - * - */ -class DihAddFragReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 10 + MAX_REPLICAS ); -private: - Uint32 masterRef; - Uint32 tableId; - Uint32 fragId; - Uint32 kValue; - Uint32 method; - Uint32 mask; - Uint32 hashPointer; - Uint32 noOfFragments; - Uint32 noOfBackups; - Uint32 storedTable; - Uint32 nodes[MAX_REPLICAS]; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihContinueB.hpp b/storage/ndb/include/kernel/signaldata/DihContinueB.hpp deleted file mode 100644 index 32175d7d219..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihContinueB.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
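In the Filegroup struct just removed, the tablespace and logfile-group parameters share storage through unions, and the FilegroupType field (holding the DictTabInfo object-type constants, Tablespace = 20 and LogfileGroup = 21) decides which interpretation applies. A short sketch of how a reader would typically select the right union member, assuming those headers are available:

    // Pick the size parameter that is actually valid for this filegroup.
    // TS_ExtentSize and LF_UndoBufferSize occupy the same words, so the
    // object type must be consulted first.
    static Uint32 extentOrUndoBufferSize(const DictFilegroupInfo::Filegroup& fg)
    {
      if (fg.FilegroupType == DictTabInfo::Tablespace)
        return fg.TS_ExtentSize;       // tablespace: extent size in bytes
      if (fg.FilegroupType == DictTabInfo::LogfileGroup)
        return fg.LF_UndoBufferSize;   // logfile group: undo buffer size in bytes
      return 0;                        // unknown or undefined object type
    }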
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIH_CONTINUEB_H -#define DIH_CONTINUEB_H - -#include "SignalData.hpp" - -class DihContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Dbdih; - friend bool printCONTINUEB_DBDIH(FILE * output, const Uint32 * theData, - Uint32 len, Uint16); -private: - enum Type { - ZPACK_TABLE_INTO_PAGES = 1, - ZPACK_FRAG_INTO_PAGES = 2, - ZREAD_PAGES_INTO_TABLE = 3, - ZREAD_PAGES_INTO_FRAG = 4, - //ZREAD_TAB_DESCRIPTION = 5, - ZCOPY_TABLE = 6, - ZCOPY_TABLE_NODE = 7, - ZSTART_FRAGMENT = 8, - ZCOMPLETE_RESTART = 9, - ZREAD_TABLE_FROM_PAGES = 10, - ZSR_PHASE2_READ_TABLE = 11, - ZCHECK_TC_COUNTER = 12, - ZCALCULATE_KEEP_GCI = 13, - ZSTORE_NEW_LCP_ID = 14, - ZTABLE_UPDATE = 15, - ZCHECK_LCP_COMPLETED = 16, - ZINIT_LCP = 17, - ZADD_TABLE_MASTER_PAGES = 19, - ZDIH_ADD_TABLE_MASTER = 20, - ZADD_TABLE_SLAVE_PAGES = 21, - ZDIH_ADD_TABLE_SLAVE = 22, - ZSTART_GCP = 23, - ZCOPY_GCI = 24, - ZEMPTY_VERIFY_QUEUE = 25, - ZCHECK_GCP_STOP = 26, - ZREMOVE_NODE_FROM_TABLE = 27, - ZCOPY_NODE = 28, - ZSTART_TAKE_OVER = 29, - ZCHECK_START_TAKE_OVER = 30, - ZTO_START_COPY_FRAG = 31, - ZINITIALISE_RECORDS = 33, - ZINVALIDATE_NODE_LCP = 34, - ZSTART_PERMREQ_AGAIN = 35, - SwitchReplica = 36, - ZSEND_START_TO = 37, - ZSEND_ADD_FRAG = 38, - ZSEND_CREATE_FRAG = 39, - ZSEND_UPDATE_TO = 40, - ZSEND_END_TO = 41, - - WAIT_DROP_TAB_WRITING_TO_FILE = 42, - CHECK_WAIT_DROP_TAB_FAILED_LQH = 43, - ZTO_START_FRAGMENTS = 44 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihFragCount.hpp b/storage/ndb/include/kernel/signaldata/DihFragCount.hpp deleted file mode 100644 index 49d518e9b67..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihFragCount.hpp +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright (C) 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIH_FRAG_COUNT_HPP -#define DIH_FRAG_COUNT_HPP - -#include "SignalData.hpp" - -/** - * DihFragCountReq - */ -class DihFragCountReq { - -public: - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( RetryInterval = 5 ); - Uint32 m_connectionData; - Uint32 m_tableRef; - Uint32 m_senderData; -}; - -/** - * DihFragCountConf - */ -class DihFragCountConf { - -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 m_connectionData; - Uint32 m_tableRef; - Uint32 m_senderData; - Uint32 m_fragmentCount; - Uint32 m_noOfBackups; -}; - -/** - * DihFragCountRef - */ -class DihFragCountRef { - -public: - enum ErrorCode { - ErroneousState = 0, - ErroneousTableState = 1 - }; - STATIC_CONST( SignalLength = 5 ); - Uint32 m_connectionData; - Uint32 m_tableRef; - Uint32 m_senderData; - Uint32 m_error; - Uint32 m_tableStatus; // Dbdih::TabRecord::tabStatus -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp deleted file mode 100644 index f99581d3b78..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihSizeAltReq.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIH_SIZE_ALT_REQ_H -#define DIH_SIZE_ALT_REQ_H - -#include "SignalData.hpp" - -class DihSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dbdih; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_API_CONNECT = 1 ); - STATIC_CONST( IND_CONNECT = 2 ); - STATIC_CONST( IND_FRAG_CONNECT = 3 ); - STATIC_CONST( IND_MORE_NODES = 4 ); - STATIC_CONST( IND_REPLICAS = 5 ); - STATIC_CONST( IND_TABLE = 6 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[7]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihStartTab.hpp b/storage/ndb/include/kernel/signaldata/DihStartTab.hpp deleted file mode 100644 index 5244e63817b..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihStartTab.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
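DihFragCountReq/Conf/Ref above show the standard REQ/CONF/REF exchange: the requester sends a REQ, and the receiver answers with a CONF on success or a REF carrying an error code. A sketch of the two reply paths follows; the signal structs are assumed to be included, and the printing stands in for real block logic.

    #include <cstdio>

    // Success reply: fragment count and backup count are now available.
    static void handleFragCountConf(const DihFragCountConf& conf)
    {
      std::printf("table %u: %u fragments, %u backups\n",
                  conf.m_tableRef, conf.m_fragmentCount, conf.m_noOfBackups);
    }

    // Error reply: ErroneousTableState means the table is not readable yet,
    // and the request is intended to be retried after RetryInterval.
    static void handleFragCountRef(const DihFragCountRef& ref)
    {
      if (ref.m_error == DihFragCountRef::ErroneousTableState)
        std::printf("table %u not ready, retry in %u\n",
                    ref.m_tableRef, (unsigned) DihFragCountReq::RetryInterval);
      else
        std::printf("frag count failed, error %u\n", ref.m_error);
    }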
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIH_STARTTAB__HPP -#define DIH_STARTTAB__HPP - -#include "SignalData.hpp" - -class DihStartTabReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; -public: - STATIC_CONST( HeaderLength = 3 ); - -private: - - Uint32 senderRef; - Uint32 senderData; - Uint32 noOfTables; - - struct { - Uint32 tableId; - Uint32 schemaVersion; - } tables[10]; -}; - -class DihStartTabConf { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderRef; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp b/storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp deleted file mode 100644 index f9d98ccdf1a..00000000000 --- a/storage/ndb/include/kernel/signaldata/DihSwitchReplica.hpp +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DIH_SWITCH_REPLICA_HPP -#define DIH_SWITCH_REPLICA_HPP - -/** - * This signal is sent from master DIH to all DIH's - * switches primary / backup nodes for replica(s) - * - */ -class DihSwitchReplicaReq { - /** - * Sender/Reciver - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 4 + MAX_REPLICAS ); - -private: - /** - * Request Info - * - */ - Uint32 senderRef; - Uint32 tableId; - Uint32 fragNo; - Uint32 noOfReplicas; - Uint32 newNodeOrder[MAX_REPLICAS]; -}; - -class DihSwitchReplicaRef { - /** - * Sender/Reciver - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderNode; - Uint32 errorCode; // See StopPermRef::ErrorCode -}; - -class DihSwitchReplicaConf { - /** - * Sender/Reciver - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 senderNode; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp b/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp deleted file mode 100644 index 79553ba8046..00000000000 --- a/storage/ndb/include/kernel/signaldata/DisconnectRep.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DISCONNECT_REP_HPP -#define DISCONNECT_REP_HPP - -#include "SignalData.hpp" - -/** - * - */ -class DisconnectRep { - /** - * Receiver(s) - */ - friend class Qmgr; - friend class Cmvmi; // Cmvmi - - /** - * Senders - */ - friend class Dbtc; - friend void reportDisconnect(void * , NodeId, Uint32); // TransporterCallback - - /** - * For printing - */ - friend bool printDISCONNECT_REP(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrCode { - // ErrorCodes come from different sources - // for example TransporterCallback.hpp - // or inet errno - // This one is selected not to conflict with any of them - TcReportNodeFailed = 0xFF000001 - }; - -private: - - Uint32 nodeId; - Uint32 err; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp b/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp deleted file mode 100644 index 43e1903dfd3..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropFilegroup.hpp +++ /dev/null @@ -1,196 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_FILEGROUP_HPP -#define DROP_FILEGROUP_HPP - -#include "SignalData.hpp" - -struct DropFilegroupReq { - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - friend class Dbdict; - friend class Tsman; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - STATIC_CONST( GSN = GSN_DROP_FILEGROUP_REQ ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 filegroup_id; - Uint32 filegroup_version; -}; - -struct DropFilegroupRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( GSN = GSN_DROP_FILEGROUP_REF ); - - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - NoSuchFilegroup = 767, - FilegroupInUse = 768, - InvalidSchemaObjectVersion = 774, - SingleUser = 299 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - -}; - -struct DropFilegroupConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - STATIC_CONST( GSN = GSN_DROP_FILEGROUP_CONF ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 filegroupId; - Uint32 filegroupVersion; -}; - -struct DropFileReq { - /** - * Sender(s) / Reciver(s) - */ - friend class NdbDictInterface; - friend class Dbdict; - friend class Tsman; - - /** - * For printing - */ - friend bool printDROP_FILE_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - STATIC_CONST( GSN = GSN_DROP_FILE_REQ ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 file_id; - Uint32 file_version; -}; - -struct DropFileRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printDROP_FILE_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( GSN = GSN_DROP_FILE_REF ); - - enum ErrorCode { - NoError = 0, - Busy = 701, - NotMaster = 702, - NoSuchFile = 766, - DropUndoFileNotSupported = 769, - InvalidSchemaObjectVersion = 774, - SingleUser = 299 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 masterNodeId; - Uint32 errorCode; - Uint32 errorLine; - Uint32 errorKey; - -}; - -struct DropFileConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - friend class NdbDictInterface; - - /** - * For printing - */ - friend bool printDROP_FILE_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 4 ); - STATIC_CONST( GSN = GSN_DROP_FILE_CONF ); - - Uint32 senderData; - Uint32 senderRef; - Uint32 fileId; - Uint32 fileVersion; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropFilegroupImpl.hpp b/storage/ndb/include/kernel/signaldata/DropFilegroupImpl.hpp deleted 
file mode 100644 index ebedfd0ad09..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropFilegroupImpl.hpp +++ /dev/null @@ -1,171 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_FILEGROUP_IMPL_HPP -#define DROP_FILEGROUP_IMPL_HPP - -#include "SignalData.hpp" - -struct DropFilegroupImplReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_IMPL_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 5 ); - - enum RequestInfo { - Prepare = 0x1, - Commit = 0x2, - Abort = 0x4 - }; - - Uint32 senderData; - Uint32 senderRef; - - Uint32 requestInfo; - Uint32 filegroup_id; - Uint32 filegroup_version; -}; - -struct DropFilegroupImplRef { - /** - * Sender(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_IMPL_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - NoError = 0, - NoSuchFilegroup = 767, - InvalidFilegroupVersion = 767, - FilegroupInUse = 768 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; -}; - -struct DropFilegroupImplConf { - /** - * Sender(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printDROP_FILEGROUP_IMPL_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderData; - Uint32 senderRef; -}; - -struct DropFileImplReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printDROP_FILE_IMPL_REQ(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 6 ); - - enum RequestInfo { - Prepare = 0x1, - Commit = 0x2, - Abort = 0x4 - }; - - Uint32 senderData; - Uint32 senderRef; - - Uint32 requestInfo; - Uint32 file_id; - Uint32 filegroup_id; - Uint32 filegroup_version; -}; - -struct DropFileImplRef { - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - /** - * For printing - */ - friend bool printDROP_FILE_IMPL_REF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 5 ); - - enum ErrorCode { - NoError = 0, - InvalidFilegroup = 767, - InvalidFilegroupVersion = 767, - NoSuchFile = 766, - FileInUse = 770 - }; - - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - Uint32 fsErrCode; - Uint32 osErrCode; -}; - -struct DropFileImplConf { - friend class Dbdict; - friend class Tsman; - friend class Lgman; - - - /** - * For printing - */ - friend bool printDROP_FILE_IMPL_CONF(FILE*, const Uint32*, Uint32, Uint16); - - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderData; - Uint32 senderRef; -}; - -#endif diff --git 
a/storage/ndb/include/kernel/signaldata/DropIndx.hpp b/storage/ndb/include/kernel/signaldata/DropIndx.hpp deleted file mode 100644 index ec45cad13ea..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropIndx.hpp +++ /dev/null @@ -1,257 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_INDX_HPP -#define DROP_INDX_HPP - -#include "SignalData.hpp" -#include - -/** - * DropIndxReq. - */ -class DropIndxReq { - friend bool printDROP_INDX_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TC = 5 << 8 - }; - STATIC_CONST( SignalLength = 6 ); - -private: - Uint32 m_connectionPtr; - Uint32 m_userRef; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_indexVersion; - // extra - Uint32 m_opKey; - -public: - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - DropIndxReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (DropIndxReq::RequestType)val; - } - void setRequestType(DropIndxReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * DropIndxConf. 
- */ -class DropIndxConf { - friend bool printDROP_INDX_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 6 ); - -private: - Uint32 m_connectionPtr; - Uint32 m_userRef; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_indexVersion; - -public: - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - DropIndxReq::RequestType getRequestType() const { - return (DropIndxReq::RequestType)m_requestInfo; - } - void setRequestType(DropIndxReq::RequestType val) { - m_requestInfo = val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getIndexVersion() const { - return m_indexVersion; - } - void setIndexVersion(Uint32 val) { - m_indexVersion = val; - } -}; - -/** - * DropIndxRef. - */ -struct DropIndxRef { - friend bool printDROP_INDX_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - InvalidIndexVersion = 241, - Busy = 701, - BusyWithNR = 711, - NotMaster = 702, - IndexNotFound = 4243, - BadRequestType = 4247, - InvalidName = 4248, - NotAnIndex = 4254, - SingleUser = 299 - }; - STATIC_CONST( SignalLength = DropIndxConf::SignalLength + 3 ); - - DropIndxConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexId; - //Uint32 m_indexVersion; - Uint32 m_errorCode; - Uint32 m_errorLine; - union { - Uint32 m_errorNode; - Uint32 masterNodeId; - }; -public: - DropIndxConf* getConf() { - return &m_conf; - } - const DropIndxConf* getConf() const { - return &m_conf; - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - DropIndxReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(DropIndxReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - Uint32 getIndexVersion() const { - return m_conf.getIndexVersion(); - } - void setIndexVersion(Uint32 val) { - m_conf.setIndexVersion(val); - } - DropIndxRef::ErrorCode getErrorCode() const { - return (DropIndxRef::ErrorCode)m_errorCode; - } - void setErrorCode(DropIndxRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropObj.hpp b/storage/ndb/include/kernel/signaldata/DropObj.hpp deleted file mode 100644 index f16bd2ad0d2..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropObj.hpp +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to 
license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_OBJ_HPP -#define DROP_OBJ_HPP - -#include "DictObjOp.hpp" -#include "SignalData.hpp" - -struct DropObjReq -{ - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - - friend bool printDROP_OBJ_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 9 ); - - Uint32 op_key; - Uint32 objId; - Uint32 objType; - Uint32 objVersion; - - Uint32 senderRef; - Uint32 senderData; - - Uint32 requestInfo; - - Uint32 clientRef; - Uint32 clientData; -}; - -class DropObjConf { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printDROP_OBJ_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 objId; -}; - -class DropObjRef { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printDROP_OBJ_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - NoSuchObj = 1, - DropWoPrep = 2, // Calling Drop with first calling PrepDrop - PrepDropInProgress = 3, - DropInProgress = 4, - NF_FakeErrorREF = 5 - }; - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 objId; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropTab.hpp b/storage/ndb/include/kernel/signaldata/DropTab.hpp deleted file mode 100644 index 3f5afa2194d..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropTab.hpp +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
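DropIndxReq above (like several other DICT request signals) packs two values into the single m_requestInfo word through BitmaskImpl: the RequestType in the low 16 bits and extra request flags in the high 16 bits. The sketch below shows what those accessors amount to in plain bit operations, assuming the usual NDB field semantics (getField(size, data, pos, len) reads len bits starting at bit pos); the typedef is a stand-in.

    typedef unsigned int Uint32;   // stand-in for the NDB typedef

    // Equivalent of BitmaskImpl::getField(1, &info, 0, 16): the request type.
    static Uint32 requestTypeOf(Uint32 requestInfo)
    {
      return requestInfo & 0xFFFF;
    }

    // Equivalent of BitmaskImpl::getField(1, &info, 16, 16): the flag bits.
    static Uint32 requestFlagsOf(Uint32 requestInfo)
    {
      return (requestInfo >> 16) & 0xFFFF;
    }

    // Equivalent of addRequestFlag(): OR a new flag into the high half,
    // leaving the request type in the low half untouched.
    static void addRequestFlagTo(Uint32& requestInfo, Uint32 flag)
    {
      requestInfo |= (flag & 0xFFFF) << 16;
    }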
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_TAB_HPP -#define DROP_TAB_HPP - -#include "SignalData.hpp" - -class DropTabReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - - friend bool printDROP_TAB_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum RequestType { - OnlineDropTab = 0, - CreateTabDrop = 1, - RestartDropTab = 2 - }; -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 requestType; -}; - -class DropTabConf { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - friend class Suma; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printDROP_TAB_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; -}; - -class DropTabRef { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbacc; - friend class Dbtup; - friend class Dbtux; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printDROP_TAB_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - NoSuchTable = 1, - DropWoPrep = 2, // Calling Drop with first calling PrepDrop - PrepDropInProgress = 3, - DropInProgress = 4, - NF_FakeErrorREF = 5 - }; - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropTabFile.hpp b/storage/ndb/include/kernel/signaldata/DropTabFile.hpp deleted file mode 100644 index caf79e62102..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropTabFile.hpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_TABFILE_HPP -#define DROP_TABFILE_HPP - -#include "SignalData.hpp" - -class DropTabFileReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; - friend class Dbacc; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 primaryTableId; - Uint32 secondaryTableId; -}; -class DropTabFileConf { - /** - * Receiver(s) - */ - friend class Dbdict; - - /** - * Sender(s) - */ - friend class Dbdih; - friend class Dbacc; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 userPtr; - Uint32 senderRef; - Uint32 nodeId; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropTable.hpp b/storage/ndb/include/kernel/signaldata/DropTable.hpp deleted file mode 100644 index 1e0a5f175f2..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropTable.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_TABLE_HPP -#define DROP_TABLE_HPP - -#include "SignalData.hpp" - -class DropTableReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 4 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 tableVersion; -}; - -class DropTableRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 6 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 tableVersion; - Uint32 errorCode; - Uint32 masterNodeId; - - enum ErrorCode { - Busy = 701, - BusyWithNR = 711, - NotMaster = 702, - NoSuchTable = 709, - InvalidTableVersion = 241, - DropInProgress = 283, - NoDropTableRecordAvailable = 1229, - BackupInProgress = 761, - SingleUser = 299 - }; -}; - -class DropTableConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 tableVersion; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DropTrig.hpp b/storage/ndb/include/kernel/signaldata/DropTrig.hpp deleted file mode 100644 index 7bc8f2444f4..00000000000 --- a/storage/ndb/include/kernel/signaldata/DropTrig.hpp +++ /dev/null @@ -1,300 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DROP_TRIG_HPP -#define DROP_TRIG_HPP - -#include "SignalData.hpp" -#include -#include - -/** - * DropTrigReq. - */ -class DropTrigReq { - friend bool printDROP_TRIG_REQ(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum RequestType { - RT_UNDEFINED = 0, - RT_USER = 1, - RT_ALTER_INDEX = 2, - RT_BUILD_INDEX = 3, - RT_DICT_PREPARE = 1 << 4, - RT_DICT_COMMIT = 0xC << 4, - RT_DICT_ABORT = 0xF << 4, - RT_TC = 5 << 8, - RT_LQH = 6 << 8 - }; - STATIC_CONST( SignalLength = 7 ); - SECTION( TRIGGER_NAME_SECTION = 0 ); // optional - enum KeyValues { - TriggerNameKey = 0xa1 - }; - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; // set by DICT if index trigger - Uint32 m_triggerId; // set by DICT based on name - Uint32 m_triggerInfo; // only for TUP - // extra - Uint32 m_opKey; - -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - DropTrigReq::RequestType getRequestType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_requestInfo, 0, 16); - return (DropTrigReq::RequestType)val; - } - void setRequestType(DropTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getRequestFlag() const { - return BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - }; - void addRequestFlag(Uint32 val) { - val |= BitmaskImpl::getField(1, &m_requestInfo, 16, 16); - BitmaskImpl::setField(1, &m_requestInfo, 16, 16, val); - }; - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } - Uint32 getTriggerInfo() const { - return m_triggerInfo; - } - void setTriggerInfo(Uint32 val) { - m_triggerInfo = val; - } - TriggerType::Value getTriggerType() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 0, 8); - return (TriggerType::Value)val; - } - void setTriggerType(TriggerType::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 0, 8, (Uint32)val); - } - TriggerActionTime::Value getTriggerActionTime() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 8, 8); - return (TriggerActionTime::Value)val; - } - void setTriggerActionTime(TriggerActionTime::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 8, 8, (Uint32)val); - } - TriggerEvent::Value getTriggerEvent() const { - const Uint32 val = BitmaskImpl::getField(1, &m_triggerInfo, 16, 8); - return (TriggerEvent::Value)val; - } - void setTriggerEvent(TriggerEvent::Value val) { - BitmaskImpl::setField(1, &m_triggerInfo, 16, 8, (Uint32)val); - } - bool getMonitorReplicas() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 24, 1); - } - void setMonitorReplicas(bool val) { - BitmaskImpl::setField(1, 
&m_triggerInfo, 24, 1, val); - } - bool getMonitorAllAttributes() const { - return BitmaskImpl::getField(1, &m_triggerInfo, 25, 1); - } - void setMonitorAllAttributes(bool val) { - BitmaskImpl::setField(1, &m_triggerInfo, 25, 1, val); - } - Uint32 getOpKey() const { - return m_opKey; - } - void setOpKey(Uint32 val) { - m_opKey = val; - } -}; - -/** - * DropTrigConf. - */ -class DropTrigConf { - friend bool printDROP_TRIG_CONF(FILE*, const Uint32*, Uint32, Uint16); - -public: - STATIC_CONST( InternalLength = 3 ); - STATIC_CONST( SignalLength = 6 ); - -private: - Uint32 m_userRef; - Uint32 m_connectionPtr; - Uint32 m_requestInfo; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_triggerId; - - // Public methods -public: - Uint32 getUserRef() const { - return m_userRef; - } - void setUserRef(Uint32 val) { - m_userRef = val; - } - Uint32 getConnectionPtr() const { - return m_connectionPtr; - } - void setConnectionPtr(Uint32 val) { - m_connectionPtr = val; - } - DropTrigReq::RequestType getRequestType() const { - return (DropTrigReq::RequestType)m_requestInfo; - } - void setRequestType(DropTrigReq::RequestType val) { - m_requestInfo = (Uint32)val; - } - Uint32 getTableId() const { - return m_tableId; - } - void setTableId(Uint32 val) { - m_tableId = val; - } - Uint32 getIndexId() const { - return m_indexId; - } - void setIndexId(Uint32 val) { - m_indexId = val; - } - Uint32 getTriggerId() const { - return m_triggerId; - } - void setTriggerId(Uint32 val) { - m_triggerId = val; - } -}; - -/** - * DropTrigRef. - */ -class DropTrigRef { - friend bool printDROP_TRIG_REF(FILE*, const Uint32*, Uint32, Uint16); - -public: - enum ErrorCode { - NoError = 0, - Busy = 701, - TriggerNotFound = 4238, - BadRequestType = 4247, - InvalidName = 4248 - }; - STATIC_CONST( SignalLength = DropTrigConf::SignalLength + 3 ); - -private: - DropTrigConf m_conf; - //Uint32 m_userRef; - //Uint32 m_connectionPtr; - //Uint32 m_requestInfo; - //Uint32 m_tableId; - //Uint32 m_indexId; - //Uint32 m_triggerId; - Uint32 m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - -public: - DropTrigConf* getConf() { - return &m_conf; - } - const DropTrigConf* getConf() const { - return &m_conf; - } - Uint32 getUserRef() const { - return m_conf.getUserRef(); - } - void setUserRef(Uint32 val) { - m_conf.setUserRef(val); - } - Uint32 getConnectionPtr() const { - return m_conf.getConnectionPtr(); - } - void setConnectionPtr(Uint32 val) { - m_conf.setConnectionPtr(val); - } - DropTrigReq::RequestType getRequestType() const { - return m_conf.getRequestType(); - } - void setRequestType(DropTrigReq::RequestType val) { - m_conf.setRequestType(val); - } - Uint32 getTableId() const { - return m_conf.getTableId(); - } - void setTableId(Uint32 val) { - m_conf.setTableId(val); - } - Uint32 getIndexId() const { - return m_conf.getIndexId(); - } - void setIndexId(Uint32 val) { - m_conf.setIndexId(val); - } - Uint32 getTriggerId() const { - return m_conf.getTriggerId(); - } - void setTriggerId(Uint32 val) { - m_conf.setTriggerId(val); - } - DropTrigRef::ErrorCode getErrorCode() const { - return (DropTrigRef::ErrorCode)m_errorCode; - } - void setErrorCode(DropTrigRef::ErrorCode val) { - m_errorCode = (Uint32)val; - } - Uint32 getErrorLine() const { - return m_errorLine; - } - void setErrorLine(Uint32 val) { - m_errorLine = val; - } - Uint32 getErrorNode() const { - return m_errorNode; - } - void setErrorNode(Uint32 val) { - m_errorNode = val; - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp 
b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp deleted file mode 100644 index cf4a01aa4e1..00000000000 --- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp +++ /dev/null @@ -1,157 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DUMP_STATE_ORD_HPP -#define DUMP_STATE_ORD_HPP - -#include "SignalData.hpp" - -/** - * DumpStateOrd is sent by the mgmtsrvr to CMVMI. - * CMVMI the redirect the signal to all blocks. - * - * The implementation of the DumpStateOrd should dump state information - * (typically using the infoEvent-function) - */ -class DumpStateOrd { - /** - * Sender/Reciver - */ - friend class Cmvmi; - - /** - * Sender(s) - */ - friend class MgmtSrvr; - - /** - * Reciver(s) - */ - friend class Dbacc; - friend class Dblqh; - friend class Dbtup; - friend class Dbtc; - friend class Ndbcntr; - friend class Qmgr; - friend class Dbdih; - friend class Dbdict; - friend class Ndbfs; - -public: - enum DumpStateType { - // 1 QMGR Dump information about phase 1 variables - // 13 CMVMI Dump signal counter - // 13 NDBCNTR Dump start phase information - // 13 NDBCNTR_REF Dump start phase information - CommitAckMarkersSize = 14, // TC+LQH Dump free size in commitAckMarkerP - CommitAckMarkersDump = 15, // TC+LQH Dump info in commitAckMarkerPool - DihDumpNodeRestartInfo = 16, // 16 DIH Dump node restart info - DihDumpNodeStatusInfo = 17,// 17 DIH Dump node status info - DihPrintFragmentation = 18,// 18 DIH Print fragmentation - // 19 NDBFS Fipple with O_SYNC, O_CREATE etc. 
- // 20-24 BACKUP - NdbcntrTestStopOnError = 25, - NdbcntrStopNodes = 70, - // 100-105 TUP and ACC - // 200-240 UTIL - // 300-305 TRIX - QmgrErr935 = 935, - NdbfsDumpFileStat = 400, - NdbfsDumpAllFiles = 401, - NdbfsDumpOpenFiles = 402, - NdbfsDumpIdleFiles = 403, - // 1222-1225 DICT - LqhDumpAllDefinedTabs = 1332, - LqhDumpNoLogPages = 1333, - LqhDumpOneScanRec = 2300, - LqhDumpAllScanRec = 2301, - LqhDumpAllActiveScanRec = 2302, - LqhDumpLcpState = 2303, - LqhErrorInsert5042 = 2315, - - AccDumpOneScanRec = 2400, - AccDumpAllScanRec = 2401, - AccDumpAllActiveScanRec = 2402, - AccDumpOneOperationRec = 2403, - AccDumpNumOpRecs = 2404, - AccDumpFreeOpRecs = 2405, - AccDumpNotFreeOpRecs = 2406, - DumpPageMemory = 1000, // Acc & TUP - TcDumpAllScanFragRec = 2500, - TcDumpOneScanFragRec = 2501, - TcDumpAllScanRec = 2502, - TcDumpAllActiveScanRec = 2503, - TcDumpOneScanRec = 2504, - TcDumpOneApiConnectRec = 2505, - TcDumpAllApiConnectRec = 2506, - TcSetTransactionTimeout = 2507, - TcSetApplTransactionTimeout = 2508, - StartTcTimer = 2509, - StopTcTimer = 2510, - StartPeriodicTcTimer = 2511, - TcStartDumpIndexOpCount = 2512, - TcDumpIndexOpCount = 2513, - CmvmiDumpConnections = 2600, - CmvmiDumpLongSignalMemory = 2601, - CmvmiSetRestartOnErrorInsert = 2602, - CmvmiTestLongSigWithDelay = 2603, - CmvmiDumpSubscriptions = 2604, /* note: done to respective outfile - to be able to debug if events - for some reason does not end up - in clusterlog */ - LCPContinue = 5900, - // 7000 DIH - // 7001 DIH - // 7002 DIH - // 7003 DIH - // 7004 DIH - // 7005 DIH - // 7006 DIH - // 7006 DIH - // 7007 DIH - // 7008 DIH - // 7009 DIH - // 7010 DIH - // 7011 DIH - // 7012 DIH - DihDumpLCPState= 7013, - DihDumpLCPMasterTakeOver = 7014, - // 7015 DIH - DihAllAllowNodeStart = 7016, - DihMinTimeBetweenLCP = 7017, - DihMaxTimeBetweenLCP = 7018, - // 7019 - // 7020 - // 7021 - EnableUndoDelayDataWrite = 7080, // DIH+ACC+TUP - DihSetTimeBetweenGcp = 7090, - DihStartLcpImmediately = 7099, - // 8000 Suma - // 12000 Tux - TuxLogToFile = 12001, - TuxSetLogFlags = 12002, - TuxMetaDataJunk = 12009, - - DumpTsman = 9800, - DumpLgman = 10000, - DumpPgman = 11000 - }; -public: - - Uint32 args[25]; // Generic argument -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/EmptyLcp.hpp b/storage/ndb/include/kernel/signaldata/EmptyLcp.hpp deleted file mode 100644 index 68f5e2b7bd4..00000000000 --- a/storage/ndb/include/kernel/signaldata/EmptyLcp.hpp +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef EMPTY_LCPREQ_HPP -#define EMPTY_LCPREQ_HPP - -/** - * This signals is sent by Dbdih-Master to Dblqh - * as part of master take over after node crash - */ -class EmptyLcpReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Sender(s) / Receiver(s) - */ - - /** - * Receiver(s) - */ - friend class Dblqh; - -public: - STATIC_CONST( SignalLength = 1 ); -private: - - Uint32 senderRef; -}; - -/** - * This signals is sent by Dblqh to Dbdih - * as part of master take over after node crash - */ -class EmptyLcpConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Sender(s) / Receiver(s) - */ - - /** - * Receiver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 6 ); -private: - - Uint32 senderNodeId; - Uint32 tableId; - Uint32 fragmentId; - Uint32 lcpNo; - Uint32 lcpId; - Uint32 idle; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/EndTo.hpp b/storage/ndb/include/kernel/signaldata/EndTo.hpp deleted file mode 100644 index ec04dbaf1f6..00000000000 --- a/storage/ndb/include/kernel/signaldata/EndTo.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef END_TO_HPP -#define END_TO_HPP - -class EndToReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 4 ); -private: - Uint32 userPtr; - BlockReference userRef; - Uint32 startingNodeId; - Uint32 nodeTakenOver; -}; - -class EndToConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - Uint32 userPtr; - Uint32 sendingNodeId; - Uint32 startingNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/EventReport.hpp b/storage/ndb/include/kernel/signaldata/EventReport.hpp deleted file mode 100644 index 698d75433b5..00000000000 --- a/storage/ndb/include/kernel/signaldata/EventReport.hpp +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SD_EVENT_REPORT_H -#define SD_EVENT_REPORT_H - -#include -#include "SignalData.hpp" - -/** - * Send by different block to report that a event has taken place - * - * SENDER: *Block* - * RECIVER: SimBlockCMCtrBlck - */ -class EventReport { - friend class SimulatedBlock; - friend class Cmvmi; - friend class SimblockMissra; - friend class Dbacc; - friend class Dblqh; - friend class Dbtup; - friend class Dbtc; - friend class Ndbcntr; - friend class Qmgr; - friend class Dbdih; - friend class Dbdict; - friend class MgmtSrvr; - friend class Grep; -public: - /* - EventType defines what event reports to send. - - The ORDER is NOT important anymore. //ejonore 2003-07-24 15:03 - - HOW TO ADD A NEW EVENT - -------------------- - 1) Add SentHeartbeat EventType in the category where it belongs. - ... - // INFO - SentHeartbeat, - InfoEvent - ... - - 2) remeber to update # of events below. Just to keep count... - Number of event types = 53 - - 3) Add a new SentHeartBeat entry to EventLogger::matrix[]. - ... - // INFO - { EventReport::SentHeartbeat, LogLevel::llInfo, 11, INFO }, - { EventReport::InfoEvent, LogLevel::llInfo, 2, INFO } - ... - - 4) Add SentHeartbeat in EventLogger::getText() - - */ - void setNodeId(Uint32 nodeId); - Uint32 getNodeId() const; - void setEventType(Ndb_logevent_type type); - Ndb_logevent_type getEventType() const; - UintR eventType; // DATA 0 -}; - -inline -void -EventReport::setNodeId(Uint32 nodeId){ - eventType = (nodeId << 16) | (eventType & 0xFFFF); -} - -inline -Uint32 -EventReport::getNodeId() const { - return eventType >> 16; -} - -inline -void -EventReport::setEventType(Ndb_logevent_type type){ - eventType = (eventType & 0xFFFF0000) | (((UintR) type) & 0xFFFF); -} - -inline -Ndb_logevent_type -EventReport::getEventType() const { - return (Ndb_logevent_type)(eventType & 0xFFFF); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp b/storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp deleted file mode 100644 index ad5d109554c..00000000000 --- a/storage/ndb/include/kernel/signaldata/EventSubscribeReq.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SD_EVENT_SUB_REQ_H -#define SD_EVENT_SUB_REQ_H - -#include "SignalData.hpp" - -/** - * Requests change (new, update, delete) of event subscription, - * i.e. forwarding of events. 
- * - * SENDER: Mgm server - * RECIVER: SimBlockCMCtrBlck - */ - -struct EventSubscribeReq { - /** - * Receiver(s) - */ - friend class Cmvmi; - - /** - * Sender(s) - */ - friend class MgmtSrvr; - - STATIC_CONST( SignalLength = 2 + LogLevel::LOGLEVEL_CATEGORIES ); - - /** - * Note: If you use the same blockRef as you have used earlier, - * you update your ongoing subscription - */ - Uint32 blockRef; - - /** - * If you specify 0 entries, it's the same as cancelling an - * subscription - */ - Uint32 noOfEntries; - - Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES]; - - EventSubscribeReq& operator= (const LogLevel& ll){ - noOfEntries = LogLevel::LOGLEVEL_CATEGORIES; - for(size_t i = 0; i - -/** - * - */ -class FailRep { - /** - * Sender(s) & Reciver(s) - */ - friend class Qmgr; - friend class Ndbcntr; - - /** - * For printing - */ - friend bool printFAIL_REP(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 2 ); - STATIC_CONST( ExtraLength = 1 + NdbNodeBitmask::Size ); - - enum FailCause { - ZOWN_FAILURE=0, - ZOTHER_NODE_WHEN_WE_START=1, - ZIN_PREP_FAIL_REQ=2, - ZSTART_IN_REGREQ=3, - ZHEARTBEAT_FAILURE=4, - ZLINK_FAILURE=5, - ZOTHERNODE_FAILED_DURING_START=6, - ZMULTI_NODE_SHUTDOWN = 7, - ZPARTITIONED_CLUSTER = 8 - }; - -private: - - Uint32 failNodeId; - Uint32 failCause; - /** - * Used when failCause == ZPARTITIONED_CLUSTER - */ - Uint32 president; - Uint32 partition[NdbNodeBitmask::Size]; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp deleted file mode 100644 index 2e2f52cb956..00000000000 --- a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp +++ /dev/null @@ -1,216 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FIRE_TRIG_ORD_HPP -#define FIRE_TRIG_ORD_HPP - -#include "SignalData.hpp" -#include -#include -#include - -/** - * FireTrigOrd - * - * This signal is sent by TUP to signal - * that a trigger has fired - */ -class FireTrigOrd { - /** - * Sender(s) - */ - // API - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbtup; - - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Backup; - friend class SumaParticipant; - - /** - * For printing - */ - friend bool printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 8 ); - STATIC_CONST( SignalWithGCILength = 9 ); - STATIC_CONST( SignalLengthSuma = 11 ); - -private: - Uint32 m_connectionPtr; - Uint32 m_userRef; - Uint32 m_triggerId; - TriggerEvent::Value m_triggerEvent; - Uint32 m_noPrimKeyWords; - Uint32 m_noBeforeValueWords; - Uint32 m_noAfterValueWords; - Uint32 fragId; - Uint32 m_gci; - Uint32 m_hashValue; - Uint32 m_any_value; - // Public methods -public: - Uint32 getConnectionPtr() const; - void setConnectionPtr(Uint32); - Uint32 getUserRef() const; - void setUserRef(Uint32); - Uint32 getTriggerId() const; - void setTriggerId(Uint32 anIndxId); - TriggerEvent::Value getTriggerEvent() const; - void setTriggerEvent(TriggerEvent::Value); - Uint32 getNoOfPrimaryKeyWords() const; - void setNoOfPrimaryKeyWords(Uint32); - Uint32 getNoOfBeforeValueWords() const; - void setNoOfBeforeValueWords(Uint32); - Uint32 getNoOfAfterValueWords() const; - void setNoOfAfterValueWords(Uint32); - Uint32 getGCI() const; - void setGCI(Uint32); - Uint32 getHashValue() const; - void setHashValue(Uint32); - Uint32 getAnyValue() const; - void setAnyValue(Uint32); -}; - -inline -Uint32 FireTrigOrd::getConnectionPtr() const -{ - return m_connectionPtr; -} - -inline -void FireTrigOrd::setConnectionPtr(Uint32 aConnectionPtr) -{ - m_connectionPtr = aConnectionPtr; -} - -inline -Uint32 FireTrigOrd::getUserRef() const -{ - return m_userRef; -} - -inline -void FireTrigOrd::setUserRef(Uint32 aUserRef) -{ - m_userRef = aUserRef; -} - -inline -Uint32 FireTrigOrd::getTriggerId() const -{ - return m_triggerId; -} - -inline -void FireTrigOrd::setTriggerId(Uint32 aTriggerId) -{ - m_triggerId = aTriggerId; -} - -inline -TriggerEvent::Value FireTrigOrd::getTriggerEvent() const -{ - return m_triggerEvent; -} - -inline -void FireTrigOrd::setTriggerEvent(TriggerEvent::Value aTriggerEvent) -{ - m_triggerEvent = aTriggerEvent; -} - -inline -Uint32 FireTrigOrd::getNoOfPrimaryKeyWords() const -{ - return m_noPrimKeyWords; -} - -inline -void FireTrigOrd::setNoOfPrimaryKeyWords(Uint32 noPrim) -{ - m_noPrimKeyWords = noPrim; -} - -inline -Uint32 FireTrigOrd::getNoOfBeforeValueWords() const -{ - return m_noBeforeValueWords; -} - -inline -void FireTrigOrd::setNoOfBeforeValueWords(Uint32 noBefore) -{ - m_noBeforeValueWords = noBefore; -} - -inline -Uint32 FireTrigOrd::getNoOfAfterValueWords() const -{ - return m_noAfterValueWords; -} - -inline -void FireTrigOrd::setNoOfAfterValueWords(Uint32 noAfter) -{ - m_noAfterValueWords = noAfter; -} - -inline -Uint32 FireTrigOrd::getGCI() const -{ - return m_gci; -} - -inline -void FireTrigOrd::setGCI(Uint32 aGCI) -{ - m_gci = aGCI; -} - -inline -Uint32 FireTrigOrd::getHashValue() const -{ - return m_hashValue; -} - -inline -void 
FireTrigOrd::setHashValue(Uint32 flag) -{ - m_hashValue = flag; -} - -inline -Uint32 FireTrigOrd::getAnyValue() const -{ - return m_any_value; -} - -inline -void FireTrigOrd::setAnyValue(Uint32 any_value) -{ - m_any_value = any_value; -} - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsAppendReq.hpp b/storage/ndb/include/kernel/signaldata/FsAppendReq.hpp deleted file mode 100644 index bf6047c88a4..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsAppendReq.hpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_APPENDREQ_H -#define FS_APPENDREQ_H - -#include "SignalData.hpp" - -/** - * - * SENDER: - * RECIVER: Ndbfs - */ -class FsAppendReq { - /** - * Reciver(s) - */ - friend class Ndbfs; - friend class VoidFs; - - /** - * Sender(s) - */ - friend class Backup; - - friend bool printFSAPPENDREQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 7 ); - -private: - - /** - * DATA VARIABLES - */ - UintR filePointer; // DATA 0 - UintR userReference; // DATA 1 - UintR userPointer; // DATA 2 - UintR varIndex; // DATA 3 - UintR offset; // DATA 4 - UintR size; // DATA 5 - UintR synch_flag; // DATA 6 -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsCloseReq.hpp b/storage/ndb/include/kernel/signaldata/FsCloseReq.hpp deleted file mode 100644 index 445c305dcd9..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsCloseReq.hpp +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_CLOSE_REQ_H -#define FS_CLOSE_REQ_H - -#include "SignalData.hpp" - -/** - * - * SENDER: - * RECIVER: Ndbfs - */ -class FsCloseReq { - /** - * Reciver(s) - */ - friend class Ndbfs; // Reciver - friend class VoidFs; - friend class Lgman; - friend class Tsman; - - /** - * Sender(s) - */ - friend class Backup; - friend class Dbdict; - friend class Restore; - - /** - * For printing - */ - friend bool printFSCLOSEREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 4 ); - -private: - - /** - * DATA VARIABLES - */ - - UintR filePointer; // DATA 0 - UintR userReference; // DATA 1 - UintR userPointer; // DATA 2 - UintR fileFlag; // DATA 3 - - static bool getRemoveFileFlag(const UintR & fileflag); - static void setRemoveFileFlag(UintR & fileflag, bool removefile); - -}; - - -inline -bool -FsCloseReq::getRemoveFileFlag(const UintR & fileflag){ - return (fileflag == 1); -} - -inline -void -FsCloseReq::setRemoveFileFlag(UintR & fileflag, bool removefile){ -// ASSERT_BOOL(removefile, "FsCloseReq::setRemoveFileFlag"); - if (removefile) - fileflag = 1; - else - fileflag = 0; -} - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsConf.hpp b/storage/ndb/include/kernel/signaldata/FsConf.hpp deleted file mode 100644 index ddad5a79f63..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsConf.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_CONF_H -#define FS_CONF_H - -#include "SignalData.hpp" - -/** - * FsConf - Common signal class for all CONF signals sent from Ndbfs - * GSN_FSCLOSECONF, GSN_FSOPENCONF, GSN_FSWRITECONF, GSN_FSREADCONF, - * GSN_FSSYNCCONF, GSN_FSREMOVECONF - */ - -/** - * - * SENDER: Ndbfs - * RECIVER: - */ -class FsConf { - /** - * Reciver(s) - */ - friend class Backup; - friend class Dbacc; - friend class Dbtup; - friend class Dbdict; - friend class Lgman; - friend class Tsman; - friend class Pgman; - friend class Restore; - /** - * Sender(s) - */ - friend class Ndbfs; - friend class VoidFs; - - /** - * For printing - */ - friend bool printFSCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - /** - * FSOPENCONF: static const UintR SignalLength = 2; - * FSCLOSECONF, FSREADCONF, FSWRITECONF, FSSYNCCONF: static const UintR SignalLength = 2; - */ - -private: - - /** - * DATA VARIABLES - */ - UintR userPointer; // DATA 0 - - // Data 1 - union { - UintR filePointer; // FSOPENCONF - Uint32 bytes_read; // FSREADCONF (when allow partial read) - }; -}; - - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp b/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp deleted file mode 100644 index b9ef8826766..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsOpenReq.hpp +++ /dev/null @@ -1,335 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_OPEN_REQ_H -#define FS_OPEN_REQ_H - -#include "SignalData.hpp" - -/** - * - * SENDER: - * RECIVER: Ndbfs - */ -class FsOpenReq { - /** - * Reciver(s) - */ - friend class Ndbfs; // Reciver - friend class AsyncFile; // Uses FsOpenReq to decode file open flags - friend class Filename; - friend class VoidFs; - - /** - * Sender(s) - */ - friend class Backup; - friend class Dbdict; - friend class Ndbcntr; // For initial start... 
- friend class Dbdih; - friend class Lgman; - friend class Tsman; - friend class Restore; - friend class Dblqh; - - friend class Dbtup; - - /** - * For printing - */ - friend bool printFSOPENREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 11 ); - SECTION( FILENAME = 0 ); - -private: - - /** - * DATA VARIABLES - */ - - UintR userReference; // DATA 0 - UintR userPointer; // DATA 1 - UintR fileNumber[4]; // DATA 2 - 5 - UintR fileFlags; // DATA 6 - Uint32 page_size; - Uint32 file_size_hi; - Uint32 file_size_lo; - Uint32 auto_sync_size; // In bytes - - STATIC_CONST( OM_READONLY = 0 ); - STATIC_CONST( OM_WRITEONLY = 1 ); - STATIC_CONST( OM_READWRITE = 2 ); - - STATIC_CONST( OM_APPEND = 0x8 ); // Not Implemented on W2k - STATIC_CONST( OM_SYNC = 0x10 ); - STATIC_CONST( OM_CREATE = 0x100 ); - STATIC_CONST( OM_TRUNCATE = 0x200 ); - STATIC_CONST( OM_AUTOSYNC = 0x400 ); - - STATIC_CONST( OM_CREATE_IF_NONE = 0x0800 ); - STATIC_CONST( OM_INIT = 0x1000 ); // - STATIC_CONST( OM_CHECK_SIZE = 0x2000 ); - STATIC_CONST( OM_DIRECT = 0x4000 ); - - enum Suffixes { - S_DATA = 0, - S_FRAGLOG = 1, - S_LOGLOG = 2, - S_FRAGLIST = 3, - S_TABLELIST = 4, - S_SCHEMALOG = 5, - S_SYSFILE = 6, - S_LOG = 7, - S_CTL = 8 - }; - - static Uint32 getVersion(const Uint32 fileNumber[]); - static Uint32 getSuffix(const Uint32 fileNumber[]); - - static void setVersion(Uint32 fileNumber[], Uint8 val); - static void setSuffix(Uint32 fileNumber[], Uint8 val); - - /** - * V1 - */ - static Uint32 v1_getDisk(const Uint32 fileNumber[]); - static Uint32 v1_getTable(const Uint32 fileNumber[]); - static Uint32 v1_getFragment(const Uint32 fileNumber[]); - static Uint32 v1_getS(const Uint32 fileNumber[]); - static Uint32 v1_getP(const Uint32 fileNumber[]); - - static void v1_setDisk(Uint32 fileNumber[], Uint8 val); - static void v1_setTable(Uint32 fileNumber[], Uint32 val); - static void v1_setFragment(Uint32 fileNumber[], Uint32 val); - static void v1_setS(Uint32 fileNumber[], Uint32 val); - static void v1_setP(Uint32 fileNumber[], Uint8 val); - - /** - * V2 - Backup - */ - static Uint32 v2_getSequence(const Uint32 fileNumber[]); - static Uint32 v2_getNodeId(const Uint32 fileNumber[]); - static Uint32 v2_getCount(const Uint32 fileNumber[]); - - static void v2_setSequence(Uint32 fileNumber[], Uint32 no); - static void v2_setNodeId(Uint32 fileNumber[], Uint32 no); - static void v2_setCount(Uint32 fileNumber[], Uint32 no); - - /** - * V4 - LCP - */ - static Uint32 v5_getLcpNo(const Uint32 fileNumber[]); - static Uint32 v5_getTableId(const Uint32 fileNumber[]); - static Uint32 v5_getFragmentId(const Uint32 fileNumber[]); - - static void v5_setLcpNo(Uint32 fileNumber[], Uint32 no); - static void v5_setTableId(Uint32 fileNumber[], Uint32 no); - static void v5_setFragmentId(Uint32 fileNumber[], Uint32 no); -}; - -/** - * File flags (set according to solaris standard) - * - o = Open mode - 2 Bits -> max 3 - c = create new file - 1 Bit - t = truncate existing - 1 Bit - - 1111111111222222222233 - 01234567890123456789012345678901 - oo ct -*/ - - -/** - * -- v1 -- - * File number[0] = Table - * File number[1] = Fragment - * File number[2] = S-value - * File number[3] = - * p = v1_P 0 - 7 - * d = v1_disk 8 - 15 - * s = v1_suffix 16 - 23 - * v = version 24 - 31 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ppppppppddddddddssssssssvvvvvvvv - * - * -- v2 -- - * File number[0] = Backup Sequence Number - * File number[1] = Node Id 
- * File number[3] = - * v = version 24 - 31 - * s = v1_suffix 16 - 23 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ssssssssvvvvvvvv - * - * -- v3 -- - * File number[0] = Table - * File number[1] = LcpNo - * File number[2] = - * File number[3] = - * v = version 24 - 31 - * s = v1_suffix 16 - 23 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ssssssssvvvvvvvv - */ -inline -Uint32 FsOpenReq::getVersion(const Uint32 fileNumber[]){ - return (fileNumber[3] >> 24) & 0xff; -} - -inline -void FsOpenReq::setVersion(Uint32 fileNumber[], Uint8 val){ - const Uint32 t = fileNumber[3]; - fileNumber[3] = (t & 0x00FFFFFF) | (((Uint32)val) << 24); -} - -inline -Uint32 FsOpenReq::getSuffix(const Uint32 fileNumber[]){ - return (fileNumber[3] >> 16)& 0xff; -} - -inline -void FsOpenReq::setSuffix(Uint32 fileNumber[], Uint8 val){ - const Uint32 t = fileNumber[3]; - fileNumber[3] = (t & 0xFF00FFFF) | (((Uint32)val) << 16); -} - -inline -Uint32 FsOpenReq::v1_getDisk(const Uint32 fileNumber[]){ - return (fileNumber[3]>>8) & 0xff; -} - -inline -void FsOpenReq::v1_setDisk(Uint32 fileNumber[], Uint8 val){ - const Uint32 t = fileNumber[3]; - fileNumber[3] = (t & 0xFFFF00FF) | (((Uint32)val) << 8); -} - -inline -Uint32 FsOpenReq::v1_getTable(const Uint32 fileNumber[]){ - return fileNumber[0]; -} - -inline -void FsOpenReq::v1_setTable(Uint32 fileNumber[], Uint32 val){ - fileNumber[0] = val; -} - -inline -Uint32 FsOpenReq::v1_getFragment(const Uint32 fileNumber[]){ - return fileNumber[1]; -} - -inline -void FsOpenReq::v1_setFragment(Uint32 fileNumber[], Uint32 val){ - fileNumber[1] = val; -} - -inline -Uint32 FsOpenReq::v1_getS(const Uint32 fileNumber[]){ - return fileNumber[2]; -} - -inline -void FsOpenReq::v1_setS(Uint32 fileNumber[], Uint32 val){ - fileNumber[2] = val; -} - -inline -Uint32 FsOpenReq::v1_getP(const Uint32 fileNumber[]){ - return fileNumber[3] & 0xff; -} - -inline -void FsOpenReq::v1_setP(Uint32 fileNumber[], Uint8 val){ - const Uint32 t = fileNumber[3]; - fileNumber[3] = (t & 0xFFFFFF00) | val; -} - -/****************/ -inline -Uint32 FsOpenReq::v2_getSequence(const Uint32 fileNumber[]){ - return fileNumber[0]; -} - -inline -void FsOpenReq::v2_setSequence(Uint32 fileNumber[], Uint32 val){ - fileNumber[0] = val; -} - -inline -Uint32 FsOpenReq::v2_getNodeId(const Uint32 fileNumber[]){ - return fileNumber[1]; -} - -inline -void FsOpenReq::v2_setNodeId(Uint32 fileNumber[], Uint32 val){ - fileNumber[1] = val; -} - -inline -Uint32 FsOpenReq::v2_getCount(const Uint32 fileNumber[]){ - return fileNumber[2]; -} - -inline -void FsOpenReq::v2_setCount(Uint32 fileNumber[], Uint32 val){ - fileNumber[2] = val; -} - -/****************/ -inline -Uint32 FsOpenReq::v5_getTableId(const Uint32 fileNumber[]){ - return fileNumber[0]; -} - -inline -void FsOpenReq::v5_setTableId(Uint32 fileNumber[], Uint32 val){ - fileNumber[0] = val; -} - -inline -Uint32 FsOpenReq::v5_getLcpNo(const Uint32 fileNumber[]){ - return fileNumber[1]; -} - -inline -void FsOpenReq::v5_setLcpNo(Uint32 fileNumber[], Uint32 val){ - fileNumber[1] = val; -} - -inline -Uint32 FsOpenReq::v5_getFragmentId(const Uint32 fileNumber[]){ - return fileNumber[2]; -} - -inline -void FsOpenReq::v5_setFragmentId(Uint32 fileNumber[], Uint32 val){ - fileNumber[2] = val; -} - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp b/storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp deleted file mode 100644 index bd4edbacc9e..00000000000 --- 
a/storage/ndb/include/kernel/signaldata/FsReadWriteReq.hpp +++ /dev/null @@ -1,172 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_READWRITEREQ_H -#define FS_READWRITEREQ_H - -#include "SignalData.hpp" - -/** - * FsReadWriteReq - Common signal class for FSWRITEREQ and FSREADREQ - * - */ - -/** - * - * SENDER: - * RECIVER: Ndbfs - */ -class FsReadWriteReq { - /** - * Reciver(s) - */ - friend class Ndbfs; - friend class VoidFs; - friend class AsyncFile; - - /** - * Sender(s) - */ - friend class Dbdict; - friend class Lgman; - friend class Tsman; - friend class Pgman; - friend class Restore; - - /** - * For printing - */ - friend bool printFSREADWRITEREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Enum type for errorCode - */ - enum NdbfsFormatType { - fsFormatListOfPairs=0, - fsFormatArrayOfPages=1, - fsFormatListOfMemPages=2, - fsFormatGlobalPage=3, - fsFormatSharedPage=4, - fsFormatMax - }; - - /** - * Length of signal - */ - STATIC_CONST( FixedLength = 6 ); - -private: - - /** - * DATA VARIABLES - */ - UintR filePointer; // DATA 0 - UintR userReference; // DATA 1 - UintR userPointer; // DATA 2 - UintR operationFlag; // DATA 3 - UintR varIndex; // DATA 4 - UintR numberOfPages; // DATA 5 - -//------------------------------------------------------------- -// Variable sized part. 
Those will contain -// info about memory/file pages to read/write -//------------------------------------------------------------- - union { - UintR pageData[16]; // DATA 6 - 21 - struct { - Uint32 varIndex; // In unit cluster size - Uint32 fileOffset; // In unit page size - } listOfPair[8]; - struct { - Uint32 varIndex; - Uint32 fileOffset; - } arrayOfPages; - struct { - Uint32 varIndex[1]; // Size = numberOfPages - Uint32 fileOffset; - } listOfMemPages; - } data; - - static Uint8 getSyncFlag(const UintR & opFlag); - static void setSyncFlag(UintR & opFlag, Uint8 flag); - - static NdbfsFormatType getFormatFlag(const UintR & opFlag); - static void setFormatFlag(UintR & opFlag, Uint8 flag); - - static Uint32 getPartialReadFlag(UintR opFlag); - static void setPartialReadFlag(UintR & opFlag, Uint32 flag); -}; - -/** - * Operation flag - * - f = Format of pageData - 4 Bits -> max 15 - s = sync after write flag - 1 Bit - - 1111111111222222222233 - 01234567890123456789012345678901 - ffffs -*/ - -#define SYNC_SHIFT (4) -#define SYNC_MASK (0x01) - -#define FORMAT_MASK (0x0F) - -#define PARTIAL_READ_SHIFT (5) - -inline -Uint8 -FsReadWriteReq::getSyncFlag(const UintR & opFlag){ - return (Uint8)((opFlag >> SYNC_SHIFT) & SYNC_MASK); -} - -inline -FsReadWriteReq::NdbfsFormatType -FsReadWriteReq::getFormatFlag(const UintR & opFlag){ - return (NdbfsFormatType)(opFlag & FORMAT_MASK); -} - -inline -void -FsReadWriteReq::setSyncFlag(UintR & opFlag, Uint8 flag){ - ASSERT_BOOL(flag, "FsReadWriteReq::setSyncFlag"); - opFlag |= (flag << SYNC_SHIFT); -} - -inline -void -FsReadWriteReq::setFormatFlag(UintR & opFlag, Uint8 flag){ - ASSERT_MAX(flag, fsFormatMax, "FsReadWriteReq::setSyncFlag"); - opFlag |= flag; -} - -inline -void -FsReadWriteReq::setPartialReadFlag(UintR & opFlag, Uint32 flag){ - ASSERT_BOOL(flag, "FsReadWriteReq::setSyncFlag"); - opFlag |= (flag << PARTIAL_READ_SHIFT); -} - -inline -Uint32 -FsReadWriteReq::getPartialReadFlag(UintR opFlag){ - return (opFlag >> PARTIAL_READ_SHIFT) & 1; -} - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsRef.hpp b/storage/ndb/include/kernel/signaldata/FsRef.hpp deleted file mode 100644 index 994f873fe6b..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsRef.hpp +++ /dev/null @@ -1,100 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_REF_H -#define FS_REF_H - -#include -#include "SignalData.hpp" - -/** - * FsRef - Common signal class for all REF signals sent from Ndbfs - * GSN_FSCLOSEREF, GSN_FSOPENREF, GSN_FSWRITEREF, GSN_FSREADREF, - * GSN_FSSYNCREF - */ - - -/** - * - * SENDER: Ndbfs - * RECIVER: - */ -struct FsRef { - - friend bool printFSREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - - /** - * Enum type for errorCode - */ - STATIC_CONST( FS_ERR_BIT = 0x8000 ); - - enum NdbfsErrorCodeType { - fsErrNone=0, - fsErrEnvironmentError=NDBD_EXIT_AFS_ENVIRONMENT, - fsErrTemporaryNotAccessible=NDBD_EXIT_AFS_TEMP_NO_ACCESS, - fsErrNoSpaceLeftOnDevice=NDBD_EXIT_AFS_DISK_FULL, - fsErrPermissionDenied=NDBD_EXIT_AFS_PERMISSION_DENIED, - fsErrInvalidParameters=NDBD_EXIT_AFS_INVALID_PARAM, - fsErrUnknown=NDBD_EXIT_AFS_UNKNOWN, - fsErrNoMoreResources=NDBD_EXIT_AFS_NO_MORE_RESOURCES, - fsErrFileDoesNotExist=NDBD_EXIT_AFS_NO_SUCH_FILE, - fsErrReadUnderflow = NDBD_EXIT_AFS_READ_UNDERFLOW, - fsErrFileExists = FS_ERR_BIT | 12, - fsErrInvalidFileSize = FS_ERR_BIT | 13, - fsErrOutOfMemory = FS_ERR_BIT | 14, - fsErrMax - }; - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 4 ); - - /** - * DATA VARIABLES - */ - UintR userPointer; // DATA 0 - UintR errorCode; // DATA 1 - UintR osErrorCode; // DATA 2 - UintR senderData; - - static NdbfsErrorCodeType getErrorCode(const UintR & errorcode); - static void setErrorCode(UintR & errorcode, NdbfsErrorCodeType errorcodetype); - static void setErrorCode(UintR & errorcode, UintR errorcodetype); - -}; - - -inline -FsRef::NdbfsErrorCodeType -FsRef::getErrorCode(const UintR & errorcode){ - return (NdbfsErrorCodeType)errorcode; -} - -inline -void -FsRef::setErrorCode(UintR & errorcode, NdbfsErrorCodeType errorcodetype){ - ASSERT_MAX(errorcodetype, fsErrMax, "FsRef::setErrorCode"); - errorcode = (UintR)errorcodetype; -} - -inline -void -FsRef::setErrorCode(UintR & errorcode, UintR errorcodetype){ - ASSERT_MAX(errorcodetype, fsErrMax, "FsRef::setErrorCode"); - errorcode = errorcodetype; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp b/storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp deleted file mode 100644 index 1692c00f833..00000000000 --- a/storage/ndb/include/kernel/signaldata/FsRemoveReq.hpp +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_REMOVE_REQ_H -#define FS_REMOVE_REQ_H - -#include "SignalData.hpp" -#include "FsOpenReq.hpp" - -/** - * - * SENDER: - * RECIVER: Ndbfs - */ -class FsRemoveReq { - /** - * Reciver(s) - */ - friend class Ndbfs; // Reciver - friend class AsyncFile; // Uses FsOpenReq to decode file open flags - friend class Filename; - friend class VoidFs; - - /** - * Sender(s) - */ - friend class Backup; - friend class Dbdict; - friend class Dbacc; - friend class Dbtup; - friend class Ndbcntr; // For initial start... - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 8 ); - -private: - - /** - * DATA VARIABLES - */ - - UintR userReference; // DATA 0 - UintR userPointer; // DATA 1 - UintR fileNumber[4]; // DATA 2 - 5 // See FsOpen for interpretation - - /** - * 0 = File -> rm file - * 1 = Directory -> rm -r path - */ - UintR directory; - - /** - * If directory = 1 - * - * 0 = remove only files/direcories in directory specified in fileNumber - * 1 = remove directory specified in fileNumber - */ - UintR ownDirectory; -}; - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/GCPSave.hpp b/storage/ndb/include/kernel/signaldata/GCPSave.hpp deleted file mode 100644 index 8e03ebed660..00000000000 --- a/storage/ndb/include/kernel/signaldata/GCPSave.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GCP_SAVE_HPP -#define GCP_SAVE_HPP - -#include "SignalData.hpp" - -/** - * GCPSaveReq / (Ref/Conf) is sent as part of GCP - */ -class GCPSaveReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Dblqh; - - friend bool printGCPSaveReq(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 dihBlockRef; - Uint32 dihPtr; - Uint32 gci; -}; - -class GCPSaveRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Dbdih; - - friend bool printGCPSaveRef(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - NodeShutdownInProgress = 1, - FakedSignalDueToNodeFailure = 2, - NodeRestartInProgress = 3 - }; - -private: - Uint32 dihPtr; - Uint32 nodeId; - Uint32 gci; - Uint32 errorCode; -}; - -class GCPSaveConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Dbdih; - - friend bool printGCPSaveConf(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 dihPtr; - Uint32 nodeId; - Uint32 gci; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/GetTabInfo.hpp b/storage/ndb/include/kernel/signaldata/GetTabInfo.hpp deleted file mode 100644 index e1cbf2b86dc..00000000000 --- a/storage/ndb/include/kernel/signaldata/GetTabInfo.hpp +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GET_INFO_TAB_HPP -#define GET_INFO_TAB_HPP - -#include "SignalData.hpp" - -/** - * GetTabInfo - Get table info from DICT - * - * Successfull return = series of DICTTABINFO-signals - */ -class GetTabInfoReq { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class Backup; - friend class Trix; - friend class DbUtil; - // API - friend class Table; - - friend bool printGET_TABINFO_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 requestType; // Bitmask of GetTabInfoReq::RequestType - union { - Uint32 tableId; - Uint32 tableNameLen; - }; - Uint32 unused; // This is located here so that Req & Ref have the same format - - enum RequestType { - RequestById = 0, - RequestByName = 1, - LongSignalConf = 2 - }; - SECTION( TABLE_NAME = 0 ); -}; - -class GetTabInfoRef { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class Backup; - friend class Trix; - friend class DbUtil; - // API - friend class Table; - - friend bool printGET_TABINFO_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 requestType; // Bitmask of GetTabInfoReq::RequestType - union { - Uint32 tableId; - Uint32 tableNameLen; - }; - Uint32 errorCode; - - enum ErrorCode { - InvalidTableId = 709, - TableNotDefined = 723, - TableNameTooLong = 702, - NoFetchByName = 710, - Busy = 701 - }; -}; - -class GetTabInfoConf { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class Backup; - friend class Trix; - friend class DbUtil; - friend class Suma; - // API - friend class Table; - - friend bool printGET_TABINFO_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - - SECTION( DICT_TAB_INFO = 0 ); -public: - Uint32 senderData; - Uint32 tableId; - union { - Uint32 gci; // For table - Uint32 freeWordsHi; // for logfile group m_free_file_words - }; - union { - Uint32 totalLen; // In words - Uint32 freeExtents; - Uint32 freeWordsLo; // for logfile group m_free_file_words - }; - Uint32 tableType; - Uint32 senderRef; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/GetTableId.hpp b/storage/ndb/include/kernel/signaldata/GetTableId.hpp deleted file mode 100644 index 0054c52baba..00000000000 --- a/storage/ndb/include/kernel/signaldata/GetTableId.hpp +++ /dev/null @@ -1,93 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GET_TABLEID_HPP -#define GET_TABLEID_HPP - -#include "SignalData.hpp" - -/** - * Convert tabname to table id - */ -class GetTableIdReq { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class SumaParticipant; - - friend bool printGET_TABLEID_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 len; - SECTION( TABLE_NAME = 0 ); -}; - - -/** - * Convert tabname to table id - */ -class GetTableIdRef { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class SumaParticipant; - friend bool printGET_TABLEID_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 err; - - enum ErrorCode { - InvalidTableId = 709, - TableNotDefined = 723, - TableNameTooLong = 702, - EmptyTable = 1111 - }; -}; - - -/** - * Convert tabname to table id - */ -class GetTableIdConf { - /** - * Sender(s) / Reciver(s) - */ - // Blocks - friend class Dbdict; - friend class SumaParticipant; - friend bool printGET_TABLEID_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 schemaVersion; - -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/GrepImpl.hpp b/storage/ndb/include/kernel/signaldata/GrepImpl.hpp deleted file mode 100644 index 9f4ed9e9c0f..00000000000 --- a/storage/ndb/include/kernel/signaldata/GrepImpl.hpp +++ /dev/null @@ -1,891 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef GREP_IMPL_HPP -#define GREP_IMPL_HPP - -#include "SignalData.hpp" -#include -#include - - - -/***************************************************************************** - * GREP REQ Request a Global Replication (between SS and PS) - *****************************************************************************/ -/** - * @class GrepReq - * @brief - */ -class GrepReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - -public: - enum Request { - START = 0, ///< Start Global Replication (all phases) - SLOWSTOP = 1, ///< Stop after finishing applying current GCI epoch - FASTSTOP = 2, ///< Stop after finishing applying all PS GCI epochs - STATUS = 3, ///< Status - REMOVE_BUFFERS = 4, ///< Remove buffers from PS and SS - - START_SUBSCR = 5, - START_METALOG = 6, ///< Start Global Replication Logging of Metadata - START_METASCAN = 7, ///< Start Global Replication Scanning of Metadata - START_DATALOG = 8, ///< Start Global Replication Logging of table data - START_DATASCAN = 9, ///< Start Global Replication Scanning of table data - START_REQUESTOR = 10, ///< Start Global Replication Requestor - START_TRANSFER = 11, ///< Start SS-PS transfer - START_APPLY = 12, ///< Start applying GCI epochs in SS - START_DELETE = 13, ///< Start deleting buffers at PS/SS REP automatic. - - STOP_SUBSCR = 14, ///< Remove subscription - STOP_METALOG = 15, ///< Stop Global Replication Logging of Metadata - STOP_METASCAN = 16, ///< Stop Global Replication Scanning of Metadata - STOP_DATALOG = 17, ///< Stop Global Replication Logging of table data - STOP_DATASCAN = 18, ///< Stop Global Replication Scanning of table data - STOP_REQUESTOR = 19, ///< Stop Global Replication Requestor - STOP_TRANSFER = 20, ///< Stop SS-PS transfer - STOP_APPLY = 21, ///< Stop applying GCI epochs in SS - STOP_DELETE = 22, ///< Stop deleting buffers at PS/SS REP automatically - CREATE_SUBSCR = 23, ///< Create subscription ID in SUMA - DROP_TABLE = 24, ///< Create subscription ID in SUMA - STOP = 25, - - NO_REQUEST = 0xffffffff - }; - - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderRef; - Uint32 request; -}; - - -/***************************************************************************** - * CREATE Between SS and PS (DB and REP nodes) - *****************************************************************************/ -/** - * @class GrepSubCreateReq - * @brief - */ -class GrepSubCreateReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_CREATE_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; - Uint32 senderRef; - Uint32 senderData; - SECTION( TABLE_LIST = 0 ); -}; - -/** - * @class GrepSubCreateReq - * @brief - */ -class GrepSubCreateRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_CREATE_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; - Uint32 err; - Uint32 senderRef; - Uint32 senderData; -}; - - -/** - * @class GrepSubCreateConf - * @brief - */ -class GrepSubCreateConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_CREATE_CONF(FILE 
*, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; - Uint32 senderRef; - Uint32 senderData; - Uint32 noOfNodeGroups; -}; - - - -/***************************************************************************** - * CREATE Internal between PS DB nodes - *****************************************************************************/ - -/** - * @class GrepCreateReq - * @brief - */ -class GrepCreateReq { - /** - * Sender(s)/Reciver(s) - */ - friend class GrepParticipant; - - friend bool printGREP_CREATE_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 8 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriberData; - Uint32 subscriberRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; - SECTION( TABLE_LIST = 0 ); -}; - - -/** - * @class GrepCreateRef - * @brief - */ -class GrepCreateRef { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_CREATE_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - enum ErrorCode { - NF_FakeErrorREF = GrepError::NF_FakeErrorREF - }; - STATIC_CONST( SignalLength = 6 ); - Uint32 senderRef; - Uint32 senderData; - union { - Uint32 err; - Uint32 errorCode; - }; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; -}; - - -/** - * @class GrepCreateConf - * @brief - */ -class GrepCreateConf { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_CREATE_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 senderNodeId; - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; -}; - - -/***************************************************************************** - * START Between SS and PS (DB and REP nodes) - *****************************************************************************/ - -/** - * @class GrepSubStartReq - * @brief - */ -class GrepSubStartReq { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_START_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderRef; - Uint32 senderData; - Uint32 part; -}; - -/** - * @class GrepSubStartRef - * @brief - */ -class GrepSubStartRef { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_START_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 err; - Uint32 senderRef; - Uint32 senderData; - Uint32 part; -}; - - - -/** - * @class GrepSubStartConf - * @brief - */ -class GrepSubStartConf { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_START_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 firstGCI; -}; - - -/***************************************************************************** - * START Internal between PS DB nodes - *****************************************************************************/ - -/** - * @class GrepStartReq - * @brief - */ -class GrepStartReq { - /** - * Sender(s)/Reciver(s) - */ - friend class 
GrepParticipant; - - friend bool printGREP_START_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -/** - * @class GrepStartRef - * @brief - */ -class GrepStartRef { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_START_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - enum ErrorCode { - NF_FakeErrorREF = GrepError::NF_FakeErrorREF - }; - STATIC_CONST( SignalLength = 6 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 subscriptionKey; - union { - Uint32 err; - Uint32 errorCode; - }; -}; - - -/** - * @class GrepStartConf - * @brief - */ -class GrepStartConf { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_START_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); - public: - STATIC_CONST( SignalLength = 7 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 firstGCI; - Uint32 senderNodeId; - }; - - -/***************************************************************************** - * SCAN (SYNC) Between SS and PS (REP and DB nodes) - *****************************************************************************/ - -/** - * @class GrepSubSyncReq - * @brief - */ -class GrepSubSyncReq { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_SYNC_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderRef; - Uint32 senderData; - Uint32 part; -}; - - -/** - * @class GrepSubSyncRef - * @brief - */ -class GrepSubSyncRef { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_SYNC_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderRef; - Uint32 err; - Uint32 senderData; - Uint32 part; -}; - - -/** - * @class GrepSubSyncConf - * @brief - */ -class GrepSubSyncConf { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_SYNC_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); - public: - STATIC_CONST( SignalLength = 7 ); - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 firstGCI; - Uint32 lastGCI; -}; - - - -/***************************************************************************** - * SCAN (SYNC) Internal between PS DB nodes - *****************************************************************************/ - -/** - * @class GrepSyncReq - * @brief - */ -class GrepSyncReq { - /** - * Sender(s)/Reciver(s) - */ - friend class GrepParticipant; - - friend bool printGREP_SYNC_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -/** - * @class GrepSyncRef - * @brief - */ -class GrepSyncRef { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_SYNC_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - enum ErrorCode { - NF_FakeErrorREF = GrepError::NF_FakeErrorREF - }; - STATIC_CONST( SignalLength = 6 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 
subscriptionKey; - union { - Uint32 err; - Uint32 errorCode; - }; -}; - - -/** - * @class GrepSyncConf - * @brief - */ -class GrepSyncConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class GrepParticipant; - - friend bool printGREP_SYNC_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 8 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 part; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderNodeId; - Uint32 firstGCI; - Uint32 lastGCI; -}; - -/***************************************************************************** - * ABORT - remove subscription - *****************************************************************************/ - -/** - * @class GrepSubRemoveReq - * @brief Between PS and SS - */ -class GrepSubRemoveReq { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_REMOVE_REQ(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -/** - * @class GrepSubRemoveRef - * @brief Between PS and SS - */ -class GrepSubRemoveRef { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_REMOVE_REF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 err; -}; - - -/** - * @class - * @brief - */ -class GrepSubRemoveConf { - /** - * Sender(s)/Reciver(s) - */ - friend class Grep; - - friend bool printGREP_SUB_REMOVE_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -/** - * @class - * @brief - */ -class GrepRemoveReq { - /** - * Sender(s)/Reciver(s) - */ - friend class GrepParticipant; - - friend bool printGREP_REMOVE_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -/** - * @class - * @brief - */ -class GrepRemoveRef { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_REMOVE_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - enum ErrorCode { - NF_FakeErrorREF = GrepError::NF_FakeErrorREF - }; - STATIC_CONST( SignalLength = 5 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - union { - Uint32 err; - Uint32 errorCode; - }; -}; - - -/** - * @class - * @brief - */ -class GrepRemoveConf { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_REMOVE_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderNodeId; -}; - - -/***************************************************************************** - * WAIT FOR CGP - *****************************************************************************/ - -/** - * @class GrepWaitGcpReq - * @brief - */ -class GrepWaitGcpReq { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_WAITGCP_REQ(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - - Uint32 senderData; - Uint32 gcp; - Uint32 subscriptionId; - Uint32 
subscriptionKey; - Uint32 senderNodeId; -}; - -/** - * @class GrepWaitGcpConf - * @brief - */ -class GrepWaitGcpConf { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_WAITGCP_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderNodeId; -}; - - - -class GrepCreateSubscriptionIdConf { - friend class Grep; - - friend bool printGREP_CREATE_SUBSCRIPTION_ID_CONF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - - Uint32 subscriptionId; - Uint32 subscriptionKey; - union { // Haven't decide what to call it - Uint32 senderData; - Uint32 subscriberData; - }; -}; - - - -class GrepStartMe { - friend class Grep; - friend bool printGREP_START_ME(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); - Uint32 senderRef; -}; - - - - -/** - * @class GrepAddSubReq - * @brief - */ -class GrepAddSubReq { - /** - * Sender(s)/Reciver(s) - */ - friend class GrepParticipant; - - friend bool printGREP_ADD_SUB_REQ(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 7 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriberData; - Uint32 subscriberRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; -}; - - -/** - * @class GrepAddSubRef - * @brief - */ -class GrepAddSubRef { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_CREATE_REF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 err; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; -}; - - -/** - * @class GrepAddSubConf - * @brief - */ -class GrepAddSubConf { - /** - * Sender(s)/Reciver(s) - */ - - friend class GrepParticipant; - - friend bool printGREP_CREATE_CONF(FILE *, - const Uint32 *, - Uint32, - Uint16); -public: - STATIC_CONST( SignalLength = 1 ); - Uint32 noOfSub; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/HotSpareRep.hpp b/storage/ndb/include/kernel/signaldata/HotSpareRep.hpp deleted file mode 100644 index 06cce089883..00000000000 --- a/storage/ndb/include/kernel/signaldata/HotSpareRep.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef HOT_SPAREREP_HPP -#define HOT_SPAREREP_HPP - -#include - -/** - * This signals is sent by Dbdih to Dbdict - */ -class HotSpareRep { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Sender(s) / Reciver(s) - */ - - /** - * Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 1 + NodeBitmask::Size ); -private: - - Uint32 noHotSpareNodes; - Uint32 theHotSpareNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp b/storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp deleted file mode 100755 index fb38ba16e9d..00000000000 --- a/storage/ndb/include/kernel/signaldata/IndxAttrInfo.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef INDX_ATTRINFO_HPP -#define INDX_ATTRINFO_HPP - -#include "SignalData.hpp" - -class IndxAttrInfo { - /** - * Sender(s) - */ - friend class NdbIndexOperation; - - /** - * Receiver(s) - */ - friend class Dbtc; - - friend bool printINDXATTRINFO(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 22 ); - STATIC_CONST( MaxSignalLength = HeaderLength + DataLength); - - // Public methods -public: - Uint32* getData() const; - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 attrData[DataLength]; -}; - -inline -Uint32* IndxAttrInfo::getData() const -{ - return (Uint32*)&attrData[0]; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp b/storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp deleted file mode 100755 index 5141ee1303b..00000000000 --- a/storage/ndb/include/kernel/signaldata/IndxKeyInfo.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef INDX_KEY_INFO_HPP -#define INDX_KEY_INFO_HPP - -#include "SignalData.hpp" - -class IndxKeyInfo { - /** - * Sender(s) - */ - friend class NdbIndexOperation; - - /** - * Reciver(s) - */ - friend class Dbtc; - - friend bool printINDXKEYINFO(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 20 ); - STATIC_CONST( MaxSignalLength = HeaderLength + DataLength ); - - // Public methods -public: - Uint32* getData() const; - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 keyData[DataLength]; -}; - -inline -Uint32* IndxKeyInfo::getData() const -{ - return (Uint32*)&keyData[0]; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp deleted file mode 100644 index a0290a328f1..00000000000 --- a/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPConf.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef INVALIDATE_NODE_LCP_CONF_HPP -#define INVALIDATE_NODE_LCP_CONF_HPP - -/** - * This signal is sent from the non-master DIH to master DIHs - * - */ -class InvalidateNodeLCPConf { - - /** - * Sender/Receiver - */ - friend class Dbdih; - - /** - * NodeId of sending node - * which is "done" - */ - Uint32 sendingNodeId; - -public: - STATIC_CONST( SignalLength = 1 ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp b/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp deleted file mode 100644 index 51dea1c6fd9..00000000000 --- a/storage/ndb/include/kernel/signaldata/InvalidateNodeLCPReq.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef INVALIDATE_NODE_LCP_REQ_HPP -#define INVALIDATE_NODE_LCP_REQ_HPP - -/** - * This signal is sent from the master DIH to all DIHs - * when a node is starting without filesystem. 
- * - * All DIHs must then "forgett" that the starting node has - * performed LCP - * - * @see StartPermReq - */ -class InvalidateNodeLCPReq { - - /** - * Sender/Receiver - */ - friend class Dbdih; - - Uint32 startingNodeId; - -public: - STATIC_CONST( SignalLength = 1 ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/KeyInfo.hpp b/storage/ndb/include/kernel/signaldata/KeyInfo.hpp deleted file mode 100644 index 12991a9fb20..00000000000 --- a/storage/ndb/include/kernel/signaldata/KeyInfo.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef KEY_INFO_HPP -#define KEY_INFO_HPP - -#include "SignalData.hpp" - -class KeyInfo { - /** - * Sender(s) - */ - friend class DbUtil; - friend class NdbOperation; - friend class NdbScanOperation; - friend class NdbIndexScanOperation; - friend class Restore; - - /** - * Reciver(s) - */ - friend class Dbtc; - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 20 ); - STATIC_CONST( MaxSignalLength = HeaderLength + DataLength ); - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 keyData[DataLength]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LCP.hpp b/storage/ndb/include/kernel/signaldata/LCP.hpp deleted file mode 100644 index 0b0bc4734e3..00000000000 --- a/storage/ndb/include/kernel/signaldata/LCP.hpp +++ /dev/null @@ -1,219 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LCP_SIGNAL_DATA_HPP -#define LCP_SIGNAL_DATA_HPP - -#include "SignalData.hpp" -#include - -class StartLcpReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Sender(s) / Receiver(s) - */ - - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printSTART_LCP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - - STATIC_CONST( SignalLength = 2 + 2 * NdbNodeBitmask::Size ); -private: - Uint32 senderRef; - Uint32 lcpId; - - NdbNodeBitmask participatingDIH; - NdbNodeBitmask participatingLQH; -}; - -class StartLcpConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Sender(s) / Receiver(s) - */ - - /** - * Receiver(s) - */ - friend class Dbdih; - - friend bool printSTART_LCP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 senderRef; - Uint32 lcpId; -}; - -/** - * This signals is sent by Dbdih to Dblqh - * to order checkpointing of a certain - * fragment. - */ -class LcpFragOrd { - /** - * Sender(s) - */ - friend class Dbdih; - friend class Lgman; - friend class Pgman; - friend class Dbtup; - - /** - * Sender(s) / Receiver(s) - */ - - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printLCP_FRAG_ORD(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 6 ); -private: - - Uint32 tableId; - Uint32 fragmentId; - Uint32 lcpNo; - Uint32 lcpId; - Uint32 lastFragmentFlag; - Uint32 keepGci; -}; - - -class LcpFragRep { - /** - * Sender(s) and receiver(s) - */ - friend class Dbdih; - - /** - * Sender(s) - */ - friend class Dblqh; - - friend bool printLCP_FRAG_REP(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 7 ); - -private: - Uint32 nodeId; - Uint32 lcpId; - Uint32 lcpNo; - Uint32 tableId; - Uint32 fragId; - Uint32 maxGciCompleted; - Uint32 maxGciStarted; -}; - -class LcpCompleteRep { - /** - * Sender(s) and receiver(s) - */ - friend class Dbdih; - - /** - * Sender(s) - */ - friend class Dblqh; - - friend bool printLCP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 nodeId; - Uint32 blockNo; - Uint32 lcpId; -}; - -struct LcpPrepareReq -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 lcpNo; - Uint32 tableId; - Uint32 fragmentId; - Uint32 lcpId; - Uint32 backupPtr; - Uint32 backupId; - - STATIC_CONST( SignalLength = 8 ); -}; - -struct LcpPrepareRef -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 fragmentId; - Uint32 errorCode; - - STATIC_CONST( SignalLength = 5 ); -}; - -struct LcpPrepareConf -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 tableId; - Uint32 fragmentId; - - STATIC_CONST( SignalLength = 4 ); -}; - -struct EndLcpReq -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 backupPtr; - Uint32 backupId; - - STATIC_CONST( SignalLength = 4 ); -}; - -struct EndLcpRef -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - - STATIC_CONST( SignalLength = 3 ); -}; - -struct EndLcpConf -{ - Uint32 senderData; - Uint32 senderRef; - - STATIC_CONST( SignalLength = 2 ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LgmanContinueB.hpp b/storage/ndb/include/kernel/signaldata/LgmanContinueB.hpp deleted file mode 100644 index 6c1e6456f76..00000000000 --- 
a/storage/ndb/include/kernel/signaldata/LgmanContinueB.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LGMAN_CONTINUEB_H -#define LGMAN_CONTINUEB_H - -#include "SignalData.hpp" - -struct LgmanContinueB { - - enum { - CUT_LOG_TAIL = 0 - ,FILTER_LOG = 1 - ,FLUSH_LOG = 2 - ,PROCESS_LOG_BUFFER_WAITERS = 3 - ,FIND_LOG_HEAD = 4 - ,EXECUTE_UNDO_RECORD = 5 - ,READ_UNDO_LOG = 6 - ,STOP_UNDO_LOG = 7 - ,PROCESS_LOG_SYNC_WAITERS = 8 - ,FORCE_LOG_SYNC = 9 - ,DROP_FILEGROUP = 10 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ListTables.hpp b/storage/ndb/include/kernel/signaldata/ListTables.hpp deleted file mode 100644 index b38829ae3de..00000000000 --- a/storage/ndb/include/kernel/signaldata/ListTables.hpp +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LIST_TABLES_HPP -#define LIST_TABLES_HPP - -#include -#include "SignalData.hpp" - -/** - * It is convenient to pack request/response data per table in one - * 32-bit word... 
- */ -class ListTablesData { -public: - static Uint32 getTableId(Uint32 data) { - return BitmaskImpl::getField(1, &data, 0, 12); - } - static void setTableId(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 0, 12, val); - } - static Uint32 getTableType(Uint32 data) { - return BitmaskImpl::getField(1, &data, 12, 8); - } - static void setTableType(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 12, 8, val); - } - static Uint32 getTableStore(Uint32 data) { - return BitmaskImpl::getField(1, &data, 20, 3); - } - static void setTableStore(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 20, 3, val); - } - static Uint32 getTableTemp(Uint32 data) { - return BitmaskImpl::getField(1, &data, 23, 1); - } - static void setTableTemp(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 23, 1, val); - } - static Uint32 getTableState(Uint32 data) { - return BitmaskImpl::getField(1, &data, 24, 4); - } - static void setTableState(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 24, 4, val); - } - static Uint32 getListNames(Uint32 data) { - return BitmaskImpl::getField(1, &data, 28, 1); - } - static void setListNames(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 28, 1, val); - } - static Uint32 getListIndexes(Uint32 data) { - return BitmaskImpl::getField(1, &data, 29, 1); - } - static void setListIndexes(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 29, 1, val); - } -}; - -class ListTablesReq { - /** - * Sender(s) - */ - friend class Backup; - friend class Table; - friend class Suma; - - /** - * Reciver(s) - */ - friend class Dbdict; - -public: - STATIC_CONST( SignalLength = 3 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 requestData; - - Uint32 getTableId() { - return ListTablesData::getTableId(requestData); - } - void setTableId(Uint32 val) { - ListTablesData::setTableId(requestData, val); - } - Uint32 getTableType() const { - return ListTablesData::getTableType(requestData); - } - void setTableType(Uint32 val) { - ListTablesData::setTableType(requestData, val); - } - Uint32 getListNames() const { - return ListTablesData::getListNames(requestData); - } - void setListNames(Uint32 val) { - ListTablesData::setListNames(requestData, val); - } - Uint32 getListIndexes() const { - return ListTablesData::getListIndexes(requestData); - } - void setListIndexes(Uint32 val) { - ListTablesData::setListIndexes(requestData, val); - } -}; - -class ListTablesConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Reciver(s) - */ - friend class Backup; - friend class Table; - friend class Suma; - -public: - /** - * Note: last signal is indicated by having length < 25 - */ - STATIC_CONST( SignalLength = 25 ); - STATIC_CONST( HeaderLength = 2 ); - STATIC_CONST( DataLength = 23 ); - -public: - Uint32 senderData; - Uint32 counter; - Uint32 tableData[DataLength]; - - static Uint32 getTableId(Uint32 data) { - return ListTablesData::getTableId(data); - } - void setTableId(unsigned pos, Uint32 val) { - ListTablesData::setTableId(tableData[pos], val); - } - static Uint32 getTableType(Uint32 data) { - return ListTablesData::getTableType(data); - } - void setTableType(unsigned pos, Uint32 val) { - ListTablesData::setTableType(tableData[pos], val); - } - static Uint32 getTableStore(Uint32 data) { - return ListTablesData::getTableStore(data); - } - void setTableStore(unsigned pos, Uint32 val) { - ListTablesData::setTableStore(tableData[pos], val); - } - static Uint32 getTableState(Uint32 data) { - return 
ListTablesData::getTableState(data); - } - void setTableState(unsigned pos, Uint32 val) { - ListTablesData::setTableState(tableData[pos], val); - } - static Uint32 getTableTemp(Uint32 data) { - return ListTablesData::getTableTemp(data); - } - void setTableTemp(unsigned pos, Uint32 val) { - ListTablesData::setTableTemp(tableData[pos], val); - } -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp deleted file mode 100644 index c0712604130..00000000000 --- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp +++ /dev/null @@ -1,260 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LQH_FRAG_HPP -#define LQH_FRAG_HPP - -#include "SignalData.hpp" - -class AddFragReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printADD_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 11 ); - - enum RequestInfo { - CreateInRunning = 0x8000000, - TemporaryTable = 0x00000010 - }; -private: - Uint32 dihPtr; - Uint32 senderData; // The same data as sent in DIADDTABREQ - Uint32 fragmentId; - Uint32 requestInfo; - Uint32 tableId; - Uint32 nextLCP; - Uint32 nodeId; - Uint32 totalFragments; - Uint32 startGci; - Uint32 tablespaceId; - Uint32 logPartId; -}; - -class AddFragRef { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; - - friend bool printADD_FRAG_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 dihPtr; -}; - -class AddFragConf { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbdih; - - friend bool printADD_FRAG_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 dihPtr; - Uint32 fragId; -}; - -class LqhFragReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 25 ); - - enum RequestInfo { - CreateInRunning = 0x8000000, - TemporaryTable = 0x00000010 - }; - -private: - Uint32 senderData; - Uint32 senderRef; - Uint32 fragmentId; - Uint32 requestInfo; - Uint32 maxLoadFactor; - Uint32 minLoadFactor; - Uint32 kValue; - Uint32 schemaVersion; - Uint32 nextLCP; - Uint32 noOfCharsets; - Uint32 startGci; - Uint32 tableType; // DictTabInfo::TableType - Uint32 primaryTableId; // table of index or RNIL - Uint32 tablespace_id; // RNIL for MM table - Uint16 tableId; - Uint16 localKeyLength; - Uint16 lh3DistrBits; - Uint16 lh3PageBits; - Uint16 noOfAttributes; - Uint16 noOfNullAttributes; - Uint16 noOfPagesToPreAllocate; - Uint16 keyLength; - Uint16 noOfKeyAttr; - Uint8 
checksumIndicator; - Uint8 GCPIndicator; - Uint32 logPartId; - Uint32 maxRowsLow; - Uint32 maxRowsHigh; - Uint32 minRowsLow; - Uint32 minRowsHigh; - Uint32 forceVarPartFlag; -}; - -class LqhFragConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printLQH_FRAG_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; - Uint32 lqhFragPtr; -}; - -class LqhFragRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printLQH_FRAG_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; - Uint32 errorCode; -}; - -class LqhAddAttrReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printLQH_ADD_ATTR_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( HeaderLength = 4 ); - STATIC_CONST( EntryLength = 3 ); - STATIC_CONST( MAX_ATTRIBUTES = 6 ); - struct Entry { - Uint32 attrId; // for index, includes primary attr id << 16 - Uint32 attrDescriptor; // 2 words type info - Uint32 extTypeInfo; - }; -private: - Uint32 lqhFragPtr; - Uint32 noOfAttributes; - Uint32 senderData; - Uint32 senderAttrPtr; - Entry attributes[MAX_ATTRIBUTES]; -}; - -class LqhAddAttrRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printLQH_ADD_ATTR_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; - Uint32 errorCode; -}; - -class LqhAddAttrConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printLQH_ADD_ATTR_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderData; - Uint32 senderAttrPtr; - Uint32 fragId; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LqhKey.hpp b/storage/ndb/include/kernel/signaldata/LqhKey.hpp deleted file mode 100644 index 1aa43edeb99..00000000000 --- a/storage/ndb/include/kernel/signaldata/LqhKey.hpp +++ /dev/null @@ -1,613 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LQH_KEY_H -#define LQH_KEY_H - -#include "SignalData.hpp" - -class LqhKeyReq { - /** - * Reciver(s) - */ - friend class Dblqh; // Reciver - - /** - * Sender(s) - */ - friend class Dbtc; - friend class Restore; - - /** - * For printing - */ - friend bool printLQHKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( FixedSignalLength = 11 ); - -private: - - /** - * DATA VARIABLES - */ -//------------------------------------------------------------- -// Unconditional part. First 10 words -//------------------------------------------------------------- - UintR clientConnectPtr; // DATA 0 - UintR attrLen; // DATA 1 - UintR hashValue; // DATA 2 - UintR requestInfo; // DATA 3 - UintR tcBlockref; // DATA 4 - UintR tableSchemaVersion; // DATA 5 - UintR fragmentData; // DATA 6 - UintR transId1; // DATA 7 - UintR transId2; // DATA 8 - UintR savePointId; // DATA 9 - union { - /** - * When sent from TC -> LQH this variable contains scanInfo - * When send from LQH -> LQH this variable contains noFiredTriggers - */ - UintR noFiredTriggers; // DATA 10 - Uint32 scanInfo; // DATA 10 - }; - -//------------------------------------------------------------- -// Variable sized key part. Those will be placed to -// pack the signal in an appropriate manner. -//------------------------------------------------------------- - UintR variableData[10]; // DATA 11 - 21 - - static UintR getAttrLen(const UintR & scanInfoAttrLen); - static UintR getScanTakeOverFlag(const UintR & scanInfoAttrLen); - static UintR getStoredProcFlag(const UintR & scanData); - static UintR getDistributionKey(const UintR & scanData); - - static UintR getTableId(const UintR & tableSchemaVersion); - static UintR getSchemaVersion(const UintR & tableSchemaVersion); - - static UintR getFragmentId(const UintR & fragmentData); - static UintR getNextReplicaNodeId(const UintR & fragmentData); - - static Uint8 getLockType(const UintR & requestInfo); - static Uint8 getDirtyFlag(const UintR & requestInfo); - static Uint8 getInterpretedFlag(const UintR & requestInfo); - static Uint8 getSimpleFlag(const UintR & requestInfo); - static Uint8 getOperation(const UintR & requestInfo); - static Uint8 getSeqNoReplica(const UintR & requestInfo); - static Uint8 getLastReplicaNo(const UintR & requestInfo); - static Uint8 getAIInLqhKeyReq(const UintR & requestInfo); - static UintR getKeyLen(const UintR & requestInfo); - static UintR getSameClientAndTcFlag(const UintR & requestInfo); - static UintR getReturnedReadLenAIFlag(const UintR & requestInfo); - static UintR getApplicationAddressFlag(const UintR & requestInfo); - static UintR getMarkerFlag(const UintR & requestInfo); - static UintR getNoDiskFlag(const UintR & requestInfo); - - /** - * Setters - */ - - static void setAttrLen(UintR & scanInfoAttrLen, UintR val); - static void setScanTakeOverFlag(UintR & scanInfoAttrLen, UintR val); - static void setStoredProcFlag(UintR & scanData, UintR val); - static void setDistributionKey(UintR & scanData, UintR val); - - static void setTableId(UintR & tableSchemaVersion, UintR val); - static void setSchemaVersion(UintR & tableSchemaVersion, UintR val); - - static void setFragmentId(UintR & fragmentData, UintR val); - static void setNextReplicaNodeId(UintR & fragmentData, UintR val); - - static void 
setLockType(UintR & requestInfo, UintR val); - static void setDirtyFlag(UintR & requestInfo, UintR val); - static void setInterpretedFlag(UintR & requestInfo, UintR val); - static void setSimpleFlag(UintR & requestInfo, UintR val); - static void setOperation(UintR & requestInfo, UintR val); - static void setSeqNoReplica(UintR & requestInfo, UintR val); - static void setLastReplicaNo(UintR & requestInfo, UintR val); - static void setAIInLqhKeyReq(UintR & requestInfo, UintR val); - static void setKeyLen(UintR & requestInfo, UintR val); - static void setSameClientAndTcFlag(UintR & requestInfo, UintR val); - static void setReturnedReadLenAIFlag(UintR & requestInfo, UintR val); - static void setApplicationAddressFlag(UintR & requestInfo, UintR val); - static void setMarkerFlag(UintR & requestInfo, UintR val); - static void setNoDiskFlag(UintR & requestInfo, UintR val); - - static UintR getRowidFlag(const UintR & requestInfo); - static void setRowidFlag(UintR & requestInfo, UintR val); - - /** - * When doing DIRTY WRITES - */ - static UintR getGCIFlag(const UintR & requestInfo); - static void setGCIFlag(UintR & requestInfo, UintR val); - - static UintR getNrCopyFlag(const UintR & requestInfo); - static void setNrCopyFlag(UintR & requestInfo, UintR val); -}; - -/** - * Request Info - * - * k = Key len - 10 Bits (0-9) max 1023 - * l = Last Replica No - 2 Bits -> Max 3 (10-11) - - IF version < NDBD_ROWID_VERSION - * t = Lock type - 3 Bits -> Max 7 (12-14) - * p = Application Addr. Ind - 1 Bit (15) - * d = Dirty indicator - 1 Bit (16) - * i = Interpreted indicator - 1 Bit (17) - * s = Simple indicator - 1 Bit (18) - * o = Operation - 3 Bits (19-21) - * r = Sequence replica - 2 Bits (22-23) - * a = Attr Info in LQHKEYREQ - 3 Bits (24-26) - * c = Same client and tc - 1 Bit (27) - * u = Read Len Return Ind - 1 Bit (28) - * m = Commit ack marker - 1 Bit (29) - * x = No disk usage - 1 Bit (30) - * z = Use rowid for insert - 1 Bit (31) - * g = gci flag - 1 Bit (12) - * n = NR copy - 1 Bit (13) - - * 1111111111222222222233 - * 01234567890123456789012345678901 - * kkkkkkkkkklltttpdisooorraaacumxz - * kkkkkkkkkkllgn pdisooorraaacumxz - */ - -#define RI_KEYLEN_SHIFT (0) -#define RI_KEYLEN_MASK (1023) -#define RI_LAST_REPL_SHIFT (10) -#define RI_LAST_REPL_MASK (3) -#define RI_LOCK_TYPE_SHIFT (12) -#define RI_LOCK_TYPE_MASK (7) -#define RI_APPL_ADDR_SHIFT (15) -#define RI_DIRTY_SHIFT (16) -#define RI_INTERPRETED_SHIFT (17) -#define RI_SIMPLE_SHIFT (18) -#define RI_OPERATION_SHIFT (19) -#define RI_OPERATION_MASK (7) -#define RI_SEQ_REPLICA_SHIFT (22) -#define RI_SEQ_REPLICA_MASK (3) -#define RI_AI_IN_THIS_SHIFT (24) -#define RI_AI_IN_THIS_MASK (7) -#define RI_SAME_CLIENT_SHIFT (27) -#define RI_RETURN_AI_SHIFT (28) -#define RI_MARKER_SHIFT (29) -#define RI_NODISK_SHIFT (30) -#define RI_ROWID_SHIFT (31) -#define RI_GCI_SHIFT (12) -#define RI_NR_COPY_SHIFT (13) - -/** - * Scan Info - * - * a = Attr Len - 16 Bits -> max 65535 (0-15) - * p = Stored Procedure Ind - 1 Bit (16) - * d = Distribution key - 8 Bit -> max 255 (17-24) - * t = Scan take over indicator - 1 Bit (25) - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * aaaaaaaaaaaaaaaapddddddddt - */ - -#define SI_ATTR_LEN_MASK (65535) -#define SI_ATTR_LEN_SHIFT (0) -#define SI_STORED_PROC_SHIFT (16) -#define SI_DISTR_KEY_MASK (255) -#define SI_DISTR_KEY_SHIFT (17) -#define SI_SCAN_TO_SHIFT (25) -#define SI_SCAN_INFO_MASK (63) -#define SI_SCAN_INFO_SHIFT (26) - -inline -UintR -LqhKeyReq::getAttrLen(const UintR & scanData) -{ - return (scanData 
>> SI_ATTR_LEN_SHIFT) & SI_ATTR_LEN_MASK; -} - -inline -Uint32 -LqhKeyReq::getScanTakeOverFlag(const UintR & scanData) -{ - return (scanData >> SI_SCAN_TO_SHIFT) & 1; -} - -inline -UintR -LqhKeyReq::getStoredProcFlag(const UintR & scanData){ - return (scanData >> SI_STORED_PROC_SHIFT) & 1; -} - -inline -UintR -LqhKeyReq::getDistributionKey(const UintR & scanData){ - return (scanData >> SI_DISTR_KEY_SHIFT) & SI_DISTR_KEY_MASK; -} - -inline -UintR LqhKeyReq::getTableId(const UintR & tableSchemaVersion) -{ - return tableSchemaVersion & 0xFFFF; -} - -inline -UintR LqhKeyReq::getSchemaVersion(const UintR & tableSchemaVersion) -{ - return tableSchemaVersion >> 16; -} - -inline -UintR LqhKeyReq::getFragmentId(const UintR & fragmentData) -{ - return fragmentData & 0xFFFF; -} - -inline -UintR LqhKeyReq::getNextReplicaNodeId(const UintR & fragmentData) -{ - return fragmentData >> 16; -} - -inline -Uint8 LqhKeyReq::getLastReplicaNo(const UintR & requestInfo) -{ - return (requestInfo >> RI_LAST_REPL_SHIFT) & RI_LAST_REPL_MASK; -} - -inline -Uint8 LqhKeyReq::getLockType(const UintR & requestInfo) -{ - return (requestInfo >> RI_LOCK_TYPE_SHIFT) & RI_LOCK_TYPE_MASK; -} - -inline -Uint8 LqhKeyReq::getDirtyFlag(const UintR & requestInfo) -{ - return (requestInfo >> RI_DIRTY_SHIFT) & 1; -} - -inline -Uint8 LqhKeyReq::getInterpretedFlag(const UintR & requestInfo) -{ - return (requestInfo >> RI_INTERPRETED_SHIFT) & 1; -} - -inline -Uint8 LqhKeyReq::getSimpleFlag(const UintR & requestInfo) -{ - return (requestInfo >> RI_SIMPLE_SHIFT) & 1; -} - -inline -Uint8 LqhKeyReq::getOperation(const UintR & requestInfo) -{ - return (requestInfo >> RI_OPERATION_SHIFT) & RI_OPERATION_MASK; -} - -inline -Uint8 LqhKeyReq::getSeqNoReplica(const UintR & requestInfo) -{ - return (requestInfo >> RI_SEQ_REPLICA_SHIFT) & RI_SEQ_REPLICA_MASK; -} - - -inline -Uint8 LqhKeyReq::getAIInLqhKeyReq(const UintR & requestInfo) -{ - return (requestInfo >> RI_AI_IN_THIS_SHIFT) & RI_AI_IN_THIS_MASK; -} - -inline -UintR LqhKeyReq::getKeyLen(const UintR & requestInfo) -{ - return (requestInfo >> RI_KEYLEN_SHIFT) & RI_KEYLEN_MASK; -} - -inline -UintR -LqhKeyReq::getSameClientAndTcFlag(const UintR & requestInfo) -{ - return (requestInfo >> RI_SAME_CLIENT_SHIFT) & 1; -} - -inline -UintR LqhKeyReq::getReturnedReadLenAIFlag(const UintR & requestInfo) -{ - return (requestInfo >> RI_RETURN_AI_SHIFT) & 1; -} - -inline -UintR -LqhKeyReq::getApplicationAddressFlag(const UintR & requestInfo){ - return (requestInfo >> RI_APPL_ADDR_SHIFT) & 1; -} - -inline -void -LqhKeyReq::setAttrLen(UintR & scanInfoAttrLen, UintR val){ - ASSERT_MAX(val, SI_ATTR_LEN_MASK, "LqhKeyReq::setAttrLen"); - scanInfoAttrLen |= (val << SI_ATTR_LEN_SHIFT); -} - - -inline -void -LqhKeyReq::setScanTakeOverFlag(UintR & scanInfoAttrLen, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setScanTakeOverFlag"); - scanInfoAttrLen |= (val << SI_SCAN_TO_SHIFT); -} -inline -void - -LqhKeyReq::setStoredProcFlag(UintR & scanData, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setStoredProcFlag"); - scanData |= (val << SI_STORED_PROC_SHIFT); -} - -inline -void - -LqhKeyReq::setDistributionKey(UintR & scanData, UintR val){ - ASSERT_MAX(val, SI_DISTR_KEY_MASK, "LqhKeyReq::setDistributionKey"); - scanData |= (val << SI_DISTR_KEY_SHIFT); -} - -#if 0 -inline -void - -LqhKeyReq::setTableId(UintR & tableSchemaVersion, UintR val){ - -} -inline -void -LqhKeyReq::setSchemaVersion(UintR & tableSchemaVersion, UintR val); - -inline -void -LqhKeyReq::setFragmentId(UintR & fragmentData, UintR val); - -inline -void 
-LqhKeyReq::setNextReplicaNodeId(UintR & fragmentData, UintR val); -#endif - -inline -void -LqhKeyReq::setLockType(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_LOCK_TYPE_MASK, "LqhKeyReq::setLockType"); - requestInfo |= (val << RI_LOCK_TYPE_SHIFT); -} - -inline -void -LqhKeyReq::setDirtyFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setDirtyFlag"); - requestInfo |= (val << RI_DIRTY_SHIFT); -} - -inline -void -LqhKeyReq::setInterpretedFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setInterpretedFlag"); - requestInfo |= (val << RI_INTERPRETED_SHIFT); -} - -inline -void -LqhKeyReq::setSimpleFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setSimpleFlag"); - requestInfo |= (val << RI_SIMPLE_SHIFT); -} - -inline -void -LqhKeyReq::setOperation(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_OPERATION_MASK, "LqhKeyReq::setOperation"); - requestInfo |= (val << RI_OPERATION_SHIFT); -} - -inline -void -LqhKeyReq::setSeqNoReplica(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_SEQ_REPLICA_MASK, "LqhKeyReq::setSeqNoReplica"); - requestInfo |= (val << RI_SEQ_REPLICA_SHIFT); -} - -inline -void -LqhKeyReq::setLastReplicaNo(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_LAST_REPL_MASK, "LqhKeyReq::setLastReplicaNo"); - requestInfo |= (val << RI_LAST_REPL_SHIFT); -} - -inline -void -LqhKeyReq::setAIInLqhKeyReq(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_AI_IN_THIS_MASK, "LqhKeyReq::setAIInLqhKeyReq"); - requestInfo |= (val << RI_AI_IN_THIS_SHIFT); -} - -inline -void -LqhKeyReq::setKeyLen(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, RI_KEYLEN_MASK, "LqhKeyReq::setKeyLen"); - requestInfo |= (val << RI_KEYLEN_SHIFT); -} - -inline -void -LqhKeyReq::setSameClientAndTcFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setSameClientAndTcFlag"); - requestInfo |= (val << RI_SAME_CLIENT_SHIFT); -} - -inline -void -LqhKeyReq::setReturnedReadLenAIFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setReturnedReadLenAIFlag"); - requestInfo |= (val << RI_RETURN_AI_SHIFT); -} - -inline -void -LqhKeyReq::setApplicationAddressFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setApplicationAddressFlag"); - requestInfo |= (val << RI_APPL_ADDR_SHIFT); -} - -/**** */ - -inline -void -LqhKeyReq::setMarkerFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setMarkerFlag"); - requestInfo |= (val << RI_MARKER_SHIFT); -} - -inline -UintR -LqhKeyReq::getMarkerFlag(const UintR & requestInfo){ - return (requestInfo >> RI_MARKER_SHIFT) & 1; -} - -inline -void -LqhKeyReq::setNoDiskFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setNoDiskFlag"); - requestInfo |= (val << RI_NODISK_SHIFT); -} - -inline -UintR -LqhKeyReq::getNoDiskFlag(const UintR & requestInfo){ - return (requestInfo >> RI_NODISK_SHIFT) & 1; -} - -inline -void -LqhKeyReq::setRowidFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setRowidFlag"); - requestInfo |= (val << RI_ROWID_SHIFT); -} - -inline -UintR -LqhKeyReq::getRowidFlag(const UintR & requestInfo){ - return (requestInfo >> RI_ROWID_SHIFT) & 1; -} - -inline -void -LqhKeyReq::setGCIFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhKeyReq::setGciFlag"); - requestInfo |= (val << RI_GCI_SHIFT); -} - -inline -UintR -LqhKeyReq::getGCIFlag(const UintR & requestInfo){ - return (requestInfo >> RI_GCI_SHIFT) & 1; -} - -inline -void -LqhKeyReq::setNrCopyFlag(UintR & requestInfo, UintR val){ - 
ASSERT_BOOL(val, "LqhKeyReq::setNrCopyFlag"); - requestInfo |= (val << RI_NR_COPY_SHIFT); -} - -inline -UintR -LqhKeyReq::getNrCopyFlag(const UintR & requestInfo){ - return (requestInfo >> RI_NR_COPY_SHIFT) & 1; -} - -class LqhKeyConf { - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Restore; - - /** - * Sender(s) - */ - friend class Dblqh; - - // Sent in a packed signal - friend class PackedSignal; - /** - * For printing - */ - friend bool printPACKED_SIGNAL(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - friend bool printLQHKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 7 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 connectPtr; - Uint32 opPtr; - Uint32 userRef; - Uint32 readLen; - Uint32 transId1; - Uint32 transId2; - Uint32 noFiredTriggers; -}; - -class LqhKeyRef { - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Restore; - - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * For printing - */ - friend bool printLQHKEYREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 5 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 userRef; - Uint32 connectPtr; - Uint32 errorCode; - Uint32 transId1; - Uint32 transId2; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp deleted file mode 100644 index e3fc622911d..00000000000 --- a/storage/ndb/include/kernel/signaldata/LqhSizeAltReq.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LQH_SIZE_ALT_REQ_H -#define LQH_SIZE_ALT_REQ_H - - - -#include "SignalData.hpp" - -class LqhSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dblqh; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0); - STATIC_CONST( IND_FRAG = 1); - STATIC_CONST( IND_CONNECT = 2); - STATIC_CONST( IND_TABLE = 3); - STATIC_CONST( IND_TC_CONNECT = 4); - STATIC_CONST( IND_REPLICAS = 5); - STATIC_CONST( IND_LOG_FILES = 6); - STATIC_CONST( IND_SCAN = 7); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[8]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/LqhTransConf.hpp b/storage/ndb/include/kernel/signaldata/LqhTransConf.hpp deleted file mode 100644 index bcefa3f0299..00000000000 --- a/storage/ndb/include/kernel/signaldata/LqhTransConf.hpp +++ /dev/null @@ -1,218 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LQH_TRANS_CONF_H -#define LQH_TRANS_CONF_H - -#include "SignalData.hpp" - -/** - * This signal is sent as response to a LQH_TRANSREQ - * which is sent as by a take-over TC - */ -class LqhTransConf { - /** - * Reciver(s) - */ - friend class Dbtc; - - /** - * Sender(s) - */ - friend class Dblqh; - - friend bool printLQH_TRANSCONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 15 ); -private: - - /** - * This type describes the state of the operation returned in this signal - */ - enum OperationStatus { - InvalidStatus = 0, /**< This status should never be sent in a signal - it is only used for initializing variables so that - you can easily later check if they have changed */ - LastTransConf = 4, /**< This status indicates that LQH has finished the scan - of operations belonging to the died TC. 
- Data 0 - 2 is valid */ - - Prepared = 2, - Committed = 3, - Aborted = 1, - Marker = 5 /**< This means that the only thing left is a marker, - Data 0 - 6 is valid */ - }; - - /** - * DATA VARIABLES - */ - Uint32 tcRef; // 0 - Uint32 lqhNodeId; // 1 - Uint32 operationStatus; // 2 See enum OperationStatus - Uint32 transId1; // 3 - Uint32 transId2; // 4 - Uint32 apiRef; // 5 - Uint32 apiOpRec; // 6 - Uint32 lqhConnectPtr; - Uint32 oldTcOpRec; - Uint32 requestInfo; - Uint32 gci; - Uint32 nextNodeId1; - Uint32 nextNodeId2; - Uint32 nextNodeId3; - Uint32 tableId; - - /** - * Getters - */ - static Uint32 getReplicaNo(Uint32 & requestInfo); - static Uint32 getReplicaType(Uint32 & requestInfo); - static Uint32 getLastReplicaNo(Uint32 & requestInfo); - static Uint32 getSimpleFlag(Uint32 & requestInfo); - static Uint32 getDirtyFlag(Uint32 & requestInfo); - static Uint32 getOperation(Uint32 & requestInfo); - static Uint32 getMarkerFlag(Uint32 & requestInfo); - - static void setReplicaNo(UintR & requestInfo, UintR val); - static void setReplicaType(UintR & requestInfo, UintR val); - static void setLastReplicaNo(UintR & requestInfo, UintR val); - static void setSimpleFlag(UintR & requestInfo, UintR val); - static void setDirtyFlag(UintR & requestInfo, UintR val); - static void setOperation(UintR & requestInfo, UintR val); - static void setMarkerFlag(Uint32 & requestInfo, Uint32 val); -}; - -/** - * Request Info - * - * t = replica type - 2 Bits (0-1) - * r = Replica No - 2 Bits (2-3) - * l = Last Replica No - 2 Bits (4-5) - * s = Simple - 1 Bits (6) - * d = Dirty - 1 Bit (7) - * o = Operation - 3 Bit (8-9) - * m = Marker present - 1 Bit (10) - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ttrrllsdooom - */ -#define LTC_REPLICA_TYPE_SHIFT (0) -#define LTC_REPLICA_TYPE_MASK (3) -#define LTC_REPLICA_NO_SHIFT (2) -#define LTC_REPLICA_NO_MASK (3) -#define LTC_LAST_REPLICA_SHIFT (4) -#define LTC_LAST_REPLICA_MASK (3) -#define LTC_SIMPLE_SHIFT (6) -#define LTC_DIRTY_SHIFT (7) -#define LTC_OPERATION_SHIFT (8) -#define LTC_OPERATION_MASK (7) -#define LTC_MARKER_SHIFT (10) - -inline -Uint32 -LqhTransConf::getReplicaType(Uint32 & requestInfo){ - return (requestInfo >> LTC_REPLICA_TYPE_SHIFT) & LTC_REPLICA_TYPE_MASK; -} - -inline -Uint32 -LqhTransConf::getReplicaNo(Uint32 & requestInfo){ - return (requestInfo >> LTC_REPLICA_NO_SHIFT) & LTC_REPLICA_NO_MASK; -} - -inline -Uint32 -LqhTransConf::getLastReplicaNo(Uint32 & requestInfo){ - return (requestInfo >> LTC_LAST_REPLICA_SHIFT) & LTC_LAST_REPLICA_MASK; -} - -inline -Uint32 -LqhTransConf::getSimpleFlag(Uint32 & requestInfo){ - return (requestInfo >> LTC_SIMPLE_SHIFT) & 1; -} - -inline -Uint32 -LqhTransConf::getDirtyFlag(Uint32 & requestInfo){ - return (requestInfo >> LTC_DIRTY_SHIFT) & 1; -} - -inline -Uint32 -LqhTransConf::getOperation(Uint32 & requestInfo){ - return (requestInfo >> LTC_OPERATION_SHIFT) & LTC_OPERATION_MASK; -} - -inline -Uint32 -LqhTransConf::getMarkerFlag(Uint32 & requestInfo){ - return (requestInfo >> LTC_MARKER_SHIFT) & 1; -} - - -inline -void -LqhTransConf::setReplicaNo(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, LTC_REPLICA_NO_MASK, "LqhTransConf::setReplicaNo"); - requestInfo |= (val << LTC_REPLICA_NO_SHIFT); -} - -inline -void -LqhTransConf::setReplicaType(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, LTC_REPLICA_TYPE_MASK, "LqhTransConf::setReplicaType"); - requestInfo |= (val << LTC_REPLICA_TYPE_SHIFT); -} - -inline -void -LqhTransConf::setLastReplicaNo(UintR & requestInfo, UintR val){ - 
ASSERT_MAX(val, LTC_LAST_REPLICA_MASK, "LqhTransConf::setLastReplicaNo"); - requestInfo |= (val << LTC_LAST_REPLICA_SHIFT); -} - -inline -void -LqhTransConf::setSimpleFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhTransConf::setSimpleFlag"); - requestInfo |= (val << LTC_SIMPLE_SHIFT); -} - -inline -void -LqhTransConf::setDirtyFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhTransConf::setDirtyFlag"); - requestInfo |= (val << LTC_DIRTY_SHIFT); -} - -inline -void -LqhTransConf::setOperation(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, LTC_OPERATION_MASK, "LqhTransConf::setOperation"); - requestInfo |= (val << LTC_OPERATION_SHIFT); -} - -inline -void -LqhTransConf::setMarkerFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "LqhTransConf::setMarkerFlag"); - requestInfo |= (val << LTC_MARKER_SHIFT); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ManagementServer.hpp b/storage/ndb/include/kernel/signaldata/ManagementServer.hpp deleted file mode 100644 index 6fb72818bf7..00000000000 --- a/storage/ndb/include/kernel/signaldata/ManagementServer.hpp +++ /dev/null @@ -1,87 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MANAGEMENTSERVER_HPP -#define MANAGEMENTSERVER_HPP - -#include "SignalData.hpp" - -/** - * Request to lock configuration - */ -class MgmLockConfigReq { - friend class MgmtSrvr; - -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 newConfigGeneration; -}; - -/** - * Confirm configuration lock - */ -class MgmLockConfigRep { - friend class MgmtSrvr; -public: - STATIC_CONST( SignalLength = 1 ); - - /* Error codes */ - enum ErrorCode { - OK, - UNKNOWN_ERROR, - GENERATION_MISMATCH, - ALREADY_LOCKED - }; - -private: - Uint32 errorCode; -}; - -/** - * Unlock configuration - */ -class MgmUnlockConfigReq { - friend class MgmtSrvr; - -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 commitConfig; -}; - -/** - * Confirm config unlock - */ -class MgmUnlockConfigRep { - friend class MgmtSrvr; -public: - STATIC_CONST( SignalLength = 1 ); - - /* Error codes */ - enum ErrorCode { - OK, - UNKNOWN_ERROR, - NOT_LOCKED - }; - -private: - Uint32 errorCode; -}; - -#endif /* !MANAGEMENTSERVER_HPP */ diff --git a/storage/ndb/include/kernel/signaldata/MasterGCP.hpp b/storage/ndb/include/kernel/signaldata/MasterGCP.hpp deleted file mode 100644 index 389b1bb53f1..00000000000 --- a/storage/ndb/include/kernel/signaldata/MasterGCP.hpp +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
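A minimal standalone sketch of the requestInfo packing that the LqhTransConf setters and getters above implement; the LTC_* shifts and masks are copied from the removed header, while the Uint32 typedef, the sample field values, and the main() driver are illustrative assumptions only and are not part of the tree:

/* Illustrative only: mirrors the LTC_* layout quoted from the removed
   LqhTransConf.hpp. Uint32 is aliased locally so the sketch compiles
   outside the NDB source tree. */
#include <cassert>
#include <cstdint>

typedef uint32_t Uint32;

// Constants copied from the removed header.
enum {
  LTC_REPLICA_TYPE_SHIFT = 0,  LTC_REPLICA_TYPE_MASK = 3,
  LTC_REPLICA_NO_SHIFT   = 2,  LTC_REPLICA_NO_MASK   = 3,
  LTC_LAST_REPLICA_SHIFT = 4,  LTC_LAST_REPLICA_MASK = 3,
  LTC_SIMPLE_SHIFT       = 6,
  LTC_DIRTY_SHIFT        = 7,
  LTC_OPERATION_SHIFT    = 8,  LTC_OPERATION_MASK    = 7,
  LTC_MARKER_SHIFT       = 10
};

int main() {
  Uint32 requestInfo = 0;
  // Pack each field the way the setters do: OR the value shifted into place.
  requestInfo |= (1u << LTC_REPLICA_TYPE_SHIFT);   // replica type = 1 (sample value)
  requestInfo |= (2u << LTC_REPLICA_NO_SHIFT);     // replica no   = 2 (sample value)
  requestInfo |= (2u << LTC_LAST_REPLICA_SHIFT);   // last replica = 2 (sample value)
  requestInfo |= (1u << LTC_DIRTY_SHIFT);          // dirty flag set
  requestInfo |= (3u << LTC_OPERATION_SHIFT);      // operation    = 3 (sample value)
  // Unpack the way the getters do: shift down, then mask.
  assert(((requestInfo >> LTC_REPLICA_NO_SHIFT) & LTC_REPLICA_NO_MASK) == 2);
  assert(((requestInfo >> LTC_OPERATION_SHIFT) & LTC_OPERATION_MASK) == 3);
  assert(((requestInfo >> LTC_MARKER_SHIFT) & 1) == 0);
  return 0;
}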
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MASTER_GCP_HPP -#define MASTER_GCP_HPP - -#include - -/** - * - */ -class MasterGCPConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size ); - - enum State { - GCP_READY = 0, - GCP_PREPARE_RECEIVED = 1, - GCP_COMMIT_RECEIVED = 2, - GCP_TC_FINISHED = 3 - }; -private: - /** - * Data replied - */ - Uint32 gcpState; - Uint32 senderNodeId; - Uint32 failedNodeId; - Uint32 newGCP; - Uint32 latestLCP; - Uint32 oldestRestorableGCI; - Uint32 keepGCI; - Uint32 lcpActive[NdbNodeBitmask::Size]; -}; -/** - * - */ -class MasterGCPReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 masterRef; - Uint32 failedNodeId; -}; - -/** - * - */ -class MasterGCPRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 senderNodeId; - Uint32 failedNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/MasterLCP.hpp b/storage/ndb/include/kernel/signaldata/MasterLCP.hpp deleted file mode 100644 index 4a469edb7ff..00000000000 --- a/storage/ndb/include/kernel/signaldata/MasterLCP.hpp +++ /dev/null @@ -1,86 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MASTER_LCP_HPP -#define MASTER_LCP_HPP - -#include -#include "SignalData.hpp" - -/** - * - */ -class MasterLCPConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - friend bool printMASTER_LCP_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - - enum State { - LCP_STATUS_IDLE = 0, - LCP_STATUS_ACTIVE = 2, - LCP_TAB_COMPLETED = 8, - LCP_TAB_SAVED = 9 - }; - - friend NdbOut& operator<<(NdbOut&, const State&); - -private: - /** - * Data replied - */ - Uint32 senderNodeId; - Uint32 lcpState; - Uint32 failedNodeId; -}; -/** - * - */ -class MasterLCPReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - friend bool printMASTER_LCP_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 masterRef; - Uint32 failedNodeId; -}; - -class MasterLCPRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - friend bool printMASTER_LCP_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); -private: - /** - * Data replied - */ - Uint32 senderNodeId; - Uint32 failedNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp b/storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp deleted file mode 100644 index f6ad6b0e185..00000000000 --- a/storage/ndb/include/kernel/signaldata/NFCompleteRep.hpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NF_COMPLETE_REP_HPP -#define NF_COMPLETE_REP_HPP - -#include "SignalData.hpp" - -/** - * NFCompleteRep - Node Fail Complete Report - * - * This signal is sent by a block(or a node) - * when it has finished cleaning up after a node failure. - * - * It's also sent from Qmgr to the clusterMgr in API - * to tell the API that it can now abort all transactions still waiting for response - * from the failed NDB node - * - */ -struct NFCompleteRep { - - friend bool printNF_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); - - STATIC_CONST( SignalLength = 5 ); - - /** - * Which block has completed... - * - * NOTE: 0 means the node has completed - */ - Uint32 blockNo; - - /** - * Which node has completed... - */ - Uint32 nodeId; - - /** - * Which node has failed - */ - Uint32 failedNodeId; - - /** - * Is this the original message or a delayed variant. 
- */ - Uint32 unused; // originalMessage - - Uint32 from; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/NdbSttor.hpp b/storage/ndb/include/kernel/signaldata/NdbSttor.hpp deleted file mode 100644 index a5e4a128f9b..00000000000 --- a/storage/ndb/include/kernel/signaldata/NdbSttor.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_STTOR_HPP -#define NDB_STTOR_HPP - -#include "SignalData.hpp" - -class NdbSttor { - /** - * Sender(s) - */ - friend class NdbCntr; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - friend class Dbdict; - friend class Dbdih; - friend class Dblqh; - friend class Dbtc; - friend class ClusterMgr; - friend class Trix; - friend class Backup; - friend class Suma; - friend class Grep; - - friend bool printNDB_STTOR(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - STATIC_CONST( DataLength = 16 ); -private: - - Uint32 senderRef; - Uint32 nodeId; - Uint32 internalStartPhase; - Uint32 typeOfStart; - Uint32 masterNodeId; - Uint32 unused; - Uint32 config[DataLength]; -}; - -class NdbSttorry { - /** - * Receiver(s) - */ - friend class NdbCntr; - - /** - * Sender(s) - */ - friend class Ndbcntr; - friend class Dbdict; - friend class Dbdih; - friend class Dblqh; - friend class Dbtc; - friend class ClusterMgr; - friend class Trix; - friend class Backup; - friend class Suma; - friend class Grep; - - friend bool printNDB_STTORRY(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); -private: - - Uint32 senderRef; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp b/storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp deleted file mode 100644 index e956abf1622..00000000000 --- a/storage/ndb/include/kernel/signaldata/NdbfsContinueB.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBFS_CONTINUEB_H -#define NDBFS_CONTINUEB_H - -#include "SignalData.hpp" - -class NdbfsContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Ndbfs; - friend bool printCONTINUEB_NDBFS(FILE * output, const Uint32 * theData, - Uint32 len, Uint16); -private: - enum { - ZSCAN_MEMORYCHANNEL_10MS_DELAY = 0, - ZSCAN_MEMORYCHANNEL_NO_DELAY = 1 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/NextScan.hpp b/storage/ndb/include/kernel/signaldata/NextScan.hpp deleted file mode 100644 index c5e6de9d89c..00000000000 --- a/storage/ndb/include/kernel/signaldata/NextScan.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NEXT_SCAN_HPP -#define NEXT_SCAN_HPP - -#include "SignalData.hpp" - -class NextScanReq { - friend class Dblqh; - friend class Dbacc; - friend class Dbtux; - friend class Dbtup; -public: - // two sets of defs picked from lqh/acc - enum ScanFlag { - ZSCAN_NEXT = 1, - ZSCAN_NEXT_COMMIT = 2, - ZSCAN_COMMIT = 3, // new - ZSCAN_CLOSE = 6, - ZSCAN_NEXT_ABORT = 12 - }; - STATIC_CONST( SignalLength = 3 ); -private: - Uint32 accPtr; // scan record in ACC/TUX - Uint32 accOperationPtr; - Uint32 scanFlag; -}; - -class NextScanConf { - friend class Dbacc; - friend class Dbtux; - friend class Dbtup; - friend class Dblqh; -public: - // length is less if no keyinfo or no next result - STATIC_CONST( SignalLength = 11 ); -private: - Uint32 scanPtr; // scan record in LQH - Uint32 accOperationPtr; - Uint32 fragId; - Uint32 localKey[2]; - Uint32 localKeyLength; - Uint32 gci; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp b/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp deleted file mode 100644 index 15b884c97f4..00000000000 --- a/storage/ndb/include/kernel/signaldata/NodeFailRep.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NODE_FAILREP_HPP -#define NODE_FAILREP_HPP - -#include "SignalData.hpp" -#include - -/** - * This signals is sent by Qmgr to NdbCntr - * and then from NdbCntr sent to: dih, dict, lqh, tc & API - */ -struct NodeFailRep { - STATIC_CONST( SignalLength = 3 + NodeBitmask::Size ); - - Uint32 failNo; - - /** - * Note: This field is only set when signals is sent FROM Ndbcntr - * (not when signal is sent from Qmgr) - */ - Uint32 masterNodeId; - - Uint32 noOfNodes; - Uint32 theNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp b/storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp deleted file mode 100644 index 1c583c8b9ee..00000000000 --- a/storage/ndb/include/kernel/signaldata/NodeStateSignalData.hpp +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NODE_STATE_SIGNAL_DATA_HPP -#define NODE_STATE_SIGNAL_DATA_HPP - -#include - -/** - * NodeStateRep - * - * Sent so that all blocks will update their NodeState - */ -class NodeStateRep { - /** - * Sender(s) - */ - friend class Ndbcntr; - - /** - * Reciver - */ - friend class SimulatedBlock; - -public: - STATIC_CONST( SignalLength = NodeState::DataLength ); -private: - - NodeState nodeState; -}; - -/** - * ChangeNodeStateReq - * - * Sent by NdbCntr when synchronous NodeState updates are needed - */ -class ChangeNodeStateReq { - /** - * Sender(s) - */ - friend class Ndbcntr; - - /** - * Reciver - */ - friend class SimulatedBlock; - -public: - STATIC_CONST( SignalLength = 2 + NodeState::DataLength ); -public: - - Uint32 senderRef; - Uint32 senderData; - NodeState nodeState; -}; - -/** - * ChangeNodeStateConf - * - * Sent by SimulatedBlock as a confirmation to ChangeNodeStateReq - */ -class ChangeNodeStateConf { - /** - * Sender(s) - */ - friend class SimulatedBlock; - - /** - * Reciver - */ - friend class NdbCntr; - -public: - STATIC_CONST( SignalLength = 1 ); -private: - - Uint32 senderData; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/PackedSignal.hpp b/storage/ndb/include/kernel/signaldata/PackedSignal.hpp deleted file mode 100644 index be5edb47cc4..00000000000 --- a/storage/ndb/include/kernel/signaldata/PackedSignal.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PACKED_SIGNAL_HPP -#define PACKED_SIGNAL_HPP - -#include "SignalData.hpp" - -// -------- CODES FOR COMPRESSED SIGNAL (PACKED_SIGNAL) ------- -#define ZCOMMIT 0 -#define ZCOMPLETE 1 -#define ZCOMMITTED 2 -#define ZCOMPLETED 3 -#define ZLQHKEYCONF 4 -#define ZREMOVE_MARKER 5 - -class PackedSignal { - - static Uint32 getSignalType(Uint32 data); - - /** - * For printing - */ - friend bool printPACKED_SIGNAL(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); -}; - -inline -Uint32 PackedSignal::getSignalType(Uint32 data) { return data >> 28; } - -#endif diff --git a/storage/ndb/include/kernel/signaldata/PgmanContinueB.hpp b/storage/ndb/include/kernel/signaldata/PgmanContinueB.hpp deleted file mode 100644 index a296174bbfd..00000000000 --- a/storage/ndb/include/kernel/signaldata/PgmanContinueB.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PGMAN_CONTINUEB_H -#define PGMAN_CONTINUEB_H - -#include "SignalData.hpp" - -class PgmanContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Pgman; -private: - enum { - STATS_LOOP = 0, - BUSY_LOOP = 1, - CLEANUP_LOOP = 2, - LCP_LOOP = 3, - LCP_LOCKED = 4 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/PrepDropTab.hpp b/storage/ndb/include/kernel/signaldata/PrepDropTab.hpp deleted file mode 100644 index a809556f78e..00000000000 --- a/storage/ndb/include/kernel/signaldata/PrepDropTab.hpp +++ /dev/null @@ -1,172 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
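A minimal sketch of the packed-signal convention documented above: the top four bits of the first word of each packed entry carry the signal code. The Z* codes and the shift are taken from the removed PackedSignal.hpp; the sample word and the driver are assumptions made for illustration:

/* Illustrative only: the signal code lives in bits 28-31 of the first word. */
#include <cassert>
#include <cstdint>

typedef uint32_t Uint32;

enum { ZCOMMIT = 0, ZCOMPLETE = 1, ZCOMMITTED = 2,
       ZCOMPLETED = 3, ZLQHKEYCONF = 4, ZREMOVE_MARKER = 5 };

static Uint32 getSignalType(Uint32 data) { return data >> 28; }

int main() {
  // Hypothetical packed word: code in the top nibble, payload bits below it.
  Uint32 word = (Uint32(ZLQHKEYCONF) << 28) | 0x1234u;
  assert(getSignalType(word) == ZLQHKEYCONF);
  return 0;
}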
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PREP_DROP_TAB_HPP -#define PREP_DROP_TAB_HPP - -#include "SignalData.hpp" - -class PrepDropTabReq { - /** - * Sender(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbdih; - - friend bool printPREP_DROP_TAB_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 requestType; // @see DropTabReq::RequestType -}; - -class PrepDropTabConf { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printPREP_DROP_TAB_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; -}; - -class PrepDropTabRef { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Dblqh; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dbdict; - - friend bool printPREP_DROP_TAB_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - OK = 0, - NoSuchTable = 1, - PrepDropInProgress = 2, - DropInProgress = 3, - InvalidTableState = 4, - NF_FakeErrorREF = 5 - }; - -private: - Uint32 senderRef; - Uint32 senderData; - Uint32 tableId; - Uint32 errorCode; -}; - -class WaitDropTabReq { - /** - * Sender - */ - friend class Dbtc; - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printWAIT_DROP_TAB_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - - Uint32 tableId; - Uint32 senderRef; -}; - -class WaitDropTabRef { - /** - * Sender - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbtc; - friend class Dbdih; - - friend bool printWAIT_DROP_TAB_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - NoSuchTable = 1, - IllegalTableState = 2, - DropInProgress = 3, - NF_FakeErrorREF = 4 - }; - - Uint32 tableId; - Uint32 senderRef; - Uint32 errorCode; - Uint32 tableStatus; -}; - - -class WaitDropTabConf { - /** - * Sender - */ - friend class Dblqh; - - /** - * Receiver(s) - */ - friend class Dbtc; - friend class Dbdih; - - friend bool printWAIT_DROP_TAB_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 2 ); - - Uint32 tableId; - Uint32 senderRef; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp b/storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp deleted file mode 100644 index b650307a163..00000000000 --- a/storage/ndb/include/kernel/signaldata/PrepFailReqRef.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PREP_FAILREQREF_HPP -#define PREP_FAILREQREF_HPP - -#include "SignalData.hpp" -#include - -/** - * The Req signal is sent by Qmgr to Qmgr - * and the Ref signal might be sent back - * - * NOTE that the signals are identical - */ -class PrepFailReqRef { - - /** - * Sender(s) / Reciver(s) - */ - friend class Qmgr; - - friend bool printPREPFAILREQREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 3 + NodeBitmask::Size ); -private: - - Uint32 xxxBlockRef; - Uint32 failNo; - - Uint32 noOfNodes; - Uint32 theNodes[NodeBitmask::Size]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ReadConfig.hpp b/storage/ndb/include/kernel/signaldata/ReadConfig.hpp deleted file mode 100644 index 07df94cde00..00000000000 --- a/storage/ndb/include/kernel/signaldata/ReadConfig.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef READ_CONFIG_HPP -#define READ_CONFIG_HPP - -/** - */ -class ReadConfigReq { -public: - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 noOfParameters; // 0 Means read all relevant for block - Uint32 parameters[1]; // see mgmapi_config_parameters.h -}; - -class ReadConfigConf { -public: - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderRef; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp b/storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp deleted file mode 100644 index a99ea49ff88..00000000000 --- a/storage/ndb/include/kernel/signaldata/ReadNodesConf.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef READ_NODESCONF_HPP -#define READ_NODESCONF_HPP - -#include - -/** - * This signals is sent by Qmgr to NdbCntr - * and then from NdbCntr sent to: dih, dict, lqh, tc - * - * NOTE Only noOfNodes & allNodes are valid when sent from Qmgr - */ -class ReadNodesConf { - /** - * Sender(s) - */ - friend class Qmgr; - - /** - * Sender(s) / Reciver(s) - */ - friend class Ndbcntr; - - /** - * Reciver(s) - */ - friend class Dbdih; - friend class Dbdict; - friend class Dblqh; - friend class Dbtc; - friend class Trix; - friend class Backup; - friend class Suma; - friend class Grep; - - friend bool printREAD_NODES_CONF(FILE*, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 + 5*NdbNodeBitmask::Size ); -private: - - Uint32 noOfNodes; - Uint32 ndynamicId; - - /** - * - * NOTE Not valid when send from Qmgr - */ - Uint32 masterNodeId; - - /** - * This array defines all the ndb nodes in the system - */ - union { - Uint32 allNodes[NdbNodeBitmask::Size]; - Uint32 definedNodes[NdbNodeBitmask::Size]; - }; - - /** - * This array describes wheather the nodes are currently active - * - * NOTE Not valid when send from Qmgr - */ - Uint32 inactiveNodes[NdbNodeBitmask::Size]; - - Uint32 clusterNodes[NdbNodeBitmask::Size]; // From Qmgr - Uint32 startingNodes[NdbNodeBitmask::Size]; // From Cntr - Uint32 startedNodes[NdbNodeBitmask::Size]; // From Cntr -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/RelTabMem.hpp b/storage/ndb/include/kernel/signaldata/RelTabMem.hpp deleted file mode 100644 index 572cdf8d3b6..00000000000 --- a/storage/ndb/include/kernel/signaldata/RelTabMem.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef REL_TABMEM_HPP -#define REL_TABMEM_HPP - -#include "SignalData.hpp" - -class RelTabMemReq { - /** - * Sender(s) and Receiver(s) - */ - friend class Dbdict; - - /** - * Receiver(s) - */ - friend class Dbacc; - friend class Dbdih; - friend class Dblqh; - friend class Dbtc; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 4 ); - -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 primaryTableId; - Uint32 secondaryTableId; -}; - -class RelTabMemConf { - /** - * Sender(s) and Receiver(s) - */ - friend class Dbdict; - - /** - * Sender(s) - */ - friend class Dbacc; - friend class Dbdih; - friend class Dblqh; - friend class Dbtc; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 userPtr; - Uint32 senderRef; - Uint32 nodeId; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/RepImpl.hpp b/storage/ndb/include/kernel/signaldata/RepImpl.hpp deleted file mode 100644 index 374c5e10a6f..00000000000 --- a/storage/ndb/include/kernel/signaldata/RepImpl.hpp +++ /dev/null @@ -1,500 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef REP_IMPL_HPP -#define REP_IMPL_HPP - -#include "SignalData.hpp" -#include -#include -#include - -/** - * RecordType - * sz = no of elems in enum - * @todo support for meta_log must be added - */ -enum RecordType -{ - DATA_SCAN = 0, - DATA_LOG = 1, - META_SCAN = 2, - // META_LOG = 3, //removed META_LOG. 
not supported - RecordTypeSize = 3 // =4 if meta log is supported -}; - -/** - * Wait GCP - */ -class RepWaitGcpReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - friend class GrepParticipant; - friend bool printREP_WAITGCP_REQ(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 gcp; - Uint32 senderNodeId; -}; - -class RepWaitGcpConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - friend class GrepParticipant; - - friend bool printREP_WAITGCP_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderNodeId; -}; - -class RepWaitGcpRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - friend class GrepParticipant; - - friend bool printREP_WAITGCP_REF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 6 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 senderNodeId; - GrepError::GE_Code err; -}; - -class RepGetGciReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - friend class Grep; - - friend bool printREP_GET_GCI_REQ(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 3 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeGrp; -}; - -class RepGetGciConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GET_GCI_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeGrp; - Uint32 firstPSGCI; - Uint32 lastPSGCI; - Uint32 firstSSGCI; - Uint32 lastSSGCI; -}; - -class RepGetGciRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GET_GCI_REF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 8); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeGrp; - Uint32 firstPSGCI; - Uint32 lastPSGCI; - Uint32 firstSSGCI; - Uint32 lastSSGCI; - GrepError::GE_Code err; -}; - -class RepGetGciBufferReq { - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GET_GCIBUFFER_REQ(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 nodeGrp; -}; - - -class RepGetGciBufferConf { - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GET_GCIBUFFER_CONF(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 8 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstPSGCI; - Uint32 lastPSGCI; - Uint32 firstSSGCI; - Uint32 lastSSGCI; - Uint32 currentGCIBuffer; - Uint32 nodeGrp; -}; - -class RepGetGciBufferRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GET_GCIBUFFER_REF(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 9 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstPSGCI; - Uint32 lastPSGCI; - Uint32 firstSSGCI; - Uint32 lastSSGCI; - Uint32 currentGCIBuffer; - Uint32 nodeGrp; - GrepError::GE_Code err; -}; - -class RepInsertGciBufferReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_INSERT_GCIBUFFER_REQ(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength 
= 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 gci; - Uint32 nodeGrp; - Uint32 force; -}; - -class RepInsertGciBufferRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_INSERT_GCIBUFFER_REF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 7 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 gci; - Uint32 nodeGrp; - Uint32 tableId; - Uint32 force; - GrepError::GE_Code err; -}; - -class RepInsertGciBufferConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_INSERT_GCIBUFFER_CONF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 gci; - Uint32 nodeGrp; - Uint32 force; -}; - - -class RepClearPSGciBufferReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_PS_GCIBUFFER_REQ(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 nodeGrp; -}; - -class RepClearPSGciBufferRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_PS_GCIBUFFER_REF(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 currentGCI; - Uint32 nodeGrp; - GrepError::GE_Code err; -}; - -class RepClearPSGciBufferConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_PS_GCIBUFFER_CONF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 nodeGrp; -}; - -class RepClearSSGciBufferReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_SS_GCIBUFFER_REQ(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 nodeGrp; -}; - -class RepClearSSGciBufferRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_SS_GCIBUFFER_REF(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 7 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 currentGCI; - Uint32 nodeGrp; - GrepError::GE_Code err; -}; - -class RepClearSSGciBufferConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_CLEAR_SS_GCIBUFFER_CONF(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 firstGCI; - Uint32 lastGCI; - Uint32 nodeGrp; -}; - - -class RepDataPage -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_DATA_PAGE(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeGrp; - Uint32 gci; -}; - - -class RepGciBufferAccRep -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_GCIBUFFER_ACC_REP(FILE *, const Uint32 *, - Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeGrp; - Uint32 gci; - Uint32 totalSentBytes; -}; - -class RepDropTableReq -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool 
printREP_DROP_TABLE_REQ(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - Uint32 tableId; - // char tableName[MAX_TAB_NAME_SIZE]; -}; - -class RepDropTableRef -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_DROP_TABLE_REF(FILE *, const Uint32 *, - Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - Uint32 tableId; - // char tableName[MAX_TAB_NAME_SIZE]; -}; - -class RepDropTableConf -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - - friend bool printREP_DROP_TABLE_CONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 4 ); - Uint32 tableId; - //char tableName[MAX_TAB_NAME_SIZE]; -}; - -class RepDisconnectRep -{ - /** - * Sender(s)/Reciver(s) - */ - friend class Rep; - friend class Grep; - - friend bool printREP_DISCONNECT_REP(FILE *, const Uint32 *, Uint32, Uint16); - -public: - enum NodeType { - DB = 0, - REP = 1 - }; - STATIC_CONST( SignalLength = 7 ); - Uint32 senderData; - Uint32 senderRef; - Uint32 nodeId; - Uint32 nodeType; - Uint32 subId; - Uint32 subKey; - Uint32 err; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/RestoreContinueB.hpp b/storage/ndb/include/kernel/signaldata/RestoreContinueB.hpp deleted file mode 100644 index 42c4975e964..00000000000 --- a/storage/ndb/include/kernel/signaldata/RestoreContinueB.hpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef RESTORE_CONTINUEB_H -#define RESTORE_CONTINUEB_H - -#include "SignalData.hpp" - -class RestoreContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Restore; - friend bool printCONTINUEB_RESTORE(FILE * output, const Uint32 * theData, Uint32 len); -private: - enum { - START_FILE_THREAD = 0, - BUFFER_UNDERFLOW = 1, - BUFFER_FULL_SCAN = 2, - BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/RestoreImpl.hpp b/storage/ndb/include/kernel/signaldata/RestoreImpl.hpp deleted file mode 100644 index 0a048095eec..00000000000 --- a/storage/ndb/include/kernel/signaldata/RestoreImpl.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef RESTORE_SIGNAL_DATA_HPP -#define RESTORE_SIGNAL_DATA_HPP - -#include "SignalData.hpp" - -struct RestoreLcpReq -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 lcpNo; - Uint32 tableId; - Uint32 fragmentId; - Uint32 lcpId; - STATIC_CONST( SignalLength = 6 ); -}; - -struct RestoreLcpRef -{ - Uint32 senderData; - Uint32 senderRef; - Uint32 errorCode; - Uint32 extra[1]; - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode - { - OK = 0, - NoFileRecord = 1, - OutOfDataBuffer = 2, - OutOfReadBufferPages = 3, - InvalidFileFormat = 4 - }; -}; - -struct RestoreLcpConf -{ - Uint32 senderData; - Uint32 senderRef; - STATIC_CONST( SignalLength = 2 ); -}; - -struct RestoreContinueB { - - enum { - RESTORE_NEXT = 0, - READ_FILE = 1 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ResumeReq.hpp b/storage/ndb/include/kernel/signaldata/ResumeReq.hpp deleted file mode 100644 index 5288b3d6baa..00000000000 --- a/storage/ndb/include/kernel/signaldata/ResumeReq.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef RESUME_REQ_HPP -#define RESUME_REQ_HPP - -#include "SignalData.hpp" - -class ResumeReq { - - /** - * Reciver(s) - */ - friend class Ndbcntr; - - /** - * Sender - */ - friend class MgmtSrvr; - -public: - STATIC_CONST( SignalLength = 2 ); - -public: - - Uint32 senderRef; - Uint32 senderData; -}; - -class ResumeRef { - - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - /** - * Sender - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrorCode { - OK = 0, - NodeShutdownInProgress = 1, - SystemShutdownInProgress = 2, - NodeShutdownWouldCauseSystemCrash = 3 - }; - -public: - Uint32 senderData; - Uint32 errorCode; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/RouteOrd.hpp b/storage/ndb/include/kernel/signaldata/RouteOrd.hpp deleted file mode 100644 index 47bb272a073..00000000000 --- a/storage/ndb/include/kernel/signaldata/RouteOrd.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2003, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ROUTE_ORD_HPP -#define ROUTE_ORD_HPP - -#include "SignalData.hpp" -#include - -/** - * Request to allocate node id - */ -struct RouteOrd { - STATIC_CONST( SignalLength = 4 ); - - Uint32 dstRef; - Uint32 srcRef; - Uint32 gsn; - Uint32 cnt; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ScanFrag.hpp b/storage/ndb/include/kernel/signaldata/ScanFrag.hpp deleted file mode 100644 index ce83879d4bb..00000000000 --- a/storage/ndb/include/kernel/signaldata/ScanFrag.hpp +++ /dev/null @@ -1,399 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SCAN_FRAG_HPP -#define SCAN_FRAG_HPP - -#include "SignalData.hpp" -#include "ndb_limits.h" - -class ScanFragReq { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Backup; - friend class Suma; - - /** - * Reciver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 12 ); - - friend bool printSCAN_FRAGREQ(FILE *, const Uint32*, Uint32, Uint16); - -public: - Uint32 senderData; - Uint32 resultRef; // Where to send the result - Uint32 savePointId; - Uint32 requestInfo; - Uint32 tableId; - Uint32 fragmentNoKeyLen; - Uint32 schemaVersion; - Uint32 transId1; - Uint32 transId2; - Uint32 clientOpPtr; - Uint32 batch_size_rows; - Uint32 batch_size_bytes; - - static Uint32 getLockMode(const Uint32 & requestInfo); - static Uint32 getHoldLockFlag(const Uint32 & requestInfo); - static Uint32 getKeyinfoFlag(const Uint32 & requestInfo); - static Uint32 getReadCommittedFlag(const Uint32 & requestInfo); - static Uint32 getRangeScanFlag(const Uint32 & requestInfo); - static Uint32 getDescendingFlag(const Uint32 & requestInfo); - static Uint32 getTupScanFlag(const Uint32 & requestInfo); - static Uint32 getAttrLen(const Uint32 & requestInfo); - static Uint32 getScanPrio(const Uint32 & requestInfo); - static Uint32 getNoDiskFlag(const Uint32 & requestInfo); - static Uint32 getLcpScanFlag(const Uint32 & requestInfo); - - static void setLockMode(Uint32 & requestInfo, Uint32 lockMode); - static void setHoldLockFlag(Uint32 & requestInfo, Uint32 holdLock); - static void setKeyinfoFlag(Uint32 & requestInfo, Uint32 keyinfo); - static void setReadCommittedFlag(Uint32 & requestInfo, Uint32 readCommitted); - static void setRangeScanFlag(Uint32 & requestInfo, Uint32 rangeScan); - static void setDescendingFlag(Uint32 & requestInfo, Uint32 descending); - static void setTupScanFlag(Uint32 & requestInfo, Uint32 tupScan); - static void setAttrLen(Uint32 & requestInfo, Uint32 attrLen); - static void setScanPrio(Uint32& requestInfo, Uint32 prio); - static void setNoDiskFlag(Uint32& requestInfo, Uint32 val); - static void setLcpScanFlag(Uint32 & 
requestInfo, Uint32 val); -}; - -class KeyInfo20 { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Backup; - friend class NdbOperation; - friend class NdbScanReceiver; -public: - STATIC_CONST( HeaderLength = 5); - STATIC_CONST( DataLength = 20 ); - - - static Uint32 setScanInfo(Uint32 noOfOps, Uint32 scanNo); - static Uint32 getScanNo(Uint32 scanInfo); - static Uint32 getScanOp(Uint32 scanInfo); - -public: - Uint32 clientOpPtr; - Uint32 keyLen; - Uint32 scanInfo_Node; - Uint32 transId1; - Uint32 transId2; - Uint32 keyData[DataLength]; -}; - -class ScanFragConf { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Backup; - friend class Suma; -public: - STATIC_CONST( SignalLength = 6 ); - -public: - Uint32 senderData; - Uint32 completedOps; - Uint32 fragmentCompleted; - Uint32 transId1; - Uint32 transId2; - Uint32 total_len; -}; - -class ScanFragRef { - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Backup; - friend class Suma; -public: - STATIC_CONST( SignalLength = 4 ); -public: - enum ErrorCode { - ZNO_FREE_TC_CONREC_ERROR = 484, - ZTOO_FEW_CONCURRENT_OPERATIONS = 485, - ZTOO_MANY_CONCURRENT_OPERATIONS = 486, - ZSCAN_NO_FRAGMENT_ERROR = 487, - ZTOO_MANY_ACTIVE_SCAN_ERROR = 488, - ZNO_FREE_SCANREC_ERROR = 489, - ZWRONG_BATCH_SIZE = 1230, - ZSTANDBY_SCAN_ERROR = 1209, - ZSCAN_BOOK_ACC_OP_ERROR = 1219, - ZUNKNOWN_TRANS_ERROR = 1227 - }; - - Uint32 senderData; - Uint32 transId1; - Uint32 transId2; - Uint32 errorCode; -}; - -/** - * This is part of Scan Fragment protocol - * - * Not to be confused with ScanNextReq in Scan Table protocol - */ -class ScanFragNextReq { - /** - * Sender(s) - */ - friend class Dbtc; - friend class Backup; - friend class Suma; - - /** - * Reciver(s) - */ - friend class Dblqh; - - friend bool printSCANFRAGNEXTREQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 6 ); - -public: - Uint32 senderData; - Uint32 closeFlag; - Uint32 transId1; - Uint32 transId2; - Uint32 batch_size_rows; - Uint32 batch_size_bytes; -}; - -/** - * Request Info - * - * a = Length of attrinfo - 16 Bits (16-31) - * c = LCP scan - 1 Bit 3 - * d = No disk - 1 Bit 4 - * l = Lock Mode - 1 Bit 5 - * h = Hold lock - 1 Bit 7 - * k = Keyinfo - 1 Bit 8 - * r = read committed - 1 Bit 9 - * x = range scan - 1 Bit 6 - * z = descending - 1 Bit 10 - * t = tup scan - 1 Bit 11 (implies x=z=0) - * p = Scan prio - 4 Bits (12-15) -> max 15 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * dlxhkrztppppaaaaaaaaaaaaaaaa - */ -#define SF_LOCK_MODE_SHIFT (5) -#define SF_LOCK_MODE_MASK (1) - -#define SF_NO_DISK_SHIFT (4) -#define SF_HOLD_LOCK_SHIFT (7) -#define SF_KEYINFO_SHIFT (8) -#define SF_READ_COMMITTED_SHIFT (9) -#define SF_RANGE_SCAN_SHIFT (6) -#define SF_DESCENDING_SHIFT (10) -#define SF_TUP_SCAN_SHIFT (11) -#define SF_LCP_SCAN_SHIFT (3) - -#define SF_ATTR_LEN_SHIFT (16) -#define SF_ATTR_LEN_MASK (65535) - -#define SF_PRIO_SHIFT 12 -#define SF_PRIO_MASK 15 - -inline -Uint32 -ScanFragReq::getLockMode(const Uint32 & requestInfo){ - return (requestInfo >> SF_LOCK_MODE_SHIFT) & SF_LOCK_MODE_MASK; -} - -inline -Uint32 -ScanFragReq::getHoldLockFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_HOLD_LOCK_SHIFT) & 1; -} - -inline -Uint32 -ScanFragReq::getKeyinfoFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_KEYINFO_SHIFT) & 1; -} - -inline 
-Uint32 -ScanFragReq::getRangeScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_RANGE_SCAN_SHIFT) & 1; -} - -inline -Uint32 -ScanFragReq::getDescendingFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_DESCENDING_SHIFT) & 1; -} - -inline -Uint32 -ScanFragReq::getTupScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_TUP_SCAN_SHIFT) & 1; -} - -inline -Uint32 -ScanFragReq::getReadCommittedFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_READ_COMMITTED_SHIFT) & 1; -} - -inline -Uint32 -ScanFragReq::getAttrLen(const Uint32 & requestInfo){ - return (requestInfo >> SF_ATTR_LEN_SHIFT) & SF_ATTR_LEN_MASK; -} - -inline -Uint32 -ScanFragReq::getScanPrio(const Uint32 & requestInfo){ - return (requestInfo >> SF_PRIO_SHIFT) & SF_PRIO_MASK; -} - -inline -void -ScanFragReq::setScanPrio(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, SF_PRIO_MASK, "ScanFragReq::setScanPrio"); - requestInfo |= (val << SF_PRIO_SHIFT); -} - -inline -void -ScanFragReq::setLockMode(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, SF_LOCK_MODE_MASK, "ScanFragReq::setLockMode"); - requestInfo |= (val << SF_LOCK_MODE_SHIFT); -} - -inline -void -ScanFragReq::setHoldLockFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setHoldLockFlag"); - requestInfo |= (val << SF_HOLD_LOCK_SHIFT); -} - -inline -void -ScanFragReq::setKeyinfoFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setKeyinfoFlag"); - requestInfo |= (val << SF_KEYINFO_SHIFT); -} - -inline -void -ScanFragReq::setReadCommittedFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setReadCommittedFlag"); - requestInfo |= (val << SF_READ_COMMITTED_SHIFT); -} - -inline -void -ScanFragReq::setRangeScanFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setRangeScanFlag"); - requestInfo |= (val << SF_RANGE_SCAN_SHIFT); -} - -inline -void -ScanFragReq::setDescendingFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setDescendingFlag"); - requestInfo |= (val << SF_DESCENDING_SHIFT); -} - -inline -void -ScanFragReq::setTupScanFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setTupScanFlag"); - requestInfo |= (val << SF_TUP_SCAN_SHIFT); -} - -inline -void -ScanFragReq::setAttrLen(UintR & requestInfo, UintR val){ - ASSERT_MAX(val, SF_ATTR_LEN_MASK, "ScanFragReq::setAttrLen"); - requestInfo |= (val << SF_ATTR_LEN_SHIFT); -} - -inline -Uint32 -ScanFragReq::getNoDiskFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_NO_DISK_SHIFT) & 1; -} - -inline -void -ScanFragReq::setNoDiskFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setNoDiskFlag"); - requestInfo |= (val << SF_NO_DISK_SHIFT); -} - -inline -Uint32 -ScanFragReq::getLcpScanFlag(const Uint32 & requestInfo){ - return (requestInfo >> SF_LCP_SCAN_SHIFT) & 1; -} - -inline -void -ScanFragReq::setLcpScanFlag(UintR & requestInfo, UintR val){ - ASSERT_BOOL(val, "ScanFragReq::setLcpScanFlag"); - requestInfo |= (val << SF_LCP_SCAN_SHIFT); -} - -inline -Uint32 -KeyInfo20::setScanInfo(Uint32 opNo, Uint32 scanNo){ - ASSERT_MAX(opNo, 1023, "KeyInfo20::setScanInfo"); - ASSERT_MAX(scanNo, 255, "KeyInfo20::setScanInfo"); - return (opNo << 8) + scanNo; -} - -inline -Uint32 -KeyInfo20::getScanNo(Uint32 scanInfo){ - return scanInfo & 0xFF; -} - -inline -Uint32 -KeyInfo20::getScanOp(Uint32 scanInfo){ - return (scanInfo >> 8) & 0x3FF; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/ScanTab.hpp 
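For reference, the ScanFragReq helpers deleted above pack every per-fragment scan attribute (lock mode, range scan, priority, attrinfo length, and so on) into a single 32-bit requestInfo word through the SF_* shift/mask constants. A minimal standalone sketch of that encoding follows; the shift and mask values are copied from the deleted ScanFrag.hpp, while packRequestInfo and the example values are purely illustrative:

    #include <cassert>
    #include <cstdint>

    // Shift/mask values as defined by the deleted SF_* macros in ScanFrag.hpp.
    constexpr uint32_t SF_LOCK_MODE_SHIFT = 5,  SF_LOCK_MODE_MASK = 1;
    constexpr uint32_t SF_RANGE_SCAN_SHIFT = 6;
    constexpr uint32_t SF_PRIO_SHIFT      = 12, SF_PRIO_MASK      = 15;
    constexpr uint32_t SF_ATTR_LEN_SHIFT  = 16, SF_ATTR_LEN_MASK  = 65535;

    // Illustrative packer mirroring the ScanFragReq::set*() helpers, which OR
    // each field into a freshly cleared requestInfo word.
    uint32_t packRequestInfo(uint32_t lockMode, uint32_t rangeScan,
                             uint32_t prio, uint32_t attrLen) {
      uint32_t info = 0;
      info |= (lockMode  & SF_LOCK_MODE_MASK) << SF_LOCK_MODE_SHIFT;
      info |= (rangeScan & 1)                 << SF_RANGE_SCAN_SHIFT;
      info |= (prio      & SF_PRIO_MASK)      << SF_PRIO_SHIFT;
      info |= (attrLen   & SF_ATTR_LEN_MASK)  << SF_ATTR_LEN_SHIFT;
      return info;
    }

    int main() {
      uint32_t info = packRequestInfo(/*lockMode=*/1, /*rangeScan=*/1,
                                      /*prio=*/3, /*attrLen=*/128);
      // Decoding shifts right and masks, exactly as ScanFragReq::get*() did.
      assert(((info >> SF_LOCK_MODE_SHIFT) & SF_LOCK_MODE_MASK) == 1);
      assert(((info >> SF_RANGE_SCAN_SHIFT) & 1) == 1);
      assert(((info >> SF_PRIO_SHIFT) & SF_PRIO_MASK) == 3);
      assert(((info >> SF_ATTR_LEN_SHIFT) & SF_ATTR_LEN_MASK) == 128);
      return 0;
    }

The real setters assert their inputs (ASSERT_BOOL/ASSERT_MAX) rather than masking them, so the masking above is only a defensive simplification.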
b/storage/ndb/include/kernel/signaldata/ScanTab.hpp deleted file mode 100644 index 65a69b47a83..00000000000 --- a/storage/ndb/include/kernel/signaldata/ScanTab.hpp +++ /dev/null @@ -1,474 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SCAN_TAB_H -#define SCAN_TAB_H - -#include "SignalData.hpp" - -/** - * - * SENDER: API - * RECIVER: Dbtc - */ -class ScanTabReq { - /** - * Reciver(s) - */ - friend class Dbtc; // Reciver - - /** - * Sender(s) - */ - friend class NdbTransaction; - friend class NdbScanOperation; - friend class NdbIndexScanOperation; - - /** - * For printing - */ - friend bool printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( StaticLength = 11 ); - STATIC_CONST( MaxTotalAttrInfo = 0xFFFF ); - -private: - - // Type definitions - - /** - * DATA VARIABLES - */ - UintR apiConnectPtr; // DATA 0 - UintR attrLenKeyLen; // DATA 1 - UintR requestInfo; // DATA 2 - UintR tableId; // DATA 3 - UintR tableSchemaVersion; // DATA 4 - UintR storedProcId; // DATA 5 - UintR transId1; // DATA 6 - UintR transId2; // DATA 7 - UintR buddyConPtr; // DATA 8 - UintR batch_byte_size; // DATA 9 - UintR first_batch_size; // DATA 10 - - /** - * Optional - */ - Uint32 distributionKey; - - /** - * Get:ers for requestInfo - */ - static Uint8 getParallelism(const UintR & requestInfo); - static Uint8 getLockMode(const UintR & requestInfo); - static Uint8 getHoldLockFlag(const UintR & requestInfo); - static Uint8 getReadCommittedFlag(const UintR & requestInfo); - static Uint8 getRangeScanFlag(const UintR & requestInfo); - static Uint8 getDescendingFlag(const UintR & requestInfo); - static Uint8 getTupScanFlag(const UintR & requestInfo); - static Uint8 getKeyinfoFlag(const UintR & requestInfo); - static Uint16 getScanBatch(const UintR & requestInfo); - static Uint8 getDistributionKeyFlag(const UintR & requestInfo); - static UintR getNoDiskFlag(const UintR & requestInfo); - - /** - * Set:ers for requestInfo - */ - static void clearRequestInfo(UintR & requestInfo); - static void setParallelism(UintR & requestInfo, Uint32 flag); - static void setLockMode(UintR & requestInfo, Uint32 flag); - static void setHoldLockFlag(UintR & requestInfo, Uint32 flag); - static void setReadCommittedFlag(UintR & requestInfo, Uint32 flag); - static void setRangeScanFlag(UintR & requestInfo, Uint32 flag); - static void setDescendingFlag(UintR & requestInfo, Uint32 flag); - static void setTupScanFlag(UintR & requestInfo, Uint32 flag); - static void setKeyinfoFlag(UintR & requestInfo, Uint32 flag); - static void setScanBatch(Uint32& requestInfo, Uint32 sz); - static void setDistributionKeyFlag(Uint32& requestInfo, Uint32 flag); - static void setNoDiskFlag(UintR & requestInfo, UintR val); -}; - -/** - * Request Info - * - p = Parallelism - 8 
Bits -> Max 256 (Bit 0-7) - l = Lock mode - 1 Bit 8 - h = Hold lock mode - 1 Bit 10 - c = Read Committed - 1 Bit 11 - k = Keyinfo - 1 Bit 12 - t = Tup scan - 1 Bit 13 - z = Descending (TUX) - 1 Bit 14 - x = Range Scan (TUX) - 1 Bit 15 - b = Scan batch - 10 Bit 16-25 (max 1023) - d = Distribution key flag - 1 Bit 26 - n = No disk flag - 1 Bit 9 - - 1111111111222222222233 - 01234567890123456789012345678901 - pppppppplnhcktzxbbbbbbbbbbd -*/ - -#define PARALLEL_SHIFT (0) -#define PARALLEL_MASK (255) - -#define LOCK_MODE_SHIFT (8) -#define LOCK_MODE_MASK (1) - -#define HOLD_LOCK_SHIFT (10) -#define HOLD_LOCK_MASK (1) - -#define KEYINFO_SHIFT (12) -#define KEYINFO_MASK (1) - -#define READ_COMMITTED_SHIFT (11) -#define READ_COMMITTED_MASK (1) - -#define RANGE_SCAN_SHIFT (15) -#define RANGE_SCAN_MASK (1) - -#define DESCENDING_SHIFT (14) -#define DESCENDING_MASK (1) - -#define TUP_SCAN_SHIFT (13) -#define TUP_SCAN_MASK (1) - -#define SCAN_BATCH_SHIFT (16) -#define SCAN_BATCH_MASK (1023) - -#define SCAN_DISTR_KEY_SHIFT (26) -#define SCAN_DISTR_KEY_MASK (1) - -#define SCAN_NODISK_SHIFT (9) -#define SCAN_NODISK_MASK (1) - -inline -Uint8 -ScanTabReq::getParallelism(const UintR & requestInfo){ - return (Uint8)((requestInfo >> PARALLEL_SHIFT) & PARALLEL_MASK); -} - -inline -Uint8 -ScanTabReq::getLockMode(const UintR & requestInfo){ - return (Uint8)((requestInfo >> LOCK_MODE_SHIFT) & LOCK_MODE_MASK); -} - -inline -Uint8 -ScanTabReq::getHoldLockFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> HOLD_LOCK_SHIFT) & HOLD_LOCK_MASK); -} - -inline -Uint8 -ScanTabReq::getReadCommittedFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> READ_COMMITTED_SHIFT) & READ_COMMITTED_MASK); -} - -inline -Uint8 -ScanTabReq::getRangeScanFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> RANGE_SCAN_SHIFT) & RANGE_SCAN_MASK); -} - -inline -Uint8 -ScanTabReq::getDescendingFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> DESCENDING_SHIFT) & DESCENDING_MASK); -} - -inline -Uint8 -ScanTabReq::getTupScanFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> TUP_SCAN_SHIFT) & TUP_SCAN_MASK); -} - -inline -Uint16 -ScanTabReq::getScanBatch(const Uint32 & requestInfo){ - return (Uint16)((requestInfo >> SCAN_BATCH_SHIFT) & SCAN_BATCH_MASK); -} - -inline -void -ScanTabReq::clearRequestInfo(UintR & requestInfo){ - requestInfo = 0; -} - -inline -void -ScanTabReq::setParallelism(UintR & requestInfo, Uint32 type){ - ASSERT_MAX(type, PARALLEL_MASK, "ScanTabReq::setParallelism"); - requestInfo= (requestInfo & ~(PARALLEL_MASK << PARALLEL_SHIFT)) | - ((type & PARALLEL_MASK) << PARALLEL_SHIFT); -} - -inline -void -ScanTabReq::setLockMode(UintR & requestInfo, Uint32 mode){ - ASSERT_MAX(mode, LOCK_MODE_MASK, "ScanTabReq::setLockMode"); - requestInfo= (requestInfo & ~(LOCK_MODE_MASK << LOCK_MODE_SHIFT)) | - ((mode & LOCK_MODE_MASK) << LOCK_MODE_SHIFT); -} - -inline -void -ScanTabReq::setHoldLockFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setHoldLockFlag"); - requestInfo= (requestInfo & ~(HOLD_LOCK_MASK << HOLD_LOCK_SHIFT)) | - ((flag & HOLD_LOCK_MASK) << HOLD_LOCK_SHIFT); -} - -inline -void -ScanTabReq::setReadCommittedFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setReadCommittedFlag"); - requestInfo= (requestInfo & ~(READ_COMMITTED_MASK << READ_COMMITTED_SHIFT)) | - ((flag & READ_COMMITTED_MASK) << READ_COMMITTED_SHIFT); -} - -inline -void -ScanTabReq::setRangeScanFlag(UintR & requestInfo, Uint32 flag){ - 
ASSERT_BOOL(flag, "ScanTabReq::setRangeScanFlag"); - requestInfo= (requestInfo & ~(RANGE_SCAN_MASK << RANGE_SCAN_SHIFT)) | - ((flag & RANGE_SCAN_MASK) << RANGE_SCAN_SHIFT); -} - -inline -void -ScanTabReq::setDescendingFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setDescendingFlag"); - requestInfo= (requestInfo & ~(DESCENDING_MASK << DESCENDING_SHIFT)) | - ((flag & DESCENDING_MASK) << DESCENDING_SHIFT); -} - -inline -void -ScanTabReq::setTupScanFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setTupScanFlag"); - requestInfo= (requestInfo & ~(TUP_SCAN_MASK << TUP_SCAN_SHIFT)) | - ((flag & TUP_SCAN_MASK) << TUP_SCAN_SHIFT); -} - -inline -void -ScanTabReq::setScanBatch(Uint32 & requestInfo, Uint32 flag){ - ASSERT_MAX(flag, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch"); - requestInfo= (requestInfo & ~(SCAN_BATCH_MASK << SCAN_BATCH_SHIFT)) | - ((flag & SCAN_BATCH_MASK) << SCAN_BATCH_SHIFT); -} - -inline -Uint8 -ScanTabReq::getKeyinfoFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> KEYINFO_SHIFT) & KEYINFO_MASK); -} - -inline -void -ScanTabReq::setKeyinfoFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag"); - requestInfo= (requestInfo & ~(KEYINFO_MASK << KEYINFO_SHIFT)) | - ((flag & KEYINFO_MASK) << KEYINFO_SHIFT); -} - -inline -Uint8 -ScanTabReq::getDistributionKeyFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> SCAN_DISTR_KEY_SHIFT) & SCAN_DISTR_KEY_MASK); -} - -inline -void -ScanTabReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag"); - requestInfo= (requestInfo & ~(SCAN_DISTR_KEY_MASK << SCAN_DISTR_KEY_SHIFT)) | - ((flag & SCAN_DISTR_KEY_MASK) << SCAN_DISTR_KEY_SHIFT); -} - -inline -UintR -ScanTabReq::getNoDiskFlag(const UintR & requestInfo){ - return (requestInfo >> SCAN_NODISK_SHIFT) & SCAN_NODISK_MASK; -} - -inline -void -ScanTabReq::setNoDiskFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setNoDiskFlag"); - requestInfo= (requestInfo & ~(SCAN_NODISK_MASK << SCAN_NODISK_SHIFT)) | - ((flag & SCAN_NODISK_MASK) << SCAN_NODISK_SHIFT); -} - -/** - * - * SENDER: Dbtc - * RECIVER: API - */ -class ScanTabConf { - /** - * Reciver(s) - */ - friend class NdbTransaction; // Reciver - - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * For printing - */ - friend bool printSCANTABCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 4 ); - STATIC_CONST( EndOfData = (1 << 31) ); - -private: - - // Type definitions - - /** - * DATA VARIABLES - */ - UintR apiConnectPtr; // DATA 0 - UintR requestInfo; // DATA 1 - UintR transId1; // DATA 2 - UintR transId2; // DATA 3 - - struct OpData { - Uint32 apiPtrI; - Uint32 tcPtrI; - Uint32 info; - }; - - static Uint32 getLength(Uint32 opDataInfo) { return opDataInfo >> 10; }; - static Uint32 getRows(Uint32 opDataInfo) { return opDataInfo & 1023;} -}; - -/** - * request info - * - o = received operations - 7 Bits -> Max 255 (Bit 0-7) - s = status of scan - 2 Bits -> Max ??? (Bit 8-?) 
- - 1111111111222222222233 - 01234567890123456789012345678901 - ooooooooss -*/ - -#define OPERATIONS_SHIFT (0) -#define OPERATIONS_MASK (0xFF) - -#define STATUS_SHIFT (8) -#define STATUS_MASK (0xFF) - - -/** - * - * SENDER: Dbtc - * RECIVER: API - */ -class ScanTabRef { - /** - * Reciver(s) - */ - friend class NdbTransaction; // Reciver - - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * For printing - */ - friend bool printSCANTABREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 5 ); - -private: - - // Type definitions - - /** - * DATA VARIABLES - */ - UintR apiConnectPtr; // DATA 0 - UintR transId1; // DATA 1 - UintR transId2; // DATA 2 - UintR errorCode; // DATA 3 - UintR closeNeeded; // DATA 4 - -}; - -/** - * - * SENDER: API - * RECIVER: Dbtc - */ -class ScanNextReq { - /** - * Reciver(s) - */ - friend class Dbtc; // Reciver - - /** - * Sender(s) - */ - friend class NdbOperation; - - /** - * For printing - */ - friend bool printSCANNEXTREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 4 ); - -private: - - // Type definitions - - /** - * DATA VARIABLES - */ - UintR apiConnectPtr; // DATA 0 - UintR stopScan; // DATA 1 - UintR transId1; // DATA 2 - UintR transId2; // DATA 3 - - // stopScan = 1, stop this scan - -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp b/storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp deleted file mode 100644 index f9f3412f5bf..00000000000 --- a/storage/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
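One detail of the ScanTab.hpp hunk above worth keeping in mind: unlike the ScanFragReq setters, which only OR bits in, the ScanTabReq setters clear the target field before writing it, so a field can be rewritten on the same requestInfo word. A small standalone sketch of that clear-then-set idiom, using the PARALLEL_* constants copied from the deleted header (the free function is illustrative):

    #include <cassert>
    #include <cstdint>

    // From the deleted ScanTab.hpp: parallelism occupies bits 0-7 of requestInfo.
    constexpr uint32_t PARALLEL_SHIFT = 0, PARALLEL_MASK = 255;

    // Clear-then-set, as ScanTabReq::setParallelism() did: mask out the old
    // field, then write the new value into the same bit positions.
    void setParallelism(uint32_t &requestInfo, uint32_t p) {
      requestInfo = (requestInfo & ~(PARALLEL_MASK << PARALLEL_SHIFT)) |
                    ((p & PARALLEL_MASK) << PARALLEL_SHIFT);
    }

    int main() {
      uint32_t info = 0;
      setParallelism(info, 16);
      setParallelism(info, 240);   // overwriting works; no stale bits remain
      assert(((info >> PARALLEL_SHIFT) & PARALLEL_MASK) == 240);
      return 0;
    }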
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SET_LOGLEVEL_ORD_HPP -#define SET_LOGLEVEL_ORD_HPP - -#include -#include "EventSubscribeReq.hpp" -#include "SignalData.hpp" - -/** - * - */ -class SetLogLevelOrd { - /** - * Sender(s) - */ - friend class MgmtSrvr; /* XXX can probably be removed */ - friend class MgmApiSession; - friend class CommandInterpreter; - - /** - * Reciver(s) - */ - friend class Cmvmi; - - friend class NodeLogLevel; - -private: - STATIC_CONST( SignalLength = 1 + LogLevel::LOGLEVEL_CATEGORIES ); - - Uint32 noOfEntries; - Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES]; - - void clear(); - - /** - * Note level is valid as 0-15 - */ - void setLogLevel(LogLevel::EventCategory ec, int level = 7); - - SetLogLevelOrd& operator= (const LogLevel& ll){ - noOfEntries = LogLevel::LOGLEVEL_CATEGORIES; - for(size_t i = 0; i(_variable); -} - - -inline void SetVarReq::value(UintR value) { - _value = value; -} - -inline UintR SetVarReq::value(void) const { - return _value; -} - - - -#endif // SETVARREQ_H - diff --git a/storage/ndb/include/kernel/signaldata/SignalData.hpp b/storage/ndb/include/kernel/signaldata/SignalData.hpp deleted file mode 100644 index 1c249d887ee..00000000000 --- a/storage/ndb/include/kernel/signaldata/SignalData.hpp +++ /dev/null @@ -1,227 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SIGNAL_DATA_H -#define SIGNAL_DATA_H - -#include -#include -#include -#include - -#define ASSERT_BOOL(flag, message) assert(flag<=1) -#define ASSERT_RANGE(value, min, max, message) \ - assert((value) >= (min) && (value) <= (max)) -#define ASSERT_MAX(value, max, message) assert((value) <= (max)) - -#define SECTION(x) STATIC_CONST(x) - -// defines for setter and getters on commonly used member data in signals - -#define GET_SET_SENDERDATA \ - Uint32 getSenderData() { return senderData; }; \ - void setSenderData(Uint32 _s) { senderData = _s; }; - -#define GET_SET_SENDERREF \ - Uint32 getSenderRef() { return senderRef; }; \ - void setSenderRef(Uint32 _s) { senderRef = _s; }; - -#define GET_SET_PREPAREID \ - Uint32 getPrepareId() { return prepareId; }; \ - void setPrepareId(Uint32 _s) { prepareId = _s; }; - -#define GET_SET_ERRORCODE \ - Uint32 getErrorCode() { return errorCode; }; \ - void setErrorCode(Uint32 _s) { errorCode = _s; }; - -#define GET_SET_TCERRORCODE \ - Uint32 getTCErrorCode() { return TCErrorCode; }; \ - void setTCErrorCode(Uint32 _s) { TCErrorCode = _s; }; - -#define GSN_PRINT_SIGNATURE(f) bool f(FILE *, const Uint32 *, Uint32, Uint16) - -GSN_PRINT_SIGNATURE(printTCKEYREQ); -GSN_PRINT_SIGNATURE(printTCKEYCONF); -GSN_PRINT_SIGNATURE(printTCKEYREF); -GSN_PRINT_SIGNATURE(printLQHKEYREQ); -GSN_PRINT_SIGNATURE(printLQHKEYCONF); -GSN_PRINT_SIGNATURE(printLQHKEYREF); -GSN_PRINT_SIGNATURE(printTUPKEYREQ); -GSN_PRINT_SIGNATURE(printTUPKEYCONF); -GSN_PRINT_SIGNATURE(printTUPKEYREF); -GSN_PRINT_SIGNATURE(printTUPCOMMITREQ); -GSN_PRINT_SIGNATURE(printCONTINUEB); -GSN_PRINT_SIGNATURE(printFSOPENREQ); -GSN_PRINT_SIGNATURE(printFSCLOSEREQ); -GSN_PRINT_SIGNATURE(printFSREADWRITEREQ); -GSN_PRINT_SIGNATURE(printFSREADWRITEREQ); -GSN_PRINT_SIGNATURE(printFSREF); -GSN_PRINT_SIGNATURE(printFSREF); -GSN_PRINT_SIGNATURE(printFSREF); -GSN_PRINT_SIGNATURE(printFSREF); -GSN_PRINT_SIGNATURE(printFSREF); -GSN_PRINT_SIGNATURE(printFSCONF); -GSN_PRINT_SIGNATURE(printFSCONF); -GSN_PRINT_SIGNATURE(printFSCONF); -GSN_PRINT_SIGNATURE(printFSCONF); -GSN_PRINT_SIGNATURE(printFSCONF); -GSN_PRINT_SIGNATURE(printCLOSECOMREQCONF); -GSN_PRINT_SIGNATURE(printCLOSECOMREQCONF); -GSN_PRINT_SIGNATURE(printPACKED_SIGNAL); -GSN_PRINT_SIGNATURE(printPREPFAILREQREF); -GSN_PRINT_SIGNATURE(printPREPFAILREQREF); -GSN_PRINT_SIGNATURE(printALTER_TABLE_REQ); -GSN_PRINT_SIGNATURE(printALTER_TABLE_CONF); -GSN_PRINT_SIGNATURE(printALTER_TABLE_REF); -GSN_PRINT_SIGNATURE(printALTER_TAB_REQ); -GSN_PRINT_SIGNATURE(printALTER_TAB_CONF); -GSN_PRINT_SIGNATURE(printALTER_TAB_REF); -GSN_PRINT_SIGNATURE(printCREATE_TRIG_REQ); -GSN_PRINT_SIGNATURE(printCREATE_TRIG_CONF); -GSN_PRINT_SIGNATURE(printCREATE_TRIG_REF); -GSN_PRINT_SIGNATURE(printALTER_TRIG_REQ); -GSN_PRINT_SIGNATURE(printALTER_TRIG_CONF); -GSN_PRINT_SIGNATURE(printALTER_TRIG_REF); -GSN_PRINT_SIGNATURE(printDROP_TRIG_REQ); -GSN_PRINT_SIGNATURE(printDROP_TRIG_CONF); -GSN_PRINT_SIGNATURE(printDROP_TRIG_REF); -GSN_PRINT_SIGNATURE(printFIRE_TRIG_ORD); -GSN_PRINT_SIGNATURE(printTRIG_ATTRINFO); -GSN_PRINT_SIGNATURE(printCREATE_INDX_REQ); -GSN_PRINT_SIGNATURE(printCREATE_INDX_CONF); -GSN_PRINT_SIGNATURE(printCREATE_INDX_REF); -GSN_PRINT_SIGNATURE(printDROP_INDX_REQ); -GSN_PRINT_SIGNATURE(printDROP_INDX_CONF); -GSN_PRINT_SIGNATURE(printDROP_INDX_REF); 
-GSN_PRINT_SIGNATURE(printALTER_INDX_REQ); -GSN_PRINT_SIGNATURE(printALTER_INDX_CONF); -GSN_PRINT_SIGNATURE(printALTER_INDX_REF); -GSN_PRINT_SIGNATURE(printTCINDXREQ); -GSN_PRINT_SIGNATURE(printTCINDXCONF); -GSN_PRINT_SIGNATURE(printTCINDXREF); -GSN_PRINT_SIGNATURE(printINDXKEYINFO); -GSN_PRINT_SIGNATURE(printINDXATTRINFO); -GSN_PRINT_SIGNATURE(printFSAPPENDREQ); -GSN_PRINT_SIGNATURE(printBACKUP_REQ); -GSN_PRINT_SIGNATURE(printBACKUP_DATA); -GSN_PRINT_SIGNATURE(printBACKUP_REF); -GSN_PRINT_SIGNATURE(printBACKUP_CONF); -GSN_PRINT_SIGNATURE(printABORT_BACKUP_ORD); -GSN_PRINT_SIGNATURE(printBACKUP_ABORT_REP); -GSN_PRINT_SIGNATURE(printBACKUP_COMPLETE_REP); -GSN_PRINT_SIGNATURE(printBACKUP_NF_COMPLETE_REP); -GSN_PRINT_SIGNATURE(printDEFINE_BACKUP_REQ); -GSN_PRINT_SIGNATURE(printDEFINE_BACKUP_REF); -GSN_PRINT_SIGNATURE(printDEFINE_BACKUP_CONF); -GSN_PRINT_SIGNATURE(printSTART_BACKUP_REQ); -GSN_PRINT_SIGNATURE(printSTART_BACKUP_REF); -GSN_PRINT_SIGNATURE(printSTART_BACKUP_CONF); -GSN_PRINT_SIGNATURE(printBACKUP_FRAGMENT_REQ); -GSN_PRINT_SIGNATURE(printBACKUP_FRAGMENT_REF); -GSN_PRINT_SIGNATURE(printBACKUP_FRAGMENT_CONF); -GSN_PRINT_SIGNATURE(printSTOP_BACKUP_REQ); -GSN_PRINT_SIGNATURE(printSTOP_BACKUP_REF); -GSN_PRINT_SIGNATURE(printSTOP_BACKUP_CONF); -GSN_PRINT_SIGNATURE(printBACKUP_STATUS_REQ); -GSN_PRINT_SIGNATURE(printBACKUP_STATUS_CONF); -GSN_PRINT_SIGNATURE(printUTIL_SEQUENCE_REQ); -GSN_PRINT_SIGNATURE(printUTIL_SEQUENCE_REF); -GSN_PRINT_SIGNATURE(printUTIL_SEQUENCE_CONF); -GSN_PRINT_SIGNATURE(printUTIL_PREPARE_REQ); -GSN_PRINT_SIGNATURE(printUTIL_PREPARE_REF); -GSN_PRINT_SIGNATURE(printUTIL_PREPARE_CONF); -GSN_PRINT_SIGNATURE(printUTIL_EXECUTE_REQ); -GSN_PRINT_SIGNATURE(printUTIL_EXECUTE_REF); -GSN_PRINT_SIGNATURE(printUTIL_EXECUTE_CONF); -GSN_PRINT_SIGNATURE(printSCANTABREQ); -GSN_PRINT_SIGNATURE(printSCANTABCONF); -GSN_PRINT_SIGNATURE(printSCANTABREF); -GSN_PRINT_SIGNATURE(printSCANNEXTREQ); -GSN_PRINT_SIGNATURE(printLQH_FRAG_REQ); -GSN_PRINT_SIGNATURE(printLQH_FRAG_REF); -GSN_PRINT_SIGNATURE(printLQH_FRAG_CONF); -GSN_PRINT_SIGNATURE(printPREP_DROP_TAB_REQ); -GSN_PRINT_SIGNATURE(printPREP_DROP_TAB_REF); -GSN_PRINT_SIGNATURE(printPREP_DROP_TAB_CONF); -GSN_PRINT_SIGNATURE(printDROP_TAB_REQ); -GSN_PRINT_SIGNATURE(printDROP_TAB_REF); -GSN_PRINT_SIGNATURE(printDROP_TAB_CONF); -GSN_PRINT_SIGNATURE(printLCP_FRAG_ORD); -GSN_PRINT_SIGNATURE(printLCP_FRAG_REP); -GSN_PRINT_SIGNATURE(printLCP_COMPLETE_REP); -GSN_PRINT_SIGNATURE(printSTART_LCP_REQ); -GSN_PRINT_SIGNATURE(printSTART_LCP_CONF); -GSN_PRINT_SIGNATURE(printMASTER_LCP_REQ); -GSN_PRINT_SIGNATURE(printMASTER_LCP_REF); -GSN_PRINT_SIGNATURE(printMASTER_LCP_CONF); -GSN_PRINT_SIGNATURE(printCOPY_GCI_REQ); -GSN_PRINT_SIGNATURE(printSYSTEM_ERROR); -GSN_PRINT_SIGNATURE(printSTART_REC_REQ); -GSN_PRINT_SIGNATURE(printSTART_REC_CONF); -GSN_PRINT_SIGNATURE(printNF_COMPLETE_REP); -GSN_PRINT_SIGNATURE(printSIGNAL_DROPPED_REP); -GSN_PRINT_SIGNATURE(printFAIL_REP); -GSN_PRINT_SIGNATURE(printDISCONNECT_REP); -GSN_PRINT_SIGNATURE(printSUB_CREATE_REQ); -GSN_PRINT_SIGNATURE(printSUB_CREATE_CONF); -GSN_PRINT_SIGNATURE(printSUB_CREATE_REF); -GSN_PRINT_SIGNATURE(printSUB_REMOVE_REQ); -GSN_PRINT_SIGNATURE(printSUB_REMOVE_CONF); -GSN_PRINT_SIGNATURE(printSUB_REMOVE_REF); -GSN_PRINT_SIGNATURE(printSUB_START_REQ); -GSN_PRINT_SIGNATURE(printSUB_START_REF); -GSN_PRINT_SIGNATURE(printSUB_START_CONF); -GSN_PRINT_SIGNATURE(printSUB_STOP_REQ); -GSN_PRINT_SIGNATURE(printSUB_STOP_REF); -GSN_PRINT_SIGNATURE(printSUB_STOP_CONF); -GSN_PRINT_SIGNATURE(printSUB_SYNC_REQ); 
-GSN_PRINT_SIGNATURE(printSUB_SYNC_REF); -GSN_PRINT_SIGNATURE(printSUB_SYNC_CONF); -GSN_PRINT_SIGNATURE(printSUB_META_DATA); -GSN_PRINT_SIGNATURE(printSUB_TABLE_DATA); -GSN_PRINT_SIGNATURE(printSUB_SYNC_CONTINUE_REQ); -GSN_PRINT_SIGNATURE(printSUB_SYNC_CONTINUE_REF); -GSN_PRINT_SIGNATURE(printSUB_SYNC_CONTINUE_CONF); -GSN_PRINT_SIGNATURE(printSUB_GCP_COMPLETE_REP); -GSN_PRINT_SIGNATURE(printCREATE_FRAGMENTATION_REQ); -GSN_PRINT_SIGNATURE(printCREATE_FRAGMENTATION_REF); -GSN_PRINT_SIGNATURE(printCREATE_FRAGMENTATION_CONF); -GSN_PRINT_SIGNATURE(printUTIL_CREATE_LOCK_REQ); -GSN_PRINT_SIGNATURE(printUTIL_CREATE_LOCK_REF); -GSN_PRINT_SIGNATURE(printUTIL_CREATE_LOCK_CONF); -GSN_PRINT_SIGNATURE(printUTIL_DESTROY_LOCK_REQ); -GSN_PRINT_SIGNATURE(printUTIL_DESTROY_LOCK_REF); -GSN_PRINT_SIGNATURE(printUTIL_DESTROY_LOCK_CONF); -GSN_PRINT_SIGNATURE(printUTIL_LOCK_REQ); -GSN_PRINT_SIGNATURE(printUTIL_LOCK_REF); -GSN_PRINT_SIGNATURE(printUTIL_LOCK_CONF); -GSN_PRINT_SIGNATURE(printUTIL_UNLOCK_REQ); -GSN_PRINT_SIGNATURE(printUTIL_UNLOCK_REF); -GSN_PRINT_SIGNATURE(printUTIL_UNLOCK_CONF); -GSN_PRINT_SIGNATURE(printCNTR_START_REQ); -GSN_PRINT_SIGNATURE(printCNTR_START_REF); -GSN_PRINT_SIGNATURE(printCNTR_START_CONF); -GSN_PRINT_SIGNATURE(printREAD_NODES_CONF); -GSN_PRINT_SIGNATURE(printTUX_MAINT_REQ); -GSN_PRINT_SIGNATURE(printACC_LOCKREQ); -GSN_PRINT_SIGNATURE(printLQH_TRANSCONF); -GSN_PRINT_SIGNATURE(printSCAN_FRAGREQ); - -GSN_PRINT_SIGNATURE(printCONTINUEB_NDBFS); -GSN_PRINT_SIGNATURE(printCONTINUEB_DBDIH); -GSN_PRINT_SIGNATURE(printSTART_FRAG_REQ); - -#endif diff --git a/storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp b/storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp deleted file mode 100644 index e6a3a195bef..00000000000 --- a/storage/ndb/include/kernel/signaldata/SignalDataPrint.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SIGNAL_DATA_PRINT_H -#define SIGNAL_DATA_PRINT_H - -#include -#include - -/** - * Typedef for a Signal Data Print Function - */ -typedef bool (* SignalDataPrintFunction)(FILE * output, const Uint32 * theData, Uint32 len, BlockNumber receiverBlockNo); - -struct NameFunctionPair { - GlobalSignalNumber gsn; - SignalDataPrintFunction function; -}; - -extern const NameFunctionPair SignalDataPrintFunctions[]; -extern const unsigned short NO_OF_PRINT_FUNCTIONS; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp b/storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp deleted file mode 100644 index c6e6a23bf44..00000000000 --- a/storage/ndb/include/kernel/signaldata/SignalDroppedRep.hpp +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SIGNAL_DROPPED_HPP -#define SIGNAL_DROPPED_HPP - -#include "SignalData.hpp" - -class SignalDroppedRep { - - /** - * Reciver(s) - */ - friend class SimulatedBlock; - - /** - * Sender (TransporterCallback.cpp) - */ - friend void execute(void * , struct SignalHeader* const, Uint8, - Uint32* const, struct LinearSectionPtr ptr[3]); - - friend bool printSIGNAL_DROPPED_REP(FILE *, const Uint32 *, Uint32, Uint16); -public: -private: - Uint32 originalGsn; - Uint32 originalLength; - Uint32 originalSectionCount; - Uint32 originalData[1]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/SrFragidConf.hpp b/storage/ndb/include/kernel/signaldata/SrFragidConf.hpp deleted file mode 100644 index ddb6f1d0dd9..00000000000 --- a/storage/ndb/include/kernel/signaldata/SrFragidConf.hpp +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
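The SignalDataPrint.hpp removed just above declared the table that signal tracing used to map a global signal number to its print routine: a flat array of NameFunctionPair entries plus a count. A minimal sketch of that dispatch pattern is below; the typedef and struct shapes follow the deleted header with the integer typedefs simplified, while the demo printer, the table contents, and the findPrinter lookup are assumptions for illustration only:

    #include <cstdint>
    #include <cstdio>

    using GlobalSignalNumber = uint16_t;   // simplified stand-in types
    using BlockNumber        = uint16_t;

    // Shape of the print callback, as in the deleted SignalDataPrint.hpp.
    typedef bool (*SignalDataPrintFunction)(FILE *output, const uint32_t *theData,
                                            uint32_t len, BlockNumber receiverBlockNo);

    struct NameFunctionPair {
      GlobalSignalNumber gsn;
      SignalDataPrintFunction function;
    };

    // Demo printer and a one-entry table; the real table enumerated every
    // print* function declared in SignalData.hpp.
    static bool printDemo(FILE *output, const uint32_t *theData, uint32_t len,
                          BlockNumber) {
      for (uint32_t i = 0; i < len; i++)
        std::fprintf(output, "H'%08x ", (unsigned)theData[i]);
      std::fprintf(output, "\n");
      return true;
    }

    static const NameFunctionPair SignalDataPrintFunctions[] = { {123, printDemo} };
    static const unsigned short NO_OF_PRINT_FUNCTIONS = 1;

    // Hypothetical lookup: a linear scan is enough for a small fixed table.
    SignalDataPrintFunction findPrinter(GlobalSignalNumber gsn) {
      for (unsigned short i = 0; i < NO_OF_PRINT_FUNCTIONS; i++)
        if (SignalDataPrintFunctions[i].gsn == gsn)
          return SignalDataPrintFunctions[i].function;
      return nullptr;
    }

    int main() {
      const uint32_t words[2] = {0xdeadbeefu, 42u};
      if (SignalDataPrintFunction f = findPrinter(123))
        f(stdout, words, 2, 0);
      return 0;
    }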
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SR_FRAGIDCONF_HPP -#define SR_FRAGIDCONF_HPP - -#include "SignalData.hpp" - -class SrFragidConf { - /** - * Sender(s) - */ - friend class Dbacc; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 10 ); - -private: - Uint32 lcpPtr; - Uint32 accPtr; - Uint32 noLocFrag; - Uint32 fragId[4]; - Uint32 fragPtr[2]; - Uint32 hashCheckBit; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartFragReq.hpp b/storage/ndb/include/kernel/signaldata/StartFragReq.hpp deleted file mode 100644 index 31c58ac6c05..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartFragReq.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_FRAGREQ_HPP -#define START_FRAGREQ_HPP - -#include "SignalData.hpp" - -class StartFragReq { - /** - * Sender(s) - */ - friend class Dbdih; - - /** - * Receiver(s) - */ - friend class Dblqh; -public: - STATIC_CONST( SignalLength = 19 ); - - friend bool printSTART_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); - - Uint32 userPtr; - Uint32 userRef; - Uint32 lcpNo; - Uint32 lcpId; - Uint32 tableId; - Uint32 fragId; - Uint32 noOfLogNodes; - Uint32 lqhLogNode[4]; - Uint32 startGci[4]; - Uint32 lastGci[4]; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartInfo.hpp b/storage/ndb/include/kernel/signaldata/StartInfo.hpp deleted file mode 100644 index 0e216314908..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartInfo.hpp +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_INFO_HPP -#define START_INFO_HPP - -/** - * This signal is sent from the master DIH to all DIHs - * when a node is starting. - * If the typeStart is initial node restart then the node - * has started without filesystem. - * All DIHs must then "forget" that the starting node has - * performed LCP's ever. 
- * - * @see StartPermReq - */ - -class StartInfoReq { - /** - * Sender/Receiver - */ - friend class Dbdih; - - Uint32 startingNodeId; - Uint32 typeStart; - Uint32 systemFailureNo; - -public: - STATIC_CONST( SignalLength = 3 ); -}; - -class StartInfoConf { - - /** - * Sender/Receiver - */ - friend class Dbdih; - - /** - * NodeId of sending node - * which is "done" - */ - Uint32 sendingNodeId; - Uint32 startingNodeId; - -public: - STATIC_CONST( SignalLength = 2 ); -}; - -class StartInfoRef { - - /** - * Sender/Receiver - */ - friend class Dbdih; - - /** - * NodeId of sending node - * The node was refused to start. This could be - * because there are still processes handling - * previous information from the starting node. - */ - Uint32 sendingNodeId; - Uint32 startingNodeId; - Uint32 errorCode; - -public: - STATIC_CONST( SignalLength = 3 ); -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartMe.hpp b/storage/ndb/include/kernel/signaldata/StartMe.hpp deleted file mode 100644 index 3b44a73ee17..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartMe.hpp +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_ME_HPP -#define START_ME_HPP - -/** - * This signal is sent... - * - * It also contains the Sysfile. - * Since the Sysfile can be larger than on StartMeConf signal, - * there might be more than on of these signals sent before - * the entire sysfile is transfered - * - */ -class StartMeReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - - Uint32 startingRef; - Uint32 startingVersion; -}; - -class StartMeConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 25 ); -private: - - Uint32 startingNodeId; - Uint32 startWord; - - /** - * No of free words to carry data - */ - STATIC_CONST( DATA_SIZE = 23 ); - - Uint32 data[DATA_SIZE]; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartOrd.hpp b/storage/ndb/include/kernel/signaldata/StartOrd.hpp deleted file mode 100644 index 98df6155b94..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartOrd.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_ORD_HPP -#define START_ORD_HPP - -#include "SignalData.hpp" -#include "StopReq.hpp" - -class StartOrd { -public: - /** - * Senders - */ - friend class ThreadConfig; - friend class MgmtSrvr; - friend class Ndbcntr; - - /** - * Receivers - */ - friend class SimBlockCMCtrBlck; - - /** - * RequestInfo - See StopReq for getters/setters - */ - Uint32 restartInfo; - -public: - STATIC_CONST( SignalLength = 1 ); -}; - - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/StartPerm.hpp b/storage/ndb/include/kernel/signaldata/StartPerm.hpp deleted file mode 100644 index ca7fb430179..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartPerm.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_PERM_REQ_HPP -#define START_PERM_REQ_HPP - -/** - * This signal is sent by starting DIH to master DIH - * - * Used when starting in an already started cluster - * - */ -class StartPermReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - Uint32 blockRef; - Uint32 nodeId; - Uint32 startType; -}; - -class StartPermConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - - Uint32 startingNodeId; - Uint32 systemFailureNo; -}; - -class StartPermRef { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - - Uint32 startingNodeId; - Uint32 errorCode; - - enum ErrorCode - { - ZNODE_ALREADY_STARTING_ERROR = 305, - ZNODE_START_DISALLOWED_ERROR = 309, - InitialStartRequired = 320 - }; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartRec.hpp b/storage/ndb/include/kernel/signaldata/StartRec.hpp deleted file mode 100644 index a11ccbebc7f..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartRec.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_REC_HPP -#define START_REC_HPP - -#include "SignalData.hpp" - -class StartRecReq { - /** - * Sender(s) - */ - friend class Dbdih; - /** - * Receiver(s) - */ - friend class Dblqh; - - friend bool printSTART_REC_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); -private: - - Uint32 receivingNodeId; - Uint32 senderRef; - Uint32 keepGci; - Uint32 lastCompletedGci; - Uint32 newestGci; -}; - -class StartRecConf { - /** - * Sender(s) - */ - friend class Dblqh; - /** - * Receiver(s) - */ - friend class Dbdih; - - friend bool printSTART_REC_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 1 ); -private: - - Uint32 startingNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StartTo.hpp b/storage/ndb/include/kernel/signaldata/StartTo.hpp deleted file mode 100644 index be1fe819fdf..00000000000 --- a/storage/ndb/include/kernel/signaldata/StartTo.hpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef START_TO_HPP -#define START_TO_HPP - -class StartToReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 5 ); -private: - Uint32 userPtr; - BlockReference userRef; - Uint32 startingNodeId; - Uint32 nodeTakenOver; - bool nodeRestart; -}; - -class StartToConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - Uint32 userPtr; - Uint32 sendingNodeId; - Uint32 startingNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/StopMe.hpp b/storage/ndb/include/kernel/signaldata/StopMe.hpp deleted file mode 100644 index 0f27065620d..00000000000 --- a/storage/ndb/include/kernel/signaldata/StopMe.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef STOP_ME_HPP -#define STOP_ME_HPP - -/** - * This signal is sent by ndbcntr to local DIH - * - * If local DIH then sends it to all DIH's - * - * @see StopPermReq - * @see StartMeReq - * @see StartPermReq - */ -class StopMeReq { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Sender - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); -private: - - Uint32 senderRef; - Uint32 senderData; -}; - -class StopMeConf { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderRef; - Uint32 senderData; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/StopPerm.hpp b/storage/ndb/include/kernel/signaldata/StopPerm.hpp deleted file mode 100644 index d73792a4ae4..00000000000 --- a/storage/ndb/include/kernel/signaldata/StopPerm.hpp +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef STOP_PERM_HPP -#define STOP_PERM_HPP - -/** - * This signal is sent by ndbcntr to local DIH - * - * If local DIH is not master, it forwards it to master DIH - * and start acting as a proxy - * - * @see StopMeReq - * @see StartMeReq - * @see StartPermReq - */ -class StopPermReq { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Sender - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); -public: - - Uint32 senderRef; - Uint32 senderData; -}; - -class StopPermConf { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 senderData; -}; - -class StopPermRef { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrorCode { - StopOK = 0, - NodeStartInProgress = 1, - NodeShutdownInProgress = 2, - NF_CausedAbortOfStopProcedure = 3 - }; - -private: - Uint32 errorCode; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/StopReq.hpp b/storage/ndb/include/kernel/signaldata/StopReq.hpp deleted file mode 100644 index 72c074c6903..00000000000 --- a/storage/ndb/include/kernel/signaldata/StopReq.hpp +++ /dev/null @@ -1,217 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the 
License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef STOP_REQ_HPP -#define STOP_REQ_HPP - -#include "SignalData.hpp" - -class StopReq -{ - /** - * Reciver(s) - */ - friend class Ndbcntr; - - /** - * Sender - */ - friend class MgmtSrvr; - -public: - STATIC_CONST( SignalLength = 9 + NdbNodeBitmask::Size); - -public: - Uint32 senderRef; - Uint32 senderData; - - Uint32 requestInfo; - Uint32 singleuser; // Indicates whether or not to enter - // single user mode. - // Only in conjunction with system stop - Uint32 singleUserApi; // allowed api in singleuser - - Int32 apiTimeout; // Timeout before api transactions are refused - Int32 transactionTimeout; // Timeout before transactions are aborted - Int32 readOperationTimeout; // Timeout before read operations are aborted - Int32 operationTimeout; // Timeout before all operations are aborted - - Uint32 nodes[NdbNodeBitmask::Size]; - - static void setSystemStop(Uint32 & requestInfo, bool value); - static void setPerformRestart(Uint32 & requestInfo, bool value); - static void setNoStart(Uint32 & requestInfo, bool value); - static void setInitialStart(Uint32 & requestInfo, bool value); - /** - * Don't perform "graceful" shutdown/restart... - */ - static void setStopAbort(Uint32 & requestInfo, bool value); - static void setStopNodes(Uint32 & requestInfo, bool value); - - static bool getSystemStop(const Uint32 & requestInfo); - static bool getPerformRestart(const Uint32 & requestInfo); - static bool getNoStart(const Uint32 & requestInfo); - static bool getInitialStart(const Uint32 & requestInfo); - static bool getStopAbort(const Uint32 & requestInfo); - static bool getStopNodes(const Uint32 & requestInfo); -}; - -struct StopConf -{ - STATIC_CONST( SignalLength = 2 ); - Uint32 senderData; - union { - Uint32 nodeState; - Uint32 nodeId; - }; -}; - -class StopRef -{ - /** - * Reciver(s) - */ - friend class MgmtSrvr; - - /** - * Sender - */ - friend class Ndbcntr; - -public: - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - OK = 0, - NodeShutdownInProgress = 1, - SystemShutdownInProgress = 2, - NodeShutdownWouldCauseSystemCrash = 3, - TransactionAbortFailed = 4, - UnsupportedNodeShutdown = 5, - MultiNodeShutdownNotMaster = 6 - }; - -public: - Uint32 senderData; - Uint32 errorCode; - Uint32 masterNodeId; -}; - -inline -bool -StopReq::getSystemStop(const Uint32 & requestInfo) -{ - return requestInfo & 1; -} - -inline -bool -StopReq::getPerformRestart(const Uint32 & requestInfo) -{ - return requestInfo & 2; -} - -inline -bool -StopReq::getNoStart(const Uint32 & requestInfo) -{ - return requestInfo & 4; -} - -inline -bool -StopReq::getInitialStart(const Uint32 & requestInfo) -{ - return requestInfo & 8; -} - -inline -bool -StopReq::getStopAbort(const Uint32 & requestInfo) -{ - return requestInfo & 32; -} - -inline -bool -StopReq::getStopNodes(const Uint32 & requestInfo) -{ - return requestInfo & 64; -} - - -inline -void -StopReq::setSystemStop(Uint32 & requestInfo, bool value) -{ - if(value) - requestInfo |= 1; - else - requestInfo &= ~1; -} - -inline -void -StopReq::setPerformRestart(Uint32 & requestInfo, bool value) -{ - if(value) - 
requestInfo |= 2; - else - requestInfo &= ~2; -} - -inline -void -StopReq::setNoStart(Uint32 & requestInfo, bool value) -{ - if(value) - requestInfo |= 4; - else - requestInfo &= ~4; -} - -inline -void -StopReq::setInitialStart(Uint32 & requestInfo, bool value) -{ - if(value) - requestInfo |= 8; - else - requestInfo &= ~8; -} - -inline -void -StopReq::setStopAbort(Uint32 & requestInfo, bool value) -{ - if(value) - requestInfo |= 32; - else - requestInfo &= ~32; -} - -inline -void -StopReq::setStopNodes(Uint32 & requestInfo, bool value) -{ - if(value) - requestInfo |= 64; - else - requestInfo &= ~64; -} - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp deleted file mode 100644 index c9e614a36a8..00000000000 --- a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp +++ /dev/null @@ -1,548 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SUMA_IMPL_HPP -#define SUMA_IMPL_HPP - -#include "SignalData.hpp" -#include - - -struct SubCreateReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_CREATE_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 6 ); - STATIC_CONST( SignalLength2 = 7 ); - - enum SubscriptionType { - SingleTableScan = 1, // - DatabaseSnapshot = 2, // All tables/all data (including new ones) - TableEvent = 3, // - SelectiveTableSnapshot = 4, // User defines tables - RemoveFlags = 0xff, - GetFlags = 0xff << 16, - AddTableFlag = 0x1 << 16, - RestartFlag = 0x2 << 16, - ReportAll = 0x4 << 16, - ReportSubscribe= 0x8 << 16 - }; - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 subscriptionType; - Uint32 tableId; - Uint32 state; -}; - -struct SubCreateRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_CREATE_REF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; -}; - -struct SubCreateConf { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_CREATE_CONF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderRef; - Uint32 senderData; -}; - -struct SubscriptionData { - enum Part { - MetaData = 1, - TableData = 2 - }; -}; - -struct SubStartReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - - friend bool printSUB_START_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 6 ); - STATIC_CONST( SignalLength2 = SignalLength+1 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - 
Uint32 subscriberRef; -}; - -struct SubStartRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - - friend bool printSUB_START_REF(FILE *, const Uint32 *, Uint32, Uint16); - enum ErrorCode { - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701, - NotMaster = 702, - PartiallyConnected = 1421 - }; - - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( SignalLength2 = SignalLength+1 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - // do not change the order here! - Uint32 errorCode; - // with SignalLength2 - union { - Uint32 subscriberRef; - Uint32 m_masterNodeId; - }; -}; - -struct SubStartConf { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - - friend bool printSUB_START_CONF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 7 ); - STATIC_CONST( SignalLength2 = SignalLength+1 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 firstGCI; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - // with SignalLength2 - Uint32 subscriberRef; -}; - -struct SubStopReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - - friend bool printSUB_STOP_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 7 ); - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - Uint32 subscriberRef; -}; - -struct SubStopRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - - friend bool printSUB_STOP_REF(FILE *, const Uint32 *, Uint32, Uint16); - enum ErrorCode { - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701, - NotMaster = 702 - }; - - STATIC_CONST( SignalLength = 8 ); - STATIC_CONST( SignalLength2 = SignalLength+1 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - Uint32 subscriberRef; - Uint32 errorCode; - // with SignalLength2 - Uint32 m_masterNodeId; -}; - -struct SubStopConf { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - - friend bool printSUB_STOP_CONF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 7 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - Uint32 subscriberData; - Uint32 subscriberRef; -}; - -struct SubSyncReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - friend struct Grep; - - friend bool printSUB_SYNC_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 5 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 part; // SubscriptionData::Part - - SECTION( ATTRIBUTE_LIST = 0); // Used when doing SingelTableScan - SECTION( TABLE_LIST = 1 ); -}; - -struct SubSyncRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - friend struct Grep; - - friend bool printSUB_SYNC_REF(FILE *, const Uint32 *, Uint32, Uint16); - enum ErrorCode { - Undefined = 1 - }; - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; -}; - -struct SubSyncConf { - - /** - * Sender(s)/Reciver(s) - */ - friend struct Suma; - friend struct Grep; - - friend bool printSUB_SYNC_CONF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderRef; - 
Uint32 senderData; -}; - -struct SubTableData { - /** - * Sender(s)/Reciver(s) - */ - friend struct SumaParticipant; - friend struct Grep; - - friend bool printSUB_TABLE_DATA(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 7 ); - SECTION( DICT_TAB_INFO = 0 ); - SECTION( ATTR_INFO = 0 ); - SECTION( AFTER_VALUES = 1 ); - SECTION( BEFORE_VALUES = 2 ); - - enum LogType { - SCAN = 1, - LOG = 2, - REMOVE_FLAGS = 0xff - }; - - Uint32 senderData; - Uint32 gci; - Uint32 tableId; - Uint32 requestInfo; - Uint32 logType; - union { - Uint32 changeMask; - Uint32 anyValue; - }; - Uint32 totalLen; - - static void setOperation(Uint32& ri, Uint32 val) { - ri = (ri & 0xFFFFFF00) | val; - } - static void setReqNodeId(Uint32& ri, Uint32 val) { - ri = (ri & 0xFFFF00FF) | (val << 8); - } - static void setNdbdNodeId(Uint32& ri, Uint32 val) { - ri = (ri & 0xFF00FFFF) | (val << 16); - } - - static Uint32 getOperation(const Uint32 & ri){ - return (ri & 0xFF); - } - - static Uint32 getReqNodeId(const Uint32 & ri){ - return (ri >> 8) & 0xFF; - } - - static Uint32 getNdbdNodeId(const Uint32 & ri){ - return (ri >> 16) & 0xFF; - } -}; - -struct SubSyncContinueReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct SumaParticipant; - friend struct Grep; - friend struct Trix; - - friend bool printSUB_SYNC_CONTINUE_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 subscriberData; - Uint32 noOfRowsSent; -}; - -struct SubSyncContinueRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct SumaParticipant; - friend struct Grep; - friend struct Trix; - - friend bool printSUB_SYNC_CONTINUE_REF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - -struct SubSyncContinueConf { - /** - * Sender(s)/Reciver(s) - */ - friend struct SumaParticipant; - friend struct Grep; - friend struct Trix; - - friend bool printSUB_SYNC_CONTINUE_CONF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - -struct SubGcpCompleteRep { - - /** - * Sender(s)/Reciver(s) - */ - friend struct Dbdih; - friend struct SumaParticipant; - friend struct Grep; - friend struct Trix; - - friend bool printSUB_GCP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 3 ); - - Uint32 gci; - Uint32 senderRef; - Uint32 gcp_complete_rep_count; -}; - -struct SubGcpCompleteAck { - /** - * Sender(s)/Reciver(s) - */ - STATIC_CONST( SignalLength = SubGcpCompleteRep::SignalLength ); - - SubGcpCompleteRep rep; -}; - -struct SubRemoveReq { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_REMOVE_REQ(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - -struct SubRemoveRef { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_REMOVE_REF(FILE *, const Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 5 ); - enum ErrorCode { - Undefined = 1, - NF_FakeErrorREF = 11, - Busy = 701 - }; - - Uint32 senderRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 errorCode; - Uint32 senderData; -}; - -struct SubRemoveConf { - /** - * Sender(s)/Reciver(s) - */ - friend struct Grep; - friend struct SumaParticipant; - - friend bool printSUB_REMOVE_CONF(FILE *, const 
Uint32 *, Uint32, Uint16); - STATIC_CONST( SignalLength = 5 ); - - Uint32 senderRef; - Uint32 subscriptionId; - Uint32 subscriptionKey; - Uint32 errorCode; - Uint32 senderData; -}; - - -struct CreateSubscriptionIdReq { - friend struct Grep; - friend struct SumaParticipant; - - friend bool printCREATE_SUBSCRIPTION_ID_REQ(FILE *, const Uint32 *, - Uint32, Uint16); - STATIC_CONST( SignalLength = 2 ); - - Uint32 senderRef; - Uint32 senderData; -}; - - -struct CreateSubscriptionIdConf { - friend struct Grep; - friend struct SumaParticipant; - - friend bool printCREATE_SUBSCRIPTION_ID_CONF(FILE *, const Uint32 *, - Uint32, Uint16); - STATIC_CONST( SignalLength = 4 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 subscriptionId; - Uint32 subscriptionKey; -}; - - -struct CreateSubscriptionIdRef { - friend struct Grep; - friend struct SumaParticipant; - - friend bool printCREATE_SUBSCRIPTION_ID_REF(FILE *, const Uint32 *, - Uint32, Uint16); - STATIC_CONST( SignalLength = 3 ); - - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; -}; - -struct SumaStartMeReq { - STATIC_CONST( SignalLength = 1 ); - Uint32 unused; -}; - -struct SumaStartMeRef { - STATIC_CONST( SignalLength = 1 ); - Uint32 errorCode; - enum { - Busy = 0x1 - }; -}; - -struct SumaStartMeConf { - STATIC_CONST( SignalLength = 1 ); - Uint32 unused; -}; - -struct SumaHandoverReq { - STATIC_CONST( SignalLength = 3 ); - Uint32 gci; - Uint32 nodeId; - Uint32 theBucketMask[1]; -}; - -struct SumaHandoverConf { - STATIC_CONST( SignalLength = 3 ); - Uint32 gci; - Uint32 nodeId; - Uint32 theBucketMask[1]; -}; - -struct SumaContinueB -{ - enum - { - RESEND_BUCKET = 1 - ,RELEASE_GCI = 2 - ,OUT_OF_BUFFER_RELEASE = 3 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/SystemError.hpp b/storage/ndb/include/kernel/signaldata/SystemError.hpp deleted file mode 100644 index 79d73197375..00000000000 --- a/storage/ndb/include/kernel/signaldata/SystemError.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SYSTEM_ERROR_HPP -#define SYSTEM_ERROR_HPP - -#include "SignalData.hpp" - -class SystemError { - - /** - * Reciver(s) - */ - friend class Ndbcntr; - - /** - * Sender - */ - friend class Dbtc; - friend class Dbdih; - - /** - * For printing - */ - friend bool printSYSTEM_ERROR(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - GCPStopDetected = 3, - CopyFragRefError = 5, - TestStopOnError = 6, - CopySubscriptionRef = 7, - CopySubscriberRef = 8, - StartFragRefError = 9 - }; - - Uint32 errorRef; - Uint32 errorCode; - Uint32 data1; - Uint32 data2; -}; - -#endif - diff --git a/storage/ndb/include/kernel/signaldata/TamperOrd.hpp b/storage/ndb/include/kernel/signaldata/TamperOrd.hpp deleted file mode 100644 index 82b2abd9c45..00000000000 --- a/storage/ndb/include/kernel/signaldata/TamperOrd.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TAMPERORD_H -#define TAMPERORD_H - -#include "SignalData.hpp" - -class TamperOrd { - /** - * Sender - */ - friend class MgmtSrvr; - - /** - * Receiver - */ - friend class Cmvmi; - -private: - STATIC_CONST( SignalLength = 1 ); - - UintR errorNo; -}; - -#endif // TAMPERORD_H - diff --git a/storage/ndb/include/kernel/signaldata/TcCommit.hpp b/storage/ndb/include/kernel/signaldata/TcCommit.hpp deleted file mode 100644 index 5eaaff376ba..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcCommit.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TCCOMMITCONF_HPP -#define TCCOMMITCONF_HPP - -#include "SignalData.hpp" - -/** - * This is signal is sent from TC to API - * It means that the transaction was committed - */ -class TcCommitConf { - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * Reciver(s) - */ - friend class Ndb; - friend class NdbTransaction; - -public: - STATIC_CONST( SignalLength = 4 ); -private: - - /** - * apiConnectPtr - * - * Bit 0 (lowest) is used as indicator - * if == 1 then tc expects a commit ack - */ - Uint32 apiConnectPtr; - - Uint32 transId1; - Uint32 transId2; - Uint32 gci; -}; - -class TcCommitRef { - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * Reciver(s) - */ - friend class NdbTransaction; - -public: - STATIC_CONST( SignalLength = 4 ); -private: - - Uint32 apiConnectPtr; - Uint32 transId1; - Uint32 transId2; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcContinueB.hpp b/storage/ndb/include/kernel/signaldata/TcContinueB.hpp deleted file mode 100644 index 3feec993652..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcContinueB.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_CONTINUEB_H -#define TC_CONTINUEB_H - -#include "SignalData.hpp" - -class TcContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Dbtc; -private: - enum { - ZRETURN_FROM_QUEUED_DELIVERY = 1, - ZCOMPLETE_TRANS_AT_TAKE_OVER = 2, - ZCONTINUE_TIME_OUT_CONTROL = 3, - ZNODE_TAKE_OVER_COMPLETED = 4, - ZINITIALISE_RECORDS = 5, - ZSEND_COMMIT_LOOP = 6, - ZSEND_COMPLETE_LOOP = 7, - ZHANDLE_FAILED_API_NODE = 8, - ZTRANS_EVENT_REP = 9, - ZABORT_BREAK = 10, - ZABORT_TIMEOUT_BREAK = 11, - ZCONTINUE_TIME_OUT_FRAG_CONTROL = 12, - ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS = 13, - ZWAIT_ABORT_ALL = 14, - ZCHECK_SCAN_ACTIVE_FAILED_LQH = 15, - CHECK_WAIT_DROP_TAB_FAILED_LQH = 16, - TRIGGER_PENDING = 17, - - DelayTCKEYCONF = 18, - ZNF_CHECK_TRANSACTIONS = 19 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcHbRep.hpp b/storage/ndb/include/kernel/signaldata/TcHbRep.hpp deleted file mode 100644 index 7dfdcb78da8..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcHbRep.hpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_HB_REP_H -#define TC_HB_REP_H - -#include "SignalData.hpp" - -/** - * @class TcHbRep - * @brief Order tc refresh(exetend) the timeout counters for this - * transaction - * - * - SENDER: API - * - RECEIVER: TC - */ -class TcHbRep { - /** - * Receiver(s) - */ - friend class Dbtc; // Receiver - - /** - * Sender(s) - */ - friend class NdbTransaction; - - /** - * For printing - */ - friend bool printTC_HBREP(FILE *, const Uint32 *, Uint32, Uint16); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 3 ); - -private: - - /** - * DATA VARIABLES - */ - - Uint32 apiConnectPtr; // DATA 0 - UintR transId1; // DATA 1 - UintR transId2; // DATA 2 -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcIndx.hpp b/storage/ndb/include/kernel/signaldata/TcIndx.hpp deleted file mode 100644 index 1e9448716b1..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcIndx.hpp +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_INDX_H -#define TC_INDX_H - -#include "SignalData.hpp" -#include "TcKeyReq.hpp" - -class TcIndxConf { - - /** - * Reciver(s) - */ - friend class Ndb; - friend class NdbTransaction; - - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * For printing - */ - friend bool printTCINDXCONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - /** - * Length of signal - */ - STATIC_CONST( SignalLength = 5 ); - -private: - /** - * DATA VARIABLES - */ - //------------------------------------------------------------- - // Unconditional part. 
First 5 words - //------------------------------------------------------------- - - Uint32 apiConnectPtr; - Uint32 gci; - Uint32 confInfo; - Uint32 transId1; - Uint32 transId2; - - struct OperationConf { - Uint32 apiOperationPtr; - Uint32 attrInfoLen; - }; - //------------------------------------------------------------- - // Operations confirmations, - // No of actually sent = getNoOfOperations(confInfo) - //------------------------------------------------------------- - OperationConf operations[10]; - - /** - * Get:ers for confInfo - */ - static Uint32 getNoOfOperations(const Uint32 & confInfo); - static Uint32 getCommitFlag(const Uint32 & confInfo); - static bool getMarkerFlag(const Uint32 & confInfo); - - /** - * Set:ers for confInfo - */ - static void setCommitFlag(Uint32 & confInfo, Uint8 flag); - static void setNoOfOperations(Uint32 & confInfo, Uint32 noOfOps); - static void setMarkerFlag(Uint32 & confInfo, Uint32 flag); -}; - -inline -Uint32 -TcIndxConf::getNoOfOperations(const Uint32 & confInfo){ - return confInfo & 65535; -} - -inline -Uint32 -TcIndxConf::getCommitFlag(const Uint32 & confInfo){ - return ((confInfo >> 16) & 1); -} - -inline -bool -TcIndxConf::getMarkerFlag(const Uint32 & confInfo){ - const Uint32 bits = 3 << 16; // Marker only valid when doing commit - return (confInfo & bits) == bits; -} - -inline -void -TcIndxConf::setNoOfOperations(Uint32 & confInfo, Uint32 noOfOps){ - ASSERT_MAX(noOfOps, 65535, "TcIndxConf::setNoOfOperations"); - confInfo |= noOfOps; -} - -inline -void -TcIndxConf::setCommitFlag(Uint32 & confInfo, Uint8 flag){ - ASSERT_BOOL(flag, "TcIndxConf::setCommitFlag"); - confInfo |= (flag << 16); -} - -inline -void -TcIndxConf::setMarkerFlag(Uint32 & confInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcIndxConf::setMarkerFlag"); - confInfo |= (flag << 17); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp deleted file mode 100644 index 1ae6f9fbfaa..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_KEY_CONF_H -#define TC_KEY_CONF_H - -#include "SignalData.hpp" - -/** - * - */ -class TcKeyConf { - /** - * Reciver(s) - */ - friend class Ndb; - friend class NdbTransaction; - friend class Ndbcntr; - friend class DbUtil; - - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * For printing - */ - friend bool printTCKEYCONF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - /** - * Length of signal - */ - STATIC_CONST( StaticLength = 5 ); - STATIC_CONST( OperationLength = 2 ); - STATIC_CONST( DirtyReadBit = (((Uint32)1) << 31) ); - -private: - - /** - * DATA VARIABLES - */ - //------------------------------------------------------------- - // Unconditional part. 
First 5 words - //------------------------------------------------------------- - - Uint32 apiConnectPtr; - Uint32 gci; - Uint32 confInfo; - Uint32 transId1; - Uint32 transId2; - - struct OperationConf { - Uint32 apiOperationPtr; - Uint32 attrInfoLen; - }; - //------------------------------------------------------------- - // Operations confirmations, - // No of actually sent = getNoOfOperations(confInfo) - //------------------------------------------------------------- - OperationConf operations[10]; - - /** - * Get:ers for confInfo - */ - static Uint32 getNoOfOperations(const Uint32 & confInfo); - static Uint32 getCommitFlag(const Uint32 & confInfo); - static bool getMarkerFlag(const Uint32 & confInfo); - - /** - * Set:ers for confInfo - */ - static void setCommitFlag(Uint32 & confInfo, Uint8 flag); - static void setNoOfOperations(Uint32 & confInfo, Uint32 noOfOps); - static void setMarkerFlag(Uint32 & confInfo, Uint32 flag); -}; - -inline -Uint32 -TcKeyConf::getNoOfOperations(const Uint32 & confInfo){ - return confInfo & 65535; -} - -inline -Uint32 -TcKeyConf::getCommitFlag(const Uint32 & confInfo){ - return ((confInfo >> 16) & 1); -} - -inline -bool -TcKeyConf::getMarkerFlag(const Uint32 & confInfo){ - const Uint32 bits = 3 << 16; // Marker only valid when doing commit - return (confInfo & bits) == bits; -} - -inline -void -TcKeyConf::setNoOfOperations(Uint32 & confInfo, Uint32 noOfOps){ - ASSERT_MAX(noOfOps, 65535, "TcKeyConf::setNoOfOperations"); - confInfo = (confInfo & 0xFFFF0000) | noOfOps; -} - -inline -void -TcKeyConf::setCommitFlag(Uint32 & confInfo, Uint8 flag){ - ASSERT_BOOL(flag, "TcKeyConf::setCommitFlag"); - confInfo |= (flag << 16); -} - -inline -void -TcKeyConf::setMarkerFlag(Uint32 & confInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyConf::setMarkerFlag"); - confInfo |= (flag << 17); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp deleted file mode 100644 index b9c74ea3cb9..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcKeyFailConf.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TCKEYFAILCONF_HPP -#define TCKEYFAILCONF_HPP - -#include - -/** - * This is signal is sent from "Take-Over" TC after a node crash - * It means that the transaction was committed - */ -class TcKeyFailConf { - /** - * Sender(s) - */ - friend class Dbtc; - - /** - * Reciver(s) - */ - friend class Ndb; - friend class NdbTransaction; - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - /** - * apiConnectPtr - * - * Bit 0 (lowest) is used as indicator - * if == 1 then tc expects a commit ack - */ - Uint32 apiConnectPtr; - Uint32 transId1; - Uint32 transId2; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp b/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp deleted file mode 100644 index f709708f709..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TCKEYREF_HPP -#define TCKEYREF_HPP - -#include "SignalData.hpp" - -class TcKeyRef { - - /** - * Receiver(s) - */ - friend class NdbOperation; - friend class Ndbcntr; - friend class DbUtil; - - /** - * Sender(s) / Receiver(s) - */ - friend class Dbtc; - - /** - * Sender(s) - */ - friend class Dblqh; - - friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 errorCode; - Uint32 errorData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp b/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp deleted file mode 100644 index 65b6c301150..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcKeyReq.hpp +++ /dev/null @@ -1,530 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_KEY_REQ_H -#define TC_KEY_REQ_H - -#include "SignalData.hpp" - -/** - * @class TcKeyReq - * @brief Contains KeyInfo and AttrInfo and is commonly followed by more signals - * - * - SENDER: API, NDBCNTR - * - RECEIVER: TC - */ -class TcKeyReq { - /** - * Receiver(s) - */ - friend class Dbtc; // Receiver - - /** - * Sender(s) - */ - friend class Ndbcntr; - friend class NdbOperation; - friend class NdbIndexOperation; - friend class NdbScanOperation; - friend class NdbBlob; - friend class DbUtil; - - /** - * For printing - */ - friend bool printTCKEYREQ(FILE *, const Uint32 *, Uint32, Uint16); - friend bool printTCINDXREQ(FILE *, const Uint32 *, Uint32, Uint16); - -public: - /** - * Length of signal - */ - STATIC_CONST( StaticLength = 8 ); - STATIC_CONST( SignalLength = 25 ); - STATIC_CONST( MaxKeyInfo = 8 ); - STATIC_CONST( MaxAttrInfo = 5 ); - STATIC_CONST( MaxTotalAttrInfo = 0xFFFF ); - -private: - - enum AbortOption { - CommitIfFailFree = 0, AbortOnError = 0, - CommitAsMuchAsPossible = 2, IgnoreError = 2 - }; - - typedef AbortOption CommitType; - - /** - * DATA VARIABLES - */ - - // ---------------------------------------------------------------------- - // Unconditional part = must be present in signal. First 8 words - // ---------------------------------------------------------------------- - Uint32 apiConnectPtr; // DATA 0 - union { - Uint32 senderData; - UintR apiOperationPtr; // DATA 1 - }; - /** - * ATTRIBUTE INFO (attrinfo) LENGTH - * This is the total length of all attribute info that is sent from - * the application as part of this operation. - * It includes all attribute info sent in possible attrinfo - * signals as well as the attribute info sent in TCKEYREQ. - */ - UintR attrLen; // DATA 2 (also stores API Version) - UintR tableId; // DATA 3 - UintR requestInfo; // DATA 4 Various transaction flags - UintR tableSchemaVersion; // DATA 5 - UintR transId1; // DATA 6 - UintR transId2; // DATA 7 - - // ---------------------------------------------------------------------- - // Conditional part = can be present in signal. - // These four words will be sent only if their indicator is set. - // ---------------------------------------------------------------------- - UintR scanInfo; // DATA 8 Various flags for scans - UintR distrGroupHashValue; // DATA 9 - UintR distributionKeySize; // DATA 10 - UintR storedProcId; // DATA 11 - - // ---------------------------------------------------------------------- - // Variable sized KEY and ATTRINFO part. - // These will be placed to pack the signal in an appropriate manner. 
- // ---------------------------------------------------------------------- - UintR keyInfo[MaxKeyInfo]; // DATA 12 - 19 - UintR attrInfo[MaxAttrInfo]; // DATA 20 - 24 - - /** - * Get:ers for attrLen - */ - - static Uint16 getAPIVersion(const UintR & attrLen); - static Uint16 getAttrinfoLen(const UintR & attrLen); - static void setAPIVersion(UintR & attrLen, Uint16 apiVersion); - static void setAttrinfoLen(UintR & attrLen, Uint16 aiLen); - - - /** - * Get:ers for requestInfo - */ - static Uint8 getCommitFlag(const UintR & requestInfo); - static Uint8 getAbortOption(const UintR & requestInfo); - static Uint8 getStartFlag(const UintR & requestInfo); - static Uint8 getSimpleFlag(const UintR & requestInfo); - static Uint8 getDirtyFlag(const UintR & requestInfo); - static Uint8 getInterpretedFlag(const UintR & requestInfo); - static Uint8 getDistributionKeyFlag(const UintR & requestInfo); - static Uint8 getScanIndFlag(const UintR & requestInfo); - static Uint8 getOperationType(const UintR & requestInfo); - static Uint8 getExecuteFlag(const UintR & requestInfo); - - static Uint16 getKeyLength(const UintR & requestInfo); - static Uint8 getAIInTcKeyReq(const UintR & requestInfo); - static Uint8 getExecutingTrigger(const UintR & requestInfo); - static UintR getNoDiskFlag(const UintR & requestInfo); - - /** - * Get:ers for scanInfo - */ - static Uint8 getTakeOverScanFlag(const UintR & scanInfo); - static Uint16 getTakeOverScanFragment(const UintR & scanInfo); - static Uint32 getTakeOverScanInfo(const UintR & scanInfo); - - - /** - * Set:ers for requestInfo - */ - static void clearRequestInfo(UintR & requestInfo); - static void setAbortOption(UintR & requestInfo, Uint32 type); - static void setCommitFlag(UintR & requestInfo, Uint32 flag); - static void setStartFlag(UintR & requestInfo, Uint32 flag); - static void setSimpleFlag(UintR & requestInfo, Uint32 flag); - static void setDirtyFlag(UintR & requestInfo, Uint32 flag); - static void setInterpretedFlag(UintR & requestInfo, Uint32 flag); - static void setDistributionKeyFlag(UintR & requestInfo, Uint32 flag); - static void setScanIndFlag(UintR & requestInfo, Uint32 flag); - static void setExecuteFlag(UintR & requestInfo, Uint32 flag); - static void setOperationType(UintR & requestInfo, Uint32 type); - - static void setKeyLength(UintR & requestInfo, Uint32 len); - static void setAIInTcKeyReq(UintR & requestInfo, Uint32 len); - static void setExecutingTrigger(UintR & requestInfo, Uint32 flag); - static void setNoDiskFlag(UintR & requestInfo, UintR val); - - /** - * Set:ers for scanInfo - */ - static void setTakeOverScanFlag(UintR & scanInfo, Uint8 flag); - static void setTakeOverScanFragment(UintR & scanInfo, Uint16 fragment); - static void setTakeOverScanInfo(UintR & scanInfo, Uint32 aScanInfo); -}; - -/** - * Request Info - * - a = Attr Info in TCKEYREQ - 3 Bits -> Max 7 (Bit 16-18) - b = Distribution Key Ind - 1 Bit 2 - c = Commit Indicator - 1 Bit 4 - d = Dirty Indicator - 1 Bit 0 - e = Scan Indicator - 1 Bit 14 - f = Execute fired trigger - 1 Bit 19 - i = Interpreted Indicator - 1 Bit 15 - k = Key length - 12 Bits -> Max 4095 (Bit 20 - 31) - o = Operation Type - 3 Bits -> Max 7 (Bit 5-7) - l = Execute - 1 Bit 10 - p = Simple Indicator - 1 Bit 8 - s = Start Indicator - 1 Bit 11 - y = Commit Type - 2 Bit 12-13 - n = No disk flag - 1 Bit 1 - - 1111111111222222222233 - 01234567890123456789012345678901 - dnb cooop lsyyeiaaafkkkkkkkkkkkk -*/ - -#define TCKEY_NODISK_SHIFT (1) -#define COMMIT_SHIFT (4) -#define START_SHIFT (11) -#define SIMPLE_SHIFT (8) 
-#define DIRTY_SHIFT (0) -#define EXECUTE_SHIFT (10) -#define INTERPRETED_SHIFT (15) -#define DISTR_KEY_SHIFT (2) -#define SCAN_SHIFT (14) - -#define OPERATION_SHIFT (5) -#define OPERATION_MASK (7) - -#define AINFO_SHIFT (16) -#define AINFO_MASK (7) - -#define KEY_LEN_SHIFT (20) -#define KEY_LEN_MASK (4095) - -#define COMMIT_TYPE_SHIFT (12) -#define COMMIT_TYPE_MASK (3) - -#define EXECUTING_TRIGGER_SHIFT (19) - -/** - * Scan Info - * - t = Scan take over indicator - 1 Bit - n = Take over node - 12 Bits -> max 65535 - p = Scan Info - 18 Bits -> max 4095 - - 1111111111222222222233 - 01234567890123456789012345678901 - tpppppppppppppppppp nnnnnnnnnnnn -*/ - -#define TAKE_OVER_SHIFT (0) - -#define TAKE_OVER_FRAG_SHIFT (20) -#define TAKE_OVER_FRAG_MASK (4095) - -#define SCAN_INFO_SHIFT (1) -#define SCAN_INFO_MASK (262143) - -/** - * Attr Len - * - n = Attrinfo length(words) - 16 Bits -> max 65535 - a = API version no - 16 Bits -> max 65535 - - 1111111111222222222233 - 01234567890123456789012345678901 - aaaaaaaaaaaaaaaannnnnnnnnnnnnnnn -*/ - -#define API_VER_NO_SHIFT (16) -#define API_VER_NO_MASK (65535) - -#define ATTRLEN_SHIFT (0) -#define ATTRLEN_MASK (65535) - -inline -Uint8 -TcKeyReq::getCommitFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> COMMIT_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getAbortOption(const UintR & requestInfo){ - return (Uint8)((requestInfo >> COMMIT_TYPE_SHIFT) & COMMIT_TYPE_MASK); -} - -inline -Uint8 -TcKeyReq::getStartFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> START_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getSimpleFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> SIMPLE_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getExecuteFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> EXECUTE_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getDirtyFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> DIRTY_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getInterpretedFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> INTERPRETED_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getDistributionKeyFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> DISTR_KEY_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getScanIndFlag(const UintR & requestInfo){ - return (Uint8)((requestInfo >> SCAN_SHIFT) & 1); -} - -inline -Uint8 -TcKeyReq::getOperationType(const UintR & requestInfo){ - return (Uint8)((requestInfo >> OPERATION_SHIFT) & OPERATION_MASK); -} - -inline -Uint16 -TcKeyReq::getKeyLength(const UintR & requestInfo){ - return (Uint16)((requestInfo >> KEY_LEN_SHIFT) & KEY_LEN_MASK); -} - -inline -Uint8 -TcKeyReq::getAIInTcKeyReq(const UintR & requestInfo){ - return (Uint8)((requestInfo >> AINFO_SHIFT) & AINFO_MASK); -} - -inline -Uint8 -TcKeyReq::getExecutingTrigger(const UintR & requestInfo){ - return (Uint8)((requestInfo >> EXECUTING_TRIGGER_SHIFT) & 1); -} - -inline -void -TcKeyReq::clearRequestInfo(UintR & requestInfo){ - requestInfo = 0; -} - -inline -void -TcKeyReq::setAbortOption(UintR & requestInfo, Uint32 type){ - ASSERT_MAX(type, COMMIT_TYPE_MASK, "TcKeyReq::setAbortOption"); - requestInfo &= ~(COMMIT_TYPE_MASK << COMMIT_TYPE_SHIFT); - requestInfo |= (type << COMMIT_TYPE_SHIFT); -} - -inline -void -TcKeyReq::setCommitFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setCommitFlag"); - requestInfo &= ~(1 << COMMIT_SHIFT); - requestInfo |= (flag << COMMIT_SHIFT); -} - -inline -void -TcKeyReq::setStartFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, 
"TcKeyReq::setStartFlag"); - requestInfo &= ~(1 << START_SHIFT); - requestInfo |= (flag << START_SHIFT); -} - -inline -void -TcKeyReq::setSimpleFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setSimpleFlag"); - requestInfo &= ~(1 << SIMPLE_SHIFT); - requestInfo |= (flag << SIMPLE_SHIFT); -} - -inline -void -TcKeyReq::setDirtyFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setDirstFlag"); - requestInfo &= ~(1 << DIRTY_SHIFT); - requestInfo |= (flag << DIRTY_SHIFT); -} - -inline -void -TcKeyReq::setExecuteFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setExecuteFlag"); - requestInfo &= ~(1 << EXECUTE_SHIFT); - requestInfo |= (flag << EXECUTE_SHIFT); -} - -inline -void -TcKeyReq::setInterpretedFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setInterpretedFlag"); - requestInfo &= ~(1 << INTERPRETED_SHIFT); - requestInfo |= (flag << INTERPRETED_SHIFT); -} - -inline -void -TcKeyReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setDistributionKeyFlag"); - requestInfo &= ~(1 << DISTR_KEY_SHIFT); - requestInfo |= (flag << DISTR_KEY_SHIFT); -} - -inline -void -TcKeyReq::setScanIndFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setScanIndFlag"); - requestInfo &= ~(1 << SCAN_SHIFT); - requestInfo |= (flag << SCAN_SHIFT); -} - -inline -void -TcKeyReq::setOperationType(UintR & requestInfo, Uint32 type){ - ASSERT_MAX(type, OPERATION_MASK, "TcKeyReq::setOperationType"); - requestInfo &= ~(OPERATION_MASK << OPERATION_SHIFT); - requestInfo |= (type << OPERATION_SHIFT); -} - -inline -void -TcKeyReq::setKeyLength(UintR & requestInfo, Uint32 len){ - ASSERT_MAX(len, KEY_LEN_MASK, "TcKeyReq::setKeyLength"); - requestInfo &= ~(KEY_LEN_MASK << KEY_LEN_SHIFT); - requestInfo |= (len << KEY_LEN_SHIFT); -} - -inline -void -TcKeyReq::setAIInTcKeyReq(UintR & requestInfo, Uint32 len){ - ASSERT_MAX(len, AINFO_MASK, "TcKeyReq::setAIInTcKeyReq"); - requestInfo &= ~(AINFO_MASK << AINFO_SHIFT); - requestInfo |= (len << AINFO_SHIFT); -} - -inline -void -TcKeyReq::setExecutingTrigger(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setExecutingTrigger"); - requestInfo &= ~(1 << EXECUTING_TRIGGER_SHIFT); - requestInfo |= (flag << EXECUTING_TRIGGER_SHIFT); -} - -inline -Uint8 -TcKeyReq::getTakeOverScanFlag(const UintR & scanInfo){ - return (Uint8)((scanInfo >> TAKE_OVER_SHIFT) & 1); -} - -inline -Uint16 -TcKeyReq::getTakeOverScanFragment(const UintR & scanInfo){ - return (Uint16)((scanInfo >> TAKE_OVER_FRAG_SHIFT) & TAKE_OVER_FRAG_MASK); -} - -inline -Uint32 -TcKeyReq::getTakeOverScanInfo(const UintR & scanInfo){ - return (Uint32)((scanInfo >> SCAN_INFO_SHIFT) & SCAN_INFO_MASK); -} - - -inline -void -TcKeyReq::setTakeOverScanFlag(UintR & scanInfo, Uint8 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setTakeOverScanFlag"); - scanInfo |= (flag << TAKE_OVER_SHIFT); -} - -inline -void -TcKeyReq::setTakeOverScanFragment(UintR & scanInfo, Uint16 node){ -// ASSERT_MAX(node, TAKE_OVER_NODE_MASK, "TcKeyReq::setTakeOverScanNode"); - scanInfo |= (node << TAKE_OVER_FRAG_SHIFT); -} - -inline -void -TcKeyReq::setTakeOverScanInfo(UintR & scanInfo, Uint32 aScanInfo){ -// ASSERT_MAX(aScanInfo, SCAN_INFO_MASK, "TcKeyReq::setTakeOverScanInfo"); - scanInfo |= (aScanInfo << SCAN_INFO_SHIFT); -} - - -inline -Uint16 -TcKeyReq::getAPIVersion(const UintR & anAttrLen){ - return (Uint16)((anAttrLen >> API_VER_NO_SHIFT) & API_VER_NO_MASK); -} - -inline -void -TcKeyReq::setAPIVersion(UintR & 
anAttrLen, Uint16 apiVersion){ -// ASSERT_MAX(apiVersion, API_VER_NO_MASK, "TcKeyReq::setAPIVersion"); - anAttrLen |= (apiVersion << API_VER_NO_SHIFT); -} - -inline -Uint16 -TcKeyReq::getAttrinfoLen(const UintR & anAttrLen){ - return (Uint16)((anAttrLen) & ATTRLEN_MASK); -} - -inline -void -TcKeyReq::setAttrinfoLen(UintR & anAttrLen, Uint16 aiLen){ -// ASSERT_MAX(aiLen, ATTRLEN_MASK, "TcKeyReq::setAttrinfoLen"); - anAttrLen |= aiLen; -} - -inline -UintR -TcKeyReq::getNoDiskFlag(const UintR & requestInfo){ - return (requestInfo >> TCKEY_NODISK_SHIFT) & 1; -} - -inline -void -TcKeyReq::setNoDiskFlag(UintR & requestInfo, Uint32 flag){ - ASSERT_BOOL(flag, "TcKeyReq::setNoDiskFlag"); - requestInfo &= ~(1 << TCKEY_NODISK_SHIFT); - requestInfo |= (flag << TCKEY_NODISK_SHIFT); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp deleted file mode 100644 index 25e3a1f7466..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TCROLLBACKREP_HPP -#define TCROLLBACKREP_HPP - -#include "SignalData.hpp" - -class TcRollbackRep { - /** - * Sender(s) - */ - friend class NdbTransaction; - friend class DbUtil; - - /** - * Receiver(s) - */ - friend class Dbtup; - - /** - * Sender(s) / Receiver(s) - */ - friend class Dbtc; - - friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 returnCode; - Uint32 errorData; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp deleted file mode 100644 index a1256905323..00000000000 --- a/storage/ndb/include/kernel/signaldata/TcSizeAltReq.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TC_SIZE_ALT_REQ_H -#define TC_SIZE_ALT_REQ_H - - - -#include "SignalData.hpp" - -class TcSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dbtc; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_API_CONNECT = 1 ); - STATIC_CONST( IND_TC_CONNECT = 2 ); - STATIC_CONST( IND_UNUSED = 3 ); - STATIC_CONST( IND_TABLE = 4 ); - STATIC_CONST( IND_TC_SCAN = 5 ); - STATIC_CONST( IND_LOCAL_SCAN = 6 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[7]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TestOrd.hpp b/storage/ndb/include/kernel/signaldata/TestOrd.hpp deleted file mode 100644 index e39236bf3fb..00000000000 --- a/storage/ndb/include/kernel/signaldata/TestOrd.hpp +++ /dev/null @@ -1,229 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TEST_ORD_H -#define TEST_ORD_H - -#include "SignalData.hpp" - -/** - * Send by API to preform TEST ON / TEST OFF - * - * SENDER: API - * RECIVER: SimBlockCMCtrBlck - */ -class TestOrd { - friend class Ndb; - friend class Cmvmi; - friend class MgmtSrvr; -public: - - enum Command { - KeepUnchanged = 0, - On = 1, - Off = 2, - Toggle = 3, - COMMAND_MASK = 3 - }; - - enum SignalLoggerSpecification { - InputSignals = 1, - OutputSignals = 2, - InputOutputSignals = 3, - LOG_MASK = 3 - }; - - enum TraceSpecification { - TraceALL = 0, - TraceAPI = 1, - TraceGlobalCheckpoint = 2, - TraceLocalCheckpoint = 4, - TraceDisconnect = 8, - TRACE_MASK = 15 - }; - -private: - STATIC_CONST( SignalLength = 25 ); - - /** - * Clear Signal - */ - void clear(); - - /** - * Set/Get test command - */ - void setTestCommand(Command); - void getTestCommand(Command&) const; - - /** - * Set trace command - */ - void setTraceCommand(Command, TraceSpecification); - - /** - * Get trace command - */ - void getTraceCommand(Command&, TraceSpecification&) const; - - /** - * Return no of signal logger commands - * - * -1 Means apply command(0) to all blocks - * - */ - UintR getNoOfSignalLoggerCommands() const; - - /** - * Add a signal logger command to a specific block - */ - void addSignalLoggerCommand(BlockNumber, Command, SignalLoggerSpecification); - - /** - * Add a signal logger command to all blocks - * - * Note removes all previously added commands - * - */ - void addSignalLoggerCommand(Command, SignalLoggerSpecification); - - /** - * Get Signal logger command - */ - void getSignalLoggerCommand(int no, BlockNumber&, Command&, SignalLoggerSpecification&) const; - - UintR testCommand; // DATA 0 - UintR traceCommand; // 
DATA 1 - UintR noOfSignalLoggerCommands; // DATA 2 - UintR signalLoggerCommands[22]; // DATA 3 - 25 -}; - -#define COMMAND_SHIFT (0) -#define TRACE_SHIFT (2) -#define LOG_SHIFT (2) - -#define BLOCK_NO_SHIFT (16) -#define BLOCK_NO_MASK 65535 - -/** - * Clear Signal - */ -inline -void -TestOrd::clear(){ - setTestCommand(KeepUnchanged); - setTraceCommand(KeepUnchanged, TraceAPI); // - noOfSignalLoggerCommands = 0; -} - -/** - * Set/Get test command - */ -inline -void -TestOrd::setTestCommand(Command cmd){ - ASSERT_RANGE(cmd, 0, COMMAND_MASK, "TestOrd::setTestCommand"); - testCommand = cmd; -} - -inline -void -TestOrd::getTestCommand(Command & cmd) const{ - cmd = (Command)(testCommand >> COMMAND_SHIFT); -} - -/** - * Set trace command - */ -inline -void -TestOrd::setTraceCommand(Command cmd, TraceSpecification spec){ - ASSERT_RANGE(cmd, 0, COMMAND_MASK, "TestOrd::setTraceCommand"); - ASSERT_RANGE(spec, 0, TRACE_MASK, "TestOrd::setTraceCommand"); - traceCommand = (cmd << COMMAND_SHIFT) | (spec << TRACE_SHIFT); -} - -/** - * Get trace command - */ -inline -void -TestOrd::getTraceCommand(Command & cmd, TraceSpecification & spec) const{ - cmd = (Command)((traceCommand >> COMMAND_SHIFT) & COMMAND_MASK); - spec = (TraceSpecification)((traceCommand >> TRACE_SHIFT) & TRACE_MASK); -} - -/** - * Return no of signal logger commands - * - * -1 Means apply command(0) to all blocks - * - */ -inline -UintR -TestOrd::getNoOfSignalLoggerCommands() const{ - return noOfSignalLoggerCommands; -} - -/** - * Add a signal logger command to a specific block - */ -inline -void -TestOrd::addSignalLoggerCommand(BlockNumber bnr, - Command cmd, SignalLoggerSpecification spec){ - ASSERT_RANGE(cmd, 0, COMMAND_MASK, "TestOrd::addSignalLoggerCommand"); - ASSERT_RANGE(spec, 0, LOG_MASK, "TestOrd::addSignalLoggerCommand"); - //ASSERT_MAX(bnr, BLOCK_NO_MASK, "TestOrd::addSignalLoggerCommand"); - - signalLoggerCommands[noOfSignalLoggerCommands] = - (bnr << BLOCK_NO_SHIFT) | (cmd << COMMAND_SHIFT) | (spec << LOG_SHIFT); - noOfSignalLoggerCommands ++; -} - -/** - * Add a signal logger command to all blocks - * - * Note removes all previously added commands - * - */ -inline -void -TestOrd::addSignalLoggerCommand(Command cmd, SignalLoggerSpecification spec){ - ASSERT_RANGE(cmd, 0, COMMAND_MASK, "TestOrd::addSignalLoggerCommand"); - ASSERT_RANGE(spec, 0, LOG_MASK, "TestOrd::addSignalLoggerCommand"); - - noOfSignalLoggerCommands = ~0; - signalLoggerCommands[0] = (cmd << COMMAND_SHIFT) | (spec << LOG_SHIFT); -} - -/** - * Get Signal logger command - */ -inline -void -TestOrd::getSignalLoggerCommand(int no, BlockNumber & bnr, - Command & cmd, - SignalLoggerSpecification & spec) const{ - bnr = (BlockNumber)((signalLoggerCommands[no] >> BLOCK_NO_SHIFT) - & BLOCK_NO_MASK); - cmd = (Command)((signalLoggerCommands[no] >> COMMAND_SHIFT) - & COMMAND_MASK); - spec = (SignalLoggerSpecification)((signalLoggerCommands[no] >> LOG_SHIFT) - & LOG_MASK); -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TransIdAI.hpp b/storage/ndb/include/kernel/signaldata/TransIdAI.hpp deleted file mode 100755 index 2475f839fe3..00000000000 --- a/storage/ndb/include/kernel/signaldata/TransIdAI.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TRANSID_AI_HPP -#define TRANSID_AI_HPP - -#include "SignalData.hpp" - -class TransIdAI { - /** - * Sender(s) - */ - friend class Dbtup; - - /** - * Receiver(s) - */ - friend class NdbTransaction; - friend class Dbtc; - friend class Dbutil; - friend class Dblqh; - friend class Suma; - - friend bool printTRANSID_AI(FILE *, const Uint32 *, Uint32, Uint16); - -public: - STATIC_CONST( HeaderLength = 3 ); - STATIC_CONST( DataLength = 22 ); - - // Public methods -public: - Uint32* getData() const; - -public: - Uint32 connectPtr; - Uint32 transId[2]; - Uint32 attrData[DataLength]; -}; - -inline -Uint32* TransIdAI::getData() const -{ - return (Uint32*)&attrData[0]; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp b/storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp deleted file mode 100644 index 0fe591fca04..00000000000 --- a/storage/ndb/include/kernel/signaldata/TrigAttrInfo.hpp +++ /dev/null @@ -1,138 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TRIG_ATTRINFO_HPP -#define TRIG_ATTRINFO_HPP - -#include "SignalData.hpp" -#include -#include -#include - -/** - * TrigAttrInfo - * - * This signal is sent by TUP to signal - * that a trigger has fired - */ -class TrigAttrInfo { - /** - * Sender(s) - */ - // API - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbtup; - - /** - * Reciver(s) - */ - friend class Dbtc; - friend class Backup; - friend class SumaParticipant; - - /** - * For printing - */ - friend bool printTRIG_ATTRINFO(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: -enum AttrInfoType { - PRIMARY_KEY = 0, - BEFORE_VALUES = 1, - AFTER_VALUES = 2 -}; - - STATIC_CONST( DataLength = 22 ); - STATIC_CONST( StaticLength = 3 ); - -private: - Uint32 m_connectionPtr; - Uint32 m_trigId; - Uint32 m_type; - Uint32 m_data[DataLength]; - - // Public methods -public: - Uint32 getConnectionPtr() const; - void setConnectionPtr(Uint32); - AttrInfoType getAttrInfoType() const; - void setAttrInfoType(AttrInfoType anAttrType); - Uint32 getTriggerId() const; - void setTriggerId(Uint32 aTriggerId); - Uint32 getTransactionId1() const; - void setTransactionId1(Uint32 aTransId); - Uint32 getTransactionId2() const; - void setTransactionId2(Uint32 aTransId); - Uint32* getData() const; - int setData(Uint32* aDataBuf, Uint32 aDataLen); -}; - -inline -Uint32 TrigAttrInfo::getConnectionPtr() const -{ - return m_connectionPtr; -} - -inline -void TrigAttrInfo::setConnectionPtr(Uint32 aConnectionPtr) -{ - m_connectionPtr = aConnectionPtr; -} - -inline -TrigAttrInfo::AttrInfoType TrigAttrInfo::getAttrInfoType() const -{ - return (TrigAttrInfo::AttrInfoType) m_type; -} - -inline -void TrigAttrInfo::setAttrInfoType(TrigAttrInfo::AttrInfoType anAttrType) -{ - m_type = (Uint32) anAttrType; -} - -inline -Uint32 TrigAttrInfo::getTriggerId() const -{ - return m_trigId; -} - -inline -void TrigAttrInfo::setTriggerId(Uint32 aTriggerId) -{ - m_trigId = aTriggerId; -} - -inline -Uint32* TrigAttrInfo::getData() const -{ - return (Uint32*)&m_data[0]; -} - -inline -int TrigAttrInfo::setData(Uint32* aDataBuf, Uint32 aDataLen) -{ - if (aDataLen > DataLength) - return -1; - memcpy(m_data, aDataBuf, aDataLen*sizeof(Uint32)); - - return 0; -} - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TsmanContinueB.hpp b/storage/ndb/include/kernel/signaldata/TsmanContinueB.hpp deleted file mode 100644 index 983f07b0823..00000000000 --- a/storage/ndb/include/kernel/signaldata/TsmanContinueB.hpp +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TSMAN_CONTINUEB_H -#define TSMAN_CONTINUEB_H - -#include "SignalData.hpp" - -class TsmanContinueB { - /** - * Sender(s)/Reciver(s) - */ - friend class Tsman; -private: - enum { - LOAD_EXTENT_PAGES = 0, - SCAN_TABLESPACE_EXTENT_HEADERS = 1, - SCAN_DATAFILE_EXTENT_HEADERS = 2, - END_LCP = 3, - RELEASE_EXTENT_PAGES = 4 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TupCommit.hpp b/storage/ndb/include/kernel/signaldata/TupCommit.hpp deleted file mode 100644 index 50ce5f7bdf7..00000000000 --- a/storage/ndb/include/kernel/signaldata/TupCommit.hpp +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUP_COMMIT_H -#define TUP_COMMIT_H - -#include "SignalData.hpp" - -class TupCommitReq { - /** - * Reciver(s) - */ - friend class Dbtup; - - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * For printing - */ - friend bool printTUPCOMMITREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 4 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 opPtr; - Uint32 gci; - Uint32 hashValue; - Uint32 diskpage; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TupFrag.hpp b/storage/ndb/include/kernel/signaldata/TupFrag.hpp deleted file mode 100644 index 5dcfaf5fd27..00000000000 --- a/storage/ndb/include/kernel/signaldata/TupFrag.hpp +++ /dev/null @@ -1,210 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUP_FRAG_HPP -#define TUP_FRAG_HPP - -#include "SignalData.hpp" - -/* - * Add fragment and add attribute signals between LQH and TUP,TUX. - * NOTE: return signals from TUP,TUX to LQH must have same format. 
- */ - -// TUP: add fragment - -class TupFragReq { - friend class Dblqh; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 18 ); -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 reqInfo; - Uint32 tableId; - Uint32 noOfAttr; - Uint32 fragId; - Uint32 maxRowsLow; - Uint32 maxRowsHigh; - Uint32 minRowsLow; - Uint32 minRowsHigh; - Uint32 noOfNullAttr; - Uint32 schemaVersion; - Uint32 noOfKeyAttr; - Uint32 noOfCharsets; - Uint32 checksumIndicator; - Uint32 globalCheckpointIdIndicator; - Uint32 tablespaceid; - Uint32 forceVarPartFlag; -}; - -class TupFragConf { - friend class Dblqh; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 4 ); -private: - Uint32 userPtr; - Uint32 tupConnectPtr; - Uint32 fragPtr; - Uint32 fragId; -}; - -class TupFragRef { - friend class Dblqh; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 userPtr; - Uint32 errorCode; -}; - -// TUX: add fragment - -class TuxFragReq { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 14 ); -private: - Uint32 userPtr; - Uint32 userRef; - Uint32 reqInfo; - Uint32 tableId; - Uint32 noOfAttr; - Uint32 fragId; - Uint32 fragOff; - Uint32 tableType; - Uint32 primaryTableId; - Uint32 tupIndexFragPtrI; - Uint32 tupTableFragPtrI[2]; - Uint32 accTableFragPtrI[2]; -}; - -class TuxFragConf { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 4 ); -private: - Uint32 userPtr; - Uint32 tuxConnectPtr; - Uint32 fragPtr; - Uint32 fragId; -}; - -class TuxFragRef { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 2 ); - enum ErrorCode { - NoError = 0, - InvalidRequest = 903, - NoFreeFragment = 904, - NoFreeAttributes = 905 - }; -private: - Uint32 userPtr; - Uint32 errorCode; -}; - -// TUP: add attribute - -class TupAddAttrReq { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 5 ); -private: - Uint32 tupConnectPtr; - Uint32 notused1; - Uint32 attrId; - Uint32 attrDescriptor; - Uint32 extTypeInfo; -}; - -class TupAddAttrConf { - friend class Dblqh; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 userPtr; - Uint32 lastAttr; // bool: got last attr and closed frag op -}; - -class TupAddAttrRef { - friend class Dblqh; - friend class Dbtup; -public: - STATIC_CONST( SignalLength = 2 ); - enum ErrorCode { - NoError = 0, - InvalidCharset = 743, - TooManyBitsUsed = 831, - UnsupportedType = 906 - }; -private: - Uint32 userPtr; - Uint32 errorCode; -}; - -// TUX: add attribute - -class TuxAddAttrReq { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 6 ); -private: - Uint32 tuxConnectPtr; - Uint32 notused1; - Uint32 attrId; - Uint32 attrDescriptor; - Uint32 extTypeInfo; - Uint32 primaryAttrId; -}; - -class TuxAddAttrConf { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 2 ); -private: - Uint32 userPtr; - Uint32 lastAttr; // bool: got last attr and closed frag op -}; - -class TuxAddAttrRef { - friend class Dblqh; - friend class Dbtux; -public: - STATIC_CONST( SignalLength = 2 ); - enum ErrorCode { - NoError = 0, - InvalidAttributeType = 906, - InvalidCharset = 907, - InvalidNodeSize = 908 - }; -private: - Uint32 userPtr; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TupKey.hpp b/storage/ndb/include/kernel/signaldata/TupKey.hpp deleted file mode 100644 index 476530bf491..00000000000 --- 
a/storage/ndb/include/kernel/signaldata/TupKey.hpp +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUP_KEY_H -#define TUP_KEY_H - -#include "SignalData.hpp" - -class TupKeyReq { - /** - * Reciver(s) - */ - friend class Dbtup; - - /** - * Sender(s) - */ - friend class Dblqh; - - /** - * For printing - */ - friend bool printTUPKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 18 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 connectPtr; - Uint32 request; - Uint32 keyRef1; - Uint32 keyRef2; - Uint32 attrBufLen; - Uint32 opRef; - Uint32 applRef; - Uint32 storedProcedure; - Uint32 transId1; - Uint32 transId2; - Uint32 fragPtr; - Uint32 primaryReplica; - Uint32 coordinatorTC; - Uint32 tcOpIndex; - Uint32 savePointId; - Uint32 disk_page; - Uint32 m_row_id_page_no; - Uint32 m_row_id_page_idx; -}; - -class TupKeyConf { - /** - * Reciver(s) - */ - friend class Dblqh; - - /** - * Sender(s) - */ - friend class Dbtup; - - /** - * For printing - */ - friend bool printTUPKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 6 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 userPtr; - Uint32 readLength; - Uint32 writeLength; - Uint32 noFiredTriggers; - Uint32 lastRow; - Uint32 rowid; -}; - -class TupKeyRef { - /** - * Reciver(s) - */ - friend class Dblqh; - - /** - * Sender(s) - */ - friend class Dbtup; - - /** - * For printing - */ - friend bool printTUPKEYREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 2 ); - -private: - - /** - * DATA VARIABLES - */ - Uint32 userRef; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp deleted file mode 100644 index c564bf6607f..00000000000 --- a/storage/ndb/include/kernel/signaldata/TupSizeAltReq.hpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUP_SIZE_ALT_REQ_H -#define TUP_SIZE_ALT_REQ_H - - - -#include "SignalData.hpp" - -class TupSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Reciver(s) - */ - friend class Dbtup; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_DISK_PAGE_ARRAY = 1 ); - STATIC_CONST( IND_DISK_PAGE_REPRESENT = 2 ); - STATIC_CONST( IND_FRAG = 3 ); - STATIC_CONST( IND_PAGE_CLUSTER = 4 ); - STATIC_CONST( IND_LOGIC_PAGE = 5 ); - STATIC_CONST( IND_OP_RECS = 6 ); - STATIC_CONST( IND_PAGE = 7 ); - STATIC_CONST( IND_PAGE_RANGE = 8 ); - STATIC_CONST( IND_TABLE = 9 ); - STATIC_CONST( IND_TABLE_DESC = 10 ); - STATIC_CONST( IND_DELETED_BLOCKS = 11 ); - STATIC_CONST( IND_STORED_PROC = 12 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[13]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TuxBound.hpp b/storage/ndb/include/kernel/signaldata/TuxBound.hpp deleted file mode 100644 index 03f33d4b5fa..00000000000 --- a/storage/ndb/include/kernel/signaldata/TuxBound.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUX_BOUND_HPP -#define TUX_BOUND_HPP - -#include "SignalData.hpp" - -class TuxBoundInfo { - friend class Dblqh; - friend class Dbtux; -public: - // must match API (0-4 and no changes expected) - enum BoundType { - BoundLE = 0, // bit 1 for less/greater - BoundLT = 1, // bit 0 for strict - BoundGE = 2, - BoundGT = 3, - BoundEQ = 4 - }; - enum ErrorCode { - InvalidAttrInfo = 4110, - InvalidBounds = 4259, - OutOfBuffers = 873, - InvalidCharFormat = 744, - TooMuchAttrInfo = 823 - }; - STATIC_CONST( SignalLength = 3 ); -private: - /* - * Error code set by TUX. Zero means no error. - */ - Uint32 errorCode; - /* - * Pointer (i-value) to scan operation in TUX. - */ - Uint32 tuxScanPtrI; - /* - * Number of words of bound info included after fixed signal data. - */ - Uint32 boundAiLength; - - Uint32 data[1]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TuxContinueB.hpp b/storage/ndb/include/kernel/signaldata/TuxContinueB.hpp deleted file mode 100644 index b75e87dd7ca..00000000000 --- a/storage/ndb/include/kernel/signaldata/TuxContinueB.hpp +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUX_CONTINUEB_H -#define TUX_CONTINUEB_H - -#include "SignalData.hpp" - -class TuxContinueB { - friend class Dbtux; -private: - enum { - DropIndex = 1 - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TuxMaint.hpp b/storage/ndb/include/kernel/signaldata/TuxMaint.hpp deleted file mode 100644 index 73bc14264d8..00000000000 --- a/storage/ndb/include/kernel/signaldata/TuxMaint.hpp +++ /dev/null @@ -1,70 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUX_MAINT_HPP -#define TUX_MAINT_HPP - -#include "SignalData.hpp" - -/* - * Ordered index maintenance operation. - */ - -class TuxMaintReq { - friend class Dbtup; - friend class Dbtux; - friend bool printTUX_MAINT_REQ(FILE*, const Uint32*, Uint32, Uint16); -public: - enum OpCode { // first byte of opInfo - OpAdd = 1, - OpRemove = 2 - }; - enum OpFlag { // second byte of opInfo - }; - enum ErrorCode { - NoError = 0, // must be zero - SearchError = 901, // add + found or remove + not found - NoMemError = 902 - }; - STATIC_CONST( SignalLength = 8 ); - - /* - * Error code set by TUX. Zero means no error. - */ - Uint32 errorCode; - /* - * Table, index, fragment. - */ - Uint32 tableId; - Uint32 indexId; - Uint32 fragId; - /* - * Tuple version identified by physical address of "original" tuple - * and version number. - */ - Uint32 pageId; - Uint32 pageIndex; - Uint32 tupVersion; - /* - * Operation code and flags. - */ - Uint32 opInfo; - - Uint32 tupFragPtrI; - Uint32 fragPageId; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp b/storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp deleted file mode 100644 index 3e92f1c4f17..00000000000 --- a/storage/ndb/include/kernel/signaldata/TuxSizeAltReq.hpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TUX_SIZE_ALT_REQ_H -#define TUX_SIZE_ALT_REQ_H - -#include "SignalData.hpp" - -class TuxSizeAltReq { - /** - * Sender(s) - */ - friend class ClusterConfiguration; - - /** - * Receiver(s) - */ - friend class Dbtux; -private: - /** - * Indexes in theData - */ - STATIC_CONST( IND_BLOCK_REF = 0 ); - STATIC_CONST( IND_INDEX = 1 ); - STATIC_CONST( IND_FRAGMENT = 2 ); - STATIC_CONST( IND_ATTRIBUTE = 3 ); - STATIC_CONST( IND_SCAN = 4 ); - - /** - * Use the index definitions to use the signal data - */ - UintR theData[4]; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UpdateTo.hpp b/storage/ndb/include/kernel/signaldata/UpdateTo.hpp deleted file mode 100644 index f3b12c0366f..00000000000 --- a/storage/ndb/include/kernel/signaldata/UpdateTo.hpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UPDATE_TO_HPP -#define UPDATE_TO_HPP - -class UpdateToReq { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 6 ); -private: - enum UpdateState { - TO_COPY_FRAG_COMPLETED = 0, - TO_COPY_COMPLETED = 1 - }; - Uint32 userPtr; - BlockReference userRef; - UpdateState updateState; - Uint32 startingNodeId; - - /** - * Only when TO_COPY_FRAG_COMPLETED - */ - Uint32 tableId; - Uint32 fragmentNo; -}; - -class UpdateToConf { - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - -public: - STATIC_CONST( SignalLength = 3 ); -private: - - Uint32 userPtr; - Uint32 sendingNodeId; - Uint32 startingNodeId; -}; -#endif diff --git a/storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp b/storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp deleted file mode 100644 index 2c0459940cd..00000000000 --- a/storage/ndb/include/kernel/signaldata/UpgradeStartup.hpp +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef NDB_UPGRADE_STARTUP -#define NDB_UPGRADE_STARTUP - -class Ndbcntr; - -struct UpgradeStartup { - - static void installEXEC(SimulatedBlock*); - - STATIC_CONST( GSN_CM_APPCHG = 131 ); - STATIC_CONST( GSN_CNTR_MASTERCONF = 148 ); - STATIC_CONST( GSN_CNTR_MASTERREF = 149 ); - STATIC_CONST( GSN_CNTR_MASTERREQ = 150 ); - - static void sendCmAppChg(Ndbcntr&, Signal *, Uint32 startLevel); - static void execCM_APPCHG(SimulatedBlock& block, Signal*); - static void sendCntrMasterReq(Ndbcntr& cntr, Signal* signal, Uint32 n); - static void execCNTR_MASTER_REPLY(SimulatedBlock & block, Signal* signal); - - struct CntrMasterReq { - STATIC_CONST( SignalLength = 4 + NdbNodeBitmask::Size ); - - Uint32 userBlockRef; - Uint32 userNodeId; - Uint32 typeOfStart; - Uint32 noRestartNodes; - Uint32 theNodes[NdbNodeBitmask::Size]; - }; - - struct CntrMasterConf { - STATIC_CONST( SignalLength = 1 + NdbNodeBitmask::Size ); - - Uint32 noStartNodes; - Uint32 theNodes[NdbNodeBitmask::Size]; - }; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilDelete.hpp b/storage/ndb/include/kernel/signaldata/UtilDelete.hpp deleted file mode 100644 index f413654fe77..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilDelete.hpp +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_DELETE_HPP -#define UTIL_DELETE_HPP - -#include "SignalData.hpp" -#include - -/** - * UTIL_DELETE_REQ, UTIL_DELETE_CONF, UTIL_DELETE_REF - */ - -/** - * @class UtilDeleteReq - * @brief Delete transaction in Util block - * - * Data format: - * - UTIL_DELETE_REQ - */ - -class UtilDeleteReq { - /** Sender(s) / Receiver(s) */ - friend class DbUtil; - - /** For printing */ - friend bool printUTIL_DELETE_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( DataLength = 22 ); - STATIC_CONST( HeaderLength = 3 ); - -private: - Uint32 senderData; - Uint32 prepareId; // Which prepared transaction to execute - Uint32 totalDataLen; // Total length of attrData (including AttributeHeaders - // and possibly spanning over multiple signals) - - /** - * Length in this = signal->length() - 3 - * Sender block ref = signal->senderBlockRef() - */ - - Uint32 attrData[DataLength]; -}; - - - -/** - * @class UtilDeleteConf - * - * Data format: - * - UTIL_PREPARE_CONF - */ - -class UtilDeleteConf { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - - /** - * For printing - */ - friend bool printUTIL_DELETE_CONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 senderData; ///< The client data provided by the client sending - ///< UTIL_DELETE_REQ -}; - - -/** - * @class UtilDeleteRef - * - * Data format: - * - UTIL_PREPARE_REF - */ - -class UtilDeleteRef { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - - /** - * For printing - */ - friend bool printUTIL_DELETE_REF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; - Uint32 errorCode; ///< See UtilExecuteRef::errorCode - Uint32 TCErrorCode; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilExecute.hpp b/storage/ndb/include/kernel/signaldata/UtilExecute.hpp deleted file mode 100644 index c0b0776d09c..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilExecute.hpp +++ /dev/null @@ -1,135 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_EXECUTE_HPP -#define UTIL_EXECUTE_HPP - -#include "SignalData.hpp" -#include - -/** - * UTIL_EXECUTE_REQ, UTIL_EXECUTE_CONF, UTIL_EXECUTE_REF - */ - -/** - * @class UtilExecuteReq - * @brief Execute transaction in Util block - * - * Data format: - * - UTIL_EXECUTE_REQ - */ - -class UtilExecuteReq { - /** Sender(s) / Receiver(s) */ - friend class DbUtil; - friend class Trix; - - /** For printing */ - friend bool printUTIL_EXECUTE_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 3 ); - STATIC_CONST( HEADER_SECTION = 0 ); - STATIC_CONST( DATA_SECTION = 1 ); - STATIC_CONST( NoOfSections = 2 ); - - GET_SET_SENDERREF - GET_SET_SENDERDATA - void setPrepareId(Uint32 pId) { prepareId = pId; }; // !! unsets release flag - Uint32 getPrepareId() const { return prepareId & 0xFF; }; - void setReleaseFlag() { prepareId |= 0x100; }; - bool getReleaseFlag() const { return (prepareId & 0x100) != 0; }; -private: - Uint32 senderData; // MUST be no 1! - Uint32 senderRef; - Uint32 prepareId; // Which prepared transaction to execute -}; - -/** - * @class UtilExecuteConf - * - * Data format: - * - UTIL_PREPARE_CONF - */ - -class UtilExecuteConf { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - friend class Trix; - - /** - * For printing - */ - friend bool printUTIL_EXECUTE_CONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); -public: - STATIC_CONST( SignalLength = 1 ); - - GET_SET_SENDERDATA -private: - Uint32 senderData; // MUST be no 1! -}; - - -/** - * @class UtilExecuteRef - * - * Data format: - * - UTIL_PREPARE_REF - */ - -class UtilExecuteRef { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - friend class Trix; - - /** - * For printing - */ - friend bool printUTIL_EXECUTE_REF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 3 ); - - enum ErrorCode { - IllegalKeyNumber = 1, - IllegalAttrNumber = 2, - TCError = 3, - AllocationError = 5, - MissingDataSection = 6, - MissingData = 7 - }; - - GET_SET_SENDERDATA - GET_SET_ERRORCODE - GET_SET_TCERRORCODE -private: - Uint32 senderData; // MUST be no 1! - Uint32 errorCode; - Uint32 TCErrorCode; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilLock.hpp b/storage/ndb/include/kernel/signaldata/UtilLock.hpp deleted file mode 100644 index 2fe99436c53..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilLock.hpp +++ /dev/null @@ -1,334 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_LOCK_HPP -#define UTIL_LOCK_HPP - -#include "SignalData.hpp" - -class UtilLockReq { - - /** - * Receiver - */ - friend class DbUtil; - - /** - * Sender - */ - friend class Dbdih; - friend class MutexManager; - - friend bool printUTIL_LOCK_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum RequestInfo { - TryLock = 1 - }; -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 requestInfo; -}; - -class UtilLockConf { - - /** - * Receiver - */ - friend class Dbdih; - friend class MutexManager; - - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_LOCK_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 lockKey; -}; - -class UtilLockRef { - - /** - * Reciver - */ - friend class Dbdih; - friend class MutexManager; - - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_LOCK_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - OK = 0, - NoSuchLock = 1, - OutOfLockRecords = 2, - DistributedLockNotSupported = 3, - LockAlreadyHeld = 4 - - }; -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 errorCode; -}; - -class UtilUnlockReq { - - /** - * Receiver - */ - friend class DbUtil; - - /** - * Sender - */ - friend class Dbdih; - friend class MutexManager; - - friend bool printUTIL_UNLOCK_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 lockKey; -}; - -class UtilUnlockConf { - - /** - * Receiver - */ - friend class Dbdih; - friend class MutexManager; - - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_UNLOCK_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; -}; - -class UtilUnlockRef { - - /** - * Reciver - */ - friend class Dbdih; - friend class MutexManager; - - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_UNLOCK_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - - enum ErrorCode { - OK = 0, - NoSuchLock = 1, - NotLockOwner = 2 - }; -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 errorCode; -}; - -/** - * Creating a lock - */ -class UtilCreateLockReq { - /** - * Receiver - */ - friend class DbUtil; - - /** - * Sender - */ - friend class MutexManager; - - friend bool printUTIL_CREATE_LOCK_REQ(FILE *, const Uint32*, Uint32, Uint16); -public: - enum LockType { - Mutex = 0 // Lock with only exclusive locks - }; - - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 lockType; -}; - -class UtilCreateLockRef { - /** - * Sender - */ - friend class DbUtil; - - /** - * Receiver - */ - friend class MutexManager; - - friend bool printUTIL_CREATE_LOCK_REF(FILE *, const Uint32*, Uint32, Uint16); -public: - enum ErrorCode { - OK = 0, - OutOfLockQueueRecords = 1, - LockIdAlreadyUsed = 2, - UnsupportedLockType = 3 - }; - - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - 
Uint32 senderRef; - Uint32 lockId; - Uint32 errorCode; -}; - -class UtilCreateLockConf { - /** - * Sender - */ - friend class DbUtil; - - /** - * Receiver - */ - friend class MutexManager; - - friend bool printUTIL_CREATE_LOCK_CONF(FILE*, const Uint32*, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; -}; - -/** - * Creating a lock - */ -class UtilDestroyLockReq { - /** - * Receiver - */ - friend class DbUtil; - - /** - * Sender - */ - friend class MutexManager; - - friend bool printUTIL_DESTROY_LOCK_REQ(FILE *, const Uint32*, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 lockKey; -}; - -class UtilDestroyLockRef { - /** - * Sender - */ - friend class DbUtil; - - /** - * Receiver - */ - friend class MutexManager; - - friend bool printUTIL_DESTROY_LOCK_REF(FILE *, const Uint32*, Uint32, Uint16); -public: - enum ErrorCode { - OK = 0, - NoSuchLock = 1, - NotLockOwner = 2 - }; - - STATIC_CONST( SignalLength = 4 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; - Uint32 errorCode; -}; - -class UtilDestroyLockConf { - /** - * Sender - */ - friend class DbUtil; - - /** - * Receiver - */ - friend class MutexManager; - - friend bool printUTIL_DESTROY_LOCK_CONF(FILE*, const Uint32*, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - -public: - Uint32 senderData; - Uint32 senderRef; - Uint32 lockId; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilPrepare.hpp b/storage/ndb/include/kernel/signaldata/UtilPrepare.hpp deleted file mode 100644 index fca0aed7bd6..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilPrepare.hpp +++ /dev/null @@ -1,161 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_PREPARE_REQ_HPP -#define UTIL_PREPARE_REQ_HPP - -#include "SignalData.hpp" -#include - -#ifdef NDB_WIN32 -#ifdef NO_ERROR -#undef NO_ERROR -#endif -#endif - -/** - * UTIL_PREPARE_REQ, UTIL_PREPARE_CONF, UTIL_PREPARE_REF - */ - -/** - * @class UtilPrepareReq - * @brief Prepare transaction in Util block - * - * Data format: - * - UTIL_PREPARE_REQ ( +)+ - */ -class UtilPrepareReq { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - friend class Trix; - - /** - * For printing - */ - friend bool printUTIL_PREPARE_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - -public: - enum OperationTypeValue { - Read = 0, - Update = 1, - Insert = 2, - Delete = 3, - Write = 4 - - }; - - enum KeyValue { - NoOfOperations = 1, ///< No of operations in transaction - OperationType = 2, /// - TableName = 3, ///< String - AttributeName = 4, ///< String - TableId = 5, - AttributeId = 6 - }; - - // Signal constants - STATIC_CONST( SignalLength = 2 ); - STATIC_CONST( PROPERTIES_SECTION = 0 ); - STATIC_CONST( NoOfSections = 1 ); - - GET_SET_SENDERREF - GET_SET_SENDERDATA -private: - Uint32 senderData; // MUST be no 1! - Uint32 senderRef; -}; - -/** - * @class UtilPrepareConf - * - * Data format: - * - UTIL_PREPARE_CONF - */ - -class UtilPrepareConf { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - friend class Trix; - - /** - * For printing - */ - friend bool printUTIL_PREPARE_CONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - -public: - STATIC_CONST( SignalLength = 2 ); - - GET_SET_SENDERDATA - GET_SET_PREPAREID -private: - Uint32 senderData; // MUST be no 1! - Uint32 prepareId; -}; - - -/** - * @class UtilPrepareRef - * - * Data format: - * - UTIL_PREPARE_REF - */ - -class UtilPrepareRef { - /** - * Sender(s) / Receiver(s) - */ - friend class DbUtil; - friend class Trix; - - /** - * For printing - */ - friend bool printUTIL_PREPARE_REF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo); - -public: - enum ErrorCode { - NO_ERROR = 0, - PREPARE_SEIZE_ERROR = 1, - PREPARE_PAGES_SEIZE_ERROR = 2, - PREPARED_OPERATION_SEIZE_ERROR = 3, - DICT_TAB_INFO_ERROR = 4, - MISSING_PROPERTIES_SECTION = 5 - }; - - STATIC_CONST( SignalLength = 2 ); - - GET_SET_SENDERDATA - GET_SET_ERRORCODE -private: - Uint32 senderData; // MUST be no 1! - Uint32 errorCode; -}; - - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilRelease.hpp b/storage/ndb/include/kernel/signaldata/UtilRelease.hpp deleted file mode 100644 index 3c303496ddd..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilRelease.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_RELEASE_HPP -#define UTIL_PREPARE_HPP - -#include "SignalData.hpp" - -/** - * @class UtilReleaseReq - * @brief Release Prepared transaction in Util block - * - * Data format: - * - UTIL_PREPARE_RELEASE_REQ - */ -class UtilReleaseReq { - friend class DbUtil; - friend class Trix; -public: - STATIC_CONST( SignalLength = 2 ); - -private: - Uint32 senderData; // MUST be no 1! - Uint32 prepareId; -}; - - -/** - * @class UtilReleaseConf - * - * Data format: - * - UTIL_PREPARE_CONF - */ - -class UtilReleaseConf { - friend class DbUtil; - friend class Trix; - - STATIC_CONST( SignalLength = 1 ); - -private: - Uint32 senderData; // MUST be no 1! -}; - - -/** - * @class UtilReleaseRef - * - * Data format: - * - UTIL_PREPARE_RELEASE_REF - */ - -class UtilReleaseRef { - friend class DbUtil; - friend class Trix; - - enum ErrorCode { - NO_ERROR = 0, - NO_SUCH_PREPARE_SEIZED = 1 - }; - - STATIC_CONST( SignalLength = 3 ); - -private: - Uint32 senderData; // MUST be no 1! - Uint32 prepareId; - Uint32 errorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/UtilSequence.hpp b/storage/ndb/include/kernel/signaldata/UtilSequence.hpp deleted file mode 100644 index da6d12f140d..00000000000 --- a/storage/ndb/include/kernel/signaldata/UtilSequence.hpp +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UTIL_SEQUENCE_HPP -#define UTIL_SEQUENCE_HPP - -#include "SignalData.hpp" - -class UtilSequenceReq { - - /** - * Receiver - */ - friend class DbUtil; - - /** - * Sender - */ - friend class Backup; - friend class Suma; - - friend bool printUTIL_SEQUENCE_REQ(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 3 ); - - enum RequestType { - NextVal = 1, // Return uniq value - CurrVal = 2, // Read - Create = 3 // Create a sequence - }; -private: - Uint32 senderData; - Uint32 sequenceId; // Number of sequence variable - Uint32 requestType; -}; - -class UtilSequenceConf { - - /** - * Receiver - */ - friend class Backup; - friend class Suma; - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_SEQUENCE_CONF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - -private: - Uint32 senderData; - Uint32 sequenceId; - Uint32 requestType; - Uint32 sequenceValue[2]; -}; - -class UtilSequenceRef { - - /** - * Reciver - */ - friend class Backup; - friend class Suma; - /** - * Sender - */ - friend class DbUtil; - - friend bool printUTIL_SEQUENCE_REF(FILE *, const Uint32 *, Uint32, Uint16); -public: - STATIC_CONST( SignalLength = 5 ); - - enum ErrorCode { - NoSuchSequence = 1, - TCError = 2 - }; -private: - Uint32 senderData; - Uint32 sequenceId; - Uint32 requestType; - Uint32 errorCode; - Uint32 TCErrorCode; -}; - -#endif diff --git a/storage/ndb/include/kernel/signaldata/WaitGCP.hpp b/storage/ndb/include/kernel/signaldata/WaitGCP.hpp deleted file mode 100644 index 44851c2fa37..00000000000 --- a/storage/ndb/include/kernel/signaldata/WaitGCP.hpp +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef WAIT_GCP_HPP -#define WAIT_GCP_HPP - -/** - * This signal is sent by anyone to local DIH - * - * If local DIH is not master, it forwards it to master DIH - * and start acting as a proxy - * - */ -class WaitGCPReq { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Sender - */ - friend class Ndbcntr; - friend class Dbdict; - friend class Backup; - //friend class Grep::PSCoord; - -public: - STATIC_CONST( SignalLength = 3 ); -public: - enum RequestType { - Complete = 1, ///< Wait for a GCP to complete - CompleteForceStart = 2, ///< Wait for a GCP to complete start one if needed - CompleteIfRunning = 3, ///< Wait for ongoing GCP - CurrentGCI = 8, ///< Immediately return current GCI - BlockStartGcp = 9, - UnblockStartGcp = 10 - }; - - Uint32 senderRef; - Uint32 senderData; - Uint32 requestType; -}; - -class WaitGCPConf { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - friend class Dbdict; - friend class Backup; - //friend class Grep::PSCoord; - -public: - STATIC_CONST( SignalLength = 3 ); - -public: - Uint32 senderData; - Uint32 gcp; - Uint32 blockStatus; -}; - -class WaitGCPRef { - - /** - * Sender(s) / Reciver(s) - */ - friend class Dbdih; - - /** - * Reciver(s) - */ - friend class Ndbcntr; - friend class Dbdict; - friend class Backup; - friend class Grep; - -public: - STATIC_CONST( SignalLength = 2 ); - - enum ErrorCode { - StopOK = 0, - NF_CausedAbortOfProcedure = 1, - NoWaitGCPRecords = 2 - }; - -private: - Uint32 errorCode; - Uint32 senderData; -}; - -#endif diff --git a/storage/ndb/include/kernel/trigger_definitions.h b/storage/ndb/include/kernel/trigger_definitions.h deleted file mode 100644 index 331dbdd397a..00000000000 --- a/storage/ndb/include/kernel/trigger_definitions.h +++ /dev/null @@ -1,62 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_TRIGGER_DEFINITIONS_H -#define NDB_TRIGGER_DEFINITIONS_H - -#include -#include "ndb_limits.h" -#include - -#define ILLEGAL_TRIGGER_ID ((Uint32)(~0)) - -struct TriggerType { - enum Value { - //CONSTRAINT = 0, - SECONDARY_INDEX = DictTabInfo::HashIndexTrigger, - //FOREIGN_KEY = 2, - //SCHEMA_UPGRADE = 3, - //API_TRIGGER = 4, - //SQL_TRIGGER = 5, - SUBSCRIPTION = DictTabInfo::SubscriptionTrigger, - READ_ONLY_CONSTRAINT = DictTabInfo::ReadOnlyConstraint, - ORDERED_INDEX = DictTabInfo::IndexTrigger, - - SUBSCRIPTION_BEFORE = 9 // Only used by TUP/SUMA, should be REMOVED!! 
- }; -}; - -struct TriggerActionTime { - enum Value { - TA_BEFORE = 0, /* Immediate, before operation */ - TA_AFTER = 1, /* Immediate, after operation */ - TA_DEFERRED = 2, /* Before commit */ - TA_DETACHED = 3, /* After commit in a separate transaction, NYI */ - TA_CUSTOM = 4 /* Hardcoded per TriggerType */ - }; -}; - -struct TriggerEvent { - /** TableEvent must match 1 << TriggerEvent */ - enum Value { - TE_INSERT = 0, - TE_DELETE = 1, - TE_UPDATE = 2, - TE_CUSTOM = 3 /* Hardcoded per TriggerType */ - }; -}; - -#endif diff --git a/storage/ndb/include/logger/ConsoleLogHandler.hpp b/storage/ndb/include/logger/ConsoleLogHandler.hpp deleted file mode 100644 index 06cc4c308af..00000000000 --- a/storage/ndb/include/logger/ConsoleLogHandler.hpp +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CONSOLELOGHANDLER_H -#define CONSOLELOGHANDLER_H - -#include "LogHandler.hpp" - -/** - * Logs messages to the console/stdout. - * - * @see LogHandler - * @version #@ $Id: ConsoleLogHandler.hpp,v 1.2 2003/09/01 10:15:53 innpeno Exp $ - */ -class ConsoleLogHandler : public LogHandler -{ -public: - /** - * Default constructor. - */ - ConsoleLogHandler(); - /** - * Destructor. - */ - virtual ~ConsoleLogHandler(); - - virtual bool open(); - virtual bool close(); - - virtual bool setParam(const BaseString ¶m, const BaseString &value); - -protected: - virtual void writeHeader(const char* pCategory, Logger::LoggerLevel level); - virtual void writeMessage(const char* pMsg); - virtual void writeFooter(); - -private: - /** Prohibit*/ - ConsoleLogHandler(const ConsoleLogHandler&); - ConsoleLogHandler operator = (const ConsoleLogHandler&); - bool operator == (const ConsoleLogHandler&); - -}; -#endif diff --git a/storage/ndb/include/logger/FileLogHandler.hpp b/storage/ndb/include/logger/FileLogHandler.hpp deleted file mode 100644 index 20a242047d4..00000000000 --- a/storage/ndb/include/logger/FileLogHandler.hpp +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FILELOGHANDLER_H -#define FILELOGHANDLER_H - -#include "LogHandler.hpp" - -class File_class; - -/** - * Logs messages to a file. 
The log file will be archived depending on - * the file's size or after N number of log entries. - * There will be only a specified number of archived logs - * which will be "recycled". - * - * The archived log file will be named as .1..N. - * - * - * @see LogHandler - * @version #@ $Id: FileLogHandler.hpp,v 1.2 2003/09/01 10:15:53 innpeno Exp $ - */ -class FileLogHandler : public LogHandler -{ -public: - /** Max number of log files to archive. */ - STATIC_CONST( MAX_NO_FILES = 6 ); - /** Max file size of the log before archiving. */ - STATIC_CONST( MAX_FILE_SIZE = 1024000 ); - /** Max number of log entries before archiving. */ - STATIC_CONST( MAX_LOG_ENTRIES = 10000 ); - - /** - * Default constructor. - */ - FileLogHandler(); - - /** - * Creates a new file handler with the specified filename, - * max number of archived log files and max log size for each log. - * - * @param aFileName the log filename. - * @param maxNoFiles the maximum number of archived log files. - * @param maxFileSize the maximum log file size before archiving. - * @param maxLogEntries the maximum number of log entries before checking time to archive. - */ - FileLogHandler(const char* aFileName, - int maxNoFiles = MAX_NO_FILES, - long maxFileSize = MAX_FILE_SIZE, - unsigned int maxLogEntries = MAX_LOG_ENTRIES); - - /** - * Destructor. - */ - virtual ~FileLogHandler(); - - virtual bool open(); - virtual bool close(); - - virtual bool setParam(const BaseString ¶m, const BaseString &value); - virtual bool checkParams(); - -protected: - virtual void writeHeader(const char* pCategory, Logger::LoggerLevel level); - virtual void writeMessage(const char* pMsg); - virtual void writeFooter(); - -private: - /** Prohibit */ - FileLogHandler(const FileLogHandler&); - FileLogHandler operator = (const FileLogHandler&); - bool operator == (const FileLogHandler&); - - /** - * Returns true if it is time to create a new log file. - */ - bool isTimeForNewFile(); - - /** - * Archives the current log file and creates a new one. - * The archived log filename will be in the format of .N - * - * @return true if successful. - */ - bool createNewFile(); - - bool setFilename(const BaseString &filename); - bool setMaxSize(const BaseString &size); - bool setMaxFiles(const BaseString &files); - - int m_maxNoFiles; - off_t m_maxFileSize; - unsigned int m_maxLogEntries; - File_class* m_pLogFile; -}; - -#endif diff --git a/storage/ndb/include/logger/LogHandler.hpp b/storage/ndb/include/logger/LogHandler.hpp deleted file mode 100644 index 301e8780909..00000000000 --- a/storage/ndb/include/logger/LogHandler.hpp +++ /dev/null @@ -1,221 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LOGHANDLER_H -#define LOGHANDLER_H - -#include "Logger.hpp" - -/** - * This class is the base class for all log handlers. 
A log handler is - * responsible for formatting and writing log messages to a specific output. - * - * A log entry consists of three parts: a header, - * 09:17:37 2002-03-13 [MgmSrv] INFO -- Local checkpoint 13344 started. - * - * - * Header format: TIME&DATE CATEGORY LEVEL -- - * TIME&DATE = ctime() format. - * CATEGORY = Any string. - * LEVEL = ALERT to DEBUG (Log levels) - * - * Footer format: \n (currently only newline) - * - * @version #@ $Id: LogHandler.hpp,v 1.7 2003/09/01 10:15:53 innpeno Exp $ - */ -class LogHandler -{ -public: - /** - * Default constructor. - */ - LogHandler(); - - /** - * Destructor. - */ - virtual ~LogHandler(); - - /** - * Opens/initializes the log handler. - * - * @return true if successful. - */ - virtual bool open() = 0; - - /** - * Closes/free any allocated resources used by the log handler. - * - * @return true if successful. - */ - virtual bool close() = 0; - - /** - * Append a log message to the output stream/file whatever. - * append() will call writeHeader(), writeMessage() and writeFooter() for - * a child class and in that order. Append checks for repeated messages. - * append_impl() does not check for repeats. - * - * @param pCategory the category/name to tag the log entry with. - * @param level the log level. - * @param pMsg the log message. - */ - void append(const char* pCategory, Logger::LoggerLevel level, - const char* pMsg); - void append_impl(const char* pCategory, Logger::LoggerLevel level, - const char* pMsg); - - /** - * Returns a default formatted header. It currently has the - * follwing default format: '%H:%M:%S %Y-%m-%d [CATEGORY] LOGLEVEL --' - * - * @param pStr the header string to format. - * @param pCategory a category/name to tag the log entry with. - * @param level the log level. - * @return the header. - */ - const char* getDefaultHeader(char* pStr, const char* pCategory, - Logger::LoggerLevel level) const; - - /** - * Returns a default formatted footer. Currently only returns a newline. - * - * @return the footer. - */ - const char* getDefaultFooter() const; - - /** - * Returns the date and time format used by ctime(). - * - * @return the date and time format. - */ - const char* getDateTimeFormat() const; - - /** - * Sets the date and time format. It needs to have the same arguments - * a ctime(). - * - * @param pFormat the date and time format. - */ - void setDateTimeFormat(const char* pFormat); - - /** - * Returns the error code. - */ - int getErrorCode() const; - - /** - * Sets the error code. - * - * @param code the error code. - */ - void setErrorCode(int code); - - /** - * Returns the error string. - */ - char* getErrorStr(); - - /** - * Sets the error string. - * - * @param str the error string. - */ - void setErrorStr(const char* str); - - /** - * Parse logstring parameters - * - * @param params list of parameters, formatted as "param=value", - * entries separated by "," - * @return true on success, false on failure - */ - bool parseParams(const BaseString ¶ms); - - /** - * Sets a parameters. What parameters are accepted depends on the subclass. - * - * @param param name of parameter - * @param value value of parameter - */ - virtual bool setParam(const BaseString ¶m, const BaseString &value) = 0; - - /** - * Checks that all necessary parameters have been set. - * - * @return true if all parameters are correctly set, false otherwise - */ - virtual bool checkParams(); - -protected: - /** Max length of the date and time header in the log. 
*/ - STATIC_CONST( MAX_DATE_TIME_HEADER_LENGTH = 64 ); - /** Max length of the header the log. */ - STATIC_CONST( MAX_HEADER_LENGTH = 128 ); - /** Max lenght of footer in the log. */ - STATIC_CONST( MAX_FOOTER_LENGTH = 128 ); - - /** - * Write the header to the log. - * - * @param pCategory the category to tag the log with. - * @param level the log level. - */ - virtual void writeHeader(const char* category, Logger::LoggerLevel level) = 0; - - /** - * Write the message to the log. - * - * @param pMsg the message to log. - */ - virtual void writeMessage(const char* pMsg) = 0; - - /** - * Write the footer to the log. - * - */ - virtual void writeFooter() = 0; - -private: - /** - * Returns a string date and time string. - * @note does not update time, uses m_now as time - * @param pStr a string. - * @return a string with date and time. - */ - char* getTimeAsString(char* pStr) const; - time_t m_now; - - /** Prohibit */ - LogHandler(const LogHandler&); - LogHandler* operator = (const LogHandler&); - bool operator == (const LogHandler&); - - const char* m_pDateTimeFormat; - int m_errorCode; - char* m_errorStr; - - // for handling repeated messages - unsigned m_count_repeated_messages; - unsigned m_max_repeat_frequency; - time_t m_last_log_time; - char m_last_category[MAX_HEADER_LENGTH]; - char m_last_message[MAX_LOG_MESSAGE_SIZE]; - Logger::LoggerLevel m_last_level; -}; - -#endif diff --git a/storage/ndb/include/logger/Logger.hpp b/storage/ndb/include/logger/Logger.hpp deleted file mode 100644 index ac13a477256..00000000000 --- a/storage/ndb/include/logger/Logger.hpp +++ /dev/null @@ -1,301 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef Logger_H -#define Logger_H - -#include -#include - -#define MAX_LOG_MESSAGE_SIZE 1024 - -class LogHandler; -class LogHandlerList; - -/** - * Logger should be used whenver you need to log a message like - * general information or debug messages. By creating/adding different - * log handlers, a single log message can be sent to - * different outputs (stdout, file or syslog). - * - * Each log entry is created with a log level (or severity) which is - * used to identity the type of the entry, e.g., if it is a debug - * or an error message. - * - * Example of a log entry: - * - * 09:17:39 2002-03-13 [myLogger] INFO -- Local checkpoint started. - * - * HOW TO USE - * - * 1) Create a new instance of the Logger. - * - * Logger myLogger = new Logger(); - * - * 2) Add the log handlers that you want, i.e., where the log entries - * should be written/shown. - * - * myLogger->createConsoleHandler(); // Output to console/stdout - * myLogger->addHandler(new FileLogHandler("mylog.txt")); // use mylog.txt - * - * 3) Tag each log entry with a category/name. - * - * myLogger->setCategory("myLogger"); - * - * 4) Start log messages. 
- * - * myLogger->alert("T-9 to lift off"); - * myLogger->info("Here comes the sun, la la"); - * myLogger->debug("Why does this not work!!!, We should not be here...") - * - * 5) Log only debug messages. - * - * myLogger->enable(Logger::LL_DEBUG); - * - * 6) Log only ALERTS and ERRORS. - * - * myLogger->enable(Logger::LL_ERROR, Logger::LL_ALERT); - * - * 7) Do not log any messages. - * - * myLogger->disable(Logger::LL_ALL); - * - * - * LOG LEVELS (Matches the severity levels of syslog) - *
- *
- *  ALERT           A condition  that  should  be  corrected
- *                  immediately,  such as a corrupted system
- *                  database.
- *
- *  CRITICAL        Critical conditions, such as hard device
- *                  errors.
- *
- *  ERROR           Errors.
- *
- *  WARNING         Warning messages.
- *
- *  INFO            Informational messages.
- *
- *  DEBUG           Messages that contain  information  nor-
- *                  mally  of use only when debugging a pro-
- *                  gram.
- * 
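
[Editor's note] For orientation while reviewing this removal, here is a minimal sketch of how the Logger API documented above was typically used, following steps 1-7 from the comment. It is an illustration only, not part of the patch; it assumes the removed NDB headers are still available, and the FileLogHandler header path is an assumption based on the class name used in the comment's own example.

    // Sketch only: assumes the removed Logger.hpp / FileLogHandler headers.
    #include "Logger.hpp"
    #include "FileLogHandler.hpp"   // assumed header name for FileLogHandler

    void example_logging()
    {
      Logger myLogger;                                       // 1) create the logger
      myLogger.createConsoleHandler();                       // 2) log to stdout...
      myLogger.addHandler(new FileLogHandler("mylog.txt"));  //    ...and to a file
      myLogger.setCategory("myLogger");                      // 3) tag every entry

      myLogger.info("Local checkpoint started.");            // 4) write entries
      myLogger.alert("T-9 to lift off");

      myLogger.enable(Logger::LL_ERROR, Logger::LL_ALERT);   // 6) only errors/alerts
      myLogger.disable(Logger::LL_ALL);                      // 7) silence everything
    }
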
- * - * @version #@ $Id: Logger.hpp,v 1.7 2003/09/01 10:15:53 innpeno Exp $ - */ -class Logger -{ -public: - /** The log levels. NOTE: Could not use the name LogLevel since - * it caused conflicts with another class. - */ - enum LoggerLevel {LL_ON, LL_DEBUG, LL_INFO, LL_WARNING, LL_ERROR, - LL_CRITICAL, LL_ALERT, LL_ALL}; - - /** - * String representation of the the log levels. - */ - static const char* LoggerLevelNames[]; - - /** - * Default constructor. - */ - Logger(); - - /** - * Destructor. - */ - virtual ~Logger(); - - /** - * Set a category/name that each log entry will have. - * - * @param pCategory the category. - */ - void setCategory(const char* pCategory); - - /** - * Create a default handler that logs to the console/stdout. - * - * @return true if successful. - */ - bool createConsoleHandler(); - - /** - * Remove the default console handler. - */ - void removeConsoleHandler(); - - /** - * Create a default handler that logs to a file called logger.log. - * - * @return true if successful. - */ - bool createFileHandler(); - - /** - * Remove the default file handler. - */ - void removeFileHandler(); - - /** - * Create a default handler that logs to the syslog. - * - * @return true if successful. - */ - bool createSyslogHandler(); - - /** - * Remove the default syslog handler. - */ - void removeSyslogHandler(); - - /** - * Add a new log handler. - * - * @param pHandler a log handler. - * @return true if successful. - */ - bool addHandler(LogHandler* pHandler); - - /** - * Add a new handler - * - * @param logstring string describing the handler to add - * @param err OS errno in event of error - * @param len max length of errStr buffer - * @param errStr logger error string in event of error - */ - bool addHandler(const BaseString &logstring, int *err, int len, char* errStr); - - /** - * Remove a log handler. - * - * @param pHandler log handler to remove. - * @return true if successful. - */ - bool removeHandler(LogHandler* pHandler); - - /** - * Remove all log handlers. - */ - void removeAllHandlers(); - - /** - * Returns true if the specified log level is enabled. - * - * @return true if enabled. - */ - bool isEnable(LoggerLevel logLevel) const; - - /** - * Enable the specified log level. - * - * @param logLevel the loglevel to enable. - */ - void enable(LoggerLevel logLevel); - - /** - * Enable log levels. - * - * @param fromLogLevel enable from log level. - * @param toLogLevel enable to log level. - */ - void enable (LoggerLevel fromLogLevel, LoggerLevel toLogLevel); - - /** - * Disable log level. - * - * @param logLevel disable log level. - */ - void disable(LoggerLevel logLevel); - - /** - * Log an alert message. - * - * @param pMsg the message. - */ - virtual void alert(const char* pMsg, ...) const; - virtual void alert(BaseString &pMsg) const { alert(pMsg.c_str()); }; - - /** - * Log a critical message. - * - * @param pMsg the message. - */ - virtual void critical(const char* pMsg, ...) const; - virtual void critical(BaseString &pMsg) const { critical(pMsg.c_str()); }; - - /** - * Log an error message. - * - * @param pMsg the message. - */ - virtual void error(const char* pMsg, ...) const; - virtual void error(BaseString &pMsg) const { error(pMsg.c_str()); }; - - /** - * Log a warning message. - * - * @param pMsg the message. - */ - virtual void warning(const char* pMsg, ...) const; - virtual void warning(BaseString &pMsg) const { warning(pMsg.c_str()); }; - - /** - * Log an info message. - * - * @param pMsg the message. - */ - virtual void info(const char* pMsg, ...) 
const; - virtual void info(BaseString &pMsg) const { info(pMsg.c_str()); }; - - /** - * Log a debug message. - * - * @param pMsg the message. - */ - virtual void debug(const char* pMsg, ...) const; - virtual void debug(BaseString &pMsg) const { debug(pMsg.c_str()); }; - -protected: - - NdbMutex *m_mutex; - - void log(LoggerLevel logLevel, const char* msg, va_list ap) const; - -private: - /** Prohibit */ - Logger(const Logger&); - Logger operator = (const Logger&); - bool operator == (const Logger&); - - STATIC_CONST( MAX_LOG_LEVELS = 8 ); - - bool m_logLevels[MAX_LOG_LEVELS]; - - LogHandlerList* m_pHandlerList; - const char* m_pCategory; - - /* Default handlers */ - NdbMutex *m_handler_mutex; - LogHandler* m_pConsoleHandler; - LogHandler* m_pFileHandler; - LogHandler* m_pSyslogHandler; -}; - -#endif diff --git a/storage/ndb/include/logger/SysLogHandler.hpp b/storage/ndb/include/logger/SysLogHandler.hpp deleted file mode 100644 index a5bd90102d5..00000000000 --- a/storage/ndb/include/logger/SysLogHandler.hpp +++ /dev/null @@ -1,99 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SYSLOGHANDLER_H -#define SYSLOGHANDLER_H - -#include "LogHandler.hpp" -#ifndef NDB_WIN32 -#include -#endif - -/** - * Logs messages to syslog. The default identity is 'NDB'. - * See 'man 3 syslog'. - * - * It logs the following severity levels. - *
- *
- *  LOG_ALERT           A condition  that  should  be  corrected
- *                      immediately,  such as a corrupted system
- *                      database.
- *
- *  LOG_CRIT            Critical conditions, such as hard device
- *                      errors.
- *
- *  LOG_ERR             Errors.
- *
- *  LOG_WARNING         Warning messages.
- *
- *  LOG_INFO            Informational messages.
- *
- *  LOG_DEBUG           Messages that contain  information  nor-
- *                      mally  of use only when debugging a pro-
- *                      gram.
- * 
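
[Editor's note] Similarly, a hedged sketch of wiring the removed SysLogHandler into a Logger, using the two-argument constructor declared below. The identity string is illustrative; LOG_USER is the standard syslog facility from <syslog.h>, which this header pulls in on non-Windows builds.

    // Sketch only: assumes the removed Logger.hpp / SysLogHandler.hpp headers.
    #include <syslog.h>
    #include "Logger.hpp"
    #include "SysLogHandler.hpp"

    void example_syslog_logging()
    {
      Logger logger;
      logger.setCategory("NDB");
      // Route entries to syslog under an illustrative identity, LOG_USER facility.
      logger.addHandler(new SysLogHandler("ndb_mgmd", LOG_USER));
      logger.error("Hard device error detected");  // emitted at LOG_ERR severity
    }
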
- * - * @see LogHandler - * @version #@ $Id: SysLogHandler.hpp,v 1.2 2003/09/01 10:15:53 innpeno Exp $ - */ -class SysLogHandler : public LogHandler -{ -public: - /** - * Default constructor. - */ - SysLogHandler(); - - /** - * Create a new syslog handler with the specified identity. - * - * @param pIdentity a syslog identity. - * @param facility syslog facility, defaults to LOG_USER - */ - SysLogHandler(const char* pIdentity, int facility); - - /** - * Destructor. - */ - virtual ~SysLogHandler(); - - virtual bool open(); - virtual bool close(); - - virtual bool setParam(const BaseString ¶m, const BaseString &value); - bool setFacility(const BaseString &facility); - -protected: - virtual void writeHeader(const char* pCategory, Logger::LoggerLevel level); - virtual void writeMessage(const char* pMsg); - virtual void writeFooter(); - -private: - /** Prohibit*/ - SysLogHandler(const SysLogHandler&); - SysLogHandler operator = (const SysLogHandler&); - bool operator == (const SysLogHandler&); - - int m_severity; - const char* m_pCategory; - - /** Syslog identity for all log entries. */ - const char* m_pIdentity; - int m_facility; -}; - -#endif diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h deleted file mode 100644 index eef5199c4fa..00000000000 --- a/storage/ndb/include/mgmapi/mgmapi.h +++ /dev/null @@ -1,1186 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MGMAPI_H -#define MGMAPI_H - -#include "mgmapi_config_parameters.h" -#include "ndb_logevent.h" -#include "mgmapi_error.h" - -#define MGM_LOGLEVELS CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1 -#define NDB_MGM_MAX_LOGLEVEL 15 - -/** - * @section MySQL Cluster Management API - * - * The MySQL Cluster Management API (MGM API) is a C language API - * that is used for: - * - Starting and stopping database nodes (ndbd processes) - * - Starting and stopping Cluster backups - * - Controlling the NDB Cluster log - * - Performing other administrative tasks - * - * @section secMgmApiGeneral General Concepts - * - * Each MGM API function needs a management server handle - * of type @ref NdbMgmHandle. - * This handle is created by calling the function - * function ndb_mgm_create_handle() and freed by calling - * ndb_mgm_destroy_handle(). - * - * A function can return any of the following: - * -# An integer value, with - * a value of -1 indicating an error. - * -# A non-constant pointer value. A NULL value indicates an error; - * otherwise, the return value must be freed - * by the programmer - * -# A constant pointer value, with a NULL value indicating an error. - * The returned value should not be freed. - * - * Error conditions can be identified by using the appropriate - * error-reporting functions ndb_mgm_get_latest_error() and - * @ref ndb_mgm_error. 
- * - * Here is an example using the MGM API (without error handling for brevity's sake). - * @code - * NdbMgmHandle handle= ndb_mgm_create_handle(); - * ndb_mgm_connect(handle,0,0,0); - * struct ndb_mgm_cluster_state *state= ndb_mgm_get_status(handle); - * for(int i=0; i < state->no_of_nodes; i++) - * { - * struct ndb_mgm_node_state *node_state= &state->node_states[i]; - * printf("node with ID=%d ", node_state->node_id); - * if(node_state->version != 0) - * printf("connected\n"); - * else - * printf("not connected\n"); - * } - * free((void*)state); - * ndb_mgm_destroy_handle(&handle); - * @endcode - * - * @section secLogEvents Log Events - * - * The database nodes and management server(s) regularly and on specific - * occations report on various log events that occurs in the cluster. These - * log events are written to the cluster log. Optionally a mgmapi client - * may listen to these events by using the method ndb_mgm_listen_event(). - * Each log event belongs to a category, @ref ndb_mgm_event_category, and - * has a severity, @ref ndb_mgm_event_severity, associated with it. Each - * log event also has a level (0-15) associated with it. - * - * Which log events that come out is controlled with ndb_mgm_listen_event(), - * ndb_mgm_set_clusterlog_loglevel(), and - * ndb_mgm_set_clusterlog_severity_filter(). - * - * Below is an example of how to listen to events related to backup. - * - * @code - * int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; - * int fd = ndb_mgm_listen_event(handle, filter); - * @endcode - * - * - * @section secSLogEvents Structured Log Events - * - * The following steps are involved: - * - Create a NdbEventLogHandle using ndb_mgm_create_logevent_handle() - * - Wait and store log events using ndb_logevent_get_next() - * - The log event data is available in the struct ndb_logevent. The - * data which is specific to a particular event is stored in a union - * between structs so use ndb_logevent::type to decide which struct - * is valid. - * - * Sample code for listening to Backup related events. The availaable log - * events are listed in @ref ndb_logevent.h - * - * @code - * int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; - * NdbEventLogHandle le_handle= ndb_mgm_create_logevent_handle(handle, filter); - * struct ndb_logevent le; - * int r= ndb_logevent_get_next(le_handle,&le,0); - * if (r < 0) error - * else if (r == 0) no event - * - * switch (le.type) - * { - * case NDB_LE_BackupStarted: - * ... le.BackupStarted.starting_node; - * ... le.BackupStarted.backup_id; - * break; - * case NDB_LE_BackupFailedToStart: - * ... le.BackupFailedToStart.error; - * break; - * case NDB_LE_BackupCompleted: - * ... le.BackupCompleted.stop_gci; - * break; - * case NDB_LE_BackupAborted: - * ... le.BackupStarted.backup_id; - * break; - * default: - * break; - * } - * @endcode - */ - -/* - * @page ndb_logevent.h ndb_logevent.h - * @include ndb_logevent.h - */ - -/** @addtogroup MGM_C_API - * @{ - */ - -#include -#include -#include "ndb_logevent.h" -#include "mgmapi_config_parameters.h" - -#ifdef __cplusplus -extern "C" { -#endif - - /** - * The NdbMgmHandle. 
- */ - typedef struct ndb_mgm_handle * NdbMgmHandle; - - /** - * NDB Cluster node types - */ - enum ndb_mgm_node_type { - NDB_MGM_NODE_TYPE_UNKNOWN = -1 /** Node type not known*/ - ,NDB_MGM_NODE_TYPE_API /** An application (NdbApi) node */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = NODE_TYPE_API -#endif - ,NDB_MGM_NODE_TYPE_NDB /** A database node */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = NODE_TYPE_DB -#endif - ,NDB_MGM_NODE_TYPE_MGM /** A management server node */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = NODE_TYPE_MGM -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ,NDB_MGM_NODE_TYPE_MIN = 0 /** Min valid value*/ - ,NDB_MGM_NODE_TYPE_MAX = 3 /** Max valid value*/ -#endif - }; - - /** - * Database node status - */ - enum ndb_mgm_node_status { - /** Node status not known*/ - NDB_MGM_NODE_STATUS_UNKNOWN = 0, - /** No contact with node*/ - NDB_MGM_NODE_STATUS_NO_CONTACT = 1, - /** Has not run starting protocol*/ - NDB_MGM_NODE_STATUS_NOT_STARTED = 2, - /** Is running starting protocol*/ - NDB_MGM_NODE_STATUS_STARTING = 3, - /** Running*/ - NDB_MGM_NODE_STATUS_STARTED = 4, - /** Is shutting down*/ - NDB_MGM_NODE_STATUS_SHUTTING_DOWN = 5, - /** Is restarting*/ - NDB_MGM_NODE_STATUS_RESTARTING = 6, - /** Maintenance mode*/ - NDB_MGM_NODE_STATUS_SINGLEUSER = 7, - /** Resume mode*/ - NDB_MGM_NODE_STATUS_RESUME = 8, -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** Min valid value*/ - NDB_MGM_NODE_STATUS_MIN = 0, - /** Max valid value*/ - NDB_MGM_NODE_STATUS_MAX = 8 -#endif - }; - - /** - * Status of a node in the cluster. - * - * Sub-structure in enum ndb_mgm_cluster_state - * returned by ndb_mgm_get_status(). - * - * @note node_status, start_phase, - * dynamic_id - * and node_group are relevant only for database nodes, - * i.e. node_type == @ref NDB_MGM_NODE_TYPE_NDB. - */ - struct ndb_mgm_node_state { - /** NDB Cluster node ID*/ - int node_id; - /** Type of NDB Cluster node*/ - enum ndb_mgm_node_type node_type; - /** State of node*/ - enum ndb_mgm_node_status node_status; - /** Start phase. - * - * @note Start phase is only valid if the node_type is - * NDB_MGM_NODE_TYPE_NDB and the node_status is - * NDB_MGM_NODE_STATUS_STARTING - */ - int start_phase; - /** ID for heartbeats and master take-over (only valid for DB nodes) - */ - int dynamic_id; - /** Node group of node (only valid for DB nodes)*/ - int node_group; - /** Internal version number*/ - int version; - /** Number of times node has connected or disconnected to the - * management server - */ - int connect_count; - /** IP address of node when it connected to the management server. - * @note This value will be empty if the management server has restarted - * since the node last connected. - */ - char connect_address[ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - sizeof("000.000.000.000")+1 -#endif - ]; - }; - - /** - * State of all nodes in the cluster; returned from - * ndb_mgm_get_status() - */ - struct ndb_mgm_cluster_state { - /** Number of entries in the node_states array */ - int no_of_nodes; - /** An array with node_states*/ - struct ndb_mgm_node_state node_states[ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - 1 -#endif - ]; - }; - - /** - * Default reply from the server (reserved for future use) - */ - struct ndb_mgm_reply { - /** 0 if successful, otherwise error code. 
*/ - int return_code; - /** Error or reply message.*/ - char message[256]; - }; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Default information types - */ - enum ndb_mgm_info { - /** ?*/ - NDB_MGM_INFO_CLUSTER, - /** Cluster log*/ - NDB_MGM_INFO_CLUSTERLOG - }; - - /** - * Signal log modes - * (Used only in the development of NDB Cluster.) - */ - enum ndb_mgm_signal_log_mode { - /** Log receiving signals */ - NDB_MGM_SIGNAL_LOG_MODE_IN, - /** Log sending signals*/ - NDB_MGM_SIGNAL_LOG_MODE_OUT, - /** Log both sending/receiving*/ - NDB_MGM_SIGNAL_LOG_MODE_INOUT, - /** Log off*/ - NDB_MGM_SIGNAL_LOG_MODE_OFF - }; -#endif - - /***************************************************************************/ - /** - * @name Functions: Error Handling - * @{ - */ - - /** - * Get the most recent error associated with the management server whose handle - * is used as the value of handle. - * - * @param handle Management handle - * @return Latest error code - */ - int ndb_mgm_get_latest_error(const NdbMgmHandle handle); - - /** - * Get the most recent general error message associated with a handle - * - * @param handle Management handle. - * @return Latest error message - */ - const char * ndb_mgm_get_latest_error_msg(const NdbMgmHandle handle); - - /** - * Get the most recent error description associated with a handle - * - * The error description gives some additional information regarding - * the error message. - * - * @param handle Management handle. - * @return Latest error description - */ - const char * ndb_mgm_get_latest_error_desc(const NdbMgmHandle handle); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get the most recent internal source code error line associated with a handle - * - * @param handle Management handle. - * @return Latest internal source code line of latest error - * @deprecated - */ - int ndb_mgm_get_latest_error_line(const NdbMgmHandle handle); -#endif - - /** - * Set error stream - */ - void ndb_mgm_set_error_stream(NdbMgmHandle, FILE *); - - - /** @} *********************************************************************/ - /** - * @name Functions: Create/Destroy Management Server Handles - * @{ - */ - - /** - * Create a handle to a management server. - * - * @return A management handle
- * or NULL if no management handle could be created. - */ - NdbMgmHandle ndb_mgm_create_handle(); - - /** - * Destroy a management server handle. - * - * @param handle Management handle - */ - void ndb_mgm_destroy_handle(NdbMgmHandle * handle); - - /** - * Set a name of the handle. Name is reported in cluster log. - * - * @param handle Management handle - * @param name Name - */ - void ndb_mgm_set_name(NdbMgmHandle handle, const char *name); - - /** @} *********************************************************************/ - /** - * @name Functions: Connect/Disconnect Management Server - * @{ - */ - - /** - * Sets the connectstring for a management server - * - * @param handle Management handle - * @param connect_string Connect string to the management server, - * - * @return -1 on error. - * - * @code - * := [,][,] - * := nodeid= - * := [:] - * is an integer greater than 0 identifying a node in config.ini - * is an integer referring to a regular unix port - * is a string containing a valid network host address - * @endcode - */ - int ndb_mgm_set_connectstring(NdbMgmHandle handle, - const char *connect_string); - - /** - * Returns the number of management servers in the connect string - * (as set by ndb_mgm_set_connectstring()). This can be used - * to help work out how long the maximum amount of time that - * ndb_mgm_connect can take. - * - * @param handle Management handle - * - * @return < 0 on error - */ - int ndb_mgm_number_of_mgmd_in_connect_string(NdbMgmHandle handle); - - int ndb_mgm_set_configuration_nodeid(NdbMgmHandle handle, int nodeid); - int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle); - int ndb_mgm_get_connected_port(NdbMgmHandle handle); - const char *ndb_mgm_get_connected_host(NdbMgmHandle handle); - const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz); - - /** - * Set local bindaddress - * @param arg - Srting of form "host[:port]" - * @note must be called before connect - * @note Error on binding local address will not be reported until connect - * @return 0 on success - */ - int ndb_mgm_set_bindaddress(NdbMgmHandle, const char * arg); - - /** - * Gets the connectstring used for a connection - * - * @note This function returns the default connectstring if no call to - * ndb_mgm_set_connectstring() has been performed. Also, the - * returned connectstring may be formatted differently. - * - * @param handle Management handle - * @param buf Buffer to hold result - * @param buf_sz Size of buffer. - * - * @return connectstring (same as buf) - */ - const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz); - - /** - * DEPRICATED: use ndb_mgm_set_timeout instead. - * - * @param handle NdbMgmHandle - * @param seconds number of seconds - * @return non-zero on success - */ - int ndb_mgm_set_connect_timeout(NdbMgmHandle handle, unsigned int seconds); - - /** - * Sets the number of milliseconds for timeout of network operations - * Default is 60 seconds. - * Only increments of 1000 ms are supported. No function is gaurenteed - * to return in a fraction of a second. - * - * @param handle NdbMgmHandle - * @param timeout_ms number of milliseconds - * @return zero on success - */ - int ndb_mgm_set_timeout(NdbMgmHandle handle, unsigned int timeout_ms); - - /** - * Connects to a management server. Connectstring is set by - * ndb_mgm_set_connectstring(). - * - * The timeout value is for connect to each management server. 
- * Use ndb_mgm_number_of_mgmd_in_connect_string to work out - * the approximate maximum amount of time that could be spent in this - * function. - * - * @param handle Management handle. - * @param no_retries Number of retries to connect - * (0 means connect once). - * @param retry_delay_in_seconds - * How long to wait until retry is performed. - * @param verbose Make printout regarding connect retries. - * - * @return -1 on error. - */ - int ndb_mgm_connect(NdbMgmHandle handle, int no_retries, - int retry_delay_in_seconds, int verbose); - /** - * Return true if connected. - * - * @param handle Management handle - * @return 0 if not connected, non-zero if connected. - */ - int ndb_mgm_is_connected(NdbMgmHandle handle); - - /** - * Disconnects from a management server - * - * @param handle Management handle. - * @return -1 on error. - */ - int ndb_mgm_disconnect(NdbMgmHandle handle); - - /** - * Gets connection node ID - * - * @param handle Management handle - * - * @return Node ID; 0 indicates that no node ID has been - * specified - */ - int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle); - - /** - * Gets connection port - * - * @param handle Management handle - * - * @return port - */ - int ndb_mgm_get_connected_port(NdbMgmHandle handle); - - /** - * Gets connection host - * - * @param handle Management handle - * - * @return hostname - */ - const char *ndb_mgm_get_connected_host(NdbMgmHandle handle); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** @} *********************************************************************/ - /** - * @name Functions: Used to convert between different data formats - * @{ - */ - - /** - * Converts a string to an ndb_mgm_node_type value - * - * @param type Node type as string. - * @return NDB_MGM_NODE_TYPE_UNKNOWN if invalid string. - */ - enum ndb_mgm_node_type ndb_mgm_match_node_type(const char * type); - - /** - * Converts an ndb_mgm_node_type to a string - * - * @param type Node type. - * @return NULL if invalid ID. - */ - const char * ndb_mgm_get_node_type_string(enum ndb_mgm_node_type type); - - /** - * Converts an ndb_mgm_node_type to a alias string - * - * @param type Node type. - * @return NULL if the ID is invalid. - */ - const char * ndb_mgm_get_node_type_alias_string(enum ndb_mgm_node_type type, - const char **str); - - /** - * Converts a string to a ndb_mgm_node_status value - * - * @param status NDB node status string. - * @return NDB_MGM_NODE_STATUS_UNKNOWN if invalid string. - */ - enum ndb_mgm_node_status ndb_mgm_match_node_status(const char * status); - - /** - * Converts an ID to a string - * - * @param status NDB node status. - * @return NULL if invalid ID. - */ - const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status); - - const char * ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity); - ndb_mgm_event_category ndb_mgm_match_event_category(const char *); - const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category); -#endif - - /** @} *********************************************************************/ - /** - * @name Functions: Cluster status - * @{ - */ - - /** - * Gets status of the nodes in an NDB Cluster - * - * @note The caller must free the pointer returned by this function. - * - * @param handle Management handle. - * - * @return Cluster state (or NULL on error). 
- */ - struct ndb_mgm_cluster_state * ndb_mgm_get_status(NdbMgmHandle handle); - - /** @} *********************************************************************/ - /** - * @name Functions: Start/stop nodes - * @{ - */ - - /** - * Stops database nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to be stopped
- * 0: All database nodes in cluster
- * n: Stop the n node(s) specified in the - * array node_list - * @param node_list List of node IDs for database nodes to be stopped - * - * @return Number of nodes stopped (-1 on error) - * - * @note This function is equivalent - * to calling ndb_mgm_stop2(handle, no_of_nodes, node_list, 0) - */ - int ndb_mgm_stop(NdbMgmHandle handle, int no_of_nodes, - const int * node_list); - - /** - * Stops database nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to stop
- * 0: All database nodes in cluster
- * n: Stop the n node(s) specified in - * the array node_list - * @param node_list List of node IDs of database nodes to be stopped - * @param abort Don't perform graceful stop, - * but rather stop immediately - * - * @return Number of nodes stopped (-1 on error). - */ - int ndb_mgm_stop2(NdbMgmHandle handle, int no_of_nodes, - const int * node_list, int abort); - - /** - * Stops cluster nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to stop
- * -1: All database and management nodes
- * 0: All database nodes in cluster
- * n: Stop the n node(s) specified in - * the array node_list - * @param node_list List of node IDs of database nodes to be stopped - * @param abort Don't perform graceful stop, - * but rather stop immediately - * @param disconnect Returns true if you need to disconnect to apply - * the stop command (e.g. stopping the mgm server - * that handle is connected to) - * - * @return Number of nodes stopped (-1 on error). - */ - int ndb_mgm_stop3(NdbMgmHandle handle, int no_of_nodes, - const int * node_list, int abort, int *disconnect); - - - /** - * Restart database nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to restart
- * 0: All database nodes in cluster
- * n: Restart the n node(s) specified in the - * array node_list - * @param node_list List of node IDs of database nodes to be restarted - * - * @return Number of nodes restarted (-1 on error). - * - * @note This function is equivalent to calling - * ndb_mgm_restart2(handle, no_of_nodes, node_list, 0, 0, 0); - */ - int ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes, - const int * node_list); - - /** - * Restart database nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to be restarted:
- * 0: Restart all database nodes in the cluster
- * n: Restart the n node(s) specified in the - * array node_list - * @param node_list List of node IDs of database nodes to be restarted - * @param initial Remove filesystem from restarting node(s) - * @param nostart Don't actually start node(s) but leave them - * waiting for start command - * @param abort Don't perform graceful restart, - * but rather restart immediately - * - * @return Number of nodes stopped (-1 on error). - */ - int ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, - const int * node_list, int initial, - int nostart, int abort); - - /** - * Restart nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to be restarted:
- * 0: Restart all database nodes in the cluster
- * n: Restart the n node(s) specified in the - * array node_list - * @param node_list List of node IDs of database nodes to be restarted - * @param initial Remove filesystem from restarting node(s) - * @param nostart Don't actually start node(s) but leave them - * waiting for start command - * @param abort Don't perform graceful restart, - * but rather restart immediately - * @param disconnect Returns true if mgmapi client must disconnect from - * server to apply the requested operation. (e.g. - * restart the management server) - * - * - * @return Number of nodes stopped (-1 on error). - */ - int ndb_mgm_restart3(NdbMgmHandle handle, int no_of_nodes, - const int * node_list, int initial, - int nostart, int abort, int *disconnect); - - /** - * Start database nodes - * - * @param handle Management handle. - * @param no_of_nodes Number of database nodes to be started
- * 0: Start all database nodes in the cluster
- * n: Start the n node(s) specified in - * the array node_list - * @param node_list List of node IDs of database nodes to be started - * - * @return Number of nodes actually started (-1 on error). - * - * @note The nodes to be started must have been started with nostart(-n) - * argument. - * This means that the database node binary is started and - * waiting for a START management command which will - * actually enable the database node - */ - int ndb_mgm_start(NdbMgmHandle handle, - int no_of_nodes, - const int * node_list); - - /** @} *********************************************************************/ - /** - * @name Functions: Controlling Clusterlog output - * @{ - */ - - /** - * Filter cluster log severities - * - * @param handle NDB management handle. - * @param severity A cluster log severity to filter. - * @param enable set 1=enable o 0=disable - * @param reply Reply message. - * - * @return -1 on error. - */ - int ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle, - enum ndb_mgm_event_severity severity, - int enable, - struct ndb_mgm_reply* reply); - /** - * Get clusterlog severity filter - * - * @param handle NDB management handle - * - * @param loglevel A vector of seven (NDB_MGM_EVENT_SEVERITY_ALL) - * elements of struct ndb_mgm_severity, - * where each element contains - * 1 if a severity indicator is enabled and 0 if not. - * A severity level is stored at position - * ndb_mgm_clusterlog_level; - * for example the "error" level is stored in position - * [NDB_MGM_EVENT_SEVERITY_ERROR]. - * The first element [NDB_MGM_EVENT_SEVERITY_ON] in - * the vector signals whether the cluster log - * is disabled or enabled. - * @param severity_size The size of the vector (NDB_MGM_EVENT_SEVERITY_ALL) - * @return Number of returned severities or -1 on error - */ - int ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle, - struct ndb_mgm_severity* severity, - unsigned int severity_size); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get clusterlog severity filter - * - * @param handle NDB management handle - * - * @return A vector of seven elements, - * where each element contains - * 1 if a severity indicator is enabled and 0 if not. - * A severity level is stored at position - * ndb_mgm_clusterlog_level; - * for example the "error" level is stored in position - * [NDB_MGM_EVENT_SEVERITY_ERROR]. - * The first element [NDB_MGM_EVENT_SEVERITY_ON] in - * the vector signals - * whether the cluster log - * is disabled or enabled. - */ - const unsigned int *ndb_mgm_get_clusterlog_severity_filter_old(NdbMgmHandle handle); -#endif - - /** - * Set log category and levels for the cluster log - * - * @param handle NDB management handle. - * @param nodeId Node ID. - * @param category Event category. - * @param level Log level (0-15). - * @param reply Reply message. - * @return -1 on error. - */ - int ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle, - int nodeId, - enum ndb_mgm_event_category category, - int level, - struct ndb_mgm_reply* reply); - - /** - * get log category and levels - * - * @param handle NDB management handle. 
- * @param loglevel A vector of twelve (MGM_LOGLEVELS) elements - * of struct ndb_mgm_loglevel, - * where each element contains - * loglevel of corresponding category - * @param loglevel_size The size of the vector (MGM_LOGLEVELS) - * @return Number of returned loglevels or -1 on error - */ - int ndb_mgm_get_clusterlog_loglevel(NdbMgmHandle handle, - struct ndb_mgm_loglevel* loglevel, - unsigned int loglevel_size); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * get log category and levels - * - * @param handle NDB management handle. - * @return A vector of twelve elements, - * where each element contains - * loglevel of corresponding category - */ - const unsigned int *ndb_mgm_get_clusterlog_loglevel_old(NdbMgmHandle handle); -#endif - - - /** @} *********************************************************************/ - /** - * @name Functions: Listening to log events - * @{ - */ - - /** - * Listen to log events. They are read from the return file descriptor - * and the format is textual, and the same as in the cluster log. - * - * @param handle NDB management handle. - * @param filter pairs of { level, ndb_mgm_event_category } that will be - * pushed to fd, level=0 ends list. - * - * @return fd filedescriptor to read events from - */ - int ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Set log category and levels for the Node - * - * @param handle NDB management handle. - * @param nodeId Node ID. - * @param category Event category. - * @param level Log level (0-15). - * @param reply Reply message. - * @return -1 on error. - */ - int ndb_mgm_set_loglevel_node(NdbMgmHandle handle, - int nodeId, - enum ndb_mgm_event_category category, - int level, - struct ndb_mgm_reply* reply); -#endif - - /** - * The NdbLogEventHandle - */ - typedef struct ndb_logevent_handle * NdbLogEventHandle; - - /** - * Listen to log events. - * - * @param handle NDB management handle. - * @param filter pairs of { level, ndb_mgm_event_category } that will be - * pushed to fd, level=0 ends list. - * - * @return NdbLogEventHandle - */ - NdbLogEventHandle ndb_mgm_create_logevent_handle(NdbMgmHandle, - const int filter[]); - void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle*); - - /** - * Retrieve filedescriptor from NdbLogEventHandle. May be used in - * e.g. an application select() statement. - * - * @note Do not attemt to read from it, it will corrupt the parsing. - * - * @return filedescriptor, -1 on failure. - */ - int ndb_logevent_get_fd(const NdbLogEventHandle); - - /** - * Attempt to retrieve next log event and will fill in the supplied - * struct dst - * - * @param dst Pointer to struct to fill in event information - * @param timeout_in_milliseconds Timeout for waiting for event - * - * @return >0 if event exists, 0 no event (timed out), or -1 on error. - * - * @note Return value <=0 will leave dst untouched - */ - int ndb_logevent_get_next(const NdbLogEventHandle, - struct ndb_logevent *dst, - unsigned timeout_in_milliseconds); - - /** - * Retrieve laterst error code - * - * @return error code - */ - int ndb_logevent_get_latest_error(const NdbLogEventHandle); - - /** - * Retrieve laterst error message - * - * @return error message - */ - const char *ndb_logevent_get_latest_error_msg(const NdbLogEventHandle); - - - /** @} *********************************************************************/ - /** - * @name Functions: Backup - * @{ - */ - - /** - * Start backup - * - * @param handle NDB management handle. 
- * @param wait_completed 0: Don't wait for confirmation
- * 1: Wait for backup to be started
- * 2: Wait for backup to be completed - * @param backup_id Backup ID is returned from function. - * @param reply Reply message. - * @return -1 on error. - * @note backup_id will not be returned if - * wait_completed == 0 - */ - int ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed, - unsigned int* backup_id, - struct ndb_mgm_reply* reply); - - /** - * Abort backup - * - * @param handle NDB management handle. - * @param backup_id Backup ID. - * @param reply Reply message. - * @return -1 on error. - */ - int ndb_mgm_abort_backup(NdbMgmHandle handle, unsigned int backup_id, - struct ndb_mgm_reply* reply); - - - /** @} *********************************************************************/ - /** - * @name Functions: Single User Mode - * @{ - */ - - /** - * Enter Single user mode - * - * @param handle NDB management handle. - * @param nodeId Node ID of the single user node - * @param reply Reply message. - * @return -1 on error. - */ - int ndb_mgm_enter_single_user(NdbMgmHandle handle, unsigned int nodeId, - struct ndb_mgm_reply* reply); - - /** - * Exit Single user mode - * - * @param handle NDB management handle. - * @param reply Reply message. - * - * @return -1 on error. - */ - int ndb_mgm_exit_single_user(NdbMgmHandle handle, - struct ndb_mgm_reply* reply); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** @} *********************************************************************/ - /** - * @name Configuration handling - * @{ - */ - - /** - * Get configuration - * @param handle NDB management handle. - * @param version Version of configuration, 0 means latest - * (Currently this is the only supported value for this parameter) - * - * @return configuration - * - * @note The caller is responsible for calling ndb_mgm_destroy_configuration() - */ - struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle, - unsigned version); - void ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *); - - int ndb_mgm_alloc_nodeid(NdbMgmHandle handle, - unsigned version, int nodetype, int log_event); - - /** - * End Session - * - * This function tells the mgm server to free all resources associated with - * this connection. It will also close it. - * - * This differs from just disconnecting as we now synchronously clean up, - * so that a quickly restarting server that needs the same node id can - * get it when it restarts. - * - * @param handle NDB management handle - * @return 0 on success - * - * @note you still have to destroy the NdbMgmHandle. - */ - int ndb_mgm_end_session(NdbMgmHandle handle); - - /** - * ndb_mgm_get_fd - * - * get the file descriptor of the handle. - * INTERNAL ONLY. - * USE FOR TESTING. OTHER USES ARE NOT A GOOD IDEA. - * - * @param handle NDB management handle - * @return handle->socket - * - */ - int ndb_mgm_get_fd(NdbMgmHandle handle); - - /** - * Get the node id of the mgm server we're connected to - */ - Uint32 ndb_mgm_get_mgmd_nodeid(NdbMgmHandle handle); - - /** - * Get the version of the mgm server we're talking to. 
- * Designed to allow switching of protocol depending on version - * so that new clients can speak to old servers in a compat mode - */ - int ndb_mgm_get_version(NdbMgmHandle handle, - int *major, int *minor, int* build, - int len, char* str); - - - /** - * Config iterator - */ - typedef struct ndb_mgm_configuration_iterator ndb_mgm_configuration_iterator; - - ndb_mgm_configuration_iterator* ndb_mgm_create_configuration_iterator - (struct ndb_mgm_configuration *, unsigned type_of_section); - void ndb_mgm_destroy_iterator(ndb_mgm_configuration_iterator*); - - int ndb_mgm_first(ndb_mgm_configuration_iterator*); - int ndb_mgm_next(ndb_mgm_configuration_iterator*); - int ndb_mgm_valid(const ndb_mgm_configuration_iterator*); - int ndb_mgm_find(ndb_mgm_configuration_iterator*, - int param, unsigned value); - - int ndb_mgm_get_int_parameter(const ndb_mgm_configuration_iterator*, - int param, unsigned * value); - int ndb_mgm_get_int64_parameter(const ndb_mgm_configuration_iterator*, - int param, Uint64 * value); - int ndb_mgm_get_string_parameter(const ndb_mgm_configuration_iterator*, - int param, const char ** value); - int ndb_mgm_purge_stale_sessions(NdbMgmHandle handle, char **); - int ndb_mgm_check_connection(NdbMgmHandle handle); - - int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length); - - struct ndb_mgm_param_info - { - Uint32 m_id; - const char * m_name; - }; - int ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info, - size_t * size); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - enum ndb_mgm_clusterlog_level { - NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL = -1, - NDB_MGM_CLUSTERLOG_ON = 0, - NDB_MGM_CLUSTERLOG_DEBUG = 1, - NDB_MGM_CLUSTERLOG_INFO = 2, - NDB_MGM_CLUSTERLOG_WARNING = 3, - NDB_MGM_CLUSTERLOG_ERROR = 4, - NDB_MGM_CLUSTERLOG_CRITICAL = 5, - NDB_MGM_CLUSTERLOG_ALERT = 6, - NDB_MGM_CLUSTERLOG_ALL = 7 - }; - inline - int ndb_mgm_filter_clusterlog(NdbMgmHandle h, - enum ndb_mgm_clusterlog_level s, - int e, struct ndb_mgm_reply* r) - { return ndb_mgm_set_clusterlog_severity_filter(h,(ndb_mgm_event_severity)s, - e,r); } - struct ndb_mgm_severity { - enum ndb_mgm_event_severity category; - unsigned int value; - }; - - inline - const unsigned int * ndb_mgm_get_logfilter(NdbMgmHandle h) - { return ndb_mgm_get_clusterlog_severity_filter_old(h); } - - inline - int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n, - enum ndb_mgm_event_category c, - int l, struct ndb_mgm_reply* r) - { return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); } - - struct ndb_mgm_loglevel { - enum ndb_mgm_event_category category; - unsigned int value; - }; - - inline - const unsigned int * ndb_mgm_get_loglevel_clusterlog(NdbMgmHandle h) - { return ndb_mgm_get_clusterlog_loglevel_old(h); } - -#endif - -#ifdef __cplusplus -} -#endif - -/** @} */ - -#endif diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h deleted file mode 100644 index 4e6a42b25e3..00000000000 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ /dev/null @@ -1,208 +0,0 @@ -/* Copyright (C) 2004-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef MGMAPI_CONFIG_PARAMTERS_H -#define MGMAPI_CONFIG_PARAMTERS_H - -#define CFG_SYS_NAME 3 -#define CFG_SYS_PRIMARY_MGM_NODE 1 -#define CFG_SYS_CONFIG_GENERATION 2 -#define CFG_SYS_PORT_BASE 8 - -#define CFG_NODE_ID 3 -#define CFG_NODE_BYTE_ORDER 4 -#define CFG_NODE_HOST 5 -#define CFG_NODE_SYSTEM 6 -#define CFG_NODE_DATADIR 7 - -/** - * DB config parameters - */ -#define CFG_DB_NO_SAVE_MSGS 100 - -#define CFG_DB_NO_REPLICAS 101 -#define CFG_DB_NO_TABLES 102 -#define CFG_DB_NO_ATTRIBUTES 103 -#define CFG_DB_NO_INDEXES 104 -#define CFG_DB_NO_TRIGGERS 105 - -#define CFG_DB_NO_TRANSACTIONS 106 -#define CFG_DB_NO_OPS 107 -#define CFG_DB_NO_SCANS 108 -#define CFG_DB_NO_TRIGGER_OPS 109 -#define CFG_DB_NO_INDEX_OPS 110 - -#define CFG_DB_TRANS_BUFFER_MEM 111 -#define CFG_DB_DATA_MEM 112 -#define CFG_DB_INDEX_MEM 113 -#define CFG_DB_MEMLOCK 114 - -#define CFG_DB_START_PARTIAL_TIMEOUT 115 -#define CFG_DB_START_PARTITION_TIMEOUT 116 -#define CFG_DB_START_FAILURE_TIMEOUT 117 - -#define CFG_DB_HEARTBEAT_INTERVAL 118 -#define CFG_DB_API_HEARTBEAT_INTERVAL 119 -#define CFG_DB_LCP_INTERVAL 120 -#define CFG_DB_GCP_INTERVAL 121 -#define CFG_DB_ARBIT_TIMEOUT 122 - -#define CFG_DB_WATCHDOG_INTERVAL 123 -#define CFG_DB_STOP_ON_ERROR 124 - -#define CFG_DB_FILESYSTEM_PATH 125 -#define CFG_DB_NO_REDOLOG_FILES 126 -#define CFG_DB_REDOLOG_FILE_SIZE 140 - -#define CFG_DB_LCP_DISC_PAGES_TUP 127 -#define CFG_DB_LCP_DISC_PAGES_TUP_SR 128 -#define CFG_DB_LCP_DISC_PAGES_ACC 137 -#define CFG_DB_LCP_DISC_PAGES_ACC_SR 138 - -#define CFG_DB_TRANSACTION_CHECK_INTERVAL 129 -#define CFG_DB_TRANSACTION_INACTIVE_TIMEOUT 130 -#define CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT 131 - -#define CFG_DB_PARALLEL_BACKUPS 132 -#define CFG_DB_BACKUP_MEM 133 -#define CFG_DB_BACKUP_DATA_BUFFER_MEM 134 -#define CFG_DB_BACKUP_LOG_BUFFER_MEM 135 -#define CFG_DB_BACKUP_WRITE_SIZE 136 -#define CFG_DB_BACKUP_MAX_WRITE_SIZE 139 - -#define CFG_DB_WATCHDOG_INTERVAL_INITIAL 141 - -#define CFG_LOG_DESTINATION 147 - -#define CFG_DB_DISCLESS 148 - -#define CFG_DB_NO_ORDERED_INDEXES 149 -#define CFG_DB_NO_UNIQUE_HASH_INDEXES 150 -#define CFG_DB_NO_LOCAL_OPS 151 -#define CFG_DB_NO_LOCAL_SCANS 152 -#define CFG_DB_BATCH_SIZE 153 - -#define CFG_DB_UNDO_INDEX_BUFFER 154 -#define CFG_DB_UNDO_DATA_BUFFER 155 -#define CFG_DB_REDO_BUFFER 156 - -#define CFG_DB_LONG_SIGNAL_BUFFER 157 - -#define CFG_DB_BACKUP_DATADIR 158 - -#define CFG_DB_MAX_OPEN_FILES 159 -#define CFG_DB_DISK_PAGE_BUFFER_MEMORY 160 /* used from 5.1 */ -#define CFG_DB_STRING_MEMORY 161 /* used from 5.1 */ -#define CFG_DB_INITIAL_OPEN_FILES 162 /* used from 5.1 */ - -#define CFG_DB_DISK_PAGE_BUFFER_MEMORY 160 -#define CFG_DB_STRING_MEMORY 161 - -#define CFG_DB_DISK_SYNCH_SIZE 163 -#define CFG_DB_CHECKPOINT_SPEED 164 -#define CFG_DB_CHECKPOINT_SPEED_SR 165 - -#define CFG_DB_MEMREPORT_FREQUENCY 166 - -#define CFG_DB_O_DIRECT 168 - -#define CFG_DB_MAX_ALLOCATE 169 - -#define CFG_DB_SGA 198 /* super pool mem */ -#define CFG_DB_DATA_MEM_2 199 /* used in special build in 5.1 */ - -#define CFG_NODE_ARBIT_RANK 200 -#define CFG_NODE_ARBIT_DELAY 201 - -#define 
CFG_MIN_LOGLEVEL 250 -#define CFG_LOGLEVEL_STARTUP 250 -#define CFG_LOGLEVEL_SHUTDOWN 251 -#define CFG_LOGLEVEL_STATISTICS 252 -#define CFG_LOGLEVEL_CHECKPOINT 253 -#define CFG_LOGLEVEL_NODERESTART 254 -#define CFG_LOGLEVEL_CONNECTION 255 -#define CFG_LOGLEVEL_INFO 256 -#define CFG_LOGLEVEL_WARNING 257 -#define CFG_LOGLEVEL_ERROR 258 -#define CFG_LOGLEVEL_CONGESTION 259 -#define CFG_LOGLEVEL_DEBUG 260 -#define CFG_LOGLEVEL_BACKUP 261 -#define CFG_MAX_LOGLEVEL 261 - -#define CFG_MGM_PORT 300 - -#define CFG_CONNECTION_NODE_1 400 -#define CFG_CONNECTION_NODE_2 401 -#define CFG_CONNECTION_SEND_SIGNAL_ID 402 -#define CFG_CONNECTION_CHECKSUM 403 -#define CFG_CONNECTION_NODE_1_SYSTEM 404 -#define CFG_CONNECTION_NODE_2_SYSTEM 405 -#define CFG_CONNECTION_SERVER_PORT 406 -#define CFG_CONNECTION_HOSTNAME_1 407 -#define CFG_CONNECTION_HOSTNAME_2 408 -#define CFG_CONNECTION_GROUP 409 -#define CFG_CONNECTION_NODE_ID_SERVER 410 - -#define CFG_TCP_SERVER 452 -#define CFG_TCP_SEND_BUFFER_SIZE 454 -#define CFG_TCP_RECEIVE_BUFFER_SIZE 455 -#define CFG_TCP_PROXY 456 - -#define CFG_SHM_SEND_SIGNAL_ID 500 -#define CFG_SHM_CHECKSUM 501 -#define CFG_SHM_KEY 502 -#define CFG_SHM_BUFFER_MEM 503 -#define CFG_SHM_SIGNUM 504 - -#define CFG_SCI_HOST1_ID_0 550 -#define CFG_SCI_HOST1_ID_1 551 -#define CFG_SCI_HOST2_ID_0 552 -#define CFG_SCI_HOST2_ID_1 553 -#define CFG_SCI_SEND_LIMIT 554 -#define CFG_SCI_BUFFER_MEM 555 - -#define CFG_602 602 // Removed: was OSE -#define CFG_603 603 // Removed: was OSE -#define CFG_604 604 // Removed: was OSE - -/** - * API Config variables - * - */ -#define CFG_MAX_SCAN_BATCH_SIZE 800 -#define CFG_BATCH_BYTE_SIZE 801 -#define CFG_BATCH_SIZE 802 - -/** - * Internal - */ -#define CFG_DB_STOP_ON_ERROR_INSERT 1 - -#define CFG_TYPE_OF_SECTION 999 -#define CFG_SECTION_SYSTEM 1000 -#define CFG_SECTION_NODE 2000 -#define CFG_SECTION_CONNECTION 3000 - -#define NODE_TYPE_DB 0 -#define NODE_TYPE_API 1 -#define NODE_TYPE_MGM 2 - -#define CONNECTION_TYPE_TCP 0 -#define CONNECTION_TYPE_SHM 1 -#define CONNECTION_TYPE_SCI 2 -#define CONNECTION_TYPE_OSE 3 // Removed. - -#endif diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h deleted file mode 100644 index b384b967e21..00000000000 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters_debug.h +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef MGMAPI_CONFIG_PARAMTERS_DEBUG_H -#define MGMAPI_CONFIG_PARAMTERS_DEBUG_H - -#include "mgmapi_config_parameters.h" - -#define CFG_DB_STOP_ON_ERROR_INSERT 1 - -#endif diff --git a/storage/ndb/include/mgmapi/mgmapi_debug.h b/storage/ndb/include/mgmapi/mgmapi_debug.h deleted file mode 100644 index daedbdc7160..00000000000 --- a/storage/ndb/include/mgmapi/mgmapi_debug.h +++ /dev/null @@ -1,154 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MGMAPI_DEBUG_H -#define MGMAPI_DEBUG_H - -#ifdef __cplusplus -extern "C" { -#endif - - /** - * Start signal logging. - * - * @param handle the NDB management handle. - * @param nodeId the node Id. - * @param reply the reply message. - * @return 0 if successful. - */ - int ndb_mgm_start_signallog(NdbMgmHandle handle, - int nodeId, - struct ndb_mgm_reply* reply); - - /** - * Stop signal logging. - * - * @param handle the NDB management handle. - * @param nodeId the node Id. - * @param reply the reply message. - * @return 0 if successful. - */ - int ndb_mgm_stop_signallog(NdbMgmHandle handle, - int nodeId, - struct ndb_mgm_reply* reply); - - /** - * Set the signals to log. - * - * @param handle the NDB management handle. - * @param nodeId the node id. - * @param mode the signal log mode. - * @param blockNames the block names (space separated). - * @param reply the reply message. - * @return 0 if successful or an error code. - */ - int ndb_mgm_log_signals(NdbMgmHandle handle, - int nodeId, - enum ndb_mgm_signal_log_mode mode, - const char* blockNames, - struct ndb_mgm_reply* reply); - - /** - * Set trace. - * - * @param handle the NDB management handle. - * @param nodeId the node id. - * @param traceNumber the trace number. - * @param reply the reply message. - * @return 0 if successful or an error code. - */ - int ndb_mgm_set_trace(NdbMgmHandle handle, - int nodeId, - int traceNumber, - struct ndb_mgm_reply* reply); - - /** - * Provoke an error. - * - * @param handle the NDB management handle. - * @param nodeId the node id. - * @param errrorCode the errorCode. - * @param reply the reply message. - * @return 0 if successful or an error code. - */ - int ndb_mgm_insert_error(NdbMgmHandle handle, - int nodeId, - int errorCode, - struct ndb_mgm_reply* reply); - - /** - * Dump state - * - * @param handle the NDB management handle. - * @param nodeId the node id. - * @param args integer array - * @param number of args in int array - * @param reply the reply message. - * @return 0 if successful or an error code. 
- */ - int ndb_mgm_dump_state(NdbMgmHandle handle, - int nodeId, - const int * args, - int num_args, - struct ndb_mgm_reply* reply); - - - /** - * - * @param handle the NDB management handle. - * @param nodeId the node id. 0 = all db nodes - * @param errrorCode the errorCode. - * @param reply the reply message. - * @return 0 if successful or an error code. - */ - int ndb_mgm_set_int_parameter(NdbMgmHandle handle, - int node, - int param, - unsigned value, - struct ndb_mgm_reply* reply); - - int ndb_mgm_set_int64_parameter(NdbMgmHandle handle, - int node, - int param, - unsigned long long value, - struct ndb_mgm_reply* reply); - - int ndb_mgm_set_string_parameter(NdbMgmHandle handle, - int node, - int param, - const char * value, - struct ndb_mgm_reply* reply); - - Uint64 ndb_mgm_get_session_id(NdbMgmHandle handle); - - struct NdbMgmSession { - Uint64 id; - Uint32 m_stopSelf; - Uint32 m_stop; - Uint32 nodeid; - Uint32 parser_buffer_len; - Uint32 parser_status; - }; - - int ndb_mgm_get_session(NdbMgmHandle handle, Uint64 id, - struct NdbMgmSession *s, int *len); - -#ifdef __cplusplus -} -#endif - - -#endif diff --git a/storage/ndb/include/mgmapi/mgmapi_error.h b/storage/ndb/include/mgmapi/mgmapi_error.h deleted file mode 100644 index f732eeff51c..00000000000 --- a/storage/ndb/include/mgmapi/mgmapi_error.h +++ /dev/null @@ -1,122 +0,0 @@ -/* Copyright (c) 2003, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MGMAPI_ERROR_H -#define MGMAPI_ERROR_H - -#ifdef __cplusplus -extern "C" { -#endif - /** - * Error codes - */ - enum ndb_mgm_error { - /** Not an error */ - NDB_MGM_NO_ERROR = 0, - - /* Request for service errors */ - /** Supplied connectstring is illegal */ - NDB_MGM_ILLEGAL_CONNECT_STRING = 1001, - /** Supplied NdbMgmHandle is illegal */ - NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005, - /** Illegal reply from server */ - NDB_MGM_ILLEGAL_SERVER_REPLY = 1006, - /** Illegal number of nodes */ - NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007, - /** Illegal node status */ - NDB_MGM_ILLEGAL_NODE_STATUS = 1008, - /** Memory allocation error */ - NDB_MGM_OUT_OF_MEMORY = 1009, - /** Management server not connected */ - NDB_MGM_SERVER_NOT_CONNECTED = 1010, - /** Could not connect to socker */ - NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011, - /** Could not bind local address */ - NDB_MGM_BIND_ADDRESS = 1012, - - /* Alloc node id failures */ - /** Generic error, retry may succeed */ - NDB_MGM_ALLOCID_ERROR = 1101, - /** Non retriable error */ - NDB_MGM_ALLOCID_CONFIG_MISMATCH = 1102, - - /* Service errors - Start/Stop Node or System */ - /** Start failed */ - NDB_MGM_START_FAILED = 2001, - /** Stop failed */ - NDB_MGM_STOP_FAILED = 2002, - /** Restart failed */ - NDB_MGM_RESTART_FAILED = 2003, - - /* Service errors - Backup */ - /** Unable to start backup */ - NDB_MGM_COULD_NOT_START_BACKUP = 3001, - /** Unable to abort backup */ - NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002, - - /* Service errors - Single User Mode */ - /** Unable to enter single user mode */ - NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001, - /** Unable to exit single user mode */ - NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002, - - /* Usage errors */ - /** Usage error */ - NDB_MGM_USAGE_ERROR = 5001 - }; - struct Ndb_Mgm_Error_Msg { - enum ndb_mgm_error code; - const char * msg; - }; - const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = { - { NDB_MGM_NO_ERROR, "No error" }, - - /* Request for service errors */ - { NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" }, - { NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" }, - { NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" }, - { NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" }, - { NDB_MGM_ILLEGAL_NODE_STATUS, "Illegal node status" }, - { NDB_MGM_OUT_OF_MEMORY, "Out of memory" }, - { NDB_MGM_SERVER_NOT_CONNECTED, "Management server not connected" }, - { NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, "Could not connect to socket" }, - - /* Service errors - Start/Stop Node or System */ - { NDB_MGM_START_FAILED, "Start failed" }, - { NDB_MGM_STOP_FAILED, "Stop failed" }, - { NDB_MGM_RESTART_FAILED, "Restart failed" }, - - /* Service errors - Backup */ - { NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" }, - { NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" }, - - /* Service errors - Single User Mode */ - { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE, - "Could not enter single user mode" }, - { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE, - "Could not exit single user mode" }, - - /* Usage errors */ - { NDB_MGM_USAGE_ERROR, - "Usage error" } - }; - const int ndb_mgm_noOfErrorMsgs = - sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/mgmapi/ndb_logevent.h 
b/storage/ndb/include/mgmapi/ndb_logevent.h deleted file mode 100644 index 189874b8e21..00000000000 --- a/storage/ndb/include/mgmapi/ndb_logevent.h +++ /dev/null @@ -1,706 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_LOGEVENT_H -#define NDB_LOGEVENT_H - -/** @addtogroup MGM_C_API - * @{ - */ - -#include "mgmapi_config_parameters.h" - -#ifdef __cplusplus -extern "C" { -#endif - - /** - * Available log events grouped by @ref ndb_mgm_event_category - */ - - enum Ndb_logevent_type { - - NDB_LE_ILLEGAL_TYPE = -1, - - /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ - NDB_LE_Connected = 0, - /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ - NDB_LE_Disconnected = 1, - /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ - NDB_LE_CommunicationClosed = 2, - /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ - NDB_LE_CommunicationOpened = 3, - /** NDB_MGM_EVENT_CATEGORY_CONNECTION */ - NDB_LE_ConnectedApiVersion = 51, - - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_GlobalCheckpointStarted = 4, - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_GlobalCheckpointCompleted = 5, - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_LocalCheckpointStarted = 6, - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_LocalCheckpointCompleted = 7, - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_LCPStoppedInCalcKeepGci = 8, - /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */ - NDB_LE_LCPFragmentCompleted = 9, - - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStartStarted = 10, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStartCompleted = 11, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_STTORRYRecieved = 12, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_StartPhaseCompleted = 13, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_CM_REGCONF = 14, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_CM_REGREF = 15, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_FIND_NEIGHBOURS = 16, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStopStarted = 17, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStopCompleted = 53, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStopForced = 59, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_NDBStopAborted = 18, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_StartREDOLog = 19, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_StartLog = 20, - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_UNDORecordsExecuted = 21, - - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NR_CopyDict = 22, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NR_CopyDistr = 23, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NR_CopyFragsStarted = 24, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NR_CopyFragDone = 25, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NR_CopyFragsCompleted = 26, - - /* NODEFAIL */ - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - 
NDB_LE_NodeFailCompleted = 27, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_NODE_FAILREP = 28, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_ArbitState = 29, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_ArbitResult = 30, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_GCP_TakeoverStarted = 31, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_GCP_TakeoverCompleted = 32, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_LCP_TakeoverStarted = 33, - /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */ - NDB_LE_LCP_TakeoverCompleted = 34, - - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_TransReportCounters = 35, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_OperationReportCounters = 36, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_TableCreated = 37, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_UndoLogBlocked = 38, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_JobStatistic = 39, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_SendBytesStatistic = 40, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_ReceiveBytesStatistic = 41, - /** NDB_MGM_EVENT_CATEGORY_STATISTIC */ - NDB_LE_MemoryUsage = 50, - - /** NDB_MGM_EVENT_CATEGORY_ERROR */ - NDB_LE_TransporterError = 42, - /** NDB_MGM_EVENT_CATEGORY_ERROR */ - NDB_LE_TransporterWarning = 43, - /** NDB_MGM_EVENT_CATEGORY_ERROR */ - NDB_LE_MissedHeartbeat = 44, - /** NDB_MGM_EVENT_CATEGORY_ERROR */ - NDB_LE_DeadDueToHeartbeat = 45, - /** NDB_MGM_EVENT_CATEGORY_ERROR */ - NDB_LE_WarningEvent = 46, - - /** NDB_MGM_EVENT_CATEGORY_INFO */ - NDB_LE_SentHeartbeat = 47, - /** NDB_MGM_EVENT_CATEGORY_INFO */ - NDB_LE_CreateLogBytes = 48, - /** NDB_MGM_EVENT_CATEGORY_INFO */ - NDB_LE_InfoEvent = 49, - - /* 50 used */ - /* 51 used */ - - /* SINGLE USER */ - NDB_LE_SingleUser = 52, - /* 53 used */ - - /** NDB_MGM_EVENT_CATEGORY_BACKUP */ - NDB_LE_BackupStarted = 54, - /** NDB_MGM_EVENT_CATEGORY_BACKUP */ - NDB_LE_BackupFailedToStart = 55, - /** NDB_MGM_EVENT_CATEGORY_BACKUP */ - NDB_LE_BackupCompleted = 56, - /** NDB_MGM_EVENT_CATEGORY_BACKUP */ - NDB_LE_BackupAborted = 57, - - /** NDB_MGM_EVENT_CATEGORY_INFO */ - NDB_LE_EventBufferStatus = 58, - - /* 59 used */ - - /** NDB_MGM_EVENT_CATEGORY_STARTUP */ - NDB_LE_StartReport = 60 - - /* 60 unused */ - /* 61 unused */ - /* 62 unused */ - - }; - - /** - * Log event severities (used to filter the cluster log, - * ndb_mgm_set_clusterlog_severity_filter(), and filter listening to events - * ndb_mgm_listen_event()) - */ - enum ndb_mgm_event_severity { - NDB_MGM_ILLEGAL_EVENT_SEVERITY = -1, - /* Must be a nonnegative integer (used for array indexing) */ - /** Cluster log on */ - NDB_MGM_EVENT_SEVERITY_ON = 0, - /** Used in NDB Cluster developement */ - NDB_MGM_EVENT_SEVERITY_DEBUG = 1, - /** Informational messages*/ - NDB_MGM_EVENT_SEVERITY_INFO = 2, - /** Conditions that are not error condition, but might require handling. - */ - NDB_MGM_EVENT_SEVERITY_WARNING = 3, - /** Conditions that, while not fatal, should be corrected. 
*/ - NDB_MGM_EVENT_SEVERITY_ERROR = 4, - /** Critical conditions, like device errors or out of resources */ - NDB_MGM_EVENT_SEVERITY_CRITICAL = 5, - /** A condition that should be corrected immediately, - * such as a corrupted system - */ - NDB_MGM_EVENT_SEVERITY_ALERT = 6, - /* must be next number, works as bound in loop */ - /** All severities */ - NDB_MGM_EVENT_SEVERITY_ALL = 7 - }; - - /** - * Log event categories, used to set filter level on the log events using - * ndb_mgm_set_clusterlog_loglevel() and ndb_mgm_listen_event() - */ - enum ndb_mgm_event_category { - /** - * Invalid log event category - */ - NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, - /** - * Log events during all kinds of startups - */ - NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP, - /** - * Log events during shutdown - */ - NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN, - /** - * Statistics log events - */ - NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS, - /** - * Log events related to checkpoints - */ - NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT, - /** - * Log events during node restart - */ - NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART, - /** - * Log events related to connections between cluster nodes - */ - NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION, - /** - * Backup related log events - */ - NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP, - /** - * Congestion related log events - */ - NDB_MGM_EVENT_CATEGORY_CONGESTION = CFG_LOGLEVEL_CONGESTION, -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Loglevel debug - */ - NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG, -#endif - /** - * Uncategorized log events (severity info) - */ - NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO, - /** - * Uncategorized log events (severity warning or higher) - */ - NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR, -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL, - NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL -#endif - }; - - struct ndb_logevent_Connected { - unsigned node; - }; - - struct ndb_logevent_Disconnected { - unsigned node; - }; - - struct ndb_logevent_CommunicationClosed { - unsigned node; - }; - - struct ndb_logevent_CommunicationOpened { - unsigned node; - }; - - struct ndb_logevent_ConnectedApiVersion { - unsigned node; - unsigned version; - }; - - /* CHECKPOINT */ - struct ndb_logevent_GlobalCheckpointStarted { - unsigned gci; - }; - struct ndb_logevent_GlobalCheckpointCompleted { - unsigned gci; - }; - struct ndb_logevent_LocalCheckpointStarted { - unsigned lci; - unsigned keep_gci; - unsigned restore_gci; - }; - struct ndb_logevent_LocalCheckpointCompleted { - unsigned lci; - }; - struct ndb_logevent_LCPStoppedInCalcKeepGci { - unsigned data; - }; - struct ndb_logevent_LCPFragmentCompleted { - unsigned node; - unsigned table_id; - unsigned fragment_id; - }; - struct ndb_logevent_UndoLogBlocked { - unsigned acc_count; - unsigned tup_count; - }; - - /* STARTUP */ - struct ndb_logevent_NDBStartStarted { - unsigned version; - }; - struct ndb_logevent_NDBStartCompleted { - unsigned version; - }; - struct ndb_logevent_STTORRYRecieved { - }; - struct ndb_logevent_StartPhaseCompleted { - unsigned phase; - unsigned starttype; - }; - struct ndb_logevent_CM_REGCONF { - unsigned own_id; - unsigned president_id; - unsigned dynamic_id; - }; - struct ndb_logevent_CM_REGREF { - unsigned own_id; - unsigned other_id; - unsigned cause; - }; - struct ndb_logevent_FIND_NEIGHBOURS { - unsigned own_id; - unsigned 
left_id; - unsigned right_id; - unsigned dynamic_id; - }; - struct ndb_logevent_NDBStopStarted { - unsigned stoptype; - }; - struct ndb_logevent_NDBStopCompleted { - unsigned action; - unsigned signum; - }; - struct ndb_logevent_NDBStopForced { - unsigned action; - unsigned signum; - unsigned error; - unsigned sphase; - unsigned extra; - }; - struct ndb_logevent_NDBStopAborted { - }; - struct ndb_logevent_StartREDOLog { - unsigned node; - unsigned keep_gci; - unsigned completed_gci; - unsigned restorable_gci; - }; - struct ndb_logevent_StartLog { - unsigned log_part; - unsigned start_mb; - unsigned stop_mb; - unsigned gci; - }; - struct ndb_logevent_UNDORecordsExecuted { - unsigned block; - unsigned data1; - unsigned data2; - unsigned data3; - unsigned data4; - unsigned data5; - unsigned data6; - unsigned data7; - unsigned data8; - unsigned data9; - unsigned data10; - }; - - /* NODERESTART */ - struct ndb_logevent_NR_CopyDict { - }; - struct ndb_logevent_NR_CopyDistr { - }; - struct ndb_logevent_NR_CopyFragsStarted { - unsigned dest_node; - }; - struct ndb_logevent_NR_CopyFragDone { - unsigned dest_node; - unsigned table_id; - unsigned fragment_id; - }; - struct ndb_logevent_NR_CopyFragsCompleted { - unsigned dest_node; - }; - - struct ndb_logevent_NodeFailCompleted { - unsigned block; /* 0 = all */ - unsigned failed_node; - unsigned completing_node; /* 0 = all */ - }; - struct ndb_logevent_NODE_FAILREP { - unsigned failed_node; - unsigned failure_state; - }; - struct ndb_logevent_ArbitState { - unsigned code; /* code & state << 16 */ - unsigned arbit_node; - unsigned ticket_0; - unsigned ticket_1; - /* TODO */ - }; - struct ndb_logevent_ArbitResult { - unsigned code; /* code & state << 16 */ - unsigned arbit_node; - unsigned ticket_0; - unsigned ticket_1; - /* TODO */ - }; - struct ndb_logevent_GCP_TakeoverStarted { - }; - struct ndb_logevent_GCP_TakeoverCompleted { - }; - struct ndb_logevent_LCP_TakeoverStarted { - }; - struct ndb_logevent_LCP_TakeoverCompleted { - unsigned state; - }; - - /* STATISTIC */ - struct ndb_logevent_TransReportCounters { - unsigned trans_count; - unsigned commit_count; - unsigned read_count; - unsigned simple_read_count; - unsigned write_count; - unsigned attrinfo_count; - unsigned conc_op_count; - unsigned abort_count; - unsigned scan_count; - unsigned range_scan_count; - }; - struct ndb_logevent_OperationReportCounters { - unsigned ops; - }; - struct ndb_logevent_TableCreated { - unsigned table_id; - }; - struct ndb_logevent_JobStatistic { - unsigned mean_loop_count; - }; - struct ndb_logevent_SendBytesStatistic { - unsigned to_node; - unsigned mean_sent_bytes; - }; - struct ndb_logevent_ReceiveBytesStatistic { - unsigned from_node; - unsigned mean_received_bytes; - }; - struct ndb_logevent_MemoryUsage { - int gth; - /* union is for compatibility backward. 
- * page_size_kb member variable should be removed in the future - */ - union { - unsigned page_size_kb; - unsigned page_size_bytes; - }; - unsigned pages_used; - unsigned pages_total; - unsigned block; - }; - - /* ERROR */ - struct ndb_logevent_TransporterError { - unsigned to_node; - unsigned code; - }; - struct ndb_logevent_TransporterWarning { - unsigned to_node; - unsigned code; - }; - struct ndb_logevent_MissedHeartbeat { - unsigned node; - unsigned count; - }; - struct ndb_logevent_DeadDueToHeartbeat { - unsigned node; - }; - struct ndb_logevent_WarningEvent { - /* TODO */ - }; - - /* INFO */ - struct ndb_logevent_SentHeartbeat { - unsigned node; - }; - struct ndb_logevent_CreateLogBytes { - unsigned node; - }; - struct ndb_logevent_InfoEvent { - /* TODO */ - }; - struct ndb_logevent_EventBufferStatus { - unsigned usage; - unsigned alloc; - unsigned max; - unsigned apply_gci_l; - unsigned apply_gci_h; - unsigned latest_gci_l; - unsigned latest_gci_h; - }; - - /** Log event data for @ref NDB_LE_BackupStarted */ - struct ndb_logevent_BackupStarted { - unsigned starting_node; - unsigned backup_id; - }; - /** Log event data @ref NDB_LE_BackupFailedToStart */ - struct ndb_logevent_BackupFailedToStart { - unsigned starting_node; - unsigned error; - }; - /** Log event data @ref NDB_LE_BackupCompleted */ - struct ndb_logevent_BackupCompleted { - unsigned starting_node; - unsigned backup_id; - unsigned start_gci; - unsigned stop_gci; - unsigned n_records; - unsigned n_log_records; - unsigned n_bytes; - unsigned n_log_bytes; - }; - /** Log event data @ref NDB_LE_BackupAborted */ - struct ndb_logevent_BackupAborted { - unsigned starting_node; - unsigned backup_id; - unsigned error; - }; - /** Log event data @ref NDB_LE_SingleUser */ - struct ndb_logevent_SingleUser { - unsigned type; - unsigned node_id; - }; - /** Log even data @ref NDB_LE_StartReport */ - struct ndb_logevent_StartReport { - unsigned report_type; - unsigned remaining_time; - unsigned bitmask_size; - unsigned bitmask_data[1]; - }; - - /** - * Structure to store and retrieve log event information. - * @see @ref secSLogEvents - */ - struct ndb_logevent { - /** NdbLogEventHandle (to be used for comparing only) - * set in ndb_logevent_get_next() - */ - void *handle; - - /** Which event */ - enum Ndb_logevent_type type; - - /** Time when log event was registred at the management server */ - unsigned time; - - /** Category of log event */ - enum ndb_mgm_event_category category; - - /** Severity of log event */ - enum ndb_mgm_event_severity severity; - - /** Level (0-15) of log event */ - unsigned level; - - /** Node ID of the node that reported the log event */ - unsigned source_nodeid; - - /** Union of log event specific data. 
Use @ref type to decide - * which struct to use - */ - union { - /* CONNECT */ - struct ndb_logevent_Connected Connected; - struct ndb_logevent_Disconnected Disconnected; - struct ndb_logevent_CommunicationClosed CommunicationClosed; - struct ndb_logevent_CommunicationOpened CommunicationOpened; - struct ndb_logevent_ConnectedApiVersion ConnectedApiVersion; - - /* CHECKPOINT */ - struct ndb_logevent_GlobalCheckpointStarted GlobalCheckpointStarted; - struct ndb_logevent_GlobalCheckpointCompleted GlobalCheckpointCompleted; - struct ndb_logevent_LocalCheckpointStarted LocalCheckpointStarted; - struct ndb_logevent_LocalCheckpointCompleted LocalCheckpointCompleted; - struct ndb_logevent_LCPStoppedInCalcKeepGci LCPStoppedInCalcKeepGci; - struct ndb_logevent_LCPFragmentCompleted LCPFragmentCompleted; - struct ndb_logevent_UndoLogBlocked UndoLogBlocked; - - /* STARTUP */ - struct ndb_logevent_NDBStartStarted NDBStartStarted; - struct ndb_logevent_NDBStartCompleted NDBStartCompleted; - struct ndb_logevent_STTORRYRecieved STTORRYRecieved; - struct ndb_logevent_StartPhaseCompleted StartPhaseCompleted; - struct ndb_logevent_CM_REGCONF CM_REGCONF; - struct ndb_logevent_CM_REGREF CM_REGREF; - struct ndb_logevent_FIND_NEIGHBOURS FIND_NEIGHBOURS; - struct ndb_logevent_NDBStopStarted NDBStopStarted; - struct ndb_logevent_NDBStopCompleted NDBStopCompleted; - struct ndb_logevent_NDBStopForced NDBStopForced; - struct ndb_logevent_NDBStopAborted NDBStopAborted; - struct ndb_logevent_StartREDOLog StartREDOLog; - struct ndb_logevent_StartLog StartLog; - struct ndb_logevent_UNDORecordsExecuted UNDORecordsExecuted; - - /* NODERESTART */ - struct ndb_logevent_NR_CopyDict NR_CopyDict; - struct ndb_logevent_NR_CopyDistr NR_CopyDistr; - struct ndb_logevent_NR_CopyFragsStarted NR_CopyFragsStarted; - struct ndb_logevent_NR_CopyFragDone NR_CopyFragDone; - struct ndb_logevent_NR_CopyFragsCompleted NR_CopyFragsCompleted; - - struct ndb_logevent_NodeFailCompleted NodeFailCompleted; - struct ndb_logevent_NODE_FAILREP NODE_FAILREP; - struct ndb_logevent_ArbitState ArbitState; - struct ndb_logevent_ArbitResult ArbitResult; - struct ndb_logevent_GCP_TakeoverStarted GCP_TakeoverStarted; - struct ndb_logevent_GCP_TakeoverCompleted GCP_TakeoverCompleted; - struct ndb_logevent_LCP_TakeoverStarted LCP_TakeoverStarted; - struct ndb_logevent_LCP_TakeoverCompleted LCP_TakeoverCompleted; - - /* STATISTIC */ - struct ndb_logevent_TransReportCounters TransReportCounters; - struct ndb_logevent_OperationReportCounters OperationReportCounters; - struct ndb_logevent_TableCreated TableCreated; - struct ndb_logevent_JobStatistic JobStatistic; - struct ndb_logevent_SendBytesStatistic SendBytesStatistic; - struct ndb_logevent_ReceiveBytesStatistic ReceiveBytesStatistic; - struct ndb_logevent_MemoryUsage MemoryUsage; - - /* ERROR */ - struct ndb_logevent_TransporterError TransporterError; - struct ndb_logevent_TransporterWarning TransporterWarning; - struct ndb_logevent_MissedHeartbeat MissedHeartbeat; - struct ndb_logevent_DeadDueToHeartbeat DeadDueToHeartbeat; - struct ndb_logevent_WarningEvent WarningEvent; - - /* INFO */ - struct ndb_logevent_SentHeartbeat SentHeartbeat; - struct ndb_logevent_CreateLogBytes CreateLogBytes; - struct ndb_logevent_InfoEvent InfoEvent; - struct ndb_logevent_EventBufferStatus EventBufferStatus; - - /** Log event data for @ref NDB_LE_BackupStarted */ - struct ndb_logevent_BackupStarted BackupStarted; - /** Log event data @ref NDB_LE_BackupFailedToStart */ - struct ndb_logevent_BackupFailedToStart 
BackupFailedToStart; - /** Log event data @ref NDB_LE_BackupCompleted */ - struct ndb_logevent_BackupCompleted BackupCompleted; - /** Log event data @ref NDB_LE_BackupAborted */ - struct ndb_logevent_BackupAborted BackupAborted; - /** Log event data @ref NDB_LE_SingleUser */ - struct ndb_logevent_SingleUser SingleUser; - /** Log even data @ref NDB_LE_StartReport */ - struct ndb_logevent_StartReport StartReport; -#ifndef DOXYGEN_FIX - }; -#else - } ; -#endif - }; - -enum ndb_logevent_handle_error { - NDB_LEH_NO_ERROR, - NDB_LEH_READ_ERROR, - NDB_LEH_MISSING_EVENT_SPECIFIER, - NDB_LEH_UNKNOWN_EVENT_TYPE, - NDB_LEH_UNKNOWN_EVENT_VARIABLE, - NDB_LEH_INTERNAL_ERROR -}; - -#ifdef __cplusplus -} -#endif - -/** @} */ - -#endif diff --git a/storage/ndb/include/mgmapi/ndb_logevent.txt b/storage/ndb/include/mgmapi/ndb_logevent.txt deleted file mode 100644 index 6fe2d3f87b7..00000000000 --- a/storage/ndb/include/mgmapi/ndb_logevent.txt +++ /dev/null @@ -1,56 +0,0 @@ -To add a new event edit the following 3 files in totally 5 places: - -example shows EventBufferUsage added. - -Public interface: - -*** ndb/include/mgmapi/ndb_logevent.h - - /** NDB_MGM_EVENT_CATEGORY_INFO */ - NDB_LE_EventBufferUsage = 58 -... - - - /** Log event specific data for for corresponding NDB_LE_ log event */ - struct { - unsigned usage; - unsigned avail; - } EventBufferUsage; - -*** ndb/src/mgmapi/ndb_logevent.cpp - - ROW( EventBufferUsage, "usage", 1, usage), - ROW( EventBufferUsage, "avail", 2, avail), - - - -Internal impl: - -*** ndb/src/common/debugger/EventLogger.cpp - -void getTextEventBufferUsage(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Event buffer usage: %d(%d\%)", - theData[1], - theData[2] ? (theData[1]*100)/theData[2] : 0); -} - -... - - ROW(EventBufferUsage, LogLevel::llInfo, 7, Logger::LL_INFO ), - -*** sending event from ndbd kernel - - Uint32 *data= signal->getDataPtrSend(); - data[0]= NDB_LE_EventBufferUsage; - data[1]= 0; - data[2]= 0; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - -*** sending event from ndbapi (internal) - - Uint32 data[3]; - data[0]= NDB_LE_EventBufferUsage; - data[1]= 0; - data[2]= 0; - m_ndb->theImpl->send_event_report(data,3); diff --git a/storage/ndb/include/mgmapi/ndbd_exit_codes.h b/storage/ndb/include/mgmapi/ndbd_exit_codes.h deleted file mode 100644 index ffec2f33d0f..00000000000 --- a/storage/ndb/include/mgmapi/ndbd_exit_codes.h +++ /dev/null @@ -1,167 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBD_EXIT_CODES_H -#define NDBD_EXIT_CODES_H - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -/** - * Exit error codes for NDBD - * - * These errorcodes should be used whenever a condition - * is detected where it's necesssary to shutdown NDB. 
- * - * Example: When another node fails while a NDB node are performing - * a system restart the node should be shutdown. This - * is kind of an error but the cause of the error is known - * and a proper errormessage describing the problem should - * be printed in error.log. It's therefore important to use - * the proper errorcode. - * - */ - -typedef enum -{ - ndbd_exit_st_success = 0, - ndbd_exit_st_unknown = 1, - ndbd_exit_st_permanent = 2, - ndbd_exit_st_temporary = 3, - ndbd_exit_st_filesystem_error = 4 -} ndbd_exit_status_enum; - -typedef enum -{ - ndbd_exit_cl_none = 0, - ndbd_exit_cl_unknown = 1, - ndbd_exit_cl_internal_error = 2, - ndbd_exit_cl_configuration_error = 3, - ndbd_exit_cl_arbitration_error = 4, - ndbd_exit_cl_restart_error = 5, - ndbd_exit_cl_resource_configuration_error = 6, - ndbd_exit_cl_filesystem_full_error = 7, - ndbd_exit_cl_filesystem_inconsistency_error = 8, - ndbd_exit_cl_filesystem_limit = 9 -} ndbd_exit_classification_enum; - -typedef ndbd_exit_status_enum ndbd_exit_status; -typedef ndbd_exit_classification_enum ndbd_exit_classification; - -/* Errorcodes before block division was used */ -#define NDBD_EXIT_GENERIC 2300 -#define NDBD_EXIT_PRGERR 2301 -#define NDBD_EXIT_NODE_NOT_IN_CONFIG 2302 -#define NDBD_EXIT_SYSTEM_ERROR 2303 -#define NDBD_EXIT_INDEX_NOTINRANGE 2304 -#define NDBD_EXIT_ARBIT_SHUTDOWN 2305 -#define NDBD_EXIT_POINTER_NOTINRANGE 2306 -#define NDBD_EXIT_PARTITIONED_SHUTDOWN 2307 -#define NDBD_EXIT_SR_OTHERNODEFAILED 2308 -#define NDBD_EXIT_NODE_NOT_DEAD 2309 -#define NDBD_EXIT_SR_REDOLOG 2310 -#define NDBD_EXIT_SR_RESTARTCONFLICT 2311 -#define NDBD_EXIT_NO_MORE_UNDOLOG 2312 -#define NDBD_EXIT_SR_UNDOLOG 2313 -#define NDBD_EXIT_SINGLE_USER_MODE 2314 -#define NDBD_EXIT_NODE_DECLARED_DEAD 2315 -#define NDBD_EXIT_SR_SCHEMAFILE 2316 -#define NDBD_EXIT_MEMALLOC 2327 -#define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334 -#define NDBD_EXIT_TIME_QUEUE_SHORT 2335 -#define NDBD_EXIT_TIME_QUEUE_LONG 2336 -#define NDBD_EXIT_TIME_QUEUE_DELAY 2337 -#define NDBD_EXIT_TIME_QUEUE_INDEX 2338 -#define NDBD_EXIT_BLOCK_BNR_ZERO 2339 -#define NDBD_EXIT_WRONG_PRIO_LEVEL 2340 -#define NDBD_EXIT_NDBREQUIRE 2341 -#define NDBD_EXIT_ERROR_INSERT 2342 -#define NDBD_EXIT_NDBASSERT 2343 -#define NDBD_EXIT_INVALID_CONFIG 2350 -#define NDBD_EXIT_OUT_OF_LONG_SIGNAL_MEMORY 2351 - -/* Errorcodes for fatal resource errors */ -#define NDBD_EXIT_RESOURCE_ALLOC_ERROR 2500 - -#define NDBD_EXIT_OS_SIGNAL_RECEIVED 6000 - -/* VM 6050-> */ -#define NDBD_EXIT_WATCHDOG_TERMINATE 6050 -#define NDBD_EXIT_SIGNAL_LOST 6051 -#define NDBD_EXIT_SIGNAL_LOST_SEND_BUFFER_FULL 6052 -#define NDBD_EXIT_ILLEGAL_SIGNAL 6053 -#define NDBD_EXIT_CONNECTION_SETUP_FAILED 6054 - -/* NDBCNTR 6100-> */ -#define NDBD_EXIT_RESTART_TIMEOUT 6100 -#define NDBD_EXIT_RESTART_DURING_SHUTDOWN 6101 - -/* TC 6200-> */ -/* DIH 6300-> */ -#define NDBD_EXIT_MAX_CRASHED_REPLICAS 6300 -#define NDBD_EXIT_MASTER_FAILURE_DURING_NR 6301 -#define NDBD_EXIT_LOST_NODE_GROUP 6302 -#define NDBD_EXIT_NO_RESTORABLE_REPLICA 6303 - -/* ACC 6600-> */ -#define NDBD_EXIT_SR_OUT_OF_INDEXMEMORY 6600 -/* TUP 6800-> */ -#define NDBD_EXIT_SR_OUT_OF_DATAMEMORY 6800 -/* LQH 7200-> */ - - -/* Errorcodes for NDB filesystem */ -#define NDBD_EXIT_AFS_NOPATH 2801 -/* -#define NDBD_EXIT_AFS_CHANNALFULL 2802 -#define NDBD_EXIT_AFS_NOMORETHREADS 2803 -*/ -#define NDBD_EXIT_AFS_PARAMETER 2804 -#define NDBD_EXIT_AFS_INVALIDPATH 2805 -#define NDBD_EXIT_AFS_MAXOPEN 2806 -#define NDBD_EXIT_AFS_ALREADY_OPEN 2807 - -#define NDBD_EXIT_AFS_ENVIRONMENT 2808 -#define 
NDBD_EXIT_AFS_TEMP_NO_ACCESS 2809 -#define NDBD_EXIT_AFS_DISK_FULL 2810 -#define NDBD_EXIT_AFS_PERMISSION_DENIED 2811 -#define NDBD_EXIT_AFS_INVALID_PARAM 2812 -#define NDBD_EXIT_AFS_UNKNOWN 2813 -#define NDBD_EXIT_AFS_NO_MORE_RESOURCES 2814 -#define NDBD_EXIT_AFS_NO_SUCH_FILE 2815 -#define NDBD_EXIT_AFS_READ_UNDERFLOW 2816 - -#define NDBD_EXIT_INVALID_LCP_FILE 2352 -#define NDBD_EXIT_INSUFFICENT_NODES 2353 - -const char * -ndbd_exit_message(int faultId, ndbd_exit_classification *cl); -const char * -ndbd_exit_classification_message(ndbd_exit_classification classification, - ndbd_exit_status *status); -const char * -ndbd_exit_status_message(ndbd_exit_status status); - -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* NDBD_EXIT_CODES_H */ diff --git a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp b/storage/ndb/include/mgmcommon/ConfigRetriever.hpp deleted file mode 100644 index a40145090d7..00000000000 --- a/storage/ndb/include/mgmcommon/ConfigRetriever.hpp +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ConfigRetriever_H -#define ConfigRetriever_H - -#include -#include -#include - -/** - * @class ConfigRetriever - * @brief Used by nodes (DB, MGM, API) to get their config from MGM server. - */ -class ConfigRetriever { -public: - ConfigRetriever(const char * _connect_string, - Uint32 version, Uint32 nodeType, - const char * _bind_address = 0, - int timeout_ms = 30000); - ~ConfigRetriever(); - - int do_connect(int no_retries, int retry_delay_in_seconds, int verbose); - int disconnect(); - - /** - * Get configuration for current node. - * - * Configuration is fetched from one MGM server configured in local config - * file. The method loops over all the configured MGM servers and tries - * to establish a connection. This is repeated until a connection is - * established, so the function hangs until a connection is established. - * - * @return ndb_mgm_configuration object if succeeded, - * NULL if erroneous local config file or configuration error. 
- */ - struct ndb_mgm_configuration * getConfig(); - - void resetError(); - int hasError(); - const char * getErrorString(); - - /** - * @return Node id of this node (as stated in local config or connectString) - */ - Uint32 allocNodeId(int no_retries, int retry_delay_in_seconds); - - int setNodeId(Uint32 nodeid); - - /** - * Get config using socket - */ - struct ndb_mgm_configuration * getConfig(NdbMgmHandle handle); - - /** - * Get config from file - */ - struct ndb_mgm_configuration * getConfig(const char * file); - - /** - * Verify config - */ - bool verifyConfig(const struct ndb_mgm_configuration *, Uint32 nodeid); - - Uint32 get_mgmd_port() const; - const char *get_mgmd_host() const; - const char *get_connectstring(char *buf, int buf_sz) const; - NdbMgmHandle get_mgmHandle() { return m_handle; }; - NdbMgmHandle* get_mgmHandlePtr() { return &m_handle; }; - void end_session(bool end) { m_end_session= end; }; - - Uint32 get_configuration_nodeid() const; -private: - BaseString errorString; - enum ErrorType { - CR_NO_ERROR = 0, - CR_ERROR = 1, - CR_RETRY = 2 - }; - ErrorType latestErrorType; - - void setError(ErrorType, const char * errorMsg); - - Uint32 _ownNodeId; - bool m_end_session; - - /* - Uint32 m_mgmd_port; - const char *m_mgmd_host; - */ - - Uint32 m_version; - Uint32 m_node_type; - NdbMgmHandle m_handle; -}; - -#endif - - diff --git a/storage/ndb/include/mgmcommon/IPCConfig.hpp b/storage/ndb/include/mgmcommon/IPCConfig.hpp deleted file mode 100644 index 1137a6758db..00000000000 --- a/storage/ndb/include/mgmcommon/IPCConfig.hpp +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef IPCConfig_H -#define IPCConfig_H - -#include -#include -#include -#include - -/** - * @class IPCConfig - * @brief Config transporters in TransporterRegistry using Properties config - */ -class IPCConfig -{ -public: - IPCConfig(Properties * props); - ~IPCConfig(); - - /** @return 0 for OK */ - int init(); - - NodeId ownId() const; - - /** @return No of transporters configured */ - int configureTransporters(class TransporterRegistry * theTransporterRegistry); - - /** - * Supply a nodeId, - * and get next higher node id - * @return false if none found, true otherwise - * - * getREPHBFrequency and getNodeType uses the last Id supplied to - * getNextRemoteNodeId. 
- */ - bool getNextRemoteNodeId(NodeId & nodeId) const; - Uint32 getREPHBFrequency(NodeId id) const; - const char* getNodeType(NodeId id) const; - - NodeId getNoOfRemoteNodes() const { - return theNoOfRemoteNodes; - } - - void print() const { props->print(); } - - static Uint32 configureTransporters(Uint32 nodeId, - const struct ndb_mgm_configuration &, - class TransporterRegistry &); - -private: - NodeId the_ownId; - Properties * props; - - bool addRemoteNodeId(NodeId nodeId); - NodeId theNoOfRemoteNodes; - NodeId theRemoteNodeIds[MAX_NODES]; -}; - -inline -NodeId -IPCConfig::ownId() const -{ - return the_ownId; -} - - - -#endif // IPCConfig_H diff --git a/storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp b/storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp deleted file mode 100644 index 42667a29bc5..00000000000 --- a/storage/ndb/include/mgmcommon/MgmtErrorReporter.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//****************************************************************************** -// Description: This file contains the error reporting macros to be used -// within management server. -// -// Author: Peter Lind -//****************************************************************************** - - -#include // exit -#include - -#define REPORT_WARNING(message) \ - ndbout << "WARNING: " << message << endl - -//**************************************************************************** -// Description: Report a warning, the message is printed on ndbout. -// Parameters: -// message: A text describing the warning. -// Returns: - -//**************************************************************************** - - -#define REPORT_ERROR(message) \ - ndbout << "ERROR: " << message << endl - -//**************************************************************************** -// Description: Report an error, the message is printed on ndbout. -// Parameters: -// message: A text describing the error. -// Returns: - -//**************************************************************************** - - -#ifdef MGMT_TRACE - -#define TRACE(message) \ - ndbout << "MGMT_TRACE: " << message << endl -#else -#define TRACE(message) - -#endif - -//**************************************************************************** -// Description: Print a message on ndbout. -// Parameters: -// message: The message -// Returns: - -//**************************************************************************** - -#define MGM_REQUIRE(x) \ - if (!(x)) { ndbout << __FILE__ << " " << __LINE__ \ - << ": Warning! 
Requirement failed" << endl; } diff --git a/storage/ndb/include/ndb_constants.h b/storage/ndb/include/ndb_constants.h deleted file mode 100644 index ff2a069bcf1..00000000000 --- a/storage/ndb/include/ndb_constants.h +++ /dev/null @@ -1,101 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - * @file ndb_constants.h - * - * Constants common to NDB API and NDB kernel. - * Changing the values makes database upgrade impossible. - * - * New or removed definitions must be replicated to - * NdbDictionary.hpp and NdbSqlUtil.hpp. - * - * Not for use by application programs. - * Use the enums provided by NdbDictionary instead. - */ - -#ifndef NDB_CONSTANTS_H -#define NDB_CONSTANTS_H - -/* - * Data type constants. - */ - -#define NDB_TYPE_UNDEFINED 0 - -#define NDB_TYPE_TINYINT 1 -#define NDB_TYPE_TINYUNSIGNED 2 -#define NDB_TYPE_SMALLINT 3 -#define NDB_TYPE_SMALLUNSIGNED 4 -#define NDB_TYPE_MEDIUMINT 5 -#define NDB_TYPE_MEDIUMUNSIGNED 6 -#define NDB_TYPE_INT 7 -#define NDB_TYPE_UNSIGNED 8 -#define NDB_TYPE_BIGINT 9 -#define NDB_TYPE_BIGUNSIGNED 10 -#define NDB_TYPE_FLOAT 11 -#define NDB_TYPE_DOUBLE 12 -#define NDB_TYPE_OLDDECIMAL 13 -#define NDB_TYPE_CHAR 14 -#define NDB_TYPE_VARCHAR 15 -#define NDB_TYPE_BINARY 16 -#define NDB_TYPE_VARBINARY 17 -#define NDB_TYPE_DATETIME 18 -#define NDB_TYPE_DATE 19 -#define NDB_TYPE_BLOB 20 -#define NDB_TYPE_TEXT 21 -#define NDB_TYPE_BIT 22 -#define NDB_TYPE_LONGVARCHAR 23 -#define NDB_TYPE_LONGVARBINARY 24 -#define NDB_TYPE_TIME 25 -#define NDB_TYPE_YEAR 26 -#define NDB_TYPE_TIMESTAMP 27 -#define NDB_TYPE_OLDDECIMALUNSIGNED 28 -#define NDB_TYPE_DECIMAL 29 -#define NDB_TYPE_DECIMALUNSIGNED 30 - -#define NDB_TYPE_MAX 31 - - -/* - * Attribute array type. - */ - -#define NDB_ARRAYTYPE_FIXED 0 /* 0 length bytes */ -#define NDB_ARRAYTYPE_SHORT_VAR 1 /* 1 length bytes */ -#define NDB_ARRAYTYPE_MEDIUM_VAR 2 /* 2 length bytes */ - -/* - * Attribute storage type. - */ - -#define NDB_STORAGETYPE_MEMORY 0 -#define NDB_STORAGETYPE_DISK 1 - -/* - * Table temporary status. - */ -#define NDB_TEMP_TAB_PERMANENT 0 -#define NDB_TEMP_TAB_TEMPORARY 1 - -/* - * Table single user mode - */ -#define NDB_SUM_LOCKED 0 -#define NDB_SUM_READONLY 1 -#define NDB_SUM_READ_WRITE 2 - -#endif diff --git a/storage/ndb/include/ndb_global.h.in b/storage/ndb/include/ndb_global.h.in deleted file mode 100644 index 9097f03f63a..00000000000 --- a/storage/ndb/include/ndb_global.h.in +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_GLOBAL_H -#define NDB_GLOBAL_H - -#include -#include - -#define NDB_PORT "@ndb_port@" -#define NDB_TCP_BASE_PORT "@ndb_port_base@" - -#if defined(_WIN32) || defined(_WIN64) || defined(__WIN32__) || defined(WIN32) -#define NDB_WIN32 -#include -#define PATH_MAX 256 -#define DIR_SEPARATOR "\\" -#define MYSQLCLUSTERDIR "c:\\mysql\\mysql-cluster" -#define HAVE_STRCASECMP -#define strcasecmp _strcmpi -#pragma warning(disable: 4503 4786) -#else -#undef NDB_WIN32 -#define DIR_SEPARATOR "/" -#endif - -#include - -#if ! (NDB_SIZEOF_CHAR == SIZEOF_CHAR) -#error "Invalid define for Uint8" -#endif - -#if ! (NDB_SIZEOF_INT == SIZEOF_INT) -#error "Invalid define for Uint32" -#endif - -#if ! (NDB_SIZEOF_LONG_LONG == SIZEOF_LONG_LONG) -#error "Invalid define for Uint64" -#endif - -#include - -#ifdef _AIX -#undef _H_STRINGS -#endif -#include -#include -#include - -#ifdef HAVE_STDARG_H -#include -#endif - -#ifdef TIME_WITH_SYS_TIME -#include -#endif - -#ifdef HAVE_FCNTL_H -#include -#endif - -#ifdef HAVE_SYS_PARAM_H -#include -#endif - -#ifdef HAVE_SYS_STAT_H - #if defined(__cplusplus) && defined(_APP32_64BIT_OFF_T) && defined(_INCLUDE_AES_SOURCE) - #undef _INCLUDE_AES_SOURCE - #include - #define _INCLUDE_AES_SOURCE - #else - #include - #endif -#endif - -#ifdef HAVE_SYS_RESOURCE_H -#include -#endif - -#ifdef HAVE_SYS_WAIT_H -#include -#endif - -#ifdef HAVE_SYS_MMAN_H -#include -#endif - -#ifndef HAVE_STRDUP -extern char * strdup(const char *s); -#endif - -#ifndef HAVE_STRCASECMP -extern int strcasecmp(const char *s1, const char *s2); -extern int strncasecmp(const char *s1, const char *s2, size_t n); -#endif - -static const char table_name_separator = '/'; - -#if defined(_AIX) || defined(WIN32) || defined(NDB_VC98) -#define STATIC_CONST(x) enum { x } -#else -#define STATIC_CONST(x) static const Uint32 x -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#ifdef __cplusplus -} -#endif - -#include "ndb_init.h" - -#ifndef PATH_MAX -#define PATH_MAX 1024 -#endif - -#if defined(_lint) || defined(FORCE_INIT_OF_VARS) -#define LINT_SET_PTR = {0,0} -#else -#define LINT_SET_PTR -#endif - -#ifndef MIN -#define MIN(x,y) (((x)<(y))?(x):(y)) -#endif - -#ifndef MAX -#define MAX(x,y) (((x)>(y))?(x):(y)) -#endif - -#define NDB_O_DIRECT_WRITE_ALIGNMENT 512 - -#endif diff --git a/storage/ndb/include/ndb_init.h b/storage/ndb/include/ndb_init.h deleted file mode 100644 index 3fd6ccb202a..00000000000 --- a/storage/ndb/include/ndb_init.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#ifndef NDB_INIT_H -#define NDB_INIT_H - -#ifdef __cplusplus -extern "C" { -#endif -/* call in main() - does not return on error */ -extern int ndb_init(void); -extern void ndb_end(int); -#define NDB_INIT(prog_name) {my_progname=(prog_name); ndb_init();} -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/ndb_net.h b/storage/ndb/include/ndb_net.h deleted file mode 100644 index 357cf8f4671..00000000000 --- a/storage/ndb/include/ndb_net.h +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef NDBNET_H -#define NDBNET_H - -#include - -#endif diff --git a/storage/ndb/include/ndb_types.h.in b/storage/ndb/include/ndb_types.h.in deleted file mode 100644 index df368ef3e53..00000000000 --- a/storage/ndb/include/ndb_types.h.in +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - * @file ndb_types.h - */ - -#ifndef NDB_TYPES_H -#define NDB_TYPES_H - -#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(_WIN64) -#define NDB_SIZEOF_CHARP SIZEOF_CHARP -#define NDB_SIZEOF_CHAR SIZEOF_CHAR -#define NDB_SIZEOF_SHORT 2 -#define NDB_SIZEOF_INT SIZEOF_INT -#define NDB_SIZEOF_LONG SIZEOF_LONG -#define NDB_SIZEOF_LONG_LONG SIZEOF_LONG_LONG -typedef unsigned __int64 Uint64; -typedef signed __int64 Int64; -#else -#define NDB_SIZEOF_CHARP @NDB_SIZEOF_CHARP@ -#define NDB_SIZEOF_CHAR @NDB_SIZEOF_CHAR@ -#define NDB_SIZEOF_INT @NDB_SIZEOF_INT@ -#define NDB_SIZEOF_SHORT @NDB_SIZEOF_SHORT@ -#define NDB_SIZEOF_LONG @NDB_SIZEOF_LONG@ -#define NDB_SIZEOF_LONG_LONG @NDB_SIZEOF_LONG_LONG@ -typedef unsigned long long Uint64; -typedef signed long long Int64; -#endif - -typedef signed char Int8; -typedef unsigned char Uint8; -typedef signed short Int16; -typedef unsigned short Uint16; -typedef signed int Int32; -typedef unsigned int Uint32; - -typedef unsigned int UintR; - -#ifdef __SIZE_TYPE__ - typedef __SIZE_TYPE__ UintPtr; -#elif NDB_SIZEOF_CHARP == 4 - typedef Uint32 UintPtr; -#elif NDB_SIZEOF_CHARP == 8 - typedef Uint64 UintPtr; -#else - #error "Unknown size of (char *)" -#endif - -#if ! (NDB_SIZEOF_CHAR == 1) -#error "Invalid define for Uint8" -#endif - -#if ! (NDB_SIZEOF_SHORT == 2) -#error "Invalid define for Uint16" -#endif - -#if ! (NDB_SIZEOF_INT == 4) -#error "Invalid define for Uint32" -#endif - -#if ! (NDB_SIZEOF_LONG_LONG == 8) -#error "Invalid define for Uint64" -#endif - -#include "ndb_constants.h" - -#endif diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in deleted file mode 100644 index 4cebb9aa959..00000000000 --- a/storage/ndb/include/ndb_version.h.in +++ /dev/null @@ -1,141 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_VERSION_H -#define NDB_VERSION_H - -#include - -/* NDB build version */ -#define NDB_VERSION_BUILD @NDB_VERSION_BUILD@ - -/* NDB major version */ -#define NDB_VERSION_MAJOR @NDB_VERSION_MAJOR@ - -/* NDB minor version */ -#define NDB_VERSION_MINOR @NDB_VERSION_MINOR@ - -/* NDB status version */ -#define NDB_VERSION_STATUS "@NDB_VERSION_STATUS@" - - -#define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | ((C) << 0)) - -#define NDB_VERSION_D NDB_MAKE_VERSION(NDB_VERSION_MAJOR, NDB_VERSION_MINOR, NDB_VERSION_BUILD) -#define NDB_VERSION_STRING_BUF_SZ 100 -#ifdef __cplusplus -extern "C" { -#endif - -void ndbPrintVersion(); - -Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build); - -Uint32 ndbGetMajor(Uint32 version); - -Uint32 ndbGetMinor(Uint32 version); - -Uint32 ndbGetBuild(Uint32 version); - -const char* ndbGetVersionString(Uint32 version, const char * status, - char *buf, unsigned sz); -const char* ndbGetOwnVersionString(); - -Uint32 ndbGetOwnVersion(); - -#ifdef __cplusplus -} -#endif - -#define NDB_VERSION_STRING ndbGetOwnVersionString() - -#define NDB_VERSION ndbGetOwnVersion() - -/** - * Version id - * - * Used by transporter and when communicating with - * managment server - */ -/*#define NDB_VERSION_ID 0*/ - -/** - * From which version do we support rowid - */ -#define NDBD_ROWID_VERSION (NDB_MAKE_VERSION(5,1,6)) -#define NDBD_INCL_NODECONF_VERSION_4 NDB_MAKE_VERSION(4,1,17) -#define NDBD_INCL_NODECONF_VERSION_5 NDB_MAKE_VERSION(5,0,18) -#define NDBD_FRAGID_VERSION (NDB_MAKE_VERSION(5,1,6)) -#define NDBD_DICT_LOCK_VERSION_5 NDB_MAKE_VERSION(5,0,23) -#define NDBD_DICT_LOCK_VERSION_5_1 NDB_MAKE_VERSION(5,1,12) - -#define NDBD_UPDATE_FRAG_DIST_KEY_50 NDB_MAKE_VERSION(5,0,26) -#define NDBD_UPDATE_FRAG_DIST_KEY_51 NDB_MAKE_VERSION(5,1,12) - -#define NDBD_QMGR_SINGLEUSER_VERSION_5 NDB_MAKE_VERSION(5,0,25) - -#define NDBD_NODE_VERSION_REP NDB_MAKE_VERSION(6,1,1) - -#define NDBD_PREPARE_COPY_FRAG_VERSION NDB_MAKE_VERSION(6,2,1) -#define NDBD_PREPARE_COPY_FRAG_V2_51 NDB_MAKE_VERSION(5,1,23) -#define NDBD_PREPARE_COPY_FRAG_V2_62 NDB_MAKE_VERSION(6,2,8) -#define NDBD_PREPARE_COPY_FRAG_V2_63 NDB_MAKE_VERSION(6,3,6) - -/** - * 0 = NO PREP COPY FRAG SUPPORT - * 1 = NO MAX PAGE SUPPORT - * 2 = LATEST VERSION - */ -static -inline -int -ndb_check_prep_copy_frag_version(Uint32 version) -{ - const Uint32 major = (version >> 16) & 0xFF; - const Uint32 minor = (version >> 8) & 0xFF; - - if (version == NDB_VERSION_D) - return 2; - - if (major >= 6) - { - if (minor == 2) - { - if (version >= NDBD_PREPARE_COPY_FRAG_V2_62) - return 2; - if (version >= NDBD_PREPARE_COPY_FRAG_VERSION) - return 1; - return 0; - } - else if (minor == 3) - { - if (version >= NDBD_PREPARE_COPY_FRAG_V2_63) - return 2; - return 1; - } - return 2; - } - else if (major == 5 && minor == 1) - { - if (version >= NDBD_PREPARE_COPY_FRAG_V2_51) - return 2; - } - - return 0; -} - -#endif - diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp deleted file mode 100644 index a2e681bab41..00000000000 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ /dev/null @@ -1,1806 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as 
published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - @mainpage NDB API Programmers' Guide - - This guide assumes a basic familiarity with MySQL Cluster concepts found - on http://dev.mysql.com/doc/mysql/en/mysql-cluster.html. - Some of the fundamental ones are also described in section @ref secConcepts. - - The NDB API is a MySQL Cluster application interface - that implements transactions. - The NDB API consists of the following fundamental classes: - - Ndb_cluster_connection, representing a connection to a cluster, - - Ndb is the main class, representing a connection to a database, - - NdbTransaction represents a transaction, - - NdbOperation represents an operation using a primary key, - - NdbScanOperation represents an operation performing a full table scan. - - NdbIndexOperation represents an operation using a unique hash index, - - NdbIndexScanOperation represents an operation performing a scan using - an ordered index, - - NdbRecAttr represents an attribute value - - NdbDictionary represents meta information about tables and attributes. - - In addition, the NDB API defines a structure NdbError, which contains the - specification for an error. - - It is also possible to receive "events" triggered when data in the database in changed. - This is done through the NdbEventOperation class. - - There are also some auxiliary classes, which are listed in the class hierarchy. - - The main structure of an application program is as follows: - -# Connect to a cluster using the Ndb_cluster_connection - object. - -# Initiate a database connection by constructing and initialising one or more Ndb objects. - -# Define and execute transactions using the NdbTransaction class. - -# Delete Ndb objects. - -# Terminate the connection to the cluster (terminate instance of Ndb_cluster_connection). - - The procedure for using transactions is as follows: - -# Start transaction (instantiate an NdbTransaction object) - -# Add and define operations associated with the transaction using instances of one or more of the - NdbOperation, NdbScanOperation, NdbIndexOperation, and NdbIndexScanOperation classes - -# Execute transaction (call NdbTransaction::execute()) - - The operation can be of two different types, - Commit or NoCommit. - If the operation is of type NoCommit, - then the application program executes the operation part of a transaction, - but without actually committing the transaction. - After executing a NoCommit operation, the program can continue - to add and define more operations to the transaction - for later execution. - - If the operation is of type Commit, then the transaction is - immediately committed. The transaction must be closed after it has been - commited (event if commit fails), and no further addition or definition of - operations for this transaction is allowed. 
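   As a hedged illustration of the NoCommit/Commit distinction described above
   (this sketch is not part of the original header; myNdb, myDict and the
   table and attribute names MYTABLENAME, ATTR1, ATTR2 are placeholders), a
   transaction can execute a read with NoCommit, use the result to define a
   further operation, and then commit:
   @code
   // myNdb is assumed to be an initialised Ndb*, myDict its Dictionary*;
   // error handling is omitted for brevity.
   const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");

   NdbTransaction *myTransaction= myNdb->startTransaction();

   // First operation, executed without committing
   NdbOperation *op1= myTransaction->getNdbOperation(myTable);
   op1->readTuple(NdbOperation::LM_Read);
   op1->equal("ATTR1", 1);
   NdbRecAttr *recAttr= op1->getValue("ATTR2", NULL);
   myTransaction->execute(NdbTransaction::NoCommit);

   // The value read above is now available and can be used to define
   // further operations in the same transaction.
   NdbOperation *op2= myTransaction->getNdbOperation(myTable);
   op2->updateTuple();
   op2->equal("ATTR1", 1);
   op2->setValue("ATTR2", recAttr->u_32_value() + 1);

   // Commit ends the transaction; it must be closed afterwards.
   myTransaction->execute(NdbTransaction::Commit);
   myNdb->closeTransaction(myTransaction);
   @endcode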
- - @section secSync Synchronous Transactions - - Synchronous transactions are defined and executed as follows: - - -# Start (create) the transaction, which is - referenced by an NdbTransaction object - (typically created using Ndb::startTransaction()). - At this point, the transaction is only being defined, - and is not yet sent to the NDB kernel. - -# Define operations and add them to the transaction, using one or more of - - NdbTransaction::getNdbOperation() - - NdbTransaction::getNdbScanOperation() - - NdbTransaction::getNdbIndexOperation() - - NdbTransaction::getNdbIndexScanOperation() - along with the appropriate methods of the respective NdbOperation class - (or one possiblt one or more of its subclasses). - Note that the transaction has still not yet been sent to the NDB kernel. - -# Execute the transaction, using the NdbTransaction::execute() method. - -# Close the transaction (call Ndb::closeTransaction()). - - For an example of this process, see the program listing in - @ref ndbapi_simple.cpp. - - To execute several parallel synchronous transactions, one can either - use multiple Ndb objects in several threads, or start multiple - application programs. - - @section secNdbOperations Operations - - A NdbTransaction consists of a list of operations, each of which is represented - by an instance of NdbOperation, NdbScanOperation, NdbIndexOperation, or - NdbIndexScanOperation. - -

<h3>Single row operations</h3>
- After the operation is created using NdbTransaction::getNdbOperation() - (or NdbTransaction::getNdbIndexOperation()), it is defined in the following - three steps: - -# Define the standard operation type, using NdbOperation::readTuple() - -# Specify search conditions, using NdbOperation::equal() - -# Specify attribute actions, using NdbOperation::getValue() - - Here are two brief examples illustrating this process. For the sake of - brevity, we omit error handling. - - This first example uses an NdbOperation: - @code - // 1. Retrieve table object - myTable= myDict->getTable("MYTABLENAME"); - - // 2. Create - myOperation= myTransaction->getNdbOperation(myTable); - - // 3. Define type of operation and lock mode - myOperation->readTuple(NdbOperation::LM_Read); - - // 4. Specify Search Conditions - myOperation->equal("ATTR1", i); - - // 5. Attribute Actions - myRecAttr= myOperation->getValue("ATTR2", NULL); - @endcode - For additional examples of this sort, see @ref ndbapi_simple.cpp. - - The second example uses an NdbIndexOperation: - @code - // 1. Retrieve index object - myIndex= myDict->getIndex("MYINDEX", "MYTABLENAME"); - - // 2. Create - myOperation= myTransaction->getNdbIndexOperation(myIndex); - - // 3. Define type of operation and lock mode - myOperation->readTuple(NdbOperation::LM_Read); - - // 4. Specify Search Conditions - myOperation->equal("ATTR1", i); - - // 5. Attribute Actions - myRecAttr = myOperation->getValue("ATTR2", NULL); - @endcode - Another example of this second type can be found in - @ref ndbapi_simple_index.cpp. - - We will now discuss in somewhat greater detail each step involved in the - creation and use of synchronous transactions. - -
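   The brief examples above deliberately omit error handling. As a hedged
   sketch (not from the original header; myTransaction and myNdb are the
   placeholder objects used above), a failing NdbTransaction::execute()
   returns -1, and the associated NdbError supplies a numeric code and a
   message:
   @code
   if (myTransaction->execute(NdbTransaction::Commit) == -1)
   {
     // getNdbError() is available on both NdbTransaction and Ndb;
     // NdbError carries a numeric code and a message string.
     const NdbError &err= myTransaction->getNdbError();
     ndbout << "Execute failed: " << err.code << " " << err.message << endl;
   }
   // The transaction must be closed whether or not the commit succeeded.
   myNdb->closeTransaction(myTransaction);
   @endcode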

- <h3>Step 1: Define single row operation type</h3>
- The following operation types are supported: - -# NdbOperation::insertTuple() : - inserts a non-existing tuple - -# NdbOperation::writeTuple() : - updates an existing tuple if is exists, - otherwise inserts a new tuple - -# NdbOperation::updateTuple() : - updates an existing tuple - -# NdbOperation::deleteTuple() : - deletes an existing tuple - -# NdbOperation::readTuple() : - reads an existing tuple with specified lock mode - - All of these operations operate on the unique tuple key. - (When NdbIndexOperation is used then all of these operations - operate on a defined unique hash index.) - - @note If you want to define multiple operations within the same transaction, - then you need to call NdbTransaction::getNdbOperation() or - NdbTransaction::getNdbIndexOperation() for each operation. - -
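   To illustrate the note above about calling getNdbOperation() once per
   operation, here is a hedged sketch (not from the original header; table
   and attribute names are placeholders) that defines an update and a delete
   within the same transaction:
   @code
   NdbOperation *updOp= myTransaction->getNdbOperation(myTable);
   updOp->updateTuple();                // update an existing tuple
   updOp->equal("ATTR1", 1);            // primary key of the row to update
   updOp->setValue("ATTR2", 42);        // new attribute value

   NdbOperation *delOp= myTransaction->getNdbOperation(myTable);
   delOp->deleteTuple();                // delete another tuple
   delOp->equal("ATTR1", 2);            // primary key of the row to delete

   // Both operations are sent and committed together.
   myTransaction->execute(NdbTransaction::Commit);
   @endcode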

Step 2: Specify Search Conditions

- The search condition is used to select tuples. Search conditions are set using NdbOperation::equal(). - -
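If the primary key consists of more than one column, equal() is simply called once per key column. A minimal sketch (KEY_PART1 and KEY_PART2 are hypothetical key columns):

   @code
   // Each component of a multi-column primary key gets its own equal() call
   myOperation->equal("KEY_PART1", i);
   myOperation->equal("KEY_PART2", "abc");
   @endcode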

Step 3: Specify Attribute Actions

- Next, it is necessary to determine which attributes should be read or updated. - It is important to remember that: - - Deletes can neither read nor set values, but only delete them - - Reads can only read values - - Updates can only set values - Normally the attribute is identified by name, but it is - also possible to use the attribute's identity to determine the - attribute. - - NdbOperation::getValue() returns an NdbRecAttr object - containing the read value. - To obtain the actual value, one of two methods can be used; - the application can either - - use its own memory (passed through a pointer aValue) to - NdbOperation::getValue(), or - - receive the attribute value in an NdbRecAttr object allocated - by the NDB API. - - The NdbRecAttr object is released when Ndb::closeTransaction() - is called. - Thus, the application cannot reference this object following - any subsequent call to Ndb::closeTransaction(). - Attempting to read data from an NdbRecAttr object before - calling NdbTransaction::execute() yields an undefined result. - - - @subsection secScan Scan Operations - - Scans are roughly the equivalent of SQL cursors, providing a means to - perform high-speed row processing. A scan can be performed - on either a table (using @ref NdbScanOperation) or - an ordered index (by means of an @ref NdbIndexScanOperation). - - Scan operations are characterised by the following: - - They can perform only reads (shared, exclusive or dirty) - - They can potentially work with multiple rows - - They can be used to update or delete multiple rows - - They can operate on several nodes in parallel - - After the operation is created using NdbTransaction::getNdbScanOperation() - (or NdbTransaction::getNdbIndexScanOperation()), - it is carried out in the following five steps: - -# Define the standard operation type, using NdbScanOperation::readTuples() - -# Specify search conditions, using @ref NdbScanFilter and/or - @ref NdbIndexScanOperation::setBound() - -# Specify attribute actions, using NdbOperation::getValue() - -# Execute the transaction, using NdbTransaction::execute() - -# Traverse the result set by means of successive calls to - NdbScanOperation::nextResult() - - Here are two brief examples illustrating this process. Once again, in order - to keep things relatively short and simple, we will forgo any error handling. - - This first example performs a table scan, using an NdbScanOperation: - @code - // 1. Retrieve table object - myTable= myDict->getTable("MYTABLENAME"); - - // 2. Create - myOperation= myTransaction->getNdbScanOperation(myTable); - - // 3. Define type of operation and lock mode - myOperation->readTuples(NdbOperation::LM_Read); - - // 4. Specify Search Conditions - NdbScanFilter sf(myOperation); - sf.begin(NdbScanFilter::OR); - sf.eq(0, i); // Return rows with column 0 equal to i or - sf.eq(1, i+1); // column 1 equal to (i+1) - sf.end(); - - // 5. Attribute Actions - myRecAttr= myOperation->getValue("ATTR2", NULL); - @endcode - - Our second example uses an NdbIndexScanOperation to perform an index scan: - @code - // 1. Retrieve index object - myIndex= myDict->getIndex("MYORDEREDINDEX", "MYTABLENAME"); - - // 2. Create - myOperation= myTransaction->getNdbIndexScanOperation(myIndex); - - // 3. Define type of operation and lock mode - myOperation->readTuples(NdbOperation::LM_Read); - - // 4. 
Specify Search Conditions - // All rows with ATTR1 between i and (i+1) - myOperation->setBound("ATTR1", NdbIndexScanOperation::BoundGE, i); - myOperation->setBound("ATTR1", NdbIndexScanOperation::BoundLE, i+1); - - // 5. Attribute Actions - myRecAttr = myOperation->getValue("ATTR2", NULL); - @endcode - - Some additional discussion of each step required to perform a scan follows: - -
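The scan examples above only define the scan; a sketch of the last two steps, executing the transaction and traversing the result set, might look like this (error handling omitted; ATTR2 is assumed to be an unsigned integer column):

   @code
   // Execute the transaction; this starts the scan but returns no rows yet
   myTransaction->execute(NoCommit);

   // Traverse the result set; nextResult(true) fetches the next batch of rows
   while (myOperation->nextResult(true) == 0)
   {
     // myRecAttr now refers to ATTR2 of the row the scan is positioned on
     Uint32 attr2Value= myRecAttr->u32_value();
   }
   @endcode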

Step 1: Define Scan Operation Type

- It is important to remember that only a single operation is supported for each scan operation - (@ref NdbScanOperation::readTuples() or @ref NdbIndexScanOperation::readTuples()). - - @note If you want to define multiple scan operations within the same - transaction, then you need to call - NdbTransaction::getNdbScanOperation() or - NdbTransaction::getNdbIndexScanOperation() separately for each operation. - -
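For instance, a sketch of scanning two tables within the same transaction (myOtherTable is a hypothetical second table object):

   @code
   // One scan operation per table; readTuples() is called exactly once on each
   NdbScanOperation *scanOne= myTransaction->getNdbScanOperation(myTable);
   scanOne->readTuples(NdbOperation::LM_Read);

   NdbScanOperation *scanTwo= myTransaction->getNdbScanOperation(myOtherTable);
   scanTwo->readTuples(NdbOperation::LM_Read);
   @endcode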

Step 2: Specify Search Conditions

- The search condition is used to select tuples. - If no search condition is specified, the scan will return all rows - in the table. - - The search condition can be an @ref NdbScanFilter (which can be used on both - @ref NdbScanOperation and @ref NdbIndexScanOperation) or bounds which - can only be used on index scans (@ref NdbIndexScanOperation::setBound()). - An index scan can use both NdbScanFilter and bounds. - - @note When NdbScanFilter is used, each row is examined, whether or not it is - actually returned. However, when using bounds, only rows within the bounds will be examined. - -
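As a sketch, an index scan can combine a bound with a scan filter, so that only rows within the bound are examined and only those also matching the filter are returned (names as in the earlier examples; error handling omitted):

   @code
   // Bound: only examine index entries with ATTR1 >= i
   myOperation->setBound("ATTR1", NdbIndexScanOperation::BoundGE, i);

   // Filter: of those rows, return only the ones whose column 1 equals (i+1)
   NdbScanFilter sf(myOperation);
   sf.begin(NdbScanFilter::AND);
   sf.eq(1, i+1);
   sf.end();
   @endcode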

Step 3: Specify Attribute Actions

- - Next, it is necessary to define which attributes should be read. - As with transaction attributes, scan attributes are defined by name but it is - also possible to use the attributes' identities to define attributes. - - As previously discussed (see @ref secSync), the value read is returned as - an NdbRecAttr object by the NdbOperation::getValue() method. - -
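Both ways of receiving the value apply to scans as well. A brief sketch of the two alternatives (assuming ATTR2 is a 32-bit column):

   @code
   // Alternative 1: let the NDB API allocate the memory and read it via NdbRecAttr
   NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);

   // Alternative 2: have the value written into a buffer owned by the application
   Uint32 attr2Buffer;
   myOperation->getValue("ATTR2", (char*)&attr2Buffer);
   @endcode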

Using Scan to Update/Delete

- Scanning can also be used to update or delete rows. - This is performed by - -# Scanning using exclusive locks (using NdbOperation::LM_Exclusive) - -# When iterating through the result set, for each row optionally calling - either NdbScanOperation::updateCurrentTuple() or - NdbScanOperation::deleteCurrentTuple() - -# (If performing NdbScanOperation::updateCurrentTuple():) - Setting new values for records simply by using @ref NdbOperation::setValue(). - NdbOperation::equal() should not be called in such cases, as the primary - key is retrieved from the scan. - - @note The actual update or delete will not be performed until the next - call to NdbTransaction::execute(), just as with single row operations. - NdbTransaction::execute() also must be called before any locks are released; - see @ref secScanLocks for more information. - -
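A minimal sketch of a scan update following these steps, in the same batch-wise style as the scan delete shown under @ref secScanLocks below (newValue is a placeholder; error handling omitted):

   @code
   // 1. Scan with exclusive locks so that the rows can be updated
   MyScanOperation->readTuples(NdbOperation::LM_Exclusive);
   MyTransaction->execute(NoCommit);

   while (MyScanOperation->nextResult(true) == 0)
   {
     do
     {
       // 2. Define an update of the row the scan is currently positioned on
       NdbOperation *myUpdateOp= MyScanOperation->updateCurrentTuple();

       // 3. Set the new value; the primary key is taken from the scan,
       //    so equal() is not called here
       myUpdateOp->setValue("ATTR2", newValue);
     } while (MyScanOperation->nextResult(false) == 0);

     // The updates defined for this batch are performed by this execute()
     MyTransaction->execute(NoCommit);
   }
   @endcode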

Features Specific to Index Scans

- - When performing an index scan, it is possible to - scan only a subset of a table using @ref NdbIndexScanOperation::setBound(). - In addition, result sets can be sorted in either ascending or descending order, using - @ref NdbIndexScanOperation::readTuples(). Note that rows are returned unordered - by default, that is, unless sorted is set to true. - It is also important to note that, when using NdbIndexScanOperation::BoundEQ - on a partition key, only fragments containing rows will actually be scanned. - - @note When performing a sorted scan, any value passed as the - NdbIndexScanOperation::readTuples() method's parallel argument - will be ignored and maximum parallelism will be used instead. In other words, all - fragments which it is possible to scan will be scanned simultaneously and in parallel - in such cases. - - @subsection secScanLocks Lock handling with scans - - Performing scans on either a table or an index has the potential - to return a great many records; however, Ndb will lock only a predetermined - number of rows per fragment at a time. - How many rows will be locked per fragment is controlled by the - batch parameter passed to NdbScanOperation::readTuples(). - - In order to allow the application to handle how locks are released, - NdbScanOperation::nextResult() has a Boolean parameter fetch_allow. - If NdbScanOperation::nextResult() is called with fetch_allow equal to - false, then no locks may be released as a result of the function call. - Otherwise the locks for the current batch may be released. - - This next example shows a scan delete that handles locks in an efficient manner. - For the sake of brevity, we omit error-handling. - @code - int check; - - // Outer loop for each batch of rows - while((check = MyScanOperation->nextResult(true)) == 0) - { - do - { - // Inner loop for each row within batch - MyScanOperation->deleteCurrentTuple(); - } while((check = MyScanOperation->nextResult(false)) == 0); - - // When no more rows in batch, execute all defined deletes - MyTransaction->execute(NoCommit); - } - @endcode - - See @ref ndbapi_scan.cpp for a more complete example of a scan. - - @section secError Error Handling - - Errors can occur either when operations making up a transaction are being - defined, or when the transaction is actually being executed. Catching and - handling either sort of error requires testing the value returned by - NdbTransaction::execute(), and then, if an error is indicated (that is, - if this value is equal to -1), using the following two methods in order to - identify the error's type and location: - - - NdbTransaction::getNdbErrorOperation() returns a reference to the - operation causing the most recent error. - - NdbTransaction::getNdbErrorLine() yields the method number of the - erroneous method in the operation. 
- - This short example illustrates how to detect an error and to use these - two methods to identify it: - - @code - theTransaction = theNdb->startTransaction(); - theOperation = theTransaction->getNdbOperation("TEST_TABLE"); - if (theOperation == NULL) goto error; - theOperation->readTuple(NdbOperation::LM_Read); - theOperation->setValue("ATTR_1", at1); - theOperation->setValue("ATTR_2", at1); // Error occurs here - theOperation->setValue("ATTR_3", at1); - theOperation->setValue("ATTR_4", at1); - - if (theTransaction->execute(Commit) == -1) { - errorLine = theTransaction->getNdbErrorLine(); - errorOperation = theTransaction->getNdbErrorOperation(); - } - @endcode - - Here errorLine will be 3, as the error occurred in the - third method called on the NdbOperation object (in this case, - theOperation); if the result of - NdbTransaction::getNdbErrorLine() is 0, this means that the error - occurred when the operations were executed. In this example, - errorOperation will be a pointer to the theOperation - object. The NdbTransaction::getNdbError() method returns an NdbError - object providing information about the error. - - @note Transactions are not automatically closed when an error occurs. Call - Ndb::closeTransaction() to close the transaction. - - One recommended way to handle a transaction failure - (i.e. an error is reported) is to: - -# Rollback transaction (call NdbTransaction::execute() with a special parameter) - -# Close transaction (call NdbTransaction::closeTransaction()) - -# If the error was temporary, attempt to restart the transaction - - Several errors can occur when a transaction contains multiple - operations which are simultaneously executed. - In this case the application has to go through all operations - and query their NdbError objects to find out what really happened. - - It is also important to note that errors can occur even when a commit is - reported as successful. In order to handle such situations, the NDB API - provides an additional NdbTransaction::commitStatus() method to check the - transactions's commit status. - -******************************************************************************/ - -/** - * @page ndbapi_simple.cpp ndbapi_simple.cpp - * @include ndbapi_simple.cpp - */ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - * @page ndbapi_async.cpp ndbapi_async.cpp - * @include ndbapi_async.cpp - */ -/** - * @page ndbapi_async1.cpp ndbapi_async1.cpp - * @include ndbapi_async1.cpp - */ -#endif - -/** - * @page ndbapi_retries.cpp ndbapi_retries.cpp - * @include ndbapi_retries.cpp - */ - -/** - * @page ndbapi_simple_index.cpp ndbapi_simple_index.cpp - * @include ndbapi_simple_index.cpp - */ - -/** - * @page ndbapi_scan.cpp ndbapi_scan.cpp - * @include ndbapi_scan.cpp - */ - -/** - * @page ndbapi_event.cpp ndbapi_event.cpp - * @include ndbapi_event.cpp - */ - - -/** - @page secAdapt Adaptive Send Algorithm - - At the time of "sending" a transaction - (using NdbTransaction::execute()), the transactions - are in reality not immediately transfered to the NDB Kernel. - Instead, the "sent" transactions are only kept in a - special send list (buffer) in the Ndb object to which they belong. - The adaptive send algorithm decides when transactions should - actually be transferred to the NDB kernel. - - The NDB API is designed as a multi-threaded interface and so - it is often desirable to transfer database operations from more than - one thread at a time. 
- The NDB API keeps track of which Ndb objects are active in transferring - information to the NDB kernel and the expected number of threads to - interact with the NDB kernel. - Note that a given instance of Ndb should be used in at most one thread; - different threads should not use the same Ndb object. - - There are four conditions leading to the transfer of database - operations from Ndb object buffers to the NDB kernel: - -# The NDB Transporter (TCP/IP, SCI or shared memory) - decides that a buffer is full and sends it off. - The buffer size is implementation-dependent and - may change between MySQL Cluster releases. - On TCP/IP the buffer size is usually around 64 KB. - Since each Ndb object provides a single buffer per storage node, - the notion of a "full" buffer is local to this storage node. - -# The accumulation of statistical data on transferred information - may force sending of buffers to all storage nodes. - -# Every 10 ms, a special transmission thread checks whether or not - any send activity has occurred. If not, then the thread will - force transmission to all nodes. - This means that 20 ms is the maximum time database operations - are kept waiting before being sent off. The 10-millisecond limit - is likely to become a configuration parameter in - future releases of MySQL Cluster; however, for checks that - are more frequent than each 10 ms, - additional support from the operating system is required. - -# For methods that are affected by the adaptive send algorithm - (such as NdbTransaction::execute()), there is a force - parameter - that overrides its default behaviour in this regard and forces - immediate transmission to all nodes. See the individual NDB API class - listings for more information. - - @note The conditions listed above are subject to change in future releases - of MySQL Cluster. -*/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - - For each of these "sent" transactions, there are three - possible states: - -# Waiting to be transferred to NDB Kernel. - -# Has been transferred to the NDB Kernel and is currently - being processed. - -# Has been transferred to the NDB Kernel and has - finished processing. - Now it is waiting for a call to a poll method. - (When the poll method is invoked, - then the transaction callback method will be executed.) - - The poll method invoked (either Ndb::pollNdb() or Ndb::sendPollNdb()) - will return when: - -# at least 'minNoOfEventsToWakeup' of the transactions - in the send list have transitioned to state 3 as described above, and - -# all of these transactions have executed their callback methods. -*/ -#endif - -/** - @page secConcepts MySQL Cluster Concepts - - The NDB Kernel is the collection of storage nodes - belonging to a MySQL Cluster. - The application programmer can for most purposes view the - set of all storage nodes as a single entity. - Each storage node is made up of three main components: - - TC : The transaction co-ordinator - - ACC : Index storage component - - TUP : Data storage component - - When an application program executes a transaction, - it connects to one transaction co-ordinator on one storage node. - Usually, the programmer does not need to specify which TC should be used, - but in some cases when performance is important, the programmer can - provide "hints" to use a certain TC. - (If the node with the desired transaction co-ordinator is down, then another TC will - automatically take over the work.) 
- - Every storage node has an ACC and a TUP which store - the indexes and data portions of the database table fragment. - Even though one TC is responsible for the transaction, - several ACCs and TUPs on other storage nodes might be involved in the - execution of the transaction. - - - @section secNdbKernelConnection Selecting a Transaction Co-ordinator - - The default method is to select the transaction co-ordinator (TC) determined to be - the "closest" storage node, using a heuristic for proximity based on - the type of transporter connection. In order of closest to most distant, these are - - SCI - - SHM - - TCP/IP (localhost) - - TCP/IP (remote host) - If there are several connections available with the same proximity, they will each be - selected in a round robin fashion for every transaction. Optionally - one may set the method for TC selection to round-robin mode, where each new set of - transactions is placed on the next DB node. The pool of connections from which this - selection is made consists of all available connections. - - As noted previously, the application programmer can provide hints to the NDB API as to - which transaction co-ordinator it should use. This is done by - providing a table and partition key - (usually the primary key). - By using the primary key as the partition key, - the transaction will be placed on the node where the primary replica - of that record resides. - Note that this is only a hint; the system can be - reconfigured at any time, in which case the NDB API will choose a transaction - co-ordinator without using the hint. - For more information, see NdbDictionary::Column::getPartitionKey() and - Ndb::startTransaction(). The application programmer can specify - the partition key from SQL by using the construct, - CREATE TABLE ... ENGINE=NDB PARTITION BY KEY (attribute-list);. - - - @section secRecordStruct NDB Record Structure - The NDB Cluster engine used by MySQL Cluster is a relational database engine - storing records in tables just as with any other RDBMS. - Table rows represent records as tuples of relational data. - When a new table is created, its attribute schema is specified for the table as a whole, - and thus each record of the table has the same structure. Again, this is typical - of relational databases, and NDB is no different in this regard. - - - @subsection secKeys Primary Keys - Each record has from 1 up to 32 attributes which belong - to the primary key of the table. - - @section secTrans Transactions - - Transactions are committed first to main memory, - and then to disk after a global checkpoint (GCP) is issued. - Since all data is (in most NDB Cluster configurations) - synchronously replicated and stored on multiple NDB nodes, - the system can still handle processor failures without loss - of data. - However, in the case of a system failure (e.g. the whole system goes down), - then all (committed or not) transactions occurring since the latest GCP are lost. - - - @subsection secConcur Concurrency Control - NDB Cluster uses pessimistic concurrency control based on locking. - If a requested lock (implicit and depending on database operation) - cannot be attained within a specified time, - then a timeout error occurs. - - Concurrent transactions as requested by parallel application programs and - thread-based applications can sometimes deadlock when they try to access - the same information simultaneously. 
- Thus, applications need to be written in a manner so that timeout errors - occurring due to such deadlocks are handled gracefully. This generally - means that the transaction encountering a timeout should be rolled back - and restarted. - - - @section secHint Hints and Performance - - Placing the transaction co-ordinator in close proximity - to the actual data used in the transaction can in many cases - improve performance significantly. This is particularly true for - systems using TCP/IP. For example, a Solaris system using a single 500 MHz processor - has a cost model for TCP/IP communication which can be represented by the formula - - [30 microseconds] + ([100 nanoseconds] * [number of bytes]) - - This means that if we can ensure that we use "popular" links we increase - buffering and thus drastically reduce the communication cost. - The same system using SCI has a different cost model: - - [5 microseconds] + ([10 nanoseconds] * [number of bytes]) - - Thus, the efficiency of an SCI system is much less dependent on selection of - transaction co-ordinators. - Typically, TCP/IP systems spend 30-60% of their working time on communication, - whereas for SCI systems this figure is closer to 5-10%. - Thus, employing SCI for data transport means that less care from the NDB API - programmer is required and greater scalability can be achieved, even for - applications using data from many different parts of the database. - - A simple example is an application that uses many simple updates where - a transaction needs to update one record. - This record has a 32 bit primary key, - which is also the partition key. - Then the keyData will be the address of the integer - of the primary key and keyLen will be 4. -*/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - (A transaction's execution can also be divided into three - steps: prepare, send, and poll. This allows us to perform asynchronous - transactions. More about this later.) -*/ -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - Another way to execute several parallel transactions is to use - asynchronous transactions. -*/ -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - Operations are of two different kinds: - -# standard operations, and - -# interpreted program operations. -*/ -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** -

Interpreted Program Operations

- The following types of interpreted program operations exist: - -# NdbOperation::interpretedUpdateTuple : - updates a tuple using an interpreted program - -# NdbOperation::interpretedDeleteTuple : - deletes a tuple using an interpreted program - - The operations interpretedUpdateTuple and interpretedDeleteTuple both - work using the unique tuple key. - - These interpreted programs - make it possible to perform computations - inside the NDB Cluster Kernel instead of in the application - program. - This is sometimes very effective, since no intermediate results - are sent to the application, only the final result. - - -
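As a sketch, incrementing a counter column inside the NDB kernel with an interpreted update might be defined as follows (COUNTER_COLUMN is a hypothetical column name, ATTR1 the primary key as in the earlier examples; error handling omitted):

   @code
   // Define an interpreted update of the tuple with primary key ATTR1 = i
   myOperation= myTransaction->getNdbOperation(myTable);
   myOperation->interpretedUpdateTuple();
   myOperation->equal("ATTR1", i);

   // Interpreted part: add 1 to COUNTER_COLUMN without reading it back
   myOperation->incValue("COUNTER_COLUMN", (Uint32)1);

   myTransaction->execute(Commit);
   @endcode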

Interpreted Update and Delete

- - Operations for interpreted updates and deletes must follow a - certain order when defining operations on a tuple. - As for read and write operations, - one must first define the operation type and then the search key. - -# The first step is to define the initial readings. - In this phase it is only allowed to use the - NdbOperation::getValue method. - This part might be empty. - -# The second step is to define the interpreted part. - The methods supported are the methods listed below except - NdbOperation::def_subroutine and NdbOperation::ret_sub - which can only be used in a subroutine. - NdbOperation::incValue and NdbOperation::subValue - increment and decrement attributes - (currently only unsigned integers supported). - This part can also be empty since interpreted updates - can be used for reading and updating the same tuple. -

- Even though getValue and setValue are not really interpreted - program instructions, it is still allowed to use them as - the last instruction of the program. - (If a getValue or setValue is found when an interpret_exit_ok - could have been issued then an interpret_exit_ok - will be inserted. - An interpret_exit_ok should be viewed as a jump to the first - instruction after the interpreted instructions.) - -# The third step is to define all updates without any - interpreted program instructions. - Here a set of NdbOperation::setValue methods are called. - There might be zero such calls. - -# The fourth step is the final readings. - The initial readings read the initial values of attributes - and the final readings read them after their updates. - There might be zero NdbOperation::getValue calls. - -# The fifth step is possible subroutine definitions using - NdbOperation::def_subroutine and NdbOperation::ret_sub. -*/ -#endif -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** -

Interpreted Programs

- Interpreted programs are executed in a - register-based virtual machine. - The virtual machine has eight 64 bit registers numbered 0-7. - Each register contains type information which is used both - for type conversion and for type checking. - - @note Arrays are currently not supported in the virtual machine. - Currently only unsigned integers of at most - 64 bits are supported. - - All errors in the interpreted program will cause a - transaction abort, but will not affect any other transactions. - - The following are legal interpreted program instructions: - -# incValue : Add to an attribute - -# subValue : Subtract from an attribute - -# def_label : Define a label in the interpreted program - -# add_reg : Add two registers - -# sub_reg : Subtract one register from another - -# load_const_u32 : Load an unsigned 32 bit value into a register - -# load_const_u64 : Load an unsigned 64 bit value into a register - -# load_const_null : Load a NULL value into a register - -# read_attr : Read attribute value into a register - -# write_attr : Write a register value into an attribute - -# branch_ge : Compares registers and possibly jumps to specified label - -# branch_gt : Compares registers and possibly jumps to specified label - -# branch_le : Compares registers and possibly jumps to specified label - -# branch_lt : Compares registers and possibly jumps to specified label - -# branch_eq : Compares registers and possibly jumps to specified label - -# branch_ne : Compares registers and possibly jumps to specified label - -# branch_ne_null : Jumps if register does not contain NULL value - -# branch_eq_null : Jumps if register contains NULL value - -# branch_label : Unconditional jump to label - -# interpret_exit_ok : Exit interpreted program - (approving tuple if used in scan) - -# interpret_exit_nok : Exit interpreted program - (disqualifying tuple if used in scan) - - There are also three instructions for subroutines, which - are described in the next section. - - @subsection subsubSub Interpreted Programs: Subroutines - - The following are legal interpreted program instructions for - subroutines: - -# NdbOperation::def_subroutine : - Defines start of subroutine in interpreted program code - -# NdbOperation::call_sub : - Calls a subroutine - -# NdbOperation::ret_sub : - Return from subroutine - - The virtual machine executes subroutines using a stack for - its operation. - The stack allows for up to 24 subroutine calls in succession. - Deeper subroutine nesting will cause an abort of the transaction. - - All subroutines start with the instruction - NdbOperation::def_subroutine and end with the instruction - NdbOperation::ret_sub. - If it is necessary to return earlier in the subroutine - it has to be done using a branch_label instruction - to a label defined right before the - NdbOperation::ret_sub instruction. - - @note The subroutines are automatically numbered starting with 0. - The parameter used by NdbOperation::def_subroutine - should match the automatic numbering to make it easier to - debug the interpreted program. -*/ -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - @section secAsync Asynchronous Transactions - The asynchronous interface is used to increase the speed of - transaction execution by better utilizing the connection - between the application and the NDB Kernel. - The interface is used to send many transactions - at the same time to the NDB kernel. - This is often much more efficient than using synchronous transactions. 
- The main reason for using this method is that sending many - transactions at the same time ensures that bigger - chunks of data are sent when actually sending, thus decreasing - the operating system overhead. - - The synchronous call to NdbTransaction::execute - normally performs three main steps:
- -# Prepare - Check transaction status - - if problems, abort the transaction - - if ok, proceed - -# Send - Send the defined operations since last execute - or since start of transaction. - -# Poll - Wait for response from NDB kernel. - - The asynchronous method NdbTransaction::executeAsynchPrepare - only performs step 1. - (The abort part in step 1 is only prepared for. The actual - aborting of the transaction is performed in a later step.) - - Asynchronous transactions are defined and executed - in the following way. - -# Start (create) transactions (same way as for the - synchronous transactions) - -# Add and define operations (also as in the synchronous case) - -# Prepare transactions - (using NdbTransaction::executeAsynchPrepare or - NdbTransaction::executeAsynch) - -# Send transactions to NDB Kernel - (using Ndb::sendPreparedTransactions, - NdbTransaction::executeAsynch, or Ndb::sendPollNdb) - -# Poll NDB kernel to find completed transactions - (using Ndb::pollNdb or Ndb::sendPollNdb) - -# Close transactions (same way as for the synchronous transactions) - - See example program in section @ref ndbapi_example2.cpp. - - This prepare-send-poll protocol actually exists in four variants: - - (Prepare-Send-Poll). This is the one-step variant provided - by synchronous transactions. - - (Prepare-Send)-Poll. This is the two-step variant using - NdbTransaction::executeAsynch and Ndb::pollNdb. - - Prepare-(Send-Poll). This is the two-step variant using - NdbTransaction::executeAsynchPrepare and Ndb::sendPollNdb. - - Prepare-Send-Poll. This is the three-step variant using - NdbTransaction::executeAsynchPrepare, Ndb::sendPreparedTransactions, and - Ndb::pollNdb. - - Transactions first have to be prepared by using the method - NdbTransaction::executeAsynchPrepare or NdbTransaction::executeAsynch. - The difference between these is that - NdbTransaction::executeAsynch also sends the transaction to - the NDB kernel. - One of the arguments to these methods is a callback method. - The callback method is executed during polling (item 5 above). - - Note that NdbTransaction::executeAsynchPrepare does not - send the transaction to the NDB kernel. When using - NdbTransaction::executeAsynchPrepare, you either have to call - Ndb::sendPreparedTransactions or Ndb::sendPollNdb to send the - database operations. - (Ndb::sendPollNdb also polls Ndb for completed transactions.) - - The methods Ndb::pollNdb and Ndb::sendPollNdb check whether any - sent transactions are completed. The method Ndb::sendPollNdb - also sends all prepared transactions before polling NDB. - Transactions still in the definition phase (i.e. items 1-3 above, - transactions which have not yet been sent to the NDB kernel) are not - affected by poll-calls. - The poll method invoked (either Ndb::pollNdb or Ndb::sendPollNdb) - will return when: - -# at least 'minNoOfEventsToWakeup' of the transactions - are finished processing, and - -# all of these transactions have executed their - callback methods. - - The poll method returns the number of transactions that - have finished processing and executed their callback methods. - - @note When an asynchronous transaction has been started and sent to - the NDB kernel, it is not allowed to execute any methods on - objects belonging to this transaction until the transaction - callback method has been executed. 
- (The transaction is stated and sent by either - NdbTransaction::executeAsynch or through the combination of - NdbTransaction::executeAsynchPrepare and either - Ndb::sendPreparedTransactions or Ndb::sendPollNdb). - - More about how transactions are sent the NDB Kernel is - available in section @ref secAdapt. -*/ -#endif - - -/** - - Put this back when real array ops are supported - i.e. get/setValue("kalle[3]"); - - @subsection secArrays Array Attributes - A table attribute in NDB Cluster can be of type Array, - meaning that the attribute consists of an ordered sequence of - elements. In such cases, attribute size is the size - (expressed in bits) of any one element making up the array; the - array size is the number of elements in the array. - -*/ - -#ifndef Ndb_H -#define Ndb_H - -#include -#include -#include -#include -#include - -class NdbObjectIdMap; -class NdbOperation; -class NdbEventOperationImpl; -class NdbScanOperation; -class NdbIndexScanOperation; -class NdbIndexOperation; -class NdbTransaction; -class NdbApiSignal; -class NdbRecAttr; -class NdbLabel; -class NdbBranch; -class NdbSubroutine; -class NdbCall; -class Table; -class BaseString; -class NdbEventOperation; -class NdbBlob; -class NdbReceiver; -class TransporterFacade; -class PollGuard; -class Ndb_local_table_info; -template struct Ndb_free_list_t; - -typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*); - -#define WAITFOR_RESPONSE_TIMEOUT 120000 // Milliseconds - -#define NDB_SYSTEM_DATABASE "sys" -#define NDB_SYSTEM_SCHEMA "def" - -/** - * @class Ndb - * @brief Represents the NDB kernel and is the main class of the NDB API. - * - * Always start your application program by creating an Ndb object. - * By using several Ndb objects it is possible to design - * a multi-threaded application, but note that Ndb objects - * cannot be shared by several threads. - * Different threads should use different Ndb objects. - * A thread might however use multiple Ndb objects. - * Currently there is a limit of maximum 128 Ndb objects - * per application process. - * - * @note It is not allowed to call methods in the NDB API - * on the same Ndb object in different threads - * simultaneously (without special handling of the - * Ndb object). - * - * @note The Ndb object is multi-thread safe in the following manner. - * Each Ndb object can ONLY be handled in one thread. - * If an Ndb object is handed over to another thread then the - * application must ensure that a memory barrier is used to - * ensure that the new thread see all updates performed by - * the previous thread. - * Semaphores, mutexes and so forth are easy ways of issuing memory - * barriers without having to bother about the memory barrier concept. - * - */ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -// to be documented later -/* - * If one Ndb object is used to handle parallel transactions through the - * asynchronous programming interface, please read the notes regarding - * asynchronous transactions (Section @ref secAsync). - * The asynchronous interface provides much higher performance - * in some situations, but is more complicated for the application designer. - * - * @note Each Ndb object should either use the methods for - * asynchronous transaction or the methods for - * synchronous transactions but not both. 
- */ -#endif - -class Ndb -{ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbReceiver; - friend class NdbOperation; - friend class NdbEventOperationImpl; - friend class NdbEventBuffer; - friend class NdbTransaction; - friend class Table; - friend class NdbApiSignal; - friend class NdbIndexOperation; - friend class NdbScanOperation; - friend class NdbIndexScanOperation; - friend class NdbDictionaryImpl; - friend class NdbDictInterface; - friend class NdbBlob; - friend class NdbImpl; - friend class Ndb_internal; - friend class NdbScanFilterImpl; -#endif - -public: - /** - * @name General - * @{ - */ - /** - * The Ndb object represents a connection to a database. - * - * @note The init() method must be called before the Ndb object may actually be used. - * - * @param ndb_cluster_connection is a connection to the cluster containing - * the database to be used - * @param aCatalogName is the name of the catalog to be used. - * @note The catalog name provides a namespace for the tables and - * indexes created in any connection from the Ndb object. - * @param aSchemaName is the name of the schema you - * want to use. - * @note The schema name provides an additional namespace - * for the tables and indexes created in a given catalog. - */ - Ndb(Ndb_cluster_connection *ndb_cluster_connection, - const char* aCatalogName = "", const char* aSchemaName = "def"); - - ~Ndb(); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * The current ndb_cluster_connection get_ndb_cluster_connection. - * - * @return the current connection - */ - Ndb_cluster_connection& get_ndb_cluster_connection(); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * The current catalog name can be fetched by getCatalogName. - * - * @return the current catalog name - */ - const char * getCatalogName() const; - - /** - * The current catalog name can be set by setCatalogName. - * - * @param aCatalogName is the new name of the current catalog - */ - int setCatalogName(const char * aCatalogName); - - /** - * The current schema name can be fetched by getSchemaName. - * - * @return the current schema name - */ - const char * getSchemaName() const; - - /** - * The current schema name can be set by setSchemaName. - * - * @param aSchemaName is the new name of the current schema - */ - int setSchemaName(const char * aSchemaName); -#endif - - /** - * The current database name can be fetched by getDatabaseName. - * - * @return the current database name - */ - const char * getDatabaseName() const; - - /** - * The current database name can be set by setDatabaseName. - * - * @param aDatabaseName is the new name of the current database - */ - int setDatabaseName(const char * aDatabaseName); - - /** - * The current database schema name can be fetched by getDatabaseSchemaName. - * - * @return the current database schema name - */ - const char * getDatabaseSchemaName() const; - - /** - * The current database schema name can be set by setDatabaseSchemaName. 
- * - * @param aDatabaseSchemaName is the new name of the current database schema - */ - int setDatabaseSchemaName(const char * aDatabaseSchemaName); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** Set database and schema name to match previously retrieved table - * - * Returns non-zero if table internal name does not contain - * non-empty database and schema names - */ - int setDatabaseAndSchemaName(const NdbDictionary::Table* t); -#endif - - /** - * Initializes the Ndb object - * - * @param maxNoOfTransactions - * Maximum number of parallel - * NdbTransaction objects that can be handled by the Ndb object. - * Maximum value is 1024. - * - * @note each scan or index scan operation uses one extra - * NdbTransaction object - * - * @return 0 if successful, -1 otherwise. - */ - int init(int maxNoOfTransactions = 4); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Wait for Ndb object to successfully set-up connections to - * the NDB kernel. - * Starting to use the Ndb object without using this method - * gives unspecified behavior. - * - * @param timeout The maximum time we will wait for - * the initiation process to finish. - * Timeout is expressed in seconds. - * @return 0: Ndb is ready and timeout has not occurred.
- * -1: Timeout has expired - */ - int waitUntilReady(int timeout = 60); -#endif - - /** @} *********************************************************************/ - - /** - * @name Meta Information - * @{ - */ - - /** - * Get an object for retrieving or manipulating database schema information - * - * @note this object operates outside any transaction - * - * @return Object containing meta information about all tables - * in NDB Cluster. - */ - class NdbDictionary::Dictionary* getDictionary() const; - - - /** @} *********************************************************************/ - - /** - * @name Event subscriptions - * @{ - */ - - /** - * Create a subcription to an event defined in the database - * - * @param eventName - * unique identifier of the event - * - * @return Object representing an event, NULL on failure - */ - NdbEventOperation* createEventOperation(const char* eventName); - /** - * Drop a subscription to an event - * - * @param eventOp - * Event operation - * - * @return 0 on success - */ - int dropEventOperation(NdbEventOperation* eventOp); - - /** - * Wait for an event to occur. Will return as soon as an event - * is detected on any of the created events. - * - * @param aMillisecondNumber - * maximum time to wait - * - * @return > 0 if events available, 0 if no events available, < 0 on failure - */ - int pollEvents(int aMillisecondNumber, Uint64 *latestGCI= 0); - - /** - * Returns an event operation that has data after a pollEvents - * - * @return an event operations that has data, NULL if no events left with data. - */ - NdbEventOperation *nextEvent(); - - /** - * Iterate over distinct event operations which are part of current - * GCI. Valid after nextEvent. Used to get summary information for - * the epoch (e.g. list of all tables) before processing event data. - * - * Set *iter=0 to start. Returns NULL when no more. If event_types - * is not NULL, it returns bitmask of received event types. - */ - const NdbEventOperation* - getGCIEventOperations(Uint32* iter, Uint32* event_types); - - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - int flushIncompleteEvents(Uint64 gci); - NdbEventOperation *getEventOperation(NdbEventOperation* eventOp= 0); - Uint64 getLatestGCI(); - void forceGCP(); - void setReportThreshEventGCISlip(unsigned thresh); - void setReportThreshEventFreeMem(unsigned thresh); -#endif - - /** @} *********************************************************************/ - - /** - * @name Starting and Closing Transactions - * @{ - */ - - /** - * Structure for passing in pointers to startTransaction - * - */ - struct Key_part_ptr - { - const void * ptr; - unsigned len; - }; - - /** - * Start a transaction - * - * @note When the transaction is completed it must be closed using - * Ndb::closeTransaction or NdbTransaction::close. - * The transaction must be closed independent of its outcome, i.e. - * even if there is an error. - * - * @param table Pointer to table object used for deciding - * which node to run the Transaction Coordinator on - * @param keyData Pointer to partition key corresponding to - * table - * @param keyLen Length of partition key expressed in bytes - * - * @return NdbTransaction object, or NULL on failure. 
- */ - NdbTransaction* startTransaction(const NdbDictionary::Table *table= 0, - const char *keyData = 0, - Uint32 keyLen = 0); - - /** - * Compute hash value given table/keys - * - * @param hashvalueptr - OUT, is set to hashvalue if return value is 0 - * @param table Pointer to table object - * @param keyData Null-terminated array of pointers to keyParts that is - * part of distribution key. - * Length of resp. keyPart will be read from - * metadata and checked against passed value - * @param xfrmbuf Pointer to temporary buffer that will be used - * to calculate hashvalue - * @param xfrmbuflen Lengh of buffer - * - * @note if xfrmbuf is null (default) malloc/free will be made - * if xfrmbuf is not null but length is too short, method will fail - * - * @return 0 - ok - hashvalueptr is set - * else - fail, return error code - */ - static int computeHash(Uint32* hashvalueptr, - const NdbDictionary::Table*, - const struct Key_part_ptr * keyData, - void* xfrmbuf = 0, Uint32 xfrmbuflen = 0); - - /** - * Close a transaction. - * - * @note should be called after the transaction has completed, irrespective - * of success or failure - */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * @note It is not allowed to call Ndb::closeTransaction after sending the - * transaction asynchronously with either - * Ndb::sendPreparedTransactions or - * Ndb::sendPollNdb before the callback method has been called. - * (The application should keep track of the number of - * outstanding transactions and wait until all of them - * has completed before calling Ndb::closeTransaction). - * If the transaction is not committed it will be aborted. - */ -#endif - void closeTransaction(NdbTransaction*); - - /** @} *********************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - // to be documented later - /** - * @name Asynchronous Transactions - * @{ - */ - - /** - * Wait for prepared transactions. - * Will return as soon as at least 'minNoOfEventsToWakeUp' - * of them have completed, or the maximum time given as timeout has passed. - * - * @param aMillisecondNumber - * Maximum time to wait for transactions to complete. Polling - * without wait is achieved by setting the timer to zero. - * Time is expressed in milliseconds. - * @param minNoOfEventsToWakeup Minimum number of transactions - * which has to wake up before the poll-call will return. - * If minNoOfEventsToWakeup is - * set to a value larger than 1 then this is the minimum - * number of transactions that need to complete before the - * poll will return. - * Setting it to zero means that one should wait for all - * outstanding transactions to return before waking up. - * @return Number of transactions polled. - */ - int pollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT, - int minNoOfEventsToWakeup = 1); - - /** - * This send method will send all prepared database operations. - * The default method is to do it non-force and instead - * use the adaptive algorithm. (See Section @ref secAdapt.) - * The second option is to force the sending and - * finally there is the third alternative which is - * also non-force but also making sure that the - * adaptive algorithm do not notice the send. - * In this case the sending will be performed on a - * cyclical 10 millisecond event. - * - * @param forceSend When operations should be sent to NDB Kernel. - * (See @ref secAdapt.) 
- * - 0: non-force, adaptive algorithm notices it (default); - * - 1: force send, adaptive algorithm notices it; - * - 2: non-force, adaptive algorithm does not notice the send. - */ - void sendPreparedTransactions(int forceSend = 0); - - /** - * This is a send-poll variant that first calls - * Ndb::sendPreparedTransactions and then Ndb::pollNdb. - * It is however somewhat faster than calling the methods - * separately, since some mutex-operations are avoided. - * See documentation of Ndb::pollNdb and Ndb::sendPreparedTransactions - * for more details. - * - * @param aMillisecondNumber Timeout specifier - * Polling without wait is achieved by setting the - * millisecond timer to zero. - * @param minNoOfEventsToWakeup Minimum number of transactions - * which have to wake up before the poll-call will return. - * If minNoOfEventsToWakeup is - * set to a value larger than 1 then this is the minimum - * number of transactions that need to complete before the - * poll-call will return. - * Setting it to zero means that one should wait for all - * outstanding transactions to return before waking up. - * @param forceSend When operations should be sent to NDB Kernel. - * (See @ref secAdapt.) - * - 0: non-force, adaptive algorithm notices it (default); - * - 1: force send, adaptive algorithm notices it; - * - 2: non-force, adaptive algorithm does not notice the send. - * @return Number of transactions polled. - */ - int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT, - int minNoOfEventsToWakeup = 1, - int forceSend = 0); - /** @} *********************************************************************/ -#endif - - /** - * @name Error Handling - * @{ - */ - - /** - * Get the NdbError object - * - * @note The NdbError object is valid until a new NDB API method is called. - */ - const NdbError & getNdbError() const; - - /** - * Get an NdbError object for a specific error code - * - * The NdbError object is valid until you call a new NDB API method. - */ - const NdbError & getNdbError(int errorCode); - - - /** @} *********************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Get the application node identity. - * - * @return Node id of this application. - */ - int getNodeId(); - - bool usingFullyQualifiedNames(); - - /** - * Different types of tampering with the NDB Cluster. - * For debugging purposes only. - */ - enum TamperType { - LockGlbChp = 1, ///< Lock GCP - UnlockGlbChp, ///< Unlock GCP - CrashNode, ///< Crash an NDB node - ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster - InsertError ///< Execute an error in NDB Cluster - ///< (may crash system) - }; - - /** - * For testing purposes it is possible to tamper with the NDB Cluster - * (i.e. send a special signal to DBDIH, the NDB distribution handler). - * This feature should only be used for debugging purposes. - * In release versions of NDB Cluster, - * this call always returns -1 and does nothing. - * - * @param aAction Action to be taken according to TamperType above - * - * @param aNode Which node the action will be taken on - * -1: Master DIH. - * 0-16: Node number. - * @return -1 indicates error, other values have meaning dependent - * on type of tampering. - */ - int NdbTamper(TamperType aAction, int aNode); - - /** - * Return a unique tuple id for a table. The id sequence is - * ascending but may contain gaps. Methods which have no - * TupleIdRange argument use NDB API dict cache. They may - * not be called from mysqld. 
- * - * @param aTableName table name - * - * @param cacheSize number of values to cache in this Ndb object - * - * @return 0 or -1 on error, and tupleId in out parameter - */ - struct TupleIdRange { - TupleIdRange() {} - Uint64 m_first_tuple_id; - Uint64 m_last_tuple_id; - Uint64 m_highest_seen; - void reset() { - m_first_tuple_id = ~(Uint64)0; - m_last_tuple_id = ~(Uint64)0; - m_highest_seen = 0; - }; - }; - - int initAutoIncrement(); - - int getAutoIncrementValue(const char* aTableName, - Uint64 & autoValue, Uint32 cacheSize, - Uint64 step = 1, Uint64 start = 1); - int getAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & autoValue, Uint32 cacheSize, - Uint64 step = 1, Uint64 start = 1); - int getAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & autoValue, - Uint32 cacheSize, - Uint64 step = 1, Uint64 start = 1); - int readAutoIncrementValue(const char* aTableName, - Uint64 & autoValue); - int readAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 & autoValue); - int readAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 & autoValue); - int setAutoIncrementValue(const char* aTableName, - Uint64 autoValue, bool modify); - int setAutoIncrementValue(const NdbDictionary::Table * aTable, - Uint64 autoValue, bool modify); - int setAutoIncrementValue(const NdbDictionary::Table * aTable, - TupleIdRange & range, Uint64 autoValue, - bool modify); - bool checkUpdateAutoIncrementValue(TupleIdRange & range, Uint64 autoValue); -private: - int getTupleIdFromNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 & tupleId, - Uint32 cacheSize, Uint64 step = 1, Uint64 start = 1); - int readTupleIdFromNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 & tupleId); - int setTupleIdInNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 tupleId, bool modify); - int checkTupleIdInNdb(TupleIdRange & range, - Uint64 tupleId); - int opTupleIdOnNdb(const NdbTableImpl* table, - TupleIdRange & range, Uint64 & opValue, Uint32 op); -public: - - /** - */ - NdbTransaction* hupp( NdbTransaction* ); - Uint32 getReference() const { return theMyRef;} - - struct Free_list_usage - { - const char * m_name; - Uint32 m_created; - Uint32 m_free; - Uint32 m_sizeof; - }; - - Free_list_usage * get_free_list_usage(Free_list_usage*); -#endif - - - -/***************************************************************************** - * These are service routines used by the other classes in the NDBAPI. - ****************************************************************************/ - Uint32 get_cond_wait_index() { return cond_wait_index; } - void set_cond_wait_index(Uint32 index) { cond_wait_index = index; } -private: - Uint32 cond_wait_index; - Ndb *cond_signal_ndb; - void cond_signal(); - - void setup(Ndb_cluster_connection *ndb_cluster_connection, - const char* aCatalogName, const char* aSchemaName); - - void connected(Uint32 block_reference); - void report_node_connected(Uint32 nodeId); - - - NdbTransaction* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId); - -// Connect the connection object to the Database. 
- int NDB_connect(Uint32 tNode); - NdbTransaction* doConnect(Uint32 nodeId); - void doDisconnect(); - - NdbReceiver* getNdbScanRec();// Get a NdbScanReceiver from idle list - NdbLabel* getNdbLabel(); // Get a NdbLabel from idle list - NdbBranch* getNdbBranch(); // Get a NdbBranch from idle list - NdbSubroutine* getNdbSubroutine();// Get a NdbSubroutine from idle - NdbCall* getNdbCall(); // Get a NdbCall from idle list - NdbApiSignal* getSignal(); // Get an operation from idle list - NdbRecAttr* getRecAttr(); // Get a receeive attribute object from - // idle list of the Ndb object. - NdbOperation* getOperation(); // Get an operation from idle list - NdbIndexScanOperation* getScanOperation(); // Get a scan operation from idle - NdbIndexOperation* getIndexOperation();// Get an index operation from idle - - NdbBlob* getNdbBlob();// Get a blob handle etc - - void releaseSignal(NdbApiSignal* anApiSignal); - void releaseSignalsInList(NdbApiSignal** pList); - void releaseNdbScanRec(NdbReceiver* aNdbScanRec); - void releaseNdbLabel(NdbLabel* anNdbLabel); - void releaseNdbBranch(NdbBranch* anNdbBranch); - void releaseNdbSubroutine(NdbSubroutine* anNdbSubroutine); - void releaseNdbCall(NdbCall* anNdbCall); - void releaseRecAttr (NdbRecAttr* aRecAttr); - void releaseOperation(NdbOperation* anOperation); - void releaseScanOperation(NdbIndexScanOperation*); - void releaseNdbBlob(NdbBlob* aBlob); - - void check_send_timeout(); - void remove_sent_list(Uint32); - Uint32 insert_completed_list(NdbTransaction*); - Uint32 insert_sent_list(NdbTransaction*); - - // Handle a received signal. Used by both - // synchronous and asynchronous interface - void handleReceivedSignal(NdbApiSignal* anApiSignal, struct LinearSectionPtr ptr[3]); - - int sendRecSignal(Uint16 aNodeId, - Uint32 aWaitState, - NdbApiSignal* aSignal, - Uint32 nodeSequence, - Uint32 *ret_conn_seq= 0); - - // Sets Restart GCI in Ndb object - void RestartGCI(int aRestartGCI); - - // Get block number of this NDBAPI object - int getBlockNumber(); - - /**************************************************************************** - * These are local service routines used by this class. - ***************************************************************************/ - - int createConIdleList(int aNrOfCon); - int createOpIdleList( int nrOfOp ); - - void freeOperation(); // Free the first idle operation. - void freeScanOperation(); // Free the first idle scan operation. - void freeIndexOperation(); // Free the first idle index operation. - void freeNdbCon(); // Free the first idle connection. 
- void freeSignal(); // Free the first idle signal - void freeRecAttr(); // Free the first idle receive attr obj - void freeNdbLabel(); // Free the first idle NdbLabel obj - void freeNdbBranch();// Free the first idle NdbBranch obj - void freeNdbSubroutine();// Free the first idle NdbSubroutine obj - void freeNdbCall(); // Free the first idle NdbCall obj - void freeNdbScanRec(); // Free the first idle NdbScanRec obj - void freeNdbBlob(); // Free the first etc - - NdbTransaction* getNdbCon(); // Get a connection from idle list - - /** - * Get a connected NdbTransaction to nodeId - * Returns NULL if none found - */ - NdbTransaction* getConnectedNdbTransaction(Uint32 nodeId); - - // Release and disconnect from DBTC a connection - // and seize it to theConIdleList - void releaseConnectToNdb (NdbTransaction*); - - // Release a connection to idle list - void releaseNdbCon (NdbTransaction*); - - int checkInitState(); // Check that we are initialized - void report_node_failure(Uint32 node_id); // Report Failed node - void report_node_failure_completed(Uint32 node_id); // Report Failed node(NF comp.) - - void checkFailedNode(); // Check for failed nodes - - int NDB_connect(); // Perform connect towards NDB Kernel - - // Release arrays of NdbTransaction pointers - void releaseTransactionArrays(); - - Uint32 pollCompleted(NdbTransaction** aCopyArray); - void sendPrepTrans(int forceSend); - void reportCallback(NdbTransaction** aCopyArray, Uint32 aNoOfComplTrans); - int poll_trans(int milliSecs, int noOfEventsToWaitFor, PollGuard *pg); - void waitCompletedTransactions(int milliSecs, int noOfEventsToWaitFor, - PollGuard *pg); - void completedTransaction(NdbTransaction* aTransaction); - void completedScanTransaction(NdbTransaction* aTransaction); - - void abortTransactionsAfterNodeFailure(Uint16 aNodeId); - - static - const char * externalizeTableName(const char * internalTableName, - bool fullyQualifiedNames); - const char * externalizeTableName(const char * internalTableName); - const BaseString internalize_table_name(const char * external_name) const; - - static - const char * externalizeIndexName(const char * internalIndexName, - bool fullyQualifiedNames); - const char * externalizeIndexName(const char * internalIndexName); - const BaseString old_internalize_index_name(const NdbTableImpl * table, - const char * external_name) const; - const BaseString internalize_index_name(const NdbTableImpl * table, - const char * external_name) const; - - static - const BaseString getDatabaseFromInternalName(const char * internalName); - static - const BaseString getSchemaFromInternalName(const char * internalName); - - void* int2void (Uint32 val); - NdbReceiver* void2rec (void* val); - NdbTransaction* void2con (void* val); - NdbOperation* void2rec_op (void* val); - NdbIndexOperation* void2rec_iop (void* val); - -/****************************************************************************** - * These are the private variables in this class. 
- *****************************************************************************/ - NdbTransaction** thePreparedTransactionsArray; - NdbTransaction** theSentTransactionsArray; - NdbTransaction** theCompletedTransactionsArray; - - Uint32 theNoOfPreparedTransactions; - Uint32 theNoOfSentTransactions; - Uint32 theNoOfCompletedTransactions; - Uint32 theRemainingStartTransactions; - Uint32 theMaxNoOfTransactions; - Uint32 theMinNoOfEventsToWakeUp; - - Uint32 theNextConnectNode; - - bool fullyQualifiedNames; - - - - class NdbImpl * theImpl; - class NdbDictionaryImpl* theDictionary; - class NdbEventBuffer* theEventBuffer; - - NdbTransaction* theTransactionList; - NdbTransaction** theConnectionArray; - - Uint32 theMyRef; // My block reference - Uint32 theNode; // The node number of our node - - Uint64 the_last_check_time; - Uint64 theFirstTransId; - // The tupleId is retrieved from DB - const NdbDictionary::Table *m_sys_tab_0; - - Uint32 theRestartGCI; // the Restart GCI used by DIHNDBTAMPER - - NdbError theError; - - Int32 theNdbBlockNumber; - - enum InitType { - NotConstructed, - NotInitialised, - StartingInit, - Initialised, - InitConfigError - } theInitState; - - NdbApiSignal* theCommitAckSignal; - - -#ifdef POORMANSPURIFY - int cfreeSignals; - int cnewSignals; - int cgetSignals; - int creleaseSignals; -#endif - - static void executeMessage(void*, NdbApiSignal *, - struct LinearSectionPtr ptr[3]); - static void statusMessage(void*, Uint32, bool, bool); -#ifdef VM_TRACE - void printState(const char* fmt, ...); -#endif -}; - -#endif diff --git a/storage/ndb/include/ndbapi/NdbApi.hpp b/storage/ndb/include/ndbapi/NdbApi.hpp deleted file mode 100644 index d3350557092..00000000000 --- a/storage/ndb/include/ndbapi/NdbApi.hpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbApi_H -#define NdbApi_H - -#include "ndb_init.h" -#include "ndb_cluster_connection.hpp" -#include "ndbapi_limits.h" -#include "Ndb.hpp" -#include "NdbTransaction.hpp" -#include "NdbOperation.hpp" -#include "NdbScanOperation.hpp" -#include "NdbIndexOperation.hpp" -#include "NdbIndexScanOperation.hpp" -#include "NdbScanFilter.hpp" -#include "NdbRecAttr.hpp" -#include "NdbDictionary.hpp" -#include "NdbEventOperation.hpp" -#include "NdbPool.hpp" -#include "NdbBlob.hpp" -#endif diff --git a/storage/ndb/include/ndbapi/NdbBlob.hpp b/storage/ndb/include/ndbapi/NdbBlob.hpp deleted file mode 100644 index 9df439d1d7f..00000000000 --- a/storage/ndb/include/ndbapi/NdbBlob.hpp +++ /dev/null @@ -1,410 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbBlob_H -#define NdbBlob_H - -#include -#include -#include -#include - -class Ndb; -class NdbTransaction; -class NdbOperation; -class NdbRecAttr; -class NdbTableImpl; -class NdbColumnImpl; -class NdbEventOperationImpl; - -/** - * @class NdbBlob - * @brief Blob handle - * - * Blob data is stored in 2 places: - * - * - "header" and "inline bytes" stored in the blob attribute - * - "blob parts" stored in a separate table NDB$BLOB__ - * - * Inline and part sizes can be set via NdbDictionary::Column methods - * when the table is created. - * - * NdbBlob is a blob handle. To access blob data, the handle must be - * created using NdbOperation::getBlobHandle in operation prepare phase. - * The handle has following states: - * - * - prepared: before the operation is executed - * - active: after execute or next result but before transaction commit - * - closed: after transaction commit - * - invalid: after rollback or transaction close - * - * NdbBlob supports 3 styles of data access: - * - * - in prepare phase, NdbBlob methods getValue and setValue are used to - * prepare a read or write of a blob value of known size - * - * - in prepare phase, setActiveHook is used to define a routine which - * is invoked as soon as the handle becomes active - * - * - in active phase, readData and writeData are used to read or write - * blob data of arbitrary size - * - * The styles can be applied in combination (in above order). - * - * Blob operations take effect at next transaction execute. In some - * cases NdbBlob is forced to do implicit executes. To avoid this, - * operate on complete blob parts. - * - * Use NdbTransaction::executePendingBlobOps to flush your reads and - * writes. It avoids execute penalty if nothing is pending. It is not - * needed after execute (obviously) or after next scan result. - * - * NdbBlob also supports reading post or pre blob data from events. The - * handle can be read after next event on main table has been retrieved. - * The data is available immediately. See NdbEventOperation. - * - * Non-void NdbBlob methods return -1 on error and 0 on success. Output - * parameters are used when necessary. 
- * - * Usage notes for different operation types: - * - * - insertTuple must use setValue if blob attribute is non-nullable - * - * - readTuple or scan readTuples with lock mode LM_CommittedRead is - * automatically upgraded to lock mode LM_Read if any blob attributes - * are accessed (to guarantee consistent view) - * - * - readTuple (with any lock mode) can only read blob value - * - * - updateTuple can either overwrite existing value with setValue or - * update it in active phase - * - * - writeTuple always overwrites blob value and must use setValue if - * blob attribute is non-nullable - * - * - deleteTuple creates implicit non-accessible blob handles - * - * - scan readTuples (any lock mode) can use its blob handles only - * to read blob value - * - * - scan readTuples with lock mode LM_Exclusive can update row and blob - * value using updateCurrentTuple, where the operation returned must - * create its own blob handles explicitly - * - * - scan readTuples with lock mode LM_Exclusive can delete row (and - * therefore blob values) using deleteCurrentTuple, which creates - * implicit non-accessible blob handles - * - * - the operation returned by lockCurrentTuple cannot update blob value - * - * Bugs / limitations: - * - * - too many pending blob ops can blow up i/o buffers - * - * - table and its blob part tables are not created atomically - */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - * - there is no support for an asynchronous interface - */ -#endif - -class NdbBlob { -public: - /** - * State. - */ - enum State { - Idle = 0, - Prepared = 1, - Active = 2, - Closed = 3, - Invalid = 9 - }; - /** - * Get the state of a NdbBlob object. - */ - State getState(); - /** - * Returns -1 for normal statement based blob and 0/1 for event - * operation post/pre data blob. Always succeeds. - */ - void getVersion(int& version); - /** - * Inline blob header. - */ - struct Head { - Uint64 length; - }; - /** - * Prepare to read blob value. The value is available after execute. - * Use getNull() to check for NULL and getLength() to get the real length - * and to check for truncation. Sets current read/write position to - * after the data read. - */ - int getValue(void* data, Uint32 bytes); - /** - * Prepare to insert or update blob value. An existing longer blob - * value will be truncated. The data buffer must remain valid until - * execute. Sets current read/write position to after the data. Set - * data to null pointer (0) to create a NULL value. - */ - int setValue(const void* data, Uint32 bytes); - /** - * Callback for setActiveHook(). Invoked immediately when the prepared - * operation has been executed (but not committed). Any getValue() or - * setValue() is done first. The blob handle is active so readData or - * writeData() etc can be used to manipulate blob value. A user-defined - * argument is passed along. Returns non-zero on error. - */ - typedef int ActiveHook(NdbBlob* me, void* arg); - /** - * Define callback for blob handle activation. The queue of prepared - * operations will be executed in no commit mode up to this point and - * then the callback is invoked. - */ - int setActiveHook(ActiveHook* activeHook, void* arg); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int getDefined(int& isNull); - int getNull(bool& isNull); -#endif - /** - * Return -1, 0, 1 if blob is undefined, non-null, or null. For - * non-event blob, undefined causes a state error. - */ - int getNull(int& isNull); - /** - * Set blob to NULL. - */ - int setNull(); - /** - * Get current length in bytes. 
Use getNull to distinguish between - * length 0 blob and NULL blob. - */ - int getLength(Uint64& length); - /** - * Truncate blob to given length. Has no effect if the length is - * larger than current length. - */ - int truncate(Uint64 length = 0); - /** - * Get current read/write position. - */ - int getPos(Uint64& pos); - /** - * Set read/write position. Must be between 0 and current length. - * "Sparse blobs" are not supported. - */ - int setPos(Uint64 pos); - /** - * Read at current position and set new position to first byte after - * the data read. A read past blob end returns actual number of bytes - * read in the in/out bytes parameter. - */ - int readData(void* data, Uint32& bytes); - /** - * Write at current position and set new position to first byte after - * the data written. A write past blob end extends the blob value. - */ - int writeData(const void* data, Uint32 bytes); - /** - * Return the blob column. - */ - const NdbDictionary::Column* getColumn(); - /** - * Get blob parts table name. Useful only to test programs. - */ - static int getBlobTableName(char* btname, Ndb* anNdb, const char* tableName, const char* columnName); - /** - * Get blob event name. The blob event is created if the main event - * monitors the blob column. The name includes main event name. - */ - static int getBlobEventName(char* bename, Ndb* anNdb, const char* eventName, const char* columnName); - /** - * Return error object. The error may be blob specific or may be - * copied from a failed implicit operation. - * - * The error code is copied back to the operation unless the operation - * already has a non-zero error code. - */ - const NdbError& getNdbError() const; - /** - * Return info about all blobs in this operation. - * - * Get first blob in list. - */ - NdbBlob* blobsFirstBlob(); - /** - * Return info about all blobs in this operation. - * - * Get next blob in list. Initialize with blobsFirstBlob(). 
- */ - NdbBlob* blobsNextBlob(); - -private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbTransaction; - friend class NdbOperation; - friend class NdbScanOperation; - friend class NdbDictionaryImpl; - friend class NdbResultSet; // atNextResult - friend class NdbEventBuffer; - friend class NdbEventOperationImpl; -#endif - // state - State theState; - void setState(State newState); - // quick and dirty support for events (consider subclassing) - int theEventBlobVersion; // -1=normal blob 0=post event 1=pre event - // define blob table - static void getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnImpl* c); - static void getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c); - static void getBlobEventName(char* bename, const NdbEventImpl* e, const NdbColumnImpl* c); - static void getBlobEvent(NdbEventImpl& be, const NdbEventImpl* e, const NdbColumnImpl* c); - // ndb api stuff - Ndb* theNdb; - NdbTransaction* theNdbCon; - NdbOperation* theNdbOp; - NdbEventOperationImpl* theEventOp; - NdbEventOperationImpl* theBlobEventOp; - NdbRecAttr* theBlobEventPkRecAttr; - NdbRecAttr* theBlobEventDistRecAttr; - NdbRecAttr* theBlobEventPartRecAttr; - NdbRecAttr* theBlobEventDataRecAttr; - const NdbTableImpl* theTable; - const NdbTableImpl* theAccessTable; - const NdbTableImpl* theBlobTable; - const NdbColumnImpl* theColumn; - char theFillChar; - // sizes - Uint32 theInlineSize; - Uint32 thePartSize; - Uint32 theStripeSize; - // getValue/setValue - bool theGetFlag; - char* theGetBuf; - bool theSetFlag; - const char* theSetBuf; - Uint32 theGetSetBytes; - // pending ops - Uint8 thePendingBlobOps; - // activation callback - ActiveHook* theActiveHook; - void* theActiveHookArg; - // buffers - struct Buf { - char* data; - unsigned size; - unsigned maxsize; - Buf(); - ~Buf(); - void alloc(unsigned n); - void zerorest(); - void copyfrom(const Buf& src); - }; - Buf theKeyBuf; - Buf theAccessKeyBuf; - Buf thePackKeyBuf; - Buf theHeadInlineBuf; - Buf theHeadInlineCopyBuf; // for writeTuple - Buf thePartBuf; - Buf theBlobEventDataBuf; - Uint32 thePartNumber; // for event - Head* theHead; - char* theInlineData; - NdbRecAttr* theHeadInlineRecAttr; - NdbOperation* theHeadInlineReadOp; - bool theHeadInlineUpdateFlag; - // length and read/write position - int theNullFlag; - Uint64 theLength; - Uint64 thePos; - // errors - NdbError theError; - // for keeping in lists - NdbBlob* theNext; - // initialization - NdbBlob(Ndb*); - void init(); - void release(); - // classify operations - bool isTableOp(); - bool isIndexOp(); - bool isKeyOp(); - bool isReadOp(); - bool isInsertOp(); - bool isUpdateOp(); - bool isWriteOp(); - bool isDeleteOp(); - bool isScanOp(); - bool isReadOnlyOp(); - bool isTakeOverOp(); - // computations - Uint32 getPartNumber(Uint64 pos); - Uint32 getPartCount(); - Uint32 getDistKey(Uint32 part); - // pack / unpack - int packKeyValue(const NdbTableImpl* aTable, const Buf& srcBuf); - int unpackKeyValue(const NdbTableImpl* aTable, Buf& dstBuf); - // getters and setters - int getTableKeyValue(NdbOperation* anOp); - int setTableKeyValue(NdbOperation* anOp); - int setAccessKeyValue(NdbOperation* anOp); - int setPartKeyValue(NdbOperation* anOp, Uint32 part); - int getHeadInlineValue(NdbOperation* anOp); - void getHeadFromRecAttr(); - int setHeadInlineValue(NdbOperation* anOp); - // data operations - int readDataPrivate(char* buf, Uint32& bytes); - int writeDataPrivate(const char* buf, Uint32 bytes); - int readParts(char* buf, Uint32 part, 
Uint32 count); - int readTableParts(char* buf, Uint32 part, Uint32 count); - int readEventParts(char* buf, Uint32 part, Uint32 count); - int insertParts(const char* buf, Uint32 part, Uint32 count); - int updateParts(const char* buf, Uint32 part, Uint32 count); - int deleteParts(Uint32 part, Uint32 count); - int deletePartsUnknown(Uint32 part); - // pending ops - int executePendingBlobReads(); - int executePendingBlobWrites(); - // callbacks - int invokeActiveHook(); - // blob handle maintenance - int atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn); - int atPrepare(NdbEventOperationImpl* anOp, NdbEventOperationImpl* aBlobOp, const NdbColumnImpl* aColumn, int version); - int prepareColumn(); - int preExecute(NdbTransaction::ExecType anExecType, bool& batch); - int postExecute(NdbTransaction::ExecType anExecType); - int preCommit(); - int atNextResult(); - int atNextEvent(); - // errors - void setErrorCode(int anErrorCode, bool invalidFlag = false); - void setErrorCode(NdbOperation* anOp, bool invalidFlag = false); - void setErrorCode(NdbTransaction* aCon, bool invalidFlag = false); - void setErrorCode(NdbEventOperationImpl* anOp, bool invalidFlag = false); -#ifdef VM_TRACE - int getOperationType() const; - friend class NdbOut& operator<<(NdbOut&, const NdbBlob&); -#endif - // list stuff - void next(NdbBlob* obj) { theNext= obj;} - NdbBlob* next() { return theNext;} - friend struct Ndb_free_list_t; -}; - -#endif diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp deleted file mode 100644 index f95a268e42d..00000000000 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ /dev/null @@ -1,1944 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbDictionary_H -#define NdbDictionary_H - -#include - -class Ndb; -struct charset_info_st; -typedef const struct charset_info_st CHARSET_INFO; - -/** - * @class NdbDictionary - * @brief Data dictionary class - * - * The preferred and supported way to create and drop tables and indexes - * in ndb is through the - * MySQL Server (see MySQL reference Manual, section MySQL Cluster). - * - * Tables and indexes that are created directly through the - * NdbDictionary class - * can not be viewed from the MySQL Server. - * Dropping indexes directly via the NdbApi will cause inconsistencies - * if they were originally created from a MySQL Cluster. 
- * - * This class supports schema data enquiries such as: - * -# Enquiries about tables - * (Dictionary::getTable, Table::getNoOfColumns, - * Table::getPrimaryKey, and Table::getNoOfPrimaryKeys) - * -# Enquiries about indexes - * (Dictionary::getIndex, Index::getNoOfColumns, - * and Index::getColumn) - * - * This class supports schema data definition such as: - * -# Creating tables (Dictionary::createTable) and table columns - * -# Dropping tables (Dictionary::dropTable) - * -# Creating secondary indexes (Dictionary::createIndex) - * -# Dropping secondary indexes (Dictionary::dropIndex) - * - * NdbDictionary has several help (inner) classes to support this: - * -# NdbDictionary::Dictionary the dictionary handling dictionary objects - * -# NdbDictionary::Table for creating tables - * -# NdbDictionary::Column for creating table columns - * -# NdbDictionary::Index for creating secondary indexes - * - * See @ref ndbapi_simple_index.cpp for details of usage. - */ -class NdbDictionary { -public: - NdbDictionary() {} /* Remove gcc warning */ - /** - * @class Object - * @brief Meta information about a database object (a table, index, etc) - */ - class Object { - public: - Object() {} /* Remove gcc warning */ - virtual ~Object() {} /* Remove gcc warning */ - /** - * Status of object - */ - enum Status { - New, ///< The object only exist in memory and - ///< has not been created in the NDB Kernel - Changed, ///< The object has been modified in memory - ///< and has to be commited in NDB Kernel for - ///< changes to take effect - Retrieved, ///< The object exist and has been read - ///< into main memory from NDB Kernel - Invalid, ///< The object has been invalidated - ///< and should not be used - Altered ///< Table has been altered in NDB kernel - ///< but is still valid for usage - }; - - /** - * Get status of object - */ - virtual Status getObjectStatus() const = 0; - - /** - * Get version of object - */ - virtual int getObjectVersion() const = 0; - - virtual int getObjectId() const = 0; - - /** - * Object type - */ - enum Type { - TypeUndefined = 0, ///< Undefined - SystemTable = 1, ///< System table - UserTable = 2, ///< User table (may be temporary) - UniqueHashIndex = 3, ///< Unique un-ordered hash index - OrderedIndex = 6, ///< Non-unique ordered index - HashIndexTrigger = 7, ///< Index maintenance, internal - IndexTrigger = 8, ///< Index maintenance, internal - SubscriptionTrigger = 9,///< Backup or replication, internal - ReadOnlyConstraint = 10,///< Trigger, internal - Tablespace = 20, ///< Tablespace - LogfileGroup = 21, ///< Logfile group - Datafile = 22, ///< Datafile - Undofile = 23 ///< Undofile - }; - - /** - * Object state - */ - enum State { - StateUndefined = 0, ///< Undefined - StateOffline = 1, ///< Offline, not usable - StateBuilding = 2, ///< Building, not yet usable - StateDropping = 3, ///< Offlining or dropping, not usable - StateOnline = 4, ///< Online, usable - StateBackup = 5, ///< Online, being backuped, usable - StateBroken = 9 ///< Broken, should be dropped and re-created - }; - - /** - * Object store - */ - enum Store { - StoreUndefined = 0, ///< Undefined - StoreNotLogged = 1, ///< Object or data deleted on system restart - StorePermanent = 2 ///< Permanent. logged to disk - }; - - /** - * Type of fragmentation. - * - * This parameter specifies how data in the table or index will - * be distributed among the db nodes in the cluster.
- * The bigger the table, the more fragments should be used.
- * Note that all replicas count as the same "fragment".
- * For a table, default is FragAllMedium. For a unique hash index, - * default is taken from underlying table and cannot currently - * be changed. - */ - enum FragmentType { - FragUndefined = 0, ///< Fragmentation type undefined or default - FragSingle = 1, ///< Only one fragment - FragAllSmall = 2, ///< One fragment per node, default - FragAllMedium = 3, ///< two fragments per node - FragAllLarge = 4, ///< Four fragments per node. - DistrKeyHash = 5, - DistrKeyLin = 6, - UserDefined = 7 - }; - }; - - class Dictionary; // Forward declaration - - class ObjectId : public Object - { - public: - ObjectId(); - virtual ~ObjectId(); - - /** - * Get status of object - */ - virtual Status getObjectStatus() const; - - /** - * Get version of object - */ - virtual int getObjectVersion() const; - - virtual int getObjectId() const; - - private: - friend class NdbDictObjectImpl; - class NdbDictObjectImpl & m_impl; - }; - - class Table; // forward declaration - class Tablespace; // forward declaration -// class NdbEventOperation; // forward declaration - - /** - * @class Column - * @brief Represents a column in an NDB Cluster table - * - * Each column has a type. The type of a column is determined by a number - * of type specifiers. - * The type specifiers are: - * - Builtin type - * - Array length or max length - * - Precision and scale (not used yet) - * - Character set for string types - * - Inline and part sizes for blobs - * - * Types in general correspond to MySQL types and their variants. - * Data formats are same as in MySQL. NDB API provides no support for - * constructing such formats. NDB kernel checks them however. - */ - class Column { - public: - /** - * The builtin column types - */ - enum Type { - Undefined = NDB_TYPE_UNDEFINED, ///< Undefined - Tinyint = NDB_TYPE_TINYINT, ///< 8 bit. 1 byte signed integer, can be used in array - Tinyunsigned = NDB_TYPE_TINYUNSIGNED, ///< 8 bit. 1 byte unsigned integer, can be used in array - Smallint = NDB_TYPE_SMALLINT, ///< 16 bit. 2 byte signed integer, can be used in array - Smallunsigned = NDB_TYPE_SMALLUNSIGNED, ///< 16 bit. 2 byte unsigned integer, can be used in array - Mediumint = NDB_TYPE_MEDIUMINT, ///< 24 bit. 3 byte signed integer, can be used in array - Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED,///< 24 bit. 3 byte unsigned integer, can be used in array - Int = NDB_TYPE_INT, ///< 32 bit. 4 byte signed integer, can be used in array - Unsigned = NDB_TYPE_UNSIGNED, ///< 32 bit. 4 byte unsigned integer, can be used in array - Bigint = NDB_TYPE_BIGINT, ///< 64 bit. 8 byte signed integer, can be used in array - Bigunsigned = NDB_TYPE_BIGUNSIGNED, ///< 64 Bit. 8 byte signed integer, can be used in array - Float = NDB_TYPE_FLOAT, ///< 32-bit float. 4 bytes float, can be used in array - Double = NDB_TYPE_DOUBLE, ///< 64-bit float. 8 byte float, can be used in array - Olddecimal = NDB_TYPE_OLDDECIMAL, ///< MySQL < 5.0 signed decimal, Precision, Scale - Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED, - Decimal = NDB_TYPE_DECIMAL, ///< MySQL >= 5.0 signed decimal, Precision, Scale - Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED, - Char = NDB_TYPE_CHAR, ///< Len. 
A fixed array of 1-byte chars - Varchar = NDB_TYPE_VARCHAR, ///< Length bytes: 1, Max: 255 - Binary = NDB_TYPE_BINARY, ///< Len - Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255 - Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes ) - Date = NDB_TYPE_DATE, ///< Precision down to 1 day(sizeof(Date) == 4 bytes ) - Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob) - Text = NDB_TYPE_TEXT, ///< Text blob - Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits - Longvarchar = NDB_TYPE_LONGVARCHAR, ///< Length bytes: 2, little-endian - Longvarbinary = NDB_TYPE_LONGVARBINARY, ///< Length bytes: 2, little-endian - Time = NDB_TYPE_TIME, ///< Time without date - Year = NDB_TYPE_YEAR, ///< Year 1901-2155 (1 byte) - Timestamp = NDB_TYPE_TIMESTAMP ///< Unix time - }; - - /* - * Array type specifies internal attribute format. - * - * - ArrayTypeFixed is stored as fixed number of bytes. This type - * is fastest to access but can waste space. - * - * - ArrayTypeVar is stored as variable number of bytes with a fixed - * overhead of 2 bytes. - * - * Default is ArrayTypeVar for Var* types and ArrayTypeFixed for - * others. The default is normally ok. - */ - enum ArrayType { - ArrayTypeFixed = NDB_ARRAYTYPE_FIXED, // 0 length bytes - ArrayTypeShortVar = NDB_ARRAYTYPE_SHORT_VAR, // 1 length bytes - ArrayTypeMediumVar = NDB_ARRAYTYPE_MEDIUM_VAR // 2 length bytes - }; - - /* - * Storage type specifies whether attribute is stored in memory or - * on disk. Default is memory. Disk attributes are potentially - * much slower to access and cannot be indexed in version 5.1. - */ - enum StorageType { - StorageTypeMemory = NDB_STORAGETYPE_MEMORY, - StorageTypeDisk = NDB_STORAGETYPE_DISK - }; - - /** - * @name General - * @{ - */ - - /** - * Get name of column - * @return Name of the column - */ - const char* getName() const; - - /** - * Get if the column is nullable or not - */ - bool getNullable() const; - - /** - * Check if column is part of primary key - */ - bool getPrimaryKey() const; - - /** - * Get number of column (horizontal position within table) - */ - int getColumnNo() const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - int getAttrId() const; -#endif - - /** - * Check if column is equal to some other column - * @param column Column to compare with - * @return true if column is equal to some other column otherwise false. - */ - bool equal(const Column& column) const; - - - /** @} *******************************************************************/ - /** - * @name Get Type Specifiers - * @{ - */ - - /** - * Get type of column - */ - Type getType() const; - - /** - * Get precision of column. - * @note Only applicable for decimal types - */ - int getPrecision() const; - - /** - * Get scale of column. - * @note Only applicable for decimal types - */ - int getScale() const; - - /** - * Get length for column - * Array length for column or max length for variable length arrays. - */ - int getLength() const; - - /** - * For Char or Varchar or Text, get MySQL CHARSET_INFO. This - * specifies both character set and collation. See get_charset() - * etc in MySQL. (The cs is not "const" in MySQL). - */ - CHARSET_INFO* getCharset() const; - - - /** - * For blob, get "inline size" i.e. number of initial bytes - * to store in table's blob attribute. This part is normally in - * main memory and can be indexed and interpreted. - */ - int getInlineSize() const; - - /** - * For blob, get "part size" i.e. number of bytes to store in - * each tuple of the "blob table". 
Can be set to zero to omit parts - * and to allow only inline bytes ("tinyblob"). - */ - int getPartSize() const; - - /** - * For blob, set or get "stripe size" i.e. number of consecutive - * parts to store in each node group. - */ - int getStripeSize() const; - - /** - * Get size of element - */ - int getSize() const; - - /** - * Check if column is part of partition key - * - * A partition key is a set of attributes which are used - * to distribute the tuples onto the NDB nodes. - * The partition key uses the NDB Cluster hashing function. - * - * An example where this is useful is TPC-C where it might be - * good to use the warehouse id and district id as the partition key. - * This would place all data for a specific district and warehouse - * in the same database node. - * - * Locally in the fragments the full primary key - * will still be used with the hashing algorithm. - * - * @return true then the column is part of - * the partition key. - */ - bool getPartitionKey() const; -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - inline bool getDistributionKey() const { return getPartitionKey(); }; -#endif - - ArrayType getArrayType() const; - StorageType getStorageType() const; - - /** @} *******************************************************************/ - - - /** - * @name Column creation - * @{ - * - * These operations should normally not be performed in an NbdApi program - * as results will not be visable in the MySQL Server - * - */ - - /** - * Constructor - * @param name Name of column - */ - Column(const char * name = ""); - /** - * Copy constructor - * @param column Column to be copied - */ - Column(const Column& column); - ~Column(); - - /** - * Set name of column - * @param name Name of the column - */ - int setName(const char * name); - - /** - * Set whether column is nullable or not - */ - void setNullable(bool); - - /** - * Set that column is part of primary key - */ - void setPrimaryKey(bool); - - /** - * Set type of column - * @param type Type of column - * - * @note setType resets all column attributes - * to (type dependent) defaults and should be the first - * method to call. Default type is Unsigned. - */ - void setType(Type type); - - /** - * Set precision of column. - * @note Only applicable for decimal types - */ - void setPrecision(int); - - /** - * Set scale of column. - * @note Only applicable for decimal types - */ - void setScale(int); - - /** - * Set length for column - * Array length for column or max length for variable length arrays. - */ - void setLength(int length); - - /** - * For Char or Varchar or Text, get MySQL CHARSET_INFO. This - * specifies both character set and collation. See get_charset() - * etc in MySQL. (The cs is not "const" in MySQL). - */ - void setCharset(CHARSET_INFO* cs); - - /** - * For blob, get "inline size" i.e. number of initial bytes - * to store in table's blob attribute. This part is normally in - * main memory and can be indexed and interpreted. - */ - void setInlineSize(int size); - - /** - * For blob, get "part size" i.e. number of bytes to store in - * each tuple of the "blob table". Can be set to zero to omit parts - * and to allow only inline bytes ("tinyblob"). - */ - void setPartSize(int size); - - /** - * For blob, get "stripe size" i.e. number of consecutive - * parts to store in each node group. - */ - void setStripeSize(int size); - - /** - * Set partition key - * @see getPartitionKey - * - * @param enable If set to true, then the column will be part of - * the partition key. 
- */ - void setPartitionKey(bool enable); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - inline void setDistributionKey(bool enable) - { setPartitionKey(enable); }; -#endif - - void setArrayType(ArrayType type); - void setStorageType(StorageType type); - - /** @} *******************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - const Table * getBlobTable() const; - - void setAutoIncrement(bool); - bool getAutoIncrement() const; - void setAutoIncrementInitialValue(Uint64 val); - int setDefaultValue(const char*); - const char* getDefaultValue() const; - - static const Column * FRAGMENT; - static const Column * FRAGMENT_FIXED_MEMORY; - static const Column * FRAGMENT_VARSIZED_MEMORY; - static const Column * ROW_COUNT; - static const Column * COMMIT_COUNT; - static const Column * ROW_SIZE; - static const Column * RANGE_NO; - static const Column * DISK_REF; - static const Column * RECORDS_IN_RANGE; - static const Column * ROWID; - static const Column * ROW_GCI; - static const Column * ANY_VALUE; - static const Column * COPY_ROWID; - - int getSizeInBytes() const; -#endif - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbRecAttr; - friend class NdbColumnImpl; -#endif - class NdbColumnImpl & m_impl; - Column(NdbColumnImpl&); - Column& operator=(const Column&); - }; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * ??? - */ - typedef Column Attribute; -#endif - - /** - * @brief Represents a table in NDB Cluster - * - * TableSize
- * When calculating the data storage one should add the size of all
- * attributes (each attribute consumes at least 4 bytes) and also an overhead
- * of 12 bytes. Variable size attributes (not supported yet) will have a
- * size of 12 bytes plus the actual data storage parts, with an
- * additional overhead based on the size of the variable part.
- * An example table with 5 attributes:
- * one 64 bit attribute, one 32 bit attribute,
- * two 16 bit attributes and one array of 64 8-bit values.
- * This table will consume
- * 12 (overhead) + 8 + 4 + 2*4 (4 is the minimum) + 64 = 96 bytes per record.
- * Additionally an overhead of about 2 % for page headers and waste should
- * be allowed for. Thus, 1 million records should consume 96 MBytes
- * plus about 2 MBytes of overhead, rounded up to 100 000 kBytes.
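As a quick cross-check of the estimate above, the same arithmetic can be written out in code. This is only an illustrative sketch, not part of the header; it assumes the per-attribute minimum of 4 bytes, the 12-byte record overhead and the ~2 % page overhead described in the comment:

    #include <cstdio>

    int main() {
      const unsigned row_overhead = 12;
      // 64-bit + 32-bit + two 16-bit (each rounded up to 4 bytes) + 64 x 8-bit
      const unsigned attributes = 8 + 4 + 4 + 4 + 64;
      const unsigned per_record = row_overhead + attributes;   // = 96 bytes
      const double total = per_record * 1000000.0 * 1.02;      // 1M records, ~2 % waste
      std::printf("%u bytes/record, ~%.0f MB for 1 million records\n",
                  per_record, total / 1e6);
      return 0;
    }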
- * - */ - class Table : public Object { - public: - /* - * Single user mode specifies access rights to table during single user mode - */ - enum SingleUserMode { - SingleUserModeLocked = NDB_SUM_LOCKED, - SingleUserModeReadOnly = NDB_SUM_READONLY, - SingleUserModeReadWrite = NDB_SUM_READ_WRITE - }; - - /** - * @name General - * @{ - */ - - /** - * Get table name - */ - const char * getName() const; - - /** - * Get table id - */ - int getTableId() const; - - /** - * Get column definition via name. - * @return null if none existing name - */ - const Column* getColumn(const char * name) const; - - /** - * Get column definition via index in table. - * @return null if none existing name - */ - Column* getColumn(const int attributeId); - - /** - * Get column definition via name. - * @return null if none existing name - */ - Column* getColumn(const char * name); - - /** - * Get column definition via index in table. - * @return null if none existing name - */ - const Column* getColumn(const int attributeId) const; - - /** @} *******************************************************************/ - /** - * @name Storage - * @{ - */ - - /** - * If set to false, then the table is a temporary - * table and is not logged to disk. - * - * In case of a system restart the table will still - * be defined and exist but will be empty. - * Thus no checkpointing and no logging is performed on the table. - * - * The default value is true and indicates a normal table - * with full checkpointing and logging activated. - */ - bool getLogging() const; - - /** - * Get fragmentation type - */ - FragmentType getFragmentType() const; - - /** - * Get KValue (Hash parameter.) - * Only allowed value is 6. - * Later implementations might add flexibility in this parameter. - */ - int getKValue() const; - - /** - * Get MinLoadFactor (Hash parameter.) - * This value specifies the load factor when starting to shrink - * the hash table. - * It must be smaller than MaxLoadFactor. - * Both these factors are given in percentage. - */ - int getMinLoadFactor() const; - - /** - * Get MaxLoadFactor (Hash parameter.) - * This value specifies the load factor when starting to split - * the containers in the local hash tables. - * 100 is the maximum which will optimize memory usage. - * A lower figure will store less information in each container and thus - * find the key faster but consume more memory. 
- */ - int getMaxLoadFactor() const; - - /** @} *******************************************************************/ - /** - * @name Other - * @{ - */ - - /** - * Get number of columns in the table - */ - int getNoOfColumns() const; - - /** - * Get number of primary keys in the table - */ - int getNoOfPrimaryKeys() const; - - /** - * Get name of primary key - */ - const char* getPrimaryKey(int no) const; - - /** - * Check if table is equal to some other table - */ - bool equal(const Table&) const; - - /** - * Get frm file stored with this table - */ - const void* getFrmData() const; - Uint32 getFrmLength() const; - - /** - * Get Fragment Data (id, state and node group) - */ - const void *getFragmentData() const; - Uint32 getFragmentDataLen() const; - - /** - * Get Range or List Array (value, partition) - */ - const void *getRangeListData() const; - Uint32 getRangeListDataLen() const; - - /** - * Get Tablespace Data (id, version) - */ - const void *getTablespaceData() const; - Uint32 getTablespaceDataLen() const; - - /** @} *******************************************************************/ - - /** - * @name Table creation - * @{ - * - * These methods should normally not be used in an application as - * the result is not accessible from the MySQL Server - * - */ - - /** - * Constructor - * @param name Name of table - */ - Table(const char * name = ""); - - /** - * Copy constructor - * @param table Table to be copied - */ - Table(const Table& table); - virtual ~Table(); - - /** - * Assignment operator, deep copy - * @param table Table to be copied - */ - Table& operator=(const Table& table); - - /** - * Name of table - * @param name Name of table - */ - int setName(const char * name); - - /** - * Add a column definition to a table - * @note creates a copy - */ - int addColumn(const Column &); - - /** - * @see NdbDictionary::Table::getLogging. - */ - void setLogging(bool); - - /** - * Set/Get Linear Hash Flag - */ - void setLinearFlag(Uint32 flag); - bool getLinearFlag() const; - - /** - * Set fragment count - */ - void setFragmentCount(Uint32); - - /** - * Get fragment count - */ - Uint32 getFragmentCount() const; - - /** - * Set fragmentation type - */ - void setFragmentType(FragmentType); - - /** - * Set KValue (Hash parameter.) - * Only allowed value is 6. - * Later implementations might add flexibility in this parameter. - */ - void setKValue(int kValue); - - /** - * Set MinLoadFactor (Hash parameter.) - * This value specifies the load factor when starting to shrink - * the hash table. - * It must be smaller than MaxLoadFactor. - * Both these factors are given in percentage. - */ - void setMinLoadFactor(int); - - /** - * Set MaxLoadFactor (Hash parameter.) - * This value specifies the load factor when starting to split - * the containers in the local hash tables. - * 100 is the maximum which will optimize memory usage. - * A lower figure will store less information in each container and thus - * find the key faster but consume more memory. 
- */ - void setMaxLoadFactor(int); - - int setTablespaceName(const char * name); - const char * getTablespaceName() const; - int setTablespace(const class Tablespace &); - bool getTablespace(Uint32 *id= 0, Uint32 *version= 0) const; - - /** - * Get table object type - */ - Object::Type getObjectType() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - void setStatusInvalid() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Set/Get indicator if default number of partitions is used in table. - */ - void setDefaultNoPartitionsFlag(Uint32 indicator); - Uint32 getDefaultNoPartitionsFlag() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - /** - * Set frm file to store with this table - */ - int setFrm(const void* data, Uint32 len); - - /** - * Set array of fragment information containing - * Fragment Identity - * Node group identity - * Fragment State - */ - int setFragmentData(const void* data, Uint32 len); - - /** - * Set/Get tablespace names per fragment - */ - int setTablespaceNames(const void* data, Uint32 len); - const void *getTablespaceNames(); - Uint32 getTablespaceNamesLen() const; - - /** - * Set tablespace information per fragment - * Contains a tablespace id and a tablespace version - */ - int setTablespaceData(const void* data, Uint32 len); - - /** - * Set array of information mapping range values and list values - * to fragments. This is essentially a sorted map consisting of - * pairs of value, fragment identity. For range partitions there is - * one pair per fragment. For list partitions it could be any number - * of pairs, at least as many as there are fragments. - */ - int setRangeListData(const void* data, Uint32 len); - - /** - * Set table object type - */ - void setObjectType(Object::Type type); - - /** - * Set/Get Maximum number of rows in table (only used to calculate - * number of partitions). - */ - void setMaxRows(Uint64 maxRows); - Uint64 getMaxRows() const; - - /** - * Set/Get Minimum number of rows in table (only used to calculate - * number of partitions). 
- */ - void setMinRows(Uint64 minRows); - Uint64 getMinRows() const; - - /** - * Set/Get SingleUserMode - */ - void setSingleUserMode(enum SingleUserMode); - enum SingleUserMode getSingleUserMode() const; - - - /** @} *******************************************************************/ - - /** - * - */ - void setRowGCIIndicator(bool value); - bool getRowGCIIndicator() const; - - void setRowChecksumIndicator(bool value); - bool getRowChecksumIndicator() const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - const char *getMysqlName() const; - - void setStoredTable(bool x) { setLogging(x); } - bool getStoredTable() const { return getLogging(); } - - int getRowSizeInBytes() const ; - int createTableInDb(Ndb*, bool existingEqualIsOk = true) const ; - - int getReplicaCount() const ; - - bool getTemporary(); - void setTemporary(bool); - - /** - * Only table with varpart do support online add column - * Add property so that table wo/ varsize column(s) still - * allocates varpart-ref, so that later online add column is possible - */ - bool getForceVarPart() const; - void setForceVarPart(bool); - - /** - * Check if any of column in bitmaps are disk columns - * returns bitmap of different columns - * bit 0 = atleast 1 pk column is set - * bit 1 = atleast 1 disk column set - * bit 2 = atleast 1 non disk column set - * passing NULL pointer will equal to bitmap with all columns set - */ - int checkColumns(const Uint32* bitmap, unsigned len_in_bytes) const; -#endif - - // these 2 are not de-doxygenated - - /** - * This method is not needed in normal usage. - * - * Compute aggregate data on table being defined. Required for - * aggregate methods such as getNoOfPrimaryKeys() to work before - * table has been created and retrieved via getTable(). - * - * May adjust some column flags. If no PK is so far marked as - * distribution key then all PK's will be marked. - * - * Returns 0 on success. Returns -1 and sets error if an - * inconsistency is detected. - */ - int aggregate(struct NdbError& error); - - /** - * This method is not needed in normal usage. - * - * Validate new table definition before create. Does aggregate() - * and additional checks. There may still be errors which are - * detected only by NDB kernel at create table. - * - * Create table and retrieve table do validate() automatically. - * - * Returns 0 on success. Returns -1 and sets error if an - * inconsistency is detected. - */ - int validate(struct NdbError& error); - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbDictionaryImpl; - friend class NdbTableImpl; - friend class NdbEventOperationImpl; -#endif - class NdbTableImpl & m_impl; - Table(NdbTableImpl&); - }; - - /** - * @class Index - * @brief Represents an index in an NDB Cluster - */ - class Index : public Object { - public: - - /** - * @name Getting Index properties - * @{ - */ - - /** - * Get the name of an index - */ - const char * getName() const; - - /** - * Get the name of the table being indexed - */ - const char * getTable() const; - - /** - * Get the number of columns in the index - */ - unsigned getNoOfColumns() const; - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get the number of columns in the index - * Depricated, use getNoOfColumns instead. - */ - int getNoOfIndexColumns() const; -#endif - - /** - * Get a specific column in the index - */ - const Column * getColumn(unsigned no) const ; - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get a specific column name in the index - * Depricated, use getColumn instead. 
- */ - const char * getIndexColumn(int no) const ; -#endif - - /** - * Represents type of index - */ - enum Type { - Undefined = 0, ///< Undefined object type (initial value) - UniqueHashIndex = 3, ///< Unique un-ordered hash index - ///< (only one currently supported) - OrderedIndex = 6 ///< Non-unique ordered index - }; - - /** - * Get index type of the index - */ - Type getType() const; - - /** - * Check if index is set to be stored on disk - * - * @return if true then logging id enabled - * - * @note Non-logged indexes are rebuilt at system restart. - * @note Ordered index does not currently support logging. - */ - bool getLogging() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - /** @} *******************************************************************/ - - /** - * @name Index creation - * @{ - * - * These methods should normally not be used in an application as - * the result will not be visible from the MySQL Server - * - */ - - /** - * Constructor - * @param name Name of index - */ - Index(const char * name = ""); - virtual ~Index(); - - /** - * Set the name of an index - */ - int setName(const char * name); - - /** - * Define the name of the table to be indexed - */ - int setTable(const char * name); - - /** - * Add a column to the index definition - * Note that the order of columns will be in - * the order they are added (only matters for ordered indexes). - */ - int addColumn(const Column & c); - - /** - * Add a column name to the index definition - * Note that the order of indexes will be in - * the order they are added (only matters for ordered indexes). - */ - int addColumnName(const char * name); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Add a column name to the index definition - * Note that the order of indexes will be in - * the order they are added (only matters for ordered indexes). - * Depricated, use addColumnName instead. - */ - int addIndexColumn(const char * name); -#endif - - /** - * Add several column names to the index definition - * Note that the order of indexes will be in - * the order they are added (only matters for ordered indexes). - */ - int addColumnNames(unsigned noOfNames, const char ** names); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Add several column names to the index definition - * Note that the order of indexes will be in - * the order they are added (only matters for ordered indexes). - * Depricated, use addColumnNames instead. 
- */ - int addIndexColumns(int noOfNames, const char ** names); -#endif - - /** - * Set index type of the index - */ - void setType(Type type); - - /** - * Enable/Disable index storage on disk - * - * @param enable If enable is set to true, then logging becomes enabled - * - * @see NdbDictionary::Index::getLogging - */ - void setLogging(bool enable); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - void setStoredIndex(bool x) { setLogging(x); } - bool getStoredIndex() const { return getLogging(); } - - bool getTemporary(); - void setTemporary(bool); -#endif - - /** @} *******************************************************************/ - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbIndexImpl; - friend class NdbIndexStat; -#endif - class NdbIndexImpl & m_impl; - Index(NdbIndexImpl&); - }; - - /** - * @brief Represents an Event in NDB Cluster - * - */ - class Event : public Object { - public: - /** - * Specifies the type of database operations an Event listens to - */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** TableEvent must match 1 << TriggerEvent */ -#endif - enum TableEvent { - TE_INSERT =1<<0, ///< Insert event on table - TE_DELETE =1<<1, ///< Delete event on table - TE_UPDATE =1<<2, ///< Update event on table -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - TE_SCAN =1<<3, ///< Scan event on table - TE_FIRST_NON_DATA_EVENT =1<<4, -#endif - TE_DROP =1<<4, ///< Drop of table - TE_ALTER =1<<5, ///< Alter of table - TE_CREATE =1<<6, ///< Create of table - TE_GCP_COMPLETE=1<<7, ///< GCP is complete - TE_CLUSTER_FAILURE=1<<8, ///< Cluster is unavailable - TE_STOP =1<<9, ///< Stop of event operation - TE_NODE_FAILURE=1<<10, ///< Node failed - TE_SUBSCRIBE =1<<11, ///< Node subscribes - TE_UNSUBSCRIBE =1<<12, ///< Node unsubscribes - TE_ALL=0xFFFF ///< Any/all event on table (not relevant when - ///< events are received) - }; -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - enum _TableEvent { - _TE_INSERT=0, - _TE_DELETE=1, - _TE_UPDATE=2, - _TE_SCAN=3, - _TE_FIRST_NON_DATA_EVENT=4, - _TE_DROP=4, - _TE_ALTER=5, - _TE_CREATE=6, - _TE_GCP_COMPLETE=7, - _TE_CLUSTER_FAILURE=8, - _TE_STOP=9, - _TE_NODE_FAILURE=10, - _TE_SUBSCRIBE=11, - _TE_UNSUBSCRIBE=12, - _TE_NUL=13, // internal (e.g. INS o DEL within same GCI) - _TE_ACTIVE=14 // internal (node becomes active) - }; -#endif - /** - * Specifies the durability of an event - * (future version may supply other types) - */ - enum EventDurability { - ED_UNDEFINED -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 0 -#endif -#if 0 // not supported - ,ED_SESSION = 1, - // Only this API can use it - // and it's deleted after api has disconnected or ndb has restarted - - ED_TEMPORARY = 2 - // All API's can use it, - // But's its removed when ndb is restarted -#endif - ,ED_PERMANENT ///< All API's can use it. 
- ///< It's still defined after a cluster system restart -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 3 -#endif - }; - - /** - * Specifies reporting options for table events - */ - enum EventReport { - ER_UPDATED = 0, - ER_ALL = 1, // except not-updated blob inlines - ER_SUBSCRIBE = 2 - }; - - /** - * Constructor - * @param name Name of event - */ - Event(const char *name); - /** - * Constructor - * @param name Name of event - * @param table Reference retrieved from NdbDictionary - */ - Event(const char *name, const NdbDictionary::Table& table); - virtual ~Event(); - /** - * Set unique identifier for the event - */ - int setName(const char *name); - /** - * Get unique identifier for the event - */ - const char *getName() const; - /** - * Get table that the event is defined on - * - * @return pointer to table or NULL if no table has been defined - */ - const NdbDictionary::Table * getTable() const; - /** - * Define table on which events should be detected - * - * @note calling this method will default to detection - * of events on all columns. Calling subsequent - * addEventColumn calls will override this. - * - * @param table reference retrieved from NdbDictionary - */ - void setTable(const NdbDictionary::Table& table); - /** - * Set table for which events should be detected - * - * @note preferred way is using setTable(const NdbDictionary::Table&) - * or constructor with table object parameter - */ - int setTable(const char *tableName); - /** - * Get table name for events - * - * @return table name - */ - const char* getTableName() const; - /** - * Add type of event that should be detected - */ - void addTableEvent(const TableEvent te); - /** - * Check if a specific table event will be detected - */ - bool getTableEvent(const TableEvent te) const; - /** - * Set durability of the event - */ - void setDurability(EventDurability); - /** - * Get durability of the event - */ - EventDurability getDurability() const; - /** - * Set report option of the event - */ - void setReport(EventReport); - /** - * Get report option of the event - */ - EventReport getReport() const; -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void addColumn(const Column &c); -#endif - /** - * Add a column on which events should be detected - * - * @param attrId Column id - * - * @note errors will mot be detected until createEvent() is called - */ - void addEventColumn(unsigned attrId); - /** - * Add a column on which events should be detected - * - * @param columnName Column name - * - * @note errors will not be detected until createEvent() is called - */ - void addEventColumn(const char * columnName); - /** - * Add several columns on which events should be detected - * - * @param n Number of columns - * @param columnNames Column names - * - * @note errors will mot be detected until - * NdbDictionary::Dictionary::createEvent() is called - */ - void addEventColumns(int n, const char ** columnNames); - - /** - * Get no of columns defined in an Event - * - * @return Number of columns, -1 on error - */ - int getNoOfEventColumns() const; - - /** - * Get a specific column in the event - */ - const Column * getEventColumn(unsigned no) const; - - /** - * The merge events flag is false by default. 
Setting it true - * implies that events are merged in following ways: - * - * - for given NdbEventOperation associated with this event, - * events on same PK within same GCI are merged into single event - * - * - a blob table event is created for each blob attribute - * and blob events are handled as part of main table events - * - * - blob post/pre data from the blob part events can be read - * via NdbBlob methods as a single value - * - * NOTE: Currently this flag is not inherited by NdbEventOperation - * and must be set on NdbEventOperation explicitly. - */ - void mergeEvents(bool flag); - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void print(); -#endif - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbEventImpl; - friend class NdbEventOperationImpl; -#endif - class NdbEventImpl & m_impl; - Event(NdbEventImpl&); - }; - - struct AutoGrowSpecification { - Uint32 min_free; - Uint64 max_size; - Uint64 file_size; - const char * filename_pattern; - }; - - /** - * @class LogfileGroup - */ - class LogfileGroup : public Object { - public: - LogfileGroup(); - LogfileGroup(const LogfileGroup&); - virtual ~LogfileGroup(); - - void setName(const char * name); - const char* getName() const; - - void setUndoBufferSize(Uint32 sz); - Uint32 getUndoBufferSize() const; - - void setAutoGrowSpecification(const AutoGrowSpecification&); - const AutoGrowSpecification& getAutoGrowSpecification() const; - - Uint64 getUndoFreeWords() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - private: - friend class NdbDictionaryImpl; - friend class NdbLogfileGroupImpl; - class NdbLogfileGroupImpl & m_impl; - LogfileGroup(NdbLogfileGroupImpl&); - }; - - /** - * @class Tablespace - */ - class Tablespace : public Object { - public: - Tablespace(); - Tablespace(const Tablespace&); - virtual ~Tablespace(); - - void setName(const char * name); - const char* getName() const; - - void setExtentSize(Uint32 sz); - Uint32 getExtentSize() const; - - void setAutoGrowSpecification(const AutoGrowSpecification&); - const AutoGrowSpecification& getAutoGrowSpecification() const; - - void setDefaultLogfileGroup(const char * name); - void setDefaultLogfileGroup(const class LogfileGroup&); - - const char * getDefaultLogfileGroup() const; - Uint32 getDefaultLogfileGroupId() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - private: - friend class NdbTablespaceImpl; - class NdbTablespaceImpl & m_impl; - Tablespace(NdbTablespaceImpl&); - }; - - class Datafile : public Object { - public: - Datafile(); - Datafile(const Datafile&); - virtual ~Datafile(); - - void setPath(const char * name); - const char* getPath() const; - - void setSize(Uint64); - Uint64 getSize() const; - Uint64 getFree() const; - - int setTablespace(const char * name); - int setTablespace(const class Tablespace &); - const char * getTablespace() const; - void getTablespaceId(ObjectId * dst) const; - - void setNode(Uint32 nodeId); - Uint32 getNode() const; - - Uint32 
getFileNo() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - private: - friend class NdbDatafileImpl; - class NdbDatafileImpl & m_impl; - Datafile(NdbDatafileImpl&); - }; - - class Undofile : public Object { - public: - Undofile(); - Undofile(const Undofile&); - virtual ~Undofile(); - - void setPath(const char * path); - const char* getPath() const; - - void setSize(Uint64); - Uint64 getSize() const; - - void setLogfileGroup(const char * name); - void setLogfileGroup(const class LogfileGroup &); - const char * getLogfileGroup() const; - void getLogfileGroupId(ObjectId * dst) const; - - void setNode(Uint32 nodeId); - Uint32 getNode() const; - - Uint32 getFileNo() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - - /** - * Get object id - */ - virtual int getObjectId() const; - - private: - friend class NdbUndofileImpl; - class NdbUndofileImpl & m_impl; - Undofile(NdbUndofileImpl&); - }; - - /** - * @class Dictionary - * @brief Dictionary for defining and retreiving meta data - */ - class Dictionary { - public: - /** - * @class List - * @brief Structure for retrieving lists of object names - */ - struct List { - /** - * @struct Element - * @brief Object to be stored in an NdbDictionary::Dictionary::List - */ - struct Element { - unsigned id; ///< Id of object - Object::Type type; ///< Type of object - Object::State state; ///< State of object - Object::Store store; ///< How object is logged - Uint32 temp; ///< Temporary status of object - char * database; ///< In what database the object resides - char * schema; ///< What schema the object is defined in - char * name; ///< Name of object - Element() : - id(0), - type(Object::TypeUndefined), - state(Object::StateUndefined), - store(Object::StoreUndefined), - temp(NDB_TEMP_TAB_PERMANENT), - database(0), - schema(0), - name(0) { - } - }; - unsigned count; ///< Number of elements in list - Element * elements; ///< Pointer to array of elements - List() : count(0), elements(0) {} - ~List() { - if (elements != 0) { - for (unsigned i = 0; i < count; i++) { - delete[] elements[i].database; - delete[] elements[i].schema; - delete[] elements[i].name; - elements[i].name = 0; - } - delete[] elements; - count = 0; - elements = 0; - } - } - }; - - /** - * @name General - * @{ - */ - - /** - * Fetch list of all objects, optionally restricted to given type. - * - * @param list List of objects returned in the dictionary - * @param type Restrict returned list to only contain objects of - * this type - * - * @return -1 if error. - * - */ - int listObjects(List & list, Object::Type type = Object::TypeUndefined); - int listObjects(List & list, - Object::Type type = Object::TypeUndefined) const; - - /** - * Get the latest error - * - * @return Error object. - */ - const struct NdbError & getNdbError() const; - - /** @} *******************************************************************/ - - /** - * @name Retrieving references to Tables and Indexes - * @{ - */ - - /** - * Get table with given name, NULL if undefined - * @param name Name of table to get - * @return table if successful otherwise NULL. - */ - const Table * getTable(const char * name) const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Given main table, get blob table. 
- */ - const Table * getBlobTable(const Table *, const char * col_name); - const Table * getBlobTable(const Table *, Uint32 col_no); - - /* - * Save a table definition in dictionary cache - * @param table Object to put into cache - */ - void putTable(const Table * table); -#endif - - /** - * Get index with given name, NULL if undefined - * @param indexName Name of index to get. - * @param tableName Name of table that index belongs to. - * @return index if successful, otherwise 0. - */ - const Index * getIndex(const char * indexName, - const char * tableName) const; - - /** - * Fetch list of indexes of given table. - * @param list Reference to list where to store the listed indexes - * @param tableName Name of table that index belongs to. - * @return 0 if successful, otherwise -1 - */ - int listIndexes(List & list, const char * tableName); - int listIndexes(List & list, const char * tableName) const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Fetch list of indexes of given table. - * @param list Reference to list where to store the listed indexes - * @param table Reference to table that index belongs to. - * @return 0 if successful, otherwise -1 - */ - int listIndexes(List & list, const Table &table) const; -#endif - - /** @} *******************************************************************/ - /** - * @name Events - * @{ - */ - - /** - * Create event given defined Event instance - * @param event Event to create - * @return 0 if successful otherwise -1. - */ - int createEvent(const Event &event); - - /** - * Drop event with given name - * @param eventName Name of event to drop. - * @return 0 if successful otherwise -1. - */ - int dropEvent(const char * eventName); - - /** - * Get event with given name. - * @param eventName Name of event to get. - * @return an Event if successful, otherwise NULL. - */ - const Event * getEvent(const char * eventName); - - /** @} *******************************************************************/ - - /** - * @name Table creation - * @{ - * - * These methods should normally not be used in an application as - * the result will not be visible from the MySQL Server - */ - - /** - * Create defined table given defined Table instance - * @param table Table to create - * @return 0 if successful otherwise -1. - */ - int createTable(const Table &table); - - /** - * Drop table given retrieved Table instance - * @param table Table to drop - * @return 0 if successful otherwise -1. - */ - int dropTable(Table & table); - - /** - * Drop table given table name - * @param name Name of table to drop - * @return 0 if successful otherwise -1. - */ - int dropTable(const char * name); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Alter defined table given defined Table instance - * @param table Table to alter - * @return -2 (incompatible version)
- * -1 general error
- * 0 success - */ - int alterTable(const Table &table); - - /** - * Invalidate cached table object - * @param name Name of table to invalidate - */ - void invalidateTable(const char * name); -#endif - - /** - * Remove table from local cache - */ - void removeCachedTable(const char * table); - /** - * Remove index from local cache - */ - void removeCachedIndex(const char * index, const char * table); - - - /** @} *******************************************************************/ - /** - * @name Index creation - * @{ - * - * These methods should normally not be used in an application as - * the result will not be visible from the MySQL Server - * - */ - - /** - * Create index given defined Index instance - * @param index Index to create - * @return 0 if successful otherwise -1. - */ - int createIndex(const Index &index); - int createIndex(const Index &index, const Table &table); - - /** - * Drop index with given name - * @param indexName Name of index to drop. - * @param tableName Name of table that index belongs to. - * @return 0 if successful otherwise -1. - */ - int dropIndex(const char * indexName, - const char * tableName); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void removeCachedTable(const Table *table); - void removeCachedIndex(const Index *index); - void invalidateTable(const Table *table); - /** - * Invalidate cached index object - */ - void invalidateIndex(const char * indexName, - const char * tableName); - void invalidateIndex(const Index *index); - /** - * Force gcp and wait for gcp complete - */ - int forceGCPWait(); -#endif - - /** @} *******************************************************************/ - - /** @} *******************************************************************/ - /** - * @name Disk data objects - * @{ - */ - - int createLogfileGroup(const LogfileGroup &, ObjectId* = 0); - int dropLogfileGroup(const LogfileGroup&); - LogfileGroup getLogfileGroup(const char * name); - - int createTablespace(const Tablespace &, ObjectId* = 0); - int dropTablespace(const Tablespace&); - Tablespace getTablespace(const char * name); - Tablespace getTablespace(Uint32 tablespaceId); - - int createDatafile(const Datafile &, bool overwrite_existing = false, ObjectId* = 0); - int dropDatafile(const Datafile&); - Datafile getDatafile(Uint32 node, const char * path); - - int createUndofile(const Undofile &, bool overwrite_existing = false, ObjectId * = 0); - int dropUndofile(const Undofile&); - Undofile getUndofile(Uint32 node, const char * path); - - /** @} *******************************************************************/ - - protected: - Dictionary(Ndb & ndb); - ~Dictionary(); - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbDictionaryImpl; - friend class UtilTransactions; - friend class NdbBlob; -#endif - class NdbDictionaryImpl & m_impl; - Dictionary(NdbDictionaryImpl&); - const Table * getIndexTable(const char * indexName, - const char * tableName) const; - public: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - const Table * getTable(const char * name, void **data) const; - void set_local_table_data_size(unsigned sz); - - const Index * getIndexGlobal(const char * indexName, - const Table &ndbtab) const; - const Table * getTableGlobal(const char * tableName) const; - int alterTableGlobal(const Table &f, const Table &t); - int dropTableGlobal(const Table &ndbtab); - int dropIndexGlobal(const Index &index); - int removeIndexGlobal(const Index &ndbidx, int invalidate) const; - int removeTableGlobal(const Table &ndbtab, int invalidate) const; -#endif - }; -}; - 
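For reference, the Event and Dictionary classes removed above were normally driven from application code roughly as follows. This is only a sketch, not code from this tree: the table, column and event names are hypothetical, and the Ndb handle comes from the other NDB API headers deleted by this commit.

    #include <NdbApi.hpp>

    // Sketch: look up a table through the dictionary and define an event
    // on two of its columns ("t1", "pk", "val" are hypothetical names).
    int define_event(Ndb &ndb)
    {
      NdbDictionary::Dictionary *dict = ndb.getDictionary();
      const NdbDictionary::Table *tab = dict->getTable("t1");
      if (tab == 0)
        return -1;                       // details in dict->getNdbError()

      NdbDictionary::Event ev("ev_t1", *tab);
      ev.addTableEvent(NdbDictionary::Event::TE_ALL);  // insert/update/delete
      ev.addEventColumn("pk");
      ev.addEventColumn("val");
      ev.mergeEvents(true);              // merge changes to the same PK per GCI

      return dict->createEvent(ev);      // 0 on success, -1 on error
    }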
-class NdbOut& operator <<(class NdbOut& out, const NdbDictionary::Column& col); - -#endif diff --git a/storage/ndb/include/ndbapi/NdbError.hpp b/storage/ndb/include/ndbapi/NdbError.hpp deleted file mode 100644 index b2132f12b72..00000000000 --- a/storage/ndb/include/ndbapi/NdbError.hpp +++ /dev/null @@ -1,250 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_ERROR_HPP -#define NDB_ERROR_HPP - -#include - -/** - * @struct NdbError - * @brief Contains error information - * - * A NdbError consists of five parts: - * -# Error status : Application impact - * -# Error classification : Logical error group - * -# Error code : Internal error code - * -# Error message : Context independent description of error - * -# Error details : Context dependent information - * (not always available) - * - * Error status is usually used for programming against errors. - * If more detailed error control is needed, it is possible to - * use the error classification. - * - * It is not recommended to write application programs dependent on - * specific error codes. - * - * The error messages and error details may - * change without notice. - * - * For example of use, see @ref ndbapi_retries.cpp. - */ -struct NdbError { - /** - * Status categorizes error codes into status values reflecting - * what the application should do when encountering errors - */ - enum Status { - /** - * The error code indicate success
- * (Includes classification: NdbError::NoError) - */ - Success = ndberror_st_success, - - /** - * The error code indicates a temporary error. - * The application should typically retry.
- * (Includes classifications: NdbError::InsufficientSpace, - * NdbError::TemporaryResourceError, NdbError::NodeRecoveryError, - * NdbError::OverloadError, NdbError::NodeShutdown - * and NdbError::TimeoutExpired.) - */ - TemporaryError = ndberror_st_temporary, - - /** - * The error code indicates a permanent error.
- * (Includes classifications: NdbError::PermanentError, - * NdbError::ApplicationError, NdbError::NoDataFound, - * NdbError::ConstraintViolation, NdbError::SchemaError, - * NdbError::UserDefinedError, NdbError::InternalError, and, - * NdbError::FunctionNotImplemented.) - */ - PermanentError = ndberror_st_permanent, - - /** - * The result/status is unknown.
- * (Includes classifications: NdbError::UnknownResultError, and - * NdbError::UnknownErrorCode.) - */ - UnknownResult = ndberror_st_unknown - }; - - /** - * Type of error - */ - enum Classification { - /** - * Success. No error occurred. - */ - NoError = ndberror_cl_none, - - /** - * Error in application program. - */ - ApplicationError = ndberror_cl_application, - - /** - * Read operation failed due to missing record. - */ - NoDataFound = ndberror_cl_no_data_found, - - /** - * E.g. inserting a tuple with a primary key already existing - * in the table. - */ - ConstraintViolation = ndberror_cl_constraint_violation, - - /** - * Error in creating table or usage of table. - */ - SchemaError = ndberror_cl_schema_error, - - /** - * Error occurred in interpreted program. - */ - UserDefinedError = ndberror_cl_user_defined, - - /** - * E.g. insufficient memory for data or indexes. - */ - InsufficientSpace = ndberror_cl_insufficient_space, - - /** - * E.g. too many active transactions. - */ - TemporaryResourceError = ndberror_cl_temporary_resource, - - /** - * Temporary failures which are probably inflicted by a node - * recovery in progress. Examples: information sent between - * application and NDB lost, distribution change. - */ - NodeRecoveryError = ndberror_cl_node_recovery, - - /** - * E.g. out of log file space. - */ - OverloadError = ndberror_cl_overload, - - /** - * Timeouts, often inflicted by deadlocks in NDB. - */ - TimeoutExpired = ndberror_cl_timeout_expired, - - /** - * Is is unknown whether the transaction was committed or not. - */ - UnknownResultError = ndberror_cl_unknown_result, - - /** - * A serious error in NDB has occurred. - */ - InternalError = ndberror_cl_internal_error, - - /** - * A function used is not yet implemented. - */ - FunctionNotImplemented = ndberror_cl_function_not_implemented, - - /** - * Error handler could not determine correct error code. - */ - UnknownErrorCode = ndberror_cl_unknown_error_code, - - /** - * Node shutdown - */ - NodeShutdown = ndberror_cl_node_shutdown, - - /** - * Schema object already exists - */ - SchemaObjectExists = ndberror_cl_schema_object_already_exists, - - /** - * Request sent to non master - */ - InternalTemporary = ndberror_cl_internal_temporary - }; - - /** - * Error status. - */ - Status status; - - /** - * Error type - */ - Classification classification; - - /** - * Error code - */ - int code; - - /** - * Mysql error code - */ - int mysql_code; - - /** - * Error message - */ - const char * message; - - /** - * The detailed description. This is extra information regarding the - * error which is not included in the error message. 
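The Status and Classification split documented above is what application retry loops keyed on. A minimal sketch of that pattern, assuming the usual Ndb/NdbTransaction entry points from the other deleted headers; apply_ops() is a hypothetical callback that defines the operations on the transaction.

    #include <NdbApi.hpp>

    // Sketch: retry while the failure is classified as temporary,
    // give up on permanent or unknown results.
    int run_with_retry(Ndb &ndb, int (*apply_ops)(NdbTransaction *))
    {
      for (int attempt = 0; attempt < 10; attempt++)
      {
        NdbTransaction *trans = ndb.startTransaction();
        if (trans == 0)
          return -1;                                  // see ndb.getNdbError()
        int rc = apply_ops(trans);
        if (rc == 0)
          rc = trans->execute(NdbTransaction::Commit);
        const NdbError err = trans->getNdbError();
        ndb.closeTransaction(trans);
        if (rc == 0)
          return 0;
        if (err.status != NdbError::TemporaryError)   // permanent/unknown: stop
          return -1;
        // temporary (node recovery, overload, timeout, ...): try again
      }
      return -1;
    }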
- * - * @note Is NULL when no details specified - */ - char * details; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NdbError(){ - status = UnknownResult; - classification = NoError; - code = 0; - mysql_code = 0; - message = 0; - details = 0; - } - NdbError(const ndberror_struct & ndberror){ - status = (NdbError::Status) ndberror.status; - classification = (NdbError::Classification) ndberror.classification; - code = ndberror.code; - mysql_code = ndberror.mysql_code; - message = ndberror.message; - details = ndberror.details; - } - operator ndberror_struct() const { - ndberror_struct ndberror; - ndberror.status = (ndberror_status_enum) status; - ndberror.classification = (ndberror_classification_enum) classification; - ndberror.code = code; - ndberror.mysql_code = mysql_code; - ndberror.message = message; - ndberror.details = details; - return ndberror; - } -#endif -}; - -class NdbOut& operator <<(class NdbOut&, const NdbError &); -class NdbOut& operator <<(class NdbOut&, const NdbError::Status&); -class NdbOut& operator <<(class NdbOut&, const NdbError::Classification&); -#endif diff --git a/storage/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp deleted file mode 100644 index 5f41f30a38b..00000000000 --- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp +++ /dev/null @@ -1,268 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB, 2010 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbEventOperation_H -#define NdbEventOperation_H - -class NdbGlobalEventBuffer; -class NdbEventOperationImpl; - -/** - * @class NdbEventOperation - * @brief Class of operations for getting change events from database. - * - * Brief description on how to work with events: - * - * - An event, represented by an NdbDictionary::Event, i created in the - * Database through - * NdbDictionary::Dictionary::createEvent() (note that this can be done - * by any application or thread and not necessarily by the "listener") - * - To listen to events, an NdbEventOperation object is instantiated by - * Ndb::createEventOperation() - * - execute() starts the event flow. Use Ndb::pollEvents() to wait - * for an event to occur. Use Ndb::nextEvent() to iterate - * through the events that have occured. - * - The instance is removed by Ndb::dropEventOperation() - * - * For more info see: - * @ref ndbapi_event.cpp - * - * Known limitations: - * - * - Maximum number of active NdbEventOperations are now set at compile time. - * Today 100. This will become a configuration parameter later. - * - Maximum number of NdbEventOperations tied to same event are maximum 16 - * per process. - * - * Known issues: - * - * - When several NdbEventOperation's are tied to the same event in the same - * process they will share the circular buffer. 
The BufferLength will then - * be the same for all and decided by the first NdbEventOperation - * instantiation. Just make sure to instantiate the "largest" one first. - * - Today all events INSERT/DELETE/UPDATE and all changed attributes are - * sent to the API, even if only specific attributes have been specified. - * These are however hidden from the user and only relevant data is shown - * after Ndb::nextEvent(). - * - "False" exits from Ndb::pollEvents() may occur and thus - * the subsequent Ndb::nextEvent() will return NULL, - * since there was no available data. Just do Ndb::pollEvents() again. - * - Event code does not check table schema version. Make sure to drop events - * after table is dropped. Will be fixed in later - * versions. - * - If a node failure has occurred, not all events will be received - * anymore. Drop the NdbEventOperation and create it again after nodes are up - * again. Will be fixed in later versions. - * - * Test status: - * - * - Tests have been run on 1-node and 2-node systems - * - * Useful API programs: - * - * - ndb_select_all -d sys 'NDB$EVENTS_0' - * shows contents in the system table containing created events. - * - * @note this is an interface for viewing events that is subject to change - */ -class NdbEventOperation { -public: - /** - * State of the NdbEventOperation object - */ - enum State { - EO_CREATED, ///< Created but execute() not called - EO_EXECUTING, ///< execute() called - EO_DROPPED, ///< Waiting to be deleted, Object unusable. - EO_ERROR ///< An error has occurred. Object unusable. - }; - /** - * Retrieve current state of the NdbEventOperation object - */ - State getState(); - /** - * See NdbDictionary::Event. Default is false. - */ - void mergeEvents(bool flag); - - /** - * Activates the NdbEventOperation to start receiving events. The - * changed attribute values may be retrieved after Ndb::nextEvent() - * has returned not NULL. The getValue() methods must be called - * prior to execute(). - * - * @return 0 if successful otherwise -1. - */ - int execute(); - - /** - * Defines a retrieval operation of an attribute value. - * The NDB API allocates memory for the NdbRecAttr object that - * will hold the returned attribute value. - * - * @note Note that it is the application's responsibility - * to allocate enough memory for aValue (if non-NULL). - * The buffer aValue supplied by the application must be - * aligned appropriately. The buffer is used directly - * (avoiding a copy penalty) only if it is aligned on a - * 4-byte boundary and the attribute size in bytes - * (i.e. NdbRecAttr::attrSize() times NdbRecAttr::arraySize() is - * a multiple of 4). - * - * @note There are two versions, getValue() and - * getPreValue() for retrieving the current and - * previous value respectively. - * - * @note This method does not fetch the attribute value from - * the database! The NdbRecAttr object returned by this method - * is not readable/printable before the - * execute() has been made and - * Ndb::nextEvent() has returned not NULL. - * If a specific attribute has not changed the corresponding - * NdbRecAttr will be in state UNDEFINED. This is checked by - * NdbRecAttr::isNULL() which then returns -1. - * - * @param anAttrName Attribute name - * @param aValue If this is non-NULL, then the attribute value - * will be returned in this parameter.
- * If NULL, then the attribute value will only - * be stored in the returned NdbRecAttr object. - * @return An NdbRecAttr object to hold the value of - * the attribute, or a NULL pointer - * (indicating error). - */ - NdbRecAttr *getValue(const char *anAttrName, char *aValue = 0); - /** - * See getValue(). - */ - NdbRecAttr *getPreValue(const char *anAttrName, char *aValue = 0); - - /** - * These methods replace getValue/getPreValue for blobs. Each - * method creates a blob handle NdbBlob. The handle supports only - * read operations. See NdbBlob. - */ - NdbBlob* getBlobHandle(const char *anAttrName); - NdbBlob* getPreBlobHandle(const char *anAttrName); - - int isOverrun() const; - - /** - * In the current implementation a node failure may cause loss of events, - * in which case isConsistent() will return false - */ - bool isConsistent() const; - - /** - * Query for the type of event that occurred. - * - * @note Only valid after Ndb::nextEvent() has been called and - * returned a not NULL value - * - * @return type of event - */ - NdbDictionary::Event::TableEvent getEventType() const; - - /** - * Check if table name has changed, for event TE_ALTER - */ - bool tableNameChanged() const; - - /** - * Check if table frm has changed, for event TE_ALTER - */ - bool tableFrmChanged() const; - - /** - * Check if table fragmentation has changed, for event TE_ALTER - */ - bool tableFragmentationChanged() const; - - /** - * Check if table range partition list name has changed, for event TE_ALTER - */ - bool tableRangeListChanged() const; - - /** - * Retrieve the GCI of the latest retrieved event - * - * @return GCI number - */ - Uint64 getGCI() const; - - /** - * Retrieve the AnyValue of the latest retrieved event - * - * @return AnyValue - */ - Uint32 getAnyValue() const; - - /** - * Retrieve the complete GCI in the cluster (not necessarily - * associated with an event) - * - * @return GCI number - */ - Uint64 getLatestGCI() const; - - /** - * Get the latest error - * - * @return Error object.
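Taken together, the methods above formed the listening side of the event API. A rough sketch of the typical loop, with hypothetical event and column names and most error handling omitted; pollEvents() and nextEvent() live in the deleted Ndb.hpp.

    #include <NdbApi.hpp>

    // Sketch: subscribe to the event defined earlier and drain changes.
    int listen_once(Ndb &ndb)
    {
      NdbEventOperation *op = ndb.createEventOperation("ev_t1");
      if (op == 0)
        return -1;
      NdbRecAttr *new_val = op->getValue("val");     // post-image
      NdbRecAttr *old_val = op->getPreValue("val");  // pre-image
      if (op->execute() != 0)                        // start the event flow
        return -1;

      while (ndb.pollEvents(1000) > 0)               // wait up to 1000 ms
      {
        NdbEventOperation *data;
        while ((data = ndb.nextEvent()) != 0)
        {
          if (data->getEventType() == NdbDictionary::Event::TE_UPDATE)
          {
            // new_val / old_val now refer to the received row change
          }
        }
      }
      ndb.dropEventOperation(op);
      return 0;
    }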
- */ - const struct NdbError & getNdbError() const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** these are subject to change at any time */ - const NdbDictionary::Table* getTable() const; - const NdbDictionary::Event *getEvent() const; - const NdbRecAttr *getFirstPkAttr() const; - const NdbRecAttr *getFirstPkPreAttr() const; - const NdbRecAttr *getFirstDataAttr() const; - const NdbRecAttr *getFirstDataPreAttr() const; - -// bool validateTable(NdbDictionary::Table &table) const; - - void setCustomData(void * data); - void * getCustomData() const; - - void clearError(); - int hasError() const; - - int getReqNodeId() const; - int getNdbdNodeId() const; -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /* - * - */ - void print(); -#endif - -private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbEventOperationImpl; - friend class NdbEventBuffer; -#endif - NdbEventOperation(Ndb *theNdb, const char* eventName); - ~NdbEventOperation(); - class NdbEventOperationImpl &m_impl; - NdbEventOperation(NdbEventOperationImpl& impl); -}; - -typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*); -#endif diff --git a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp deleted file mode 100644 index 537f562ca18..00000000000 --- a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp +++ /dev/null @@ -1,190 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbIndexOperation_H -#define NdbIndexOperation_H - -#include "NdbOperation.hpp" - -class Index; -class NdbResultSet; - -/** - * @class NdbIndexOperation - * @brief Class of index operations for use in transactions - */ -class NdbIndexOperation : public NdbOperation -{ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbTransaction; -#endif - -public: - /** - * @name Define Standard Operation - * @{ - */ - - /** insert is not allowed */ - int insertTuple(); - - /** - * Define the NdbIndexOperation to be a standard operation of type readTuple. - * When calling NdbTransaction::execute, this operation - * reads a tuple. - * - * @return 0 if successful otherwise -1. - */ - int readTuple(LockMode); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Define the NdbIndexOperation to be a standard operation of type readTuple. - * When calling NdbTransaction::execute, this operation - * reads a tuple. - * - * @return 0 if successful otherwise -1. - */ - int readTuple(); - - /** - * Define the NdbIndexOperation to be a standard operation of type - * readTupleExclusive. - * When calling NdbTransaction::execute, this operation - * read a tuple using an exclusive lock. - * - * @return 0 if successful otherwise -1. - */ - int readTupleExclusive(); - - /** - * Define the NdbIndexOperation to be a standard operation of type simpleRead. 
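A unique-index lookup through this class typically looked like the sketch below. Index, table and column names are hypothetical, and the name-based getNdbIndexOperation() overload is assumed to exist in the (also deleted) NdbTransaction.hpp.

    #include <NdbApi.hpp>

    // Sketch: committed read of "pk" via a unique index on "val".
    int read_by_unique_index(Ndb &ndb, Uint32 key, Uint32 *out)
    {
      NdbTransaction *trans = ndb.startTransaction();
      if (trans == 0)
        return -1;
      NdbIndexOperation *op =
          trans->getNdbIndexOperation("uk_val$unique", "t1");  // hypothetical
      int rc = -1;
      if (op != 0 &&
          op->readTuple(NdbOperation::LM_CommittedRead) == 0 &&
          op->equal("val", key) == 0)
      {
        NdbRecAttr *pk = op->getValue("pk");
        if (trans->execute(NdbTransaction::Commit) == 0 &&
            pk != 0 && pk->isNULL() == 0)
        {
          *out = pk->u_32_value();
          rc = 0;
        }
      }
      ndb.closeTransaction(trans);
      return rc;
    }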
- * When calling NdbTransaction::execute, this operation - * reads an existing tuple (using shared read lock), - * but releases lock immediately after read. - * - * @note Using this operation twice in the same transaction - * may produce different results (e.g. if there is another - * transaction which updates the value between the - * simple reads). - * - * Note that simpleRead can read the value from any database node while - * standard read always read the value on the database node which is - * primary for the record. - * - * @return 0 if successful otherwise -1. - */ - int simpleRead(); - - /** - * Define the NdbOperation to be a standard operation of type committedRead. - * When calling NdbTransaction::execute, this operation - * read latest committed value of the record. - * - * This means that if another transaction is updating the - * record, then the current transaction will not wait. - * It will instead use the latest committed value of the - * record. - * - * @return 0 if successful otherwise -1. - */ - int dirtyRead(); - - int committedRead(); -#endif - - /** - * Define the NdbIndexOperation to be a standard operation of type - * updateTuple. - * - * When calling NdbTransaction::execute, this operation - * updates a tuple in the table. - * - * @return 0 if successful otherwise -1. - */ - int updateTuple(); - - /** - * Define the NdbIndexOperation to be a standard operation of type - * deleteTuple. - * - * When calling NdbTransaction::execute, this operation - * deletes a tuple. - * - * @return 0 if successful otherwise -1. - */ - int deleteTuple(); - - /** - * Get index object for this operation - */ - const NdbDictionary::Index * getIndex() const; - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Define the NdbIndexOperation to be a standard operation of type - * dirtyUpdate. - * - * When calling NdbTransaction::execute, this operation - * updates without two-phase commit. - * - * @return 0 if successful otherwise -1. - */ - int dirtyUpdate(); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** @} *********************************************************************/ - /** - * @name Define Interpreted Program Operation - * @{ - */ - - /** - * Update a tuple using an interpreted program. - * - * @return 0 if successful otherwise -1. - */ - int interpretedUpdateTuple(); - - /** - * Delete a tuple using an interpreted program. - * - * @return 0 if successful otherwise -1. - */ - int interpretedDeleteTuple(); -#endif - - /** @} *********************************************************************/ - -private: - NdbIndexOperation(Ndb* aNdb); - ~NdbIndexOperation(); - - int receiveTCINDXREF(NdbApiSignal* aSignal); - - // Overloaded methods from NdbCursorOperation - int indxInit(const class NdbIndexImpl* anIndex, - const class NdbTableImpl* aTable, - NdbTransaction*); - - // Private attributes - const NdbIndexImpl* m_theIndex; - friend struct Ndb_free_list_t; -}; - -#endif diff --git a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp deleted file mode 100644 index 1d2c9da516e..00000000000 --- a/storage/ndb/include/ndbapi/NdbIndexScanOperation.hpp +++ /dev/null @@ -1,206 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbIndexScanOperation_H -#define NdbIndexScanOperation_H - -#include - -/** - * @class NdbIndexScanOperation - * @brief Class of scan operations for use to scan ordered index - */ -class NdbIndexScanOperation : public NdbScanOperation { -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbTransaction; - friend class NdbResultSet; - friend class NdbOperation; - friend class NdbScanOperation; - friend class NdbIndexStat; -#endif - -public: - /** - * readTuples using ordered index - * - * @param lock_mode Lock mode - * @param scan_flags see @ref ScanFlag - * @param parallel No of fragments to scan in parallel (0=max) - */ - virtual int readTuples(LockMode lock_mode = LM_Read, - Uint32 scan_flags = 0, - Uint32 parallel = 0, - Uint32 batch = 0); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * readTuples using ordered index - * - * @param lock_mode Lock mode - * @param batch No of rows to fetch from each fragment at a time - * @param parallel No of fragments to scan in parallel - * @param order_by Order result set in index order - * @param order_desc Order descending, ignored unless order_by - * @param read_range_no Enable reading of range no using @ref get_range_no - * @returns 0 for success and -1 for failure - * @see NdbScanOperation::readTuples - */ - inline int readTuples(LockMode lock_mode, - Uint32 batch, - Uint32 parallel, - bool order_by, - bool order_desc = false, - bool read_range_no = false, - bool keyinfo = false, - bool multi_range = false) { - Uint32 scan_flags = - (SF_OrderBy & -(Int32)order_by) | - (SF_Descending & -(Int32)order_desc) | - (SF_ReadRangeNo & -(Int32)read_range_no) | - (SF_KeyInfo & -(Int32)keyinfo) | - (SF_MultiRange & -(Int32)multi_range); - - return readTuples(lock_mode, scan_flags, parallel, batch); - } -#endif - - /** - * Type of ordered index key bound. The values (0-4) will not change - * and can be used explicitly (e.g. they could be computed). - */ - enum BoundType { - BoundLE = 0, ///< lower bound - BoundLT = 1, ///< lower bound, strict - BoundGE = 2, ///< upper bound - BoundGT = 3, ///< upper bound, strict - BoundEQ = 4 ///< equality - }; - - /** - * Define bound on index key in range scan. - * - * Each index key can have lower and/or upper bound. Setting the key - * equal to a value defines both upper and lower bounds. The bounds - * can be defined in any order. Conflicting definitions is an error. - * - * For equality, it is better to use BoundEQ instead of the equivalent - * pair of BoundLE and BoundGE. This is especially true when table - * partition key is an initial part of the index key. - * - * The sets of lower and upper bounds must be on initial sequences of - * index keys. All but possibly the last bound must be non-strict. - * So "a >= 2 and b > 3" is ok but "a > 2 and b >= 3" is not. - * - * The scan may currently return tuples for which the bounds are not - * satisfied. For example, "a <= 2 and b <= 3" scans the index up to - * (a=2, b=3) but also returns any (a=1, b=4). 
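As a reference for the bound semantics above (BoundLE/BoundLT constrain the low end of the range, BoundGE/BoundGT the high end), here is a sketch of a scan for "a >= lo AND a < hi" over an ordered index; index, table and column names are hypothetical.

    #include <NdbApi.hpp>

    // Sketch: range scan over ordered index "ix_a" on column "a" of "t1".
    int scan_range(Ndb &ndb, Uint32 lo, Uint32 hi)
    {
      NdbTransaction *trans = ndb.startTransaction();
      if (trans == 0)
        return -1;
      NdbIndexScanOperation *scan =
          trans->getNdbIndexScanOperation("ix_a", "t1");
      if (scan == 0 ||
          scan->readTuples(NdbOperation::LM_Read) != 0 ||
          scan->setBound("a", NdbIndexScanOperation::BoundLE, &lo) != 0 || // a >= lo
          scan->setBound("a", NdbIndexScanOperation::BoundGT, &hi) != 0)   // a <  hi
      {
        ndb.closeTransaction(trans);
        return -1;
      }
      NdbRecAttr *a = scan->getValue("a");
      if (trans->execute(NdbTransaction::NoCommit) != 0)
      {
        ndb.closeTransaction(trans);
        return -1;
      }
      while (scan->nextResult(true) == 0)
      {
        // a->u_32_value() is the indexed column of the current row
      }
      ndb.closeTransaction(trans);
      return 0;
    }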
- * - * NULL is treated like a normal value which is less than any not-NULL - * value and equal to another NULL value. To compare against NULL use - * setBound with null pointer (0). - * - * An index stores also all-NULL keys. Doing index scan with empty - * bound set returns all table tuples. - * - * @param attr Attribute name, alternatively: - * @param type Type of bound - * @param value Pointer to bound value, 0 for NULL - * @return 0 if successful otherwise -1 - * - * @note See comment under equal() about data format and length. - */ -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int setBound(const char* attr, int type, const void* value, Uint32 len); -#endif - int setBound(const char* attr, int type, const void* value); - - /** - * Define bound on index key in range scan using index column id. - * See the other setBound() method for details. - */ -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len); -#endif - int setBound(Uint32 anAttrId, int type, const void* aValue); - - /** - * Reset bounds and put operation in list that will be - * sent on next execute - */ - int reset_bounds(bool forceSend = false); - - /** - * Marks end of a bound, - * used when batching index reads (multiple ranges) - */ - int end_of_bound(Uint32 range_no); - - /** - * Return range no for current row - */ - int get_range_no(); - - /** - * Is current scan sorted - */ - bool getSorted() const { return m_ordered; } - - /** - * Is current scan sorted descending - */ - bool getDescending() const { return m_descending; } - -private: - NdbIndexScanOperation(Ndb* aNdb); - virtual ~NdbIndexScanOperation(); - - int setBound(const NdbColumnImpl*, int type, const void* aValue); - int insertBOUNDS(Uint32 * data, Uint32 sz); - Uint32 getKeyFromSCANTABREQ(Uint32* data, Uint32 size); - - virtual int equal_impl(const NdbColumnImpl*, const char*); - virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char*); - - void fix_get_values(); - int next_result_ordered(bool fetchAllowed, bool forceSend = false); - int send_next_scan_ordered(Uint32 idx); - int compare(Uint32 key, Uint32 cols, const NdbReceiver*, const NdbReceiver*); - - Uint32 m_sort_columns; - Uint32 m_this_bound_start; - Uint32 * m_first_bound_word; - - friend struct Ndb_free_list_t; -}; - -inline -int -NdbIndexScanOperation::setBound(const char* attr, int type, const void* value, - Uint32 len) -{ - return setBound(attr, type, value); -} - -inline -int -NdbIndexScanOperation::setBound(Uint32 anAttrId, int type, const void* value, - Uint32 len) -{ - return setBound(anAttrId, type, value); -} - -#endif diff --git a/storage/ndb/include/ndbapi/NdbIndexStat.hpp b/storage/ndb/include/ndbapi/NdbIndexStat.hpp deleted file mode 100644 index 9fd88c994c6..00000000000 --- a/storage/ndb/include/ndbapi/NdbIndexStat.hpp +++ /dev/null @@ -1,147 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbIndexStat_H -#define NdbIndexStat_H - -#include -#include -#include -class NdbIndexImpl; -class NdbIndexScanOperation; - -/* - * Statistics for an ordered index. - */ -class NdbIndexStat { -public: - NdbIndexStat(const NdbDictionary::Index* index); - ~NdbIndexStat(); - /* - * Allocate memory for cache. Argument is minimum number of stat - * entries and applies to lower and upper bounds separately. More - * entries may fit (keys have variable size). If not used, db is - * contacted always. - */ - int alloc_cache(Uint32 entries); - /* - * Flags for records_in_range. - */ - enum { - RR_UseDb = 1, // contact db - RR_NoUpdate = 2 // but do not update cache - }; - /* - * Estimate how many index records need to be scanned. The scan - * operation must be prepared with lock mode LM_CommittedRead and must - * have the desired bounds set. The routine may use local cache or - * may contact db by executing the operation. - * - * If returned count is zero then db was contacted and the count is - * exact. Otherwise the count is approximate. If cache is used then - * caller must provide estimated number of table rows. It will be - * multiplied by a percentage obtained from the cache (result zero is - * returned as 1). - */ - int records_in_range(const NdbDictionary::Index* index, - NdbIndexScanOperation* op, - Uint64 table_rows, - Uint64* count, - int flags); - /* - * Get latest error. - */ - const NdbError& getNdbError() const; - -private: - /* - * There are 2 areas: start keys and end keys. An area has pointers - * at beginning and entries at end. Pointers are sorted by key. - * - * A pointer contains entry offset and also entry timestamp. An entry - * contains the key and percentage of rows _not_ satisfying the bound - * i.e. less than start key or greater than end key. - * - * A key is an array of index key bounds. Each has type (0-4) in - * first word followed by data with AttributeHeader. - * - * Stat update comes as pair of start and end key and associated - * percentages. Stat query takes best match of start and end key from - * each area separately. Rows in range percentage is then computed by - * excluding the two i.e. as 100 - (start key pct + end key pct). 
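records_in_range() was consumed almost exclusively by ha_ndbcluster for optimizer row estimates. A sketch of that call based on the signature above; it assumes the caller already holds a retrieved index object and a scan operation prepared with LM_CommittedRead and the desired bounds, and that the header is reachable as <NdbIndexStat.hpp>.

    #include <NdbApi.hpp>
    #include <NdbIndexStat.hpp>   // include path assumed

    // Sketch: estimate rows in the range described by an index scan op.
    Uint64 estimate_rows(const NdbDictionary::Index *index,
                         NdbIndexScanOperation *op,
                         Uint64 table_rows)
    {
      NdbIndexStat stat(index);
      if (stat.alloc_cache(32) != 0)     // cache up to 32 bound entries
        return table_rows;               // fall back to the table estimate
      Uint64 count = 0;
      // RR_UseDb: ask the data nodes instead of relying on the local cache
      if (stat.records_in_range(index, op, table_rows, &count,
                                NdbIndexStat::RR_UseDb) != 0)
        return table_rows;
      return count;                      // 0 means an exact empty range
    }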
- * - * TODO use more compact key format - */ - struct Pointer; - friend struct Pointer; - struct Entry; - friend struct Entry; - struct Area; - friend struct Area; - struct Pointer { - Uint16 m_pos; - Uint16 m_seq; - }; - struct Entry { - float m_pct; - Uint32 m_keylen; - }; - STATIC_CONST( EntrySize = sizeof(Entry) >> 2 ); - STATIC_CONST( PointerSize = sizeof(Pointer) >> 2 ); - struct Area { - Uint32* m_data; - Uint32 m_offset; - Uint32 m_free; - Uint16 m_entries; - Uint8 m_idir; - Uint8 pad1; - Area() {} - Pointer& get_pointer(unsigned i) const { - return *(Pointer*)&m_data[i]; - } - Entry& get_entry(unsigned i) const { - return *(Entry*)&m_data[get_pointer(i).m_pos]; - } - Uint32 get_pos(const Entry& e) const { - return (const Uint32*)&e - m_data; - } - unsigned get_firstpos() const { - return PointerSize * m_entries + m_free; - } - }; - const NdbIndexImpl& m_index; - Uint32 m_areasize; - Uint16 m_seq; - Area m_area[2]; - Uint32* m_cache; - NdbError m_error; -#ifdef VM_TRACE - void stat_verify(); -#endif - int stat_cmpkey(const Area& a, const Uint32* key1, Uint32 keylen1, - const Uint32* key2, Uint32 keylen2); - int stat_search(const Area& a, const Uint32* key, Uint32 keylen, - Uint32* idx, bool* match); - int stat_oldest(const Area& a); - int stat_delete(Area& a, Uint32 k); - int stat_update(const Uint32* key1, Uint32 keylen1, - const Uint32* key2, Uint32 keylen2, const float pct[2]); - int stat_select(const Uint32* key1, Uint32 keylen1, - const Uint32* key2, Uint32 keylen2, float pct[2]); - void set_error(int code); -}; - -#endif diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp deleted file mode 100644 index 9049c8cdd22..00000000000 --- a/storage/ndb/include/ndbapi/NdbOperation.hpp +++ /dev/null @@ -1,1356 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB, 2010 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbOperation_H -#define NdbOperation_H - -#include -#include "ndbapi_limits.h" -#include "NdbError.hpp" -#include "NdbReceiver.hpp" -#include "NdbDictionary.hpp" -#include "Ndb.hpp" - -class Ndb; -class NdbApiSignal; -class NdbRecAttr; -class NdbOperation; -class NdbTransaction; -class NdbColumnImpl; -class NdbBlob; - -/** - * @class NdbOperation - * @brief Class of operations for use in transactions. 
- */ -class NdbOperation -{ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbTransaction; - friend class NdbScanOperation; - friend class NdbScanReceiver; - friend class NdbScanFilter; - friend class NdbScanFilterImpl; - friend class NdbReceiver; - friend class NdbBlob; -#endif - -public: - /** - * @name Define Standard Operation Type - * @{ - */ - - /** - * Different access types (supported by sub-classes of NdbOperation) - */ - - enum Type { - PrimaryKeyAccess ///< Read, insert, update, or delete using pk -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 0 // NdbOperation -#endif - ,UniqueIndexAccess ///< Read, update, or delete using unique index -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 1 // NdbIndexOperation -#endif - ,TableScan ///< Full table scan -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 2 // NdbScanOperation -#endif - ,OrderedIndexScan ///< Ordered index scan -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 3 // NdbIndexScanOperation -#endif - }; - - /** - * Lock when performing read - */ - - enum LockMode { - LM_Read ///< Read with shared lock -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 0 -#endif - ,LM_Exclusive ///< Read with exclusive lock -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 1 -#endif - ,LM_CommittedRead ///< Ignore locks, read last committed value -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 2, - LM_Dirty = 2, -#endif - LM_SimpleRead = 3 ///< Read with shared lock, but release lock directly - }; - - /** - * How should transaction be handled if operation fails. - * - * If AO_IgnoreError, a failure in one operation will not abort the - * transaction, and NdbTransaction::execute() will return 0 (success). Use - * NdbOperation::getNdbError() to check for errors from individual - * operations. - * - * If AbortOnError, a failure in one operation will abort the transaction - * and cause NdbTransaction::execute() to return -1. - * - * Abort option can be set on execute(), or in the individual operation. - * Setting AO_IgnoreError or AbortOnError in execute() overrides the settings - * on individual operations. Setting DefaultAbortOption in execute() (the - * default) causes individual operation settings to be used. - * - * For READ, default is AO_IgnoreError - * DML, default is AbortOnError - * CommittedRead does _only_ support AO_IgnoreError - */ - enum AbortOption { - DefaultAbortOption = -1,///< Use default as specified by op-type - AbortOnError = 0, ///< Abort transaction on failed operation - AO_IgnoreError = 2 ///< Transaction continues on failed operation - }; - - /** - * Define the NdbOperation to be a standard operation of type insertTuple. - * When calling NdbTransaction::execute, this operation - * adds a new tuple to the table. - * - * @return 0 if successful otherwise -1. - */ - virtual int insertTuple(); - - /** - * Define the NdbOperation to be a standard operation of type updateTuple. - * When calling NdbTransaction::execute, this operation - * updates a tuple in the table. - * - * @return 0 if successful otherwise -1. - */ - virtual int updateTuple(); - - /** - * Define the NdbOperation to be a standard operation of type writeTuple. - * When calling NdbTransaction::execute, this operation - * writes a tuple to the table. - * If the tuple exists, it updates it, otherwise an insert takes place. - * - * @return 0 if successful otherwise -1. - */ - virtual int writeTuple(); - - /** - * Define the NdbOperation to be a standard operation of type deleteTuple. - * When calling NdbTransaction::execute, this operation - * delete a tuple. 
- * - * @return 0 if successful otherwise -1. - */ - virtual int deleteTuple(); - - /** - * Define the NdbOperation to be a standard operation of type readTuple. - * When calling NdbTransaction::execute, this operation - * reads a tuple. - * - * @return 0 if successful otherwise -1. - */ - virtual int readTuple(LockMode); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Define the NdbOperation to be a standard operation of type readTuple. - * When calling NdbTransaction::execute, this operation - * reads a tuple. - * - * @return 0 if successful otherwise -1. - */ - virtual int readTuple(); - - /** - * Define the NdbOperation to be a standard operation of type - * readTupleExclusive. - * When calling NdbTransaction::execute, this operation - * read a tuple using an exclusive lock. - * - * @return 0 if successful otherwise -1. - */ - virtual int readTupleExclusive(); - - /** - * Define the NdbOperation to be a standard operation of type - * simpleRead. - * When calling NdbTransaction::execute, this operation - * reads an existing tuple (using shared read lock), - * but releases lock immediately after read. - * - * @note Using this operation twice in the same transaction - * may produce different results (e.g. if there is another - * transaction which updates the value between the - * simple reads). - * - * Note that simpleRead can read the value from any database node while - * standard read always read the value on the database node which is - * primary for the record. - * - * @return 0 if successful otherwise -1. - */ - virtual int simpleRead(); - - /** - * Define the NdbOperation to be a standard operation of type committedRead. - * When calling NdbTransaction::execute, this operation - * read latest committed value of the record. - * - * This means that if another transaction is updating the - * record, then the current transaction will not wait. - * It will instead use the latest committed value of the - * record. - * dirtyRead is a deprecated name for committedRead - * - * @return 0 if successful otherwise -1. - * @depricated - */ - virtual int dirtyRead(); - - /** - * Define the NdbOperation to be a standard operation of type committedRead. - * When calling NdbTransaction::execute, this operation - * read latest committed value of the record. - * - * This means that if another transaction is updating the - * record, then the current transaction will not wait. - * It will instead use the latest committed value of the - * record. - * - * @return 0 if successful otherwise -1. - */ - virtual int committedRead(); - - /** - * Define the NdbOperation to be a standard operation of type dirtyUpdate. - * When calling NdbTransaction::execute, this operation - * updates without two-phase commit. - * - * @return 0 if successful otherwise -1. - */ - virtual int dirtyUpdate(); - - /** - * Define the NdbOperation to be a standard operation of type dirtyWrite. - * When calling NdbTransaction::execute, this operation - * writes without two-phase commit. - * - * @return 0 if successful otherwise -1. - */ - virtual int dirtyWrite(); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** @} *********************************************************************/ - /** - * @name Define Interpreted Program Operation Type - * @{ - */ - - /** - * Update a tuple using an interpreted program. - * - * @return 0 if successful otherwise -1. - */ - virtual int interpretedUpdateTuple(); - - /** - * Delete a tuple using an interpreted program. - * - * @return 0 if successful otherwise -1. 
- */ - virtual int interpretedDeleteTuple(); -#endif - - /** @} *********************************************************************/ - - /** - * @name Specify Search Conditions - * @{ - */ - /** - * Define a search condition with equality. - * The condition is true if the attribute has the given value. - * To set search conditions on multiple attributes, - * use several equals (then all of them must be satisfied for the - * tuple to be selected). - * - * @note For insertTuple() it is also allowed to define the - * search key by using setValue(). - * - * @note There are 10 versions of equal() with - * slightly different parameters. - * - * @note If attribute has fixed size, value must include all bytes. - * In particular a Char must be native-blank padded. - * If attribute has variable size, value must start with - * 1 or 2 little-endian length bytes (2 if Long*). - * - * @param anAttrName Attribute name - * @param aValue Attribute value. - * @return -1 if unsuccessful. - */ -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int equal(const char* anAttrName, const char* aValue, Uint32 len); -#endif - int equal(const char* anAttrName, const char* aValue); - int equal(const char* anAttrName, Int32 aValue); - int equal(const char* anAttrName, Uint32 aValue); - int equal(const char* anAttrName, Int64 aValue); - int equal(const char* anAttrName, Uint64 aValue); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int equal(Uint32 anAttrId, const char* aValue, Uint32 len); -#endif - int equal(Uint32 anAttrId, const char* aValue); - int equal(Uint32 anAttrId, Int32 aValue); - int equal(Uint32 anAttrId, Uint32 aValue); - int equal(Uint32 anAttrId, Int64 aValue); - int equal(Uint32 anAttrId, Uint64 aValue); - - /** @} *********************************************************************/ - /** - * @name Specify Attribute Actions for Operations - * @{ - */ - - /** - * Defines a retrieval operation of an attribute value. - * The NDB API allocate memory for the NdbRecAttr object that - * will hold the returned attribute value. - * - * @note Note that it is the applications responsibility - * to allocate enough memory for aValue (if non-NULL). - * The buffer aValue supplied by the application must be - * aligned appropriately. The buffer is used directly - * (avoiding a copy penalty) only if it is aligned on a - * 4-byte boundary and the attribute size in bytes - * (i.e. NdbRecAttr::attrSize times NdbRecAttr::arraySize is - * a multiple of 4). - * - * @note There are two versions of NdbOperation::getValue with - * slightly different parameters. - * - * @note This method does not fetch the attribute value from - * the database! The NdbRecAttr object returned by this method - * is not readable/printable before the - * transaction has been executed with NdbTransaction::execute. - * - * @param anAttrName Attribute name - * @param aValue If this is non-NULL, then the attribute value - * will be returned in this parameter.
- * If NULL, then the attribute value will only - * be stored in the returned NdbRecAttr object. - * @return An NdbRecAttr object to hold the value of - * the attribute, or a NULL pointer - * (indicating error). - */ - NdbRecAttr* getValue(const char* anAttrName, char* aValue = 0); - NdbRecAttr* getValue(Uint32 anAttrId, char* aValue = 0); - NdbRecAttr* getValue(const NdbDictionary::Column*, char* val = 0); - - /** - * Define an attribute to set or update in query. - * - * To set a NULL value, use the following construct: - * @code - * setValue("ATTR_NAME", (char*)NULL); - * @endcode - * - * There are a number of NdbOperation::setValue methods that - * take a certain type as input - * (pass by value rather than passing a pointer). - * As the interface is currently implemented it is the responsibility - * of the application programmer to use the correct types. - * - * The NDB API will however check that the application sends - * a correct length to the interface as given in the length parameter. - * The passing of char* as the value can contain any type or - * any type of array. - * If length is not provided or set to zero, - * then the API will assume that the pointer - * is correct and not bother with checking it. - * - * @note For insertTuple() the NDB API will automatically detect that - * it is supposed to use equal() instead. - * - * @note For insertTuple() it is not necessary to use - * setValue() on key attributes before other attributes. - * - * @note There are 14 versions of NdbOperation::setValue with - * slightly different parameters. - * - * @note See note under equal() about value format and length. - * - * @param anAttrName Name (or Id) of attribute. - * @param aValue Attribute value to set. - * @return -1 if unsuccessful. - */ -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int setValue(const char* anAttrName, const char* aValue, Uint32 len); -#endif - int setValue(const char* anAttrName, const char* aValue); - int setValue(const char* anAttrName, Int32 aValue); - int setValue(const char* anAttrName, Uint32 aValue); - int setValue(const char* anAttrName, Int64 aValue); - int setValue(const char* anAttrName, Uint64 aValue); - int setValue(const char* anAttrName, float aValue); - int setValue(const char* anAttrName, double aValue); -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - int setAnyValue(Uint32 aValue); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int setValue(Uint32 anAttrId, const char* aValue, Uint32 len); -#endif - int setValue(Uint32 anAttrId, const char* aValue); - int setValue(Uint32 anAttrId, Int32 aValue); - int setValue(Uint32 anAttrId, Uint32 aValue); - int setValue(Uint32 anAttrId, Int64 aValue); - int setValue(Uint32 anAttrId, Uint64 aValue); - int setValue(Uint32 anAttrId, float aValue); - int setValue(Uint32 anAttrId, double aValue); - - /** - * This method replaces getValue/setValue for blobs. It creates - * a blob handle NdbBlob. A second call with same argument returns - * the previously created handle. The handle is linked to the - * operation and is maintained automatically. - * - * See NdbBlob for details. - */ - virtual NdbBlob* getBlobHandle(const char* anAttrName); - virtual NdbBlob* getBlobHandle(Uint32 anAttrId); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** @} *********************************************************************/ - /** - * @name Specify Interpreted Program Instructions - * @{ - */ - - /** - * Interpreted program instruction: Add a value to an attribute. - * - * @note Destroys the contents of registers 6 and 7. 
- * (The instruction uses these registers for its operation.) - * - * @note There are four versions of NdbOperation::incValue with - * slightly different parameters. - * - * @param anAttrName Attribute name. - * @param aValue Value to add. - * @return -1 if unsuccessful. - */ - int incValue(const char* anAttrName, Uint32 aValue); - int incValue(const char* anAttrName, Uint64 aValue); - int incValue(Uint32 anAttrId, Uint32 aValue); - int incValue(Uint32 anAttrId, Uint64 aValue); - - /** - * Interpreted program instruction: - * Subtract a value from an attribute in an interpreted operation. - * - * @note Destroys the contents of registers 6 and 7. - * (The instruction uses these registers for its operation.) - * - * @note There are four versions of NdbOperation::subValue with - * slightly different parameters. - * - * @param anAttrName Attribute name. - * @param aValue Value to subtract. - * @return -1 if unsuccessful. - */ - int subValue(const char* anAttrName, Uint32 aValue); - int subValue(const char* anAttrName, Uint64 aValue); - int subValue(Uint32 anAttrId, Uint32 aValue); - int subValue(Uint32 anAttrId, Uint64 aValue); - - /** - * Interpreted program instruction: - * Define a jump label in an interpreted operation. - * - * @note The labels are automatically numbered starting with 0. - * The parameter used by NdbOperation::def_label should - * match the automatic numbering to make it easier to - * debug the interpreted program. - * - * @param labelNumber Label number. - * @return -1 if unsuccessful. - */ - int def_label(int labelNumber); - - /** - * Interpreted program instruction: - * Add two registers into a third. - * - * @param RegSource1 First register. - * @param RegSource2 Second register. - * @param RegDest Destination register where the result will be stored. - * @return -1 if unsuccessful. - */ - int add_reg(Uint32 RegSource1, Uint32 RegSource2, Uint32 RegDest); - - /** - * Interpreted program instruction: - * Substract RegSource2 from RegSource1 and put the result in RegDest. - * - * @param RegSource1 First register. - * @param RegSource2 Second register. - * @param RegDest Destination register where the result will be stored. - * @return -1 if unsuccessful. - */ - int sub_reg(Uint32 RegSource1, Uint32 RegSource2, Uint32 RegDest); - - /** - * Interpreted program instruction: - * Load a constant into a register. - * - * @param RegDest Destination register. - * @param Constant Value to load. - * @return -1 if unsuccessful. - */ - int load_const_u32(Uint32 RegDest, Uint32 Constant); - int load_const_u64(Uint32 RegDest, Uint64 Constant); - - /** - * Interpreted program instruction: - * Load NULL value into a register. - * - * @param RegDest Destination register. - * @return -1 if unsuccessful. - */ - int load_const_null(Uint32 RegDest); - - /** - * Interpreted program instruction: - * Read an attribute into a register. - * - * @param anAttrName Attribute name. - * @param RegDest Destination register. - * @return -1 if unsuccessful. - */ - int read_attr(const char* anAttrName, Uint32 RegDest); - - /** - * Interpreted program instruction: - * Write an attribute from a register. - * - * @param anAttrName Attribute name. - * @param RegSource Source register. - * @return -1 if unsuccessful. - */ - int write_attr(const char* anAttrName, Uint32 RegSource); - - /** - * Interpreted program instruction: - * Read an attribute into a register. - * - * @param anAttrId the attribute id. - * @param RegDest the destination register. - * @return -1 if unsuccessful. 
- */ - int read_attr(Uint32 anAttrId, Uint32 RegDest); - - /** - * Interpreted program instruction: - * Write an attribute from a register. - * - * @param anAttrId the attribute id. - * @param RegSource the source register. - * @return -1 if unsuccessful. - */ - int write_attr(Uint32 anAttrId, Uint32 RegSource); - - /** - * Interpreted program instruction: - * Define a search condition. Last two letters in the function name - * describes the search condition. - * The condition compares RegR with RegL and therefore appears - * to be reversed. - * - * - ge RegR >= RegL - * - gt RegR > RegL - * - le RegR <= RegL - * - lt RegR < RegL - * - eq RegR = RegL - * - ne RegR <> RegL - * - * @param RegLvalue left value. - * @param RegRvalue right value. - * @param Label the label to jump to. - * @return -1 if unsuccessful. - */ - int branch_ge(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - int branch_gt(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - int branch_le(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - int branch_lt(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - int branch_eq(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - int branch_ne(Uint32 RegLvalue, Uint32 RegRvalue, Uint32 Label); - - /** - * Interpreted program instruction: - * Jump to Label if RegLvalue is not NULL. - * - * @param RegLvalue the value to check. - * @param Label the label to jump to. - * @return -1 if unsuccessful. - */ - int branch_ne_null(Uint32 RegLvalue, Uint32 Label); - - /** - * Interpreted program instruction: - * Jump to Label if RegLvalue is equal to NULL. - * - * @param RegLvalue Value to check. - * @param Label Label to jump to. - * @return -1 if unsuccessful. - */ - int branch_eq_null(Uint32 RegLvalue, Uint32 Label); - - /** - * Interpreted program instruction: - * Jump to Label. - * - * @param Label Label to jump to. - * @return -1 if unsuccessful. - */ - int branch_label(Uint32 Label); - - /** - * Interpreted program instruction: branch after memcmp - * @param ColId Column to check - * @param Label Label to jump to - * @return -1 if unsuccessful - */ - int branch_col_eq_null(Uint32 ColId, Uint32 Label); - int branch_col_ne_null(Uint32 ColId, Uint32 Label); - - /** - * Interpreted program instruction: branch after memcmp - * @param ColId column to check - * @param val search value - * @param len length of search value - * @param nopad force non-padded comparison for a Char column - * @param Label label to jump to - * @return -1 if unsuccessful - */ - int branch_col_eq(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_ne(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_lt(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_le(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_gt(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_ge(Uint32 ColId, const void * val, Uint32 len, - bool nopad, Uint32 Label); - /** - * The argument is always plain char, even if the field is varchar - * (changed in 5.0.22). - */ - int branch_col_like(Uint32 ColId, const void *, Uint32 len, - bool nopad, Uint32 Label); - int branch_col_notlike(Uint32 ColId, const void *, Uint32 len, - bool nopad, Uint32 Label); - - /** - * Interpreted program instruction: Exit with Ok - * - * For scanning transactions, - * end interpreted operation and return the row to the application. 
- * - * For non-scanning transactions, - * exit interpreted program. - * - * @return -1 if unsuccessful. - */ - int interpret_exit_ok(); - - /** - * Interpreted program instruction: Exit with Not Ok - * - * For scanning transactions, - * continue with the next row without returning the current row. - * - * For non-scanning transactions, - * abort the whole transaction. - * - * @note A method also exists without the error parameter. - * - * @param ErrorCode An error code given by the application programmer. - * @return -1 if unsuccessful. - */ - int interpret_exit_nok(Uint32 ErrorCode); - int interpret_exit_nok(); - - - /** - * Interpreted program instruction: - * - * For scanning transactions, - * return this row, but no more from this fragment - * - * For non-scanning transactions, - * abort the whole transaction. - * - * @return -1 if unsuccessful. - */ - int interpret_exit_last_row(); - - /** - * Interpreted program instruction: - * Define a subroutine in an interpreted operation. - * - * @param SubroutineNumber the subroutine number. - * @return -1 if unsuccessful. - */ - int def_subroutine(int SubroutineNumber); - - /** - * Interpreted program instruction: - * Call a subroutine. - * - * @param Subroutine the subroutine to call. - * @return -1 if unsuccessful. - */ - int call_sub(Uint32 Subroutine); - - /** - * Interpreted program instruction: - * End a subroutine. - * - * @return -1 if unsuccessful. - */ - int ret_sub(); -#endif - - /** @} *********************************************************************/ - - /** - * @name Error Handling - * @{ - */ - - /** - * Get the latest error code. - * - * @return error code. - */ - const NdbError & getNdbError() const; - - /** - * Get the method number where the error occured. - * - * @return method number where the error occured. - */ - int getNdbErrorLine(); - - /** - * Get table name of this operation. - */ - const char* getTableName() const; - - /** - * Get table object for this operation - */ - const NdbDictionary::Table * getTable() const; - - /** - * Get the type of access for this operation - */ - Type getType() const; - - /** @} *********************************************************************/ - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Type of operation - */ - enum OperationType { - ReadRequest = 0, ///< Read operation - UpdateRequest = 1, ///< Update Operation - InsertRequest = 2, ///< Insert Operation - DeleteRequest = 3, ///< Delete Operation - WriteRequest = 4, ///< Write Operation - ReadExclusive = 5, ///< Read exclusive - OpenScanRequest, ///< Scan Operation - OpenRangeScanRequest, ///< Range scan operation - NotDefined2, ///< Internal for debugging - NotDefined ///< Internal for debugging - }; -#endif - - /** - * Return lock mode for operation - */ - LockMode getLockMode() const { return theLockMode; } - - /** - * Get/set abort option - */ - AbortOption getAbortOption() const; - int setAbortOption(AbortOption); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - - /** - * Set/get partition key - */ - void setPartitionId(Uint32 id); - void setPartitionHash(Uint32 key); - void setPartitionHash(const Uint64 *, Uint32 len); - Uint32 getPartitionId() const; -#endif -protected: - int handle_distribution_key(const Uint64 *, Uint32 len); -protected: -/****************************************************************************** - * These are the methods used to create and delete the NdbOperation objects. 
- *****************************************************************************/ - - bool needReply(); -/****************************************************************************** - * These methods are service routines used by the other NDB API classes. - *****************************************************************************/ -//-------------------------------------------------------------- -// Initialise after allocating operation to a transaction -//-------------------------------------------------------------- - int init(const class NdbTableImpl*, NdbTransaction* aCon); - void initInterpreter(); - - NdbOperation(Ndb* aNdb, Type aType = PrimaryKeyAccess); - virtual ~NdbOperation(); - void next(NdbOperation*); // Set next pointer - NdbOperation* next(); // Get next pointer - -public: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NdbTransaction* getNdbTransaction(); - const NdbOperation* next() const; - const NdbRecAttr* getFirstRecAttr() const; -#endif -protected: - - enum OperationStatus - { - Init, - OperationDefined, - TupleKeyDefined, - GetValue, - SetValue, - ExecInterpretedValue, - SetValueInterpreted, - FinalGetValue, - SubroutineExec, - SubroutineEnd, - WaitResponse, - WaitCommitResponse, - Finished, - ReceiveFinished - }; - - OperationStatus Status(); // Read the status information - - void Status(OperationStatus); // Set the status information - - void NdbCon(NdbTransaction*); // Set reference to connection - // object. - - virtual void release(); // Release all operations - // connected to - // the operations object. - void setStartIndicator(); - -/****************************************************************************** - * The methods below is the execution part of the NdbOperation - * class. This is where the NDB signals are sent and received. The - * operation can send TC[KEY/INDX]REQ, [INDX]ATTRINFO. - * It can receive TC[KEY/INDX]CONF, TC[KEY/INDX]REF, [INDX]ATTRINFO. - * When an operation is received in its fulness or a refuse message - * was sent, then the connection object is told about this situation. - *****************************************************************************/ - - int doSend(int ProcessorId, Uint32 lastFlag); - virtual int prepareSend(Uint32 TC_ConnectPtr, - Uint64 TransactionId, - AbortOption); - virtual void setLastFlag(NdbApiSignal* signal, Uint32 lastFlag); - - int prepareSendInterpreted(); // Help routine to prepare* - - int receiveTCKEYREF(NdbApiSignal*); - - int checkMagicNumber(bool b = true); // Verify correct object - - int checkState_TransId(NdbApiSignal* aSignal); - -/****************************************************************************** - * These are support methods only used locally in this class. 
-******************************************************************************/ - - virtual int equal_impl(const NdbColumnImpl*,const char* aValue); - virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char* aValue = 0); - int setValue(const NdbColumnImpl* anAttrObject, const char* aValue); - NdbBlob* getBlobHandle(NdbTransaction* aCon, const NdbColumnImpl* anAttrObject); - int incValue(const NdbColumnImpl* anAttrObject, Uint32 aValue); - int incValue(const NdbColumnImpl* anAttrObject, Uint64 aValue); - int subValue(const NdbColumnImpl* anAttrObject, Uint32 aValue); - int subValue(const NdbColumnImpl* anAttrObject, Uint64 aValue); - int read_attr(const NdbColumnImpl* anAttrObject, Uint32 RegDest); - int write_attr(const NdbColumnImpl* anAttrObject, Uint32 RegSource); - int branch_reg_reg(Uint32 type, Uint32, Uint32, Uint32); - int branch_col(Uint32 type, Uint32, const void *, Uint32, bool, Uint32 Label); - int branch_col_null(Uint32 type, Uint32 col, Uint32 Label); - - // Handle ATTRINFO signals - int insertATTRINFO(Uint32 aData); - int insertATTRINFOloop(const Uint32* aDataPtr, Uint32 aLength); - - int insertKEYINFO(const char* aValue, - Uint32 aStartPosition, - Uint32 aKeyLenInByte); - void reorderKEYINFO(); - - virtual void setErrorCode(int aErrorCode); - virtual void setErrorCodeAbort(int aErrorCode); - - void handleFailedAI_ElemLen(); // When not all attribute data - // were received - - int incCheck(const NdbColumnImpl* anAttrObject); - int initial_interpreterCheck(); - int intermediate_interpreterCheck(); - int read_attrCheck(const NdbColumnImpl* anAttrObject); - int write_attrCheck(const NdbColumnImpl* anAttrObject); - int labelCheck(); - int insertCall(Uint32 aCall); - int insertBranch(Uint32 aBranch); - - Uint32 ptr2int() { return theReceiver.getId(); }; - - // get table or index key from prepared signals - int getKeyFromTCREQ(Uint32* data, Uint32 & size); - - virtual void setReadLockMode(LockMode lockMode); - -/****************************************************************************** - * These are the private variables that are defined in the operation objects. - *****************************************************************************/ - - Type m_type; - - NdbReceiver theReceiver; - - NdbError theError; // Errorcode - int theErrorLine; // Error line - - Ndb* theNdb; // Point back to the Ndb object. - NdbTransaction* theNdbCon; // Point back to the connection object. - NdbOperation* theNext; // Next pointer to operation. 
- - union { - NdbApiSignal* theTCREQ; // The TC[KEY/INDX]REQ signal object - NdbApiSignal* theSCAN_TABREQ; - }; - - NdbApiSignal* theFirstATTRINFO; // The first ATTRINFO signal object - NdbApiSignal* theCurrentATTRINFO; // The current ATTRINFO signal object - Uint32 theTotalCurrAI_Len; // The total number of attribute info - // words currently defined - Uint32 theAI_LenInCurrAI; // The number of words defined in the - // current ATTRINFO signal - NdbApiSignal* theLastKEYINFO; // The first KEYINFO signal object - - class NdbLabel* theFirstLabel; - class NdbLabel* theLastLabel; - class NdbBranch* theFirstBranch; - class NdbBranch* theLastBranch; - class NdbCall* theFirstCall; - class NdbCall* theLastCall; - class NdbSubroutine* theFirstSubroutine; - class NdbSubroutine* theLastSubroutine; - Uint32 theNoOfLabels; - Uint32 theNoOfSubroutines; - - Uint32* theKEYINFOptr; // Pointer to where to write KEYINFO - Uint32* theATTRINFOptr; // Pointer to where to write ATTRINFO - - const class NdbTableImpl* m_currentTable; // The current table - const class NdbTableImpl* m_accessTable; // Index table (== current for pk) - - // Set to TRUE when a tuple key attribute has been defined. - Uint32 theTupleKeyDefined[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY][3]; - - Uint32 theTotalNrOfKeyWordInSignal; // The total number of - // keyword in signal. - - Uint32 theTupKeyLen; // Length of the tuple key in words - // left until done - Uint8 theNoOfTupKeyLeft; // The number of tuple key attributes - OperationType theOperationType; // Read Request, Update Req...... - - LockMode theLockMode; // Can be set to WRITE if read operation - OperationStatus theStatus; // The status of the operation. - - Uint32 theMagicNumber; // Magic number to verify that object - // is correct - Uint32 theScanInfo; // Scan info bits (take over flag etc) - Uint32 theDistributionKey; // Distribution Key size if used - - Uint32 theSubroutineSize; // Size of subroutines for interpretation - Uint32 theInitialReadSize; // Size of initial reads for interpretation - Uint32 theInterpretedSize; // Size of interpretation - Uint32 theFinalUpdateSize; // Size of final updates for interpretation - Uint32 theFinalReadSize; // Size of final reads for interpretation - - Uint8 theStartIndicator; // Indicator of whether start operation - Uint8 theCommitIndicator; // Indicator of whether commit operation - Uint8 theSimpleIndicator; // Indicator of whether simple operation - Uint8 theDirtyIndicator; // Indicator of whether dirty operation - Uint8 theInterpretIndicator; // Indicator of whether interpreted operation - Int8 theDistrKeyIndicator_; // Indicates whether distr. key is used - Uint8 m_no_disk_flag; - - Uint16 m_tcReqGSN; - Uint16 m_keyInfoGSN; - Uint16 m_attrInfoGSN; - - // Blobs in this operation - NdbBlob* theBlobList; - - /* - * Abort option per operation, used by blobs. - * See also comments on enum AbortOption. - */ - Int8 m_abortOption; - - /* - * For blob impl, option to not propagate error to trans level. - * Could be AO_IgnoreError variant if we want it public. - * Ignored unless AO_IgnoreError is also set. 
- */ - Int8 m_noErrorPropagation; - - friend struct Ndb_free_list_t; -}; - -#ifdef NDB_NO_DROPPED_SIGNAL -#include -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -inline -int -NdbOperation::checkMagicNumber(bool b) -{ - if (theMagicNumber != 0xABCDEF01){ -#ifdef NDB_NO_DROPPED_SIGNAL - if(b) abort(); -#endif - return -1; - } - return 0; -} - -inline -void -NdbOperation::setStartIndicator() -{ - theStartIndicator = 1; -} - -inline -int -NdbOperation::getNdbErrorLine() -{ - return theErrorLine; -} - -/****************************************************************************** -void next(NdbOperation* aNdbOperation); - -Parameters: aNdbOperation: Pointers to the NdbOperation object. -Remark: Set the next variable of the operation object. -******************************************************************************/ -inline -void -NdbOperation::next(NdbOperation* aNdbOperation) -{ - theNext = aNdbOperation; -} - -/****************************************************************************** -NdbOperation* next(); - -Return Value: Return next pointer to NdbOperation object. -Remark: Get the next variable of the operation object. -******************************************************************************/ -inline -NdbOperation* -NdbOperation::next() -{ - return theNext; -} - -inline -const NdbOperation* -NdbOperation::next() const -{ - return theNext; -} - -inline -const NdbRecAttr* -NdbOperation::getFirstRecAttr() const -{ - return theReceiver.theFirstRecAttr; -} - -/****************************************************************************** -Type getType() - -Return Value Return the Type. -Remark: Gets type of access. -******************************************************************************/ -inline -NdbOperation::Type -NdbOperation::getType() const -{ - return m_type; -} - -/****************************************************************************** -OperationStatus Status(); - -Return Value Return the OperationStatus. -Parameters: aStatus: The status. -Remark: Sets Operation status. -******************************************************************************/ -inline -NdbOperation::OperationStatus -NdbOperation::Status() -{ - return theStatus; -} - -/****************************************************************************** -void Status(OperationStatus aStatus); - -Parameters: aStatus: The status. -Remark: Sets Operation - status. -******************************************************************************/ -inline -void -NdbOperation::Status( OperationStatus aStatus ) -{ - theStatus = aStatus; -} - -/****************************************************************************** -void NdbCon(NdbTransaction* aNdbCon); - -Parameters: aNdbCon: Pointers to NdbTransaction object. -Remark: Set the reference to the connection in the operation object. 
-******************************************************************************/ -inline -void -NdbOperation::NdbCon(NdbTransaction* aNdbCon) -{ - theNdbCon = aNdbCon; -} - -inline -int -NdbOperation::equal(const char* anAttrName, const char* aValue, Uint32 len) -{ - return equal(anAttrName, aValue); -} - -inline -int -NdbOperation::equal(const char* anAttrName, Int32 aPar) -{ - return equal(anAttrName, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::equal(const char* anAttrName, Uint32 aPar) -{ - return equal(anAttrName, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::equal(const char* anAttrName, Int64 aPar) -{ - return equal(anAttrName, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::equal(const char* anAttrName, Uint64 aPar) -{ - return equal(anAttrName, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::equal(Uint32 anAttrId, const char* aValue, Uint32 len) -{ - return equal(anAttrId, aValue); -} - -inline -int -NdbOperation::equal(Uint32 anAttrId, Int32 aPar) -{ - return equal(anAttrId, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::equal(Uint32 anAttrId, Uint32 aPar) -{ - return equal(anAttrId, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::equal(Uint32 anAttrId, Int64 aPar) -{ - return equal(anAttrId, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::equal(Uint32 anAttrId, Uint64 aPar) -{ - return equal(anAttrId, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, const char* aValue, Uint32 len) -{ - return setValue(anAttrName, aValue); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, Int32 aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, Uint32 aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, Int64 aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, Uint64 aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, float aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(const char* anAttrName, double aPar) -{ - return setValue(anAttrName, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, const char* aValue, Uint32 len) -{ - return setValue(anAttrId, aValue); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, Int32 aPar) -{ - return setValue(anAttrId, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, Uint32 aPar) -{ - return setValue(anAttrId, (const char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, Int64 aPar) -{ - return setValue(anAttrId, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, Uint64 aPar) -{ - return setValue(anAttrId, (const char*)&aPar, (Uint32)8); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, float aPar) -{ - return setValue(anAttrId, (char*)&aPar, (Uint32)4); -} - -inline -int -NdbOperation::setValue(Uint32 anAttrId, double aPar) -{ - return setValue(anAttrId, (const char*)&aPar, (Uint32)8); -} - -#endif // doxygen - -#endif diff --git a/storage/ndb/include/ndbapi/NdbPool.hpp b/storage/ndb/include/ndbapi/NdbPool.hpp 
deleted file mode 100644 index 1baec1a6108..00000000000 --- a/storage/ndb/include/ndbapi/NdbPool.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -class Ndb; -class NdbPool; - -bool -create_instance(Ndb_cluster_connection* cc, - Uint32 max_ndb_objects, - Uint32 no_conn_obj, - Uint32 init_no_ndb_objects); - -void -drop_instance(); - -Ndb* -get_ndb_object(Uint32 &hint_id, - const char* a_catalog_name, - const char* a_schema_name); - -void -return_ndb_object(Ndb* returned_object, Uint32 id); - diff --git a/storage/ndb/include/ndbapi/NdbRecAttr.hpp b/storage/ndb/include/ndbapi/NdbRecAttr.hpp deleted file mode 100644 index 17c05eca54f..00000000000 --- a/storage/ndb/include/ndbapi/NdbRecAttr.hpp +++ /dev/null @@ -1,477 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbRecAttr_H -#define NdbRecAttr_H - -#include "NdbDictionary.hpp" -#include "Ndb.hpp" - -class NdbOperation; - -/** - * @class NdbRecAttr - * @brief Contains value of an attribute. - * - * NdbRecAttr objects are used to store the attribute value - * after retrieving the value from the NDB Cluster using the method - * NdbOperation::getValue. The objects are allocated by the NDB API. - * An example application program follows: - * - * @code - * MyRecAttr = MyOperation->getValue("ATTR2", NULL); - * if (MyRecAttr == NULL) goto error; - * - * if (MyTransaction->execute(Commit) == -1) goto error; - * - * ndbout << MyRecAttr->u_32_value(); - * @endcode - * For more examples, see - * @ref ndbapi_simple.cpp. - * - * @note The NdbRecAttr object is instantiated with its value when - * NdbTransaction::execute is called. Before this, the value is - * undefined. (NdbRecAttr::isNULL can be used to check - * if the value is defined or not.) - * This means that an NdbRecAttr object only has valid information - * between the time of calling NdbTransaction::execute and - * the time of Ndb::closeTransaction. - * The value of the null indicator is -1 until the - * NdbTransaction::execute method have been called. - * - * For simple types, there are methods which directly getting the value - * from the NdbRecAttr object. 
- * - * To get a reference to the value, there are two methods: - * NdbRecAttr::aRef (memory is released by NDB API) and - * NdbRecAttr::getAttributeObject (memory must be released - * by application program). - * The two methods may return different pointers. - * - * There are also methods to check attribute type, attribute size and - * array size. - * The method NdbRecAttr::arraySize returns the number of elements in the - * array (where each element is of size given by NdbRecAttr::attrSize). - * The NdbRecAttr::arraySize method is needed when reading variable-sized - * attributes. - * - * @note Variable-sized attributes are not yet supported. - */ -class NdbRecAttr -{ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbOperation; - friend class NdbIndexScanOperation; - friend class NdbEventOperationImpl; - friend class NdbReceiver; - friend class Ndb; - friend class NdbOut& operator<<(class NdbOut&, const class AttributeS&); -#endif - -public: - /** - * @name Getting meta information - * @{ - */ - const NdbDictionary::Column * getColumn() const; - - /** - * Get type of column - * @return Data type of the column - */ - NdbDictionary::Column::Type getType() const; - - /** - * Get attribute (element) size in bytes. - * - */ - Uint32 get_size_in_bytes() const { return m_size_in_bytes; } - - /** @} *********************************************************************/ - /** - * @name Getting stored value - * @{ - */ - - /** - * Check if attribute value is NULL. - * - * @return -1 = Not defined (Failure or - * NdbTransaction::execute not yet called).
- * 0 = Attribute value is defined, but not equal to NULL.
- * 1 = Attribute value is defined and equal to NULL. - */ - int isNULL() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return 64 bit long value. - */ - Int64 int64_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return 32 bit int value. - */ - Int32 int32_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Medium value. - */ - Int32 medium_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Short value. - */ - short short_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Char value. - */ - char char_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Int8 value. - */ - Int8 int8_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return 64 bit unsigned value. - */ - Uint64 u_64_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return 32 bit unsigned value. - */ - Uint32 u_32_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Unsigned medium value. - */ - Uint32 u_medium_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Unsigned short value. - */ - Uint16 u_short_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Unsigned char value. - */ - Uint8 u_char_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Uint8 value. - */ - Uint8 u_8_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Float value. - */ - float float_value() const; - - /** - * Get value stored in NdbRecAttr object. - * - * @return Double value. - */ - double double_value() const; - - /** @} *********************************************************************/ - /** - * @name Getting reference to stored value - * @{ - */ - - /** - * Get reference to attribute value. - * - * Returns a char*-pointer to the value. - * The pointer is aligned appropriately for the data type. - * The memory is released when Ndb::closeTransaction is executed - * for the transaction which read the value. - * - * @note The memory is released by NDB API. - * - * @note The pointer to the attribute value stored in an NdbRecAttr - * object (i.e. the pointer returned by aRef) is constant. - * This means that this method can be called anytime after - * NdbOperation::getValue has been called. - * - * @return Pointer to attribute value. - */ - char* aRef() const; - - /** @} *********************************************************************/ - - /** - * Make a copy of RecAttr object including all data. - * - * @note Copy needs to be deleted by application program. - */ - NdbRecAttr * clone() const; - - /** - * Destructor - * - * @note You should only delete RecAttr-copies, - * i.e. objects that has been cloned. 
- */ - ~NdbRecAttr(); - -public: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - const NdbRecAttr* next() const; -#endif -private: - - Uint32 attrId() const; /* Get attribute id */ - bool setNULL(); /* Set NULL indicator */ - void setUNDEFINED(); // - - bool receive_data(const Uint32*, Uint32); - - void release(); /* Release memory if allocated */ - void init(); /* Initialise object when allocated */ - - NdbRecAttr(Ndb*); - void next(NdbRecAttr* aRecAttr); - NdbRecAttr* next(); - - int setup(const class NdbDictionary::Column* col, char* aValue); - int setup(const class NdbColumnImpl* anAttrInfo, char* aValue); - /* Set up attributes and buffers */ - bool copyoutRequired() const; /* Need to copy data to application */ - void copyout(); /* Copy from storage to application */ - - Uint64 theStorage[4]; /* The data storage here if <= 32 bytes */ - Uint64* theStorageX; /* The data storage here if > 32 bytes */ - char* theValue; /* The data storage in the application */ - void* theRef; /* Pointer to one of above */ - - NdbRecAttr* theNext; /* Next pointer */ - Uint32 theAttrId; /* The attribute id */ - - Int32 m_size_in_bytes; - const NdbDictionary::Column* m_column; - - friend struct Ndb_free_list_t; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -inline -NdbDictionary::Column::Type -NdbRecAttr::getType() const { - return m_column->getType(); -} - -inline -const NdbDictionary::Column * -NdbRecAttr::getColumn() const { - return m_column; -} - -inline -Int32 -NdbRecAttr::int32_value() const -{ - return *(Int32*)theRef; -} - -inline -short -NdbRecAttr::short_value() const -{ - return *(short*)theRef; -} - -inline -char -NdbRecAttr::char_value() const -{ - return *(char*)theRef; -} - -inline -Int8 -NdbRecAttr::int8_value() const -{ - return *(Int8*)theRef; -} - -inline -Uint32 -NdbRecAttr::u_32_value() const -{ - return *(Uint32*)theRef; -} - -inline -Uint16 -NdbRecAttr::u_short_value() const -{ - return *(Uint16*)theRef; -} - -inline -Uint8 -NdbRecAttr::u_char_value() const -{ - return *(Uint8*)theRef; -} - -inline -Uint8 -NdbRecAttr::u_8_value() const -{ - return *(Uint8*)theRef; -} - -inline -void -NdbRecAttr::release() -{ - if (theStorageX != 0) { - delete [] theStorageX; - theStorageX = 0; - } -} - -inline -void -NdbRecAttr::init() -{ - theStorageX = 0; - theValue = 0; - theRef = 0; - theNext = 0; - theAttrId = 0xFFFF; -} - -inline -void -NdbRecAttr::next(NdbRecAttr* aRecAttr) -{ - theNext = aRecAttr; -} - -inline -NdbRecAttr* -NdbRecAttr::next() -{ - return theNext; -} - -inline -const NdbRecAttr* -NdbRecAttr::next() const -{ - return theNext; -} - -inline -char* -NdbRecAttr::aRef() const -{ - return (char*)theRef; -} - -inline -bool -NdbRecAttr::copyoutRequired() const -{ - return theRef != theValue && theValue != 0; -} - -inline -Uint32 -NdbRecAttr::attrId() const -{ - return theAttrId; -} - -inline -bool -NdbRecAttr::setNULL() -{ - m_size_in_bytes= 0; - return true; -} - -inline -int -NdbRecAttr::isNULL() const -{ - return m_size_in_bytes == 0 ? 1 : (m_size_in_bytes > 0 ? 
0 : -1); -} - -inline -void -NdbRecAttr::setUNDEFINED() -{ - m_size_in_bytes= -1; -} - -class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &); - -class NdbRecordPrintFormat -{ -public: - NdbRecordPrintFormat(); - virtual ~NdbRecordPrintFormat(); - const char *lines_terminated_by; - const char *fields_terminated_by; - const char *start_array_enclosure; - const char *end_array_enclosure; - const char *fields_enclosed_by; - const char *fields_optionally_enclosed_by; - const char *hex_prefix; - const char *null_string; - int hex_format; -}; -NdbOut& -ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r, - const NdbRecordPrintFormat &f); - -#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -#endif - diff --git a/storage/ndb/include/ndbapi/NdbReceiver.hpp b/storage/ndb/include/ndbapi/NdbReceiver.hpp deleted file mode 100644 index 02cff25139a..00000000000 --- a/storage/ndb/include/ndbapi/NdbReceiver.hpp +++ /dev/null @@ -1,150 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbReceiver_H -#define NdbReceiver_H -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL // Not part of public interface - -#include - -class Ndb; -class NdbTransaction; - -class NdbReceiver -{ - friend class Ndb; - friend class NdbOperation; - friend class NdbScanOperation; - friend class NdbIndexOperation; - friend class NdbIndexScanOperation; - friend class NdbTransaction; -public: - enum ReceiverType { NDB_UNINITIALIZED, - NDB_OPERATION = 1, - NDB_SCANRECEIVER = 2, - NDB_INDEX_OPERATION = 3 - }; - - NdbReceiver(Ndb *aNdb); - int init(ReceiverType type, void* owner); - void release(); - ~NdbReceiver(); - - Uint32 getId(){ - return m_id; - } - - ReceiverType getType(){ - return m_type; - } - - inline NdbTransaction * getTransaction(); - void* getOwner(){ - return m_owner; - } - - bool checkMagicNumber() const; - - inline void next(NdbReceiver* next_arg) { m_next = next_arg;} - inline NdbReceiver* next() { return m_next; } - - void setErrorCode(int); -private: - Uint32 theMagicNumber; - Ndb* m_ndb; - Uint32 m_id; - Uint32 m_tcPtrI; - Uint32 m_hidden_count; - ReceiverType m_type; - void* m_owner; - NdbReceiver* m_next; - - /** - * At setup - */ - class NdbRecAttr * getValue(const class NdbColumnImpl*, char * user_dst_ptr); - int do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range); - void prepareSend(); - void calculate_batch_size(Uint32, Uint32, Uint32&, Uint32&, Uint32&); - - int execKEYINFO20(Uint32 info, const Uint32* ptr, Uint32 len); - int execTRANSID_AI(const Uint32* ptr, Uint32 len); - int execTCOPCONF(Uint32 len); - int execSCANOPCONF(Uint32 tcPtrI, Uint32 len, Uint32 rows); - class NdbRecAttr* theFirstRecAttr; - class NdbRecAttr* theCurrentRecAttr; - class NdbRecAttr** m_rows; - - Uint32 m_list_index; // When using multiple - Uint32 m_current_row; - Uint32 m_result_rows; - Uint32 m_defined_rows; - - Uint32 
m_expected_result_length; - Uint32 m_received_result_length; - - bool nextResult() const { return m_current_row < m_result_rows; } - NdbRecAttr* copyout(NdbReceiver&); -}; - -#ifdef NDB_NO_DROPPED_SIGNAL -#include -#endif - -inline -bool -NdbReceiver::checkMagicNumber() const { - bool retVal = (theMagicNumber == 0x11223344); -#ifdef NDB_NO_DROPPED_SIGNAL - if(!retVal){ - abort(); - } -#endif - return retVal; -} - -inline -void -NdbReceiver::prepareSend(){ - m_current_row = 0; - m_received_result_length = 0; - m_expected_result_length = 0; - theCurrentRecAttr = theFirstRecAttr; -} - -inline -int -NdbReceiver::execTCOPCONF(Uint32 len){ - Uint32 tmp = m_received_result_length; - m_expected_result_length = len; -#ifdef assert - assert(!(tmp && !len)); -#endif - return ((bool)len ^ (bool)tmp ? 0 : 1); -} - -inline -int -NdbReceiver::execSCANOPCONF(Uint32 tcPtrI, Uint32 len, Uint32 rows){ - m_tcPtrI = tcPtrI; - m_result_rows = rows; - Uint32 tmp = m_received_result_length; - m_expected_result_length = len; - return (tmp == len ? 1 : 0); -} - -#endif // DOXYGEN_SHOULD_SKIP_INTERNAL -#endif diff --git a/storage/ndb/include/ndbapi/NdbScanFilter.hpp b/storage/ndb/include/ndbapi/NdbScanFilter.hpp deleted file mode 100644 index 99a31143bc3..00000000000 --- a/storage/ndb/include/ndbapi/NdbScanFilter.hpp +++ /dev/null @@ -1,205 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_SCAN_FILTER_HPP -#define NDB_SCAN_FILTER_HPP - -#include -#include - -/** - * @class NdbScanFilter - * @brief A simple way to specify filters for scan operations - * - * @note This filter interface is under development and may change in - * the future! - * - */ -class NdbScanFilter { -public: - /** - * Constructor - * @param op The NdbOperation that the filter belongs to (is applied to). - * @param abort_on_too_large abort transaction on filter too large - * default: true - * @param max_size Maximum size of generated filter in words - */ - NdbScanFilter(class NdbOperation * op, - bool abort_on_too_large = true, - Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS); - ~NdbScanFilter(); - - /** - * Group operators - */ - enum Group { - AND = 1, ///< (x1 AND x2 AND x3) - OR = 2, ///< (x1 OR x2 OR X3) - NAND = 3, ///< NOT (x1 AND x2 AND x3) - NOR = 4 ///< NOT (x1 OR x2 OR x3) - }; - - enum BinaryCondition - { - COND_LE = 0, ///< lower bound - COND_LT = 1, ///< lower bound, strict - COND_GE = 2, ///< upper bound - COND_GT = 3, ///< upper bound, strict - COND_EQ = 4, ///< equality - COND_NE = 5, ///< not equal - COND_LIKE = 6, ///< like - COND_NOT_LIKE = 7 ///< not like - }; - - /** - * @name Grouping - * @{ - */ - - /** - * Begin of compound. - * ®return 0 if successful, -1 otherwize - */ - int begin(Group group = AND); - - /** - * End of compound. 
- * ®return 0 if successful, -1 otherwize - */ - int end(); - - /** @} *********************************************************************/ - - /** - * Explanation missing - */ - int istrue(); - - /** - * Explanation missing - */ - int isfalse(); - - /** - * Compare column ColId with val - */ - int cmp(BinaryCondition cond, int ColId, const void *val, Uint32 len = 0); - - /** - * @name Integer Comparators - * @{ - */ - /** Compare column value with integer for equal - * ®return 0 if successful, -1 otherwize - */ - int eq(int ColId, Uint32 value) { return cmp(COND_EQ, ColId, &value, 4);} - - /** Compare column value with integer for not equal. - * ®return 0 if successful, -1 otherwize - */ - int ne(int ColId, Uint32 value) { return cmp(COND_NE, ColId, &value, 4);} - /** Compare column value with integer for less than. - * ®return 0 if successful, -1 otherwize - */ - int lt(int ColId, Uint32 value) { return cmp(COND_LT, ColId, &value, 4);} - /** Compare column value with integer for less than or equal. - * ®return 0 if successful, -1 otherwize - */ - int le(int ColId, Uint32 value) { return cmp(COND_LE, ColId, &value, 4);} - /** Compare column value with integer for greater than. - * ®return 0 if successful, -1 otherwize - */ - int gt(int ColId, Uint32 value) { return cmp(COND_GT, ColId, &value, 4);} - /** Compare column value with integer for greater than or equal. - * ®return 0 if successful, -1 otherwize - */ - int ge(int ColId, Uint32 value) { return cmp(COND_GE, ColId, &value, 4);} - - /** Compare column value with integer for equal. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int eq(int ColId, Uint64 value) { return cmp(COND_EQ, ColId, &value, 8);} - /** Compare column value with integer for not equal. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int ne(int ColId, Uint64 value) { return cmp(COND_NE, ColId, &value, 8);} - /** Compare column value with integer for less than. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int lt(int ColId, Uint64 value) { return cmp(COND_LT, ColId, &value, 8);} - /** Compare column value with integer for less than or equal. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int le(int ColId, Uint64 value) { return cmp(COND_LE, ColId, &value, 8);} - /** Compare column value with integer for greater than. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int gt(int ColId, Uint64 value) { return cmp(COND_GT, ColId, &value, 8);} - /** Compare column value with integer for greater than or equal. 64-bit. - * ®return 0 if successful, -1 otherwize - */ - int ge(int ColId, Uint64 value) { return cmp(COND_GE, ColId, &value, 8);} - /** @} *********************************************************************/ - - /** Check if column value is NULL */ - int isnull(int ColId); - /** Check if column value is non-NULL */ - int isnotnull(int ColId); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Like comparison operator. - * ®return 0 if successful, -1 otherwize - */ - int like(int ColId, const char * val, Uint32 len, bool nopad=false); - /** - * Notlike comparison operator. - * ®return 0 if successful, -1 otherwize - */ - int notlike(int ColId, const char * val, Uint32 len, bool nopad=false); - /** @} *********************************************************************/ -#endif - - enum Error { - FilterTooLarge = 4294 - }; - - /** - * Get filter level error. - * - * Most errors are set only on operation level, and they abort the - * transaction. 
The error FilterTooLarge is set on filter level and - * by default it propagates to operation level and also aborts the - * transaction. - * - * If option abort_on_too_large is set to false, then FilterTooLarge - * does not propagate. One can then either ignore this error (in - * which case no filtering is done) or try to define a new filter - * immediately. - */ - const class NdbError & getNdbError() const; -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NdbOperation * getNdbOperation(); -#endif -private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbScanFilterImpl; -#endif - class NdbScanFilterImpl & m_impl; - NdbScanFilter& operator=(const NdbScanFilter&); ///< Defined not implemented -}; - -#endif diff --git a/storage/ndb/include/ndbapi/NdbScanOperation.hpp b/storage/ndb/include/ndbapi/NdbScanOperation.hpp deleted file mode 100644 index d0bf585a018..00000000000 --- a/storage/ndb/include/ndbapi/NdbScanOperation.hpp +++ /dev/null @@ -1,318 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbScanOperation_H -#define NdbScanOperation_H - -#include - -class NdbBlob; -class NdbResultSet; -class PollGuard; - -/** - * @class NdbScanOperation - * @brief Class of scan operations for use in transactions. - */ -class NdbScanOperation : public NdbOperation { -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbTransaction; - friend class NdbResultSet; - friend class NdbOperation; - friend class NdbBlob; -#endif - -public: - /** - * Scan flags. OR-ed together and passed as second argument to - * readTuples. Note that SF_MultiRange has to be set if several - * ranges (bounds) are to be passed. 
- */ - enum ScanFlag { - SF_TupScan = (1 << 16), // scan TUP order - SF_DiskScan = (2 << 16), // scan in DISK order - SF_OrderBy = (1 << 24), // index scan in order - SF_Descending = (2 << 24), // index scan in descending order - SF_ReadRangeNo = (4 << 24), // enable @ref get_range_no - SF_MultiRange = (8 << 24), // scan is part of multi-range scan - SF_KeyInfo = 1 // request KeyInfo to be sent back - }; - - /** - * readTuples - * - * @param lock_mode Lock mode - * @param scan_flags see @ref ScanFlag - * @param parallel No of fragments to scan in parallel (0=max) - */ - virtual - int readTuples(LockMode lock_mode = LM_Read, - Uint32 scan_flags = 0, - Uint32 parallel = 0, - Uint32 batch = 0); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * readTuples - * @param lock_mode Lock mode - * @param batch No of rows to fetch from each fragment at a time - * @param parallel No of fragments to scan in parallell - * @note specifying 0 for batch and parallell means max performance - */ -#ifdef ndb_readtuples_impossible_overload - int readTuples(LockMode lock_mode = LM_Read, - Uint32 batch = 0, Uint32 parallel = 0, - bool keyinfo = false, bool multi_range = false); -#endif - - inline int readTuples(int parallell){ - return readTuples(LM_Read, 0, parallell); - } - - inline int readTuplesExclusive(int parallell = 0){ - return readTuples(LM_Exclusive, 0, parallell); - } -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NdbBlob* getBlobHandle(const char* anAttrName); - NdbBlob* getBlobHandle(Uint32 anAttrId); -#endif - - /** - * Get the next tuple in a scan transaction. - * - * After each call to nextResult - * the buffers and NdbRecAttr objects defined in - * NdbOperation::getValue are updated with values - * from the scanned tuple. - * - * @param fetchAllowed If set to false, then fetching is disabled - * @param forceSend If true send will occur immediately (see @ref secAdapt) - * - * The NDB API will contact the NDB Kernel for more tuples - * when necessary to do so unless you set the fetchAllowed - * to false. - * This will force NDB to process any records it - * already has in it's caches. When there are no more cached - * records it will return 2. You must then call nextResult - * with fetchAllowed = true in order to contact NDB for more - * records. - * - * fetchAllowed = false is useful when you want to update or - * delete all the records fetched in one transaction(This will save a - * lot of round trip time and make updates or deletes of scanned - * records a lot faster). - * While nextResult(false) - * returns 0 take over the record to another transaction. When - * nextResult(false) returns 2 you must execute and commit the other - * transaction. This will cause the locks to be transferred to the - * other transaction, updates or deletes will be made and then the - * locks will be released. - * After that, call nextResult(true) which will fetch new records and - * cache them in the NdbApi. - * - * @note If you don't take over the records to another transaction the - * locks on those records will be released the next time NDB Kernel - * is contacted for more records. - * - * @note Please contact for examples of efficient scan - * updates and deletes. - * - * @note See ndb/examples/ndbapi_scan_example for usage. - * - * @return - * - -1: if unsuccessful,
- * - 0: if another tuple was received, and
- * - 1: if there are no more tuples to scan. - * - 2: if there are no more cached records in NdbApi - */ - int nextResult(bool fetchAllowed = true, bool forceSend = false); - - /** - * Close scan - */ - void close(bool forceSend = false, bool releaseOp = false); - - /** - * Lock current tuple - * - * @return an NdbOperation or NULL. - */ - NdbOperation* lockCurrentTuple(); - /** - * Lock current tuple - * - * @param lockTrans Transaction that should perform the lock - * - * @return an NdbOperation or NULL. - */ - NdbOperation* lockCurrentTuple(NdbTransaction* lockTrans); - /** - * Update current tuple - * - * @return an NdbOperation or NULL. - */ - NdbOperation* updateCurrentTuple(); - /** - * Update current tuple - * - * @param updateTrans Transaction that should perform the update - * - * @return an NdbOperation or NULL. - */ - NdbOperation* updateCurrentTuple(NdbTransaction* updateTrans); - - /** - * Delete current tuple - * @return 0 on success or -1 on failure - */ - int deleteCurrentTuple(); - /** - * Delete current tuple - * - * @param takeOverTransaction Transaction that should perform the delete - * - * @return 0 on success or -1 on failure - */ - int deleteCurrentTuple(NdbTransaction* takeOverTransaction); - - /** - * Restart scan with exactly the same - * getValues and search conditions - */ - int restart(bool forceSend = false); - -protected: - NdbScanOperation(Ndb* aNdb, - NdbOperation::Type aType = NdbOperation::TableScan); - virtual ~NdbScanOperation(); - - int nextResultImpl(bool fetchAllowed = true, bool forceSend = false); - virtual void release(); - - int close_impl(class TransporterFacade*, bool forceSend, - PollGuard *poll_guard); - - // Overloaded methods from NdbCursorOperation - int executeCursor(int ProcessorId); - - // Overloaded private methods from NdbOperation - int init(const NdbTableImpl* tab, NdbTransaction*); - int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId); - int doSend(int ProcessorId); - virtual void setReadLockMode(LockMode lockMode); - - virtual void setErrorCode(int aErrorCode); - virtual void setErrorCodeAbort(int aErrorCode); - - NdbTransaction *m_transConnection; - - // Scan related variables - Uint32 theParallelism; - Uint32 m_keyInfo; - - int getFirstATTRINFOScan(); - int doSendScan(int ProcessorId); - int prepareSendScan(Uint32 TC_ConnectPtr, Uint64 TransactionId); - - int fix_receivers(Uint32 parallel); - void reset_receivers(Uint32 parallel, Uint32 ordered); - Uint32* m_array; // containing all arrays below - Uint32 m_allocated_receivers; - NdbReceiver** m_receivers; // All receivers - - Uint32* m_prepared_receivers; // These are to be sent - - /** - * owned by API/user thread - */ - Uint32 m_current_api_receiver; - Uint32 m_api_receivers_count; - NdbReceiver** m_api_receivers; // These are currently used by api - - /** - * owned by receiver thread - */ - Uint32 m_conf_receivers_count; // NOTE needs mutex to access - NdbReceiver** m_conf_receivers; // receive thread puts them here - - /** - * owned by receiver thread - */ - Uint32 m_sent_receivers_count; // NOTE needs mutex to access - NdbReceiver** m_sent_receivers; // receive thread puts them here - - int send_next_scan(Uint32 cnt, bool close); - void receiver_delivered(NdbReceiver*); - void receiver_completed(NdbReceiver*); - void execCLOSE_SCAN_REP(); - - int getKeyFromKEYINFO20(Uint32* data, Uint32 & size); - NdbOperation* takeOverScanOp(OperationType opType, NdbTransaction*); - - bool m_ordered; - bool m_descending; - Uint32 m_read_range_no; - NdbRecAttr *m_curr_row; // 
Pointer to last returned row - bool m_multi_range; // Mark if operation is part of multi-range scan - bool m_executed; // Marker if operation should be released at close -}; - -inline -NdbOperation* -NdbScanOperation::lockCurrentTuple(){ - return lockCurrentTuple(m_transConnection); -} - -inline -NdbOperation* -NdbScanOperation::lockCurrentTuple(NdbTransaction* takeOverTrans){ - return takeOverScanOp(NdbOperation::ReadRequest, - takeOverTrans); -} - -inline -NdbOperation* -NdbScanOperation::updateCurrentTuple(){ - return updateCurrentTuple(m_transConnection); -} - -inline -NdbOperation* -NdbScanOperation::updateCurrentTuple(NdbTransaction* takeOverTrans){ - return takeOverScanOp(NdbOperation::UpdateRequest, - takeOverTrans); -} - -inline -int -NdbScanOperation::deleteCurrentTuple(){ - return deleteCurrentTuple(m_transConnection); -} - -inline -int -NdbScanOperation::deleteCurrentTuple(NdbTransaction * takeOverTrans){ - void * res = takeOverScanOp(NdbOperation::DeleteRequest, - takeOverTrans); - if(res == 0) - return -1; - return 0; -} - -#endif diff --git a/storage/ndb/include/ndbapi/NdbTransaction.hpp b/storage/ndb/include/ndbapi/NdbTransaction.hpp deleted file mode 100644 index f3eea49d19d..00000000000 --- a/storage/ndb/include/ndbapi/NdbTransaction.hpp +++ /dev/null @@ -1,1059 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NdbTransaction_H -#define NdbTransaction_H - -#include -#include "NdbError.hpp" -#include "NdbDictionary.hpp" -#include "Ndb.hpp" -#include "NdbOperation.hpp" - -class NdbTransaction; -class NdbOperation; -class NdbScanOperation; -class NdbIndexScanOperation; -class NdbIndexOperation; -class NdbApiSignal; -class Ndb; -class NdbBlob; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -// to be documented later -/** - * NdbAsynchCallback functions are used when executing asynchronous - * transactions (using NdbTransaction::executeAsynchPrepare, or - * NdbTransaction::executeAsynch). - * The functions are called when the execute has finished. - * See @ref secAsync for more information. - */ -typedef void (* NdbAsynchCallback)(int, NdbTransaction*, void*); -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -enum AbortOption { - DefaultAbortOption = NdbOperation::DefaultAbortOption, - CommitIfFailFree = NdbOperation::AbortOnError, - TryCommit = NdbOperation::AbortOnError, - AbortOnError= NdbOperation::AbortOnError, - CommitAsMuchAsPossible = NdbOperation::AO_IgnoreError, - AO_IgnoreError= NdbOperation::AO_IgnoreError -}; -enum ExecType { - NoExecTypeDef = -1, - Prepare, - NoCommit, - Commit, - Rollback -}; -#endif - -/** - * @class NdbTransaction - * @brief Represents a transaction. - * - * A transaction (represented by an NdbTransaction object) - * belongs to an Ndb object and is created using - * Ndb::startTransaction(). 
- * A transaction consists of a list of operations - * (represented by NdbOperation, NdbScanOperation, NdbIndexOperation, - * and NdbIndexScanOperation objects). - * Each operation access exactly one table. - * - * After getting the NdbTransaction object, - * the first step is to get (allocate) an operation given the table name using - * one of the methods getNdbOperation(), getNdbScanOperation(), - * getNdbIndexOperation(), or getNdbIndexScanOperation(). - * Then the operation is defined. - * Several operations can be defined on the same - * NdbTransaction object, they will in that case be executed in parallell. - * When all operations are defined, the execute() - * method sends them to the NDB kernel for execution. - * - * The execute() method returns when the NDB kernel has - * completed execution of all operations defined before the call to - * execute(). All allocated operations should be properly defined - * before calling execute(). - * - * A call to execute() uses one out of three types of execution: - * -# NdbTransaction::NoCommit Executes operations without committing them. - * -# NdbTransaction::Commit Executes remaining operation and commits the - * complete transaction - * -# NdbTransaction::Rollback Rollbacks the entire transaction. - * - * execute() is equipped with an extra error handling parameter. - * There are two alternatives: - * -# NdbTransaction::AbortOnError (default). - * The transaction is aborted if there are any error during the - * execution - * -# NdbTransaction::AO_IgnoreError - * Continue execution of transaction even if operation fails - * - */ - -/* FUTURE IMPLEMENTATION: - * Later a prepare mode will be added when Ndb supports Prepare-To-Commit - * The NdbTransaction can deliver the Transaction Id of the transaction. - * After committing a transaction it is also possible to retrieve the - * global transaction checkpoint which the transaction was put in. - * - * FUTURE IMPLEMENTATION: - * There are three methods for acquiring the NdbOperation. - * -# The first method is the normal where a table name is - * provided. In this case the primary key must be supplied through - * the use of the NdbOperation::equal methods on the NdbOperation object. - * -# The second method provides the tuple identity of the tuple to be - * read. The tuple identity contains a table identifier and will - * thus be possible to use to ensure the attribute names provided - * are correct. If an object-oriented layer is put on top of NDB - * Cluster it is essential that all tables derived from a base - * class has the same attributes with the same type and the same - * name. Thus the application can use the tuple identity and need - * not known the table of the tuple. As long as the table is - * derived from the known base class everything is ok. - * It is not possible to provide any primary key since it is - * already supplied with the call to NdbTransaction::getNdbOperation. - * -# The third method is used when a scanned tuple is to be transferred to - * another transaction. In this case it is not possible to define the - * primary key since it came along from the scanned tuple. 
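In application code the workflow just described (start a transaction, allocate and define operations, execute, close) looks roughly like the sketch below. The table "T1" with an integer primary key "A" and an integer column "B" is a made-up example, and ndb is assumed to be an initialized Ndb object.

#include <NdbApi.hpp>

// Insert one row into T1 and commit; error handling kept to a minimum.
void insert_row(Ndb* ndb)
{
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == NULL)
    return;                              // ndb->getNdbError() has the reason

  NdbOperation* op = trans->getNdbOperation("T1");
  op->insertTuple();                     // define the operation:
  op->equal("A", (Uint32)42);            //   primary key value
  op->setValue("B", (Uint32)1);          //   non-key column value

  if (trans->execute(NdbTransaction::Commit) == -1) {
    // trans->getNdbError() describes why the transaction was aborted
  }
  ndb->closeTransaction(trans);
}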
- * - */ - -class NdbTransaction -{ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class Ndb; - friend class NdbOperation; - friend class NdbScanOperation; - friend class NdbIndexOperation; - friend class NdbIndexScanOperation; - friend class NdbBlob; - friend class ha_ndbcluster; -#endif - -public: - - /** - * Execution type of transaction - */ - enum ExecType { -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - NoExecTypeDef= - ::NoExecTypeDef, ///< Erroneous type (Used for debugging only) - Prepare= ::Prepare, ///< Missing explanation -#endif - NoCommit= ///< Execute the transaction as far as it has - ///< been defined, but do not yet commit it -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::NoCommit -#endif - ,Commit= ///< Execute and try to commit the transaction -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::Commit -#endif - ,Rollback ///< Rollback transaction -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = ::Rollback -#endif - }; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Convenience method to fetch this transaction's Ndb* object - */ - Ndb * getNdb() { - return theNdb; - } -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get an NdbOperation for a table. - * Note that the operation has to be defined before it is executed. - * - * @note All operations within the same transaction need to - * be initialized with this method. - * - * @param aTableName The table name. - * @return Pointer to an NdbOperation object if successful, otherwise NULL. - */ - NdbOperation* getNdbOperation(const char* aTableName); -#endif - - /** - * Get an NdbOperation for a table. - * Note that the operation has to be defined before it is executed. - * - * @note All operations within the same transaction need to - * be initialized with this method. - * - * @param aTable - * A table object (fetched by NdbDictionary::Dictionary::getTable) - * @return Pointer to an NdbOperation object if successful, otherwise NULL. - */ - NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get an operation from NdbScanOperation idlelist and - * get the NdbTransaction object which - * was fetched by startTransaction pointing to this operation. - * - * @param aTableName The table name. - * @return pointer to an NdbOperation object if successful, otherwise NULL - */ - NdbScanOperation* getNdbScanOperation(const char* aTableName); -#endif - - /** - * Get an operation from NdbScanOperation idlelist and - * get the NdbTransaction object which - * was fetched by startTransaction pointing to this operation. - * - * @param aTable - * A table object (fetched by NdbDictionary::Dictionary::getTable) - * @return pointer to an NdbOperation object if successful, otherwise NULL - */ - NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get an operation from NdbIndexScanOperation idlelist and - * get the NdbTransaction object which - * was fetched by startTransaction pointing to this operation. - * - * @param anIndexName The index name. - * @param aTableName The table name. 
- * @return pointer to an NdbOperation object if successful, otherwise NULL - */ - NdbIndexScanOperation* getNdbIndexScanOperation(const char* anIndexName, - const char* aTableName); - NdbIndexScanOperation* getNdbIndexScanOperation - (const NdbDictionary::Index *anIndex, const NdbDictionary::Table *aTable); -#endif - - /** - * Get an operation from NdbIndexScanOperation idlelist and - * get the NdbTransaction object which - * was fetched by startTransaction pointing to this operation. - * - * @param anIndex - An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @return pointer to an NdbOperation object if successful, otherwise NULL - */ - NdbIndexScanOperation* getNdbIndexScanOperation - (const NdbDictionary::Index *anIndex); - -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - /** - * Get an operation from NdbIndexOperation idlelist and - * get the NdbTransaction object that - * was fetched by startTransaction pointing to this operation. - * - * @param anIndexName The index name (as created by createIndex). - * @param aTableName The table name. - * @return Pointer to an NdbIndexOperation object if - * successful, otherwise NULL - */ - NdbIndexOperation* getNdbIndexOperation(const char* anIndexName, - const char* aTableName); - NdbIndexOperation* getNdbIndexOperation(const NdbDictionary::Index *anIndex, - const NdbDictionary::Table *aTable); -#endif - - /** - * Get an operation from NdbIndexOperation idlelist and - * get the NdbTransaction object that - * was fetched by startTransaction pointing to this operation. - * - * @param anIndex - * An index object (fetched by NdbDictionary::Dictionary::getIndex). - * @return Pointer to an NdbIndexOperation object if - * successful, otherwise NULL - */ - NdbIndexOperation* getNdbIndexOperation(const NdbDictionary::Index *anIndex); - - /** - * @name Execute Transaction - * @{ - */ - - /** - * Executes transaction. - * - * @param execType Execution type:
- * ExecType::NoCommit executes operations without - * committing them.
- * ExecType::Commit executes remaining operations and - * commits the complete transaction.
- * ExecType::Rollback rollbacks the entire transaction. - * @param abortOption Handling of error while excuting - * AbortOnError - Abort transaction if an operation fail - * IgnoreError - Accept failing operations - * @param force When operations should be sent to NDB Kernel. - * (See @ref secAdapt.) - * - 0: non-force, adaptive algorithm notices it - * (default); - * - 1: force send, adaptive algorithm notices it; - * - 2: non-force, adaptive algorithm do not notice - * the send. - * @return 0 if successful otherwise -1. - */ - int execute(ExecType execType, - NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, - int force = 0 ); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - int execute(::ExecType execType, - ::AbortOption abortOption = ::DefaultAbortOption, - int force = 0 ) { - return execute ((ExecType)execType, - (NdbOperation::AbortOption)abortOption, - force); } -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - // to be documented later - /** - * Prepare an asynchronous transaction. - * - * See @ref secAsync for more information on - * how to use this method. - * - * @param execType Execution type:
- * ExecType::NoCommit executes operations without committing them.
- * ExecType::Commit executes remaining operations and commits the - * complete transaction.
- * ExecType::Rollback rollbacks the entire transaction. - * @param callback A callback method. This method gets - * called when the transaction has been - * executed. See @ref ndbapi_async1.cpp - * for an example on how to specify and use - * a callback method. - * @param anyObject A void pointer. This pointer is forwarded to the - * callback method and can be used to give - * the callback method some data to work on. - * It is up to the application programmer - * to decide on the use of this pointer. - * @param abortOption see @ref execute - */ - void executeAsynchPrepare(ExecType execType, - NdbAsynchCallback callback, - void* anyObject, - NdbOperation::AbortOption = NdbOperation::DefaultAbortOption); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - void executeAsynchPrepare(::ExecType execType, - NdbAsynchCallback callback, - void* anyObject, - ::AbortOption ao = ::DefaultAbortOption) { - executeAsynchPrepare((ExecType)execType, callback, anyObject, - (NdbOperation::AbortOption)ao); } -#endif - - /** - * Prepare and send an asynchronous transaction. - * - * This method perform the same action as - * NdbTransaction::executeAsynchPrepare - * but also sends the operations to the NDB kernel. - * - * See NdbTransaction::executeAsynchPrepare for information - * about the parameters of this method. - * - * See @ref secAsync for more information on - * how to use this method. - */ - void executeAsynch(ExecType aTypeOfExec, - NdbAsynchCallback aCallback, - void* anyObject, - NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, - int forceSend= 0); -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED - void executeAsynch(::ExecType aTypeOfExec, - NdbAsynchCallback aCallback, - void* anyObject, - ::AbortOption abortOption= ::DefaultAbortOption, - int forceSend= 0) - { executeAsynch((ExecType)aTypeOfExec, aCallback, anyObject, - (NdbOperation::AbortOption)abortOption, forceSend); } -#endif -#endif - /** - * Refresh - * Update timeout counter of this transaction - * in the database. If you want to keep the transaction - * active in the database longer than the - * transaction abort timeout. - * @note It's not advised to take a lock on a record and keep it - * for a extended time since this can impact other transactions. - * - */ - int refresh(); - - /** - * Close transaction - * - * @note Equivalent to to calling Ndb::closeTransaction() - */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * @note It is not allowed to call NdbTransaction::close after sending the - * transaction asynchronously before the callback method has - * been called. - * (The application should keep track of the number of - * outstanding transactions and wait until all of them - * has completed before calling NdbTransaction::close). - * If the transaction is not committed it will be aborted. - */ -#endif - void close(); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** - * Restart transaction - * - * Once a transaction has been completed successfully - * it can be started again wo/ calling closeTransaction/startTransaction - * - * @note This method also releases completed operations - * - * @note This method does not close open scans, - * c.f. NdbScanOperation::close() - * - * @note This method can only be called _directly_ after commit - * and only if commit is successful - */ - int restart(); -#endif - - /** @} *********************************************************************/ - - /** - * @name Meta Information - * @{ - */ - - /** - * Get global checkpoint identity (GCI) of transaction. - * - * Each committed transaction belong to a GCI. 
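One way to drive the asynchronous interface described above: define a callback matching the NdbAsynchCallback typedef, prepare the transaction with executeAsynchPrepare(), and then poll for completion. The table and column names are made up, and Ndb::sendPollNdb() comes from Ndb.hpp, which is not shown here, so treat that call as an assumption.

// Called once the prepared transaction has completed.
static void on_done(int result, NdbTransaction* trans, void* arg)
{
  if (result == -1) {
    // trans->getNdbError() holds the failure reason
  }
}

void read_row_async(Ndb* ndb)
{
  NdbTransaction* trans = ndb->startTransaction();
  NdbOperation* op = trans->getNdbOperation("T1");
  op->readTuple(NdbOperation::LM_Read);
  op->equal("A", (Uint32)42);
  NdbRecAttr* b = op->getValue("B");       // filled in when the read completes

  trans->executeAsynchPrepare(NdbTransaction::Commit, on_done, NULL);
  ndb->sendPollNdb();                      // send prepared work, wait for callbacks
  // ... b->u_32_value() is valid here if the read succeeded ...
  ndb->closeTransaction(trans);
}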
- * The log for the committed transaction is saved on - * disk when a global checkpoint occurs. - * - * Whether or not the global checkpoint with this GCI has been - * saved on disk or not cannot be determined by this method. - * - * By comparing the GCI of a transaction with the value - * last GCI restored in a restarted NDB Cluster one can determine - * whether the transaction was restored or not. - * - * @note Global Checkpoint Identity is undefined for scan transactions - * (This is because no updates are performed in scan transactions.) - * - * @return GCI of transaction or -1 if GCI is not available. - * (Note that there has to be an NdbTransaction::execute call - * with Ndb::Commit for the GCI to be available.) - */ - int getGCI(); - - /** - * Get transaction identity. - * - * @return Transaction id. - */ - Uint64 getTransactionId(); - - /** - * The commit status of the transaction. - */ - enum CommitStatusType { - NotStarted, ///< Transaction not yet started - Started, ///< Missing explanation - Committed, ///< Transaction has been committed - Aborted, ///< Transaction has been aborted - NeedAbort ///< Missing explanation - }; - - /** - * Get the commit status of the transaction. - * - * @return The commit status of the transaction - */ - CommitStatusType commitStatus(); - - /** @} *********************************************************************/ - - /** - * @name Error Handling - * @{ - */ - - /** - * Get error object with information about the latest error. - * - * @return An error object with information about the latest error. - */ - const NdbError & getNdbError() const; - - /** - * Get the latest NdbOperation which had an error. - * This method is used on the NdbTransaction object to find the - * NdbOperation causing an error. - * To find more information about the - * actual error, use method NdbOperation::getNdbError() - * on the returned NdbOperation object. - * - * @return The NdbOperation causing the latest error. - */ - NdbOperation* getNdbErrorOperation(); - - /** - * Get the method number where the latest error occured. - * - * @return Line number where latest error occured. - */ - int getNdbErrorLine(); - - /** - * Get completed (i.e. executed) operations of a transaction - * - * This method should only be used after a transaction - * has been executed. - * - NdbTransaction::getNextCompletedOperation(NULL) returns the - * first NdbOperation object. - * - NdbTransaction::getNextCompletedOperation(op) returns the - * NdbOperation object defined after the NdbOperation "op". - * - * This method is typically used to fetch all NdbOperation:s of - * a transaction to check for errors (use NdbOperation::getNdbError - * to fetch the NdbError object of an NdbOperation). - * - * @note This method should only be used after the transaction has been - * executed and before the transaction has been closed. - * - * @param op Operation, NULL means get first operation - * @return Operation "after" op - */ - const NdbOperation * getNextCompletedOperation(const NdbOperation * op)const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - const NdbOperation* getFirstDefinedOperation()const{return theFirstOpInList;} - const NdbOperation* getLastDefinedOperation()const{return theLastOpInList;} - - /** @} *********************************************************************/ - - /** - * Execute the transaction in NoCommit mode if there are any not-yet - * executed blob part operations of given types. Otherwise do - * nothing. 
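When a transaction is executed with AO_IgnoreError, the loop over getNextCompletedOperation() described above is the usual way to find out which individual operations failed. A brief sketch, assuming trans is an executed but not yet closed NdbTransaction:

if (trans->execute(NdbTransaction::Commit,
                   NdbOperation::AO_IgnoreError) == -1) {
  // the transaction as a whole failed; see trans->getNdbError()
}

const NdbOperation* op = NULL;
while ((op = trans->getNextCompletedOperation(op)) != NULL) {
  const NdbError& err = op->getNdbError();
  if (err.code != 0) {
    // this particular operation failed; err.message explains why
  }
}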
The flags argument is bitwise OR of (1 << optype) where - * optype comes from NdbOperation::OperationType. Only the basic PK - * ops are used (read, insert, update, delete). - */ - int executePendingBlobOps(Uint8 flags = 0xFF); - - /** - * Get nodeId of TC for this transaction - */ - Uint32 getConnectedNodeId(); // Get Connected node id -#endif - -private: - /** - * Release completed operations - */ - void releaseCompletedOperations(); - - typedef Uint64 TimeMillis_t; - /************************************************************************** - * These methods are service methods to other classes in the NDBAPI. * - **************************************************************************/ - - /************************************************************************** - * These are the create and delete methods of this class. * - **************************************************************************/ - NdbTransaction(Ndb* aNdb); - ~NdbTransaction(); - - int init(); // Initialize connection object for new transaction - - int executeNoBlobs(ExecType execType, - NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, - int force = 0 ); - - /** - * Set Connected node id - * and sequence no - */ - void setConnectedNodeId( Uint32 nodeId, Uint32 sequence); - - void setMyBlockReference( int ); // Set my block refrerence - void setTC_ConnectPtr( Uint32 ); // Sets TC Connect pointer - int getTC_ConnectPtr(); // Gets TC Connect pointer - void setBuddyConPtr(Uint32); // Sets Buddy Con Ptr - Uint32 getBuddyConPtr(); // Gets Buddy Con Ptr - NdbTransaction* next(); // Returns the next pointer - void next(NdbTransaction*); // Sets the next pointer - - enum ConStatusType { - NotConnected, - Connecting, - Connected, - DisConnecting, - ConnectFailure - }; - ConStatusType Status(); // Read the status information - void Status(ConStatusType); // Set the status information - - Uint32 get_send_size(); // Get size to send - void set_send_size(Uint32); // Set size to send; - - int receiveDIHNDBTAMPER(NdbApiSignal* anApiSignal); - int receiveTCSEIZECONF(NdbApiSignal* anApiSignal); - int receiveTCSEIZEREF(NdbApiSignal* anApiSignal); - int receiveTCRELEASECONF(NdbApiSignal* anApiSignal); - int receiveTCRELEASEREF(NdbApiSignal* anApiSignal); - int receiveTC_COMMITCONF(const class TcCommitConf *); - int receiveTCKEYCONF(const class TcKeyConf *, Uint32 aDataLength); - int receiveTCKEY_FAILCONF(const class TcKeyFailConf *); - int receiveTCKEY_FAILREF(NdbApiSignal* anApiSignal); - int receiveTC_COMMITREF(NdbApiSignal* anApiSignal); - int receiveTCROLLBACKCONF(NdbApiSignal* anApiSignal); // Rec TCPREPARECONF ? - int receiveTCROLLBACKREF(NdbApiSignal* anApiSignal); // Rec TCPREPAREREF ? 
- int receiveTCROLLBACKREP(NdbApiSignal* anApiSignal); - int receiveTCINDXCONF(const class TcIndxConf *, Uint32 aDataLength); - int receiveTCINDXREF(NdbApiSignal*); - int receiveSCAN_TABREF(NdbApiSignal*); - int receiveSCAN_TABCONF(NdbApiSignal*, const Uint32*, Uint32 len); - - int doSend(); // Send all operations - int sendROLLBACK(); // Send of an ROLLBACK - int sendTC_HBREP(); // Send a TCHBREP signal; - int sendCOMMIT(); // Send a TC_COMMITREQ signal; - void setGCI(int GCI); // Set the global checkpoint identity - - int OpCompleteFailure(NdbOperation*); - int OpCompleteSuccess(); - void CompletedOperations(); // Move active ops to list of completed - - void OpSent(); // Operation Sent with success - - // Free connection related resources and close transaction - void release(); - - // Release all operations in connection - void releaseOperations(); - - // Release all cursor operations in connection - void releaseOps(NdbOperation*); - void releaseScanOperations(NdbIndexScanOperation*); - bool releaseScanOperation(NdbIndexScanOperation** listhead, - NdbIndexScanOperation** listtail, - NdbIndexScanOperation* op); - void releaseExecutedScanOperation(NdbIndexScanOperation*); - - // Set the transaction identity of the transaction - void setTransactionId(Uint64 aTransactionId); - - // Indicate something went wrong in the definition phase - void setErrorCode(int anErrorCode); - - // Indicate something went wrong in the definition phase - void setOperationErrorCode(int anErrorCode); - - // Indicate something went wrong in the definition phase - void setOperationErrorCodeAbort(int anErrorCode, int abortOption = -1); - - int checkMagicNumber(); // Verify correct object - NdbOperation* getNdbOperation(const class NdbTableImpl* aTable, - NdbOperation* aNextOp = 0); - NdbIndexScanOperation* getNdbScanOperation(const class NdbTableImpl* aTable); - NdbIndexOperation* getNdbIndexOperation(const class NdbIndexImpl* anIndex, - const class NdbTableImpl* aTable, - NdbOperation* aNextOp = 0); - NdbIndexScanOperation* getNdbIndexScanOperation(const NdbIndexImpl* index, - const NdbTableImpl* table); - - void handleExecuteCompletion(); - - /**************************************************************************** - * These are the private variables of this class. - ****************************************************************************/ - - Uint32 ptr2int(); - Uint32 theId; - - // Keeps track of what the send method should do. - enum SendStatusType { - NotInit, - InitState, - sendOperations, - sendCompleted, - sendCOMMITstate, - sendABORT, - sendABORTfail, - sendTC_ROLLBACK, - sendTC_COMMIT, - sendTC_OP - }; - SendStatusType theSendStatus; - NdbAsynchCallback theCallbackFunction; // Pointer to the callback function - void* theCallbackObject; // The callback object pointer - Uint32 theTransArrayIndex; // Current index in a transaction - // array for this object - TimeMillis_t theStartTransTime; // Start time of the transaction - - NdbError theError; // Errorcode on transaction - int theErrorLine; // Method number of last error in NdbOperation - NdbOperation* theErrorOperation; // The NdbOperation where the error occurred - - Ndb* theNdb; // Pointer to Ndb object - NdbTransaction* theNext; // Next pointer. Used in idle list. - - NdbOperation* theFirstOpInList; // First operation in defining list. - NdbOperation* theLastOpInList; // Last operation in defining list. 
- - NdbOperation* theFirstExecOpInList; // First executing operation in list - NdbOperation* theLastExecOpInList; // Last executing operation in list. - - - NdbOperation* theCompletedFirstOp; // First & last operation in completed - NdbOperation* theCompletedLastOp; // operation list. - - Uint32 theNoOfOpSent; // How many operations have been sent - Uint32 theNoOfOpCompleted; // How many operations have completed - Uint32 theMyRef; // Our block reference - Uint32 theTCConPtr; // Transaction Co-ordinator connection pointer. - Uint64 theTransactionId; // theTransactionId of the transaction - Uint32 theGlobalCheckpointId; // The gloabl checkpoint identity of the transaction - Uint64 *p_latest_trans_gci; // Reference to latest gci for connection - ConStatusType theStatus; // The status of the connection - enum CompletionStatus { - NotCompleted, - CompletedSuccess, - CompletedFailure, - DefinitionFailure - } theCompletionStatus; // The Completion status of the transaction - CommitStatusType theCommitStatus; // The commit status of the transaction - Uint32 theMagicNumber; // Magic Number to verify correct object - - Uint32 thePriority; // Transaction Priority - - enum ReturnType { ReturnSuccess, ReturnFailure }; - ReturnType theReturnStatus; // Did we have any read/update/delete failing - // to find the tuple. - bool theTransactionIsStarted; - bool theInUseState; - bool theSimpleState; - - enum ListState { - NotInList, - InPreparedList, - InSendList, - InCompletedList - } theListState; - - Uint32 theDBnode; // The database node we are connected to - Uint32 theNodeSequence; // The sequence no of the db node - bool theReleaseOnClose; - - /** - * handle transaction spanning - * multiple TC/db nodes - * - * 1) Bitmask with used nodes - * 2) Bitmask with nodes failed during op - */ - Uint32 m_db_nodes[2]; - Uint32 m_failed_db_nodes[2]; - - int report_node_failure(Uint32 id); - - // Scan operations - bool m_waitForReply; - NdbIndexScanOperation* m_theFirstScanOperation; - NdbIndexScanOperation* m_theLastScanOperation; - - NdbIndexScanOperation* m_firstExecutedScanOp; - - // Scan operations - // The operation actually performing the scan - NdbScanOperation* theScanningOp; - Uint32 theBuddyConPtr; - // optim: any blobs - bool theBlobFlag; - Uint8 thePendingBlobOps; - inline bool hasBlobOperation() { return theBlobFlag; } - - static void sendTC_COMMIT_ACK(class TransporterFacade *, NdbApiSignal *, - Uint32 transId1, Uint32 transId2, - Uint32 aBlockRef); - - void completedFail(const char * s); -#ifdef VM_TRACE - void printState(); -#endif - bool checkState_TransId(const Uint32 * transId) const; - - void remove_list(NdbOperation*& head, NdbOperation*); - void define_scan_op(NdbIndexScanOperation*); - - friend class HugoOperations; - friend struct Ndb_free_list_t; -}; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -inline -Uint32 -NdbTransaction::get_send_size() -{ - return 0; -} - -inline -void -NdbTransaction::set_send_size(Uint32 send_size) -{ - return; -} - -#ifdef NDB_NO_DROPPED_SIGNAL -#include -#endif - -inline -int -NdbTransaction::checkMagicNumber() -{ - if (theMagicNumber == 0x37412619) - return 0; - else { -#ifdef NDB_NO_DROPPED_SIGNAL - abort(); -#endif - return -1; - } -} - -inline -bool -NdbTransaction::checkState_TransId(const Uint32 * transId) const { - const Uint32 tTmp1 = transId[0]; - const Uint32 tTmp2 = transId[1]; - Uint64 tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32); - bool b = theStatus == Connected && theTransactionId == tRecTransId; - return b; -} - 
-/************************************************************************************************ -void setTransactionId(Uint64 aTransactionId); - -Remark: Set the transaction identity. -************************************************************************************************/ -inline -void -NdbTransaction::setTransactionId(Uint64 aTransactionId) -{ - theTransactionId = aTransactionId; -} - -inline -void -NdbTransaction::setConnectedNodeId(Uint32 aNode, Uint32 aSequenceNo) -{ - theDBnode = aNode; - theNodeSequence = aSequenceNo; -} -/****************************************************************************** -int getConnectedNodeId(); - -Return Value: Return theDBnode. -Remark: Get Connected node id. -******************************************************************************/ -inline -Uint32 -NdbTransaction::getConnectedNodeId() -{ - return theDBnode; -} -/****************************************************************************** -void setMyBlockReference(int aBlockRef); - -Parameters: aBlockRef: The block refrerence. -Remark: Set my block refrerence. -******************************************************************************/ -inline -void -NdbTransaction::setMyBlockReference(int aBlockRef) -{ - theMyRef = aBlockRef; -} -/****************************************************************************** -void setTC_ConnectPtr(Uint32 aTCConPtr); - -Parameters: aTCConPtr: The connection pointer. -Remark: Sets TC Connect pointer. -******************************************************************************/ -inline -void -NdbTransaction::setTC_ConnectPtr(Uint32 aTCConPtr) -{ - theTCConPtr = aTCConPtr; -} - -/****************************************************************************** -int getTC_ConnectPtr(); - -Return Value: Return theTCConPtr. -Remark: Gets TC Connect pointer. -******************************************************************************/ -inline -int -NdbTransaction::getTC_ConnectPtr() -{ - return theTCConPtr; -} - -inline -void -NdbTransaction::setBuddyConPtr(Uint32 aBuddyConPtr) -{ - theBuddyConPtr = aBuddyConPtr; -} - -inline -Uint32 NdbTransaction::getBuddyConPtr() -{ - return theBuddyConPtr; -} - -/****************************************************************************** -NdbTransaction* next(); - -inline -void -NdbTransaction::setBuddyConPtr(Uint32 aBuddyConPtr) -{ - theBuddyConPtr = aBuddyConPtr; -} - -inline -Uint32 NdbTransaction::getBuddyConPtr() -{ - return theBuddyConPtr; -} - -Return Value: Return next pointer to NdbTransaction object. -Remark: Get the next pointer. -******************************************************************************/ -inline -NdbTransaction* -NdbTransaction::next() -{ - return theNext; -} - -/****************************************************************************** -void next(NdbTransaction aTransaction); - -Parameters: aTransaction: The connection object. -Remark: Sets the next pointer. -******************************************************************************/ -inline -void -NdbTransaction::next(NdbTransaction* aTransaction) -{ - theNext = aTransaction; -} - -/****************************************************************************** -ConStatusType Status(); - -Return Value Return the ConStatusType. -Parameters: aStatus: The status. -Remark: Sets Connect status. 
-******************************************************************************/ -inline -NdbTransaction::ConStatusType -NdbTransaction::Status() -{ - return theStatus; -} - -/****************************************************************************** -void Status(ConStatusType aStatus); - -Parameters: aStatus: The status. -Remark: Sets Connect status. -******************************************************************************/ -inline -void -NdbTransaction::Status( ConStatusType aStatus ) -{ - theStatus = aStatus; -} - - -/****************************************************************************** - void setGCI(); - -Remark: Set global checkpoint identity of the transaction -******************************************************************************/ -inline -void -NdbTransaction::setGCI(int aGlobalCheckpointId) -{ - theGlobalCheckpointId = aGlobalCheckpointId; -} - -/****************************************************************************** -void OpSent(); - -Remark: An operation was sent with success that expects a response. -******************************************************************************/ -inline -void -NdbTransaction::OpSent() -{ - theNoOfOpSent++; -} - -/****************************************************************************** -void executePendingBlobOps(); -******************************************************************************/ -#include -inline -int -NdbTransaction::executePendingBlobOps(Uint8 flags) -{ - if (thePendingBlobOps & flags) { - // not executeNoBlobs because there can be new ops with blobs - return execute(NoCommit); - } - return 0; -} - -inline -Uint32 -NdbTransaction::ptr2int(){ - return theId; -} - -typedef NdbTransaction NdbConnection; - -#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -#endif diff --git a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp deleted file mode 100644 index 39b30923ebc..00000000000 --- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp +++ /dev/null @@ -1,148 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#ifndef CLUSTER_CONNECTION_HPP -#define CLUSTER_CONNECTION_HPP -#include - -class Ndb_cluster_connection_node_iter -{ - friend class Ndb_cluster_connection_impl; -public: - Ndb_cluster_connection_node_iter() : scan_state(~0), - init_pos(0), - cur_pos(0) {}; -private: - unsigned char scan_state; - unsigned char init_pos; - unsigned char cur_pos; -}; - -/** - * @class Ndb_cluster_connection - * @brief Represents a connection to a cluster of storage nodes. - * - * Any NDB application program should begin with the creation of a - * single Ndb_cluster_connection object, and should make use of one - * and only one Ndb_cluster_connection. The application connects to - * a cluster management server when this object's connect() method is called. 
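A typical connect sequence is sketched below; the retry counts and the Ndb object created on top of the connection (declared in Ndb.hpp, not shown here) are illustrative assumptions.

#include <NdbApi.hpp>

// Returns an initialized Ndb object, or NULL if the cluster is unreachable.
Ndb* connect_to_cluster(Ndb_cluster_connection& conn)
{
  if (conn.connect(4, 5, 1) != 0)          // 4 retries, 5 s apart, verbose
    return NULL;                           // management server not reachable
  if (conn.wait_until_ready(30, 0) < 0)    // wait up to 30 s for data nodes
    return NULL;

  Ndb* ndb = new Ndb(&conn, "test");       // database name "test" is made up
  if (ndb->init() != 0) {
    delete ndb;
    return NULL;
  }
  return ndb;
}

The caller constructs the Ndb_cluster_connection itself (for example with the connectstring of the management server) and keeps it alive for as long as the Ndb object is in use.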
- * By using the wait_until_ready() method it is possible to wait - * for the connection to reach one or more storage nodes. - */ -class Ndb_cluster_connection { -public: - /** - * Create a connection to a cluster of storage nodes - * - * @param connectstring The connectstring for where to find the - * management server - */ - Ndb_cluster_connection(const char * connectstring = 0); - ~Ndb_cluster_connection(); - - /** - * Set a name on the connection, which will be reported in cluster log - * - * @param name - * - */ - void set_name(const char *name); - - /** - * Set timeout - * - * Used as a timeout when talking to the management server, - * helps limit the amount of time that we may block when connecting - * - * Basically just calls ndb_mgm_set_timeout(h,ms). - * - * The default is 30 seconds. - * - * @param timeout_ms millisecond timeout. As with ndb_mgm_set_timeout, - * only increments of 1000 are really supported, - * with not to much gaurentees about calls completing - * in any hard amount of time. - * @return 0 on success - */ - int set_timeout(int timeout_ms); - - /** - * Connect to a cluster management server - * - * @param no_retries specifies the number of retries to attempt - * in the event of connection failure; a negative value - * will result in the attempt to connect being repeated - * indefinitely - * - * @param retry_delay_in_seconds specifies how often retries should - * be performed - * - * @param verbose specifies if the method should print a report of its progess - * - * @return 0 = success, - * 1 = recoverable error, - * -1 = non-recoverable error - */ - int connect(int no_retries=0, int retry_delay_in_seconds=1, int verbose=0); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - int start_connect_thread(int (*connect_callback)(void)= 0); -#endif - - /** - * Wait until the requested connection with one or more storage nodes is successful - * - * @param timeout_for_first_alive Number of seconds to wait until - * first live node is detected - * @param timeout_after_first_alive Number of seconds to wait after - * first live node is detected - * - * @return = 0 all nodes live, - * > 0 at least one node live, - * < 0 error - */ - int wait_until_ready(int timeout_for_first_alive, - int timeout_after_first_alive); - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - int get_no_ready(); - const char *get_connectstring(char *buf, int buf_sz) const; - int get_connected_port() const; - const char *get_connected_host() const; - - void set_optimized_node_selection(int val); - - unsigned no_db_nodes(); - unsigned node_id(); - unsigned get_connect_count() const; - - void init_get_next_node(Ndb_cluster_connection_node_iter &iter); - unsigned int get_next_node(Ndb_cluster_connection_node_iter &iter); - unsigned get_active_ndb_objects() const; - - Uint64 *get_latest_trans_gci(); -#endif - -private: - friend class Ndb; - friend class NdbImpl; - friend class Ndb_cluster_connection_impl; - class Ndb_cluster_connection_impl & m_impl; - Ndb_cluster_connection(Ndb_cluster_connection_impl&); -}; - -#endif diff --git a/storage/ndb/include/ndbapi/ndb_opt_defaults.h b/storage/ndb/include/ndbapi/ndb_opt_defaults.h deleted file mode 100644 index bf97d931dd1..00000000000 --- a/storage/ndb/include/ndbapi/ndb_opt_defaults.h +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the 
License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_OPT_DEFAULTS_H -#define NDB_OPT_DEFAULTS_H - -#define OPT_NDB_SHM_SIGNUM_DEFAULT 0 -#define OPT_NDB_SHM_DEFAULT 0 - -#endif diff --git a/storage/ndb/include/ndbapi/ndbapi_limits.h b/storage/ndb/include/ndbapi/ndbapi_limits.h deleted file mode 100644 index 93102474a77..00000000000 --- a/storage/ndb/include/ndbapi/ndbapi_limits.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright (c) 2003-2005, 2007, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBAPI_LIMITS_H -#define NDBAPI_LIMITS_H - -#define NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY 32 -#define NDB_MAX_ATTRIBUTES_IN_INDEX NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY -#define NDB_MAX_ATTRIBUTES_IN_TABLE 128 - -#define NDB_MAX_TUPLE_SIZE_IN_WORDS 2013 -#define NDB_MAX_KEYSIZE_IN_WORDS 1023 -#define NDB_MAX_KEY_SIZE (NDB_MAX_KEYSIZE_IN_WORDS*4) -#define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4) -#define NDB_MAX_ACTIVE_EVENTS 100 - -/* TUP ZATTR_BUFFER_SIZE 16384 (minus 1) minus place for getValue()s */ -#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS (16384 - 1 - 1024) - -#endif diff --git a/storage/ndb/include/ndbapi/ndberror.h b/storage/ndb/include/ndbapi/ndberror.h deleted file mode 100644 index c1c54a2b0c9..00000000000 --- a/storage/ndb/include/ndbapi/ndberror.h +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBERROR_H -#define NDBERROR_H - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - -typedef enum -{ - ndberror_st_success = 0, - ndberror_st_temporary = 1, - ndberror_st_permanent = 2, - ndberror_st_unknown = 3 -} ndberror_status_enum; - -typedef enum -{ - ndberror_cl_none = 0, - ndberror_cl_application = 1, - ndberror_cl_no_data_found = 2, - ndberror_cl_constraint_violation = 3, - ndberror_cl_schema_error = 4, - ndberror_cl_user_defined = 5, - ndberror_cl_insufficient_space = 6, - ndberror_cl_temporary_resource = 7, - ndberror_cl_node_recovery = 8, - ndberror_cl_overload = 9, - ndberror_cl_timeout_expired = 10, - ndberror_cl_unknown_result = 11, - ndberror_cl_internal_error = 12, - ndberror_cl_function_not_implemented = 13, - ndberror_cl_unknown_error_code = 14, - ndberror_cl_node_shutdown = 15, - ndberror_cl_configuration = 16, - ndberror_cl_schema_object_already_exists = 17, - ndberror_cl_internal_temporary = 18 -} ndberror_classification_enum; - - -typedef struct { - - /** - * Error status. - */ - ndberror_status_enum status; - - /** - * Error type - */ - ndberror_classification_enum classification; - - /** - * Error code - */ - int code; - - /** - * Mysql error code - */ - int mysql_code; - - /** - * Error message - */ - const char * message; - - /** - * The detailed description. This is extra information regarding the - * error which is not included in the error message. - * - * @note Is NULL when no details specified - */ - char * details; - -} ndberror_struct; - - -typedef ndberror_status_enum ndberror_status; -typedef ndberror_classification_enum ndberror_classification; - -const char *ndberror_status_message(ndberror_status); -const char *ndberror_classification_message(ndberror_classification); -void ndberror_update(ndberror_struct *); -int ndb_error_string(int err_no, char *str, int size); - -#endif /* doxygen skip internal*/ - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/newtonapi/dba.h b/storage/ndb/include/newtonapi/dba.h deleted file mode 100644 index b02a7abb8af..00000000000 --- a/storage/ndb/include/newtonapi/dba.h +++ /dev/null @@ -1,730 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - * @mainpage DBA User Guide - * - * @section secIntro Introduction - * DBA is an API to access the NDB Cluster. - * - * DBA supports transactions using an asynchronous execution model. - * Everything but transactions is synchronous. - * - * DBA uses the concept of bindings to simplify database access. - * A binding is a relation between a database table and - * one or several C structs. 
- * A binding is created initially and then used multiple time during - * application execution. - * - * Each of the data accessing functions in DBA is implemented as a - * transaction, i.e. the call will either fully complete or - * nothing happens (the transaction fails). - * - * DBA also supports "read as much as possible" with bulk read. - * With bulk read the application can specify a set of primary keys and - * try to read all of the corresponding rows. The bulk read will not fail - * if a row does not exist but will instead inform the application using a - * RowFoundIndicator variable. - * - * A request is a transaction or a bulk read. - * - * @section secError Error Handling - * When a synchronous method in DBA fails these methods are applicable: - * -# DBA_GetLatestError() - * -# DBA_GetLatestNdbError() - * -# DBA_GetLatestErrorMsg() - * - * The DBA_GetLatestErrorMsg() will then return a description of - * what has failed. - * - * For asynchronous methods the application should: - * -# check that the RequestId returned by function is not - * @ref DBA_INVALID_REQID - * -# check Status supplied in callback (see @ref DBA_AsyncCallbackFn_t) - * - * If @ref DBA_INVALID_REQID is returned, - * the details of error can be found using - * "latest"-functions. - * - * If error is indicated in callback (using Status), when the - * "latest"-functions are NOT applicable. - * - * @section secExamples Example Programs - * - * - @ref common.hpp - * - @ref basic.cpp - * - @ref br_test.cpp - * - @ref ptr_binding_test.cpp - * - */ - -/** - * @page basic.cpp basic.cpp - * @include basic.cpp - */ - -/** - * @page common.hpp common.hpp - * @include common.hpp - */ - -/** - * @page br_test.cpp br_test.cpp - * @include br_test.cpp - */ - -/** - * @page ptr_binding_test.cpp ptr_binding_test.cpp - * @include ptr_binding_test.cpp - */ - -/** @addtogroup DBA - * @{ - */ - -/****** THIS LINE IS 80 CHARACTERS WIDE - DO *NOT* EXCEED 80 CHARACTERS! ****/ - -#ifndef DBA_H -#define DBA_H - -/* --- Include files ---- */ - -#include -#include - -/* --- Types and definitions --- */ - -/** - * Possible error status for DBA functions. - */ -typedef enum { - DBA_NO_ERROR = 0, /**< Success */ - - DBA_NOT_IMPLEMENTED = -1, /**< Function not implemented */ - DBA_NDB_ERROR = -2, /**< Uncategorised error from NDB */ - DBA_ERROR = -3, /**< Uncategorised error from DBA implementation */ - - DBA_APPLICATION_ERROR = 1, /**< Function called with invalid argument(s) - or other application errors */ - DBA_NO_DATA = 2, /**< No row with specified PK existed */ - DBA_CONSTRAINT_VIOLATION = 3, /**< There already exists a row with that PK*/ - - DBA_SCHEMA_ERROR = 4, /**< Table already exists */ - DBA_INSUFFICIENT_SPACE = 5, /**< The DB is full */ - DBA_TEMPORARY_ERROR = 6, /**< Some temporary problem occured */ - DBA_TIMEOUT = 7, /**< The request timed out, probably due to - dead-lock */ - DBA_OVERLOAD = 8, /**< The DB is overloaded */ - DBA_UNKNOWN_RESULT = 9 /**< It is unknown wheater transaction was - commited or aborted */ -} DBA_Error_t; - -/** - * Error code. This is the error code that is returned by NDB. - * Not to be confused by the status returned by the DBA implementation. - */ -typedef int DBA_ErrorCode_t; - -/** - * DBA column types - */ -typedef enum { - DBA_CHAR, /**< String */ - DBA_INT /**< Integer */ -} DBA_DataTypes_t; - - -/** - * Column description. - * Used for creating tables. 
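As a small illustration of how such column descriptions are meant to be used together with DBA_CreateTable() (declared further down in this header), a hypothetical PERSON table could be set up as below; the column names and sizes are assumptions, and the calls belong inside an application initialization function.

/* PERSON: NAME is a 30-byte string and part of the primary key, AGE an int. */
static const DBA_ColumnDesc_t PersonCols[] = {
  { "NAME", DBA_CHAR, 30,          1 },
  { "AGE",  DBA_INT,  sizeof(int), 0 }
};

DBA_Error_t res = DBA_CreateTable("PERSON", 2, PersonCols);
if (res != DBA_NO_ERROR) {
  /* DBA_GetLatestErrorMsg() describes what went wrong */
}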
- */ -typedef struct DBA_ColumnDesc { - - const char* Name; /**< Name of table column */ - DBA_DataTypes_t DataType; /**< Datatype of table column*/ - Size_t Size; /**< Column size in bytes */ - Boolean_t IsKey; /**< True if column is part of primary key */ - -} DBA_ColumnDesc_t; - -/** - * Used to simplify binding definitions. See @ref DBA_ColumnBinding - * for example. - * - * @param ColName Name of column in db table - * @param Type Column/field type. - * @param Struct Structure - * @param Field Field in structure - * @return Arg list for defining binding of type @ref DBA_Binding_t - */ -#define DBA_BINDING( ColName, Type, Struct, Field ) \ - { ColName, Type, PCN_SIZE_OF( Struct, Field ), \ - PCN_OFFSET_OF( Struct, Field ), 0, 0 } - -/** - * Used to simplify ptr binding definitions. See @ref DBA_ColumnBinding - * for example. - * - * @param Struct Structure - * @param Field Field in structure - * @return Arg list for defining binding of type @ref DBA_Binding_t - */ -#define DBA_BINDING_PTR(Struct, Field, ColBindings, NbCBindings) \ - { 0, DBA_CHAR, NbCBindings, PCN_OFFSET_OF( Struct, Field ), \ - 1, ColBindings } - -/** - * The @ref DBA_ColumnBinding_t is used to describe a binding between one - * column and one field of a C struct. - * - *
- * typedef struct Address {
- *   char StreetName[30];
- *   int  StreetNumber;
- * } Address_t;
- *
- * typedef struct Person {
- *   char        Name[30];
- *   Address_t * AddressPtr;
- * } Person_t; 
- * - * - * For example, if the field Name of a Person_t data structure is - * bound to the column "NAME", the corresponding binding would be - * defined as: - * - *
- * DBA_ColumnBinding_t NameBinding =
- *   DBA_BINDING( "name", DBA_CHAR, Person_t, Name ); 
- * - * - * There is also the @ref DBA_BINDING_PTR which is used when - * several linked structures should be put into one table. - * - * For example, if data in a Person_t data structure should be saved - * in the same table as the Address_t data structure - * (as the address belongs to the person), the corresponding binding would be - * defined as: - * - *
- * DBA_ColumnBinding_t AddrBinding[AddrLen]; This binding describes how the 
- *                                            fields in the Address_t 
- *                                            structure are linked to the 
- *                                            table PERSON_ADDRESS
- *
- * DBA_ColumnBinding_t AddressBinding = 
- *   DBA_BINDING_PTR(Person_t, AddressPtr, AddrBinding, AddrLen); 
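Spelled out with the macros defined above, the bindings from this example could look roughly as follows inside application setup code; the column names, the PERSON_ADDRESS table name and the element counts are illustrative, the structs are the Address_t/Person_t shown earlier, and DBA_CreateBinding() is declared further down in this header.

static const DBA_ColumnBinding_t AddrBinding[] = {
  DBA_BINDING("STREET_NAME",   DBA_CHAR, Address_t, StreetName),
  DBA_BINDING("STREET_NUMBER", DBA_INT,  Address_t, StreetNumber)
};

static const DBA_ColumnBinding_t PersonBinding[] = {
  DBA_BINDING("NAME", DBA_CHAR, Person_t, Name),
  DBA_BINDING_PTR(Person_t, AddressPtr, AddrBinding, 2)
};

/* 2 == number of entries in PersonBinding */
DBA_Binding_t* Binding =
  DBA_CreateBinding("PERSON_ADDRESS", 2, PersonBinding, sizeof(Person_t));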
- * - * - */ -struct DBA_ColumnBinding { - const char* Name; /**< Name of table column */ - DBA_DataTypes_t DataType; /**< Type of member in structure */ - Size_t Size; /**< Size in bytes of member - or no of @ref DBA_ColumnBinding's - when doing ptr binding */ - Size_t Offset; /**< Offset of the member */ - - Boolean_t Ptr; /**< True if binding is of ptr type */ - const struct DBA_ColumnBinding * SubBinding; /**< Address of Binding Ptr - valid if Ptr is true */ -}; - -/** - * Typedef: @ref DBA_ColumnBinding - */ -typedef struct DBA_ColumnBinding DBA_ColumnBinding_t; - -/** - * A @ref DBA_Binding_t object is used to establish a binding between - * one or more columns of a table to the fields of C structs. - * - * It is used with insert, and update and read transactions to define - * on which columns of the table the operations is performed, and to - * which members of a C data structure they map. - * - * All key columns must be bound to a field of the struct. - * - * The function @ref DBA_CreateBinding is used to create this binding. - */ -typedef struct DBA_Binding DBA_Binding_t; - -/* --- Exported functions --- */ - -/** - * Set DBA configuration parameter - *
- * Id Description                 Default Min  Max
- * == =========================== ======= ==== ====
- * 0  NBP Interval                   10    4   -
- * 1  Operations/Bulkread          1000    1   5000
- * 2  Start transaction timeout       0    0   -
- * 3  Force send algorithm            1    0   2
- *
- * @return Status - */ -DBA_Error_t DBA_SetParameter(int ParameterId, int Value); - -/** - * Set DBA configuration parameter. - * See @ref DBA_SetParameter for description of parameters. - * - * @return Status - */ -DBA_Error_t DBA_GetParameter(int ParameterId, int * Value); - -/** - * Initialize DBA library and connect to NDB Cluster. - * - * @return Status - */ -DBA_Error_t DBA_Open( ); - -/** - * Close connection to NDB cluster and free allocated memory. - * - * @return Error status - */ -DBA_Error_t DBA_Close(void); - -/** - * Get latest DBA error. - * - * @note Only applicable to synchronous methods - */ -DBA_Error_t DBA_GetLatestError(); - -/** - * Get latest NDB error. - * - * @note Only applicable to synchronous methods - */ -DBA_ErrorCode_t DBA_GetLatestNdbError(); - -/** - * Get latest error string associated with DBA_GetLatestError(). - * - * @note String must not be free by caller of this method. - * @note Only applicable to synchronous methods. - */ -const char * DBA_GetLatestErrorMsg(); - -/** - * Get error msg associated with code - * - * @note String must not be free by caller of this method - */ -const char * DBA_GetErrorMsg(DBA_Error_t); - -/** - * Get error msg associated with code - * - * @note String must not be free by caller of this method - */ -const char * DBA_GetNdbErrorMsg(DBA_ErrorCode_t); - -/** - * Create a table. - * - * @param TableName Name of table to create. - * @param NbColumns numbers of columns. - * @param Columns Column descriptions. - * @return Status. - */ -DBA_Error_t -DBA_CreateTable( const char* TableName, int NbColumns, - const DBA_ColumnDesc_t Columns[] ); - -/** - * Destroy a table. - * - * @param TableName Table name. - * @return Status. - * @note Not implemented - */ -DBA_Error_t -DBA_DropTable( const char* TableName ); - - -/** - * Test for existence of a table. - * - * @param TableName Table name. - * @return Boolean value indicating if table exists or not. - */ -Boolean_t -DBA_TableExists( const char* TableName ); - -/** - * Define a binding between the columns of a table and a C structure. - * - * @param TableName table - * @param NbCol number of columns bindings - * @param ColBinding bindings - * @param StructSz Sizeof structure. - * @return Created binding, or NULL if binding could not be created. - */ -DBA_Binding_t* -DBA_CreateBinding( const char* TableName, - int NbCol, const DBA_ColumnBinding_t ColsBinding[], - Size_t StructSz ); - -/** - * Destroys a @ref DBA_Binding_t allocated with @ref - * DBA_CreateBinding. - * - * @param pBinding Pointer to binding. - * @return Status. - */ -DBA_Error_t -DBA_DestroyBinding( DBA_Binding_t* Binding ); - -/** - * Used to identify a pending db request - */ -typedef long DBA_ReqId_t; - -/** - * An asynchronous call returning this means that the function was called - * with invalid arguments. The application should check error status - * with DBA_GetLatestError() etc. - */ -#define DBA_INVALID_REQID 0 - -/** - * Callback function for transactions. - * Will be called in NBP process (Newton Batch Process). - * - * @note The implementation of the callback function is not allowed to - * make an asynchronous database call. 
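Putting the request/callback model together: the callback receives the request id, the DBA status and the underlying NDB error code, and DBA_INVALID_REQID returned from one of the row functions declared below (DBA_InsertRows() in this sketch) means the call itself was rejected. The names and the two-row payload are illustrative, and Binding is assumed to come from a successful DBA_CreateBinding() call.

static void insert_done(DBA_ReqId_t ReqId, DBA_Error_t Status,
                        DBA_ErrorCode_t NdbCode)
{
  if (Status != DBA_NO_ERROR) {
    /* the transaction failed; NdbCode is the NDB error code */
  }
}

void insert_people(const DBA_Binding_t* Binding,
                   const Person_t* p1, const Person_t* p2)
{
  const void* const rows[2] = { p1, p2 };

  DBA_ReqId_t req = DBA_InsertRows(Binding, rows, 2, insert_done);
  if (req == DBA_INVALID_REQID) {
    /* invalid arguments; see DBA_GetLatestErrorMsg() */
  }
  /* success or failure is reported later through insert_done() */
}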
- * - * @param ReqId Request identifier - * @param Status Status of the request - * @param ErrorCode Error code given by NDB - * @see DBA_Error_t - */ -typedef void (*DBA_AsyncCallbackFn_t)( DBA_ReqId_t ReqId, - DBA_Error_t Status, - DBA_ErrorCode_t ErrorCode ); -/** - * Insert row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of pointers to structures. - * @param NbRows No of rows to insert (i.e. length of pData array) - * @return Request identifier - * - * @note All the table columns must be part of the binding. - */ -DBA_ReqId_t -DBA_InsertRows( const DBA_Binding_t* pBinding, const void * const pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Insert row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of structures. - * @param NbRows No of rows to insert (i.e. length of pData array) - * @return Request identifier - * - * @note All the table columns must be part of the binding. - */ -DBA_ReqId_t -DBA_ArrayInsertRows( const DBA_Binding_t* pBinding, const void * pData, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Update row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of pointers to structures. Fields that are part of the - * key are used to generate the where clause, the - * other fields are used to update the row. - * @param NbRows No of rows to update (i.e. length of pData array). - * @return Request identifier - */ -DBA_ReqId_t -DBA_UpdateRows( const DBA_Binding_t* pBinding, const void * const pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Update row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of structures. Fields that are part of the - * key are used to generate the where clause, the - * other fields are used to update the row. - * @param NbRows No of rows to update (i.e. length of pData array). - * @return Request identifier - */ -DBA_ReqId_t -DBA_ArrayUpdateRows( const DBA_Binding_t* pBinding, const void * pData, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Delete row(s) from the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of pointers to structures. - * Only fields part of the primary key needs to be set. - * @param NbRows No of rows to delete (i.e. length of pData array) - * @return Request identifier - */ -DBA_ReqId_t -DBA_DeleteRows( const DBA_Binding_t* pBinding, const void * const pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - - -/** - * Delete row(s) from the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of structures. Only fields part of the primary - * key needs to be set. - * @param NbRows No of rows to delete (i.e. length of pData array) - * @return Request identifier - */ -DBA_ReqId_t -DBA_ArrayDeleteRows( const DBA_Binding_t* pBinding, const void * pData, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Updates/Inserts row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of pointers to structures. - * @param NbRows No of rows to update/insert (i.e. 
length of pData array) - * @return Request identifier - * @note All the table columns must be part of the binding. - */ -DBA_ReqId_t -DBA_WriteRows( const DBA_Binding_t* pBinding, const void * const pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Update/Insert row(s) in the table (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of structures. - * @param NbRows No of rows to update/insert (i.e. length of pData array) - * @return Request identifier - * @note All the table columns must be part of the binding. - */ -DBA_ReqId_t -DBA_ArrayWriteRows( const DBA_Binding_t* pBinding, const void * pData, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Read row(s) from a table of the database (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of pointers to structures. - * Only fields part of the primary key needs to be set. - * The other fields in the binding will be populated. - * @param NbRows No of rows to read (i.e. length of pData array) - * @return Request identifier - */ -DBA_ReqId_t -DBA_ReadRows( const DBA_Binding_t* pBinding, void * const pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Read row(s) from a table of the database (one transaction) - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of structures. - * Only fields part of the primary key needs to be set. - * The other fields in the binding will be populated. - * @param NbRows No of rows to read (i.e. length of pData array) - * @return Request identifier - */ -DBA_ReqId_t -DBA_ArrayReadRows( const DBA_Binding_t* pBinding, void * pData, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/****** THIS LINE IS 80 CHARACTERS WIDE - DO *NOT* EXCEED 80 CHARACTERS! ****/ - -/** - * Insert one row for each specified binding (as one transaction). - * - * @param pBindings Array of pointers to bindings. - * @param pData Array of pointers to structures. - * @param NbBindings No of bindings (tables) to insert into, - * i.e. length of arrays pBindings and pData - * @return Request identifier - * @note It is valid to specify the same binding twice - * (with corresponding data pointer) if you want to insert two - * rows in one table - */ -DBA_ReqId_t -DBA_MultiInsertRow(const DBA_Binding_t * const pBindings[], - const void * const pData[], - int NbBindings, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Update one row for each specified binding (as one transaction). - * - * @param pBindings Array of pointers to bindings. - * @param pData Array of pointers to structures. - * @param NbBindings No of bindings (tables) to insert into - * i.e. length of arrays pBindings and pData - * @return Request identifier - * @note It is valid to specify the same binding twice - * (with corresponding data pointer) if you want to update two - * rows in one table - */ -DBA_ReqId_t -DBA_MultiUpdateRow(const DBA_Binding_t * const pBindings[], - const void * const pData[], - int NbBindings, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Update/insert one row for each specified binding (as one transaction). - * - * @param pBindings Array of pointers to bindings. - * @param pData Array of pointers to structures. - * @param NbBindings No of bindings (tables) to insert into - * i.e. 
length of arrays pBindings and pData - * @return Request identifier - * @note It is valid to specify the same binding twice - * (with corresponding data pointer) if you want to update/insert two - * rows in one table - */ -DBA_ReqId_t -DBA_MultiWriteRow(const DBA_Binding_t * const pBindings[], - const void * const pData[], - int NbBindings, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Delete one row for each specified binding (as one transaction). - * - * @param pBindings Array of pointers to bindings. - * @param pData Array of pointers to structures. - * @param NbBindings No of bindings (tables) to insert into - * i.e. length of arrays pBindings and pData - * @return Request identifier - * @note It is valid to specify the same binding twice - * (with corresponding data pointer) if you want to delete two - * rows in one table - */ -DBA_ReqId_t -DBA_MultiDeleteRow(const DBA_Binding_t * const pBindings[], - const void * const pData[], - int NbBindings, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Read one row for each specified binding (as one transaction). - * - * @param pBindings Array of pointers to bindings. - * @param pData Array of pointers to structures. - * @param NbBindings No of bindings (tables) to insert into - * i.e. length of arrays pBindings and pData - * @return Request identifier - * @note It is valid to specify the same binding twice - * (with corresponding data pointer) if you want to read two - * rows in one table - */ -DBA_ReqId_t -DBA_MultiReadRow(const DBA_Binding_t * const pBindings[], - void * const pData[], - int NbBindings, - DBA_AsyncCallbackFn_t CbFunc ); - -/****** THIS LINE IS 80 CHARACTERS WIDE - DO *NOT* EXCEED 80 CHARACTERS! ****/ - -/** - * A structure used for bulk reads. - * The structure contains a pointer to the data and an indicator. - * After the bulk read has completed, the indicator is set to 1 if the row - * was found and to 0 if the row was not found. - * - */ -typedef struct DBA_BulkReadResultSet { - void * DataPtr; /**< Pointer to data. Only fields part of - primary key members needs - to be set before bulk read. */ - Boolean_t RowFoundIndicator; /**< This indicator has a valid value - only after bulk read has completed. - If the value is 1 then the row was found */ -} DBA_BulkReadResultSet_t; - -/** - * Read rows from a table of the database (potentially multiple transactions) - * The users should for each NbRows specify the fields part of the primary key - * - * @param pBinding Binding between table columns and struct fields. - * @param pData Array of DBA_BulkReadResultSet_t, with DataPtr pointing to - * structure. Only the fields which are part of the - * primary key need be set. - * The RowFoundIndicator will be set when the request returns. - * @param NbRows No of rows to read (i.e. length of pData array) - * @return Request identifier - * - */ -DBA_ReqId_t -DBA_BulkReadRows(const DBA_Binding_t * pBinding, - DBA_BulkReadResultSet_t pData[], - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** - * Read rows from several tables of the database in potentially multiple - * transactions. - * - *
- * The pData array must be organized as follows:
- *   NbRows with DataPtr pointing to structure of type pBindings[0]
- *   NbRows with DataPtr pointing to structure of type pBindings[1]
- *   ... 
- * Meaning that the pData array must be (NbBindings * NbRows) in length. - * - * The user should for each (NbRows * NbBindings) specify the primary key - * fields. - * - * @param pBindings Array of pointers to bindings - * @param pData Array of DBA_BulkReadResultSet_t. - * With DataPtr pointing to structure. Only the fields which - * are part of the key need be set. - * The RowFoundIndicator will be set when the operations returns. - * @param NbBindings No of bindings (i.e. length of pBindings array) - * @param NbRows No of rows per binding to read - * @return Request identifier - */ -DBA_ReqId_t -DBA_BulkMultiReadRows(const DBA_Binding_t * const pBindings[], - DBA_BulkReadResultSet_t pData[], - int NbBindings, - int NbRows, - DBA_AsyncCallbackFn_t CbFunc ); - -/** @} */ - -#endif diff --git a/storage/ndb/include/newtonapi/defs/pcn_types.h b/storage/ndb/include/newtonapi/defs/pcn_types.h deleted file mode 100644 index 0fd8332a276..00000000000 --- a/storage/ndb/include/newtonapi/defs/pcn_types.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PCN_TYPES_H -#define PCN_TYPES_H - -#include - -typedef size_t Size_t; - -typedef int Boolean_t; - -typedef unsigned UInt32_t; - -#define PCN_TRUE true -#define PCN_FALSE false - -#define PCN_SIZE_OF(s, m ) sizeof(((s *)0)->m) -#define PCN_OFFSET_OF(s, m) offsetof(s, m) - -#endif diff --git a/storage/ndb/include/portlib/NdbCondition.h b/storage/ndb/include/portlib/NdbCondition.h deleted file mode 100644 index 9568a19586e..00000000000 --- a/storage/ndb/include/portlib/NdbCondition.h +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_CONDITION_H -#define NDB_CONDITION_H - -#include "NdbMutex.h" - -#ifdef __cplusplus -extern "C" { -#endif - -struct NdbCondition; - - -/** - * Create a condition - * - * returnvalue: pointer to the condition structure - */ -struct NdbCondition* NdbCondition_Create(void); - -/** - * Wait for a condition, allows a thread to wait for - * a condition and atomically releases the associated mutex. 
- * - * p_cond: pointer to the condition structure - * p_mutex: pointer to the mutex structure - * returnvalue: 0 = succeeded, 1 = failed - */ -int NdbCondition_Wait(struct NdbCondition* p_cond, - NdbMutex* p_mutex); - -/* - * Wait for a condition with timeout, allows a thread to - * wait for a condition and atomically releases the associated mutex. - * - * @param p_cond - pointer to the condition structure - * @param p_mutex - pointer to the mutex structure - * @param msec - Wait for msec milli seconds the most - * @return 0 = succeeded, 1 = failed - * @ - */ -int -NdbCondition_WaitTimeout(struct NdbCondition* p_cond, - NdbMutex* p_mutex, - int msec); - - -/** - * Signal a condition - * - * p_cond: pointer to the condition structure - * returnvalue: 0 = succeeded, 1 = failed - */ -int NdbCondition_Signal(struct NdbCondition* p_cond); - - -/** - * Broadcast a condition - * - * p_cond: pointer to the condition structure - * returnvalue: 0 = succeeded, 1 = failed - */ -int NdbCondition_Broadcast(struct NdbCondition* p_cond); - -/** - * Destroy a condition - * - * p_cond: pointer to the condition structure - * returnvalue: 0 = succeeded, 1 = failed - */ -int NdbCondition_Destroy(struct NdbCondition* p_cond); - -#ifdef __cplusplus -} -#endif - -#endif - - diff --git a/storage/ndb/include/portlib/NdbConfig.h b/storage/ndb/include/portlib/NdbConfig.h deleted file mode 100644 index d85b5a28852..00000000000 --- a/storage/ndb/include/portlib/NdbConfig.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_CONFIG_H -#define NDB_CONFIG_H - -#ifdef __cplusplus -extern "C" { -#endif - -const char* NdbConfig_get_path(int *len); -void NdbConfig_SetPath(const char *path); -char* NdbConfig_NdbCfgName(int with_ndb_home); -char* NdbConfig_ErrorFileName(int node_id); -char* NdbConfig_ClusterLogFileName(int node_id); -char* NdbConfig_SignalLogFileName(int node_id); -char* NdbConfig_TraceFileName(int node_id, int file_no); -char* NdbConfig_NextTraceFileName(int node_id); -char* NdbConfig_PidFileName(int node_id); -char* NdbConfig_StdoutFileName(int node_id); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/portlib/NdbDaemon.h b/storage/ndb/include/portlib/NdbDaemon.h deleted file mode 100644 index 53b7dca4190..00000000000 --- a/storage/ndb/include/portlib/NdbDaemon.h +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_DAEMON_H -#define NDB_DAEMON_H - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Become a daemon. - * lockfile the "pid file" or other resource to lock exclusively - * logfile daemon output is directed here (input is set to /dev/null) - * if NULL, output redirection is not done - * flags none currently - * returns 0 on success, on error -1 - */ -extern int -NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags); - -/* - * Test if the daemon is running (file is locked). - * lockfile the "pid file" - * flags none currently - * return 0 no, 1 yes, -1 - */ -extern int -NdbDaemon_Test(const char* lockfile, unsigned flags); - -/* - * Kill the daemon. - * lockfile the "pid file" - * flags none currently - * return 0 killed, 1 not running, -1 other error - */ -extern int -NdbDaemon_Kill(const char* lockfile, unsigned flags); - -/* - * Pid from last call, either forked off or found in lock file. - */ -extern long NdbDaemon_DaemonPid; - -/* - * Error code from last failed call. - */ -extern int NdbDaemon_ErrorCode; - -/* - * Error text from last failed call. - */ -extern char NdbDaemon_ErrorText[]; - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/portlib/NdbEnv.h b/storage/ndb/include/portlib/NdbEnv.h deleted file mode 100644 index ce792693c02..00000000000 --- a/storage/ndb/include/portlib/NdbEnv.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_ENV_H -#define NDB_ENV_H - - -#ifdef __cplusplus -extern "C" { -#endif - - const char* NdbEnv_GetEnv(const char* name, char * buf, int buflen); - -#ifdef __cplusplus - } -#endif - -#endif - - - diff --git a/storage/ndb/include/portlib/NdbHost.h b/storage/ndb/include/portlib/NdbHost.h deleted file mode 100644 index de13c61b40b..00000000000 --- a/storage/ndb/include/portlib/NdbHost.h +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
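A sketch of how the NdbConfig_* file-name helpers and the NdbDaemon calls above fit together; the node id and the choice of pid/stdout files are illustrative, and ownership of the returned name strings is not addressed here.

  #include "NdbConfig.h"
  #include "NdbDaemon.h"
  #include <stdio.h>

  int start_as_daemon(int node_id)
  {
    /* Derive the pid/lock file and the stdout redirection file for this node. */
    char *lockfile = NdbConfig_PidFileName(node_id);
    char *logfile  = NdbConfig_StdoutFileName(node_id);

    if (NdbDaemon_Test(lockfile, 0) == 1) {          /* 1 = already running      */
      fprintf(stderr, "already running, pid %ld\n", NdbDaemon_DaemonPid);
      return 1;
    }
    if (NdbDaemon_Make(lockfile, logfile, 0) == -1) {/* 0 = success, -1 = error  */
      fprintf(stderr, "daemonize failed: %s\n", NdbDaemon_ErrorText);
      return -1;
    }
    return 0;
  }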
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_HOST_H -#define NDB_HOST_H - -#ifndef NDB_WIN32 -#include -#include -#endif - -#ifndef MAXHOSTNAMELEN -#define MAXHOSTNAMELEN 255 -#endif - -#ifdef __cplusplus -extern "C" { -#endif - - int NdbHost_GetHostName(char*); - int NdbHost_GetProcessId(); - -#ifdef __cplusplus - } -#endif - -#endif - - - diff --git a/storage/ndb/include/portlib/NdbMain.h b/storage/ndb/include/portlib/NdbMain.h deleted file mode 100644 index 4e40786b7da..00000000000 --- a/storage/ndb/include/portlib/NdbMain.h +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBMAIN_H -#define NDBMAIN_H - -#define NDB_MAIN(name) \ -int main(int argc, const char** argv) - -#define NDB_COMMAND(name, str_name, syntax, description, stacksize) \ -int main(int argc, const char** argv) - -#endif diff --git a/storage/ndb/include/portlib/NdbMem.h b/storage/ndb/include/portlib/NdbMem.h deleted file mode 100644 index 865a7d1a2a1..00000000000 --- a/storage/ndb/include/portlib/NdbMem.h +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_MEM_H -#define NDB_MEM_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * NdbMem_Create - * Create and initalise internal data structures for Ndb - */ -void NdbMem_Create(void); - - -/** - * NdbMem_Destroy - * Destroy all memory allocated by NdbMem - */ -void NdbMem_Destroy(void); - -/** - * NdbMem_Allocate - * Allocate size of memory - * @parameter size - size in bytes of memory to allocate - * @returns - pointer to memory if succesful otherwise NULL - */ -void* NdbMem_Allocate(size_t size); - -/** - * NdbMem_AllocateAlign - * Allocate size of memory - * @parameter size - size in bytes of memory to allocate - * @paramter alignment - byte boundary to align the data at - * @returns - pointer to memory if succesful otherwise NULL - */ -void* NdbMem_AllocateAlign(size_t size, size_t alignment); - - -/** - * NdbMem_Free - * Free the memory that ptr points to - * @parameter ptr - pointer to the memory to free - */ -void NdbMem_Free(void* ptr); - -/** - * NdbMem_MemLockAll - * Locks virtual memory in main memory - */ -int NdbMem_MemLockAll(int); - -/** - * NdbMem_MemUnlockAll - * Unlocks virtual memory - */ -int NdbMem_MemUnlockAll(void); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/portlib/NdbMutex.h b/storage/ndb/include/portlib/NdbMutex.h deleted file mode 100644 index 6de102238d4..00000000000 --- a/storage/ndb/include/portlib/NdbMutex.h +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
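A minimal NdbMem sketch, assuming blocks from both NdbMem_Allocate and NdbMem_AllocateAlign are returned through NdbMem_Free; the sizes and the 4 KiB alignment are illustrative.

  #include "NdbMem.h"

  void mem_example(void)
  {
    NdbMem_Create();                               /* init internal structures  */

    void *buf  = NdbMem_Allocate(4096);            /* plain allocation          */
    void *page = NdbMem_AllocateAlign(8192, 4096); /* 4 KiB-aligned block       */

    if (buf)  NdbMem_Free(buf);
    if (page) NdbMem_Free(page);

    NdbMem_Destroy();                              /* release everything        */
  }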
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_MUTEX_H -#define NDB_MUTEX_H - -#include - -#ifdef NDB_WIN32 -#include -#include -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined NDB_WIN32 -typedef CRITICAL_SECTION NdbMutex; -#else -#include -typedef pthread_mutex_t NdbMutex; -#endif - -/** - * Create a mutex - * - * p_mutex: pointer to the mutex structure - * returnvalue: pointer to the mutex structure - */ -NdbMutex* NdbMutex_Create(void); - -/** - * Destroy a mutex - * - * * p_mutex: pointer to the mutex structure - * * returnvalue: 0 = succeeded, -1 = failed - */ -int NdbMutex_Destroy(NdbMutex* p_mutex); - -/** - * Lock a mutex - * - * * p_mutex: pointer to the mutex structure - * * returnvalue: 0 = succeeded, -1 = failed - */ -int NdbMutex_Lock(NdbMutex* p_mutex); - -/** - * Unlock a mutex - * - * * p_mutex: pointer to the mutex structure - * * returnvalue: 0 = succeeded, -1 = failed - */ -int NdbMutex_Unlock(NdbMutex* p_mutex); - -/** - * Try to lock a mutex - * - * * p_mutex: pointer to the mutex structure - * * returnvalue: 0 = succeeded, -1 = failed - */ -int NdbMutex_Trylock(NdbMutex* p_mutex); - -#ifdef __cplusplus -} -#endif - -#ifdef __cplusplus -class NdbLockable { - friend class Guard; -public: - NdbLockable() { m_mutex = NdbMutex_Create(); } - ~NdbLockable() { NdbMutex_Destroy(m_mutex); } - - void lock() { NdbMutex_Lock(m_mutex); } - void unlock(){ NdbMutex_Unlock(m_mutex);} - bool tryLock(){ return NdbMutex_Trylock(m_mutex) == 0;} - - NdbMutex* getMutex() {return m_mutex;}; - -protected: - NdbMutex * m_mutex; -}; - -class Guard { -public: - Guard(NdbMutex *mtx) : m_mtx(mtx) { NdbMutex_Lock(m_mtx); }; - Guard(NdbLockable & l) : m_mtx(l.m_mutex) { NdbMutex_Lock(m_mtx); }; - ~Guard() { NdbMutex_Unlock(m_mtx); }; -private: - NdbMutex *m_mtx; -}; - -#endif - -#endif diff --git a/storage/ndb/include/portlib/NdbSleep.h b/storage/ndb/include/portlib/NdbSleep.h deleted file mode 100644 index 4f0d93cb48f..00000000000 --- a/storage/ndb/include/portlib/NdbSleep.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
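The NdbMutex/NdbCondition primitives above (plus the C++ Guard helper) compose into the usual monitor pattern; a minimal sketch, with the WorkQueue struct and the 5-second poll interval purely illustrative.

  #include "NdbMutex.h"
  #include "NdbCondition.h"

  /* Illustrative producer/consumer state protected by the primitives above. */
  struct WorkQueue {
    NdbMutex*            mutex;
    struct NdbCondition* notEmpty;
    int                  pending;
  };

  void queue_init(WorkQueue* q)
  {
    q->mutex    = NdbMutex_Create();
    q->notEmpty = NdbCondition_Create();
    q->pending  = 0;
  }

  void queue_post(WorkQueue* q)
  {
    Guard g(q->mutex);                 /* scoped lock, unlocked on destruction */
    q->pending++;
    NdbCondition_Signal(q->notEmpty);
  }

  void queue_take(WorkQueue* q)
  {
    NdbMutex_Lock(q->mutex);
    while (q->pending == 0)            /* re-check: wakeups may be spurious    */
      NdbCondition_WaitTimeout(q->notEmpty, q->mutex, 5000 /* ms */);
    q->pending--;
    NdbMutex_Unlock(q->mutex);
  }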
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBSLEEP_H -#define NDBSLEEP_H - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Sleep for some time - * - * returnvalue: true = time is up, false = failed - */ -int NdbSleep_MicroSleep(int microseconds); -int NdbSleep_MilliSleep(int milliseconds); -int NdbSleep_SecSleep(int seconds); - -#ifdef __cplusplus -} -#endif - - -#endif diff --git a/storage/ndb/include/portlib/NdbTCP.h b/storage/ndb/include/portlib/NdbTCP.h deleted file mode 100644 index 0e801471440..00000000000 --- a/storage/ndb/include/portlib/NdbTCP.h +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_TCP_H -#define NDB_TCP_H - -#include -#include - -#if defined NDB_WIN32 - -/** - * Include files needed - */ -#include -#include - -#define InetErrno WSAGetLastError() -#define EWOULDBLOCK WSAEWOULDBLOCK -#define NDB_SOCKET_TYPE SOCKET -#define NDB_INVALID_SOCKET INVALID_SOCKET -#define _NDB_CLOSE_SOCKET(x) closesocket(x) - -#else - -/** - * Include files needed - */ -#include - -#define NDB_NONBLOCK O_NONBLOCK -#define NDB_SOCKET_TYPE int -#define NDB_INVALID_SOCKET -1 -#define _NDB_CLOSE_SOCKET(x) ::close(x) - -#define InetErrno errno - -#endif - -#define NDB_SOCKLEN_T SOCKET_SIZE_TYPE - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Convert host name or ip address to in_addr - * - * Returns 0 on success - * -1 on failure - * - * Implemented as: - * gethostbyname - * if not success - * inet_addr - */ -int Ndb_getInAddr(struct in_addr * dst, const char *address); - -#ifdef DBUG_OFF -#define NDB_CLOSE_SOCKET(fd) _NDB_CLOSE_SOCKET(fd) -#else -int NDB_CLOSE_SOCKET(int fd); -#endif - -int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/portlib/NdbThread.h b/storage/ndb/include/portlib/NdbThread.h deleted file mode 100644 index fd109283f25..00000000000 --- a/storage/ndb/include/portlib/NdbThread.h +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_THREAD_H -#define NDB_THREAD_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef enum NDB_THREAD_PRIO_ENUM { - NDB_THREAD_PRIO_HIGHEST, - NDB_THREAD_PRIO_HIGH, - NDB_THREAD_PRIO_MEAN, - NDB_THREAD_PRIO_LOW, - NDB_THREAD_PRIO_LOWEST -} NDB_THREAD_PRIO; - -typedef void* (NDB_THREAD_FUNC)(void*); -typedef void* NDB_THREAD_ARG; -typedef size_t NDB_THREAD_STACKSIZE; - -struct NdbThread; - -/* - Method to block/unblock thread from receiving KILL signal with - signum set in g_ndb_shm_signum in a portable manner. -*/ -#ifdef NDB_SHM_TRANSPORTER -void NdbThread_set_shm_sigmask(my_bool block); -#endif - -/** - * Create a thread - * - * * p_thread_func: pointer of the function to run in the thread - * * p_thread_arg: pointer to argument to be passed to the thread - * * thread_stack_size: stack size for this thread - * * p_thread_name: pointer to name of this thread - * * returnvalue: pointer to the created thread - */ -struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func, - NDB_THREAD_ARG *p_thread_arg, - const NDB_THREAD_STACKSIZE thread_stack_size, - const char* p_thread_name, - NDB_THREAD_PRIO thread_prio); - -/** - * Destroy a thread - * Deallocates memory for thread - * And NULL the pointer - * - */ -void NdbThread_Destroy(struct NdbThread** p_thread); - - -/** - * Waitfor a thread, suspend the execution of the calling thread - * until the wait_thread_id completes - * - * * p_wait_thread, pointer to the thread to wait for - * * status: exit code from thread waited for - * * returnvalue: true = succeded, false = failed - */ -int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status); - -/** - * Exit thread, terminates the calling thread - * - * * status: exit code - */ -void NdbThread_Exit(void *status); - -/** - * Set thread concurrency level - * - * * - */ -int NdbThread_SetConcurrencyLevel(int level); - - -#ifdef __cplusplus -} -#endif - -#endif - - - - - - - - - diff --git a/storage/ndb/include/portlib/NdbTick.h b/storage/ndb/include/portlib/NdbTick.h deleted file mode 100644 index b1482df06aa..00000000000 --- a/storage/ndb/include/portlib/NdbTick.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
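A sketch tying NdbThread_Create/NdbThread_WaitFor to the NdbSleep helpers above; the stack size, thread name and loop count are illustrative.

  #include "NdbThread.h"
  #include "NdbSleep.h"

  extern "C" void* worker(void* arg)
  {
    int* counter = (int*)arg;
    for (int i = 0; i < 10; i++) {
      (*counter)++;
      NdbSleep_MilliSleep(100);              /* 100 ms between iterations */
    }
    return 0;
  }

  int run_worker()
  {
    int counter = 0;
    void* arg = &counter;
    struct NdbThread* t =
      NdbThread_Create(worker, &arg,
                       64 * 1024,            /* stack size, illustrative  */
                       "example-worker",
                       NDB_THREAD_PRIO_MEAN);
    void* status;
    NdbThread_WaitFor(t, &status);           /* join the thread           */
    NdbThread_Destroy(&t);                   /* free and NULL the handle  */
    return counter;
  }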
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_TICK_H -#define NDB_TICK_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef Uint64 NDB_TICKS; - -/** - * Returns the current millisecond since 1970 - */ -NDB_TICKS NdbTick_CurrentMillisecond(void); - -/** - * Get current micro second - * Second method is simply abstraction on top of the first - * - * Returns 0 - Success - */ -int NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros); - -struct MicroSecondTimer { - NDB_TICKS seconds; - NDB_TICKS micro_seconds; -}; - -/** - * Get time between start and stop time in microseconds - * Abstraction to get time in struct - * - * 0 means stop happened at or before start time - */ -NDB_TICKS NdbTick_getMicrosPassed(struct MicroSecondTimer start, - struct MicroSecondTimer stop); -int NdbTick_getMicroTimer(struct MicroSecondTimer* time_now); - -#ifdef __cplusplus -} -#endif - -#endif - diff --git a/storage/ndb/include/portlib/PortDefs.h b/storage/ndb/include/portlib/PortDefs.h deleted file mode 100644 index fd6e4ea30bb..00000000000 --- a/storage/ndb/include/portlib/PortDefs.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PORT_DEFS_H -#define PORT_DEFS_H -/* - This file contains varoius declarations/definitions needed in the port of AXEVM to NT, as well as backporting - to Solaris... - - $Id: PortDefs.h,v 1.5 2003/10/07 07:59:59 mikael Exp $ -*/ - -#ifdef NDB_ALPHA -#ifdef NDB_GCC /* only for NDB_ALPHA */ -extern int gnuShouldNotUseRPCC(); -#define RPCC() gnuShouldNotUseRPCC(); -#else -#define RPCC() ((int)__asm(" rpcc v0;")) -#define MB() __asm(" mb;"); -#define WMB() __asm(" wmb;"); -#ifdef USE_INITIALSP -#define IS_IP() (__asm(" mov sp,v0;") < IPinitialSP) -#else /* USE_INITIALSP */ -#define IS_IP() (((__asm(" rpcc v0;") >> 32) & 0x7) == IP_CPU) -#endif -#endif /* NDB_GCC */ -#else /* NDB_ALPHA */ -#if defined NDB_SPARC -#define MB() asm ("membar 0x0;"); /* LoadLoad */ -#define WMB() asm ("membar 0x3;"); /* StoreStore */ -#else /* NDB_SPARC */ -#define MB() -#define WMB() -#endif /* NDB_SPARC */ -#define IS_IP() (1==1) -extern int shouldNotUseRPCC(); -#define RPCC() shouldNotUseRPCC(); -#endif /* NDB_ALPHA */ - -#endif diff --git a/storage/ndb/include/portlib/prefetch.h b/storage/ndb/include/portlib/prefetch.h deleted file mode 100644 index f97e9e06b8e..00000000000 --- a/storage/ndb/include/portlib/prefetch.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB, 2009 Sun Microsystems, Inc. 
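The MicroSecondTimer pair above is the usual way to measure elapsed time with this portability layer; a short sketch.

  #include "NdbTick.h"

  /* Time an arbitrary piece of work in microseconds. */
  NDB_TICKS time_work(void (*work)(void))
  {
    struct MicroSecondTimer start, stop;
    NdbTick_getMicroTimer(&start);
    work();
    NdbTick_getMicroTimer(&stop);
    return NdbTick_getMicrosPassed(start, stop);   /* 0 if stop <= start */
  }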
- Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PREFETCH_H -#define PREFETCH_H - -#ifdef NDB_FORTE6 -#include -#endif - -#ifdef USE_PREFETCH -#define PREFETCH(addr) prefetch(addr) -#else -#define PREFETCH(addr) -#endif - -#ifdef USE_PREFETCH -#define WRITEHINT(addr) writehint(addr) -#else -#define WRITEHINT(addr) -#endif - -#include "PortDefs.h" - -#ifdef NDB_FORTE6 -#pragma optimize("", off) -#endif -inline void prefetch(void* p) -{ -#ifdef NDB_ALPHA - __asm(" ldl r31,0(a0);", p); -#endif /* NDB_ALPHA */ -#ifdef NDB_FORTE6 - sun_prefetch_read_once(p); -#else - (void)p; -#endif -} - -inline void writehint(void* p) -{ -#ifdef NDB_ALPHA - __asm(" wh64 (a0);", p); -#endif /* NDB_ALPHA */ -#ifdef NDB_FORTE6 - sun_prefetch_write_once(p); -#else - (void)p; -#endif -} -#ifdef NDB_FORTE6 -#pragma optimize("", on) -#endif - -#endif - diff --git a/storage/ndb/include/transporter/TransporterCallback.hpp b/storage/ndb/include/transporter/TransporterCallback.hpp deleted file mode 100644 index 55bbe49fdf9..00000000000 --- a/storage/ndb/include/transporter/TransporterCallback.hpp +++ /dev/null @@ -1,358 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//**************************************************************************** -// -// AUTHOR -// Åsa Fransson -// -// NAME -// TransporterCallback -// -// -//***************************************************************************/ -#ifndef TRANSPORTER_CALLBACK_H -#define TRANSPORTER_CALLBACK_H - -#include -#include "TransporterDefinitions.hpp" - - -/** - * Call back functions - */ - -/** - * The execute function - */ -void -execute(void * callbackObj, - SignalHeader * const header, - Uint8 prio, - Uint32 * const signalData, - LinearSectionPtr ptr[3]); - -/** - * A function to avoid job buffer overflow in NDB kernel, empty in API - * Non-zero return means we executed signals. This is necessary information - * to the transporter to ensure that it properly uses the transporter after - * coming back again. 
- */ -int -checkJobBuffer(); - -/** - * Report send length - */ -void -reportSendLen(void * callbackObj, - NodeId nodeId, Uint32 count, Uint64 bytes); - -/** - * Report average receive length - */ -void -reportReceiveLen(void * callbackObj, - NodeId nodeId, Uint32 count, Uint64 bytes); - -/** - * Report connection established - */ -void -reportConnect(void * callbackObj, NodeId nodeId); - -/** - * Report connection broken - */ - -void -reportDisconnect(void * callbackObj, - NodeId nodeId, Uint32 errNo); - -#define TE_DO_DISCONNECT 0x8000 - -enum TransporterError { - TE_NO_ERROR = 0, - /** - * TE_ERROR_CLOSING_SOCKET - * - * Error found during closing of socket - * - * Recommended behavior: Ignore - */ - TE_ERROR_CLOSING_SOCKET = 0x1, - - /** - * TE_ERROR_IN_SELECT_BEFORE_ACCEPT - * - * Error found during accept (just before) - * The transporter will retry. - * - * Recommended behavior: Ignore - * (or possible do setPerformState(PerformDisconnect) - */ - TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2, - - /** - * TE_INVALID_MESSAGE_LENGTH - * - * Error found in message (message length) - * - * Recommended behavior: setPerformState(PerformDisconnect) - */ - TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT, - - /** - * TE_INVALID_CHECKSUM - * - * Error found in message (checksum) - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT, - - /** - * TE_COULD_NOT_CREATE_SOCKET - * - * Error found while creating socket - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - TE_COULD_NOT_CREATE_SOCKET = 0x5, - - /** - * TE_COULD_NOT_BIND_SOCKET - * - * Error found while binding server socket - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - TE_COULD_NOT_BIND_SOCKET = 0x6, - - /** - * TE_LISTEN_FAILED - * - * Error found while listening to server socket - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - TE_LISTEN_FAILED = 0x7, - - /** - * TE_ACCEPT_RETURN_ERROR - * - * Error found during accept - * The transporter will retry. 
- * - * Recommended behavior: Ignore - * (or possible do setPerformState(PerformDisconnect) - */ - TE_ACCEPT_RETURN_ERROR = 0x8 - - /** - * TE_SHM_DISCONNECT - * - * The remote node has disconnected - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT - - /** - * TE_SHM_IPC_STAT - * - * Unable to check shm segment - * probably because remote node - * has disconnected and removed it - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT - - /** - * Permanent error - */ - ,TE_SHM_IPC_PERMANENT = 0x21 - - /** - * TE_SHM_UNABLE_TO_CREATE_SEGMENT - * - * Unable to create shm segment - * probably os something error - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd - - /** - * TE_SHM_UNABLE_TO_ATTACH_SEGMENT - * - * Unable to attach shm segment - * probably invalid group / user - * - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe - - /** - * TE_SHM_UNABLE_TO_REMOVE_SEGMENT - * - * Unable to remove shm segment - * - * Recommended behavior: Ignore (not much to do) - * Print warning to logfile - */ - ,TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf - - ,TE_TOO_SMALL_SIGID = 0x10 - ,TE_TOO_LARGE_SIGID = 0x11 - ,TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT - ,TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT - - /** - * TE_SIGNAL_LOST_SEND_BUFFER_FULL - * - * Send buffer is full, and trying to force send fails - * a signal is dropped!! very bad very bad - * - */ - ,TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT - - /** - * TE_SIGNAL_LOST - * - * Send failed for unknown reason - * a signal is dropped!! very bad very bad - * - */ - ,TE_SIGNAL_LOST = 0x15 - - /** - * TE_SEND_BUFFER_FULL - * - * The send buffer was full, but sleeping for a while solved it - */ - ,TE_SEND_BUFFER_FULL = 0x16 - - /** - * TE_SCI_UNABLE_TO_CLOSE_CHANNEL - * - * Unable to close the sci channel and the resources allocated by - * the sisci api. - */ - ,TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22 - - /** - * TE_SCI_LINK_ERROR - * - * There is no link from this node to the switch. - * No point in continuing. Must check the connections. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_LINK_ERROR = 0x0017 - - /** - * TE_SCI_UNABLE_TO_START_SEQUENCE - * - * Could not start a sequence, because system resources - * are exumed or no sequence has been created. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT - - /** - * TE_SCI_UNABLE_TO_REMOVE_SEQUENCE - * - * Could not remove a sequence - */ - ,TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT - - /** - * TE_SCI_UNABLE_TO_CREATE_SEQUENCE - * - * Could not create a sequence, because system resources are - * exempted. Must reboot. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT - - /** - * TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR - * - * Tried to send data on redundant link but failed. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT - - /** - * TE_SCI_CANNOT_INIT_LOCALSEGMENT - * - * Cannot initialize local segment. A whole lot of things has - * gone wrong (no system resources). Must reboot. 
- * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT - - /** - * TE_SCI_CANNOT_MAP_REMOTESEGMENT - * - * Cannot map remote segment. No system resources are left. - * Must reboot system. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNECT - - /** - * TE_SCI_UNABLE_TO_UNMAP_SEGMENT - * - * Cannot free the resources used by this segment (step 1). - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT - - /** - * TE_SCI_UNABLE_TO_REMOVE_SEGMENT - * - * Cannot free the resources used by this segment (step 2). - * Cannot guarantee that enough resources exist for NDB - * to map more segment - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNECT - - /** - * TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT - * - * Cannot disconnect from a remote segment. - * Recommended behavior: setPerformState(PerformDisonnect) - */ - ,TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT - - /* Used 0x21 */ - /* Used 0x22 */ -}; - -/** - * Report error - */ -void -reportError(void * callbackObj, NodeId nodeId, TransporterError errorCode, - const char *info = 0); - -void -transporter_recv_from(void* callbackObj, NodeId node); - -#endif diff --git a/storage/ndb/include/transporter/TransporterDefinitions.hpp b/storage/ndb/include/transporter/TransporterDefinitions.hpp deleted file mode 100644 index 328f3c86906..00000000000 --- a/storage/ndb/include/transporter/TransporterDefinitions.hpp +++ /dev/null @@ -1,131 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TransporterDefinitions_H -#define TransporterDefinitions_H - -#include -#include -#include - -/** - * The maximum number of transporters allowed - * A maximum is needed to be able to allocate the array of transporters - */ -const int MAX_NTRANSPORTERS = 128; - -/** - * The sendbuffer limit after which the contents of the buffer is sent - */ -const int TCP_SEND_LIMIT = 64000; - -enum SendStatus { - SEND_OK = 0, - SEND_BLOCKED = 1, - SEND_DISCONNECTED = 2, - SEND_BUFFER_FULL = 3, - SEND_MESSAGE_TOO_BIG = 4, - SEND_UNKNOWN_NODE = 5 -}; - -/** - * Protocol6 Header + - * (optional signal id) + (optional checksum) + (signal data) - */ -//const Uint32 MAX_MESSAGE_SIZE = (12+4+4+(4*25)); -const Uint32 MAX_MESSAGE_SIZE = (12+4+4+(4*25)+(3*4)+4*4096); - -/** - * TransporterConfiguration - * - * used for setting up a transporter. the union member specific is for - * information specific to a transporter type. 
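One way an upper layer might implement the reportError() callback declared above, honouring the TE_DO_DISCONNECT bit carried by several error codes; the logging and the commented-out disconnect hook are illustrative only.

  #include "TransporterCallback.hpp"
  #include <cstdio>

  /* A possible upper-layer implementation: log the error and honour the
     TE_DO_DISCONNECT bit that codes such as TE_INVALID_CHECKSUM carry. */
  void reportError(void* callbackObj, NodeId nodeId, TransporterError errorCode,
                   const char* info)
  {
    (void)callbackObj;
    std::fprintf(stderr, "transporter error 0x%x from node %u%s%s\n",
                 (unsigned)errorCode, (unsigned)nodeId,
                 info ? ": " : "", info ? info : "");

    if (errorCode & TE_DO_DISCONNECT) {
      /* request_disconnect(nodeId);  -- hypothetical hook; the headers only
         recommend setPerformState(PerformDisconnect) for these codes */
    }
  }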
- */ -struct TransporterConfiguration { - Int32 s_port; // negative port number implies dynamic port - const char *remoteHostName; - const char *localHostName; - NodeId remoteNodeId; - NodeId localNodeId; - NodeId serverNodeId; - bool checksum; - bool signalId; - bool isMgmConnection; // is a mgm connection, requires transforming - - union { // Transporter specific configuration information - - struct { - Uint32 sendBufferSize; // Size of SendBuffer of priority B - Uint32 maxReceiveSize; // Maximum no of bytes to receive - } tcp; - - struct { - Uint32 shmKey; - Uint32 shmSize; - int signum; - } shm; - - struct { - Uint32 prioASignalSize; - Uint32 prioBSignalSize; - } ose; - - struct { - Uint32 sendLimit; // Packet size - Uint32 bufferSize; // Buffer size - - Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host - - Uint32 remoteSciNodeId0; // SCInodeId for adapter 1 - Uint32 remoteSciNodeId1; // SCInodeId for adapter 2 - } sci; - }; -}; - -struct SignalHeader { - Uint32 theVerId_signalNumber; // 4 bit ver id - 16 bit gsn - Uint32 theReceiversBlockNumber; // Only 16 bit blocknum - Uint32 theSendersBlockRef; - Uint32 theLength; - Uint32 theSendersSignalId; - Uint32 theSignalId; - Uint16 theTrace; - Uint8 m_noOfSections; - Uint8 m_fragmentInfo; -}; /** 7x4 = 28 Bytes */ - -struct LinearSectionPtr { - Uint32 sz; - Uint32 * p; -}; - -struct SegmentedSectionPtr { - Uint32 sz; - Uint32 i; - struct SectionSegment * p; - - SegmentedSectionPtr() {} - SegmentedSectionPtr(Uint32 sz_arg, Uint32 i_arg, - struct SectionSegment *p_arg) - :sz(sz_arg), i(i_arg), p(p_arg) - {} - void setNull() { p = 0;} - bool isNull() const { return p == 0;} -}; - -class NdbOut & operator <<(class NdbOut & out, SignalHeader & sh); - -#endif // Define of TransporterDefinitions_H diff --git a/storage/ndb/include/transporter/TransporterRegistry.hpp b/storage/ndb/include/transporter/TransporterRegistry.hpp deleted file mode 100644 index a142f4da18b..00000000000 --- a/storage/ndb/include/transporter/TransporterRegistry.hpp +++ /dev/null @@ -1,343 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//**************************************************************************** -// -// NAME -// TransporterRegistry -// -// DESCRIPTION -// TransporterRegistry (singelton) is the interface to the -// transporter layer. It handles transporter states and -// holds the transporter arrays. -// -//***************************************************************************/ -#ifndef TransporterRegistry_H -#define TransporterRegistry_H - -#include "TransporterDefinitions.hpp" -#include -#include - -#include - -#include - -// A transporter is always in an IOState. -// NoHalt is used initially and as long as it is no restrictions on -// sending or receiving. 
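A sketch of filling the TCP branch of TransporterConfiguration above before handing it to the registry's createTCPTransporter(); host names, node ids and buffer sizes are illustrative.

  #include "TransporterDefinitions.hpp"

  /* Configure a TCP transporter between node 1 (server side) and node 2. */
  TransporterConfiguration make_tcp_config()
  {
    TransporterConfiguration conf;
    conf.s_port          = -1;             /* negative => dynamic port       */
    conf.remoteHostName  = "ndb-host-2";
    conf.localHostName   = "ndb-host-1";
    conf.remoteNodeId    = 2;
    conf.localNodeId     = 1;
    conf.serverNodeId    = 1;              /* which side acts as server      */
    conf.checksum        = true;
    conf.signalId        = true;
    conf.isMgmConnection = false;
    conf.tcp.sendBufferSize = 256 * 1024;  /* priority-B send buffer         */
    conf.tcp.maxReceiveSize = 64 * 1024;   /* max bytes per receive          */
    return conf;
  }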
-enum IOState { - NoHalt = 0, - HaltInput = 1, - HaltOutput = 2, - HaltIO = 3 -}; - -enum TransporterType { - tt_TCP_TRANSPORTER = 1, - tt_SCI_TRANSPORTER = 2, - tt_SHM_TRANSPORTER = 3 - // ID 4 was OSE Transporter which has been removed. Don't use ID 4. -}; - -static const char *performStateString[] = - { "is connected", - "is trying to connect", - "does nothing", - "is trying to disconnect" }; - -class Transporter; -class TCP_Transporter; -class SCI_Transporter; -class SHM_Transporter; - -class TransporterRegistry; -class SocketAuthenticator; - -class TransporterService : public SocketServer::Service { - SocketAuthenticator * m_auth; - TransporterRegistry * m_transporter_registry; -public: - TransporterService(SocketAuthenticator *auth= 0) - { - m_auth= auth; - m_transporter_registry= 0; - } - void setTransporterRegistry(TransporterRegistry *t) - { - m_transporter_registry= t; - } - SocketServer::Session * newSession(NDB_SOCKET_TYPE socket); -}; - -/** - * @class TransporterRegistry - * @brief ... - */ -class TransporterRegistry { - friend class SHM_Transporter; - friend class Transporter; - friend class TransporterService; -public: - /** - * Constructor - */ - TransporterRegistry(void * callback = 0 , - unsigned maxTransporters = MAX_NTRANSPORTERS, - unsigned sizeOfLongSignalMemory = 100); - - /** - * this handle will be used in the client connect thread - * to fetch information on dynamic ports. The old handle - * (if set) is destroyed, and this is destroyed by the destructor - */ - void set_mgm_handle(NdbMgmHandle h); - NdbMgmHandle get_mgm_handle(void) { return m_mgm_handle; }; - - bool init(NodeId localNodeId); - - /** - * after a connect from client, perform connection using correct transporter - */ - bool connect_server(NDB_SOCKET_TYPE sockfd); - - bool connect_client(NdbMgmHandle *h); - - /** - * Given a SocketClient, creates a NdbMgmHandle, turns it into a transporter - * and returns the socket. - */ - NDB_SOCKET_TYPE connect_ndb_mgmd(SocketClient *sc); - - /** - * Given a connected NdbMgmHandle, turns it into a transporter - * and returns the socket. - */ - NDB_SOCKET_TYPE connect_ndb_mgmd(NdbMgmHandle *h); - - /** - * Remove all transporters - */ - void removeAll(); - - /** - * Disconnect all transporters - */ - void disconnectAll(); - - /** - * Stops the server, disconnects all the transporter - * and deletes them and remove it from the transporter arrays - */ - ~TransporterRegistry(); - - bool start_service(SocketServer& server); - bool start_clients(); - bool stop_clients(); - void start_clients_thread(); - void update_connections(); - - /** - * Start/Stop receiving - */ - void startReceiving(); - void stopReceiving(); - - /** - * Start/Stop sending - */ - void startSending(); - void stopSending(); - - // A transporter is always in a PerformState. - // PerformIO is used initially and as long as any of the events - // PerformConnect, ... 
- enum PerformState { - CONNECTED = 0, - CONNECTING = 1, - DISCONNECTED = 2, - DISCONNECTING = 3 - }; - const char *getPerformStateString(NodeId nodeId) const - { return performStateString[(unsigned)performStates[nodeId]]; }; - - /** - * Get and set methods for PerformState - */ - void do_connect(NodeId node_id); - void do_disconnect(NodeId node_id); - bool is_connected(NodeId node_id) { return performStates[node_id] == CONNECTED; }; - void report_connect(NodeId node_id); - void report_disconnect(NodeId node_id, int errnum); - - /** - * Get and set methods for IOState - */ - IOState ioState(NodeId nodeId); - void setIOState(NodeId nodeId, IOState state); - - /** - * createTransporter - * - * If the config object indicates that the transporter - * to be created will act as a server and no server is - * started, startServer is called. A transporter of the selected kind - * is created and it is put in the transporter arrays. - */ - bool createTCPTransporter(struct TransporterConfiguration * config); - bool createSCITransporter(struct TransporterConfiguration * config); - bool createSHMTransporter(struct TransporterConfiguration * config); - - /** - * Get free buffer space - * - * Get #free bytes in send buffer for node - */ - Uint32 get_free_buffer(Uint32 node) const ; - - /** - * prepareSend - * - * When IOState is HaltOutput or HaltIO do not send or insert any - * signals in the SendBuffer, unless it is intended for the remote - * CMVMI block (blockno 252) - * Perform prepareSend on the transporter. - * - * NOTE signalHeader->xxxBlockRef should contain block numbers and - * not references - */ - SendStatus prepareSend(const SignalHeader * const signalHeader, Uint8 prio, - const Uint32 * const signalData, - NodeId nodeId, - const LinearSectionPtr ptr[3]); - - SendStatus prepareSend(const SignalHeader * const signalHeader, Uint8 prio, - const Uint32 * const signalData, - NodeId nodeId, - class SectionSegmentPool & pool, - const SegmentedSectionPtr ptr[3]); - - /** - * external_IO - * - * Equal to: poll(...); perform_IO() - * - */ - void external_IO(Uint32 timeOutMillis); - - Uint32 pollReceive(Uint32 timeOutMillis); - void performReceive(); - void performSend(); - - /** - * Force sending if more than or equal to sendLimit - * number have asked for send. Returns 0 if not sending - * and 1 if sending. - */ - int forceSendCheck(int sendLimit); - -#ifdef DEBUG_TRANSPORTER - void printState(); -#endif - - class Transporter_interface { - public: - NodeId m_remote_nodeId; - int m_s_service_port; // signed port number - const char *m_interface; - }; - Vector m_transporter_interface; - void add_transporter_interface(NodeId remoteNodeId, const char *interf, - int s_port); // signed port. 
<0 is dynamic - Transporter* get_transporter(NodeId nodeId); - NodeId get_localNodeId() { return localNodeId; }; - - - struct in_addr get_connect_address(NodeId node_id) const; -protected: - -private: - void * callbackObj; - - NdbMgmHandle m_mgm_handle; - - struct NdbThread *m_start_clients_thread; - bool m_run_start_clients_thread; - - int sendCounter; - NodeId localNodeId; - bool nodeIdSpecified; - unsigned maxTransporters; - int nTransporters; - int nTCPTransporters; - int nSCITransporters; - int nSHMTransporters; - - /** - * Arrays holding all transporters in the order they are created - */ - TCP_Transporter** theTCPTransporters; - SCI_Transporter** theSCITransporters; - SHM_Transporter** theSHMTransporters; - - /** - * Array, indexed by nodeId, holding all transporters - */ - TransporterType* theTransporterTypes; - Transporter** theTransporters; - - /** - * State arrays, index by host id - */ - PerformState* performStates; - IOState* ioStates; - - /** - * Unpack signal data - */ - Uint32 unpack(Uint32 * readPtr, - Uint32 bufferSize, - NodeId remoteNodeId, - IOState state); - - Uint32 * unpack(Uint32 * readPtr, - Uint32 * eodPtr, - NodeId remoteNodeId, - IOState state); - - /** - * Disconnect the transporter and remove it from - * theTransporters array. Do not allow any holes - * in theTransporters. Delete the transporter - * and remove it from theIndexedTransporters array - */ - void removeTransporter(NodeId nodeId); - - /** - * Used in polling if exists TCP_Transporter - */ - int tcpReadSelectReply; - fd_set tcpReadset; - - Uint32 poll_TCP(Uint32 timeOutMillis); - Uint32 poll_SCI(Uint32 timeOutMillis); - Uint32 poll_SHM(Uint32 timeOutMillis); - - int m_shm_own_pid; - int m_transp_count; -}; - -#endif // Define of TransporterRegistry_H diff --git a/storage/ndb/include/util/BaseString.hpp b/storage/ndb/include/util/BaseString.hpp deleted file mode 100644 index 919014c6e3d..00000000000 --- a/storage/ndb/include/util/BaseString.hpp +++ /dev/null @@ -1,285 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __UTIL_BASESTRING_HPP_INCLUDED__ -#define __UTIL_BASESTRING_HPP_INCLUDED__ - -#include -#include - -/** - * @class BaseString - * @brief Null terminated strings - */ -class BaseString { -public: - /** @brief Constructs an empty string */ - BaseString(); - - /** @brief Constructs a copy of a char * */ - BaseString(const char* s); - - /** @brief Constructs a copy of another BaseString */ - BaseString(const BaseString& str); - - /** @brief Destructor */ - ~BaseString(); - - /** @brief Returns a C-style string */ - const char* c_str() const; - - /** @brief Returns the length of the string */ - unsigned length() const; - - /** @brief Checks if the string is empty */ - bool empty() const; - - /** @brief Clear a string */ - void clear(); - - /** @brief Convert to uppercase */ - BaseString& ndb_toupper(); - - /** @brief Convert to lowercase */ - BaseString& ndb_tolower(); - - /** @brief Assigns from a char * */ - BaseString& assign(const char* s); - - /** @brief Assigns from another BaseString */ - BaseString& assign(const BaseString& str); - - /** @brief Assigns from char *s, with maximum length n */ - BaseString& assign(const char* s, size_t n); - - /** @brief Assigns from another BaseString, with maximum length n */ - BaseString& assign(const BaseString& str, size_t n); - - /** - * Assings from a Vector of BaseStrings, each Vector entry - * separated by separator. - * - * @param vector Vector of BaseStrings to append - * @param separator Separation between appended strings - */ - BaseString& assign(const Vector &vector, - const BaseString &separator = BaseString(" ")); - - /** @brief Appends a char * to the end */ - BaseString& append(const char* s); - - /** @brief Appends a char to the end */ - BaseString& append(char c); - - /** @brief Appends another BaseString to the end */ - BaseString& append(const BaseString& str); - - /** - * Appends a Vector of BaseStrings to the end, each Vector entry - * separated by separator. - * - * @param vector Vector of BaseStrings to append - * @param separator Separation between appended strings - */ - BaseString& append(const Vector &vector, - const BaseString &separator = BaseString(" ")); - - /** @brief Assigns from a format string a la printf() */ - BaseString& assfmt(const char* ftm, ...); - - /** @brief Appends a format string a la printf() to the end */ - BaseString& appfmt(const char* ftm, ...); - - /** - * Split a string into a vector of strings. Separate the string where - * any character included in separator exists. - * Maximally maxSize entries are added to the vector, if more separators - * exist in the string, the remainder of the string will be appended - * to the last entry in the vector. - * The vector will not be cleared, so any existing strings in the - * vector will remain. - * - * @param separator characters separating the entries - * @param vector where the separated strings will be stored - * @param maximum number of strings extracted - * - * @returns the number of string added to the vector - */ - int split(Vector &vector, - const BaseString &separator = BaseString(" "), - int maxSize = -1) const; - - /** - * Returns the index of the first occurance of the character c. 
- * - * @params c character to look for - * @returns index of character, of -1 if no character found - */ - ssize_t indexOf(char c); - - /** - * Returns the index of the last occurance of the character c. - * - * @params c character to look for - * @returns index of character, of -1 if no character found - */ - ssize_t lastIndexOf(char c); - - /** - * Returns a subset of a string - * - * @param start index of first character - * @param stop index of last character - * @return a new string - */ - BaseString substr(ssize_t start, ssize_t stop = -1); - - /** - * @brief Assignment operator - */ - BaseString& operator=(const BaseString& str); - - /** @brief Compare two strings */ - bool operator<(const BaseString& str) const; - /** @brief Are two strings equal? */ - bool operator==(const BaseString& str) const; - /** @brief Are two strings equal? */ - bool operator==(const char *str) const; - /** @brief Are two strings not equal? */ - bool operator!=(const BaseString& str) const; - /** @brief Are two strings not equal? */ - bool operator!=(const char *str) const; - - /** - * Trim string from delim - */ - BaseString& trim(const char * delim = " \t"); - - /** - * Return c-array with strings suitable for execve - * When whitespace is detected, the characters '"' and '\' are honored, - * to make it possible to give arguments containing whitespace. - * The semantics of '"' and '\' match that of most Unix shells. - */ - static char** argify(const char *argv0, const char *src); - - /** - * Trim string from delim - */ - static char* trim(char * src, const char * delim); - - /** - * snprintf on some platforms need special treatment - */ - static int snprintf(char *str, size_t size, const char *format, ...); - static int vsnprintf(char *str, size_t size, const char *format, va_list ap); -private: - char* m_chr; - unsigned m_len; - friend bool operator!(const BaseString& str); -}; - -inline const char* -BaseString::c_str() const -{ - return m_chr; -} - -inline unsigned -BaseString::length() const -{ - return m_len; -} - -inline bool -BaseString::empty() const -{ - return m_len == 0; -} - -inline void -BaseString::clear() -{ - delete[] m_chr; - m_chr = new char[1]; - m_chr[0] = 0; - m_len = 0; -} - -inline BaseString& -BaseString::ndb_toupper() { - for(unsigned i = 0; i < length(); i++) - m_chr[i] = toupper(m_chr[i]); - return *this; -} - -inline BaseString& -BaseString::ndb_tolower() { - for(unsigned i = 0; i < length(); i++) - m_chr[i] = tolower(m_chr[i]); - return *this; -} - -inline bool -BaseString::operator<(const BaseString& str) const -{ - return strcmp(m_chr, str.m_chr) < 0; -} - -inline bool -BaseString::operator==(const BaseString& str) const -{ - return strcmp(m_chr, str.m_chr) == 0; -} - -inline bool -BaseString::operator==(const char *str) const -{ - return strcmp(m_chr, str) == 0; -} - -inline bool -BaseString::operator!=(const BaseString& str) const -{ - return strcmp(m_chr, str.m_chr) != 0; -} - -inline bool -BaseString::operator!=(const char *str) const -{ - return strcmp(m_chr, str) != 0; -} - -inline bool -operator!(const BaseString& str) -{ - return str.m_chr == NULL; -} - -inline BaseString& -BaseString::assign(const BaseString& str) -{ - return assign(str.m_chr); -} - -inline BaseString& -BaseString::assign(const Vector &vector, - const BaseString &separator) { - assign(""); - return append(vector, separator); -} - -#endif /* !__UTIL_BASESTRING_HPP_INCLUDED__ */ diff --git a/storage/ndb/include/util/Bitmask.hpp b/storage/ndb/include/util/Bitmask.hpp deleted file mode 100644 index 
7c7016a9f41..00000000000 --- a/storage/ndb/include/util/Bitmask.hpp +++ /dev/null @@ -1,966 +0,0 @@ -/* Copyright (c) 2003-2006, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_BITMASK_H -#define NDB_BITMASK_H - -#include - -/** - * Bitmask implementation. Size is given explicitly - * (as first argument). All methods are static. - */ -class BitmaskImpl { -public: - STATIC_CONST( NotFound = (unsigned)-1 ); - - /** - * get - Check if bit n is set. - */ - static bool get(unsigned size, const Uint32 data[], unsigned n); - - /** - * set - Set bit n to given value (true/false). - */ - static void set(unsigned size, Uint32 data[], unsigned n, bool value); - - /** - * set - Set bit n. - */ - static void set(unsigned size, Uint32 data[], unsigned n); - - /** - * set - Set all bits. - */ - static void set(unsigned size, Uint32 data[]); - - /** - * set bit from start to last - */ - static void set_range(unsigned size, Uint32 data[], unsigned start, unsigned last); - - /** - * assign - Set all bits in dst to corresponding in src/ - */ - static void assign(unsigned size, Uint32 dst[], const Uint32 src[]); - - /** - * clear - Clear bit n. - */ - static void clear(unsigned size, Uint32 data[], unsigned n); - - /** - * clear - Clear all bits. - */ - static void clear(unsigned size, Uint32 data[]); - - /** - * clear bit from start to last - */ - static void clear_range(unsigned size, Uint32 data[], unsigned start, unsigned last); - - static Uint32 getWord(unsigned size, Uint32 data[], unsigned word_pos); - static void setWord(unsigned size, Uint32 data[], - unsigned word_pos, Uint32 new_word); - /** - * isclear - Check if all bits are clear. This is faster - * than checking count() == 0. - */ - static bool isclear(unsigned size, const Uint32 data[]); - - /** - * count - Count number of set bits. - */ - static unsigned count(unsigned size, const Uint32 data[]); - - /** - * find - Find first set bit, starting at given position. - * Returns NotFound when not found. - */ - static unsigned find(unsigned size, const Uint32 data[], unsigned n); - - /** - * equal - Bitwise equal. - */ - static bool equal(unsigned size, const Uint32 data[], const Uint32 data2[]); - - /** - * bitOR - Bitwise (x | y) into first operand. - */ - static void bitOR(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * bitAND - Bitwise (x & y) into first operand. - */ - static void bitAND(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * bitANDC - Bitwise (x & ~y) into first operand. - */ - static void bitANDC(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * bitXOR - Bitwise (x ^ y) into first operand. - */ - static void bitXOR(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * bitXORC - Bitwise (x ^ ~y) into first operand. 
- */ - static void bitXORC(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * contains - Check if all bits set in data2 are set in data - */ - static bool contains(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * overlaps - Check if any bit set in data is set in data2 - */ - static bool overlaps(unsigned size, Uint32 data[], const Uint32 data2[]); - - /** - * getField - Get bitfield at given position and length (max 32 bits) - */ - static Uint32 getField(unsigned size, const Uint32 data[], - unsigned pos, unsigned len); - - /** - * setField - Set bitfield at given position and length (max 32 bits) - * Note : length == 0 not supported. - */ - static void setField(unsigned size, Uint32 data[], - unsigned pos, unsigned len, Uint32 val); - - - /** - * getField - Get bitfield at given position and length - * Note : length == 0 not supported. - */ - static void getField(unsigned size, const Uint32 data[], - unsigned pos, unsigned len, Uint32 dst[]); - - /** - * setField - Set bitfield at given position and length - */ - static void setField(unsigned size, Uint32 data[], - unsigned pos, unsigned len, const Uint32 src[]); - - /** - * getText - Return as hex-digits (only for debug routines). - */ - static char* getText(unsigned size, const Uint32 data[], char* buf); -private: - static void getFieldImpl(const Uint32 data[], unsigned, unsigned, Uint32 []); - static void setFieldImpl(Uint32 data[], unsigned, unsigned, const Uint32 []); -}; - -inline bool -BitmaskImpl::get(unsigned size, const Uint32 data[], unsigned n) -{ - assert(n < (size << 5)); - return (data[n >> 5] & (1 << (n & 31))) != 0; -} - -inline void -BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n, bool value) -{ - value ? set(size, data, n) : clear(size, data, n); -} - -inline void -BitmaskImpl::set(unsigned size, Uint32 data[], unsigned n) -{ - assert(n < (size << 5)); - data[n >> 5] |= (1 << (n & 31)); -} - -inline void -BitmaskImpl::set(unsigned size, Uint32 data[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] = ~0; - } -} - -inline void -BitmaskImpl::set_range(unsigned size, Uint32 data[], - unsigned start, unsigned last) -{ - Uint32 *ptr = data + (start >> 5); - Uint32 *end = data + (last >> 5); - assert(start <= last); - assert(last < (size << 5)); - - Uint32 tmp_word = ~(Uint32)0 << (start & 31); - - if (ptr < end) - { - * ptr ++ |= tmp_word; - - for(; ptr < end; ) - { - * ptr ++ = ~(Uint32)0; - } - - tmp_word = ~(Uint32)0; - } - - tmp_word &= ~(~(Uint32)0 << (last & 31)); - - * ptr |= tmp_word; -} - -inline void -BitmaskImpl::assign(unsigned size, Uint32 dst[], const Uint32 src[]) -{ - for (unsigned i = 0; i < size; i++) { - dst[i] = src[i]; - } -} - -inline void -BitmaskImpl::clear(unsigned size, Uint32 data[], unsigned n) -{ - assert(n < (size << 5)); - data[n >> 5] &= ~(1 << (n & 31)); -} - -inline void -BitmaskImpl::clear(unsigned size, Uint32 data[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] = 0; - } -} - -inline void -BitmaskImpl::clear_range(unsigned size, Uint32 data[], - unsigned start, unsigned last) -{ - Uint32 *ptr = data + (start >> 5); - Uint32 *end = data + (last >> 5); - assert(start <= last); - assert(last < (size << 5)); - - Uint32 tmp_word = ~(Uint32)0 << (start & 31); - - if (ptr < end) - { - * ptr ++ &= ~tmp_word; - - for(; ptr < end; ) - { - * ptr ++ = 0; - } - - tmp_word = ~(Uint32)0; - } - - tmp_word &= ~(~(Uint32)0 << (last & 31)); - - * ptr &= ~tmp_word; -} - -inline -Uint32 -BitmaskImpl::getWord(unsigned size, Uint32 data[], unsigned word_pos) 
-{ - return data[word_pos]; -} - -inline void -BitmaskImpl::setWord(unsigned size, Uint32 data[], - unsigned word_pos, Uint32 new_word) -{ - data[word_pos] = new_word; - return; -} - -inline bool -BitmaskImpl::isclear(unsigned size, const Uint32 data[]) -{ - for (unsigned i = 0; i < size; i++) { - if (data[i] != 0) - return false; - } - return true; -} - -inline unsigned -BitmaskImpl::count(unsigned size, const Uint32 data[]) -{ - unsigned cnt = 0; - for (unsigned i = 0; i < size; i++) { - Uint32 x = data[i]; - while (x) { - x &= (x - 1); - cnt++; - } - } - return cnt; -} - -inline unsigned -BitmaskImpl::find(unsigned size, const Uint32 data[], unsigned n) -{ - while (n < (size << 5)) { // XXX make this smarter - if (get(size, data, n)) { - return n; - } - n++; - } - return NotFound; -} - -inline bool -BitmaskImpl::equal(unsigned size, const Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - if (data[i] != data2[i]) - return false; - } - return true; -} - -inline void -BitmaskImpl::bitOR(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] |= data2[i]; - } -} - -inline void -BitmaskImpl::bitAND(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] &= data2[i]; - } -} - -inline void -BitmaskImpl::bitANDC(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] &= ~data2[i]; - } -} - -inline void -BitmaskImpl::bitXOR(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] ^= data2[i]; - } -} - -inline void -BitmaskImpl::bitXORC(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned i = 0; i < size; i++) { - data[i] ^= ~data2[i]; - } -} - -inline bool -BitmaskImpl::contains(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned int i = 0; i < size; i++) - if ((data[i] & data2[i]) != data2[i]) - return false; - return true; -} - -inline bool -BitmaskImpl::overlaps(unsigned size, Uint32 data[], const Uint32 data2[]) -{ - for (unsigned int i = 0; i < size; i++) - if ((data[i] & data2[i]) != 0) - return true; - return false; -} - -inline Uint32 -BitmaskImpl::getField(unsigned size, const Uint32 data[], - unsigned pos, unsigned len) -{ - Uint32 val = 0; - for (unsigned i = 0; i < len; i++) - val |= get(size, data, pos + i) << i; - return val; -} - -inline void -BitmaskImpl::setField(unsigned size, Uint32 data[], - unsigned pos, unsigned len, Uint32 val) -{ - for (unsigned i = 0; i < len; i++) - set(size, data, pos + i, val & (1 << i)); -} - -inline char * -BitmaskImpl::getText(unsigned size, const Uint32 data[], char* buf) -{ - char * org = buf; - const char* const hex = "0123456789abcdef"; - for (int i = (size-1); i >= 0; i--) { - Uint32 x = data[i]; - for (unsigned j = 0; j < 8; j++) { - buf[7-j] = hex[x & 0xf]; - x >>= 4; - } - buf += 8; - } - *buf = 0; - return org; -} - -/** - * Bitmasks. The size is number of 32-bit words (Uint32). - * Unused bits in the last word must be zero. 
- * - * XXX replace size by length in bits - */ -template -struct BitmaskPOD { -public: - /** - * POD data representation - */ - struct Data { - Uint32 data[size]; -#if 0 - Data & operator=(const BitmaskPOD & src) { - src.copyto(size, data); - return *this; - } -#endif - }; -private: - - Data rep; -public: - STATIC_CONST( Size = size ); - STATIC_CONST( NotFound = BitmaskImpl::NotFound ); - STATIC_CONST( TextLength = size * 8 ); - - /** - * assign - Set all bits in dst to corresponding in src/ - */ - void assign(const typename BitmaskPOD::Data & src); - - /** - * assign - Set all bits in dst to corresponding in src/ - */ - static void assign(Uint32 dst[], const Uint32 src[]); - static void assign(Uint32 dst[], const BitmaskPOD & src); - void assign(const BitmaskPOD & src); - - /** - * copy this to dst - */ - void copyto(unsigned sz, Uint32 dst[]) const; - - /** - * assign this according to src/em> - */ - void assign(unsigned sz, const Uint32 src[]); - - /** - * get - Check if bit n is set. - */ - static bool get(const Uint32 data[], unsigned n); - bool get(unsigned n) const; - - /** - * set - Set bit n to given value (true/false). - */ - static void set(Uint32 data[], unsigned n, bool value); - void set(unsigned n, bool value); - - /** - * set - Set bit n. - */ - static void set(Uint32 data[], unsigned n); - void set(unsigned n); - - /** - * set - set all bits. - */ - static void set(Uint32 data[]); - void set(); - - /** - * clear - Clear bit n. - */ - static void clear(Uint32 data[], unsigned n); - void clear(unsigned n); - - /** - * clear - Clear all bits. - */ - static void clear(Uint32 data[]); - void clear(); - - /** - * Get and set words of bits - */ - Uint32 getWord(unsigned word_pos); - void setWord(unsigned word_pos, Uint32 new_word); - - /** - * isclear - Check if all bits are clear. This is faster - * than checking count() == 0. - */ - static bool isclear(const Uint32 data[]); - bool isclear() const; - - /** - * count - Count number of set bits. - */ - static unsigned count(const Uint32 data[]); - unsigned count() const; - - /** - * find - Find first set bit, starting at given position. - * Returns NotFound when not found. - */ - static unsigned find(const Uint32 data[], unsigned n); - unsigned find(unsigned n) const; - - /** - * equal - Bitwise equal. - */ - static bool equal(const Uint32 data[], const Uint32 data2[]); - bool equal(const BitmaskPOD& mask2) const; - - /** - * bitOR - Bitwise (x | y) into first operand. - */ - static void bitOR(Uint32 data[], const Uint32 data2[]); - BitmaskPOD& bitOR(const BitmaskPOD& mask2); - - /** - * bitAND - Bitwise (x & y) into first operand. - */ - static void bitAND(Uint32 data[], const Uint32 data2[]); - BitmaskPOD& bitAND(const BitmaskPOD& mask2); - - /** - * bitANDC - Bitwise (x & ~y) into first operand. - */ - static void bitANDC(Uint32 data[], const Uint32 data2[]); - BitmaskPOD& bitANDC(const BitmaskPOD& mask2); - - /** - * bitXOR - Bitwise (x ^ y) into first operand. - */ - static void bitXOR(Uint32 data[], const Uint32 data2[]); - BitmaskPOD& bitXOR(const BitmaskPOD& mask2); - - /** - * bitXORC - Bitwise (x ^ ~y) into first operand. 
- */ - static void bitXORC(Uint32 data[], const Uint32 data2[]); - BitmaskPOD& bitXORC(const BitmaskPOD& mask2); - - /** - * contains - Check if all bits set in data2 (that) are also set in data (this) - */ - static bool contains(Uint32 data[], const Uint32 data2[]); - bool contains(BitmaskPOD that); - - /** - * overlaps - Check if any bit set in this BitmaskPOD (data) is also set in that (data2) - */ - static bool overlaps(Uint32 data[], const Uint32 data2[]); - bool overlaps(BitmaskPOD that); - - /** - * getText - Return as hex-digits (only for debug routines). - */ - static char* getText(const Uint32 data[], char* buf); - char* getText(char* buf) const; -}; - -template -inline void -BitmaskPOD::assign(Uint32 dst[], const Uint32 src[]) -{ - BitmaskImpl::assign(size, dst, src); -} - -template -inline void -BitmaskPOD::assign(Uint32 dst[], const BitmaskPOD & src) -{ - BitmaskImpl::assign(size, dst, src.rep.data); -} - -template -inline void -BitmaskPOD::assign(const typename BitmaskPOD::Data & src) -{ - BitmaskPOD::assign(rep.data, src.data); -} - -template -inline void -BitmaskPOD::assign(const BitmaskPOD & src) -{ - BitmaskPOD::assign(rep.data, src.rep.data); -} - -template -inline void -BitmaskPOD::copyto(unsigned sz, Uint32 dst[]) const -{ - BitmaskImpl::assign(sz, dst, rep.data); -} - -template -inline void -BitmaskPOD::assign(unsigned sz, const Uint32 src[]) -{ - BitmaskImpl::assign(sz, rep.data, src); -} - -template -inline bool -BitmaskPOD::get(const Uint32 data[], unsigned n) -{ - return BitmaskImpl::get(size, data, n); -} - -template -inline bool -BitmaskPOD::get(unsigned n) const -{ - return BitmaskPOD::get(rep.data, n); -} - -template -inline void -BitmaskPOD::set(Uint32 data[], unsigned n, bool value) -{ - BitmaskImpl::set(size, data, n, value); -} - -template -inline void -BitmaskPOD::set(unsigned n, bool value) -{ - BitmaskPOD::set(rep.data, n, value); -} - -template -inline void -BitmaskPOD::set(Uint32 data[], unsigned n) -{ - BitmaskImpl::set(size, data, n); -} - -template -inline void -BitmaskPOD::set(unsigned n) -{ - BitmaskPOD::set(rep.data, n); -} - -template -inline void -BitmaskPOD::set(Uint32 data[]) -{ - BitmaskImpl::set(size, data); -} - -template -inline void -BitmaskPOD::set() -{ - BitmaskPOD::set(rep.data); -} - -template -inline void -BitmaskPOD::clear(Uint32 data[], unsigned n) -{ - BitmaskImpl::clear(size, data, n); -} - -template -inline void -BitmaskPOD::clear(unsigned n) -{ - BitmaskPOD::clear(rep.data, n); -} - -template -inline void -BitmaskPOD::clear(Uint32 data[]) -{ - BitmaskImpl::clear(size, data); -} - -template -inline void -BitmaskPOD::clear() -{ - BitmaskPOD::clear(rep.data); -} - -template -inline Uint32 -BitmaskPOD::getWord(unsigned word_pos) -{ - return BitmaskImpl::getWord(size, rep.data, word_pos); -} - -template -inline void -BitmaskPOD::setWord(unsigned word_pos, Uint32 new_word) -{ - BitmaskImpl::setWord(size, rep.data, word_pos, new_word); -} - -template -inline bool -BitmaskPOD::isclear(const Uint32 data[]) -{ - return BitmaskImpl::isclear(size, data); -} - -template -inline bool -BitmaskPOD::isclear() const -{ - return BitmaskPOD::isclear(rep.data); -} - -template -unsigned -BitmaskPOD::count(const Uint32 data[]) -{ - return BitmaskImpl::count(size, data); -} - -template -inline unsigned -BitmaskPOD::count() const -{ - return BitmaskPOD::count(rep.data); -} - -template -unsigned -BitmaskPOD::find(const Uint32 data[], unsigned n) -{ - return BitmaskImpl::find(size, data, n); -} - -template -inline unsigned -BitmaskPOD::find(unsigned n) 
const -{ - return BitmaskPOD::find(rep.data, n); -} - -template -inline bool -BitmaskPOD::equal(const Uint32 data[], const Uint32 data2[]) -{ - return BitmaskImpl::equal(size, data, data2); -} - -template -inline bool -BitmaskPOD::equal(const BitmaskPOD& mask2) const -{ - return BitmaskPOD::equal(rep.data, mask2.rep.data); -} - -template -inline void -BitmaskPOD::bitOR(Uint32 data[], const Uint32 data2[]) -{ - BitmaskImpl::bitOR(size,data, data2); -} - -template -inline BitmaskPOD& -BitmaskPOD::bitOR(const BitmaskPOD& mask2) -{ - BitmaskPOD::bitOR(rep.data, mask2.rep.data); - return *this; -} - -template -inline void -BitmaskPOD::bitAND(Uint32 data[], const Uint32 data2[]) -{ - BitmaskImpl::bitAND(size,data, data2); -} - -template -inline BitmaskPOD& -BitmaskPOD::bitAND(const BitmaskPOD& mask2) -{ - BitmaskPOD::bitAND(rep.data, mask2.rep.data); - return *this; -} - -template -inline void -BitmaskPOD::bitANDC(Uint32 data[], const Uint32 data2[]) -{ - BitmaskImpl::bitANDC(size,data, data2); -} - -template -inline BitmaskPOD& -BitmaskPOD::bitANDC(const BitmaskPOD& mask2) -{ - BitmaskPOD::bitANDC(rep.data, mask2.rep.data); - return *this; -} - -template -inline void -BitmaskPOD::bitXOR(Uint32 data[], const Uint32 data2[]) -{ - BitmaskImpl::bitXOR(size,data, data2); -} - -template -inline BitmaskPOD& -BitmaskPOD::bitXOR(const BitmaskPOD& mask2) -{ - BitmaskPOD::bitXOR(rep.data, mask2.rep.data); - return *this; -} - -template -inline void -BitmaskPOD::bitXORC(Uint32 data[], const Uint32 data2[]) -{ - BitmaskImpl::bitXORC(size,data, data2); -} - -template -inline BitmaskPOD& -BitmaskPOD::bitXORC(const BitmaskPOD& mask2) -{ - BitmaskPOD::bitXORC(rep.data, mask2.rep.data); - return *this; -} - -template -char * -BitmaskPOD::getText(const Uint32 data[], char* buf) -{ - return BitmaskImpl::getText(size, data, buf); -} - -template -inline char * -BitmaskPOD::getText(char* buf) const -{ - return BitmaskPOD::getText(rep.data, buf); -} - -template -inline bool -BitmaskPOD::contains(Uint32 data[], const Uint32 data2[]) -{ - return BitmaskImpl::contains(size, data, data2); -} - -template -inline bool -BitmaskPOD::contains(BitmaskPOD that) -{ - return BitmaskPOD::contains(this->rep.data, that.rep.data); -} - -template -inline bool -BitmaskPOD::overlaps(Uint32 data[], const Uint32 data2[]) -{ - return BitmaskImpl::overlaps(size, data, data2); -} - -template -inline bool -BitmaskPOD::overlaps(BitmaskPOD that) -{ - return BitmaskPOD::overlaps(this->rep.data, that.rep.data); -} - -template -class Bitmask : public BitmaskPOD { -public: - Bitmask() { this->clear();} -}; - -inline void -BitmaskImpl::getField(unsigned size, const Uint32 src[], - unsigned pos, unsigned len, Uint32 dst[]) -{ - assert(pos + len <= (size << 5)); - assert (len != 0); - if (len == 0) - return; - - src += (pos >> 5); - Uint32 offset = pos & 31; - * dst = (* src >> offset) & (len >= 32 ? ~0 : (1 << len) - 1); - - if(offset + len <= 32) - { - return; - } - Uint32 used = (32 - offset); - assert(len > used); - getFieldImpl(src+1, used & 31, len-used, dst+(used >> 5)); -} - -inline void -BitmaskImpl::setField(unsigned size, Uint32 dst[], - unsigned pos, unsigned len, const Uint32 src[]) -{ - assert(pos + len <= (size << 5)); - assert(len != 0); - if (len == 0) - return; - - dst += (pos >> 5); - Uint32 offset = pos & 31; - Uint32 mask = (len >= 32 ? 
~0 : (1 << len) - 1) << offset; - - * dst = (* dst & ~mask) | ((*src << offset) & mask); - - if(offset + len <= 32) - { - return; - } - Uint32 used = (32 - offset); - assert(len > used); - setFieldImpl(dst+1, used & 31, len-used, src+(used >> 5)); -} - - -#endif diff --git a/storage/ndb/include/util/ConfigValues.hpp b/storage/ndb/include/util/ConfigValues.hpp deleted file mode 100644 index 3ea81c07e91..00000000000 --- a/storage/ndb/include/util/ConfigValues.hpp +++ /dev/null @@ -1,271 +0,0 @@ -/* Copyright (C) 2004-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#ifndef __CONFIG_VALUES_HPP -#define __CONFIG_VALUES_HPP - -#include -#include - -class ConfigValues { - friend class ConfigValuesFactory; - ConfigValues(Uint32 sz, Uint32 data); - -public: - ~ConfigValues(); - - enum ValueType { - InvalidType = 0, - IntType = 1, - StringType = 2, - SectionType = 3, - Int64Type = 4 - }; - - struct Entry { - Uint32 m_key; - ValueType m_type; - union { - Uint32 m_int; - Uint64 m_int64; - const char * m_string; - }; - }; - - class ConstIterator { - friend class ConfigValuesFactory; - const ConfigValues & m_cfg; - public: - Uint32 m_currentSection; - ConstIterator(const ConfigValues&c) : m_cfg(c) { m_currentSection = 0;} - - bool openSection(Uint32 key, Uint32 no); - bool closeSection(); - - bool get(Uint32 key, Entry *) const; - - bool get(Uint32 key, Uint32 * value) const; - bool get(Uint32 key, Uint64 * value) const; - bool get(Uint32 key, const char ** value) const; - bool getTypeOf(Uint32 key, ValueType * type) const; - - Uint32 get(Uint32 key, Uint32 notFound) const; - Uint64 get64(Uint32 key, Uint64 notFound) const; - const char * get(Uint32 key, const char * notFound) const; - ValueType getTypeOf(Uint32 key) const; - }; - - class Iterator : public ConstIterator { - ConfigValues & m_cfg; - public: - Iterator(ConfigValues&c) : ConstIterator(c), m_cfg(c) {} - Iterator(ConfigValues&c, const ConstIterator& i):ConstIterator(c),m_cfg(c){ - m_currentSection = i.m_currentSection; - } - - bool set(Uint32 key, Uint32 value); - bool set(Uint32 key, Uint64 value); - bool set(Uint32 key, const char * value); - }; - - Uint32 getPackedSize() const; // get size in bytes needed to pack - Uint32 pack(UtilBuffer&) const; - Uint32 pack(void * dst, Uint32 len) const;// pack into dst(of len %d); - -private: - friend class Iterator; - friend class ConstIterator; - - bool getByPos(Uint32 pos, Entry *) const; - Uint64 * get64(Uint32 index) const; - char ** getString(Uint32 index) const; - - Uint32 m_size; - Uint32 m_dataSize; - Uint32 m_stringCount; - Uint32 m_int64Count; - - Uint32 m_values[1]; - void * m_data[1]; -}; - -class ConfigValuesFactory { - Uint32 m_currentSection; -public: - Uint32 m_sectionCounter; - Uint32 m_freeKeys; - Uint32 m_freeData; - -public: - ConfigValuesFactory(Uint32 keys = 50, Uint32 data = 10); // Initial - ConfigValuesFactory(ConfigValues * 
m_cfg); // - ~ConfigValuesFactory(); - - ConfigValues * m_cfg; - ConfigValues * getConfigValues(); - - bool openSection(Uint32 key, Uint32 no); - bool put(const ConfigValues::Entry & ); - bool put(Uint32 key, Uint32 value); - bool put64(Uint32 key, Uint64 value); - bool put(Uint32 key, const char * value); - bool closeSection(); - - void expand(Uint32 freeKeys, Uint32 freeData); - void shrink(); - - bool unpack(const UtilBuffer&); - bool unpack(const void * src, Uint32 len); - - static ConfigValues * extractCurrentSection(const ConfigValues::ConstIterator &); - -private: - static ConfigValues * create(Uint32 keys, Uint32 data); - void put(const ConfigValues & src); -}; - -inline -bool -ConfigValues::ConstIterator::get(Uint32 key, Uint32 * value) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == IntType){ - * value = tmp.m_int; - return true; - } - return false; -} - -inline -bool -ConfigValues::ConstIterator::get(Uint32 key, Uint64 * value) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == Int64Type){ - * value = tmp.m_int64; - return true; - } - return false; -} - -inline -bool -ConfigValues::ConstIterator::get(Uint32 key, const char ** value) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == StringType){ - * value = tmp.m_string; - return true; - } - return false; -} - -inline -bool -ConfigValues::ConstIterator::getTypeOf(Uint32 key, ValueType * type) const{ - Entry tmp; - if(get(key, &tmp)){ - * type = tmp.m_type; - return true; - } - return false; -} - -inline -Uint32 -ConfigValues::ConstIterator::get(Uint32 key, Uint32 notFound) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == IntType){ - return tmp.m_int; - } - return notFound; -} - -inline -Uint64 -ConfigValues::ConstIterator::get64(Uint32 key, Uint64 notFound) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == Int64Type){ - return tmp.m_int64; - } - return notFound; -} - -inline -const char * -ConfigValues::ConstIterator::get(Uint32 key, const char * notFound) const { - Entry tmp; - if(get(key, &tmp) && tmp.m_type == StringType){ - return tmp.m_string; - } - return notFound; -} - -inline -ConfigValues::ValueType -ConfigValues::ConstIterator::getTypeOf(Uint32 key) const{ - Entry tmp; - if(get(key, &tmp)){ - return tmp.m_type; - } - return ConfigValues::InvalidType; -} - -inline -bool -ConfigValuesFactory::put(Uint32 key, Uint32 val){ - ConfigValues::Entry tmp; - tmp.m_key = key; - tmp.m_type = ConfigValues::IntType; - tmp.m_int = val; - return put(tmp); -} - -inline -bool -ConfigValuesFactory::put64(Uint32 key, Uint64 val){ - ConfigValues::Entry tmp; - tmp.m_key = key; - tmp.m_type = ConfigValues::Int64Type; - tmp.m_int64 = val; - return put(tmp); -} - -inline -bool -ConfigValuesFactory::put(Uint32 key, const char * val){ - ConfigValues::Entry tmp; - tmp.m_key = key; - tmp.m_type = ConfigValues::StringType; - tmp.m_string = val; - return put(tmp); -} - -inline -Uint32 -ConfigValues::pack(UtilBuffer& buf) const { - Uint32 len = getPackedSize(); - void * tmp = buf.append(len); - if(tmp == 0){ - return 0; - } - return pack(tmp, len); -} - -inline -bool -ConfigValuesFactory::unpack(const UtilBuffer& buf){ - return unpack(buf.get_data(), buf.length()); -} - -#endif diff --git a/storage/ndb/include/util/File.hpp b/storage/ndb/include/util/File.hpp deleted file mode 100644 index 4505365b726..00000000000 --- a/storage/ndb/include/util/File.hpp +++ /dev/null @@ -1,211 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute 
it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FILE_H -#define FILE_H - -#include - -/** - * This class provides a file abstraction . It has operations - * to create, read, write and delete a file. - * - * @version #@ $Id: File.hpp,v 1.5 2002/04/26 13:15:38 ejonore Exp $ - */ -class File_class -{ -public: - /** - * Returns time for last contents modification of a file. - * - * @param aFileName a filename to check. - * @return the time for last contents modification of the file. - */ - static time_t mtime(const char* aFileName); - - /** - * Returns true if the file exist. - * - * @param aFileName a filename to check. - * @return true if the file exists. - */ - static bool exists(const char* aFileName); - - /** - * Returns the size of a file. - * - * @param f a pointer to a FILE descriptor. - * @return the size of the file. - */ - static off_t size(FILE* f); - - /** - * Renames a file. - * - * @param currFileName the current name of the file. - * @param newFileName the new name of the file. - * @return true if successful. - */ - static bool rename(const char* currFileName, const char* newFileName); - - /** - * Removes/deletes a file. - * - * @param aFileName the file to remove. - * @return true if successful. - */ - static bool remove(const char* aFileName); - - /** - * Default constructor. - */ - File_class(); - - /** - * Creates a new File with the specified filename and file mode. - * The real file itself will not be created unless open() is called! - * - * To see the available file modes use 'man 3 fopen'. - * - * @param aFileName a filename. - * @param mode the mode which the file should be opened/created with, default "r". - */ - File_class(const char* aFileName, const char* mode = "r"); - - /** - * Destructor. - */ - ~File_class(); - - /** - * Opens/creates the file. If open() fails then 'errno' and perror() - * should be used to determine the exact failure cause. - * - * @return true if successful. Check errno if unsuccessful. - */ - bool open(); - - /** - * Opens/creates the file with the specified name and mode. - * If open() fails then 'errno' and perror() should be used to - * determine the exact failure cause. - * - * @param aFileName the file to open. - * @param mode the file mode to use. - * @return true if successful. Check errno if unsuccessful. - */ - bool open(const char* aFileName, const char* mode); - - /** - * Removes the file. - * - * @return true if successful. - */ - bool remove(); - - /** - * Closes the file, i.e., the FILE descriptor is closed. - */ - bool close(); - - /** - * Read from the file. See fread() for more info. - * - * @param buf the buffer to read into. - * @param itemSize the size of each item. - * @param nitems read max n number of items. - * @return 0 if successful. - */ - int read(void* buf, size_t itemSize, size_t nitems) const; - - /** - * Read char from the file. See fread() for more info. - * - * @param buf the buffer to read into. - * @param start the start index of the buf. 
- * @param length the length of the buffer. - * @return 0 if successful. - */ - int readChar(char* buf, long start, long length) const; - - /** - * Read chars from the file. See fread() for more info. - * - * @param buf the buffer to read into. - * @return 0 if successful. - */ - int readChar(char* buf); - - /** - * Write to file. See fwrite() for more info. - * - * @param buf the buffer to read from. - * @param itemSize the size of each item. - * @param nitems write max n number of items. - * @return 0 if successful. - */ - int write(const void* buf, size_t itemSize, size_t nitems); - - /** - * Write chars to file. See fwrite() for more info. - * - * @param buf the buffer to read from. - * @param start the start index of the buf. - * @param length the length of the buffer. - * @return 0 if successful. - */ - int writeChar(const char* buf, long start, long length); - - /** - * Write chars to file. See fwrite() for more info. - * - * @param buf the buffer to read from. - * @return 0 if successful. - */ - int writeChar(const char* buf); - - /** - * Returns the file size. - * - * @return the file size. - */ - off_t size() const; - - /** - * Returns the filename. - * - * @return the filename. - */ - const char* getName() const; - - /** - * Flush the buffer. - * - * @return 0 if successful. - */ - int flush() const; - -private: - FILE* m_file; - char m_fileName[PATH_MAX]; - const char* m_fileMode; - /* Prohibit */ - File_class (const File_class& aCopy); - File_class operator = (const File_class&); - bool operator == (const File_class&); -}; -#endif - diff --git a/storage/ndb/include/util/InputStream.hpp b/storage/ndb/include/util/InputStream.hpp deleted file mode 100644 index 49c91954a3a..00000000000 --- a/storage/ndb/include/util/InputStream.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef INPUT_STREAM_HPP -#define INPUT_STREAM_HPP - -#include -#include -#include - -/** - * Input stream - */ -class InputStream { -public: - InputStream() { m_mutex= NULL; }; - virtual ~InputStream() {}; - virtual char* gets(char * buf, int bufLen) = 0; - /** - * Set the mutex to be UNLOCKED when blocking (e.g. 
select(2)) - */ - void set_mutex(NdbMutex *m) { m_mutex= m; }; - virtual void reset_timeout() {}; -protected: - NdbMutex *m_mutex; -}; - -class FileInputStream : public InputStream { - FILE * f; -public: - FileInputStream(FILE * file = stdin); - virtual ~FileInputStream() {} - char* gets(char * buf, int bufLen); -}; - -extern FileInputStream Stdin; - -class SocketInputStream : public InputStream { - NDB_SOCKET_TYPE m_socket; - unsigned m_timeout_ms; - unsigned m_timeout_remain; - bool m_startover; - bool m_timedout; -public: - SocketInputStream(NDB_SOCKET_TYPE socket, unsigned read_timeout_ms = 60000); - virtual ~SocketInputStream() {} - char* gets(char * buf, int bufLen); - bool timedout() { return m_timedout; }; - void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; - -}; - -#endif diff --git a/storage/ndb/include/util/NdbAutoPtr.hpp b/storage/ndb/include/util/NdbAutoPtr.hpp deleted file mode 100644 index 78e7dbe25ad..00000000000 --- a/storage/ndb/include/util/NdbAutoPtr.hpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __NDB_AUTO_PTR_HPP -#define __NDB_AUTO_PTR_HPP - -#include -#include - -template -class NdbAutoPtr { - T * m_obj; -public: - NdbAutoPtr(T * obj = 0){ m_obj = obj;} - void reset(T * obj = 0) { if (m_obj) free(m_obj); m_obj = obj; } - ~NdbAutoPtr() { if (m_obj) free(m_obj);} -}; - -template -class NdbAutoObjPtr { - T * m_obj; -public: - NdbAutoObjPtr(T * obj = 0){ m_obj = obj;} - void reset(T * obj = 0) { if (m_obj) delete m_obj; m_obj = obj; } - ~NdbAutoObjPtr() { if (m_obj) delete m_obj;} -}; - -template -class NdbAutoObjArrayPtr { - T * m_obj; -public: - NdbAutoObjArrayPtr(T * obj = 0){ m_obj = obj;} - void reset(T * obj = 0) { if (m_obj) delete[] m_obj; m_obj = obj; } - ~NdbAutoObjArrayPtr() { if (m_obj) delete[] m_obj;} -}; - -template -class My_auto_ptr { - T * m_obj; -public: - My_auto_ptr(T * obj = 0){ m_obj = obj;} - void reset(T * obj = 0) { if (m_obj) my_free(m_obj); m_obj = obj; } - ~My_auto_ptr() { if (m_obj) my_free(m_obj);} -}; - -#endif diff --git a/storage/ndb/include/util/NdbOut.hpp b/storage/ndb/include/util/NdbOut.hpp deleted file mode 100644 index 4d9f5985d2f..00000000000 --- a/storage/ndb/include/util/NdbOut.hpp +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBOUT_H -#define NDBOUT_H - -#ifdef __cplusplus - -#include -#include - -/** - * Class used for outputting logging messages to screen. - * Since the output capabilities are different on different platforms - * this middle layer class should be used for all output messages - */ - -/* - Example usage: - - #include "NdbOut.hpp" - - / * Use ndbout as you would use cout: - - ndbout << "Hello World! "<< 1 << " Hello again" - << 67 << anIntegerVar << "Hup << endl; - - - / * Use ndbout_c as you would use printf: - - ndbout_c("Hello World %d\n", 1); -*/ - -class NdbOut; -class OutputStream; -class NullOutputStream; - -/* Declare a static variable of NdbOut as ndbout */ -extern NdbOut ndbout; - -class NdbOut -{ -public: - NdbOut& operator<<(NdbOut& (* _f)(NdbOut&)); - NdbOut& operator<<(Int8); - NdbOut& operator<<(Uint8); - NdbOut& operator<<(Int16); - NdbOut& operator<<(Uint16); - NdbOut& operator<<(Int32); - NdbOut& operator<<(Uint32); - NdbOut& operator<<(Int64); - NdbOut& operator<<(Uint64); - NdbOut& operator<<(long unsigned int); - NdbOut& operator<<(const char*); - NdbOut& operator<<(const unsigned char*); - NdbOut& operator<<(BaseString &); - NdbOut& operator<<(const void*); - NdbOut& operator<<(float); - NdbOut& operator<<(double); - NdbOut& endline(void); - NdbOut& flushline(void); - NdbOut& setHexFormat(int _format); - - NdbOut(OutputStream &); - virtual ~NdbOut(); - - void print(const char * fmt, ...); - void println(const char * fmt, ...); - - OutputStream * m_out; -private: - int isHex; -}; - -inline NdbOut& NdbOut::operator<<(NdbOut& (* _f)(NdbOut&)) { - (* _f)(*this); - return * this; -} - -inline NdbOut& endl(NdbOut& _NdbOut) { - return _NdbOut.endline(); -} - -inline NdbOut& flush(NdbOut& _NdbOut) { - return _NdbOut.flushline(); -} - -inline NdbOut& hex(NdbOut& _NdbOut) { - return _NdbOut.setHexFormat(1); -} - -inline NdbOut& dec(NdbOut& _NdbOut) { - return _NdbOut.setHexFormat(0); -} -extern "C" -void ndbout_c(const char * fmt, ...) ATTRIBUTE_FORMAT(printf, 1, 2); - -class FilteredNdbOut : public NdbOut { -public: - FilteredNdbOut(OutputStream &, int threshold = 0, int level = 0); - virtual ~FilteredNdbOut(); - - void setLevel(int i); - void setThreshold(int i); - - int getLevel() const; - int getThreshold() const; - -private: - int m_threshold, m_level; - OutputStream * m_org; - NullOutputStream * m_null; -}; - -#else -void ndbout_c(const char * fmt, ...); -#endif - -#endif diff --git a/storage/ndb/include/util/NdbSqlUtil.hpp b/storage/ndb/include/util/NdbSqlUtil.hpp deleted file mode 100644 index 0218be6d20b..00000000000 --- a/storage/ndb/include/util/NdbSqlUtil.hpp +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_SQL_UTIL_HPP -#define NDB_SQL_UTIL_HPP - -#include -#include - -struct charset_info_st; -typedef const struct charset_info_st CHARSET_INFO; - -class NdbSqlUtil { -public: - /** - * Compare attribute values. Returns -1, 0, +1 for less, equal, - * greater, respectively. Parameters are pointers to values and their - * lengths in bytes. The lengths can differ. - * - * First value is a full value but second value can be partial. If - * the partial value is not enough to determine the result, CmpUnknown - * will be returned. A shorter second value is not necessarily - * partial. Partial values are allowed only for types where prefix - * comparison is possible (basically, binary strings). - * - * First parameter is a pointer to type specific extra info. Char - * types receive CHARSET_INFO in it. - * - * If a value cannot be parsed, it compares like NULL i.e. less than - * any valid value. - */ - typedef int Cmp(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full); - - /** - * Prototype for "like" comparison. Defined for string types. First - * argument can be fixed or var* type, second argument is fixed. - * Returns 0 on match, +1 on no match, and -1 on bad data. - * - * Uses default special chars ( \ % _ ). - */ - typedef int Like(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2); - - enum CmpResult { - CmpLess = -1, - CmpEqual = 0, - CmpGreater = 1, - CmpUnknown = 2 // insufficient partial data - }; - - struct Type { - enum Enum { - Undefined = NDB_TYPE_UNDEFINED, - Tinyint = NDB_TYPE_TINYINT, - Tinyunsigned = NDB_TYPE_TINYUNSIGNED, - Smallint = NDB_TYPE_SMALLINT, - Smallunsigned = NDB_TYPE_SMALLUNSIGNED, - Mediumint = NDB_TYPE_MEDIUMINT, - Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED, - Int = NDB_TYPE_INT, - Unsigned = NDB_TYPE_UNSIGNED, - Bigint = NDB_TYPE_BIGINT, - Bigunsigned = NDB_TYPE_BIGUNSIGNED, - Float = NDB_TYPE_FLOAT, - Double = NDB_TYPE_DOUBLE, - Olddecimal = NDB_TYPE_OLDDECIMAL, - Char = NDB_TYPE_CHAR, - Varchar = NDB_TYPE_VARCHAR, - Binary = NDB_TYPE_BINARY, - Varbinary = NDB_TYPE_VARBINARY, - Datetime = NDB_TYPE_DATETIME, - Date = NDB_TYPE_DATE, - Blob = NDB_TYPE_BLOB, - Text = NDB_TYPE_TEXT, - Bit = NDB_TYPE_BIT, - Longvarchar = NDB_TYPE_LONGVARCHAR, - Longvarbinary = NDB_TYPE_LONGVARBINARY, - Time = NDB_TYPE_TIME, - Year = NDB_TYPE_YEAR, - Timestamp = NDB_TYPE_TIMESTAMP, - Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED, - Decimal = NDB_TYPE_DECIMAL, - Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED - }; - Enum m_typeId; // redundant - Cmp* m_cmp; // comparison method - Like* m_like; // "like" comparison method - }; - - /** - * Get type by id. Can return the Undefined type. - */ - static const Type& getType(Uint32 typeId); - - /** - * Get the normalized type used in hashing and key comparisons. - * Maps all string types to Binary. This includes Var* strings - * because strxfrm result is padded to fixed (maximum) length. - */ - static const Type& getTypeBinary(Uint32 typeId); - - /** - * Check character set. 
- */ - static uint check_column_for_pk(Uint32 typeId, const void* info); - static uint check_column_for_hash_index(Uint32 typeId, const void* info); - static uint check_column_for_ordered_index(Uint32 typeId, const void* info); - - /** - * Get number of length bytes and length from variable length string. - * Returns false on error (invalid data). For other types returns - * zero length bytes and the fixed attribute length. - */ - static bool get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len); - - /** - * Temporary workaround for bug#7284. - */ - static int strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen); - - /** - * Compare decimal numbers. - */ - static int cmp_olddecimal(const uchar* s1, const uchar* s2, unsigned n); - -private: - /** - * List of all types. Must match Type::Enum. - */ - static const Type m_typeList[]; - /** - * Comparison methods. - */ - static Cmp cmpTinyint; - static Cmp cmpTinyunsigned; - static Cmp cmpSmallint; - static Cmp cmpSmallunsigned; - static Cmp cmpMediumint; - static Cmp cmpMediumunsigned; - static Cmp cmpInt; - static Cmp cmpUnsigned; - static Cmp cmpBigint; - static Cmp cmpBigunsigned; - static Cmp cmpFloat; - static Cmp cmpDouble; - static Cmp cmpOlddecimal; - static Cmp cmpChar; - static Cmp cmpVarchar; - static Cmp cmpBinary; - static Cmp cmpVarbinary; - static Cmp cmpDatetime; - static Cmp cmpDate; - static Cmp cmpBlob; - static Cmp cmpText; - static Cmp cmpBit; - static Cmp cmpLongvarchar; - static Cmp cmpLongvarbinary; - static Cmp cmpTime; - static Cmp cmpYear; - static Cmp cmpTimestamp; - static Cmp cmpOlddecimalunsigned; - static Cmp cmpDecimal; - static Cmp cmpDecimalunsigned; - // - static Like likeChar; - static Like likeBinary; - static Like likeVarchar; - static Like likeVarbinary; - static Like likeLongvarchar; - static Like likeLongvarbinary; -}; - -#endif diff --git a/storage/ndb/include/util/OutputStream.hpp b/storage/ndb/include/util/OutputStream.hpp deleted file mode 100644 index e4d1ad5c634..00000000000 --- a/storage/ndb/include/util/OutputStream.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef OUTPUT_STREAM_HPP -#define OUTPUT_STREAM_HPP - -#include -#include - -/** - * Output stream - */ -class OutputStream { -public: - OutputStream() {} - virtual ~OutputStream() {} - virtual int print(const char * fmt, ...) = 0; - virtual int println(const char * fmt, ...) 
= 0; - virtual void flush() {}; - virtual void reset_timeout() {}; -}; - -class FileOutputStream : public OutputStream { - FILE * f; -public: - FileOutputStream(FILE * file = stdout); - virtual ~FileOutputStream() {} - FILE *getFile() { return f; } - - int print(const char * fmt, ...); - int println(const char * fmt, ...); - void flush() { fflush(f); } -}; - -class SocketOutputStream : public OutputStream { - NDB_SOCKET_TYPE m_socket; - unsigned m_timeout_ms; - bool m_timedout; - unsigned m_timeout_remain; -public: - SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000); - virtual ~SocketOutputStream() {} - bool timedout() { return m_timedout; }; - void reset_timeout() { m_timedout= false; m_timeout_remain= m_timeout_ms;}; - - int print(const char * fmt, ...); - int println(const char * fmt, ...); -}; - -class NullOutputStream : public OutputStream { -public: - NullOutputStream() {} - virtual ~NullOutputStream() {} - int print(const char * /* unused */, ...) { return 1;} - int println(const char * /* unused */, ...) { return 1;} -}; - -#endif diff --git a/storage/ndb/include/util/Parser.hpp b/storage/ndb/include/util/Parser.hpp deleted file mode 100644 index 0b13c86dbfe..00000000000 --- a/storage/ndb/include/util/Parser.hpp +++ /dev/null @@ -1,294 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CPCD_PARSER_HPP -#define CPCD_PARSER_HPP - -#include "Vector.hpp" -#include "Properties.hpp" -#include "InputStream.hpp" -#include "NdbOut.hpp" - -class ParserImpl; -template struct ParserRow; - -//#define PARSER_DEBUG -#ifdef PARSER_DEBUG -#define DEBUG(x) \ - ndbout_c("%s:%d:%s", __FILE__, __LINE__, x); -#else -#define DEBUG(x) -#endif - -/** - * A generic parser - */ -template -class Parser { -public: - /** - * Status for parser - */ - enum ParserStatus { - Ok = 0, - Eof = 1, - NoLine = 2, - EmptyLine = 3, - UnknownCommand = 4, - UnknownArgument = 5, - TypeMismatch = 6, - InvalidArgumentFormat = 7, - UnknownArgumentType = 8, - CommandWithoutFunction = 9, - ArgumentGivenTwice = 10, - ExternalStop = 11, - MissingMandatoryArgument = 12 - }; - - /** - * Context for parse - */ - class Context { - public: - Context() { m_mutex= NULL; }; - ParserStatus m_status; - const ParserRow * m_currentCmd; - const ParserRow * m_currentArg; - char * m_currentToken; - char m_tokenBuffer[512]; - NdbMutex *m_mutex; - - Vector *> m_aliasUsed; - }; - - /** - * Initialize parser - */ - Parser(const ParserRow rows[], class InputStream & in = Stdin, - bool breakOnCommand = false, - bool breakOnEmptyLine = true, - bool breakOnInvalidArg = false); - ~Parser(); - - /** - * Run parser - */ - bool run(Context &, T &, volatile bool * stop = 0) const; - - /** - * Parse only one entry and return Properties object representing - * the message - */ - const Properties *parse(Context &, T &); - - bool getBreakOnCommand() const; - void setBreakOnCommand(bool v); - - bool getBreakOnEmptyLine() const; - void setBreakOnEmptyLine(bool v); - - bool getBreakOnInvalidArg() const; - void setBreakOnInvalidArg(bool v); - -private: - ParserImpl * impl; -}; - -template -struct ParserRow { -public: - enum Type { Cmd, Arg, CmdAlias, ArgAlias }; - enum ArgType { String, Int, Properties }; - enum ArgRequired { Mandatory, Optional }; - enum ArgMinMax { CheckMinMax, IgnoreMinMax }; - - const char * name; - const char * realName; - Type type; - ArgType argType; - ArgRequired argRequired; - ArgMinMax argMinMax; - int minVal; - int maxVal; - void (T::* function)(typename Parser::Context & ctx, - const class Properties& args); - const char * description; - void *user_value; -}; - -/** - * The void* equivalent implementation - */ -class ParserImpl { -public: - class Dummy {}; - typedef ParserRow DummyRow; - typedef Parser::Context Context; - - - ParserImpl(const DummyRow rows[], class InputStream & in, - bool b_cmd, bool b_empty, bool b_iarg); - ~ParserImpl(); - - bool run(Context *ctx, const class Properties **, volatile bool *) const ; - - static const DummyRow* matchCommand(Context*, const char*, const DummyRow*); - static const DummyRow* matchArg(Context*, const char *, const DummyRow *); - static bool parseArg(Context*, char*, const DummyRow*, Properties*); - static bool checkMandatory(Context*, const Properties*); -private: - const DummyRow * const m_rows; - class ParseInputStream & input; - bool m_breakOnEmpty; - bool m_breakOnCmd; - bool m_breakOnInvalidArg; -}; - -template -inline -Parser::Parser(const ParserRow rows[], class InputStream & in, - bool b_cmd, bool b_empty, bool b_iarg){ - impl = new ParserImpl((ParserImpl::DummyRow *)rows, in, - b_cmd, b_empty, b_iarg); -} - -template -inline -Parser::~Parser(){ - 
delete impl; -} - -template -inline -bool -Parser::run(Context & ctx, T & t, volatile bool * stop) const { - const Properties * p; - DEBUG("Executing Parser::run"); - if(impl->run((ParserImpl::Context*)&ctx, &p, stop)){ - const ParserRow * cmd = ctx.m_currentCmd; // Cast to correct type - if(cmd == 0){ - /** - * Should happen if run returns true - */ - abort(); - } - - for(unsigned i = 0; i * alias = ctx.m_aliasUsed[i]; - if(alias->function != 0){ - /** - * Report alias usage with callback (if specified by user) - */ - DEBUG("Alias usage with callback"); - (t.* alias->function)(ctx, * p); - } - } - - if(cmd->function == 0){ - ctx.m_status = CommandWithoutFunction; - DEBUG("CommandWithoutFunction"); - delete p; - return false; - } - (t.* cmd->function)(ctx, * p); // Call the function - delete p; - return true; - } - DEBUG(""); - return false; -} - -template -inline -const Properties * -Parser::parse(Context &ctx, T &t) { - const Properties * p; - volatile bool stop = false; - DEBUG("Executing Parser::parse"); - - if(impl->run((ParserImpl::Context*)&ctx, &p, &stop)){ - const ParserRow * cmd = ctx.m_currentCmd; // Cast to correct type - if(cmd == 0){ - /** - * Should happen if run returns true - */ - abort(); - } - - for(unsigned i = 0; i * alias = ctx.m_aliasUsed[i]; - if(alias->function != 0){ - /** - * Report alias usage with callback (if specified by user) - */ - DEBUG("Alias usage with callback"); - (t.* alias->function)(ctx, * p); - } - } - - if(cmd->function == 0){ - DEBUG("CommandWithoutFunction"); - ctx.m_status = CommandWithoutFunction; - return p; - } - return p; - } - DEBUG(""); - return NULL; -} - -template -inline -bool -Parser::getBreakOnCommand() const{ - return impl->m_breakOnCmd; -} - -template -inline -void -Parser::setBreakOnCommand(bool v){ - impl->m_breakOnCmd = v; -} - -template -inline -bool -Parser::getBreakOnEmptyLine() const{ - return impl->m_breakOnEmpty; -} -template -inline -void -Parser::setBreakOnEmptyLine(bool v){ - impl->m_breakOnEmpty = v; -} - -template -inline -bool -Parser::getBreakOnInvalidArg() const{ - return impl->m_breakOnInvalidArg; -} - -template -inline -void -Parser::setBreakOnInvalidArg(bool v){ - impl->m_breakOnInvalidArg = v; -} - -#endif diff --git a/storage/ndb/include/util/Properties.hpp b/storage/ndb/include/util/Properties.hpp deleted file mode 100644 index 6f95a07e9a9..00000000000 --- a/storage/ndb/include/util/Properties.hpp +++ /dev/null @@ -1,250 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PROPERTIES_HPP -#define PROPERTIES_HPP - -#include -#include -#include - -enum PropertiesType { - PropertiesType_Uint32 = 0, - PropertiesType_char = 1, - PropertiesType_Properties = 2, - PropertiesType_Uint64 = 3 -}; - -/** - * @struct Property - * @brief Stores one (name, value)-pair - * - * Value can be of type Properties, i.e. 
a Property may contain - * a Properties object. - */ -struct Property { - Property(const char* name, Uint32 val); - Property(const char* name, Uint64 val); - Property(const char* name, const char * value); - Property(const char* name, const class Properties * value); - ~Property(); -private: - friend class Properties; - struct PropertyImpl * impl; -}; - -/** - * @class Properties - * @brief Stores information in (name, value)-pairs - */ -class Properties { -public: - static const char delimiter; - static const char version[]; - - Properties(bool case_insensitive= false); - Properties(const Properties &); - Properties(const Property *, int len); - virtual ~Properties(); - - /** - * Set/Get wheather names in the Properties should be compared - * w/o case. - * NOTE: The property is automatically applied to all propoerties put - * into this after a called to setCaseInsensitiveNames has been made - * But properties already in when calling setCaseInsensitiveNames will - * not be affected - */ - void setCaseInsensitiveNames(bool value); - bool getCaseInsensitiveNames() const; - - /** - * Insert an array of value(s) - */ - void put(const Property *, int len); - - bool put(const char * name, Uint32 value, bool replace = false); - bool put64(const char * name, Uint64 value, bool replace = false); - bool put(const char * name, const char * value, bool replace = false); - bool put(const char * name, const Properties * value, bool replace = false); - - /** - * Same as put above, - * except that _%d (where %d is a number) is added to the name - * Compare get(name, no) - */ - bool put(const char *, Uint32 no, Uint32, bool replace = false); - bool put64(const char *, Uint32 no, Uint64, bool replace = false); - bool put(const char *, Uint32 no, const char *, bool replace = false); - bool put(const char *, Uint32 no, const Properties *, bool replace = false); - - - bool getTypeOf(const char * name, PropertiesType * type) const; - - /** @return true if Properties object contains name */ - bool contains(const char * name) const; - - bool get(const char * name, Uint32 * value) const; - bool get(const char * name, Uint64 * value) const; - bool get(const char * name, const char ** value) const; - bool get(const char * name, BaseString & value) const; - bool get(const char * name, const Properties ** value) const; - - bool getCopy(const char * name, char ** value) const; - bool getCopy(const char * name, Properties ** value) const; - - /** - * Same as get above - * except that _%d (where %d = no) is added to the name - */ - bool getTypeOf(const char * name, Uint32 no, PropertiesType * type) const; - bool contains(const char * name, Uint32 no) const; - - bool get(const char * name, Uint32 no, Uint32 * value) const; - bool get(const char * name, Uint32 no, Uint64 * value) const; - bool get(const char * name, Uint32 no, const char ** value) const; - bool get(const char * name, Uint32 no, const Properties ** value) const; - - bool getCopy(const char * name, Uint32 no, char ** value) const; - bool getCopy(const char * name, Uint32 no, Properties ** value) const; - - void clear(); - - void remove(const char * name); - - void print(FILE * file = stdout, const char * prefix = 0) const; - /** - * Iterator over names - */ - class Iterator { - public: - Iterator(const Properties* prop); - - const char* first(); - const char* next(); - private: - const Properties* m_prop; - Uint32 m_iterator; - }; - friend class Properties::Iterator; - - Uint32 getPackedSize() const; - bool pack(Uint32 * buf) const; - bool pack(UtilBuffer 
&buf) const; - bool unpack(const Uint32 * buf, Uint32 bufLen); - bool unpack(UtilBuffer &buf); - - Uint32 getPropertiesErrno() const { return propErrno; } - Uint32 getOSErrno() const { return osErrno; } -private: - Uint32 propErrno; - Uint32 osErrno; - - friend class PropertiesImpl; - class PropertiesImpl * impl; - class Properties * parent; - - void setErrno(Uint32 pErr, Uint32 osErr = 0) const ; -}; - -/** - * Error code for properties - */ - -/** - * No error - */ -extern const Uint32 E_PROPERTIES_OK; - -/** - * Invalid name in put, names can not contain Properties::delimiter - */ -extern const Uint32 E_PROPERTIES_INVALID_NAME; - -/** - * Element did not exist when using get - */ -extern const Uint32 E_PROPERTIES_NO_SUCH_ELEMENT; - -/** - * Element had wrong type when using get - */ -extern const Uint32 E_PROPERTIES_INVALID_TYPE; - -/** - * Element already existed when using put, and replace was not specified - */ -extern const Uint32 E_PROPERTIES_ELEMENT_ALREADY_EXISTS; - -/** - * Invalid version on properties file you are trying to read - */ -extern const Uint32 E_PROPERTIES_INVALID_VERSION_WHILE_UNPACKING; - -/** - * When unpacking an buffer - * found that buffer is to short - * - * Probably an invlaid buffer - */ -extern const Uint32 E_PROPERTIES_INVALID_BUFFER_TO_SHORT; - -/** - * Error when packing, can not allocate working buffer - * - * Note: OS error is set - */ -extern const Uint32 E_PROPERTIES_ERROR_MALLOC_WHILE_PACKING; - -/** - * Error when unpacking, can not allocate working buffer - * - * Note: OS error is set - */ -extern const Uint32 E_PROPERTIES_ERROR_MALLOC_WHILE_UNPACKING; - -/** - * Error when unpacking, invalid checksum - * - */ -extern const Uint32 E_PROPERTIES_INVALID_CHECKSUM; - -/** - * Error when unpacking - * No of items > 0 while size of buffer (left) <= 0 - */ -extern const Uint32 E_PROPERTIES_BUFFER_TO_SMALL_WHILE_UNPACKING; - -inline bool -Properties::unpack(UtilBuffer &buf) { - return unpack((const Uint32 *)buf.get_data(), buf.length()); -} - -inline bool -Properties::pack(UtilBuffer &buf) const { - Uint32 size = getPackedSize(); - void *tmp_buf = buf.append(size); - if(tmp_buf == 0) - return false; - bool ret = pack((Uint32 *)tmp_buf); - if(ret == false) - return false; - return true; -} - - - -#endif diff --git a/storage/ndb/include/util/SimpleProperties.hpp b/storage/ndb/include/util/SimpleProperties.hpp deleted file mode 100644 index 4fe981ef4da..00000000000 --- a/storage/ndb/include/util/SimpleProperties.hpp +++ /dev/null @@ -1,301 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SIMPLE_PROPERTIES_HPP -#define SIMPLE_PROPERTIES_HPP - -#include -#include - -/** - * @class SimpleProperties - * @brief Key-value-pair container. Actully a list of named elements. 
- * - * SimpleProperties: - * - The keys are Uint16 - * - The values are either Uint32 or null terminated c-strings - * - * @note Keys may be repeated. - * - * Examples of things that can be stored in a SimpleProperties object: - * - Lists like: ((1, "foo"), (2, "bar"), (3, 32), (2, "baz")) - */ -class SimpleProperties { -public: - /** - * Value types - */ - enum ValueType { - Uint32Value = 0, - StringValue = 1, - BinaryValue = 2, - InvalidValue = 3 - }; - - /** - * Struct for defining mapping to be used with unpack - */ - struct SP2StructMapping { - Uint16 Key; - Uint32 Offset; - ValueType Type; - Uint32 minValue; - Uint32 maxValue; - Uint32 Length_Offset; // Offset used for looking up length of - // data if Type = BinaryValue - }; - - /** - * UnpackStatus - Value returned from unpack - */ - enum UnpackStatus { - Eof = 0, // Success, end of SimpleProperties object reached - Break = 1, // Success - TypeMismatch = 2, - ValueTooLow = 3, - ValueTooHigh = 4, - UnknownKey = 5, - OutOfMemory = 6 // Only used when packing - }; - - /** - * Unpack - */ - class Reader; - static UnpackStatus unpack(class Reader & it, - void * dst, - const SP2StructMapping[], Uint32 mapSz, - bool ignoreMinMax, - bool ignoreUnknownKeys); - - class Writer; - static UnpackStatus pack(class Writer &, - const void * src, - const SP2StructMapping[], Uint32 mapSz, - bool ignoreMinMax); - - /** - * Reader class - */ - class Reader { - public: - virtual ~Reader() {} - - /** - * Move to first element - * Return true if element exist - */ - bool first(); - - /** - * Move to next element - * Return true if element exist - */ - bool next(); - - /** - * Is this valid - */ - bool valid() const; - - /** - * Get key - * Note only valid is valid() == true - */ - Uint16 getKey() const; - - /** - * Get value length in bytes - (including terminating 0 for strings) - * Note only valid is valid() == true - */ - Uint16 getValueLen() const; - - /** - * Get value type - * Note only valid is valid() == true - */ - ValueType getValueType() const; - - /** - * Get value - * Note only valid is valid() == true - */ - Uint32 getUint32() const; - char * getString(char * dst) const; - - /** - * Print the complete simple properties (for debugging) - */ - void printAll(NdbOut& ndbout); - - private: - bool readValue(); - - Uint16 m_key; - Uint16 m_itemLen; - union { - Uint32 m_ui32_value; - Uint32 m_strLen; // Including 0-byte in words - }; - ValueType m_type; - protected: - Reader(); - virtual void reset() = 0; - - virtual bool step(Uint32 len) = 0; - virtual bool getWord(Uint32 * dst) = 0; - virtual bool peekWord(Uint32 * dst) const = 0; - virtual bool peekWords(Uint32 * dst, Uint32 len) const = 0; - }; - - /** - * Writer class - */ - class Writer { - public: - Writer() {} - - bool first(); - bool add(Uint16 key, Uint32 value); - bool add(Uint16 key, const char * value); - bool add(Uint16 key, const void* value, int len); - protected: - virtual ~Writer() {} - virtual bool reset() = 0; - virtual bool putWord(Uint32 val) = 0; - virtual bool putWords(const Uint32 * src, Uint32 len) = 0; - private: - bool add(const char* value, int len); - }; -}; - -/** - * Reader for linear memory - */ -class SimplePropertiesLinearReader : public SimpleProperties::Reader { -public: - SimplePropertiesLinearReader(const Uint32 * src, Uint32 len); - virtual ~SimplePropertiesLinearReader() {} - - virtual void reset(); - virtual bool step(Uint32 len); - virtual bool getWord(Uint32 * dst); - virtual bool peekWord(Uint32 * dst) const ; - virtual bool peekWords(Uint32 * dst, 
Uint32 len) const; -private: - Uint32 m_len; - Uint32 m_pos; - const Uint32 * m_src; -}; - -/** - * Writer for linear memory - */ -class LinearWriter : public SimpleProperties::Writer { -public: - LinearWriter(Uint32 * src, Uint32 len); - virtual ~LinearWriter() {} - - virtual bool reset(); - virtual bool putWord(Uint32 val); - virtual bool putWords(const Uint32 * src, Uint32 len); - Uint32 getWordsUsed() const; -private: - Uint32 m_len; - Uint32 m_pos; - Uint32 * m_src; -}; - -/** - * Writer for UtilBuffer - */ -class UtilBufferWriter : public SimpleProperties::Writer { -public: - UtilBufferWriter(class UtilBuffer & buf); - virtual ~UtilBufferWriter() {} - - virtual bool reset(); - virtual bool putWord(Uint32 val); - virtual bool putWords(const Uint32 * src, Uint32 len); - Uint32 getWordsUsed() const; -private: - class UtilBuffer & m_buf; -}; - -/** - * Reader for long signal section memory - * - * - * Implemented in kernel/vm/SimplePropertiesSection.cpp - */ -class SimplePropertiesSectionReader : public SimpleProperties::Reader { -public: - SimplePropertiesSectionReader(struct SegmentedSectionPtr &, - class SectionSegmentPool &); - virtual ~SimplePropertiesSectionReader() {} - - virtual void reset(); - virtual bool step(Uint32 len); - virtual bool getWord(Uint32 * dst); - virtual bool peekWord(Uint32 * dst) const ; - virtual bool peekWords(Uint32 * dst, Uint32 len) const; - Uint32 getSize() const; - bool getWords(Uint32 * dst, Uint32 len); - -private: - Uint32 m_pos; - Uint32 m_len; - class SectionSegmentPool & m_pool; - struct SectionSegment * m_head; - struct SectionSegment * m_currentSegment; -}; - -inline -Uint32 SimplePropertiesSectionReader::getSize() const -{ - return m_len; -} - -/** - * Writer for long signal section memory - * - * - * Implemented in kernel/vm/SimplePropertiesSection.cpp - */ -class SimplePropertiesSectionWriter : public SimpleProperties::Writer { -public: - SimplePropertiesSectionWriter(class SectionSegmentPool &); - virtual ~SimplePropertiesSectionWriter() {} - - virtual bool reset(); - virtual bool putWord(Uint32 val); - virtual bool putWords(const Uint32 * src, Uint32 len); - - /** - * This "unlinks" the writer from the memory - */ - void getPtr(struct SegmentedSectionPtr & dst); - -private: - Int32 m_pos; - Uint32 m_sz; - class SectionSegmentPool & m_pool; - struct SectionSegment * m_head; - Uint32 m_prevPtrI; // Prev to m_currentSegment - struct SectionSegment * m_currentSegment; -}; - -#endif diff --git a/storage/ndb/include/util/SocketAuthenticator.hpp b/storage/ndb/include/util/SocketAuthenticator.hpp deleted file mode 100644 index eadd092d013..00000000000 --- a/storage/ndb/include/util/SocketAuthenticator.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
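A small round-trip sketch for the reader/writer pairs declared above, assuming the deleted SimpleProperties.hpp and the NDB Uint16/Uint32 typedefs are available; the key numbers, the 64-word buffer and the roundtrip() name are illustrative only.

#include <stdio.h>
#include "SimpleProperties.hpp"   /* the deleted header quoted above */

void roundtrip()
{
  Uint32 buf[64];
  LinearWriter w(buf, 64);
  w.first();                      /* position the writer at the start of the buffer */
  w.add((Uint16)1, (Uint32)42);   /* Uint32Value under key 1 */
  w.add((Uint16)2, "hello");      /* StringValue under key 2, NUL byte included */

  SimplePropertiesLinearReader r(buf, w.getWordsUsed());
  for (bool more = r.first(); more && r.valid(); more = r.next())
  {
    if (r.getValueType() == SimpleProperties::Uint32Value)
      printf("key %d = %u\n", (int)r.getKey(), (unsigned)r.getUint32());
    else if (r.getValueType() == SimpleProperties::StringValue)
    {
      char text[64];
      r.getString(text);          /* copies the NUL-terminated value into text */
      printf("key %d = %s\n", (int)r.getKey(), text);
    }
  }
}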
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SOCKET_AUTHENTICATOR_HPP -#define SOCKET_AUTHENTICATOR_HPP - -class SocketAuthenticator -{ -public: - SocketAuthenticator() {} - virtual ~SocketAuthenticator() {}; - virtual bool client_authenticate(int sockfd) = 0; - virtual bool server_authenticate(int sockfd) = 0; -}; - -class SocketAuthSimple : public SocketAuthenticator -{ - const char *m_passwd; - const char *m_username; -public: - SocketAuthSimple(const char *username, const char *passwd); - virtual ~SocketAuthSimple(); - virtual bool client_authenticate(int sockfd); - virtual bool server_authenticate(int sockfd); -}; - -#endif // SOCKET_AUTHENTICATOR_HPP diff --git a/storage/ndb/include/util/SocketClient.hpp b/storage/ndb/include/util/SocketClient.hpp deleted file mode 100644 index 758ad7a30e2..00000000000 --- a/storage/ndb/include/util/SocketClient.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SOCKET_CLIENT_HPP -#define SOCKET_CLIENT_HPP - -#include -class SocketAuthenticator; - -class SocketClient -{ - NDB_SOCKET_TYPE m_sockfd; - struct sockaddr_in m_servaddr; - unsigned int m_connect_timeout_sec; - unsigned short m_port; - char *m_server_name; - SocketAuthenticator *m_auth; -public: - SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa = 0); - ~SocketClient(); - bool init(); - void set_port(unsigned short port) { - m_port = port; - m_servaddr.sin_port = htons(m_port); - }; - void set_connect_timeout(unsigned int s) { - m_connect_timeout_sec= s; - } - unsigned short get_port() { return m_port; }; - char *get_server_name() { return m_server_name; }; - int bind(const char* toaddress, unsigned short toport); - NDB_SOCKET_TYPE connect(const char* toaddress = 0, unsigned short port = 0); - bool close(); -}; - -#endif // SOCKET_ClIENT_HPP diff --git a/storage/ndb/include/util/SocketServer.hpp b/storage/ndb/include/util/SocketServer.hpp deleted file mode 100644 index a8173031b28..00000000000 --- a/storage/ndb/include/util/SocketServer.hpp +++ /dev/null @@ -1,142 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
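A hedged sketch of the SocketClient interface declared just above. The host name and connect_once() are placeholders (1186 is only the default management port mentioned in ndb_configure.m4 further down), and checking the returned NDB_SOCKET_TYPE for validity is left out because its representation is platform-specific.

#include "SocketClient.hpp"   /* the deleted header quoted above */

bool connect_once()
{
  SocketClient client("mgmhost.example", 1186);  /* no authenticator */
  if (!client.init())                 /* allocates the socket */
    return false;
  client.set_connect_timeout(30);     /* seconds */
  NDB_SOCKET_TYPE sock = client.connect();
  /* ... talk on sock, e.g. with the socket_io.h helpers deleted further down ... */
  (void)sock;
  return client.close();
}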
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SOCKET_SERVER_HPP -#define SOCKET_SERVER_HPP - -#include -#include -#include -#include - -extern "C" void* sessionThread_C(void*); -extern "C" void* socketServerThread_C(void*); - -/** - * Socket Server - */ -class SocketServer { -public: - /** - * A Session - */ - class Session { - public: - virtual ~Session() {} - virtual void runSession(){} - virtual void stopSession(){ m_stop = true; } - protected: - friend class SocketServer; - friend void* sessionThread_C(void*); - Session(NDB_SOCKET_TYPE sock): m_socket(sock) - { - DBUG_ENTER("SocketServer::Session"); - DBUG_PRINT("enter",("NDB_SOCKET: %d", m_socket)); - m_stop = m_stopped = false; - DBUG_VOID_RETURN; - } - - bool m_stop; // Has the session been ordered to stop? - bool m_stopped; // Has the session stopped? - - NDB_SOCKET_TYPE m_socket; - }; - - /** - * A service i.e. a session factory - */ - class Service { - public: - Service() {} - virtual ~Service(){} - - /** - * Returned Session will be ran in own thread - * - * To manage threads self, just return NULL - */ - virtual Session * newSession(NDB_SOCKET_TYPE theSock) = 0; - virtual void stopSessions(){} - }; - - /** - * Constructor / Destructor - */ - SocketServer(unsigned maxSessions = ~(unsigned)0); - ~SocketServer(); - - /** - * Setup socket and bind it - * then close the socket - * Returns true if succeding in binding - */ - static bool tryBind(unsigned short port, const char * intface = 0); - - /** - * Setup socket - * bind & listen - * Returns false if no success - */ - bool setup(Service *, unsigned short *port, const char * pinterface = 0); - - /** - * start/stop the server - */ - void startServer(); - void stopServer(); - - /** - * stop sessions - * - * Note: Implies stopServer - */ - void stopSessions(bool wait = false); - - void foreachSession(void (*f)(Session*, void*), void *data); - void checkSessions(); - -private: - struct SessionInstance { - Service * m_service; - Session * m_session; - NdbThread * m_thread; - }; - struct ServiceInstance { - Service * m_service; - NDB_SOCKET_TYPE m_socket; - }; - NdbLockable m_session_mutex; - Vector m_sessions; - MutexVector m_services; - unsigned m_maxSessions; - - void doAccept(); - void checkSessionsImpl(); - void startSession(SessionInstance &); - - /** - * Note, this thread is only used when running interactive - * - */ - bool m_stopThread; - struct NdbThread * m_thread; - NdbLockable m_threadLock; - void doRun(); - friend void* socketServerThread_C(void*); - friend void* sessionThread_C(void*); -}; - -#endif diff --git a/storage/ndb/include/util/UtilBuffer.hpp b/storage/ndb/include/util/UtilBuffer.hpp deleted file mode 100644 index edac8b92a86..00000000000 --- a/storage/ndb/include/util/UtilBuffer.hpp +++ /dev/null @@ -1,103 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
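The Service/Session pair above is a small thread-per-connection framework; a sketch of how it was wired up, with EchoSession, EchoService, serve() and the port value all invented for illustration.

#include "SocketServer.hpp"   /* the deleted header quoted above */

class EchoSession : public SocketServer::Session
{
public:
  EchoSession(NDB_SOCKET_TYPE sock) : SocketServer::Session(sock) {}
  virtual void runSession()
  {
    while (!m_stop) { /* read from m_socket, write a reply ... */ break; }
  }
};

class EchoService : public SocketServer::Service
{
public:
  /* called for every accepted connection; the session runs in its own thread */
  virtual SocketServer::Session * newSession(NDB_SOCKET_TYPE sock)
  { return new EchoSession(sock); }
};

void serve()
{
  SocketServer server;
  EchoService service;
  unsigned short port = 1186;          /* passed by address so the bound port can be read back */
  if (server.setup(&service, &port))   /* bind + listen */
    server.startServer();              /* start accepting connections */
  /* ... later: server.stopSessions(true);  -- implies stopServer(), per the comment above */
}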
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __BUFFER_HPP_INCLUDED__ -#define __BUFFER_HPP_INCLUDED__ - -#include - -/* This class represents a buffer of binary data, where you can append - * data at the end, and later read the entire bunch. - * It will take care of the hairy details of realloc()ing the space - * for you - */ -class UtilBuffer { -public: - UtilBuffer() { data = NULL; len = 0; alloc_size = 0; }; - ~UtilBuffer() { if(data) free(data); data = NULL; len = 0; alloc_size = 0; }; - - - int reallocate(size_t newsize) { - if(newsize < len) { - errno = EINVAL; - return -1; - } - void *newdata; - if((newdata = realloc(data, newsize)) == NULL) { - errno = ENOMEM; - return -1; - } - alloc_size = newsize; - data = newdata; - return 0; - }; - - int grow(size_t l) { - if(l > alloc_size) - return reallocate(l); - return 0; - }; - - int append(const void *d, size_t l) { - int ret; - ret = grow(len+l); - if(ret != 0) - return ret; - - memcpy((char *)data+len, d, l); - len+=l; - - return 0; - }; - - void * append(size_t l){ - if(grow(len+l) != 0) - return 0; - - void * ret = (char*)data+len; - len += l; - return ret; - } - - int assign(const void * d, size_t l) { - /* Free the old data only after copying, in case d==data. */ - void *old_data= data; - data = NULL; - len = 0; - alloc_size = 0; - int ret= append(d, l); - if (old_data) - free(old_data); - return ret; - } - - void clear() { - len = 0; - } - - int length() const { return len; } - - void *get_data() const { return data; } - - bool empty () const { return len == 0; } -private: - void *data; /* Pointer to data storage */ - size_t len; /* Size of the stored data */ - size_t alloc_size; /* Size of the allocated space, - * i.e. len can grow to this size */ -}; - -#endif /* !__BUFFER_HPP_INCLUDED__ */ diff --git a/storage/ndb/include/util/Vector.hpp b/storage/ndb/include/util/Vector.hpp deleted file mode 100644 index bf0bf3fdd15..00000000000 --- a/storage/ndb/include/util/Vector.hpp +++ /dev/null @@ -1,364 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
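UtilBuffer above is a plain grow-on-append byte buffer; a short sketch of its two append() overloads, with buffer_demo() and the payloads invented.

#include <stdio.h>
#include <string.h>
#include "UtilBuffer.hpp"   /* the deleted header quoted above */

void buffer_demo()
{
  UtilBuffer buf;
  buf.append("abc", 3);                 /* copies 3 bytes, growing the allocation as needed */
  char * p = (char *)buf.append(3);     /* reserves 3 more bytes and returns a pointer to them */
  if (p)
    memcpy(p, "def", 3);
  printf("%d bytes\n", buf.length());   /* 6 */
  buf.clear();                          /* resets the length but keeps the allocation */
}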
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_VECTOR_HPP -#define NDB_VECTOR_HPP - -#include -#include - -template -struct Vector { -public: - Vector(int sz = 10); - ~Vector(); - - T& operator[](unsigned i); - const T& operator[](unsigned i) const; - unsigned size() const { return m_size; }; - - int push_back(const T &); - void push(const T&, unsigned pos); - T& set(T&, unsigned pos, T& fill_obj); - T& back(); - - void erase(unsigned index); - - void clear(); - - int fill(unsigned new_size, T & obj); - - Vector& operator=(const Vector&); - - T* getBase() { return m_items;} - const T* getBase() const { return m_items;} -private: - T * m_items; - unsigned m_size; - unsigned m_incSize; - unsigned m_arraySize; -}; - -template -Vector::Vector(int i){ - m_items = new T[i]; - if (m_items == NULL) - { - errno = ENOMEM; - m_size = 0; - m_arraySize = 0; - m_incSize = 0; - return; - } - m_size = 0; - m_arraySize = i; - m_incSize = 50; -} - -template -Vector::~Vector(){ - delete[] m_items; - // safety for placement new usage - m_items = 0; - m_size = 0; - m_arraySize = 0; -} - -template -T & -Vector::operator[](unsigned i){ - if(i >= m_size) - abort(); - return m_items[i]; -} - -template -const T & -Vector::operator[](unsigned i) const { - if(i >= m_size) - abort(); - return m_items[i]; -} - -template -T & -Vector::back(){ - return (* this)[m_size - 1]; -} - -template -int -Vector::push_back(const T & t){ - if(m_size == m_arraySize){ - T * tmp = new T [m_arraySize + m_incSize]; - if(tmp == NULL) - { - errno = ENOMEM; - return -1; - } - for (unsigned k = 0; k < m_size; k++) - tmp[k] = m_items[k]; - delete[] m_items; - m_items = tmp; - m_arraySize = m_arraySize + m_incSize; - } - m_items[m_size] = t; - m_size++; - return 0; -} - -template -void -Vector::push(const T & t, unsigned pos) -{ - push_back(t); - if (pos < m_size - 1) - { - for(unsigned i = m_size - 1; i > pos; i--) - { - m_items[i] = m_items[i-1]; - } - m_items[pos] = t; - } -} - -template -T& -Vector::set(T & t, unsigned pos, T& fill_obj) -{ - fill(pos, fill_obj); - T& ret = m_items[pos]; - m_items[pos] = t; - return ret; -} - -template -void -Vector::erase(unsigned i){ - if(i >= m_size) - abort(); - - for (unsigned k = i; k + 1 < m_size; k++) - m_items[k] = m_items[k + 1]; - m_size--; -} - -template -void -Vector::clear(){ - m_size = 0; -} - -template -int -Vector::fill(unsigned new_size, T & obj){ - while(m_size <= new_size) - if (push_back(obj)) - return -1; - return 0; -} - -template -Vector& -Vector::operator=(const Vector& obj){ - if(this != &obj){ - clear(); - for(size_t i = 0; i -struct MutexVector : public NdbLockable { - MutexVector(int sz = 10); - ~MutexVector(); - - T& operator[](unsigned i); - const T& operator[](unsigned i) const; - unsigned size() const { return m_size; }; - - int push_back(const T &); - int push_back(const T &, bool lockMutex); - T& back(); - - void erase(unsigned index); - void erase(unsigned index, bool lockMutex); - - void clear(); - void clear(bool lockMutex); - - int fill(unsigned new_size, T & obj); -private: - T * m_items; - unsigned m_size; - unsigned m_incSize; - unsigned m_arraySize; -}; - -template -MutexVector::MutexVector(int i){ - m_items = new T[i]; - if (m_items == NULL) - { - errno = ENOMEM; - m_size = 0; - m_arraySize = 0; - m_incSize = 0; - return; - } - m_size = 0; - m_arraySize = i; - m_incSize = 50; -} - 
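Vector above is a minimal growable array template; a sketch of its error convention (push_back() returns -1 and sets errno on allocation failure, out-of-range indexing calls abort()). Vector<int> and vector_demo() are illustrative only.

#include "Vector.hpp"   /* the deleted header quoted above */

void vector_demo()
{
  Vector<int> v;                  /* initial capacity 10, grows by m_incSize (50) when full */
  for (int i = 0; i < 5; i++)
    if (v.push_back(i) == -1)     /* -1 with errno == ENOMEM on allocation failure */
      return;
  int last = v.back();            /* 4; indexing past size() would abort() */
  v.erase(0);                     /* shifts the remaining elements down */
  unsigned n = v.size();          /* 4 */
  (void)last; (void)n;
}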
-template -MutexVector::~MutexVector(){ - delete[] m_items; - // safety for placement new usage - m_items = 0; - m_size = 0; - m_arraySize = 0; -} - -template -T & -MutexVector::operator[](unsigned i){ - if(i >= m_size) - abort(); - return m_items[i]; -} - -template -const T & -MutexVector::operator[](unsigned i) const { - if(i >= m_size) - abort(); - return m_items[i]; -} - -template -T & -MutexVector::back(){ - return (* this)[m_size - 1]; -} - -template -int -MutexVector::push_back(const T & t){ - lock(); - if(m_size == m_arraySize){ - T * tmp = new T [m_arraySize + m_incSize]; - if (tmp == NULL) - { - errno = ENOMEM; - unlock(); - return -1; - } - for (unsigned k = 0; k < m_size; k++) - tmp[k] = m_items[k]; - delete[] m_items; - m_items = tmp; - m_arraySize = m_arraySize + m_incSize; - } - m_items[m_size] = t; - m_size++; - unlock(); - return 0; -} - -template -int -MutexVector::push_back(const T & t, bool lockMutex){ - if(lockMutex) - lock(); - if(m_size == m_arraySize){ - T * tmp = new T [m_arraySize + m_incSize]; - if (tmp == NULL) - { - errno = ENOMEM; - if(lockMutex) - unlock(); - return -1; - } - for (unsigned k = 0; k < m_size; k++) - tmp[k] = m_items[k]; - delete[] m_items; - m_items = tmp; - m_arraySize = m_arraySize + m_incSize; - } - m_items[m_size] = t; - m_size++; - if(lockMutex) - unlock(); - return 0; -} - -template -void -MutexVector::erase(unsigned i){ - if(i >= m_size) - abort(); - - lock(); - for (unsigned k = i; k + 1 < m_size; k++) - m_items[k] = m_items[k + 1]; - m_size--; - unlock(); -} - -template -void -MutexVector::erase(unsigned i, bool _lock){ - if(i >= m_size) - abort(); - - if(_lock) - lock(); - for (unsigned k = i; k + 1 < m_size; k++) - m_items[k] = m_items[k + 1]; - m_size--; - if(_lock) - unlock(); -} - -template -void -MutexVector::clear(){ - lock(); - m_size = 0; - unlock(); -} - -template -void -MutexVector::clear(bool l){ - if(l) lock(); - m_size = 0; - if(l) unlock(); -} - -template -int -MutexVector::fill(unsigned new_size, T & obj){ - while(m_size <= new_size) - if (push_back(obj)) - return -1; - return 0; -} - -#endif diff --git a/storage/ndb/include/util/basestring_vsnprintf.h b/storage/ndb/include/util/basestring_vsnprintf.h deleted file mode 100644 index f304c2707a7..00000000000 --- a/storage/ndb/include/util/basestring_vsnprintf.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
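MutexVector above duplicates each mutating operation with an extra bool flag; reading the bodies, the flag skips the internal locking, presumably for callers that already serialise access themselves. A tiny sketch, with MutexVector<int> and mutex_vector_demo() invented:

#include "Vector.hpp"   /* MutexVector lives in the same deleted header */

void mutex_vector_demo()
{
  MutexVector<int> mv;
  mv.push_back(1);          /* takes and releases the internal mutex */
  mv.push_back(2, false);   /* skips the locking entirely */
  mv.erase(0);              /* locked variant */
  mv.clear(false);          /* unlocked variant */
}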
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BASESTRING_VSNPRINTF_H -#define BASESTRING_VSNPRINTF_H -#include -#if defined(__cplusplus) -extern "C" -{ -#endif -int basestring_snprintf(char*, size_t, const char*, ...); -int basestring_vsnprintf(char*,size_t, const char*,va_list); -#if defined(__cplusplus) -} -#endif -#endif diff --git a/storage/ndb/include/util/md5_hash.hpp b/storage/ndb/include/util/md5_hash.hpp deleted file mode 100644 index 6f92dd7277f..00000000000 --- a/storage/ndb/include/util/md5_hash.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MD5_HASH_H -#define MD5_HASH_H - -#include - -// External declaration of hash function -void md5_hash(Uint32 result[4], const Uint64* keybuf, Uint32 no_of_32_words); - -inline -Uint32 -md5_hash(const Uint64* keybuf, Uint32 no_of_32_words) -{ - Uint32 result[4]; - md5_hash(result, keybuf, no_of_32_words); - return result[0]; -} - -#endif diff --git a/storage/ndb/include/util/ndb_opts.h b/storage/ndb/include/util/ndb_opts.h deleted file mode 100644 index d47a0243247..00000000000 --- a/storage/ndb/include/util/ndb_opts.h +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
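md5_hash.hpp above exposes the same hash twice; note that the key is passed as Uint64* but its length is counted in 32-bit words. A sketch assuming the NDB integer typedefs are available; hash_key() is invented.

#include "md5_hash.hpp"   /* the deleted header quoted above */

Uint32 hash_key(const Uint64 * key, Uint32 words_32)
{
  Uint32 digest[4];
  md5_hash(digest, key, words_32);   /* full 128-bit result as four 32-bit words */
  return md5_hash(key, words_32);    /* convenience overload: returns digest[0] only */
}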
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef _NDB_OPTS_H -#define _NDB_OPTS_H - -#include -#include -#include -#include -#include -#include - -#define NDB_STD_OPTS_VARS \ -my_bool opt_ndb_optimized_node_selection - -int opt_ndb_nodeid; -bool opt_endinfo= 0; -my_bool opt_ndb_shm; -my_bool opt_core; -const char *opt_ndb_connectstring= 0; -const char *opt_connect_str= 0; -const char *opt_ndb_mgmd= 0; -char opt_ndb_constrbuf[1024]; -unsigned opt_ndb_constrbuf_len= 0; - -#ifndef DBUG_OFF -const char *opt_debug= 0; -#endif - -#define OPT_NDB_CONNECTSTRING 'c' -#if defined VM_TRACE -#define OPT_WANT_CORE_DEFAULT 1 -#else -#define OPT_WANT_CORE_DEFAULT 0 -#endif - -#define NDB_STD_OPTS_COMMON \ - { "usage", '?', "Display this help and exit.", \ - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ - { "help", '?', "Display this help and exit.", \ - 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ - { "version", 'V', "Output version information and exit.", 0, 0, 0, \ - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0 }, \ - { "ndb-connectstring", OPT_NDB_CONNECTSTRING, \ - "Set connect string for connecting to ndb_mgmd. " \ - "Syntax: \"[nodeid=;][host=][:]\". " \ - "Overrides specifying entries in NDB_CONNECTSTRING and my.cnf", \ - &opt_ndb_connectstring, &opt_ndb_connectstring, \ - 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ - { "ndb-mgmd-host", OPT_NDB_MGMD, \ - "Set host and port for connecting to ndb_mgmd. " \ - "Syntax: [:].", \ - &opt_ndb_mgmd, &opt_ndb_mgmd, 0, \ - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ - { "ndb-nodeid", OPT_NDB_NODEID, \ - "Set node id for this node.", \ - &opt_ndb_nodeid, &opt_ndb_nodeid, 0, \ - GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ - { "ndb-shm", OPT_NDB_SHM,\ - "Allow optimizing using shared memory connections when available",\ - &opt_ndb_shm, &opt_ndb_shm, 0,\ - GET_BOOL, NO_ARG, OPT_NDB_SHM_DEFAULT, 0, 0, 0, 0, 0 },\ - {"ndb-optimized-node-selection", OPT_NDB_OPTIMIZED_NODE_SELECTION,\ - "Select nodes for transactions in a more optimal way",\ - &opt_ndb_optimized_node_selection,\ - &opt_ndb_optimized_node_selection, 0,\ - GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\ - { "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\ - &opt_ndb_connectstring, &opt_ndb_connectstring, \ - 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\ - { "core-file", OPT_WANT_CORE, "Write core on errors.",\ - &opt_core, &opt_core, 0,\ - GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0},\ - {"character-sets-dir", OPT_CHARSETS_DIR,\ - "Directory where character sets are.", &charsets_dir,\ - &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}\ - -#ifndef DBUG_OFF -#define NDB_STD_OPTS(prog_name) \ - { "debug", '#', "Output debug log. 
Often this is 'd:t:o,filename'.", \ - &opt_debug, &opt_debug, \ - 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \ - NDB_STD_OPTS_COMMON -#else -#define NDB_STD_OPTS(prog_name) NDB_STD_OPTS_COMMON -#endif - -static void ndb_std_print_version() -{ - printf("MySQL distrib %s, for %s (%s)\n", - MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); -} - -static void usage(); - -enum ndb_std_options { - OPT_NDB_SHM= 256, - OPT_NDB_SHM_SIGNUM, - OPT_NDB_OPTIMIZED_NODE_SELECTION, - OPT_WANT_CORE, - OPT_NDB_MGMD, - OPT_NDB_NODEID, - OPT_CHARSETS_DIR, - NDB_STD_OPTIONS_LAST /* should always be last in this enum */ -}; - -static my_bool -ndb_std_get_one_option(int optid, - const struct my_option *opt __attribute__((unused)), - char *argument) -{ - switch (optid) { -#ifndef DBUG_OFF - case '#': - if (opt_debug) - { - DBUG_PUSH(opt_debug); - } - else - { - DBUG_PUSH("d:t"); - } - opt_endinfo= 1; - break; -#endif - case 'V': - ndb_std_print_version(); - exit(0); - case '?': - usage(); - exit(0); - case OPT_NDB_SHM: - if (opt_ndb_shm) - { -#ifndef NDB_SHM_TRANSPORTER - printf("Warning: binary not compiled with shared memory support,\n" - "Tcp connections will now be used instead\n"); - opt_ndb_shm= 0; -#endif - } - break; - case OPT_NDB_MGMD: - case OPT_NDB_NODEID: - { - int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, - sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, - "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"", - optid == OPT_NDB_NODEID ? "nodeid=" : "", - argument); - opt_ndb_constrbuf_len+= len; - } - /* fall through to add the connectstring to the end - * and set opt_ndbcluster_connectstring - */ - case OPT_NDB_CONNECTSTRING: - if (opt_ndb_connectstring && opt_ndb_connectstring[0]) - my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, - sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, - "%s%s", opt_ndb_constrbuf_len > 0 ? ",":"", - opt_ndb_connectstring); - else - opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0; - opt_connect_str= opt_ndb_constrbuf; - break; - } - return 0; -} - -#endif /*_NDB_OPTS_H */ diff --git a/storage/ndb/include/util/ndb_rand.h b/storage/ndb/include/util/ndb_rand.h deleted file mode 100644 index 22f06c9e32e..00000000000 --- a/storage/ndb/include/util/ndb_rand.h +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
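To make the fall-through in ndb_std_get_one_option() above easier to follow, an illustration (command line and host name invented) of how the connect string is accumulated in opt_ndb_constrbuf:

/* Given:   --ndb-nodeid=3 --ndb-mgmd-host=mgm1:1186
 *   after --ndb-nodeid=3            opt_ndb_constrbuf = "nodeid=3"
 *   after --ndb-mgmd-host=mgm1:1186 opt_ndb_constrbuf = "nodeid=3,mgm1:1186"
 * Each piece is comma-joined, "nodeid=" is prefixed only for OPT_NDB_NODEID,
 * and the fall-through leaves opt_connect_str pointing at the buffer. */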
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDB_RAND_H -#define NDB_RAND_H - -#define NDB_RAND_MAX 32767 - -#ifdef __cplusplus -extern "C" { -#endif - -int ndb_rand(void); - -void ndb_srand(unsigned seed); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/util/random.h b/storage/ndb/include/util/random.h deleted file mode 100644 index 2bc06da49f6..00000000000 --- a/storage/ndb/include/util/random.h +++ /dev/null @@ -1,84 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef RANDOM_H -#define RANDOM_H - -/*************************************************************** -* I N C L U D E D F I L E S * -***************************************************************/ - -/*************************************************************** -* M A C R O S * -***************************************************************/ - -/***************************************************************/ -/* C O N S T A N T S */ -/***************************************************************/ - - -/*************************************************************** -* D A T A S T R U C T U R E S * -***************************************************************/ - -typedef struct { - unsigned int length; - unsigned int *values; - unsigned int currentIndex; -}RandomSequence; - -typedef struct { - unsigned int length; - unsigned int value; -}SequenceValues; - -/*************************************************************** -* P U B L I C F U N C T I O N S * -***************************************************************/ - -#ifdef __cplusplus -extern "C" { -#endif - - -extern double getTps(unsigned int count, double timeValue); - -/*----------------------------*/ -/* Random Sequences Functions */ -/*----------------------------*/ -extern int initSequence(RandomSequence *seq, SequenceValues *inputValues); -extern unsigned int getNextRandom(RandomSequence *seq); -extern void printSequence(RandomSequence *seq, unsigned int numPerRow); - -/*---------------------------------------------------*/ -/* Code from the glibc, to make sure the same random */ -/* number generator is used by all */ -/*---------------------------------------------------*/ -extern void myRandom48Init(long int seedval); -extern long int myRandom48(unsigned int maxValue); - -#ifdef __cplusplus -} -#endif - -/*************************************************************** -* E X T E R N A L D A T A * -***************************************************************/ - - - -#endif /* RANDOM_H */ - diff --git a/storage/ndb/include/util/socket_io.h b/storage/ndb/include/util/socket_io.h deleted file mode 100644 index 721cc827fc2..00000000000 --- 
a/storage/ndb/include/util/socket_io.h +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef _SOCKET_IO_H -#define _SOCKET_IO_H - -#include - -#include - -#include - -#ifdef __cplusplus -extern "C" { -#endif - - int read_socket(NDB_SOCKET_TYPE, int timeout_ms, char *, int len); - - int readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - char * buf, int buflen, NdbMutex *mutex); - - int write_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, - const char[], int len); - - int print_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, - const char *, ...); - int println_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, - const char *, ...); - int vprint_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, - const char *, va_list); - int vprintln_socket(NDB_SOCKET_TYPE, int timeout_ms, int *time, - const char *, va_list); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/util/uucode.h b/storage/ndb/include/util/uucode.h deleted file mode 100644 index b456fbc8a7f..00000000000 --- a/storage/ndb/include/util/uucode.h +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef UUCODE_H -#define UUCODE_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - - void uuencode(const char * data, int dataLen, FILE * out); - int uudecode(FILE * input, char * outBuf, int bufLen); - - int uuencode_mem(char * dst, const char * src, int src_len); - int uudecode_mem(char * dst, int dst_len, const char * src); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/include/util/version.h b/storage/ndb/include/util/version.h deleted file mode 100644 index a75bdfc0ff6..00000000000 --- a/storage/ndb/include/util/version.h +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
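The uucode.h prototypes above come in FILE* and in-memory flavours; a round-trip sketch of the latter. The buffer sizes and the assumption that the return values are the encoded/decoded lengths are guesses, since the header only gives the prototypes.

#include <string.h>
#include "uucode.h"   /* the deleted header quoted above */

void uu_roundtrip()
{
  const char payload[] = "some\0binary\0data";
  char encoded[128];
  char decoded[sizeof(payload)];

  int enc_len = uuencode_mem(encoded, payload, (int)sizeof(payload));
  int dec_len = uudecode_mem(decoded, (int)sizeof(decoded), encoded);
  (void)enc_len; (void)dec_len;
}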
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#ifndef VERSION_H -#define VERSION_H -#include - -/* some backwards compatible macros */ -#define MAKE_VERSION(A,B,C) NDB_MAKE_VERSION(A,B,C) -#define getMajor(a) ndbGetMajor(a) -#define getMinor(a) ndbGetMinor(a) -#define getBuild(a) ndbGetBuild(a) - -#ifdef __cplusplus -extern "C" { -#endif - - int ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_api_mgmt(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_api_ndb(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_ndb_api(Uint32 ownVersion, Uint32 otherVersion); - int ndbCompatible_ndb_ndb(Uint32 ownVersion, Uint32 otherVersion); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/storage/ndb/lib/.empty b/storage/ndb/lib/.empty deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/storage/ndb/ndb_configure.m4 b/storage/ndb/ndb_configure.m4 deleted file mode 100644 index 21e4627515f..00000000000 --- a/storage/ndb/ndb_configure.m4 +++ /dev/null @@ -1,349 +0,0 @@ -dnl --------------------------------------------------------------------------- -dnl Macro: MYSQL_CHECK_NDBCLUSTER -dnl --------------------------------------------------------------------------- - -# The version of NDB in this version of MySQL is currently fixed -# and not supposed to be changed unless major changes happen in -# storage/ndb directory. -# NOTE! To avoid mixup with MySQL Cluster's version numbers -# this version of NDB is set to 5.5.7 although it's basically -# a copy of MySQL Cluster 6.2.18 -NDB_VERSION_MAJOR=5 -NDB_VERSION_MINOR=5 -NDB_VERSION_BUILD=7 -NDB_VERSION_STATUS="" -TEST_NDBCLUSTER="" - -dnl for build ndb docs - -AC_PATH_PROG(DOXYGEN, doxygen, no) -AC_PATH_PROG(PDFLATEX, pdflatex, no) -AC_PATH_PROG(MAKEINDEX, makeindex, no) - -AC_SUBST(DOXYGEN) -AC_SUBST(PDFLATEX) -AC_SUBST(MAKEINDEX) - - -AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ - AC_ARG_WITH([ndb-sci], - AC_HELP_STRING([--with-ndb-sci=DIR], - [Provide MySQL with a custom location of - sci library. 
Given DIR, sci library is - assumed to be in $DIR/lib and header files - in $DIR/include.]), - [mysql_sci_dir=${withval}], - [mysql_sci_dir=""]) - - case "$mysql_sci_dir" in - "no" ) - have_ndb_sci=no - AC_MSG_RESULT([-- not including sci transporter]) - ;; - * ) - if test -f "$mysql_sci_dir/lib/libsisci.a" -a \ - -f "$mysql_sci_dir/include/sisci_api.h"; then - NDB_SCI_INCLUDES="-I$mysql_sci_dir/include" - NDB_SCI_LIBS="$mysql_sci_dir/lib/libsisci.a" - AC_MSG_RESULT([-- including sci transporter]) - AC_DEFINE([NDB_SCI_TRANSPORTER], [1], - [Including Ndb Cluster DB sci transporter]) - AC_SUBST(NDB_SCI_INCLUDES) - AC_SUBST(NDB_SCI_LIBS) - have_ndb_sci="yes" - AC_MSG_RESULT([found sci transporter in $mysql_sci_dir/{include, lib}]) - else - AC_MSG_RESULT([could not find sci transporter in $mysql_sci_dir/{include, lib}]) - fi - ;; - esac - - AC_ARG_WITH([ndb-test], - [ - --with-ndb-test Include the NDB Cluster ndbapi test programs], - [ndb_test="$withval"], - [ndb_test=no]) - AC_ARG_WITH([ndb-docs], - [ - --with-ndb-docs Include the NDB Cluster ndbapi and mgmapi documentation], - [ndb_docs="$withval"], - [ndb_docs=no]) - AC_ARG_WITH([ndb-port], - [ - --with-ndb-port Port for NDB Cluster management server], - [ndb_port="$withval"], - [ndb_port="default"]) - AC_ARG_WITH([ndb-port-base], - [ - --with-ndb-port-base Base port for NDB Cluster transporters], - [ndb_port_base="$withval"], - [ndb_port_base="default"]) - AC_ARG_WITH([ndb-debug], - [ - --without-ndb-debug Disable special ndb debug features], - [ndb_debug="$withval"], - [ndb_debug="default"]) - AC_ARG_WITH([ndb-ccflags], - AC_HELP_STRING([--with-ndb-ccflags=CFLAGS], - [Extra CFLAGS for ndb compile]), - [ndb_ccflags=${withval}], - [ndb_ccflags=""]) - AC_ARG_WITH([ndb-binlog], - [ - --without-ndb-binlog Disable ndb binlog], - [ndb_binlog="$withval"], - [ndb_binlog="default"]) - - case "$ndb_ccflags" in - "yes") - AC_MSG_RESULT([The --ndb-ccflags option requires a parameter (passed to CC for ndb compilation)]) - ;; - *) - ndb_cxxflags_fix="$ndb_cxxflags_fix $ndb_ccflags" - ;; - esac - - AC_MSG_CHECKING([for NDB Cluster options]) - AC_MSG_RESULT([]) - - have_ndb_test=no - case "$ndb_test" in - yes ) - AC_MSG_RESULT([-- including ndbapi test programs]) - have_ndb_test="yes" - ;; - * ) - AC_MSG_RESULT([-- not including ndbapi test programs]) - ;; - esac - - have_ndb_docs=no - case "$ndb_docs" in - yes ) - AC_MSG_RESULT([-- including ndbapi and mgmapi documentation]) - have_ndb_docs="yes" - ;; - * ) - AC_MSG_RESULT([-- not including ndbapi and mgmapi documentation]) - ;; - esac - - case "$ndb_debug" in - yes ) - AC_MSG_RESULT([-- including ndb extra debug options]) - have_ndb_debug="yes" - ;; - full ) - AC_MSG_RESULT([-- including ndb extra extra debug options]) - have_ndb_debug="full" - ;; - no ) - AC_MSG_RESULT([-- not including ndb extra debug options]) - have_ndb_debug="no" - ;; - * ) - have_ndb_debug="default" - ;; - esac - - AC_MSG_RESULT([done.]) -]) - -AC_DEFUN([NDBCLUSTER_WORKAROUNDS], [ - - #workaround for Sun Forte/x86 see BUG#4681 - case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc in - *solaris*-i?86-no) - CFLAGS="$CFLAGS -DBIG_TABLES" - CXXFLAGS="$CXXFLAGS -DBIG_TABLES" - ;; - *) - ;; - esac - - # workaround for Sun Forte compile problem for ndb - case $SYSTEM_TYPE-$ac_cv_prog_gcc in - *solaris*-no) - ndb_cxxflags_fix="$ndb_cxxflags_fix -instances=static" - ;; - *) - ;; - esac - - # ndb fail for whatever strange reason to link Sun Forte/x86 - # unless using incremental linker - case 
$SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in - *solaris*-i?86-no-yes) - CXXFLAGS="$CXXFLAGS -xildon" - ;; - *) - ;; - esac -]) - -AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [ - - AC_MSG_RESULT([Using NDB Cluster]) - with_partition="yes" - ndb_cxxflags_fix="" - TEST_NDBCLUSTER="--ndbcluster" - - ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_srcdir)/storage/ndb/include -I\$(top_srcdir)/storage/ndb/include/ndbapi -I\$(top_srcdir)/storage/ndb/include/mgmapi" - ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a" - ndbcluster_system_libs="" - ndb_mgmclient_libs="\$(top_builddir)/storage/ndb/src/mgmclient/libndbmgmclient.la" - - MYSQL_CHECK_NDB_OPTIONS - NDBCLUSTER_WORKAROUNDS - - MAKE_BINARY_DISTRIBUTION_OPTIONS="$MAKE_BINARY_DISTRIBUTION_OPTIONS --with-ndbcluster" - - if test "$have_ndb_debug" = "default" - then - have_ndb_debug=$with_debug - fi - - if test "$have_ndb_debug" = "yes" - then - # Medium debug. - NDB_DEFS="-DNDB_DEBUG -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" - elif test "$have_ndb_debug" = "full" - then - NDB_DEFS="-DNDB_DEBUG_FULL -DVM_TRACE -DERROR_INSERT -DARRAY_GUARD" - else - # no extra ndb debug but still do asserts if debug version - if test "$with_debug" = "yes" - then - NDB_DEFS="" - else - NDB_DEFS="-DNDEBUG" - fi - fi - - if test X"$ndb_port" = Xdefault - then - ndb_port="1186" - fi - - have_ndb_binlog="no" - if test X"$ndb_binlog" = Xdefault || - test X"$ndb_binlog" = Xyes - then - have_ndb_binlog="yes" - fi - - if test X"$have_ndb_binlog" = Xyes - then - AC_DEFINE([WITH_NDB_BINLOG], [1], - [Including Ndb Cluster Binlog]) - AC_MSG_RESULT([Including Ndb Cluster Binlog]) - else - AC_MSG_RESULT([Not including Ndb Cluster Binlog]) - fi - - ndb_transporter_opt_objs="" - if test "$ac_cv_func_shmget" = "yes" && - test "$ac_cv_func_shmat" = "yes" && - test "$ac_cv_func_shmdt" = "yes" && - test "$ac_cv_func_shmctl" = "yes" && - test "$ac_cv_func_sigaction" = "yes" && - test "$ac_cv_func_sigemptyset" = "yes" && - test "$ac_cv_func_sigaddset" = "yes" && - test "$ac_cv_func_pthread_sigmask" = "yes" - then - AC_DEFINE([NDB_SHM_TRANSPORTER], [1], - [Including Ndb Cluster DB shared memory transporter]) - AC_MSG_RESULT([Including ndb shared memory transporter]) - ndb_transporter_opt_objs="$ndb_transporter_opt_objs SHM_Transporter.lo SHM_Transporter.unix.lo" - else - AC_MSG_RESULT([Not including ndb shared memory transporter]) - fi - - if test X"$have_ndb_sci" = Xyes - then - ndb_transporter_opt_objs="$ndb_transporter_opt_objs SCI_Transporter.lo" - fi - - ndb_opt_subdirs= - ndb_bin_am_ldflags="-static" - if test X"$have_ndb_test" = Xyes - then - ndb_opt_subdirs="test" - ndb_bin_am_ldflags="" - fi - - if test X"$have_ndb_docs" = Xyes - then - ndb_opt_subdirs="$ndb_opt_subdirs docs" - ndb_bin_am_ldflags="" - fi - - # building dynamic breaks on AIX. (If you want to try it and get unresolved - # __vec__delete2 and some such, try linking against libhC.) - case "$host_os" in - aix3.* | aix4.0.* | aix4.1.*) ;; - *) ndb_bin_am_ldflags="-static";; - esac - - # libndbclient versioning when linked with GNU ld. 
- if $LD --version 2>/dev/null|grep GNU >/dev/null 2>&1 ; then - NDB_LD_VERSION_SCRIPT="-Wl,--version-script=\$(top_builddir)/storage/ndb/src/libndb.ver" - AC_CONFIG_FILES(storage/ndb/src/libndb.ver) - fi - AC_SUBST(NDB_LD_VERSION_SCRIPT) - - AC_SUBST(NDB_SHARED_LIB_MAJOR_VERSION) - AC_SUBST(NDB_SHARED_LIB_VERSION) - - - AC_SUBST(NDB_VERSION_MAJOR) - AC_SUBST(NDB_VERSION_MINOR) - AC_SUBST(NDB_VERSION_BUILD) - AC_SUBST(NDB_VERSION_STATUS) - AC_DEFINE_UNQUOTED([NDB_VERSION_MAJOR], [$NDB_VERSION_MAJOR], - [NDB major version]) - AC_DEFINE_UNQUOTED([NDB_VERSION_MINOR], [$NDB_VERSION_MINOR], - [NDB minor version]) - AC_DEFINE_UNQUOTED([NDB_VERSION_BUILD], [$NDB_VERSION_BUILD], - [NDB build version]) - AC_DEFINE_UNQUOTED([NDB_VERSION_STATUS], ["$NDB_VERSION_STATUS"], - [NDB status version]) - - AC_SUBST(ndbcluster_includes) - AC_SUBST(ndbcluster_libs) - AC_SUBST(ndbcluster_system_libs) - AC_SUBST(ndb_mgmclient_libs) - AC_SUBST(NDB_SCI_LIBS) - - AC_SUBST(ndb_transporter_opt_objs) - AC_SUBST(ndb_port) - AC_SUBST(ndb_bin_am_ldflags) - AC_SUBST(ndb_opt_subdirs) - - AC_SUBST(NDB_DEFS) - AC_SUBST(ndb_cxxflags_fix) - - NDB_SIZEOF_CHARP="$ac_cv_sizeof_charp" - NDB_SIZEOF_CHAR="$ac_cv_sizeof_char" - NDB_SIZEOF_SHORT="$ac_cv_sizeof_short" - NDB_SIZEOF_INT="$ac_cv_sizeof_int" - NDB_SIZEOF_LONG="$ac_cv_sizeof_long" - NDB_SIZEOF_LONG_LONG="$ac_cv_sizeof_long_long" - AC_SUBST([NDB_SIZEOF_CHARP]) - AC_SUBST([NDB_SIZEOF_CHAR]) - AC_SUBST([NDB_SIZEOF_SHORT]) - AC_SUBST([NDB_SIZEOF_INT]) - AC_SUBST([NDB_SIZEOF_LONG]) - AC_SUBST([NDB_SIZEOF_LONG_LONG]) - - AC_CONFIG_FILES([ - storage/ndb/include/ndb_version.h - storage/ndb/include/ndb_global.h - storage/ndb/include/ndb_types.h - ]) -]) - -AC_SUBST(TEST_NDBCLUSTER) -dnl --------------------------------------------------------------------------- -dnl END OF MYSQL_CHECK_NDBCLUSTER SECTION -dnl --------------------------------------------------------------------------- diff --git a/storage/ndb/ndbapi-examples/Makefile b/storage/ndb/ndbapi-examples/Makefile deleted file mode 100644 index 6a48afccb48..00000000000 --- a/storage/ndb/ndbapi-examples/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -BIN_DIRS := ndbapi_simple \ - ndbapi_async \ - ndbapi_async1 \ - ndbapi_retries \ - ndbapi_simple_index \ - ndbapi_event \ - ndbapi_scan \ - mgmapi_logevent \ - ndbapi_simple_dual \ - mgmapi_logevent2 - -bins: $(patsubst %, _bins_%, $(BIN_DIRS)) - -$(patsubst %, _bins_%, $(BIN_DIRS)) : - $(MAKE) -C $(patsubst _bins_%, %, $@) $(OPTS) - -libs: - -clean: - for f in ${BIN_DIRS}; do \ - $(MAKE) -C $$f $@;\ - done - -clean_dep: clean -cleanall: clean -tidy: clean -distclean: clean diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile deleted file mode 100644 index b67150b71fa..00000000000 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = mgmapi_logevent -SRCS = main.cpp -OBJS = main.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. 
-INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(OBJS): $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp deleted file mode 100644 index f2c8ebb6410..00000000000 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent/main.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include - -/* - * export LD_LIBRARY_PATH=../../../libmysql_r/.libs:../../../ndb/src/.libs - */ - -#define MGMERROR(h) \ -{ \ - fprintf(stderr, "code: %d msg: %s\n", \ - ndb_mgm_get_latest_error(h), \ - ndb_mgm_get_latest_error_msg(h)); \ - exit(-1); \ -} - -#define LOGEVENTERROR(h) \ -{ \ - fprintf(stderr, "code: %d msg: %s\n", \ - ndb_logevent_get_latest_error(h), \ - ndb_logevent_get_latest_error_msg(h)); \ - exit(-1); \ -} - -int main(int argc, char** argv) -{ - NdbMgmHandle h; - NdbLogEventHandle le; - int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, - 15, NDB_MGM_EVENT_CATEGORY_CONNECTION, - 15, NDB_MGM_EVENT_CATEGORY_NODE_RESTART, - 15, NDB_MGM_EVENT_CATEGORY_STARTUP, - 15, NDB_MGM_EVENT_CATEGORY_ERROR, - 0 }; - struct ndb_logevent event; - - if (argc < 2) - { - printf("Arguments are [].\n"); - exit(-1); - } - const char *connectstring = argv[1]; - int iterations = -1; - if (argc > 2) - iterations = atoi(argv[2]); - ndb_init(); - - h= ndb_mgm_create_handle(); - if ( h == 0) - { - printf("Unable to create handle\n"); - exit(-1); - } - if (ndb_mgm_set_connectstring(h, connectstring) == -1) - { - printf("Unable to set connectstring\n"); - exit(-1); - } - if (ndb_mgm_connect(h,0,0,0)) MGMERROR(h); - - le= ndb_mgm_create_logevent_handle(h, filter); - if ( le == 0 ) MGMERROR(h); - - while (iterations-- != 0) - { - int timeout= 1000; - int r= ndb_logevent_get_next(le,&event,timeout); - if (r == 0) - printf("No event within %d milliseconds\n", timeout); - else if (r < 0) - LOGEVENTERROR(le) - else - { - switch (event.type) { - case NDB_LE_BackupStarted: - printf("Node %d: BackupStarted\n", event.source_nodeid); - printf(" Starting node ID: %d\n", event.BackupStarted.starting_node); - printf(" Backup ID: %d\n", event.BackupStarted.backup_id); - break; - case NDB_LE_BackupCompleted: - printf("Node %d: BackupCompleted\n", event.source_nodeid); - printf(" Backup ID: %d\n", event.BackupStarted.backup_id); - break; - case NDB_LE_BackupAborted: 
- printf("Node %d: BackupAborted\n", event.source_nodeid); - break; - case NDB_LE_BackupFailedToStart: - printf("Node %d: BackupFailedToStart\n", event.source_nodeid); - break; - - case NDB_LE_NodeFailCompleted: - printf("Node %d: NodeFailCompleted\n", event.source_nodeid); - break; - case NDB_LE_ArbitResult: - printf("Node %d: ArbitResult\n", event.source_nodeid); - printf(" code %d, arbit_node %d\n", - event.ArbitResult.code & 0xffff, - event.ArbitResult.arbit_node); - break; - case NDB_LE_DeadDueToHeartbeat: - printf("Node %d: DeadDueToHeartbeat\n", event.source_nodeid); - printf(" node %d\n", event.DeadDueToHeartbeat.node); - break; - - case NDB_LE_Connected: - printf("Node %d: Connected\n", event.source_nodeid); - printf(" node %d\n", event.Connected.node); - break; - case NDB_LE_Disconnected: - printf("Node %d: Disconnected\n", event.source_nodeid); - printf(" node %d\n", event.Disconnected.node); - break; - case NDB_LE_NDBStartCompleted: - printf("Node %d: StartCompleted\n", event.source_nodeid); - printf(" version %d.%d.%d\n", - event.NDBStartCompleted.version >> 16 & 0xff, - event.NDBStartCompleted.version >> 8 & 0xff, - event.NDBStartCompleted.version >> 0 & 0xff); - break; - case NDB_LE_ArbitState: - printf("Node %d: ArbitState\n", event.source_nodeid); - printf(" code %d, arbit_node %d\n", - event.ArbitState.code & 0xffff, - event.ArbitResult.arbit_node); - break; - - default: - break; - } - } - } - - ndb_mgm_destroy_logevent_handle(&le); - ndb_mgm_destroy_handle(&h); - ndb_end(0); - return 0; -} diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile deleted file mode 100644 index fd9499c7a68..00000000000 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = mgmapi_logevent2 -SRCS = main.cpp -OBJS = main.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. -INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(OBJS): $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/mgmapi -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp deleted file mode 100644 index 622b90a6f8b..00000000000 --- a/storage/ndb/ndbapi-examples/mgmapi_logevent2/main.cpp +++ /dev/null @@ -1,225 +0,0 @@ -/* Copyright (c) 2003, 2006, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include - -/* - * export LD_LIBRARY_PATH=../../../libmysql_r/.libs:../../../ndb/src/.libs - */ - -#define MGMERROR(h) \ -{ \ - fprintf(stderr, "code: %d msg: %s\n", \ - ndb_mgm_get_latest_error(h), \ - ndb_mgm_get_latest_error_msg(h)); \ - exit(-1); \ -} - -#define LOGEVENTERROR(h) \ -{ \ - fprintf(stderr, "code: %d msg: %s\n", \ - ndb_logevent_get_latest_error(h), \ - ndb_logevent_get_latest_error_msg(h)); \ - exit(-1); \ -} - -int main(int argc, char** argv) -{ - NdbMgmHandle h1,h2; - NdbLogEventHandle le1,le2; - int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, - 15, NDB_MGM_EVENT_CATEGORY_CONNECTION, - 15, NDB_MGM_EVENT_CATEGORY_NODE_RESTART, - 15, NDB_MGM_EVENT_CATEGORY_STARTUP, - 15, NDB_MGM_EVENT_CATEGORY_ERROR, - 0 }; - struct ndb_logevent event1, event2; - - if (argc < 3) - { - printf("Arguments are [].\n"); - exit(-1); - } - const char *connectstring1 = argv[1]; - const char *connectstring2 = argv[2]; - int iterations = -1; - if (argc > 3) - iterations = atoi(argv[3]); - ndb_init(); - - h1= ndb_mgm_create_handle(); - h2= ndb_mgm_create_handle(); - if ( h1 == 0 || h2 == 0 ) - { - printf("Unable to create handle\n"); - exit(-1); - } - if (ndb_mgm_set_connectstring(h1, connectstring1) == -1 || - ndb_mgm_set_connectstring(h2, connectstring1)) - { - printf("Unable to set connectstring\n"); - exit(-1); - } - if (ndb_mgm_connect(h1,0,0,0)) MGMERROR(h1); - if (ndb_mgm_connect(h2,0,0,0)) MGMERROR(h2); - - if ((le1= ndb_mgm_create_logevent_handle(h1, filter)) == 0) MGMERROR(h1); - if ((le2= ndb_mgm_create_logevent_handle(h1, filter)) == 0) MGMERROR(h2); - - while (iterations-- != 0) - { - int timeout= 1000; - int r1= ndb_logevent_get_next(le1,&event1,timeout); - if (r1 == 0) - printf("No event within %d milliseconds\n", timeout); - else if (r1 < 0) - LOGEVENTERROR(le1) - else - { - switch (event1.type) { - case NDB_LE_BackupStarted: - printf("Node %d: BackupStarted\n", event1.source_nodeid); - printf(" Starting node ID: %d\n", event1.BackupStarted.starting_node); - printf(" Backup ID: %d\n", event1.BackupStarted.backup_id); - break; - case NDB_LE_BackupCompleted: - printf("Node %d: BackupCompleted\n", event1.source_nodeid); - printf(" Backup ID: %d\n", event1.BackupStarted.backup_id); - break; - case NDB_LE_BackupAborted: - printf("Node %d: BackupAborted\n", event1.source_nodeid); - break; - case NDB_LE_BackupFailedToStart: - printf("Node %d: BackupFailedToStart\n", event1.source_nodeid); - break; - - case NDB_LE_NodeFailCompleted: - printf("Node %d: NodeFailCompleted\n", event1.source_nodeid); - break; - case NDB_LE_ArbitResult: - printf("Node %d: ArbitResult\n", event1.source_nodeid); - printf(" code %d, arbit_node %d\n", - event1.ArbitResult.code & 0xffff, - event1.ArbitResult.arbit_node); - break; - case NDB_LE_DeadDueToHeartbeat: - printf("Node %d: DeadDueToHeartbeat\n", event1.source_nodeid); - printf(" node %d\n", event1.DeadDueToHeartbeat.node); - break; - - case NDB_LE_Connected: - printf("Node %d: Connected\n", event1.source_nodeid); - printf(" node %d\n", event1.Connected.node); - break; - case NDB_LE_Disconnected: - printf("Node %d: Disconnected\n", event1.source_nodeid); - printf(" node %d\n", event1.Disconnected.node); - break; - case NDB_LE_NDBStartCompleted: - printf("Node %d: StartCompleted\n", event1.source_nodeid); - 
printf(" version %d.%d.%d\n", - event1.NDBStartCompleted.version >> 16 & 0xff, - event1.NDBStartCompleted.version >> 8 & 0xff, - event1.NDBStartCompleted.version >> 0 & 0xff); - break; - case NDB_LE_ArbitState: - printf("Node %d: ArbitState\n", event1.source_nodeid); - printf(" code %d, arbit_node %d\n", - event1.ArbitState.code & 0xffff, - event1.ArbitResult.arbit_node); - break; - - default: - break; - } - } - - int r2= ndb_logevent_get_next(le1,&event2,timeout); - if (r2 == 0) - printf("No event within %d milliseconds\n", timeout); - else if (r2 < 0) - LOGEVENTERROR(le2) - else - { - switch (event2.type) { - case NDB_LE_BackupStarted: - printf("Node %d: BackupStarted\n", event2.source_nodeid); - printf(" Starting node ID: %d\n", event2.BackupStarted.starting_node); - printf(" Backup ID: %d\n", event2.BackupStarted.backup_id); - break; - case NDB_LE_BackupCompleted: - printf("Node %d: BackupCompleted\n", event2.source_nodeid); - printf(" Backup ID: %d\n", event2.BackupStarted.backup_id); - break; - case NDB_LE_BackupAborted: - printf("Node %d: BackupAborted\n", event2.source_nodeid); - break; - case NDB_LE_BackupFailedToStart: - printf("Node %d: BackupFailedToStart\n", event2.source_nodeid); - break; - - case NDB_LE_NodeFailCompleted: - printf("Node %d: NodeFailCompleted\n", event2.source_nodeid); - break; - case NDB_LE_ArbitResult: - printf("Node %d: ArbitResult\n", event2.source_nodeid); - printf(" code %d, arbit_node %d\n", - event2.ArbitResult.code & 0xffff, - event2.ArbitResult.arbit_node); - break; - case NDB_LE_DeadDueToHeartbeat: - printf("Node %d: DeadDueToHeartbeat\n", event2.source_nodeid); - printf(" node %d\n", event2.DeadDueToHeartbeat.node); - break; - - case NDB_LE_Connected: - printf("Node %d: Connected\n", event2.source_nodeid); - printf(" node %d\n", event2.Connected.node); - break; - case NDB_LE_Disconnected: - printf("Node %d: Disconnected\n", event2.source_nodeid); - printf(" node %d\n", event2.Disconnected.node); - break; - case NDB_LE_NDBStartCompleted: - printf("Node %d: StartCompleted\n", event2.source_nodeid); - printf(" version %d.%d.%d\n", - event2.NDBStartCompleted.version >> 16 & 0xff, - event2.NDBStartCompleted.version >> 8 & 0xff, - event2.NDBStartCompleted.version >> 0 & 0xff); - break; - case NDB_LE_ArbitState: - printf("Node %d: ArbitState\n", event2.source_nodeid); - printf(" code %d, arbit_node %d\n", - event2.ArbitState.code & 0xffff, - event2.ArbitResult.arbit_node); - break; - - default: - break; - } - } - } - - ndb_mgm_destroy_logevent_handle(&le1); - ndb_mgm_destroy_logevent_handle(&le2); - ndb_mgm_destroy_handle(&h1); - ndb_mgm_destroy_handle(&h2); - ndb_end(0); - return 0; -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_async/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async/Makefile deleted file mode 100644 index c18e9676b58..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_async/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_async -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o -CXX = g++ -CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -g -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. 
-INCLUDE_DIR = $(TOP_SRCDIR) -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) - $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp b/storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp deleted file mode 100644 index 5821dc88c2c..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp +++ /dev/null @@ -1,492 +0,0 @@ - - -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -/** - * ndbapi_async.cpp: - * Illustrates how to use callbacks and error handling using the asynchronous - * part of the NDBAPI. - * - * Classes and methods in NDBAPI used in this example: - * - * Ndb_cluster_connection - * connect() - * wait_until_ready() - * - * Ndb - * init() - * startTransaction() - * closeTransaction() - * sendPollNdb() - * getNdbError() - * - * NdbConnection - * getNdbOperation() - * executeAsynchPrepare() - * getNdbError() - * - * NdbOperation - * insertTuple() - * equal() - * setValue() - * - */ - - -#include -#include -#include - -#include // Used for cout - -/** - * Helper sleep function - */ -static void -milliSleep(int milliseconds){ - struct timeval sleeptime; - sleeptime.tv_sec = milliseconds / 1000; - sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; - select(0, 0, 0, 0, &sleeptime); -} - - -/** - * error printout macro - */ -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." << std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) { \ - PRINT_ERROR(error.code,error.message); \ - exit(-1); } - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL -/** - * callback struct. - * transaction : index of the transaction in transaction[] array below - * data : the data that the transaction was modifying. - * retries : counter for how many times the trans. 
has been retried - */ -typedef struct { - Ndb * ndb; - int transaction; - int data; - int retries; -} async_callback_t; - -/** - * Structure used in "free list" to a NdbTransaction - */ -typedef struct { - NdbTransaction* conn; - int used; -} transaction_t; - -/** - * Free list holding transactions - */ -transaction_t transaction[1024]; //1024 - max number of outstanding - //transaction in one Ndb object - -#endif -/** - * prototypes - */ - -/** - * Prepare and send transaction - */ -int populate(Ndb * myNdb, int data, async_callback_t * cbData); - -/** - * Error handler. - */ -bool asynchErrorHandler(NdbTransaction * trans, Ndb* ndb); - -/** - * Exit function - */ -void asynchExitHandler(Ndb * m_ndb) ; - -/** - * Helper function used in callback(...) - */ -void closeTransaction(Ndb * ndb , async_callback_t * cb); - -/** - * Function to create table - */ -void create_table(MYSQL &mysql); - -/** - * Function to drop table - */ -void drop_table(MYSQL &mysql); - -/** - * stat. variables - */ -int tempErrors = 0; -int permErrors = 0; - -void -closeTransaction(Ndb * ndb , async_callback_t * cb) -{ - ndb->closeTransaction(transaction[cb->transaction].conn); - transaction[cb->transaction].conn = 0; - transaction[cb->transaction].used = 0; - cb->retries++; -} - -/** - * Callback executed when transaction has return from NDB - */ -static void -callback(int result, NdbTransaction* trans, void* aObject) -{ - async_callback_t * cbData = (async_callback_t *)aObject; - if (result<0) - { - /** - * Error: Temporary or permanent? - */ - if (asynchErrorHandler(trans, (Ndb*)cbData->ndb)) - { - closeTransaction((Ndb*)cbData->ndb, cbData); - while(populate((Ndb*)cbData->ndb, cbData->data, cbData) < 0) - milliSleep(10); - } - else - { - std::cout << "Restore: Failed to restore data " - << "due to a unrecoverable error. Exiting..." << std::endl; - delete cbData; - asynchExitHandler((Ndb*)cbData->ndb); - } - } - else - { - /** - * OK! close transaction - */ - closeTransaction((Ndb*)cbData->ndb, cbData); - delete cbData; - } -} - - -/** - * Create table "GARAGE" - */ -void create_table(MYSQL &mysql) -{ - while (mysql_query(&mysql, - "CREATE TABLE" - " GARAGE" - " (REG_NO INT UNSIGNED NOT NULL," - " BRAND CHAR(20) NOT NULL," - " COLOR CHAR(20) NOT NULL," - " PRIMARY KEY USING HASH (REG_NO))" - " ENGINE=NDB")) - { - if (mysql_errno(&mysql) != ER_TABLE_EXISTS_ERROR) - MYSQLERROR(mysql); - std::cout << "MySQL Cluster already has example table: GARAGE. " - << "Dropping it..." << std::endl; - drop_table(mysql); - create_table(mysql); - } -} - -/** - * Drop table GARAGE - */ -void drop_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, "DROP TABLE GARAGE")) - MYSQLERROR(mysql); -} - - -void asynchExitHandler(Ndb * m_ndb) -{ - if (m_ndb != NULL) - delete m_ndb; - exit(-1); -} - -/* returns true if is recoverable (temporary), - * false if it is an error that is permanent. - */ -bool asynchErrorHandler(NdbTransaction * trans, Ndb* ndb) -{ - NdbError error = trans->getNdbError(); - switch(error.status) - { - case NdbError::Success: - return false; - break; - - case NdbError::TemporaryError: - /** - * The error code indicates a temporary error. - * The application should typically retry. - * (Includes classifications: NdbError::InsufficientSpace, - * NdbError::TemporaryResourceError, NdbError::NodeRecoveryError, - * NdbError::OverloadError, NdbError::NodeShutdown - * and NdbError::TimeoutExpired.) 
- * - * We should sleep for a while and retry, except for insufficient space - */ - if(error.classification == NdbError::InsufficientSpace) - return false; - milliSleep(10); - tempErrors++; - return true; - break; - case NdbError::UnknownResult: - std::cout << error.message << std::endl; - return false; - break; - default: - case NdbError::PermanentError: - switch (error.code) - { - case 499: - case 250: - milliSleep(10); - return true; // SCAN errors that can be retried. Requires restart of scan. - default: - break; - } - //ERROR - std::cout << error.message << std::endl; - return false; - break; - } - return false; -} - -static int nPreparedTransactions = 0; -static int MAX_RETRIES = 10; -static int parallelism = 100; - - -/************************************************************************ - * populate() - * 1. Prepare 'parallelism' number of insert transactions. - * 2. Send transactions to NDB and wait for callbacks to execute - */ -int populate(Ndb * myNdb, int data, async_callback_t * cbData) -{ - - NdbOperation* myNdbOperation; // For operations - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("GARAGE"); - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - async_callback_t * cb; - int retries = 0; - int current = 0; - for(int i=0; i<1024; i++) - { - if(transaction[i].used == 0) - { - current = i; - if (cbData == 0) - { - /** - * We already have a callback - * This is an absolutely new transaction - */ - cb = new async_callback_t; - cb->retries = 0; - } - else - { - /** - * We already have a callback - */ - cb =cbData; - retries = cbData->retries; - } - /** - * Set data used by the callback - */ - cb->ndb = myNdb; //handle to Ndb object so that we can close transaction - // in the callback (alt. make myNdb global). - - cb->data = data; //this is the data we want to insert - cb->transaction = current; //This is the number (id) of this transaction - transaction[current].used = 1 ; //Mark the transaction as used - break; - } - } - if(!current) - return -1; - - while(retries < MAX_RETRIES) - { - transaction[current].conn = myNdb->startTransaction(); - if (transaction[current].conn == NULL) { - /** - * no transaction to close since conn == null - */ - milliSleep(10); - retries++; - continue; - } - myNdbOperation = transaction[current].conn->getNdbOperation(myTable); - if (myNdbOperation == NULL) - { - if (asynchErrorHandler(transaction[current].conn, myNdb)) - { - myNdb->closeTransaction(transaction[current].conn); - transaction[current].conn = 0; - milliSleep(10); - retries++; - continue; - } - asynchExitHandler(myNdb); - } // if - if(myNdbOperation->insertTuple() < 0 || - myNdbOperation->equal("REG_NO", data) < 0 || - myNdbOperation->setValue("BRAND", "Mercedes") <0 || - myNdbOperation->setValue("COLOR", "Blue") < 0) - { - if (asynchErrorHandler(transaction[current].conn, myNdb)) - { - myNdb->closeTransaction(transaction[current].conn); - transaction[current].conn = 0; - retries++; - milliSleep(10); - continue; - } - asynchExitHandler(myNdb); - } - - /*Prepare transaction (the transaction is NOT yet sent to NDB)*/ - transaction[current].conn->executeAsynchPrepare(NdbTransaction::Commit, - &callback, - cb); - /** - * When we have prepared parallelism number of transactions -> - * send the transaction to ndb. - * Next time we will deal with the transactions are in the - * callback. There we will see which ones that were successful - * and which ones to retry. 
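Reduced to a single transaction, the prepare-then-poll flow that the comment above describes looks roughly like the sketch below. It assumes the NdbApi.hpp umbrella header (the original include names are lost in this hunk) and a GARAGE table as created by create_table(), and it drops the free-list bookkeeping and retry handling of the full example; async_insert_one and insert_done are illustrative names, not part of the deleted code.

    #include <NdbApi.hpp>  // assumed umbrella header for Ndb, NdbTransaction, NdbOperation
    #include <cstdio>

    // Invoked from sendPollNdb() once NDB has executed the prepared transaction.
    static void insert_done(int result, NdbTransaction *trans, void * /*arg*/)
    {
      if (result < 0)
        printf("insert failed: %s\n", trans->getNdbError().message);
    }

    // Prepare one insert asynchronously, send it, and wait for the callback.
    static int async_insert_one(Ndb *ndb, const NdbDictionary::Table *table, int reg_no)
    {
      NdbTransaction *trans = ndb->startTransaction();
      if (trans == NULL)
        return -1;

      NdbOperation *op = trans->getNdbOperation(table);
      if (op == NULL ||
          op->insertTuple() < 0 ||
          op->equal("REG_NO", reg_no) < 0 ||
          op->setValue("BRAND", "Mercedes") < 0 ||
          op->setValue("COLOR", "Blue") < 0)
      {
        ndb->closeTransaction(trans);
        return -1;
      }

      // Nothing is sent yet; the transaction is only queued on the Ndb object.
      trans->executeAsynchPrepare(NdbTransaction::Commit, &insert_done, NULL);

      // Send every prepared transaction and wait up to 3 s for its callback.
      ndb->sendPollNdb(3000, 1);

      ndb->closeTransaction(trans);
      return 0;
    }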
- */ - if (nPreparedTransactions == parallelism-1) - { - // send-poll all transactions - // close transaction is done in callback - myNdb->sendPollNdb(3000, parallelism ); - nPreparedTransactions=0; - } - else - nPreparedTransactions++; - return 1; - } - std::cout << "Unable to recover from errors. Exiting..." << std::endl; - asynchExitHandler(myNdb); - return -1; -} - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - ndb_init(); - MYSQL mysql; - - /************************************************************** - * Connect to mysql server and create table * - **************************************************************/ - { - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - mysql_query(&mysql, "CREATE DATABASE TEST_DB"); - if (mysql_query(&mysql, "USE TEST_DB") != 0) MYSQLERROR(mysql); - - create_table(mysql); - } - - /************************************************************** - * Connect to ndb cluster * - **************************************************************/ - Ndb_cluster_connection cluster_connection(connectstring); - if (cluster_connection.connect(4, 5, 1)) - { - std::cout << "Unable to connect to cluster within 30 secs." << std::endl; - exit(-1); - } - // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster_connection.wait_until_ready(30,0) < 0) - { - std::cout << "Cluster was not ready within 30 secs.\n"; - exit(-1); - } - - Ndb* myNdb = new Ndb( &cluster_connection, - "TEST_DB" ); // Object representing the database - if (myNdb->init(1024) == -1) { // Set max 1024 parallel transactions - APIERROR(myNdb->getNdbError()); - } - - /** - * Initialise transaction array - */ - for(int i = 0 ; i < 10 ; i++) - { - transaction[i].used = 0; - transaction[i].conn = 0; - - } - int i=0; - /** - * Do 10 insert transactions. - */ - while(i < 10) - { - while(populate(myNdb,i,0)<0) // <0, no space on free list. Sleep and try again. - milliSleep(10); - - i++; - } - std::cout << "Number of temporary errors: " << tempErrors << std::endl; - delete myNdb; - - drop_table(mysql); -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_async/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_async/readme.txt deleted file mode 100644 index 47cb4bf9ffa..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_async/readme.txt +++ /dev/null @@ -1,3 +0,0 @@ -1. Set NDB_OS in Makefile -2. Add path to libNDB_API.so in LD_LIBRARY_PATH -3. Set NDB_CONNECTSTRING diff --git a/storage/ndb/ndbapi-examples/ndbapi_async1/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async1/Makefile deleted file mode 100644 index c88086157e7..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_async1/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGET = ndbapi_async1 -SRCS = ndbapi_async1.cpp -OBJS = ndbapi_async1.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. 
-INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp deleted file mode 100644 index 323ba713a8a..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -// -// ndbapi_async1.cpp: Using asynchronous transactions in NDB API -// -// -// Correct output from this program is: -// -// Successful insert. -// Successful insert. - -#include -#include - -// Used for cout -#include - - -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." << std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) \ - { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \ - << error.code << ", msg: " << error.message << "." << std::endl; \ - exit(-1); } - -static void create_table(MYSQL &); -static void drop_table(MYSQL &); -static void callback(int result, NdbTransaction* NdbObject, void* aObject); - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - ndb_init(); - - Ndb_cluster_connection *cluster_connection= - new Ndb_cluster_connection(connectstring); // Object representing the cluster - - int r= cluster_connection->connect(5 /* retries */, - 3 /* delay between retries */, - 1 /* verbose */); - if (r > 0) - { - std::cout - << "Cluster connect failed, possibly resolved with more retries.\n"; - exit(-1); - } - else if (r < 0) - { - std::cout - << "Cluster connect failed.\n"; - exit(-1); - } - - if (cluster_connection->wait_until_ready(30,0) < 0) - { - std::cout << "Cluster was not ready within 30 secs." 
<< std::endl; - exit(-1); - } - - // connect to mysql server - MYSQL mysql; - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - /******************************************** - * Connect to database via mysql-c * - ********************************************/ - mysql_query(&mysql, "CREATE DATABASE TEST_DB_1"); - if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql); - create_table(mysql); - - Ndb* myNdb = new Ndb( cluster_connection, - "TEST_DB_1" ); // Object representing the database - - NdbTransaction* myNdbTransaction[2]; // For transactions - NdbOperation* myNdbOperation; // For operations - - if (myNdb->init(2) == -1) { // Want two parallel insert transactions - APIERROR(myNdb->getNdbError()); - exit(-1); - } - - /****************************************************** - * Insert (we do two insert transactions in parallel) * - ******************************************************/ - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - for (int i = 0; i < 2; i++) { - myNdbTransaction[i] = myNdb->startTransaction(); - if (myNdbTransaction[i] == NULL) APIERROR(myNdb->getNdbError()); - - myNdbOperation = myNdbTransaction[i]->getNdbOperation(myTable); - if (myNdbOperation == NULL) APIERROR(myNdbTransaction[i]->getNdbError()); - - myNdbOperation->insertTuple(); - myNdbOperation->equal("ATTR1", 20 + i); - myNdbOperation->setValue("ATTR2", 20 + i); - - // Prepare transaction (the transaction is NOT yet sent to NDB) - myNdbTransaction[i]->executeAsynchPrepare(NdbTransaction::Commit, - &callback, NULL); - } - - // Send all transactions to NDB - myNdb->sendPreparedTransactions(0); - - // Poll all transactions - myNdb->pollNdb(3000, 2); - - // Close all transactions - for (int i = 0; i < 2; i++) - myNdb->closeTransaction(myNdbTransaction[i]); - - delete myNdb; - delete cluster_connection; - - drop_table(mysql); - - ndb_end(0); - return 0; -} - -/********************************************************* - * Create a table named MYTABLENAME if it does not exist * - *********************************************************/ -static void create_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "CREATE TABLE" - " MYTABLENAME" - " (ATTR1 INT UNSIGNED NOT NULL PRIMARY KEY," - " ATTR2 INT UNSIGNED NOT NULL)" - " ENGINE=NDB")) - MYSQLERROR(mysql); -} - -/*********************************** - * Drop a table named MYTABLENAME - ***********************************/ -static void drop_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "DROP TABLE" - " MYTABLENAME")) - MYSQLERROR(mysql); -} - - -/* - * callback : This is called when the transaction is polled - * - * (This function must have three arguments: - * - The result of the transaction, - * - The NdbTransaction object, and - * - A pointer to an arbitrary object.) - */ - -static void -callback(int result, NdbTransaction* myTrans, void* aObject) -{ - if (result == -1) { - std::cout << "Poll error: " << std::endl; - APIERROR(myTrans->getNdbError()); - } else { - std::cout << "Successful insert." 
<< std::endl; - } -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_event/Makefile b/storage/ndb/ndbapi-examples/ndbapi_event/Makefile deleted file mode 100644 index c0430011ab6..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_event/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_event -SRCS = ndbapi_event.cpp -OBJS = ndbapi_event.o -CXX = g++ -g -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG =# -DVM_TRACE -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. -INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) Makefile - $(CXX) $(CFLAGS) $(DEBUG) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi -I$(TOP_SRCDIR)/include $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp b/storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp deleted file mode 100644 index 70e899c5d7c..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - * ndbapi_event.cpp: Using API level events in NDB API - * - * Classes and methods used in this example: - * - * Ndb_cluster_connection - * connect() - * wait_until_ready() - * - * Ndb - * init() - * getDictionary() - * createEventOperation() - * dropEventOperation() - * pollEvents() - * nextEvent() - * - * NdbDictionary - * createEvent() - * dropEvent() - * - * NdbDictionary::Event - * setTable() - * addTableEvent() - * addEventColumn() - * - * NdbEventOperation - * getValue() - * getPreValue() - * execute() - * getEventType() - * - */ - -#include - -// Used for cout -#include -#include -#include -#ifdef VM_TRACE -#include -#endif -#ifndef assert -#include -#endif - - -/** - * Assume that there is a table which is being updated by - * another process (e.g. flexBench -l 0 -stdtables). - * We want to monitor what happens with column values. 
- * - * Or using the mysql client: - * - * shell> mysql -u root - * mysql> create database TEST_DB; - * mysql> use TEST_DB; - * mysql> create table t0 - * (c0 int, c1 int, c2 char(4), c3 char(4), c4 text, - * primary key(c0, c2)) engine ndb charset latin1; - * - * In another window start ndbapi_event, wait until properly started - - insert into t0 values (1, 2, 'a', 'b', null); - insert into t0 values (3, 4, 'c', 'd', null); - update t0 set c3 = 'e' where c0 = 1 and c2 = 'a'; -- use pk - update t0 set c3 = 'f'; -- use scan - update t0 set c3 = 'F'; -- use scan update to 'same' - update t0 set c2 = 'g' where c0 = 1; -- update pk part - update t0 set c2 = 'G' where c0 = 1; -- update pk part to 'same' - update t0 set c0 = 5, c2 = 'H' where c0 = 3; -- update full PK - delete from t0; - - insert ...; update ...; -- see events w/ same pk merged (if -m option) - delete ...; insert ...; -- there are 5 combinations ID IU DI UD UU - update ...; update ...; - - -- text requires -m flag - set @a = repeat('a',256); -- inline size - set @b = repeat('b',2000); -- part size - set @c = repeat('c',2000*30); -- 30 parts - - -- update the text field using combinations of @a, @b, @c ... - - * you should see the data popping up in the example window - * - */ - -#define APIERROR(error) \ - { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \ - << error.code << ", msg: " << error.message << "." << std::endl; \ - exit(-1); } - -int myCreateEvent(Ndb* myNdb, - const char *eventName, - const char *eventTableName, - const char **eventColumnName, - const int noEventColumnName, - bool merge_events); - -int main(int argc, char** argv) -{ - if (argc < 3) - { - std::cout << "Arguments are [m(merge events)|d(debug)].\n"; - exit(-1); - } - const char *connectstring = argv[1]; - int timeout = atoi(argv[2]); - ndb_init(); - bool merge_events = argc > 3 && strchr(argv[3], 'm') != 0; -#ifdef VM_TRACE - bool dbug = argc > 3 && strchr(argv[3], 'd') != 0; - if (dbug) DBUG_PUSH("d:t:"); - if (dbug) putenv("API_SIGNAL_LOG=-"); -#endif - - Ndb_cluster_connection *cluster_connection= - new Ndb_cluster_connection(connectstring); // Object representing the cluster - - int r= cluster_connection->connect(5 /* retries */, - 3 /* delay between retries */, - 1 /* verbose */); - if (r > 0) - { - std::cout - << "Cluster connect failed, possibly resolved with more retries.\n"; - exit(-1); - } - else if (r < 0) - { - std::cout - << "Cluster connect failed.\n"; - exit(-1); - } - - if (cluster_connection->wait_until_ready(30,30)) - { - std::cout << "Cluster was not ready within 30 secs." << std::endl; - exit(-1); - } - - Ndb* myNdb= new Ndb(cluster_connection, - "TEST_DB"); // Object representing the database - - if (myNdb->init() == -1) APIERROR(myNdb->getNdbError()); - - const char *eventName= "CHNG_IN_t0"; - const char *eventTableName= "t0"; - const int noEventColumnName= 5; - const char *eventColumnName[noEventColumnName]= - {"c0", - "c1", - "c2", - "c3", - "c4" - }; - - // Create events - myCreateEvent(myNdb, - eventName, - eventTableName, - eventColumnName, - noEventColumnName, - merge_events); - - // Normal values and blobs are unfortunately handled differently.. 
- typedef union { NdbRecAttr* ra; NdbBlob* bh; } RA_BH; - - int i, j, k, l; - j = 0; - while (j < timeout) { - - // Start "transaction" for handling events - NdbEventOperation* op; - printf("create EventOperation\n"); - if ((op = myNdb->createEventOperation(eventName)) == NULL) - APIERROR(myNdb->getNdbError()); - op->mergeEvents(merge_events); - - printf("get values\n"); - RA_BH recAttr[noEventColumnName]; - RA_BH recAttrPre[noEventColumnName]; - // primary keys should always be a part of the result - for (i = 0; i < noEventColumnName; i++) { - if (i < 4) { - recAttr[i].ra = op->getValue(eventColumnName[i]); - recAttrPre[i].ra = op->getPreValue(eventColumnName[i]); - } else if (merge_events) { - recAttr[i].bh = op->getBlobHandle(eventColumnName[i]); - recAttrPre[i].bh = op->getPreBlobHandle(eventColumnName[i]); - } - } - - // set up the callbacks - printf("execute\n"); - // This starts changes to "start flowing" - if (op->execute()) - APIERROR(op->getNdbError()); - - NdbEventOperation* the_op = op; - - i= 0; - while (i < timeout) { - // printf("now waiting for event...\n"); - int r = myNdb->pollEvents(1000); // wait for event or 1000 ms - if (r > 0) { - // printf("got data! %d\n", r); - while ((op= myNdb->nextEvent())) { - assert(the_op == op); - i++; - switch (op->getEventType()) { - case NdbDictionary::Event::TE_INSERT: - printf("%u INSERT", i); - break; - case NdbDictionary::Event::TE_DELETE: - printf("%u DELETE", i); - break; - case NdbDictionary::Event::TE_UPDATE: - printf("%u UPDATE", i); - break; - default: - abort(); // should not happen - } - printf(" gci=%d\n", (int)op->getGCI()); - for (k = 0; k <= 1; k++) { - printf(k == 0 ? "post: " : "pre : "); - for (l = 0; l < noEventColumnName; l++) { - if (l < 4) { - NdbRecAttr* ra = k == 0 ? recAttr[l].ra : recAttrPre[l].ra; - if (ra->isNULL() >= 0) { // we have a value - if (ra->isNULL() == 0) { // we have a non-null value - if (l < 2) - printf("%-5u", ra->u_32_value()); - else - printf("%-5.4s", ra->aRef()); - } else - printf("%-5s", "NULL"); - } else - printf("%-5s", "-"); // no value - } else if (merge_events) { - int isNull; - NdbBlob* bh = k == 0 ? recAttr[l].bh : recAttrPre[l].bh; - bh->getDefined(isNull); - if (isNull >= 0) { // we have a value - if (! isNull) { // we have a non-null value - Uint64 length = 0; - bh->getLength(length); - // read into buffer - unsigned char* buf = new unsigned char [length]; - memset(buf, 'X', length); - Uint32 n = length; - bh->readData(buf, n); // n is in/out - assert(n == length); - // pretty-print - bool first = true; - Uint32 i = 0; - while (i < n) { - unsigned char c = buf[i++]; - Uint32 m = 1; - while (i < n && buf[i] == c) - i++, m++; - if (! 
first) - printf("+"); - printf("%u%c", m, c); - first = false; - } - printf("[%u]", n); - delete [] buf; - } else - printf("%-5s", "NULL"); - } else - printf("%-5s", "-"); // no value - } - } - printf("\n"); - } - } - } else - printf("timed out (%i)\n", timeout); - } - // don't want to listen to events anymore - if (myNdb->dropEventOperation(the_op)) APIERROR(myNdb->getNdbError()); - the_op = 0; - - j++; - } - - { - NdbDictionary::Dictionary *myDict = myNdb->getDictionary(); - if (!myDict) APIERROR(myNdb->getNdbError()); - // remove event from database - if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError()); - } - - delete myNdb; - delete cluster_connection; - ndb_end(0); - return 0; -} - -int myCreateEvent(Ndb* myNdb, - const char *eventName, - const char *eventTableName, - const char **eventColumnNames, - const int noEventColumnNames, - bool merge_events) -{ - NdbDictionary::Dictionary *myDict= myNdb->getDictionary(); - if (!myDict) APIERROR(myNdb->getNdbError()); - - const NdbDictionary::Table *table= myDict->getTable(eventTableName); - if (!table) APIERROR(myDict->getNdbError()); - - NdbDictionary::Event myEvent(eventName, *table); - myEvent.addTableEvent(NdbDictionary::Event::TE_ALL); - // myEvent.addTableEvent(NdbDictionary::Event::TE_INSERT); - // myEvent.addTableEvent(NdbDictionary::Event::TE_UPDATE); - // myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE); - - myEvent.addEventColumns(noEventColumnNames, eventColumnNames); - myEvent.mergeEvents(merge_events); - - // Add event to database - if (myDict->createEvent(myEvent) == 0) - myEvent.print(); - else if (myDict->getNdbError().classification == - NdbError::SchemaObjectExists) { - printf("Event creation failed, event exists\n"); - printf("dropping Event...\n"); - if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError()); - // try again - // Add event to database - if ( myDict->createEvent(myEvent)) APIERROR(myDict->getNdbError()); - } else - APIERROR(myDict->getNdbError()); - - return 0; -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_retries/Makefile b/storage/ndb/ndbapi-examples/ndbapi_retries/Makefile deleted file mode 100644 index 1b4a316f406..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_retries/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGET = ndbapi_retries -SRCS = ndbapi_retries.cpp -OBJS = ndbapi_retries.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. -INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp b/storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp deleted file mode 100644 index 6a6acb1068c..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp +++ /dev/null @@ -1,291 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -// -// ndbapi_retries.cpp: Error handling and transaction retries -// -// There are many ways to program using the NDB API. In this example -// we execute two inserts in the same transaction using -// NdbConnection::execute(NoCommit). -// -// Transaction failing is handled by re-executing the transaction -// in case of non-permanent transaction errors. -// Application errors (i.e. errors at points marked with APIERROR) -// should be handled by the application programmer. - -#include -#include - -// Used for cout -#include - -// Used for sleep (use your own version of sleep) -#include -#define TIME_TO_SLEEP_BETWEEN_TRANSACTION_RETRIES 1 - -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." << std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } - -// -// APIERROR prints an NdbError object -// -#define APIERROR(error) \ - { std::cout << "API ERROR: " << error.code << " " << error.message \ - << std::endl \ - << " " << "Status: " << error.status \ - << ", Classification: " << error.classification << std::endl\ - << " " << "File: " << __FILE__ \ - << " (Line: " << __LINE__ << ")" << std::endl \ - ; \ - } - -// -// TRANSERROR prints all error info regarding an NdbTransaction -// -#define TRANSERROR(ndbTransaction) \ - { NdbError error = ndbTransaction->getNdbError(); \ - std::cout << "TRANS ERROR: " << error.code << " " << error.message \ - << std::endl \ - << " " << "Status: " << error.status \ - << ", Classification: " << error.classification << std::endl \ - << " " << "File: " << __FILE__ \ - << " (Line: " << __LINE__ << ")" << std::endl \ - ; \ - printTransactionError(ndbTransaction); \ - } - -void printTransactionError(NdbTransaction *ndbTransaction) { - const NdbOperation *ndbOp = NULL; - int i=0; - - /**************************************************************** - * Print NdbError object of every operations in the transaction * - ****************************************************************/ - while ((ndbOp = ndbTransaction->getNextCompletedOperation(ndbOp)) != NULL) { - NdbError error = ndbOp->getNdbError(); - std::cout << " OPERATION " << i+1 << ": " - << error.code << " " << error.message << std::endl - << " Status: " << error.status - << ", Classification: " << error.classification << std::endl; - i++; - } -} - - -// -// Example insert -// @param myNdb Ndb object representing NDB Cluster -// @param myTransaction NdbTransaction used for transaction -// @param myTable Table to insert into -// @param error NdbError object returned in case of errors -// @return -1 in case of failures, 0 otherwise -// -int insert(int transactionId, NdbTransaction* myTransaction, - const NdbDictionary::Table *myTable) { - NdbOperation *myOperation; // For other operations - - myOperation = myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) return -1; - - if (myOperation->insertTuple() || - myOperation->equal("ATTR1", transactionId) || - myOperation->setValue("ATTR2", 
transactionId)) { - APIERROR(myOperation->getNdbError()); - exit(-1); - } - - return myTransaction->execute(NdbTransaction::NoCommit); -} - - -// -// Execute function which re-executes (tries 10 times) the transaction -// if there are temporary errors (e.g. the NDB Cluster is overloaded). -// @return -1 failure, 1 success -// -int executeInsertTransaction(int transactionId, Ndb* myNdb, - const NdbDictionary::Table *myTable) { - int result = 0; // No result yet - int noOfRetriesLeft = 10; - NdbTransaction *myTransaction; // For other transactions - NdbError ndberror; - - while (noOfRetriesLeft > 0 && !result) { - - /********************************* - * Start and execute transaction * - *********************************/ - myTransaction = myNdb->startTransaction(); - if (myTransaction == NULL) { - APIERROR(myNdb->getNdbError()); - ndberror = myNdb->getNdbError(); - result = -1; // Failure - } else if (insert(transactionId, myTransaction, myTable) || - insert(10000+transactionId, myTransaction, myTable) || - myTransaction->execute(NdbTransaction::Commit)) { - TRANSERROR(myTransaction); - ndberror = myTransaction->getNdbError(); - result = -1; // Failure - } else { - result = 1; // Success - } - - /********************************** - * If failure, then analyze error * - **********************************/ - if (result == -1) { - switch (ndberror.status) { - case NdbError::Success: - break; - case NdbError::TemporaryError: - std::cout << "Retrying transaction..." << std::endl; - sleep(TIME_TO_SLEEP_BETWEEN_TRANSACTION_RETRIES); - --noOfRetriesLeft; - result = 0; // No completed transaction yet - break; - - case NdbError::UnknownResult: - case NdbError::PermanentError: - std::cout << "No retry of transaction..." << std::endl; - result = -1; // Permanent failure - break; - } - } - - /********************* - * Close transaction * - *********************/ - if (myTransaction != NULL) { - myNdb->closeTransaction(myTransaction); - } - } - - if (result != 1) exit(-1); - return result; -} - -/********************************************************* - * Create a table named MYTABLENAME if it does not exist * - *********************************************************/ -static void create_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "CREATE TABLE" - " MYTABLENAME" - " (ATTR1 INT UNSIGNED NOT NULL PRIMARY KEY," - " ATTR2 INT UNSIGNED NOT NULL)" - " ENGINE=NDB")) - MYSQLERROR(mysql); -} - -/*********************************** - * Drop a table named MYTABLENAME - ***********************************/ -static void drop_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "DROP TABLE" - " MYTABLENAME")) - MYSQLERROR(mysql); -} - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - ndb_init(); - - Ndb_cluster_connection *cluster_connection= - new Ndb_cluster_connection(connectstring); // Object representing the cluster - - int r= cluster_connection->connect(5 /* retries */, - 3 /* delay between retries */, - 1 /* verbose */); - if (r > 0) - { - std::cout - << "Cluster connect failed, possibly resolved with more retries.\n"; - exit(-1); - } - else if (r < 0) - { - std::cout - << "Cluster connect failed.\n"; - exit(-1); - } - - if (cluster_connection->wait_until_ready(30,30)) - { - std::cout << "Cluster was not ready within 30 secs." 
<< std::endl; - exit(-1); - } - // connect to mysql server - MYSQL mysql; - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - /******************************************** - * Connect to database via mysql-c * - ********************************************/ - mysql_query(&mysql, "CREATE DATABASE TEST_DB_1"); - if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql); - create_table(mysql); - - Ndb* myNdb= new Ndb( cluster_connection, - "TEST_DB_1" ); // Object representing the database - - if (myNdb->init() == -1) { - APIERROR(myNdb->getNdbError()); - exit(-1); - } - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - if (myTable == NULL) - { - APIERROR(myDict->getNdbError()); - return -1; - } - /************************************ - * Execute some insert transactions * - ************************************/ - for (int i = 10000; i < 20000; i++) { - executeInsertTransaction(i, myNdb, myTable); - } - - delete myNdb; - delete cluster_connection; - - drop_table(mysql); - - ndb_end(0); - return 0; -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan/Makefile b/storage/ndb/ndbapi-examples/ndbapi_scan/Makefile deleted file mode 100644 index 30742509f75..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_scan/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_scan -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o -CXX = g++ -CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -g -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. -INCLUDE_DIR = $(TOP_SRCDIR) -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) - $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp b/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp deleted file mode 100644 index 6024d3b349f..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp +++ /dev/null @@ -1,845 +0,0 @@ - -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -/* - * ndbapi_scan.cpp: - * Illustrates how to use the scan api in the NDBAPI. 
- * The example shows how to do scan, scan for update and scan for delete - * using NdbScanFilter and NdbScanOperation - * - * Classes and methods used in this example: - * - * Ndb_cluster_connection - * connect() - * wait_until_ready() - * - * Ndb - * init() - * getDictionary() - * startTransaction() - * closeTransaction() - * - * NdbTransaction - * getNdbScanOperation() - * execute() - * - * NdbScanOperation - * getValue() - * readTuples() - * nextResult() - * deleteCurrentTuple() - * updateCurrentTuple() - * - * const NdbDictionary::Dictionary - * getTable() - * - * const NdbDictionary::Table - * getColumn() - * - * const NdbDictionary::Column - * getLength() - * - * NdbOperation - * insertTuple() - * equal() - * setValue() - * - * NdbScanFilter - * begin() - * eq() - * end() - * - */ - - -#include -#include -#include -// Used for cout -#include -#include - -/** - * Helper sleep function - */ -static void -milliSleep(int milliseconds){ - struct timeval sleeptime; - sleeptime.tv_sec = milliseconds / 1000; - sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; - select(0, 0, 0, 0, &sleeptime); -} - - -/** - * Helper sleep function - */ -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." << std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) { \ - PRINT_ERROR(error.code,error.message); \ - exit(-1); } - -struct Car -{ - /** - * Note memset, so that entire char-fields are cleared - * as all 20 bytes are significant (as type is char) - */ - Car() { memset(this, 0, sizeof(* this)); } - - unsigned int reg_no; - char brand[20]; - char color[20]; -}; - -/** - * Function to drop table - */ -void drop_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, "DROP TABLE GARAGE")) - MYSQLERROR(mysql); -} - - -/** - * Function to create table - */ -void create_table(MYSQL &mysql) -{ - while (mysql_query(&mysql, - "CREATE TABLE" - " GARAGE" - " (REG_NO INT UNSIGNED NOT NULL," - " BRAND CHAR(20) NOT NULL," - " COLOR CHAR(20) NOT NULL," - " PRIMARY KEY USING HASH (REG_NO))" - " ENGINE=NDB")) - { - if (mysql_errno(&mysql) != ER_TABLE_EXISTS_ERROR) - MYSQLERROR(mysql); - std::cout << "MySQL Cluster already has example table: GARAGE. " - << "Dropping it..." 
<< std::endl; - /****************** - * Recreate table * - ******************/ - drop_table(mysql); - create_table(mysql); - } -} - -int populate(Ndb * myNdb) -{ - int i; - Car cars[15]; - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("GARAGE"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - /** - * Five blue mercedes - */ - for (i = 0; i < 5; i++) - { - cars[i].reg_no = i; - sprintf(cars[i].brand, "Mercedes"); - sprintf(cars[i].color, "Blue"); - } - - /** - * Five black bmw - */ - for (i = 5; i < 10; i++) - { - cars[i].reg_no = i; - sprintf(cars[i].brand, "BMW"); - sprintf(cars[i].color, "Black"); - } - - /** - * Five pink toyotas - */ - for (i = 10; i < 15; i++) - { - cars[i].reg_no = i; - sprintf(cars[i].brand, "Toyota"); - sprintf(cars[i].color, "Pink"); - } - - NdbTransaction* myTrans = myNdb->startTransaction(); - if (myTrans == NULL) - APIERROR(myNdb->getNdbError()); - - for (i = 0; i < 15; i++) - { - NdbOperation* myNdbOperation = myTrans->getNdbOperation(myTable); - if (myNdbOperation == NULL) - APIERROR(myTrans->getNdbError()); - myNdbOperation->insertTuple(); - myNdbOperation->equal("REG_NO", cars[i].reg_no); - myNdbOperation->setValue("BRAND", cars[i].brand); - myNdbOperation->setValue("COLOR", cars[i].color); - } - - int check = myTrans->execute(NdbTransaction::Commit); - - myTrans->close(); - - return check != -1; -} - -int scan_delete(Ndb* myNdb, - int column, - const char * color) - -{ - - // Scan all records exclusive and delete - // them one by one - int retryAttempt = 0; - const int retryMax = 10; - int deletedRows = 0; - int check; - NdbError err; - NdbTransaction *myTrans; - NdbScanOperation *myScanOp; - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("GARAGE"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - /** - * Loop as long as : - * retryMax not reached - * failed operations due to TEMPORARY erros - * - * Exit loop; - * retyrMax reached - * Permanent error (return -1) - */ - while (true) - { - if (retryAttempt >= retryMax) - { - std::cout << "ERROR: has retried this operation " << retryAttempt - << " times, failing!" << std::endl; - return -1; - } - - myTrans = myNdb->startTransaction(); - if (myTrans == NULL) - { - const NdbError err = myNdb->getNdbError(); - - if (err.status == NdbError::TemporaryError) - { - milliSleep(50); - retryAttempt++; - continue; - } - std::cout << err.message << std::endl; - return -1; - } - - /** - * Get a scan operation. - */ - myScanOp = myTrans->getNdbScanOperation(myTable); - if (myScanOp == NULL) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Define a result set for the scan. 
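create_table() above recreates GARAGE by dropping the old copy and calling itself again when CREATE TABLE reports ER_TABLE_EXISTS_ERROR. For comparison, a flat variant with the same end result, sketched with only the MySQL C API calls already used by the example (the helper name recreate_garage is illustrative):

#include <mysql.h>
#include <cstdio>
#include <cstdlib>

// Drop any existing copy of GARAGE, then create it fresh, without recursion.
static void recreate_garage(MYSQL &mysql)
{
  if (mysql_query(&mysql, "DROP TABLE IF EXISTS GARAGE") ||
      mysql_query(&mysql,
                  "CREATE TABLE GARAGE"
                  " (REG_NO INT UNSIGNED NOT NULL,"
                  "  BRAND CHAR(20) NOT NULL,"
                  "  COLOR CHAR(20) NOT NULL,"
                  "  PRIMARY KEY USING HASH (REG_NO))"
                  " ENGINE=NDB"))
  {
    fprintf(stderr, "mysql error %u: %s\n",
            mysql_errno(&mysql), mysql_error(&mysql));
    exit(-1);
  }
}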
- */ - if(myScanOp->readTuples(NdbOperation::LM_Exclusive) != 0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Use NdbScanFilter to define a search critera - */ - NdbScanFilter filter(myScanOp) ; - if(filter.begin(NdbScanFilter::AND) < 0 || - filter.cmp(NdbScanFilter::COND_EQ, column, color) < 0 || - filter.end() < 0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Start scan (NoCommit since we are only reading at this stage); - */ - if(myTrans->execute(NdbTransaction::NoCommit) != 0){ - err = myTrans->getNdbError(); - if(err.status == NdbError::TemporaryError){ - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - std::cout << err.code << std::endl; - std::cout << myTrans->getNdbError().code << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - - /** - * start of loop: nextResult(true) means that "parallelism" number of - * rows are fetched from NDB and cached in NDBAPI - */ - while((check = myScanOp->nextResult(true)) == 0){ - do - { - if (myScanOp->deleteCurrentTuple() != 0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - deletedRows++; - - /** - * nextResult(false) means that the records - * cached in the NDBAPI are modified before - * fetching more rows from NDB. - */ - } while((check = myScanOp->nextResult(false)) == 0); - - /** - * Commit when all cached tuple have been marked for deletion - */ - if(check != -1) - { - check = myTrans->execute(NdbTransaction::Commit); - } - - if(check == -1) - { - /** - * Create a new transaction, while keeping scan open - */ - check = myTrans->restart(); - } - - /** - * Check for errors - */ - err = myTrans->getNdbError(); - if(check == -1) - { - if(err.status == NdbError::TemporaryError) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - } - /** - * End of loop - */ - } - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return 0; - } - - if(myTrans!=0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - } - return -1; -} - - -int scan_update(Ndb* myNdb, - int update_column, - const char * before_color, - const char * after_color) - -{ - - // Scan all records exclusive and update - // them one by one - int retryAttempt = 0; - const int retryMax = 10; - int updatedRows = 0; - int check; - NdbError err; - NdbTransaction *myTrans; - NdbScanOperation *myScanOp; - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("GARAGE"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - /** - * Loop as long as : - * retryMax not reached - * failed operations due to TEMPORARY erros - * - * Exit loop; - * retyrMax reached - * Permanent error (return -1) - */ - while (true) - { - - if (retryAttempt >= retryMax) - { - std::cout << "ERROR: has retried this operation " << retryAttempt - << " times, failing!" 
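scan_delete() above combines three pieces: an exclusive scan (readTuples(LM_Exclusive)), an NdbScanFilter that keeps only rows whose colour column matches, and deleteCurrentTuple() inside the nextResult() loop, with each fetched batch of deletes sent and committed by execute(). A trimmed sketch of just that flow, with the retry loop and error paths omitted (the helper name delete_matching is illustrative; column is the attribute id of COLOR, as in the example):

// Delete every row whose `column` equals `value`.
static int delete_matching(Ndb *ndb, const NdbDictionary::Table *table,
                           int column, const char *value)
{
  NdbTransaction *trans = ndb->startTransaction();
  NdbScanOperation *scan = trans->getNdbScanOperation(table);
  scan->readTuples(NdbOperation::LM_Exclusive);       // exclusive row locks

  NdbScanFilter filter(scan);                         // WHERE column = value
  filter.begin(NdbScanFilter::AND);
  filter.cmp(NdbScanFilter::COND_EQ, column, value);
  filter.end();

  trans->execute(NdbTransaction::NoCommit);           // start the scan

  int deleted = 0, check;
  while ((check = scan->nextResult(true)) == 0)       // fetch a batch from NDB
  {
    do
    {
      scan->deleteCurrentTuple();                     // mark the current row
      deleted++;
    } while ((check = scan->nextResult(false)) == 0); // next cached row

    if (check != -1)
      trans->execute(NdbTransaction::Commit);         // send and commit the batch
  }
  ndb->closeTransaction(trans);
  return deleted;
}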
<< std::endl; - return -1; - } - - myTrans = myNdb->startTransaction(); - if (myTrans == NULL) - { - const NdbError err = myNdb->getNdbError(); - - if (err.status == NdbError::TemporaryError) - { - milliSleep(50); - retryAttempt++; - continue; - } - std::cout << err.message << std::endl; - return -1; - } - - /** - * Get a scan operation. - */ - myScanOp = myTrans->getNdbScanOperation(myTable); - if (myScanOp == NULL) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Define a result set for the scan. - */ - if( myScanOp->readTuples(NdbOperation::LM_Exclusive) ) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Use NdbScanFilter to define a search critera - */ - NdbScanFilter filter(myScanOp) ; - if(filter.begin(NdbScanFilter::AND) < 0 || - filter.cmp(NdbScanFilter::COND_EQ, update_column, before_color) <0|| - filter.end() <0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Start scan (NoCommit since we are only reading at this stage); - */ - if(myTrans->execute(NdbTransaction::NoCommit) != 0) - { - err = myTrans->getNdbError(); - if(err.status == NdbError::TemporaryError){ - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - std::cout << myTrans->getNdbError().code << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * start of loop: nextResult(true) means that "parallelism" number of - * rows are fetched from NDB and cached in NDBAPI - */ - while((check = myScanOp->nextResult(true)) == 0){ - do { - /** - * Get update operation - */ - NdbOperation * myUpdateOp = myScanOp->updateCurrentTuple(); - if (myUpdateOp == 0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - updatedRows++; - - /** - * do the update - */ - myUpdateOp->setValue(update_column, after_color); - /** - * nextResult(false) means that the records - * cached in the NDBAPI are modified before - * fetching more rows from NDB. 
- */ - } while((check = myScanOp->nextResult(false)) == 0); - - /** - * NoCommit when all cached tuple have been updated - */ - if(check != -1) - { - check = myTrans->execute(NdbTransaction::NoCommit); - } - - /** - * Check for errors - */ - err = myTrans->getNdbError(); - if(check == -1) - { - if(err.status == NdbError::TemporaryError){ - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - } - /** - * End of loop - */ - } - - /** - * Commit all prepared operations - */ - if(myTrans->execute(NdbTransaction::Commit) == -1) - { - if(err.status == NdbError::TemporaryError){ - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - } - - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return 0; - } - - - if(myTrans!=0) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - } - return -1; -} - - - -int scan_print(Ndb * myNdb) -{ -// Scan all records exclusive and update - // them one by one - int retryAttempt = 0; - const int retryMax = 10; - int fetchedRows = 0; - int check; - NdbError err; - NdbTransaction *myTrans; - NdbScanOperation *myScanOp; - /* Result of reading attribute value, three columns: - REG_NO, BRAND, and COLOR - */ - NdbRecAttr * myRecAttr[3]; - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("GARAGE"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - /** - * Loop as long as : - * retryMax not reached - * failed operations due to TEMPORARY erros - * - * Exit loop; - * retyrMax reached - * Permanent error (return -1) - */ - while (true) - { - - if (retryAttempt >= retryMax) - { - std::cout << "ERROR: has retried this operation " << retryAttempt - << " times, failing!" << std::endl; - return -1; - } - - myTrans = myNdb->startTransaction(); - if (myTrans == NULL) - { - const NdbError err = myNdb->getNdbError(); - - if (err.status == NdbError::TemporaryError) - { - milliSleep(50); - retryAttempt++; - continue; - } - std::cout << err.message << std::endl; - return -1; - } - /* - * Define a scan operation. - * NDBAPI. - */ - myScanOp = myTrans->getNdbScanOperation(myTable); - if (myScanOp == NULL) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Read without locks, without being placed in lock queue - */ - if( myScanOp->readTuples(NdbOperation::LM_CommittedRead) == -1) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * Define storage for fetched attributes. - * E.g., the resulting attributes of executing - * myOp->getValue("REG_NO") is placed in myRecAttr[0]. - * No data exists in myRecAttr until transaction has commited! 
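scan_update() is the same skeleton with updateCurrentTuple() in place of the delete: each matching row gets a child update operation whose setValue() writes the new colour, every batch is sent with NoCommit, and one final Commit makes all updates durable. Condensed in the same way (the helper name update_matching is illustrative):

// Change `column` from before_value to after_value on every matching row.
static int update_matching(Ndb *ndb, const NdbDictionary::Table *table,
                           int column, const char *before_value,
                           const char *after_value)
{
  NdbTransaction *trans = ndb->startTransaction();
  NdbScanOperation *scan = trans->getNdbScanOperation(table);
  scan->readTuples(NdbOperation::LM_Exclusive);

  NdbScanFilter filter(scan);                         // WHERE column = before_value
  filter.begin(NdbScanFilter::AND);
  filter.cmp(NdbScanFilter::COND_EQ, column, before_value);
  filter.end();

  trans->execute(NdbTransaction::NoCommit);           // start the scan

  int updated = 0, check;
  while ((check = scan->nextResult(true)) == 0)
  {
    do
    {
      NdbOperation *upd = scan->updateCurrentTuple(); // child update operation
      upd->setValue(column, after_value);
      updated++;
    } while ((check = scan->nextResult(false)) == 0);
    trans->execute(NdbTransaction::NoCommit);         // send this batch of updates
  }
  trans->execute(NdbTransaction::Commit);             // commit everything at once
  ndb->closeTransaction(trans);
  return updated;
}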
- */ - myRecAttr[0] = myScanOp->getValue("REG_NO"); - myRecAttr[1] = myScanOp->getValue("BRAND"); - myRecAttr[2] = myScanOp->getValue("COLOR"); - if(myRecAttr[0] ==NULL || myRecAttr[1] == NULL || myRecAttr[2]==NULL) - { - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - /** - * Start scan (NoCommit since we are only reading at this stage); - */ - if(myTrans->execute(NdbTransaction::NoCommit) != 0){ - err = myTrans->getNdbError(); - if(err.status == NdbError::TemporaryError){ - std::cout << myTrans->getNdbError().message << std::endl; - myNdb->closeTransaction(myTrans); - milliSleep(50); - continue; - } - std::cout << err.code << std::endl; - std::cout << myTrans->getNdbError().code << std::endl; - myNdb->closeTransaction(myTrans); - return -1; - } - - /** - * start of loop: nextResult(true) means that "parallelism" number of - * rows are fetched from NDB and cached in NDBAPI - */ - while((check = myScanOp->nextResult(true)) == 0){ - do { - - fetchedRows++; - /** - * print REG_NO unsigned int - */ - std::cout << myRecAttr[0]->u_32_value() << "\t"; - - /** - * print BRAND character string - */ - std::cout << myRecAttr[1]->aRef() << "\t"; - - /** - * print COLOR character string - */ - std::cout << myRecAttr[2]->aRef() << std::endl; - - /** - * nextResult(false) means that the records - * cached in the NDBAPI are modified before - * fetching more rows from NDB. - */ - } while((check = myScanOp->nextResult(false)) == 0); - - } - myNdb->closeTransaction(myTrans); - return 1; - } - return -1; - -} - - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - ndb_init(); - MYSQL mysql; - - /************************************************************** - * Connect to mysql server and create table * - **************************************************************/ - { - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - mysql_query(&mysql, "CREATE DATABASE TEST_DB"); - if (mysql_query(&mysql, "USE TEST_DB") != 0) MYSQLERROR(mysql); - - create_table(mysql); - } - - /************************************************************** - * Connect to ndb cluster * - **************************************************************/ - - Ndb_cluster_connection cluster_connection(connectstring); - if (cluster_connection.connect(4, 5, 1)) - { - std::cout << "Unable to connect to cluster within 30 secs." 
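scan_print() is the read-only variant: LM_CommittedRead takes no locks and skips the lock queue, getValue() binds one NdbRecAttr per column before the scan is executed, and values are read through u_32_value()/aRef() as rows arrive. The essential calls, without the retry scaffolding (the helper name print_all is illustrative):

#include <iostream>

// Print REG_NO, BRAND and COLOR for every row using a lock-free scan.
static int print_all(Ndb *ndb, const NdbDictionary::Table *table)
{
  NdbTransaction *trans = ndb->startTransaction();
  NdbScanOperation *scan = trans->getNdbScanOperation(table);
  scan->readTuples(NdbOperation::LM_CommittedRead);   // no locks, no lock queue

  NdbRecAttr *reg   = scan->getValue("REG_NO");       // bound before execute()
  NdbRecAttr *brand = scan->getValue("BRAND");
  NdbRecAttr *color = scan->getValue("COLOR");

  trans->execute(NdbTransaction::NoCommit);           // start the scan

  int rows = 0, check;
  while ((check = scan->nextResult(true)) == 0)
  {
    do
    {
      std::cout << reg->u_32_value() << "\t"
                << brand->aRef() << "\t"
                << color->aRef() << std::endl;
      rows++;
    } while ((check = scan->nextResult(false)) == 0);
  }
  ndb->closeTransaction(trans);
  return rows;
}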
<< std::endl; - exit(-1); - } - // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster_connection.wait_until_ready(30,0) < 0) - { - std::cout << "Cluster was not ready within 30 secs.\n"; - exit(-1); - } - - Ndb myNdb(&cluster_connection,"TEST_DB"); - if (myNdb.init(1024) == -1) { // Set max 1024 parallel transactions - APIERROR(myNdb.getNdbError()); - exit(-1); - } - - /******************************************* - * Check table definition * - *******************************************/ - int column_color; - { - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *t= myDict->getTable("GARAGE"); - - Car car; - if (t->getColumn("COLOR")->getLength() != sizeof(car.color) || - t->getColumn("BRAND")->getLength() != sizeof(car.brand)) - { - std::cout << "Wrong table definition" << std::endl; - exit(-1); - } - column_color= t->getColumn("COLOR")->getColumnNo(); - } - - if(populate(&myNdb) > 0) - std::cout << "populate: Success!" << std::endl; - - if(scan_print(&myNdb) > 0) - std::cout << "scan_print: Success!" << std::endl << std::endl; - - std::cout << "Going to delete all pink cars!" << std::endl; - - { - /** - * Note! color needs to be of exact the same size as column defined - */ - Car tmp; - sprintf(tmp.color, "Pink"); - if(scan_delete(&myNdb, column_color, tmp.color) > 0) - std::cout << "scan_delete: Success!" << std::endl << std::endl; - } - - if(scan_print(&myNdb) > 0) - std::cout << "scan_print: Success!" << std::endl << std::endl; - - { - /** - * Note! color1 & 2 need to be of exact the same size as column defined - */ - Car tmp1, tmp2; - sprintf(tmp1.color, "Blue"); - sprintf(tmp2.color, "Black"); - std::cout << "Going to update all " << tmp1.color - << " cars to " << tmp2.color << " cars!" << std::endl; - if(scan_update(&myNdb, column_color, tmp1.color, tmp2.color) > 0) - std::cout << "scan_update: Success!" << std::endl << std::endl; - } - if(scan_print(&myNdb) > 0) - std::cout << "scan_print: Success!" << std::endl << std::endl; - - /** - * Drop table - */ - drop_table(mysql); - - return 0; -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt deleted file mode 100644 index 47cb4bf9ffa..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt +++ /dev/null @@ -1,3 +0,0 @@ -1. Set NDB_OS in Makefile -2. Add path to libNDB_API.so in LD_LIBRARY_PATH -3. Set NDB_CONNECTSTRING diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple/Makefile deleted file mode 100644 index fa407fb7d63..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_simple -SRCS = $(TARGET).cpp -OBJS = $(TARGET).o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. 
-INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(TARGET).o: $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp deleted file mode 100644 index b72397f20f9..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp +++ /dev/null @@ -1,298 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/* - * ndbapi_simple.cpp: Using synchronous transactions in NDB API - * - * Correct output from this program is: - * - * ATTR1 ATTR2 - * 0 10 - * 1 1 - * 2 12 - * Detected that deleted tuple doesn't exist! - * 4 14 - * 5 5 - * 6 16 - * 7 7 - * 8 18 - * 9 9 - * - */ - -#include -#include -// Used for cout -#include -#include - -static void run_application(MYSQL &, Ndb_cluster_connection &); - -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." 
<< std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) { \ - PRINT_ERROR(error.code,error.message); \ - exit(-1); } - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - // ndb_init must be called first - ndb_init(); - - // connect to mysql server and cluster and run application - { - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - // Object representing the cluster - Ndb_cluster_connection cluster_connection(connectstring); - - // Connect to cluster management server (ndb_mgmd) - if (cluster_connection.connect(4 /* retries */, - 5 /* delay between retries */, - 1 /* verbose */)) - { - std::cout << "Cluster management server was not ready within 30 secs.\n"; - exit(-1); - } - - // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster_connection.wait_until_ready(30,0) < 0) - { - std::cout << "Cluster was not ready within 30 secs.\n"; - exit(-1); - } - - // connect to mysql server - MYSQL mysql; - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - // run the application code - run_application(mysql, cluster_connection); - } - - ndb_end(0); - - return 0; -} - -static void create_table(MYSQL &); -static void drop_table(MYSQL &); -static void do_insert(Ndb &); -static void do_update(Ndb &); -static void do_delete(Ndb &); -static void do_read(Ndb &); - -static void run_application(MYSQL &mysql, - Ndb_cluster_connection &cluster_connection) -{ - /******************************************** - * Connect to database via mysql-c * - ********************************************/ - mysql_query(&mysql, "CREATE DATABASE TEST_DB_1"); - if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql); - create_table(mysql); - - /******************************************** - * Connect to database via NdbApi * - ********************************************/ - // Object representing the database - Ndb myNdb( &cluster_connection, "TEST_DB_1" ); - if (myNdb.init()) APIERROR(myNdb.getNdbError()); - - /* - * Do different operations on database - */ - do_insert(myNdb); - do_update(myNdb); - do_delete(myNdb); - do_read(myNdb); - drop_table(mysql); - mysql_query(&mysql, "DROP DATABASE TEST_DB_1"); -} - -/********************************************************* - * Create a table named MYTABLENAME if it does not exist * - *********************************************************/ -static void create_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "CREATE TABLE" - " MYTABLENAME" - " (ATTR1 INT UNSIGNED NOT NULL PRIMARY KEY," - " ATTR2 INT UNSIGNED NOT NULL)" - " ENGINE=NDB")) - MYSQLERROR(mysql); -} - -/*********************************** - * Drop a table named MYTABLENAME - ***********************************/ -static void drop_table(MYSQL &mysql) -{ - if (mysql_query(&mysql, - "DROP TABLE" - " MYTABLENAME")) - MYSQLERROR(mysql); -} - -/************************************************************************** - * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) * - **************************************************************************/ -static void do_insert(Ndb &myNdb) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - - if (myTable == NULL) - 
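Every example repeats the same start-up order: ndb_init() before anything else, one Ndb_cluster_connection that is connect()-ed to the management server and then waited on, and one Ndb object per database; ndb_end() may only run once all of those objects have been destroyed. A bare skeleton of that sequence, with error checks stripped and the argument order (mysqld socket, connectstring) taken from the example:

#include <NdbApi.hpp>

int main(int argc, char **argv)
{
  ndb_init();                                    // must be called first
  {
    Ndb_cluster_connection conn(argv[2]);        // connectstring to ndb_mgmd
    conn.connect(4 /*retries*/, 5 /*delay*/, 1 /*verbose*/);
    conn.wait_until_ready(30, 0);                // wait for the data nodes

    Ndb ndb(&conn, "TEST_DB_1");                 // one Ndb object per database
    ndb.init();

    // ... transactions against ndb go here ...
  }                                              // connection destroyed here
  ndb_end(0);                                    // only after all NDB objects are gone
  return 0;
}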
APIERROR(myDict->getNdbError()); - - for (int i = 0; i < 5; i++) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i); - myOperation->setValue("ATTR2", i); - - myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i+5); - myOperation->setValue("ATTR2", i+5); - - if (myTransaction->execute( NdbTransaction::Commit ) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); - } -} - -/***************************************************************** - * Update the second attribute in half of the tuples (adding 10) * - *****************************************************************/ -static void do_update(Ndb &myNdb) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - for (int i = 0; i < 10; i+=2) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->updateTuple(); - myOperation->equal( "ATTR1", i ); - myOperation->setValue( "ATTR2", i+10); - - if( myTransaction->execute( NdbTransaction::Commit ) == -1 ) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); - } -} - -/************************************************* - * Delete one tuple (the one with primary key 3) * - *************************************************/ -static void do_delete(Ndb &myNdb) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->deleteTuple(); - myOperation->equal( "ATTR1", 3 ); - - if (myTransaction->execute(NdbTransaction::Commit) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); -} - -/***************************** - * Read and print all tuples * - *****************************/ -static void do_read(Ndb &myNdb) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - std::cout << "ATTR1 ATTR2" << std::endl; - - for (int i = 0; i < 10; i++) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->readTuple(NdbOperation::LM_Read); - myOperation->equal("ATTR1", i); - - NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); - if (myRecAttr == NULL) 
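do_insert() batches two insert operations into one transaction before calling execute(), which is why five transactions are enough for ten rows; either both rows are committed or neither is. The batching idiom in isolation (the helper name insert_pair is illustrative, attribute names as in the example):

// Insert (i, i) and (i+5, i+5) in a single round trip to the data nodes.
static void insert_pair(Ndb &ndb, const NdbDictionary::Table *table, int i)
{
  NdbTransaction *trans = ndb.startTransaction();

  int keys[2] = { i, i + 5 };
  for (int k = 0; k < 2; k++)                    // two operations, one commit
  {
    NdbOperation *op = trans->getNdbOperation(table);
    op->insertTuple();
    op->equal("ATTR1", keys[k]);
    op->setValue("ATTR2", keys[k]);
  }

  trans->execute(NdbTransaction::Commit);        // both rows or neither
  ndb.closeTransaction(trans);
}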
APIERROR(myTransaction->getNdbError()); - - if(myTransaction->execute( NdbTransaction::Commit ) == -1) - APIERROR(myTransaction->getNdbError()); - - if (myTransaction->getNdbError().classification == NdbError::NoDataFound) - if (i == 3) - std::cout << "Detected that deleted tuple doesn't exist!" << std::endl; - else - APIERROR(myTransaction->getNdbError()); - - if (i != 3) { - printf(" %2d %2d\n", i, myRecAttr->u_32_value()); - } - myNdb.closeTransaction(myTransaction); - } -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile deleted file mode 100644 index 9757df3ceab..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_simple_dual -SRCS = main.cpp -OBJS = main.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. -INCLUDE_DIR = $(TOP_SRCDIR)/storage/ndb/include -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(OBJS): $(SRCS) - $(CXX) $(CFLAGS) -I$(TOP_SRCDIR)/include -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp deleted file mode 100644 index fb91d1d2120..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_dual/main.cpp +++ /dev/null @@ -1,348 +0,0 @@ -/* Copyright (c) 2003, 2006, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/* - * ndbapi_simple_dual.cpp: Using synchronous transactions in NDB API - * - * Correct output from this program is: - * - * ATTR1 ATTR2 - * 0 10 - * 1 1 - * 2 12 - * Detected that deleted tuple doesn't exist! - * 4 14 - * 5 5 - * 6 16 - * 7 7 - * 8 18 - * 9 9 - * ATTR1 ATTR2 - * 0 10 - * 1 1 - * 2 12 - * Detected that deleted tuple doesn't exist! - * 4 14 - * 5 5 - * 6 16 - * 7 7 - * 8 18 - * 9 9 - * - */ - -#include -#include -// Used for cout -#include -#include - -static void run_application(MYSQL &, Ndb_cluster_connection &, const char* table, const char* db); - -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." 
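do_read() shows how a missing row surfaces on a primary-key read: execute() returns -1 and the transaction's error classification is NdbError::NoDataFound, which the example treats as expected for the deleted key 3. A compact version of that check (the helper name read_attr2 is illustrative):

// Read ATTR2 for the row with primary key `key`; returns false if the row is gone.
static bool read_attr2(Ndb &ndb, const NdbDictionary::Table *table,
                       int key, unsigned int *out)
{
  NdbTransaction *trans = ndb.startTransaction();
  NdbOperation *op = trans->getNdbOperation(table);

  op->readTuple(NdbOperation::LM_Read);
  op->equal("ATTR1", key);
  NdbRecAttr *attr2 = op->getValue("ATTR2", NULL);

  bool found = (trans->execute(NdbTransaction::Commit) == 0);
  // A deleted row is reported as NdbError::NoDataFound on the transaction;
  // the example distinguishes that case from real errors (which it aborts on).
  if (found)
    *out = attr2->u_32_value();

  ndb.closeTransaction(trans);
  return found;
}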
<< std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) { \ - PRINT_ERROR(error.code,error.message); \ - exit(-1); } - -int main(int argc, char** argv) -{ - if (argc != 5) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - // ndb_init must be called first - ndb_init(); - { - char * mysqld1_sock = argv[1]; - const char *connectstring1 = argv[2]; - char * mysqld2_sock = argv[3]; - const char *connectstring2 = argv[4]; - - // Object representing the cluster 1 - Ndb_cluster_connection cluster1_connection(connectstring1); - MYSQL mysql1; - // Object representing the cluster 2 - Ndb_cluster_connection cluster2_connection(connectstring2); - MYSQL mysql2; - - // connect to mysql server and cluster 1 and run application - // Connect to cluster 1 management server (ndb_mgmd) - if (cluster1_connection.connect(4 /* retries */, - 5 /* delay between retries */, - 1 /* verbose */)) - { - std::cout << "Cluster 1 management server was not ready within 30 secs.\n"; - exit(-1); - } - // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster1_connection.wait_until_ready(30,0) < 0) - { - std::cout << "Cluster 1 was not ready within 30 secs.\n"; - exit(-1); - } - // connect to mysql server in cluster 1 - if ( !mysql_init(&mysql1) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql1, "localhost", "root", "", "", - 0, mysqld1_sock, 0) ) - MYSQLERROR(mysql1); - - - // connect to mysql server and cluster 2 and run application - - // Connect to cluster management server (ndb_mgmd) - if (cluster2_connection.connect(4 /* retries */, - 5 /* delay between retries */, - 1 /* verbose */)) - { - std::cout << "Cluster 2 management server was not ready within 30 secs.\n"; - exit(-1); - } - // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster2_connection.wait_until_ready(30,0) < 0) - { - std::cout << "Cluster 2 was not ready within 30 secs.\n"; - exit(-1); - } - // connect to mysql server in cluster 2 - if ( !mysql_init(&mysql2) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql2, "localhost", "root", "", "", - 0, mysqld2_sock, 0) ) - MYSQLERROR(mysql2); - - // run the application code - run_application(mysql1, cluster1_connection, "MYTABLENAME1", "TEST_DB_1"); - run_application(mysql2, cluster2_connection, "MYTABLENAME2", "TEST_DB_2"); - } - // Note: all connections must have been destroyed before calling ndb_end() - ndb_end(0); - - return 0; -} - -static void create_table(MYSQL &, const char* table); -static void drop_table(MYSQL &, const char* table); -static void do_insert(Ndb &, const char* table); -static void do_update(Ndb &, const char* table); -static void do_delete(Ndb &, const char* table); -static void do_read(Ndb &, const char* table); - -static void run_application(MYSQL &mysql, - Ndb_cluster_connection &cluster_connection, - const char* table, - const char* db) -{ - /******************************************** - * Connect to database via mysql-c * - ********************************************/ - char db_stmt[256]; - sprintf(db_stmt, "CREATE DATABASE %s\n", db); - mysql_query(&mysql, db_stmt); - sprintf(db_stmt, "USE %s", db); - if (mysql_query(&mysql, db_stmt) != 0) MYSQLERROR(mysql); - create_table(mysql, table); - - /******************************************** - * Connect to database via NdbApi * - ********************************************/ - // Object representing the database - Ndb myNdb( 
&cluster_connection, db ); - if (myNdb.init()) APIERROR(myNdb.getNdbError()); - - /* - * Do different operations on database - */ - do_insert(myNdb, table); - do_update(myNdb, table); - do_delete(myNdb, table); - do_read(myNdb, table); - /* - * Drop the table - */ - drop_table(mysql, table); - sprintf(db_stmt, "DROP DATABASE %s\n", db); - mysql_query(&mysql, db_stmt); -} - -/********************************************************* - * Create a table named by table if it does not exist * - *********************************************************/ -static void create_table(MYSQL &mysql, const char* table) -{ - char create_stmt[256]; - - sprintf(create_stmt, "CREATE TABLE %s \ - (ATTR1 INT UNSIGNED NOT NULL PRIMARY KEY,\ - ATTR2 INT UNSIGNED NOT NULL)\ - ENGINE=NDB", table); - if (mysql_query(&mysql, create_stmt)) - MYSQLERROR(mysql); -} - -/******************************* - * Drop a table named by table - *******************************/ -static void drop_table(MYSQL &mysql, const char* table) -{ - char drop_stmt[256]; - - sprintf(drop_stmt, "DROP TABLE IF EXISTS %s", table); - if (mysql_query(&mysql, drop_stmt)) - MYSQLERROR(mysql); -} - -/************************************************************************** - * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) * - **************************************************************************/ -static void do_insert(Ndb &myNdb, const char* table) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable(table); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - for (int i = 0; i < 5; i++) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i); - myOperation->setValue("ATTR2", i); - - myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i+5); - myOperation->setValue("ATTR2", i+5); - - if (myTransaction->execute( NdbTransaction::Commit ) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); - } -} - -/***************************************************************** - * Update the second attribute in half of the tuples (adding 10) * - *****************************************************************/ -static void do_update(Ndb &myNdb, const char* table) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable(table); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - for (int i = 0; i < 10; i+=2) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->updateTuple(); - myOperation->equal( "ATTR1", i ); - myOperation->setValue( "ATTR2", i+10); - - if( myTransaction->execute( NdbTransaction::Commit ) == -1 ) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); - } -} - -/************************************************* - * Delete one tuple (the one with primary key 3) * - 
*************************************************/ -static void do_delete(Ndb &myNdb, const char* table) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable(table); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->deleteTuple(); - myOperation->equal( "ATTR1", 3 ); - - if (myTransaction->execute(NdbTransaction::Commit) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb.closeTransaction(myTransaction); -} - -/***************************** - * Read and print all tuples * - *****************************/ -static void do_read(Ndb &myNdb, const char* table) -{ - const NdbDictionary::Dictionary* myDict= myNdb.getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable(table); - - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - - std::cout << "ATTR1 ATTR2" << std::endl; - - for (int i = 0; i < 10; i++) { - NdbTransaction *myTransaction= myNdb.startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb.getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->readTuple(NdbOperation::LM_Read); - myOperation->equal("ATTR1", i); - - NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); - if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - - if(myTransaction->execute( NdbTransaction::Commit ) == -1) - if (i == 3) { - std::cout << "Detected that deleted tuple doesn't exist!" << std::endl; - } else { - APIERROR(myTransaction->getNdbError()); - } - - if (i != 3) { - printf(" %2d %2d\n", i, myRecAttr->u_32_value()); - } - myNdb.closeTransaction(myTransaction); - } -} diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile deleted file mode 100644 index 975563b9508..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -TARGET = ndbapi_simple_index -SRCS = main.cpp -OBJS = main.o -CXX = g++ -CFLAGS = -c -Wall -fno-rtti -fno-exceptions -CXXFLAGS = -DEBUG = -LFLAGS = -Wall -TOP_SRCDIR = ../../../.. 
-INCLUDE_DIR = $(TOP_SRCDIR) -LIB_DIR = -L$(TOP_SRCDIR)/storage/ndb/src/.libs \ - -L$(TOP_SRCDIR)/libmysql_r/.libs \ - -L$(TOP_SRCDIR)/zlib/.libs \ - -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings -SYS_LIB = - -$(TARGET): $(OBJS) - $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET) - -$(OBJS): $(SRCS) - $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/storage/ndb/include -I$(INCLUDE_DIR)/storage/ndb/include/ndbapi $(SRCS) - -clean: - rm -f *.o $(TARGET) diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp deleted file mode 100644 index 8504dc3511c..00000000000 --- a/storage/ndb/ndbapi-examples/ndbapi_simple_index/main.cpp +++ /dev/null @@ -1,274 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -// -// ndbapi_simple_index.cpp: Using secondary indexes in NDB API -// -// Correct output from this program is: -// -// ATTR1 ATTR2 -// 0 0 -// 1 1 -// 2 2 -// 3 3 -// 4 4 -// 5 5 -// 6 6 -// 7 7 -// 8 8 -// 9 9 -// ATTR1 ATTR2 -// 0 10 -// 1 1 -// 2 12 -// Detected that deleted tuple doesn't exist! -// 4 14 -// 5 5 -// 6 16 -// 7 7 -// 8 18 -// 9 9 - -#include -#include - -// Used for cout -#include -#include - -#define PRINT_ERROR(code,msg) \ - std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \ - << ", code: " << code \ - << ", msg: " << msg << "." 
<< std::endl -#define MYSQLERROR(mysql) { \ - PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \ - exit(-1); } -#define APIERROR(error) { \ - PRINT_ERROR(error.code,error.message); \ - exit(-1); } - -int main(int argc, char** argv) -{ - if (argc != 3) - { - std::cout << "Arguments are .\n"; - exit(-1); - } - char * mysqld_sock = argv[1]; - const char *connectstring = argv[2]; - ndb_init(); - MYSQL mysql; - - /************************************************************** - * Connect to mysql server and create table * - **************************************************************/ - { - if ( !mysql_init(&mysql) ) { - std::cout << "mysql_init failed\n"; - exit(-1); - } - if ( !mysql_real_connect(&mysql, "localhost", "root", "", "", - 0, mysqld_sock, 0) ) - MYSQLERROR(mysql); - - mysql_query(&mysql, "CREATE DATABASE TEST_DB_1"); - if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql); - - if (mysql_query(&mysql, - "CREATE TABLE" - " MYTABLENAME" - " (ATTR1 INT UNSIGNED," - " ATTR2 INT UNSIGNED NOT NULL," - " PRIMARY KEY USING HASH (ATTR1)," - " UNIQUE MYINDEXNAME USING HASH (ATTR2))" - " ENGINE=NDB")) - MYSQLERROR(mysql); - } - - /************************************************************** - * Connect to ndb cluster * - **************************************************************/ - - Ndb_cluster_connection *cluster_connection= - new Ndb_cluster_connection(connectstring); // Object representing the cluster - - if (cluster_connection->connect(5,3,1)) - { - std::cout << "Connect to cluster management server failed.\n"; - exit(-1); - } - - if (cluster_connection->wait_until_ready(30,30)) - { - std::cout << "Cluster was not ready within 30 secs.\n"; - exit(-1); - } - - Ndb* myNdb = new Ndb( cluster_connection, - "TEST_DB_1" ); // Object representing the database - if (myNdb->init() == -1) { - APIERROR(myNdb->getNdbError()); - exit(-1); - } - - const NdbDictionary::Dictionary* myDict= myNdb->getDictionary(); - const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME"); - if (myTable == NULL) - APIERROR(myDict->getNdbError()); - const NdbDictionary::Index *myIndex= myDict->getIndex("MYINDEXNAME$unique","MYTABLENAME"); - if (myIndex == NULL) - APIERROR(myDict->getNdbError()); - - /************************************************************************** - * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) * - **************************************************************************/ - for (int i = 0; i < 5; i++) { - NdbTransaction *myTransaction= myNdb->startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb->getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i); - myOperation->setValue("ATTR2", i); - - myOperation = myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->insertTuple(); - myOperation->equal("ATTR1", i+5); - myOperation->setValue("ATTR2", i+5); - - if (myTransaction->execute( NdbTransaction::Commit ) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb->closeTransaction(myTransaction); - } - - /***************************************** - * Read and print all tuples using index * - *****************************************/ - std::cout << "ATTR1 ATTR2" << std::endl; - - for (int i = 0; i < 10; i++) { - NdbTransaction *myTransaction= myNdb->startTransaction(); - if (myTransaction == 
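ndbapi_simple_index reaches rows through the unique hash index instead of the primary key: the dictionary name of a unique index is the declared name with a $unique suffix, and operations are created with getNdbIndexOperation() rather than getNdbOperation(). A minimal read by secondary key, using only calls that appear in the example (the helper name read_by_attr2 is illustrative):

// Look up ATTR1 through the unique index on ATTR2; returns -1 if not found.
static int read_by_attr2(Ndb *ndb, const NdbDictionary::Dictionary *dict, int attr2)
{
  const NdbDictionary::Index *index =
      dict->getIndex("MYINDEXNAME$unique", "MYTABLENAME");

  NdbTransaction *trans = ndb->startTransaction();
  NdbIndexOperation *op = trans->getNdbIndexOperation(index);

  op->readTuple(NdbOperation::LM_Read);
  op->equal("ATTR2", attr2);                     // key of the unique index
  NdbRecAttr *attr1 = op->getValue("ATTR1", NULL);

  int result = -1;
  if (trans->execute(NdbTransaction::Commit,
                     NdbOperation::AbortOnError) != -1)
    result = (int)attr1->u_32_value();

  ndb->closeTransaction(trans);
  return result;
}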
NULL) APIERROR(myNdb->getNdbError()); - - NdbIndexOperation *myIndexOperation= - myTransaction->getNdbIndexOperation(myIndex); - if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myIndexOperation->readTuple(NdbOperation::LM_Read); - myIndexOperation->equal("ATTR2", i); - - NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL); - if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - - if(myTransaction->execute( NdbTransaction::Commit, - NdbOperation::AbortOnError ) != -1) - printf(" %2d %2d\n", myRecAttr->u_32_value(), i); - - myNdb->closeTransaction(myTransaction); - } - - /***************************************************************** - * Update the second attribute in half of the tuples (adding 10) * - *****************************************************************/ - for (int i = 0; i < 10; i+=2) { - NdbTransaction *myTransaction= myNdb->startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb->getNdbError()); - - NdbIndexOperation *myIndexOperation= - myTransaction->getNdbIndexOperation(myIndex); - if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myIndexOperation->updateTuple(); - myIndexOperation->equal( "ATTR2", i ); - myIndexOperation->setValue( "ATTR2", i+10); - - if( myTransaction->execute( NdbTransaction::Commit ) == -1 ) - APIERROR(myTransaction->getNdbError()); - - myNdb->closeTransaction(myTransaction); - } - - /************************************************* - * Delete one tuple (the one with primary key 3) * - *************************************************/ - { - NdbTransaction *myTransaction= myNdb->startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb->getNdbError()); - - NdbIndexOperation *myIndexOperation= - myTransaction->getNdbIndexOperation(myIndex); - if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myIndexOperation->deleteTuple(); - myIndexOperation->equal( "ATTR2", 3 ); - - if (myTransaction->execute(NdbTransaction::Commit) == -1) - APIERROR(myTransaction->getNdbError()); - - myNdb->closeTransaction(myTransaction); - } - - /***************************** - * Read and print all tuples * - *****************************/ - { - std::cout << "ATTR1 ATTR2" << std::endl; - - for (int i = 0; i < 10; i++) { - NdbTransaction *myTransaction= myNdb->startTransaction(); - if (myTransaction == NULL) APIERROR(myNdb->getNdbError()); - - NdbOperation *myOperation= myTransaction->getNdbOperation(myTable); - if (myOperation == NULL) APIERROR(myTransaction->getNdbError()); - - myOperation->readTuple(NdbOperation::LM_Read); - myOperation->equal("ATTR1", i); - - NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); - if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - - if(myTransaction->execute( NdbTransaction::Commit, - NdbOperation::AbortOnError ) == -1) - if (i == 3) { - std::cout << "Detected that deleted tuple doesn't exist!\n"; - } else { - APIERROR(myTransaction->getNdbError()); - } - - if (i != 3) { - printf(" %2d %2d\n", i, myRecAttr->u_32_value()); - } - myNdb->closeTransaction(myTransaction); - } - } - - /************** - * Drop table * - **************/ - if (mysql_query(&mysql, "DROP TABLE MYTABLENAME")) - MYSQLERROR(mysql); - - delete myNdb; - delete cluster_connection; - - ndb_end(0); - return 0; -} diff --git a/storage/ndb/plug.in b/storage/ndb/plug.in deleted file mode 100644 index 349c0660a1c..00000000000 --- a/storage/ndb/plug.in +++ /dev/null @@ -1,7 +0,0 @@ -sinclude(storage/ndb/ndb_configure.m4) - 
-MYSQL_STORAGE_ENGINE(ndbcluster, ndbcluster, [Cluster Storage Engine], - [High Availability Clustered tables],) -MYSQL_PLUGIN_STATIC(ndbcluster, [[\$(ndbcluster_libs) \$(ndbcluster_system_libs) \$(NDB_SCI_LIBS)]]) -MYSQL_PLUGIN_ACTIONS(ndbcluster,[MYSQL_SETUP_NDBCLUSTER]) -MYSQL_PLUGIN_DEPENDS(ndbcluster, [partition]) diff --git a/storage/ndb/src/Makefile.am b/storage/ndb/src/Makefile.am deleted file mode 100644 index 33bad49575d..00000000000 --- a/storage/ndb/src/Makefile.am +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = common mgmapi ndbapi . kernel mgmclient mgmsrv cw - -include $(top_srcdir)/storage/ndb/config/common.mk.am - -ndblib_LTLIBRARIES = libndbclient.la - -libndbclient_la_SOURCES = - -libndbclient_la_LDFLAGS = -version-info @NDB_SHARED_LIB_VERSION@ @NDB_LD_VERSION_SCRIPT@ - -libndbclient_la_LIBADD = \ - ndbapi/libndbapi.la \ - common/transporter/libtransporter.la \ - common/debugger/libtrace.la \ - common/debugger/signaldata/libsignaldataprint.la \ - mgmapi/libmgmapi.la \ - common/mgmcommon/libmgmsrvcommon.la \ - common/logger/liblogger.la \ - common/portlib/libportlib.la \ - common/util/libgeneral.la - -windoze-dsp: libndbclient.dsp - -libndbclient.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndblib_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ dummy.cpp - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(libndbclient_la_LIBADD) - @touch dummy.cpp diff --git a/storage/ndb/src/common/Makefile.am b/storage/ndb/src/common/Makefile.am deleted file mode 100644 index d9d55b26eed..00000000000 --- a/storage/ndb/src/common/Makefile.am +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = portlib debugger util logger transporter mgmcommon - -noinst_LTLIBRARIES = libcommon.la - -libcommon_la_SOURCES = -libcommon_la_LIBADD = \ - transporter/libtransporter.la \ - debugger/libtrace.la \ - debugger/signaldata/libsignaldataprint.la \ - mgmcommon/libmgmsrvcommon.la \ - portlib/libportlib.la \ - logger/liblogger.la \ - util/libgeneral.la - -windoze-dsp: diff --git a/storage/ndb/src/common/debugger/BlockNames.cpp b/storage/ndb/src/common/debugger/BlockNames.cpp deleted file mode 100644 index b9e1b5bb617..00000000000 --- a/storage/ndb/src/common/debugger/BlockNames.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -const BlockName BlockNames[] = { - { "CMVMI", CMVMI }, - { "DBACC", DBACC }, - { "DBDICT", DBDICT }, - { "DBDIH", DBDIH }, - { "DBLQH", DBLQH }, - { "DBTC", DBTC }, - { "DBTUP", DBTUP }, - { "NDBFS", NDBFS }, - { "NDBCNTR", NDBCNTR }, - { "QMGR", QMGR }, - { "TRIX", TRIX }, - { "BACKUP", BACKUP }, - { "DBUTIL", DBUTIL }, - { "SUMA", SUMA }, - { "DBTUX", DBTUX } - ,{ "TSMAN", TSMAN} - ,{ "LGMAN", LGMAN } - ,{ "PGMAN", PGMAN } - ,{ "RESTORE", RESTORE } -}; - -const BlockNumber NO_OF_BLOCK_NAMES = sizeof(BlockNames) / sizeof(BlockName); diff --git a/storage/ndb/src/common/debugger/DebuggerNames.cpp b/storage/ndb/src/common/debugger/DebuggerNames.cpp deleted file mode 100644 index 3c0b113d2f4..00000000000 --- a/storage/ndb/src/common/debugger/DebuggerNames.cpp +++ /dev/null @@ -1,154 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -#include "DebuggerNames.hpp" - -#include -#include -#include - -static const char * localSignalNames[MAX_GSN+1]; -static SignalDataPrintFunction localPrintFunctions[MAX_GSN+1]; -static const char * localBlockNames[NO_OF_BLOCKS]; - -static -int -initSignalNames(const char * dst[], const GsnName src[], unsigned short len){ - unsigned i; - for(i = 0; i<=MAX_GSN; i++) - dst[i] = 0; - - for(i = 0; i 0; i++){ - SignalDataPrintFunction fun = src[i].function; - - if(dst[gsn] != 0 && fun != 0){ - if(dst[gsn] != fun){ - fprintf(stderr, - "Multiple definition of signal print function for gsn: %d\n", - gsn); - exit(0); - } - } - dst[gsn] = fun; - } - return 0; -} - -static -int -initBlockNames(const char * dst[], - const BlockName src[], - unsigned len){ - unsigned i; - for(i = 0; i= NO_OF_BLOCKS || dst[index] != 0){ - fprintf(stderr, - "Invalid block name definition: %d %s\n", - src[i].number, src[i].name); - exit(0); - } - dst[index] = src[i].name; - } - return 0; -} - -/** - * Run static initializer - */ -static const int -xxx_DUMMY_SIGNAL_NAMES_xxx = initSignalNames(localSignalNames, - SignalNames, - NO_OF_SIGNAL_NAMES); -static const int -xxx_DUMMY_PRINT_FUNCTIONS_xxx = initSignalPrinters(localPrintFunctions, - SignalDataPrintFunctions); - -static const int -xxx_DUMMY_BLOCK_NAMES_xxx = initBlockNames(localBlockNames, - BlockNames, - NO_OF_BLOCK_NAMES); - -const char * -getSignalName(unsigned short gsn, const char * defVal){ - if(gsn > 0 && gsn <= MAX_GSN) - return (localSignalNames[gsn] ? localSignalNames[gsn] : defVal); - return defVal; -} - -unsigned short -getGsn(const char * signalName){ - return 0; -} - -const char * -getBlockName(unsigned short blockNo, const char * ret){ - if(blockNo >= MIN_BLOCK_NO && blockNo <= MAX_BLOCK_NO) - return localBlockNames[blockNo-MIN_BLOCK_NO]; - if (ret == 0) { - static char buf[20]; - BaseString::snprintf(buf, sizeof(buf), "BLOCK#%d", (int)blockNo); - return buf; - } - return ret; -} - -unsigned short -getBlockNo(const char * blockName){ - for(int i = 0; i 0 && gsn <= MAX_GSN) - return localPrintFunctions[gsn]; - return 0; -} diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp deleted file mode 100644 index cb42250dbd8..00000000000 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ /dev/null @@ -1,1155 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include - -#include -#include -#include -#include -#include - -#include - -// -// PUBLIC -// -EventLoggerBase::~EventLoggerBase() -{ - -} - -#define QQQQ char *m_text, size_t m_text_len, const Uint32* theData - -void getTextConnected(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node %u Connected", - theData[1]); -} -void getTextConnectedApiVersion(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node %u: API version %d.%d.%d", - theData[1], - getMajor(theData[2]), - getMinor(theData[2]), - getBuild(theData[2])); -} -void getTextDisconnected(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node %u Disconnected", - theData[1]); -} -void getTextCommunicationClosed(QQQQ) { - //----------------------------------------------------------------------- - // REPORT communication to node closed. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Communication to Node %u closed", - theData[1]); -} -void getTextCommunicationOpened(QQQQ) { - //----------------------------------------------------------------------- - // REPORT communication to node opened. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Communication to Node %u opened", - theData[1]); -} -void getTextNDBStartStarted(QQQQ) { - //----------------------------------------------------------------------- - // Start of NDB has been initiated. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Start initiated (version %d.%d.%d)", - getMajor(theData[1]), - getMinor(theData[1]), - getBuild(theData[1])); -} -void getTextNDBStopStarted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "%s shutdown initiated", - (theData[1] == 1 ? "Cluster" : "Node")); -} -void getRestartAction(Uint32 action, BaseString &str) -{ - if (action == 0) - return; - str.appfmt(", restarting"); - if (action & 2) - str.appfmt(", no start"); - if (action & 4) - str.appfmt(", initial"); -} -void getTextNDBStopCompleted(QQQQ) { - BaseString action_str(""); - BaseString signum_str(""); - getRestartAction(theData[1], action_str); - if (theData[2]) - signum_str.appfmt(" Initiated by signal %d.", theData[2]); - BaseString::snprintf(m_text, m_text_len, - "Node shutdown completed%s.%s", - action_str.c_str(), - signum_str.c_str()); -} -void getTextNDBStopForced(QQQQ) { - BaseString action_str(""); - BaseString reason_str(""); - BaseString sphase_str(""); - int signum = theData[2]; - int error = theData[3]; - int sphase = theData[4]; - int extra = theData[5]; - getRestartAction(theData[1],action_str); - if (signum) - reason_str.appfmt(" Initiated by signal %d.", signum); - if (error) - { - ndbd_exit_classification cl; - ndbd_exit_status st; - const char *msg = ndbd_exit_message(error, &cl); - const char *cl_msg = ndbd_exit_classification_message(cl, &st); - const char *st_msg = ndbd_exit_status_message(st); - reason_str.appfmt(" Caused by error %d: \'%s(%s). 
%s\'.", - error, msg, cl_msg, st_msg); - if (extra != 0) - reason_str.appfmt(" (extra info %d)", extra); - } - if (sphase < 255) - sphase_str.appfmt(" Occured during startphase %u.", sphase); - BaseString::snprintf(m_text, m_text_len, - "Forced node shutdown completed%s.%s%s", - action_str.c_str(), sphase_str.c_str(), - reason_str.c_str()); -} -void getTextNDBStopAborted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node shutdown aborted"); -} -void getTextNDBStartCompleted(QQQQ) { - //----------------------------------------------------------------------- - // Start of NDB has been completed. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Started (version %d.%d.%d)", - getMajor(theData[1]), - getMinor(theData[1]), - getBuild(theData[1])); -} -void getTextSTTORRYRecieved(QQQQ) { - //----------------------------------------------------------------------- - // STTORRY recevied after restart finished. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "STTORRY received after restart finished"); -} -void getTextStartPhaseCompleted(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Start phase completed. - //----------------------------------------------------------------------- - const char *type = ""; - switch((NodeState::StartType)theData[2]){ - case NodeState::ST_INITIAL_START: - type = "(initial start)"; - break; - case NodeState::ST_SYSTEM_RESTART: - type = "(system restart)"; - break; - case NodeState::ST_NODE_RESTART: - type = "(node restart)"; - break; - case NodeState::ST_INITIAL_NODE_RESTART: - type = "(initial node restart)"; - break; - case NodeState::ST_ILLEGAL_TYPE: - type = ""; - break; - default: - BaseString::snprintf(m_text, m_text_len, - "Start phase %u completed (unknown = %d)", - theData[1], - theData[2]); - return; - } - BaseString::snprintf(m_text, m_text_len, - "Start phase %u completed %s", - theData[1], - type); -} -void getTextCM_REGCONF(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "CM_REGCONF president = %u, own Node = %u, our dynamic id = %u", - theData[2], - theData[1], - theData[3]); -} -void getTextCM_REGREF(QQQQ) { - const char* line = ""; - switch (theData[3]) { - case 0: - line = "Busy"; - break; - case 1: - line = "Election with wait = false"; - break; - case 2: - line = "Election with wait = false"; - break; - case 3: - line = "Not president"; - break; - case 4: - line = "Election without selecting new candidate"; - break; - default: - line = "No such cause"; - break; - }//switch - - BaseString::snprintf(m_text, m_text_len, - "CM_REGREF from Node %u to our Node %u. Cause = %s", - theData[2], - theData[1], - line); -} -void getTextFIND_NEIGHBOURS(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node Restart copied a fragment. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "We are Node %u with dynamic ID %u, our left neighbour " - "is Node %u, our right is Node %u", - theData[1], - theData[4], - theData[2], - theData[3]); -} -void getTextNodeFailCompleted(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node failure phase completed. 
- //----------------------------------------------------------------------- - if (theData[1] == 0) - { - if (theData[3] != 0) { - BaseString::snprintf(m_text, m_text_len, - "Node %u completed failure of Node %u", - theData[3], - theData[2]); - } else { - BaseString::snprintf(m_text, m_text_len, - "All nodes completed failure of Node %u", - theData[2]); - }//if - } else { - const char* line = ""; - if (theData[1] == DBTC){ - line = "DBTC"; - }else if (theData[1] == DBDICT){ - line = "DBDICT"; - }else if (theData[1] == DBDIH){ - line = "DBDIH"; - }else if (theData[1] == DBLQH){ - line = "DBLQH"; - } - BaseString::snprintf(m_text, m_text_len, - "Node failure of %u %s completed", - theData[2], - line); - } -} -void getTextNODE_FAILREP(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node %u has failed. The Node state at failure " - "was %u", - theData[1], - theData[2]); -} -void getTextArbitState(QQQQ) { - //----------------------------------------------------------------------- - // REPORT arbitrator found or lost. - //----------------------------------------------------------------------- - { - const ArbitSignalData* sd = (ArbitSignalData*)theData; - char ticketText[ArbitTicket::TextLength + 1]; - char errText[ArbitCode::ErrTextLength + 1]; - const unsigned code = sd->code & 0xFFFF; - const unsigned state = sd->code >> 16; - switch (code) { - case ArbitCode::ThreadStart: - BaseString::snprintf(m_text, m_text_len, - "President restarts arbitration thread [state=%u]", - state); - break; - case ArbitCode::PrepPart2: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "Prepare arbitrator node %u [ticket=%s]", - sd->node, ticketText); - break; - case ArbitCode::PrepAtrun: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "Receive arbitrator node %u [ticket=%s]", - sd->node, ticketText); - break; - case ArbitCode::ApiStart: - sd->ticket.getText(ticketText, sizeof(ticketText)); - BaseString::snprintf(m_text, m_text_len, - "Started arbitrator node %u [ticket=%s]", - sd->node, ticketText); - break; - case ArbitCode::ApiFail: - BaseString::snprintf(m_text, m_text_len, - "Lost arbitrator node %u - process failure [state=%u]", - sd->node, state); - break; - case ArbitCode::ApiExit: - BaseString::snprintf(m_text, m_text_len, - "Lost arbitrator node %u - process exit [state=%u]", - sd->node, state); - break; - default: - ArbitCode::getErrText(code, errText, sizeof(errText)); - BaseString::snprintf(m_text, m_text_len, - "Lost arbitrator node %u - %s [state=%u]", - sd->node, errText, state); - break; - } - } -} - -void getTextArbitResult(QQQQ) { - //----------------------------------------------------------------------- - // REPORT arbitration result (the failures may not reach us). 
- //----------------------------------------------------------------------- - { - const ArbitSignalData* sd = (ArbitSignalData*)theData; - char errText[ArbitCode::ErrTextLength + 1]; - const unsigned code = sd->code & 0xFFFF; - const unsigned state = sd->code >> 16; - switch (code) { - case ArbitCode::LoseNodes: - BaseString::snprintf(m_text, m_text_len, - "Arbitration check lost - less than 1/2 nodes left"); - break; - case ArbitCode::WinNodes: - BaseString::snprintf(m_text, m_text_len, - "Arbitration check won - all node groups and more than 1/2 nodes left"); - break; - case ArbitCode::WinGroups: - BaseString::snprintf(m_text, m_text_len, - "Arbitration check won - node group majority"); - break; - case ArbitCode::LoseGroups: - BaseString::snprintf(m_text, m_text_len, - "Arbitration check lost - missing node group"); - break; - case ArbitCode::Partitioning: - BaseString::snprintf(m_text, m_text_len, - "Network partitioning - arbitration required"); - break; - case ArbitCode::WinChoose: - BaseString::snprintf(m_text, m_text_len, - "Arbitration won - positive reply from node %u", - sd->node); - break; - case ArbitCode::LoseChoose: - BaseString::snprintf(m_text, m_text_len, - "Arbitration lost - negative reply from node %u", - sd->node); - break; - case ArbitCode::LoseNorun: - BaseString::snprintf(m_text, m_text_len, - "Network partitioning - no arbitrator available"); - break; - case ArbitCode::LoseNocfg: - BaseString::snprintf(m_text, m_text_len, - "Network partitioning - no arbitrator configured"); - break; - default: - ArbitCode::getErrText(code, errText, sizeof(errText)); - BaseString::snprintf(m_text, m_text_len, - "Arbitration failure - %s [state=%u]", - errText, state); - break; - } - } -} -void getTextGlobalCheckpointStarted(QQQQ) { - //----------------------------------------------------------------------- - // This event reports that a global checkpoint has been started and this - // node is the master of this global checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Global checkpoint %u started", - theData[1]); -} -void getTextGlobalCheckpointCompleted(QQQQ) { - //----------------------------------------------------------------------- - // This event reports that a global checkpoint has been completed on this - // node and the node is the master of this global checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Global checkpoint %u completed", - theData[1]); -} -void getTextLocalCheckpointStarted(QQQQ) { - //----------------------------------------------------------------------- - // This event reports that a local checkpoint has been started and this - // node is the master of this local checkpoint. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Local checkpoint %u started. " - "Keep GCI = %u oldest restorable GCI = %u", - theData[1], - theData[2], - theData[3]); -} -void getTextLocalCheckpointCompleted(QQQQ) { - //----------------------------------------------------------------------- - // This event reports that a local checkpoint has been completed on this - // node and the node is the master of this local checkpoint. 
- //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Local checkpoint %u completed", - theData[1]); -} -void getTextTableCreated(QQQQ) { - //----------------------------------------------------------------------- - // This event reports that a table has been created. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Table with ID = %u created", - theData[1]); -} -/* STRANGE */ -void getTextLCPStoppedInCalcKeepGci(QQQQ) { - if (theData[1] == 0) - BaseString::snprintf(m_text, m_text_len, - "Local Checkpoint stopped in CALCULATED_KEEP_GCI"); -} -void getTextNR_CopyDict(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node Restart completed copy of dictionary information. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Node restart completed copy of dictionary information"); -} -void getTextNR_CopyDistr(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node Restart completed copy of distribution information. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Node restart completed copy of distribution information"); -} -void getTextNR_CopyFragsStarted(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node Restart is starting to copy the fragments. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Node restart starting to copy the fragments " - "to Node %u", - theData[1]); -} -void getTextNR_CopyFragDone(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Node Restart copied a fragment. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Table ID = %u, fragment ID = %u have been copied " - "to Node %u", - theData[2], - theData[3], - theData[1]); -} -void getTextNR_CopyFragsCompleted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Node restart completed copying the fragments " - "to Node %u", - theData[1]); -} -void getTextLCPFragmentCompleted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Table ID = %u, fragment ID = %u has completed LCP " - "on Node %u maxGciStarted: %d maxGciCompleted: %d", - theData[2], - theData[3], - theData[1], - theData[4], - theData[5]); -} -void getTextTransReportCounters(QQQQ) { - // ------------------------------------------------------------------- - // Report information about transaction activity once per 10 seconds. - // ------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "Trans. 
Count = %u, Commit Count = %u, " - "Read Count = %u, Simple Read Count = %u, " - "Write Count = %u, AttrInfo Count = %u, " - "Concurrent Operations = %u, Abort Count = %u" - " Scans = %u Range scans = %u", - theData[1], - theData[2], - theData[3], - theData[4], - theData[5], - theData[6], - theData[7], - theData[8], - theData[9], - theData[10]); -} -void getTextOperationReportCounters(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Operations=%u", - theData[1]); -} -void getTextUndoLogBlocked(QQQQ) { - //----------------------------------------------------------------------- - // REPORT Undo Logging blocked due to buffer near to overflow. - //----------------------------------------------------------------------- - BaseString::snprintf(m_text, m_text_len, - "ACC Blocked %u and TUP Blocked %u times last second", - theData[1], - theData[2]); -} - -void getTextTransporterError(QQQQ) { - struct myTransporterError{ - Uint32 errorNum; - char errorString[256]; - }; - int i = 0; - int lenth = 0; - static const struct myTransporterError TransporterErrorString[]= - { - //TE_NO_ERROR = 0 - {TE_NO_ERROR,"No error"}, - //TE_ERROR_CLOSING_SOCKET = 0x1 - {TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"}, - //TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2 - {TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"}, - //TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT - {TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"}, - //TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT - {TE_INVALID_CHECKSUM,"Error found in message (checksum)"}, - //TE_COULD_NOT_CREATE_SOCKET = 0x5 - {TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"}, - //TE_COULD_NOT_BIND_SOCKET = 0x6 - {TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"}, - //TE_LISTEN_FAILED = 0x7 - {TE_LISTEN_FAILED,"Error found while listening to server socket"}, - //TE_ACCEPT_RETURN_ERROR = 0x8 - {TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"}, - //TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT - {TE_SHM_DISCONNECT,"The remote node has disconnected"}, - //TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT - {TE_SHM_IPC_STAT,"Unable to check shm segment"}, - //TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd - {TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"}, - //TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe - {TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"}, - //TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf - {TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"}, - //TE_TOO_SMALL_SIGID = 0x10 - {TE_TOO_SMALL_SIGID,"Sig ID too small"}, - //TE_TOO_LARGE_SIGID = 0x11 - {TE_TOO_LARGE_SIGID,"Sig ID too large"}, - //TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT - {TE_WAIT_STACK_FULL,"Wait stack was full"}, - //TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT - {TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"}, - //TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT - {TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full,and trying to force send fails"}, - //TE_SIGNAL_LOST = 0x15 - {TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"}, - //TE_SEND_BUFFER_FULL = 0x16 - {TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"}, - //TE_SCI_LINK_ERROR = 0x0017 - {TE_SCI_LINK_ERROR,"There is no link from this node to the switch"}, - //TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT - {TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are 
exumed or no sequence has been created"}, - //TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT - {TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"}, - //TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT - {TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exempted. Must reboot"}, - //TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT - {TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"}, - //TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT - {TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"}, - //TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNEC - {TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"}, - //TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT - {TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"}, - //TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNEC - {TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"}, - //TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT - {TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"}, - //TE_SHM_IPC_PERMANENT = 0x21 - {TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"}, - //TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22 - {TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"} - }; - - lenth = sizeof(TransporterErrorString)/sizeof(struct myTransporterError); - for(i=0; i")), - (gth == 0 ? "is" : (gth > 0 ? "increased to" : "decreased to")), - percent, "%", - used, size/1024, total - ); -} - -void getTextBackupStarted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Backup %d started from node %d", - theData[2], refToNode(theData[1])); -} -void getTextBackupFailedToStart(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Backup request from %d failed to start. Error: %d", - refToNode(theData[1]), theData[2]); -} -void getTextBackupCompleted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Backup %u started from node %u completed." - " StartGCP: %u StopGCP: %u" - " #Records: %u #LogRecords: %u" - " Data: %u bytes Log: %u bytes", - theData[2], refToNode(theData[1]), - theData[3], theData[4], theData[6], theData[8], - theData[5], theData[7]); -} -void getTextBackupAborted(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Backup %d started from %d has been aborted. 
Error: %d", - theData[2], - refToNode(theData[1]), - theData[3]); -} - -void getTextSingleUser(QQQQ) { - switch (theData[1]) - { - case 0: - BaseString::snprintf(m_text, m_text_len, "Entering single user mode"); - break; - case 1: - BaseString::snprintf(m_text, m_text_len, - "Entered single user mode " - "Node %d has exclusive access", theData[2]); - break; - case 2: - BaseString::snprintf(m_text, m_text_len,"Exiting single user mode"); - break; - default: - BaseString::snprintf(m_text, m_text_len, - "Unknown single user report %d", theData[1]); - break; - } -} - -void getTextStartReport(QQQQ) { - Uint32 time = theData[2]; - Uint32 sz = theData[3]; - char mask1[100]; - char mask2[100]; - char mask3[100]; - char mask4[100]; - BitmaskImpl::getText(sz, theData + 4 + (0 * sz), mask1); - BitmaskImpl::getText(sz, theData + 4 + (1 * sz), mask2); - BitmaskImpl::getText(sz, theData + 4 + (2 * sz), mask3); - BitmaskImpl::getText(sz, theData + 4 + (3 * sz), mask4); - switch(theData[1]){ - case 1: // Wait initial - BaseString::snprintf - (m_text, m_text_len, - "Initial start, waiting for %s to connect, " - " nodes [ all: %s connected: %s no-wait: %s ]", - mask4, mask1, mask2, mask3); - break; - case 2: // Wait partial - BaseString::snprintf - (m_text, m_text_len, - "Waiting until nodes: %s connects, " - "nodes [ all: %s connected: %s no-wait: %s ]", - mask4, mask1, mask2, mask3); - break; - case 3: // Wait partial timeout - BaseString::snprintf - (m_text, m_text_len, - "Waiting %u sec for nodes %s to connect, " - "nodes [ all: %s connected: %s no-wait: %s ]", - - time, mask4, mask1, mask2, mask3); - break; - case 4: // Wait partioned - BaseString::snprintf - (m_text, m_text_len, - "Waiting for non partitioned start, " - "nodes [ all: %s connected: %s missing: %s no-wait: %s ]", - - mask1, mask2, mask4, mask3); - break; - case 5: - BaseString::snprintf - (m_text, m_text_len, - "Waiting %u sec for non partitioned start, " - "nodes [ all: %s connected: %s missing: %s no-wait: %s ]", - - time, mask1, mask2, mask4, mask3); - break; - case 0x8000: // Do initial - BaseString::snprintf - (m_text, m_text_len, - "Initial start with nodes %s [ missing: %s no-wait: %s ]", - mask2, mask4, mask3); - break; - case 0x8001: // Do start - BaseString::snprintf - (m_text, m_text_len, - "Start with all nodes %s", - mask2); - break; - case 0x8002: // Do partial - BaseString::snprintf - (m_text, m_text_len, - "Start with nodes %s [ missing: %s no-wait: %s ]", - mask2, mask4, mask3); - break; - case 0x8003: // Do partioned - BaseString::snprintf - (m_text, m_text_len, - "Start potentially partitioned with nodes %s " - " [ missing: %s no-wait: %s ]", - mask2, mask4, mask3); - break; - default: - BaseString::snprintf - (m_text, m_text_len, - "Unknown startreport: 0x%x [ %s %s %s %s ]", - theData[1], - mask1, mask2, mask3, mask4); - } -} - -#if 0 -BaseString::snprintf(m_text, - m_text_len, - "Unknown event: %d", - theData[0]); -#endif - -/** - * This matrix defines which event should be printed when - * - * threshold - is in range [0-15] - * severity - DEBUG to ALERT (Type of log message) - */ - -#define ROW(a,b,c,d) \ -{ NDB_LE_ ## a, b, c, d, getText ## a} - -const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = { - // CONNECTION - ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO ), - ROW(Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT ), - ROW(CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO ), - ROW(CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO ), - 
ROW(ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO ), - // CHECKPOINT - ROW(GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO ), - ROW(GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO ), - ROW(LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO ), - ROW(LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO ), - ROW(LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT ), - ROW(LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO ), - ROW(UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO ), - - // STARTUP - ROW(NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ), - ROW(NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ), - ROW(STTORRYRecieved, LogLevel::llStartUp, 15, Logger::LL_INFO ), - ROW(StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO ), - ROW(CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO ), - ROW(CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO ), - ROW(FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO ), - ROW(NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ), - ROW(NDBStopCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ), - ROW(NDBStopForced, LogLevel::llStartUp, 1, Logger::LL_ALERT ), - ROW(NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO ), - ROW(StartREDOLog, LogLevel::llStartUp, 4, Logger::LL_INFO ), - ROW(StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO ), - ROW(UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO ), - ROW(StartReport, LogLevel::llStartUp, 4, Logger::LL_INFO ), - - // NODERESTART - ROW(NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), - ROW(NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), - ROW(NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), - ROW(NR_CopyFragDone, LogLevel::llNodeRestart,10, Logger::LL_INFO ), - ROW(NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ), - - ROW(NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT), - ROW(NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT), - ROW(ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO ), - ROW(ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT), - ROW(GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), - ROW(GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), - ROW(LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), - ROW(LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ), - - // STATISTIC - ROW(TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ), - ROW(OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ), - ROW(TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO ), - ROW(JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), - ROW(SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), - ROW(ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ), - ROW(MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO ), - - // ERROR - ROW(TransporterError, LogLevel::llError, 2, Logger::LL_ERROR ), - ROW(TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING ), - ROW(MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING ), - ROW(DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT ), - ROW(WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING ), - // INFO - ROW(SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO ), - ROW(CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO ), - ROW(InfoEvent, 
LogLevel::llInfo, 2, Logger::LL_INFO ), - ROW(EventBufferStatus, LogLevel::llInfo, 7, Logger::LL_INFO ), - - //Single User - ROW(SingleUser, LogLevel::llInfo, 7, Logger::LL_INFO ), - - // Backup - ROW(BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO ), - ROW(BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO ), - ROW(BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT), - ROW(BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT ) -}; - -const Uint32 EventLoggerBase::matrixSize= -sizeof(EventLoggerBase::matrix)/sizeof(EventRepLogLevelMatrix); - -EventLogger::EventLogger() : m_filterLevel(15) -{ - setCategory("EventLogger"); - enable(Logger::LL_INFO, Logger::LL_ALERT); -} - -EventLogger::~EventLogger() -{ -} - -bool -EventLogger::open(const char* logFileName, int maxNoFiles, long maxFileSize, - unsigned int maxLogEntries) -{ - return addHandler(new FileLogHandler(logFileName, maxNoFiles, maxFileSize, - maxLogEntries)); -} - -void -EventLogger::close() -{ - removeAllHandlers(); -} - -#ifdef NOT_USED - -static NdbOut& -operator<<(NdbOut& out, const LogLevel & ll) -{ - out << "[LogLevel: "; - for(size_t i = 0; i 0) - textF(dst+pos,dst_len-pos,theData); - return dst; -} - -void -EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId, - const LogLevel* ll) -{ - Uint32 threshold = 0; - Logger::LoggerLevel severity = Logger::LL_WARNING; - LogLevel::EventCategory cat= LogLevel::llInvalid; - EventTextFunction textF; - char log_text[MAX_TEXT_LENGTH]; - - DBUG_ENTER("EventLogger::log"); - DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId)); - - if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF)) - DBUG_VOID_RETURN; - - Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat); - DBUG_PRINT("info",("threshold=%d, set=%d", threshold, set)); - if (ll) - DBUG_PRINT("info",("m_logLevel.getLogLevel=%d", m_logLevel.getLogLevel(cat))); - - if (threshold <= set){ - getText(log_text,sizeof(log_text),textF,theData,nodeId); - - switch (severity){ - case Logger::LL_ALERT: - alert(log_text); - break; - case Logger::LL_CRITICAL: - critical(log_text); - break; - case Logger::LL_WARNING: - warning(log_text); - break; - case Logger::LL_ERROR: - error(log_text); - break; - case Logger::LL_INFO: - info(log_text); - break; - case Logger::LL_DEBUG: - debug(log_text); - break; - default: - info(log_text); - break; - } - } // if (.. - DBUG_VOID_RETURN; -} - -int -EventLogger::getFilterLevel() const -{ - return m_filterLevel; -} - -void -EventLogger::setFilterLevel(int filterLevel) -{ - m_filterLevel = filterLevel; -} diff --git a/storage/ndb/src/common/debugger/GrepError.cpp b/storage/ndb/src/common/debugger/GrepError.cpp deleted file mode 100644 index 8bb2bce64ed..00000000000 --- a/storage/ndb/src/common/debugger/GrepError.cpp +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -/** - * Error descriptions. - */ - -const GrepError::ErrorDescription GrepError::errorDescriptions[] = { - { GrepError::GE_NO_ERROR, - "No error" }, - { GrepError::SUBSCRIPTION_ID_NOMEM, - "Not enough resources to allocate the subscription" }, - { GrepError::SUBSCRIPTION_ID_NOT_FOUND, - "The requested subscription (id, key) does not exist"}, - { GrepError::SUBSCRIPTION_ID_NOT_UNIQUE, - "A subscription with (id, key) does already exist"}, - { GrepError::SUBSCRIPTION_ID_SUMA_FAILED_CREATE, - "Suma failed to create a new subscription id"}, - { GrepError::NULL_VALUE, - "NULL"}, - { GrepError::SEQUENCE_ERROR, - "Error when creating or using sequence."}, - { GrepError::NOSPACE_IN_POOL, - "No space left in pool when trying to seize data"}, - { GrepError::SUBSCRIPTION_ID_ALREADY_EXIST, - "A subscription for this replication channel does already exist"}, - { GrepError::SUBSCRIPTION_NOT_STARTED, - "No subscription is started"}, - { GrepError::SUBSCRIBER_NOT_FOUND, - "The subscriber does not exist in SUMA."}, - { GrepError::WRONG_NO_OF_SECTIONS, - "Something is wrong with the supplied arguments"}, - { GrepError::ILLEGAL_ACTION_WHEN_STOPPING, - "Action can not be performed while channel is in stopping state"}, - { GrepError::SELECTED_TABLE_NOT_FOUND, - "The selected table was not found. "}, - { GrepError::REP_APPLY_LOGRECORD_FAILED, - "Failed applying a log record (permanent error)"}, - { GrepError::REP_APPLY_METARECORD_FAILED, - "Failed applying a meta record (permanent error)"}, - { GrepError::REP_DELETE_NEGATIVE_EPOCH, - "Trying to delete a GCI Buffer using a negative epoch."}, - { GrepError::REP_DELETE_NONEXISTING_EPOCH, - "Trying to delete a non-existing GCI Buffer."}, - { GrepError::REP_NO_CONNECTED_NODES, - "There are no connected nodes in the node group."}, - { GrepError::REP_DISCONNECT, - "Global Replication Server disconnected."}, - { GrepError::COULD_NOT_ALLOCATE_MEM_FOR_SIGNAL, - "Could not allocate memory for signal."}, - { GrepError::REP_NOT_PROPER_TABLE, - "Specified table is not a valid table. 
" - "Either the format is not // or " - "the table name is too long "}, - { GrepError::REP_TABLE_ALREADY_SELECTED, - "The specified table is already selected for replication" }, - { GrepError::REP_TABLE_NOT_FOUND, - "The specified table was not found" }, - { GrepError::START_OF_COMPONENT_IN_WRONG_STATE, - "Component or protocol can not be started in the current state."}, - { GrepError::START_ALREADY_IN_PROGRESS, - "Start of replication protocol is already in progress."}, - { GrepError::ILLEGAL_STOP_EPOCH_ID, - "It is not possible to stop on the requested epoch id."}, - { GrepError::ILLEGAL_USE_OF_COMMAND, - "The command cannot be executed in this state."}, - { GrepError::CHANNEL_NOT_STOPPABLE, - "It is not possible to stop the in this state."}, - - /** - * Applier stuff - */ - { GrepError::REP_APPLY_NONCOMPLETE_GCIBUFFER, - "Applier: Ordered to apply an incomplete GCI Buffer."}, - { GrepError::REP_APPLY_NULL_GCIBUFFER, - "Applier: Tried to apply a NULL GCI Buffer."}, - { GrepError::REP_APPLIER_START_TRANSACTION, - "Applier: Could not start a transaction."}, - { GrepError::REP_APPLIER_NO_TABLE, - "Applier: Table does not exist"}, - { GrepError::REP_APPLIER_NO_OPERATION, - "Applier: Cannot get NdbOperation record."}, - { GrepError::REP_APPLIER_EXECUTE_TRANSACTION, - "Applier: Execute transaction failed."}, - { GrepError::REP_APPLIER_CREATE_TABLE, - "Applier: Create table failed."}, - { GrepError::REP_APPLIER_PREPARE_TABLE, - "Applier: Prepare table for create failed."}, - - { GrepError::NOT_YET_IMPLEMENTED, - "Command or event not yet implemented."} -}; - - - - - -const Uint32 -GrepError::noOfErrorDescs = sizeof(GrepError::errorDescriptions) / - sizeof(GrepError::ErrorDescription); - - -/** - * gets the corresponding error message to an err code - */ -const char * -GrepError::getErrorDesc(GrepError::GE_Code err) { - - for(Uint32 i = 0; i $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libtrace_la_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/debugger/SignalLoggerManager.cpp b/storage/ndb/src/common/debugger/SignalLoggerManager.cpp deleted file mode 100644 index a5a4f8a1f89..00000000000 --- a/storage/ndb/src/common/debugger/SignalLoggerManager.cpp +++ /dev/null @@ -1,507 +0,0 @@ -/* Copyright (c) 2003-2006, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "SignalLoggerManager.hpp" -#include - -#include - -SignalLoggerManager::SignalLoggerManager() -{ - for (int i = 0; i < NO_OF_BLOCKS; i++){ - logModes[i] = 0; - } - outputStream = 0; - m_ownNodeId = 0; - m_logDistributed = false; -} - -SignalLoggerManager::~SignalLoggerManager() -{ - if(outputStream != 0){ - fflush(outputStream); - fclose(outputStream); - outputStream = 0; - } -} - -FILE * -SignalLoggerManager::setOutputStream(FILE * output) -{ - if(outputStream != 0){ - fflush(outputStream); - } - - FILE * out = outputStream; - outputStream = output; - return out; -} - -FILE * -SignalLoggerManager::getOutputStream() const -{ - return outputStream; -} - -void -SignalLoggerManager::flushSignalLog() -{ - if(outputStream != 0) - fflush(outputStream); -} - -void -SignalLoggerManager::setTrace(unsigned long trace) -{ - traceId = trace; -} - -unsigned long -SignalLoggerManager::getTrace() const -{ - return traceId; -} - -void -SignalLoggerManager::setOwnNodeId(int nodeId){ - m_ownNodeId = nodeId; -} - -void -SignalLoggerManager::setLogDistributed(bool val){ - m_logDistributed = val; -} - -int -getParameter(char *blocks[NO_OF_BLOCKS], const char * par, const char * line) -{ - const char * loc = strstr(line, par); - if(loc == NULL) - return 0; - - loc += strlen(par); - - int found = 0; - - char * copy = strdup(loc); - char * tmp = copy; - bool done = false; - while(!done){ - int len = strcspn(tmp, ", ;:\0"); - if(len == 0) - done = true; - else { - if(* (tmp + len) != ',') - done = true; - * (tmp + len) = 0; - blocks[found] = strdup(tmp); - found ++; - tmp += (len + 1); - } - } - free(copy); - return found; -} - - -#define SLM_OFF 0 -#define SLM_ON 1 -#define SLM_TOGGLE 2 - -int -SignalLoggerManager::log(LogMode logMode, const char * params) -{ - char * blocks[NO_OF_BLOCKS]; - const int count = getParameter(blocks, "BLOCK=", params); - - int cnt = 0; - if((count == 1 && !strcmp(blocks[0], "ALL")) || - count == 0){ - - for (int number = 0; number < NO_OF_BLOCKS; ++number){ - cnt += log(SLM_ON, number, logMode); - } - } else { - for (int i = 0; i < count; ++i){ - BlockNumber number = getBlockNo(blocks[i]); - cnt += log(SLM_ON, number, logMode); - } - } - for(int i = 0; i= 7){ - fprintf(output, - " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n", - signalData[0], signalData[1], signalData[2], signalData[3], - signalData[4], signalData[5], signalData[6]); - len -= 7; - signalData += 7; - } - if(len > 0){ - for(Uint32 i = 0; i= 3) { - fprintf(output, " *** invalid ***\n"); - return; - } - const Uint32 len = ptr[i].sz; - const Uint32 * data = ptr[i].p; - Uint32 pos = 0; - fprintf(output, " size=%u\n", (unsigned)len); - while (pos < len) { - printDataWord(output, pos, data[pos]); - } - if (len > 0) - putc('\n', output); -} - -void -SignalLoggerManager::printDataWord(FILE * output, Uint32 & pos, const Uint32 data) -{ - const char* const hex = "0123456789abcdef"; - if (pos > 0 && pos % 7 == 0) - putc('\n', output); - putc(' ', output); - putc('H', output); - putc('\'', output); - for (int i = 7; i >= 0; i--) - putc(hex[(data >> (i << 2)) & 0xf], output); - pos++; -} diff --git a/storage/ndb/src/common/debugger/signaldata/AccLock.cpp b/storage/ndb/src/common/debugger/signaldata/AccLock.cpp deleted file mode 100644 index 918e1dd4e8a..00000000000 --- 
a/storage/ndb/src/common/debugger/signaldata/AccLock.cpp +++ /dev/null @@ -1,75 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -bool -printACC_LOCKREQ(FILE* output, const Uint32* theData, Uint32 len, Uint16 rbn) -{ - const AccLockReq* const sig = (const AccLockReq*)theData; - Uint32 reqtype = sig->requestInfo & 0xFF; - switch (sig->returnCode) { - case RNIL: - fprintf(output, " returnCode=RNIL"); - break; - case AccLockReq::Success: - fprintf(output, " returnCode=Success"); - break; - case AccLockReq::IsBlocked: - fprintf(output, " returnCode=IsBlocked"); - break; - case AccLockReq::WouldBlock: - fprintf(output, " returnCode=WouldBlock"); - break; - case AccLockReq::Refused: - fprintf(output, " returnCode=Refused"); - break; - case AccLockReq::NoFreeOp: - fprintf(output, " returnCode=NoFreeOp"); - break; - default: - fprintf(output, " returnCode=%u?", sig->returnCode); - break; - } - switch (reqtype) { - case AccLockReq::LockShared: - fprintf(output, " req=LockShared\n"); - break; - case AccLockReq::LockExclusive: - fprintf(output, " req=LockExclusive\n"); - break; - case AccLockReq::Unlock: - fprintf(output, " req=Unlock\n"); - break; - case AccLockReq::Abort: - fprintf(output, " req=Abort\n"); - break; - default: - fprintf(output, " req=%u\n", reqtype); - break; - } - fprintf(output, " accOpPtr: 0x%x\n", sig->accOpPtr); - if (reqtype == AccLockReq::LockShared || - reqtype == AccLockReq::LockExclusive) { - fprintf(output, " userPtr: 0x%x userRef: 0x%x\n", sig->userPtr, sig->userRef); - fprintf(output, " table: id=%u", sig->tableId); - fprintf(output, " fragment: id=%u ptr=0x%x\n", sig->fragId, sig->fragPtrI); - fprintf(output, " tuple: addr=0x%x hashValue=%x\n", sig->tupAddr, sig->hashValue); - fprintf(output, " transid: %08x %08x\n", sig->transId1, sig->transId2); - } - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp b/storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp deleted file mode 100644 index 12bfe687366..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/AlterIndx.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printALTER_INDX_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterIndxReq * const sig = (AlterIndxReq *) theData; - return false; -} - -bool printALTER_INDX_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterIndxConf * const sig = (AlterIndxConf *) theData; - return false; -} - -bool printALTER_INDX_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterIndxRef * const sig = (AlterIndxRef *) theData; - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/AlterTab.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTab.cpp deleted file mode 100644 index a2ea11249a7..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/AlterTab.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printALTER_TAB_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTabReq * const sig = (AlterTabReq *) theData; - - return false; -} - -bool printALTER_TAB_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTabConf * const sig = (AlterTabConf *) theData; - - return false; -} - -bool printALTER_TAB_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTabRef * const sig = (AlterTabRef *) theData; - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/AlterTable.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTable.cpp deleted file mode 100644 index e58338122af..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/AlterTable.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printALTER_TABLE_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTableReq * const sig = (AlterTableReq *) theData; - - return false; -} - -bool printALTER_TABLE_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTableConf * const sig = (AlterTableConf *) theData; - - return false; -} - -bool printALTER_TABLE_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const AlterTableRef * const sig = (AlterTableRef *) theData; - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp b/storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp deleted file mode 100644 index 467ebdbe12c..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/AlterTrig.cpp +++ /dev/null @@ -1,51 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printALTER_TRIG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const AlterTrigReq * const sig = (AlterTrigReq *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "\n"); - - return false; -} - -bool printALTER_TRIG_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const AlterTrigConf * const sig = (AlterTrigConf *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "\n"); - - return false; -} - -bool printALTER_TRIG_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const AlterTrigRef * const sig = (AlterTrigRef *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Error code: %u, ", sig->getErrorCode()); - fprintf(output, "\n"); - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp deleted file mode 100644 index c399959830d..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -bool -printDEFINE_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ - DefineBackupReq* sig = (DefineBackupReq*)data; - fprintf(out, " backupPtr: %d backupId: %d clientRef: %d clientData: %d\n", - sig->backupPtr, sig->backupId, sig->clientRef, sig->clientData); - fprintf(out, " backupKey: [ %08x%08x ] DataLength: %d\n", - sig->backupKey[0], sig->backupKey[1], sig->backupDataLen); - char buf[_NDB_NODE_BITMASK_SIZE * 8 + 1]; - fprintf(out, " Nodes: %s\n", sig->nodes.getText(buf)); - return true; -} - -bool -printDEFINE_BACKUP_REF(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ - DefineBackupRef* sig = (DefineBackupRef*)data; - fprintf(out, " backupPtr: %d backupId: %d errorCode: %d\n", - sig->backupPtr, sig->backupId, sig->errorCode); - return true; -} - -bool -printDEFINE_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - DefineBackupConf* sig = (DefineBackupConf*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; -} - -bool -printSTART_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - StartBackupReq* sig = (StartBackupReq*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; -} - -bool -printSTART_BACKUP_REF(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ - StartBackupRef* sig = (StartBackupRef*)data; - fprintf(out, " backupPtr: %d backupId: %d errorCode: %d\n", - sig->backupPtr, sig->backupId, sig->errorCode); - return true; -} - -bool -printSTART_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - StartBackupConf* sig = (StartBackupConf*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; -} - -bool -printBACKUP_FRAGMENT_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - BackupFragmentReq* sig = (BackupFragmentReq*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d (count = %d)\n", - sig->tableId, sig->fragmentNo, sig->count); - return true; -} - -bool -printBACKUP_FRAGMENT_REF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - BackupFragmentRef* sig = (BackupFragmentRef*)data; - fprintf(out, " backupPtr: %d backupId: %d nodeId: %d errorCode: %d\n", - sig->backupPtr, sig->backupId, sig->nodeId, sig->errorCode); - return true; -} - -bool -printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - BackupFragmentConf* sig = (BackupFragmentConf*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n", - sig->tableId, sig->fragmentNo, - sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), - sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); - return true; -} - -bool -printSTOP_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - StopBackupReq* sig = (StopBackupReq*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - 
sig->backupPtr, sig->backupId); - return true; -} - -bool -printSTOP_BACKUP_REF(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ - StopBackupRef* sig = (StopBackupRef*)data; - fprintf(out, " backupPtr: %d backupId: %d errorCode: %d\n", - sig->backupPtr, sig->backupId, sig->errorCode); - return true; -} - -bool -printSTOP_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ - StopBackupConf* sig = (StopBackupConf*)data; - fprintf(out, " backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; -} - -bool -printBACKUP_STATUS_REQ(FILE *, const Uint32 *, Uint32, Uint16){ - return false; -} - -bool -printBACKUP_STATUS_CONF(FILE *, const Uint32 *, Uint32, Uint16){ - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp deleted file mode 100644 index 04d84cc5b34..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -bool -printBACKUP_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 bno){ - BackupReq* sig = (BackupReq*)theData; - fprintf(output, " senderData: %d DataLength: %d flags: %d\n", - sig->senderData, - sig->backupDataLen, - sig->flags); - return true; -} - -bool -printBACKUP_DATA(FILE * output, const Uint32 * theData, Uint32 len, Uint16 bno){ - BackupData * sig = (BackupData*)theData; - if(sig->requestType == BackupData::ClientToMaster){ - fprintf(output, " ClientToMaster: senderData: %d backupId: %d\n", - sig->senderData, sig->backupId); - } else if(sig->requestType == BackupData::MasterToSlave){ - fprintf(output, " MasterToSlave: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - } - return false; -} - -bool -printBACKUP_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 bno){ - - BackupRef* sig = (BackupRef*)theData; - fprintf(output, " senderData: %d errorCode: %d masterRef: %d\n", - sig->senderData, - sig->errorCode, - sig->masterRef); - return true; -} - -bool -printBACKUP_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 bno){ - BackupConf* sig = (BackupConf*)theData; - fprintf(output, " senderData: %d backupId: %d\n", - sig->senderData, - sig->backupId); - return true; -} - -bool -printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ - BackupAbortRep* sig = (BackupAbortRep*)data; - fprintf(out, " senderData: %d backupId: %d reason: %d\n", - sig->senderData, - sig->backupId, - sig->reason); - return true; -} - -bool -printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ - BackupCompleteRep* sig = (BackupCompleteRep*)data; - fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n", - sig->senderData, - 
sig->backupId, - sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), - sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); - return true; -} - -bool -printBACKUP_NF_COMPLETE_REP(FILE*, const Uint32*, Uint32, Uint16){ - return false; -} - -bool -printABORT_BACKUP_ORD(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ - AbortBackupOrd* sig = (AbortBackupOrd*)data; - - AbortBackupOrd::RequestType rt =(AbortBackupOrd::RequestType)sig->requestType; - switch(rt){ - case AbortBackupOrd::ClientAbort: - fprintf(out, " ClientAbort: senderData: %d backupId: %d\n", - sig->senderData, sig->backupId); - return true; - break; - case AbortBackupOrd::BackupComplete: - fprintf(out, " BackupComplete: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - case AbortBackupOrd::BackupFailure: - fprintf(out, " BackupFailure: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - case AbortBackupOrd::LogBufferFull: - fprintf(out, " LogBufferFull: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - break; - case AbortBackupOrd::FileOrScanError: - fprintf(out, " FileOrScanError: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - break; - case AbortBackupOrd::BackupFailureDueToNodeFail: - fprintf(out, " BackupFailureDueToNodeFail: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - break; - case AbortBackupOrd::OkToClean: - fprintf(out, " OkToClean: backupPtr: %d backupId: %d\n", - sig->backupPtr, sig->backupId); - return true; - break; - case AbortBackupOrd::AbortScan: - case AbortBackupOrd::IncompatibleVersions: - return false; - } - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp b/storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp deleted file mode 100644 index 771f3d884f7..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CloseComReqConf.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -bool -printCLOSECOMREQCONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo){ - - CloseComReqConf * cc = (CloseComReqConf*)theData; - - fprintf(output, " xxxBlockRef = (%d, %d) failNo = %d noOfNodes = %d\n", - refToBlock(cc->xxxBlockRef), refToNode(cc->xxxBlockRef), - cc->failNo, cc->noOfNodes); - - int hits = 0; - fprintf(output, " Nodes: "); - for(int i = 0; itheNodes, i)){ - hits++; - fprintf(output, " %d", i); - } - if(hits == 16){ - fprintf(output, "\n Nodes: "); - hits = 0; - } - } - if(hits != 0) - fprintf(output, "\n"); - - return true; -} - - diff --git a/storage/ndb/src/common/debugger/signaldata/CntrStart.cpp b/storage/ndb/src/common/debugger/signaldata/CntrStart.cpp deleted file mode 100644 index cc092b478a5..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CntrStart.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include - -bool -printCNTR_START_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CntrStartReq * const sig = (CntrStartReq *)theData; - fprintf(output, " nodeId: %x\n", sig->nodeId); - fprintf(output, " startType: %x\n", sig->startType); - fprintf(output, " lastGci: %x\n", sig->lastGci); - return true; -} - -bool -printCNTR_START_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CntrStartRef * const sig = (CntrStartRef *)theData; - fprintf(output, " errorCode: %x\n", sig->errorCode); - fprintf(output, " masterNodeId: %x\n", sig->masterNodeId); - return true; -} - -bool -printCNTR_START_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CntrStartConf * const sig = (CntrStartConf *)theData; - fprintf(output, " startType: %x\n", sig->startType); - fprintf(output, " startGci: %x\n", sig->startGci); - fprintf(output, " masterNodeId: %x\n", sig->masterNodeId); - fprintf(output, " noStartNodes: %x\n", sig->noStartNodes); - - char buf[32*NdbNodeBitmask::Size+1]; - fprintf(output, " startedNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->startedNodes, buf)); - fprintf(output, " startingNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->startingNodes, buf)); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/ContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/ContinueB.cpp deleted file mode 100644 index c00f9ce274e..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/ContinueB.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is 
free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include -#include - -bool -printCONTINUEB(FILE * output, const Uint32 * theData, Uint32 len, - Uint16 receiverBlockNo){ - if(receiverBlockNo == DBDIH){ - return printCONTINUEB_DBDIH(output, theData, len, 0); - } else if(receiverBlockNo == NDBFS) { - return printCONTINUEB_NDBFS(output, theData, len, 0); - } - - return false; -} - - diff --git a/storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp b/storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp deleted file mode 100644 index 1bce63a3d0f..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CopyGCI.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -static -void -print(char * buf, size_t buf_len, CopyGCIReq::CopyReason r){ - switch(r){ - case CopyGCIReq::IDLE: - BaseString::snprintf(buf, buf_len, "IDLE"); - break; - case CopyGCIReq::LOCAL_CHECKPOINT: - BaseString::snprintf(buf, buf_len, "LOCAL_CHECKPOINT"); - break; - case CopyGCIReq::RESTART: - BaseString::snprintf(buf, buf_len, "RESTART"); - break; - case CopyGCIReq::GLOBAL_CHECKPOINT: - BaseString::snprintf(buf, buf_len, "GLOBAL_CHECKPOINT"); - break; - case CopyGCIReq::INITIAL_START_COMPLETED: - BaseString::snprintf(buf, buf_len, "INITIAL_START_COMPLETED"); - break; - default: - BaseString::snprintf(buf, buf_len, ""); - } -} - -bool -printCOPY_GCI_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - CopyGCIReq * sig = (CopyGCIReq*)theData; - - static char buf[255]; - print(buf, sizeof(buf), (CopyGCIReq::CopyReason)sig->copyReason); - - fprintf(output, " SenderData: %d CopyReason: %s StartWord: %d\n", - sig->anyData, - buf, - sig->startWord); - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp b/storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp deleted file mode 100644 index 49cf2d8d4f3..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CreateEvnt.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printCREATE_EVNT_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateEvntReq * const sig = (CreateEvntReq *) theData; - - return false; -} - -bool printCREATE_EVNT_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateEvntConf * const sig = (CreateEvntConf *) theData; - - return false; -} - -bool printCREATE_EVNT_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateEvntRef * const sig = (CreateEvntRef *) theData; - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp b/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp deleted file mode 100644 index 524b2678c74..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printCREATE_FRAGMENTATION_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CreateFragmentationReq * const sig = (CreateFragmentationReq *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " fragmentationType: %x\n", sig->fragmentationType); - fprintf(output, " noOfFragments: %x\n", sig->noOfFragments); - if (sig->primaryTableId == RNIL) - fprintf(output, " primaryTableId: none\n"); - else - fprintf(output, " primaryTableId: %x\n", sig->primaryTableId); - return true; -} - -bool -printCREATE_FRAGMENTATION_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CreateFragmentationRef * const sig = (CreateFragmentationRef *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " errorCode: %x\n", sig->errorCode); - return true; -} - -bool -printCREATE_FRAGMENTATION_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const CreateFragmentationConf * const sig = - (CreateFragmentationConf *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " noOfReplicas: %x\n", sig->noOfReplicas); - fprintf(output, " noOfFragments: %x\n", sig->noOfFragments); - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp b/storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp deleted file mode 100644 index 46ea8e27449..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CreateIndx.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printCREATE_INDX_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateIndxReq * const sig = (CreateIndxReq *) theData; - - return false; -} - -bool printCREATE_INDX_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateIndxConf * const sig = (CreateIndxConf *) theData; - - return false; -} - -bool printCREATE_INDX_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ -// const CreateIndxRef * const sig = (CreateIndxRef *) theData; - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp b/storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp deleted file mode 100644 index 1ed3b940f29..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/CreateTrig.cpp +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printCREATE_TRIG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const CreateTrigReq * const sig = (CreateTrigReq *) theData; - - //char triggerName[MAX_TAB_NAME_SIZE]; - char triggerType[32]; - char triggerActionTime[32]; - char triggerEvent[32]; - - //sig->getTriggerName((char *) &triggerName); - switch (sig->getTriggerType()) { - case(TriggerType::SECONDARY_INDEX): - BaseString::snprintf(triggerType, sizeof(triggerType), "SECONDARY_INDEX"); - break; - case(TriggerType::SUBSCRIPTION): - BaseString::snprintf(triggerType, sizeof(triggerType), "SUBSCRIPTION"); - break; - case(TriggerType::ORDERED_INDEX): - BaseString::snprintf(triggerType, sizeof(triggerType), "ORDERED_INDEX"); - break; - default: - BaseString::snprintf(triggerType, sizeof(triggerType), "UNKNOWN [%d]", (int)sig->getTriggerType()); - break; - } - switch (sig->getTriggerActionTime()) { - case (TriggerActionTime::TA_BEFORE): - BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "BEFORE"); - break; - case(TriggerActionTime::TA_AFTER): - BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "AFTER"); - break; - case (TriggerActionTime::TA_DEFERRED): - BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "DEFERRED"); - break; - case (TriggerActionTime::TA_DETACHED): - BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "DETACHED"); - break; - default: - BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), - "UNKNOWN [%d]", (int)sig->getTriggerActionTime()); - break; - } - switch (sig->getTriggerEvent()) { - case (TriggerEvent::TE_INSERT): - BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "INSERT"); - break; - 
case(TriggerEvent::TE_DELETE): - BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "DELETE"); - break; - case(TriggerEvent::TE_UPDATE): - BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "UPDATE"); - break; - case(TriggerEvent::TE_CUSTOM): - BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "CUSTOM"); - break; - default: - BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "UNKNOWN [%d]", (int)sig->getTriggerEvent()); - break; - } - - fprintf(output, "User: %u, ", sig->getUserRef()); - //fprintf(output, "Trigger name: \"%s\"\n", triggerName); - fprintf(output, "Type: %s, ", triggerType); - fprintf(output, "Action: %s, ", triggerActionTime); - fprintf(output, "Event: %s, ", triggerEvent); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "Monitor replicas: %s ", (sig->getMonitorReplicas())?"true":"false"); - fprintf(output, "Monitor all attributes: %s ", (sig->getMonitorAllAttributes())?"true":"false"); - const AttributeMask& attributeMask = sig->getAttributeMask(); - - char buf[MAXNROFATTRIBUTESINWORDS * 8 + 1]; - fprintf(output, "Attribute mask: %s", attributeMask.getText(buf)); - fprintf(output, "\n"); - - return false; -} - -bool printCREATE_TRIG_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const CreateTrigConf * const sig = (CreateTrigConf *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "\n"); - - return false; -} - -bool printCREATE_TRIG_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const CreateTrigRef * const sig = (CreateTrigRef *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "Error code: %u, ", sig->getErrorCode()); - fprintf(output, "\n"); - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp deleted file mode 100644 index 264997d7766..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ /dev/null @@ -1,310 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -//static -const -SimpleProperties::SP2StructMapping -DictTabInfo::TableMapping[] = { - DTIMAPS(Table, TableName, TableName, 0, MAX_TAB_NAME_SIZE), - DTIMAP(Table, TableId, TableId), - DTIMAPS(Table, PrimaryTable, PrimaryTable, 0, MAX_TAB_NAME_SIZE), - DTIMAP(Table, PrimaryTableId, PrimaryTableId), - DTIMAP2(Table, TableLoggedFlag, TableLoggedFlag, 0, 1), - DTIMAP2(Table, TableTemporaryFlag, TableTemporaryFlag, 0, 1), - DTIMAP2(Table, ForceVarPartFlag, ForceVarPartFlag, 0, 1), - DTIMAP2(Table, TableKValue, TableKValue, 6, 6), - DTIMAP2(Table, MinLoadFactor, MinLoadFactor, 0, 90), - DTIMAP2(Table, MaxLoadFactor, MaxLoadFactor, 25, 110), - DTIMAP2(Table, FragmentTypeVal, FragmentType, 0, 3), - DTIMAP2(Table, TableTypeVal, TableType, 1, 3), - DTIMAP(Table, NoOfKeyAttr, NoOfKeyAttr), - DTIMAP2(Table, NoOfAttributes, NoOfAttributes, 1, MAX_ATTRIBUTES_IN_TABLE), - DTIMAP(Table, NoOfNullable, NoOfNullable), - DTIMAP2(Table, NoOfVariable, NoOfVariable, 0, 0), - DTIMAP(Table, KeyLength, KeyLength), - DTIMAP(Table, TableVersion, TableVersion), - DTIMAP(Table, IndexState, IndexState), - DTIMAP(Table, InsertTriggerId, InsertTriggerId), - DTIMAP(Table, UpdateTriggerId, UpdateTriggerId), - DTIMAP(Table, DeleteTriggerId, DeleteTriggerId), - DTIMAP(Table, CustomTriggerId, CustomTriggerId), - DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE), - DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen), - DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS), - DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES), - DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen), - DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS), - DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen), - DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 8*MAX_NDB_PARTITIONS), - DTIMAPB(Table, TablespaceData, TablespaceData, 0, 8*MAX_NDB_PARTITIONS, TablespaceDataLen), - DTIMAP2(Table, RangeListDataLen, RangeListDataLen, 0, 8*MAX_NDB_PARTITIONS), - DTIMAPB(Table, RangeListData, RangeListData, 0, 8*MAX_NDB_PARTITIONS, RangeListDataLen), - DTIMAP(Table, TablespaceId, TablespaceId), - DTIMAP(Table, TablespaceVersion, TablespaceVersion), - DTIMAP(Table, MaxRowsLow, MaxRowsLow), - DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), - DTIMAP(Table, DefaultNoPartFlag, DefaultNoPartFlag), - DTIMAP(Table, LinearHashFlag, LinearHashFlag), - DTIMAP(Table, TablespaceVersion, TablespaceVersion), - DTIMAP(Table, RowGCIFlag, RowGCIFlag), - DTIMAP(Table, RowChecksumFlag, RowChecksumFlag), - DTIMAP(Table, MaxRowsLow, MaxRowsLow), - DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), - DTIMAP(Table, MinRowsLow, MinRowsLow), - DTIMAP(Table, MinRowsHigh, MinRowsHigh), - DTIMAP(Table, SingleUserMode, SingleUserMode), - DTIBREAK(AttributeName) -}; - -//static -const Uint32 DictTabInfo::TableMappingSize = -sizeof(DictTabInfo::TableMapping) / sizeof(SimpleProperties::SP2StructMapping); - -//static -const -SimpleProperties::SP2StructMapping -DictTabInfo::AttributeMapping[] = { - DTIMAPS(Attribute, AttributeName, AttributeName, 0, MAX_ATTR_NAME_SIZE), - DTIMAP(Attribute, AttributeId, AttributeId), - DTIMAP(Attribute, AttributeType, AttributeType), - DTIMAP2(Attribute, AttributeSize, AttributeSize, 3, 7), - 
DTIMAP2(Attribute, AttributeArraySize, AttributeArraySize, 0, 65535), - DTIMAP2(Attribute, AttributeArrayType, AttributeArrayType, 0, 3), - DTIMAP2(Attribute, AttributeKeyFlag, AttributeKeyFlag, 0, 1), - DTIMAP2(Attribute, AttributeNullableFlag, AttributeNullableFlag, 0, 1), - DTIMAP2(Attribute, AttributeDKey, AttributeDKey, 0, 1), - DTIMAP2(Attribute, AttributeStorageType, AttributeStorageType, 0, 1), - DTIMAP(Attribute, AttributeExtType, AttributeExtType), - DTIMAP(Attribute, AttributeExtPrecision, AttributeExtPrecision), - DTIMAP(Attribute, AttributeExtScale, AttributeExtScale), - DTIMAP(Attribute, AttributeExtLength, AttributeExtLength), - DTIMAP2(Attribute, AttributeAutoIncrement, AttributeAutoIncrement, 0, 1), - DTIMAPS(Attribute, AttributeDefaultValue, AttributeDefaultValue, - 0, MAX_ATTR_DEFAULT_VALUE_SIZE), - DTIBREAK(AttributeEnd) -}; - -//static -const Uint32 DictTabInfo::AttributeMappingSize = -sizeof(DictTabInfo::AttributeMapping) / -sizeof(SimpleProperties::SP2StructMapping); - -bool printDICTTABINFO(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ -// const DictTabInfo * const sig = (DictTabInfo *) theData; - - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - return true; -} - -void -DictTabInfo::Table::init(){ - memset(TableName, 0, sizeof(TableName));//TableName[0] = 0; - TableId = ~0; - memset(PrimaryTable, 0, sizeof(PrimaryTable));//PrimaryTable[0] = 0; // Only used when "index" - PrimaryTableId = RNIL; - TableLoggedFlag = 1; - TableTemporaryFlag = 0; - ForceVarPartFlag = 0; - NoOfKeyAttr = 0; - NoOfAttributes = 0; - NoOfNullable = 0; - NoOfVariable = 0; - TableKValue = 6; - MinLoadFactor = 78; - MaxLoadFactor = 80; - KeyLength = 0; - FragmentType = DictTabInfo::AllNodesSmallTable; - TableType = DictTabInfo::UndefTableType; - TableVersion = 0; - IndexState = ~0; - InsertTriggerId = RNIL; - UpdateTriggerId = RNIL; - DeleteTriggerId = RNIL; - CustomTriggerId = RNIL; - FrmLen = 0; - FragmentDataLen = 0; - ReplicaDataLen = 0; - RangeListDataLen = 0; - TablespaceDataLen = 0; - memset(FrmData, 0, sizeof(FrmData)); - memset(FragmentData, 0, sizeof(FragmentData)); - memset(ReplicaData, 0, sizeof(ReplicaData)); - memset(RangeListData, 0, sizeof(RangeListData)); - memset(TablespaceData, 0, sizeof(TablespaceData)); - FragmentCount = 0; - TablespaceId = RNIL; - TablespaceVersion = ~0; - MaxRowsLow = 0; - MaxRowsHigh = 0; - DefaultNoPartFlag = 1; - LinearHashFlag = 1; - - RowGCIFlag = ~0; - RowChecksumFlag = ~0; - - MaxRowsLow = 0; - MaxRowsHigh = 0; - MinRowsLow = 0; - MinRowsHigh = 0; - - SingleUserMode = 0; -} - -void -DictTabInfo::Attribute::init(){ - memset(AttributeName, 0, sizeof(AttributeName));//AttributeName[0] = 0; - AttributeId = 0xFFFF; // ZNIL - AttributeType = ~0, // deprecated - AttributeSize = DictTabInfo::a32Bit; - AttributeArraySize = 1; - AttributeArrayType = NDB_ARRAYTYPE_FIXED; - AttributeKeyFlag = 0; - AttributeNullableFlag = 0; - AttributeDKey = 0; - AttributeExtType = DictTabInfo::ExtUnsigned, - AttributeExtPrecision = 0, - AttributeExtScale = 0, - AttributeExtLength = 0, - AttributeAutoIncrement = false; - AttributeStorageType = 0; - memset(AttributeDefaultValue, 0, sizeof(AttributeDefaultValue));//AttributeDefaultValue[0] = 0; -} - -//static -const -SimpleProperties::SP2StructMapping -DictFilegroupInfo::Mapping[] = { - DFGIMAPS(Filegroup, FilegroupName, FilegroupName, 0, MAX_TAB_NAME_SIZE), - DFGIMAP2(Filegroup, FilegroupType, 
FilegroupType, 0, 1), - DFGIMAP(Filegroup, FilegroupId, FilegroupId), - DFGIMAP(Filegroup, FilegroupVersion, FilegroupVersion), - - DFGIMAP(Filegroup, TS_ExtentSize, TS_ExtentSize), - DFGIMAP(Filegroup, TS_LogfileGroupId, TS_LogfileGroupId), - DFGIMAP(Filegroup, TS_LogfileGroupVersion, TS_LogfileGroupVersion), - DFGIMAP(Filegroup, TS_GrowLimit, TS_DataGrow.GrowLimit), - DFGIMAP(Filegroup, TS_GrowSizeHi, TS_DataGrow.GrowSizeHi), - DFGIMAP(Filegroup, TS_GrowSizeLo, TS_DataGrow.GrowSizeLo), - DFGIMAPS(Filegroup, TS_GrowPattern, TS_DataGrow.GrowPattern, 0, PATH_MAX), - DFGIMAP(Filegroup, TS_GrowMaxSize, TS_DataGrow.GrowMaxSize), - - DFGIMAP(Filegroup, LF_UndoBufferSize, LF_UndoBufferSize), - DFGIMAP(Filegroup, LF_UndoGrowLimit, LF_UndoGrow.GrowLimit), - DFGIMAP(Filegroup, LF_UndoGrowSizeHi, LF_UndoGrow.GrowSizeHi), - DFGIMAP(Filegroup, LF_UndoGrowSizeLo, LF_UndoGrow.GrowSizeLo), - DFGIMAPS(Filegroup, LF_UndoGrowPattern, LF_UndoGrow.GrowPattern, 0,PATH_MAX), - DFGIMAP(Filegroup, LF_UndoGrowMaxSize, LF_UndoGrow.GrowMaxSize), - DFGIMAP(Filegroup, LF_UndoFreeWordsHi, LF_UndoFreeWordsHi), - DFGIMAP(Filegroup, LF_UndoFreeWordsLo, LF_UndoFreeWordsLo), - - DFGIBREAK(FileName) -}; - -//static -const Uint32 DictFilegroupInfo::MappingSize = -sizeof(DictFilegroupInfo::Mapping) / sizeof(SimpleProperties::SP2StructMapping); - -//static -const -SimpleProperties::SP2StructMapping -DictFilegroupInfo::FileMapping[] = { - DFGIMAPS(File, FileName, FileName, 0, PATH_MAX), - DFGIMAP2(File, FileType, FileType, 0, 1), - DFGIMAP(File, FileId, FileId), - DFGIMAP(File, FileVersion, FileVersion), - DFGIMAP(File, FileFGroupId, FilegroupId), - DFGIMAP(File, FileFGroupVersion, FilegroupVersion), - DFGIMAP(File, FileSizeHi, FileSizeHi), - DFGIMAP(File, FileSizeLo, FileSizeLo), - DFGIMAP(File, FileFreeExtents, FileFreeExtents), - DFGIBREAK(FileEnd) -}; - -//static -const Uint32 DictFilegroupInfo::FileMappingSize = -sizeof(DictFilegroupInfo::FileMapping) / -sizeof(SimpleProperties::SP2StructMapping); - -void -DictFilegroupInfo::Filegroup::init(){ - memset(FilegroupName, 0, sizeof(FilegroupName)); - FilegroupType = ~0; - FilegroupId = ~0; - FilegroupVersion = ~0; - - TS_ExtentSize = 0; - TS_LogfileGroupId = ~0; - TS_LogfileGroupVersion = ~0; - TS_DataGrow.GrowLimit = 0; - TS_DataGrow.GrowSizeHi = 0; - TS_DataGrow.GrowSizeLo = 0; - memset(TS_DataGrow.GrowPattern, 0, sizeof(TS_DataGrow.GrowPattern)); - TS_DataGrow.GrowMaxSize = 0; - LF_UndoFreeWordsHi= 0; - LF_UndoFreeWordsLo= 0; -} - -void -DictFilegroupInfo::File::init(){ - memset(FileName, 0, sizeof(FileName)); - FileType = ~0; - FileId = ~0; - FileVersion = ~0; - FilegroupId = ~0; - FilegroupVersion = ~0; - FileSizeHi = 0; - FileSizeLo = 0; - FileFreeExtents = 0; -} - -// blob table name hack - -bool -DictTabInfo::isBlobTableName(const char* name, Uint32* ptab_id, Uint32* pcol_no) -{ - const char* const prefix = "NDB$BLOB_"; - const char* s = strrchr(name, table_name_separator); - s = (s == NULL ? 
name : s + 1); - if (strncmp(s, prefix, strlen(prefix)) != 0) - return false; - s += strlen(prefix); - uint i, n; - for (i = 0, n = 0; '0' <= s[i] && s[i] <= '9'; i++) - n = 10 * n + (s[i] - '0'); - if (i == 0 || s[i] != '_') - return false; - const uint tab_id = n; - s = &s[i + 1]; - for (i = 0, n = 0; '0' <= s[i] && s[i] <= '9'; i++) - n = 10 * n + (s[i] - '0'); - if (i == 0 || s[i] != 0) - return false; - const uint col_no = n; - if (ptab_id) - *ptab_id = tab_id; - if (pcol_no) - *pcol_no = col_no; - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp deleted file mode 100644 index 67c6dc2afd0..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/DihContinueB.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -bool -printCONTINUEB_DBDIH(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 not_used){ - - (void)not_used; - - switch (theData[0]) { - case DihContinueB::ZPACK_TABLE_INTO_PAGES: - fprintf(output, " Pack Table Into Pages: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZPACK_FRAG_INTO_PAGES: - fprintf(output, " Pack Frag Into Pages: Table: %d Fragment: %d PageIndex: %d WordIndex: %d\n", - theData[1], theData[2], theData[3], theData[4]); - return true; - break; - case DihContinueB::ZREAD_PAGES_INTO_TABLE: - fprintf(output, " Read Pages Into Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZREAD_PAGES_INTO_FRAG: - fprintf(output, " Read Pages Into Frag: Table: %d Fragment: %d PageIndex: %d WordIndex: %d\n", - theData[1], theData[2], theData[3], theData[4]); - return true; - break; -#if 0 - case DihContinueB::ZREAD_TAB_DESCRIPTION: - fprintf(output, " Read Table description: %d\n", theData[1]); - return true; - break; -#endif - case DihContinueB::ZCOPY_TABLE: - fprintf(output, " Copy Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZCOPY_TABLE_NODE: - fprintf(output, " Copy table node: TableId: %d NodeId: %d\n", - theData[1], theData[2]); - fprintf(output, "PageIndex: %d WordIndex: %d NoOfWords: %d\n", - theData[3], theData[4], theData[5]); - return true; - break; - case DihContinueB::ZSTART_FRAGMENT: - fprintf(output, " Start fragment: Table: %d Fragment: %d\n", - theData[1], theData[2]); - return true; - break; - case DihContinueB::ZCOMPLETE_RESTART: - fprintf(output, "Complete Restart\n"); - return true; - break; - case DihContinueB::ZREAD_TABLE_FROM_PAGES: - fprintf(output, " Read Table From Pages: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZSR_PHASE2_READ_TABLE: - fprintf(output, " Phase 2 Read Table: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZCHECK_TC_COUNTER: - fprintf(output, " Check Tc Counter from place %d\n", 
theData[1]); - return true; - break; - case DihContinueB::ZCALCULATE_KEEP_GCI: - fprintf(output, " Calc Keep GCI: Table: %d Fragment: %d\n", - theData[1], theData[2]); - return true; - break; - case DihContinueB::ZSTORE_NEW_LCP_ID: - fprintf(output, " Store New LCP Id\n"); - return true; - break; - case DihContinueB::ZTABLE_UPDATE: - fprintf(output, " Table Update: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZCHECK_LCP_COMPLETED: - fprintf(output, " Check LCP Completed: TableId %d\n", theData[1]); - return true; - break; - case DihContinueB::ZINIT_LCP: - fprintf(output, " Init LCP: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZADD_TABLE_MASTER_PAGES: - fprintf(output, " Add Table Master Pages: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZDIH_ADD_TABLE_MASTER: - fprintf(output, " Dih Add Table Master: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZADD_TABLE_SLAVE_PAGES: - fprintf(output, " Add Table Slave Pages: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZDIH_ADD_TABLE_SLAVE: - fprintf(output, " Add Table Slave: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZSTART_GCP: - fprintf(output, " Start GCP\n"); - return true; - break; - case DihContinueB::ZCOPY_GCI: - fprintf(output, " Copy GCI\n"); - return true; - break; - case DihContinueB::ZEMPTY_VERIFY_QUEUE: - fprintf(output, " Empty Verify Queue\n"); - return true; - break; - case DihContinueB::ZCHECK_GCP_STOP: - fprintf(output, " Check GCP Stop\n"); - if (len == 6){ - fprintf(output, "coldGcpStatus = %d\n", theData[1]); - fprintf(output, "cgcpStatus = %d\n", theData[2]); - fprintf(output, "coldGcpId = %d\n", theData[3]); - fprintf(output, "cnewgcp = %d\n", theData[4]); - fprintf(output, "cgcpSameCounter = %d\n", theData[5]); - } - return true; - break; - case DihContinueB::ZREMOVE_NODE_FROM_TABLE: - fprintf(output, " Remove Node From Table: Node: %d Table: %d\n", - theData[1], theData[2]); - return true; - break; - case DihContinueB::ZCOPY_NODE: - fprintf(output, " Copy Node: Table: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZSTART_TAKE_OVER: - fprintf(output, " Start Take Over: TakeOverPtr: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], theData[3]); - return true; - break; - case DihContinueB::ZCHECK_START_TAKE_OVER: - fprintf(output, " Check Start Take Over\n"); - return true; - break; - case DihContinueB::ZTO_START_COPY_FRAG: - fprintf(output, " To Start Copy Frag: TakeOverPtr: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZINVALIDATE_NODE_LCP: - fprintf(output, " Invalide LCP: NodeId: %d TableId %d\n", - theData[1], theData[2]); - return true; - break; - case DihContinueB::ZINITIALISE_RECORDS: - fprintf(output, " Initialise Records: tdata0: %d\n", theData[1]); - return true; - break; - case DihContinueB::ZSTART_PERMREQ_AGAIN: - fprintf(output, " START_PERMREQ again for node: %d\n", theData[1]); - return true; - break; - case DihContinueB::SwitchReplica: - fprintf(output, " NodeId = %d TableId = %d FragNo = %d\n", - theData[1], theData[2], theData[3]); - return true; - break; - case DihContinueB::ZSEND_START_TO: - fprintf(output, " Send Start Take Over: TakeOverPtr: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], theData[3]); - return true; - break; - case DihContinueB::ZSEND_UPDATE_TO: - fprintf(output, " Send Update Take Over: TakeOverPtr: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], 
theData[3]); - return true; - break; - case DihContinueB::ZSEND_END_TO: - fprintf(output, " Send End Take Over: TakeOverPtr: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], theData[3]); - return true; - break; - case DihContinueB::ZSEND_ADD_FRAG: - fprintf(output, " Send Add Fragment: TakeOverPtr: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], theData[3]); - return true; - break; - case DihContinueB::ZSEND_CREATE_FRAG: - fprintf(output, " Send Create Fragment: TakeOverPtr: %d, storedType: %d, start Gci: %d, startNode: %d, toNode: %d\n", - theData[1], theData[2], theData[3], theData[4], theData[5]); - return true; - break; - case DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE: - fprintf(output, " Wait drop tab writing to file TableId: %d\n", theData[1]); - return true; - case DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH: - fprintf(output, " Wait drop tab FailedNodeId: %d TableId: %d\n", - theData[1], theData[2]); - return true; - default: - fprintf(output, " Default system error lab...\n"); - break; - }//switch - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp b/storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp deleted file mode 100644 index 6dc5fb17b26..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/DihSwitchReplicaReq.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -bool -printDIH_SWITCH_REPLICA_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - - DihSwitchReplicaReq * req = (DihSwitchReplicaReq *)&theData[0]; - - const Uint32 requestInfo = req->requestInfo; - - switch(DihSwitchReplicaReq::getRequestType(requestInfo)){ - case DihSwitchReplicaReq::RemoveNodeAsPrimary:{ - fprintf(output, " RemoveNodeAsPrimary: Node=%d", req->nodeId); - if(DihSwitchReplicaReq::getAllTables(requestInfo)) - fprintf(output, " All Tables"); - else - fprintf(output, " TableId=%d", req->tableId); - - if(DihSwitchReplicaReq::getDistribute(requestInfo)) - fprintf(output, " Distribute"); - fprintf(output, "\n"); - return true; - } - break; - default: - fprintf(output, " Unknown request type:\n"); - } - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp b/storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp deleted file mode 100644 index 5fe3d62c22a..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/DisconnectRep.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-
-
-
-#include
-
-bool
-printDISCONNECT_REP(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
-
- const DisconnectRep * const sig = (DisconnectRep *) theData;
-
- fprintf(output, " NodeId: %d, ErrorCode: %d\n",
- sig->nodeId, sig->err);
-
- return true;
-}
diff --git a/storage/ndb/src/common/debugger/signaldata/DropIndx.cpp b/storage/ndb/src/common/debugger/signaldata/DropIndx.cpp
deleted file mode 100644
index 3c2dd03aafc..00000000000
--- a/storage/ndb/src/common/debugger/signaldata/DropIndx.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2003, 2005 MySQL AB
- Use is subject to license terms
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-
-#include
-
-bool printDROP_INDX_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo)
-{
-// const DropIndxReq * const sig = (DropIndxReq *) theData;
-
- return false;
-}
-
-bool printDROP_INDX_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo)
-{
-// const DropIndxConf * const sig = (DropIndxConf *) theData;
-
- return false;
-}
-
-bool printDROP_INDX_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo)
-{
-// const DropIndxRef * const sig = (DropIndxRef *) theData;
-
- return false;
-}
diff --git a/storage/ndb/src/common/debugger/signaldata/DropTab.cpp b/storage/ndb/src/common/debugger/signaldata/DropTab.cpp
deleted file mode 100644
index 0666c6ae5c9..00000000000
--- a/storage/ndb/src/common/debugger/signaldata/DropTab.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2003, 2005 MySQL AB
- Use is subject to license terms
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printDROP_TAB_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTabReq * const sig = (DropTabReq *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d requestType: %d\n", - sig->senderRef, sig->senderData, sig->tableId, sig->requestType); - return true; -} - -bool printDROP_TAB_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTabConf * const sig = (DropTabConf *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d\n", - sig->senderRef, sig->senderData, sig->tableId); - - return true; -} - -bool printDROP_TAB_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTabRef * const sig = (DropTabRef *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d errorCode: %d\n", - sig->senderRef, sig->senderData, sig->tableId, sig->errorCode); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/DropTrig.cpp b/storage/ndb/src/common/debugger/signaldata/DropTrig.cpp deleted file mode 100644 index 6127d7c8d7a..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/DropTrig.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool printDROP_TRIG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTrigReq * const sig = (DropTrigReq *) theData; - - //char triggerName[MAX_TAB_NAME_SIZE]; - //char triggerType[32]; - //char triggerActionTime[32]; - //char triggerEvent[32]; - - //sig->getTriggerName((char *) &triggerName); - //switch(sig->getTriggerType()) { - //case(TriggerType::SECONDARY_INDEX): - //strcpy(triggerType, "SECONDARY_INDEX"); - //break; - //case(TriggerType::SUBSCRIPTION): - //strcpy(triggerType, "SUBSCRIPTION"); - //break; - //default: - //strcpy(triggerType, "UNSUPPORTED"); - //} - //strcpy(triggerActionTime, - //(sig->getTriggerActionTime() == TriggerActionTime::BEFORE)? 
- //"BEFORE":"AFTER"); - //switch(sig->getTriggerEvent()) { - //case (TriggerEvent::TE_INSERT): - //strcpy(triggerEvent, "INSERT"); - //break; - //case(TriggerEvent::TE_DELETE): - //strcpy(triggerEvent, "DELETE"); - //break; - //case(TriggerEvent::TE_UPDATE): - //strcpy(triggerEvent, "UPDATE"); - //break; - //} - - fprintf(output, "User: %u, ", sig->getUserRef()); - //fprintf(output, "Trigger name: \"%s\"\n", triggerName); - //fprintf(output, "Type: %s, ", triggerType); - //fprintf(output, "Action: %s, ", triggerActionTime); - //fprintf(output, "Event: %s, ", triggerEvent); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "\n"); - - return false; -} - -bool printDROP_TRIG_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTrigConf * const sig = (DropTrigConf *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "\n"); - - return false; -} - -bool printDROP_TRIG_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const DropTrigRef * const sig = (DropTrigRef *) theData; - - fprintf(output, "User: %u, ", sig->getUserRef()); - fprintf(output, "Trigger id: %u, ", sig->getTriggerId()); - fprintf(output, "Table id: %u, ", sig->getTableId()); - fprintf(output, "Error code: %u, ", sig->getErrorCode()); - fprintf(output, "\n"); - - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FailRep.cpp b/storage/ndb/src/common/debugger/signaldata/FailRep.cpp deleted file mode 100644 index f5f053d8ddb..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FailRep.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printFAIL_REP(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const FailRep * const sig = (FailRep *) theData; - - fprintf(output, " FailedNode: %d, FailCause: %d\n", - sig->failNodeId, sig->failCause); - - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp b/storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp deleted file mode 100644 index 8584003a601..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FireTrigOrd.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -static -const char * -trigEvent(Uint32 i){ - switch(i){ - case TriggerEvent::TE_INSERT: - return "insert"; - break; - case TriggerEvent::TE_UPDATE: - return "update"; - break; - case TriggerEvent::TE_DELETE: - return "delete"; - break; - } - return "UNKNOWN"; -} - -bool -printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, - Uint16 receiverBlockNo) -{ - const FireTrigOrd * const sig = (FireTrigOrd *) theData; - - fprintf(output, " TriggerId: %d TriggerEvent: %s\n", - sig->getTriggerId(), - trigEvent(sig->getTriggerEvent())); - fprintf(output, " UserRef: (%d, %d) User data: %x\n", - refToNode(sig->getUserRef()), - refToBlock(sig->getUserRef()), - sig->getConnectionPtr()); - fprintf(output, " Signal: PK=%d BEFORE=%d AFTER=%d\n", - sig->getNoOfPrimaryKeyWords(), - sig->getNoOfBeforeValueWords(), - sig->getNoOfAfterValueWords()); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp deleted file mode 100644 index ea00573afbf..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FsAppendReq.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printFSAPPENDREQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - bool ret = true; - - const FsAppendReq * const sig = (FsAppendReq *) theData; - - fprintf(output, " FilePointer: %d\n", sig->filePointer); - fprintf(output, " UserReference: H\'%.8x, UserPointer: H\'%.8x\n", - sig->userReference, sig->userPointer); - - fprintf(output, " varIndex: %d offset: %d size: %d\n", - sig->varIndex, - sig->offset, - sig->size); - return ret; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp deleted file mode 100644 index 44ae70096fa..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FsCloseReq.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-
-
-
-#include
-
-bool
-printFSCLOSEREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
-
- const FsCloseReq * const sig = (FsCloseReq *) theData;
-
- fprintf(output, " UserPointer: %d\n",
- sig->userPointer);
- fprintf(output, " FilePointer: %d\n",
- sig->filePointer);
- fprintf(output, " UserReference: H\'%.8x\n",
- sig->userReference);
-
- fprintf(output, " Flags: H\'%.8x, ", sig->fileFlag);
- if (sig->getRemoveFileFlag(sig->fileFlag))
- fprintf(output, "Remove file");
- else
- fprintf(output, "Don't remove file");
- fprintf(output, "\n");
-
- return len == 4;
-}
diff --git a/storage/ndb/src/common/debugger/signaldata/FsConf.cpp b/storage/ndb/src/common/debugger/signaldata/FsConf.cpp
deleted file mode 100644
index 338920dffb6..00000000000
--- a/storage/ndb/src/common/debugger/signaldata/FsConf.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2003, 2005 MySQL AB
- Use is subject to license terms
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-
-
-
-#include
-
-bool
-printFSCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
-
- const FsConf * const sig = (FsConf *) theData;
-
- fprintf(output, " UserPointer: %d\n", sig->userPointer);
-
- if (len > 1){
- // Only valid if this is a FSOPENCONF
- fprintf(output, " FilePointer: %d\n", sig->filePointer);
- }
- return true;
-}
diff --git a/storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp
deleted file mode 100644
index 4c270fee211..00000000000
--- a/storage/ndb/src/common/debugger/signaldata/FsOpenReq.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2003, 2005 MySQL AB
- Use is subject to license terms
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printFSOPENREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const FsOpenReq * const sig = (FsOpenReq *) theData; - - - fprintf(output, " UserReference: H\'%.8x, userPointer: H\'%.8x\n", - sig->userReference, sig->userPointer); - fprintf(output, " FileNumber[1-4]: H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n", - sig->fileNumber[0], sig->fileNumber[1], sig->fileNumber[2], sig->fileNumber[3]); - fprintf(output, " FileFlags: H\'%.8x ", - sig->fileFlags); - - // File open mode must be one of ReadOnly, WriteOnly or ReadWrite - const Uint32 flags = sig->fileFlags; - switch(flags & 3){ - case FsOpenReq::OM_READONLY: - fprintf(output, "Open read only"); - break; - case FsOpenReq::OM_WRITEONLY: - fprintf(output, "Open write only"); - break; - case FsOpenReq::OM_READWRITE: - fprintf(output, "Open read and write"); - break; - default: - fprintf(output, "Open mode unknown!"); - } - - if (flags & FsOpenReq::OM_CREATE) - fprintf(output, ", Create new file"); - if (flags & FsOpenReq::OM_TRUNCATE) - fprintf(output, ", Truncate existing file"); - if (flags & FsOpenReq::OM_APPEND) - fprintf(output, ", Append"); - - fprintf(output, "\n"); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp b/storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp deleted file mode 100644 index b146b150baf..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FsReadWriteReq.cpp +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printFSREADWRITEREQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - bool ret = true; - - const FsReadWriteReq * const sig = (FsReadWriteReq *) theData; - - fprintf(output, " UserPointer: %d\n", sig->userPointer); - fprintf(output, " FilePointer: %d\n", sig->filePointer); - fprintf(output, " UserReference: H\'%.8x", sig->userReference); - - fprintf(output, " Operation flag: H\'%.8x (", sig->operationFlag); - if (sig->getSyncFlag(sig->operationFlag)) - fprintf(output, "Sync,"); - else - fprintf(output, "No sync,"); - - fprintf(output, " Format="); - switch(sig->getFormatFlag(sig->operationFlag)){ - case FsReadWriteReq::fsFormatListOfPairs: - fprintf(output, "List of pairs)\n"); - break; - case FsReadWriteReq::fsFormatArrayOfPages: - fprintf(output, "Array of pages)\n"); - break; - case FsReadWriteReq::fsFormatListOfMemPages: - fprintf(output, "List of mem pages)\n"); - break; - case FsReadWriteReq::fsFormatGlobalPage: - fprintf(output, "List of global pages)\n"); - case FsReadWriteReq::fsFormatSharedPage: - fprintf(output, "List of shared pages)\n"); - break; - default: - fprintf(output, "fsFormatMax not handled\n"); - ret = false; - break; - } - - fprintf(output, " varIndex: %d\n", - sig->varIndex); - fprintf(output, " numberOfPages: %d\n", - sig->numberOfPages); - fprintf(output, " pageData: "); - - unsigned int i; - switch(sig->getFormatFlag(sig->operationFlag)){ - case FsReadWriteReq::fsFormatListOfPairs: - for (i= 0; i < sig->numberOfPages*2; i += 2){ - fprintf(output, " H\'%.8x, H\'%.8x\n", sig->data.pageData[i], - sig->data.pageData[i + 1]); - } - break; - case FsReadWriteReq::fsFormatArrayOfPages: - fprintf(output, " H\'%.8x, H\'%.8x\n", sig->data.pageData[0], - sig->data.pageData[1]); - break; - case FsReadWriteReq::fsFormatListOfMemPages: - for (i= 0; i < (sig->numberOfPages + 1); i++){ - fprintf(output, " H\'%.8x, ", sig->data.pageData[i]); - } - break; - case FsReadWriteReq::fsFormatGlobalPage: - for (i= 0; i < sig->numberOfPages; i++){ - fprintf(output, " H\'%.8x, ", sig->data.pageData[i]); - } - break; - default: - fprintf(output, "Impossible event\n"); - } - - fprintf(output, "\n"); - return ret; -} diff --git a/storage/ndb/src/common/debugger/signaldata/FsRef.cpp b/storage/ndb/src/common/debugger/signaldata/FsRef.cpp deleted file mode 100644 index de48b36855e..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/FsRef.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printFSREF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - bool ret = true; - - const FsRef * const sig = (FsRef *) theData; - - fprintf(output, " UserPointer: %d\n", - sig->userPointer); - - fprintf(output, " ErrorCode: %d, ", sig->errorCode); - ndbd_exit_classification cl; - switch (sig->getErrorCode(sig->errorCode)){ - case FsRef::fsErrNone: - fprintf(output, "No error"); - break; - default: - fprintf(output, ndbd_exit_message(sig->getErrorCode(sig->errorCode), &cl)); - break; - } - fprintf(output, "\n"); - fprintf(output, " OS ErrorCode: %d \n", sig->osErrorCode); - - return ret; -} diff --git a/storage/ndb/src/common/debugger/signaldata/GCPSave.cpp b/storage/ndb/src/common/debugger/signaldata/GCPSave.cpp deleted file mode 100644 index c0a13acf783..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/GCPSave.cpp +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -bool -printGCPSaveReq(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo){ - - GCPSaveReq * sr = (GCPSaveReq*)theData; - - fprintf(output, " dihBlockRef = (%d, %d) dihPtr = %d gci = %d\n", - refToBlock(sr->dihBlockRef), refToNode(sr->dihBlockRef), - sr->dihPtr, sr->gci); - - return true; -} - -bool -printGCPSaveRef(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo){ - - GCPSaveRef * sr = (GCPSaveRef*)theData; - - fprintf(output, " nodeId = %d dihPtr = %d gci = %d reason: ", - sr->nodeId, - sr->dihPtr, sr->gci); - - switch(sr->errorCode){ - case GCPSaveRef::NodeShutdownInProgress: - fprintf(output, "NodeShutdownInProgress\n"); - break; - case GCPSaveRef::FakedSignalDueToNodeFailure: - fprintf(output, "FakedSignalDueToNodeFailure\n"); - break; - default: - fprintf(output, "Unknown reason: %d\n", sr->errorCode); - return false; - } - - return true; -} - -bool -printGCPSaveConf(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo){ - - GCPSaveConf * sr = (GCPSaveConf*)theData; - - fprintf(output, " nodeId = %d dihPtr = %d gci = %d\n", - sr->nodeId, - sr->dihPtr, sr->gci); - - return true; -} - - diff --git a/storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp b/storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp deleted file mode 100755 index 39197fae04a..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/IndxAttrInfo.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of 
the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printINDXATTRINFO(FILE * output, const Uint32 * theData, Uint32 len, - Uint16 receiverBlockNo) -{ -// const IndxAttrInfo * const sig = (IndxAttrInfo *) theData; - - Uint32 i = 0; - while (i < len) - fprintf(output, " H\'%.8x", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp b/storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp deleted file mode 100755 index 24abc4e9a1d..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/IndxKeyInfo.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printINDXKEYINFO(FILE * output, const Uint32 * theData, Uint32 len, - Uint16 receiverBlockNo) -{ -// const IndxKeyInfo * const sig = (IndxKeyInfo *) theData; - - Uint32 i = 0; - while (i < len) - fprintf(output, " H\'%.8x", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/LCP.cpp b/storage/ndb/src/common/debugger/signaldata/LCP.cpp deleted file mode 100644 index 7c39322cdf0..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/LCP.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -bool -printSTART_LCP_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - const StartLcpReq * const sig = (StartLcpReq *) theData; - - char buf1[8*_NDB_NODE_BITMASK_SIZE+1]; - char buf2[8*_NDB_NODE_BITMASK_SIZE+1]; - fprintf(output, - " Sender: %d LcpId: %d\n" - " ParticipatingDIH = %s\n" - " ParticipatingLQH = %s\n", - refToNode(sig->senderRef), sig->lcpId, - sig->participatingDIH.getText(buf1), - sig->participatingLQH.getText(buf2)); - - return true; -} - -bool -printSTART_LCP_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - const StartLcpConf * const sig = (StartLcpConf *) theData; - - fprintf(output, " Sender: %d LcpId: %d\n", - refToNode(sig->senderRef), sig->lcpId); - - return true; -} - -bool -printLCP_FRAG_ORD(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - const LcpFragOrd * const sig = (LcpFragOrd *) theData; - - fprintf(output, " LcpId: %d LcpNo: %d Table: %d Fragment: %d\n", - sig->lcpId, sig->lcpNo, sig->tableId, sig->fragmentId); - - fprintf(output, " KeepGCI: %d LastFragmentFlag: %d\n", - sig->keepGci, sig->lastFragmentFlag); - return true; -} - -bool -printLCP_FRAG_REP(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - const LcpFragRep * const sig = (LcpFragRep *) theData; - - fprintf(output, " LcpId: %d LcpNo: %d NodeId: %d Table: %d Fragment: %d\n", - sig->lcpId, sig->lcpNo, sig->nodeId, sig->tableId, sig->fragId); - fprintf(output, " Max GCI Started: %d Max GCI Completed: %d\n", - sig->maxGciStarted, sig->maxGciCompleted); - return true; -} - -bool -printLCP_COMPLETE_REP(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo){ - - const LcpCompleteRep * const sig = (LcpCompleteRep *) theData; - - fprintf(output, " LcpId: %d NodeId: %d Block: %s\n", - sig->lcpId, sig->nodeId, getBlockName(sig->blockNo)); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp deleted file mode 100644 index 94d83a58d61..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/LqhFrag.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -bool -printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB){ - LqhFragReq* sig = (LqhFragReq*)theData; - - fprintf(output, " senderData: %d senderRef: %x", - sig->senderData, sig->senderRef); - fprintf(output, " tableId: %d fragmentId: %d tableType: %d", - sig->tableId, sig->fragmentId, sig->tableType); - if (sig->primaryTableId == RNIL) - fprintf(output, " primaryTableId: RNIL\n"); - else - fprintf(output, " primaryTableId: %d\n", sig->primaryTableId); - fprintf(output, " localKeyLength: %d maxLoadFactor: %d minLoadFactor: %d\n", - sig->localKeyLength, sig->maxLoadFactor, sig->minLoadFactor); - fprintf(output, " kValue: %d lh3DistrBits: %d lh3PageBits: %d\n", - sig->kValue, sig->lh3DistrBits, sig->lh3PageBits); - - fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n", - sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength); - - fprintf(output, " maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n", - sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh); - fprintf(output, " schemaVersion: %d nextLCP: %d\n", - sig->schemaVersion, sig->nextLCP); - - return true; -} -bool -printLQH_FRAG_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 rec){ - LqhFragConf* sig = (LqhFragConf*)theData; - - fprintf(output, " senderData: %d lqhFragPtr: %d\n", - sig->senderData, sig->lqhFragPtr); - return true; -} - -bool -printLQH_FRAG_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 rec){ - LqhFragRef* sig = (LqhFragRef*)theData; - - fprintf(output, " senderData: %d errorCode: %d\n", - sig->senderData, sig->errorCode); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp b/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp deleted file mode 100644 index 9db9d47d6ac..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/LqhKey.cpp +++ /dev/null @@ -1,183 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printLQHKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const LqhKeyReq * const sig = (LqhKeyReq *) theData; - - fprintf(output, - " ClientPtr = H\'%.8x hashValue = H\'%.8x tcBlockRef = H\'%.8x\n" - " transId1 = H\'%.8x transId2 = H\'%.8x savePointId = H\'%.8x\n", - sig->clientConnectPtr, // DATA 0 - sig->hashValue, // DATA 2 - sig->tcBlockref, // DATA 4 - sig->transId1, // DATA 7 - sig->transId2, // DATA 8 - sig->savePointId // DATA 9 - ); - - const Uint32 reqInfo = sig->requestInfo; - const Uint32 attrLen = sig->attrLen; - - fprintf(output, - " Op: %d Lock: %d Flags: ", - LqhKeyReq::getOperation(reqInfo), - LqhKeyReq::getLockType(reqInfo)); - if(LqhKeyReq::getSimpleFlag(reqInfo)) - fprintf(output, "Simple "); - if(LqhKeyReq::getDirtyFlag(reqInfo)) - fprintf(output, "Dirty "); - if(LqhKeyReq::getInterpretedFlag(reqInfo)) - fprintf(output, "Interpreted "); - if(LqhKeyReq::getScanTakeOverFlag(attrLen)) - fprintf(output, "ScanTakeOver "); - if(LqhKeyReq::getMarkerFlag(reqInfo)) - fprintf(output, "CommitAckMarker "); - if(LqhKeyReq::getNoDiskFlag(reqInfo)) - fprintf(output, "NoDisk "); - if(LqhKeyReq::getRowidFlag(reqInfo)) - fprintf(output, "Rowid "); - if(LqhKeyReq::getNrCopyFlag(reqInfo)) - fprintf(output, "NrCopy "); - if(LqhKeyReq::getGCIFlag(reqInfo)) - fprintf(output, "GCI "); - - fprintf(output, "ScanInfo/noFiredTriggers: H\'%x\n", sig->scanInfo); - - fprintf(output, - " AttrLen: %d (%d in this) KeyLen: %d TableId: %d SchemaVer: %d\n", - LqhKeyReq::getAttrLen(attrLen), - LqhKeyReq::getAIInLqhKeyReq(reqInfo), - LqhKeyReq::getKeyLen(reqInfo), - LqhKeyReq::getTableId(sig->tableSchemaVersion), - LqhKeyReq::getSchemaVersion(sig->tableSchemaVersion)); - - fprintf(output, - " FragId: %d ReplicaNo: %d LastReplica: %d NextNodeId: %d\n", - LqhKeyReq::getFragmentId(sig->fragmentData), - LqhKeyReq::getSeqNoReplica(reqInfo), - LqhKeyReq::getLastReplicaNo(reqInfo), - LqhKeyReq::getNextReplicaNodeId(sig->fragmentData)); - - bool printed = false; - Uint32 nextPos = LqhKeyReq::getApplicationAddressFlag(reqInfo) << 1; - if(nextPos != 0){ - fprintf(output, - " ApiRef: H\'%.8x ApiOpRef: H\'%.8x", - sig->variableData[0], - sig->variableData[1]); - printed = true; - } - - if(LqhKeyReq::getSameClientAndTcFlag(reqInfo)){ - fprintf(output, " TcOpRec: H\'%.8x", sig->variableData[nextPos]); - nextPos++; - printed = true; - } - - Uint32 tmp = LqhKeyReq::getLastReplicaNo(reqInfo) - - LqhKeyReq::getSeqNoReplica(reqInfo); - if(tmp > 1){ - NodeId node2 = sig->variableData[nextPos] & 0xffff; - NodeId node3 = sig->variableData[nextPos] >> 16; - fprintf(output, " NextNodeId2: %d NextNodeId3: %d", - node2, node3); - nextPos ++; - printed = true; - } - if(printed) - fprintf(output, "\n"); - - printed = false; - if(LqhKeyReq::getStoredProcFlag(attrLen)){ - fprintf(output, " StoredProcId: %d", sig->variableData[nextPos]); - nextPos++; - printed = true; - } - - if(LqhKeyReq::getReturnedReadLenAIFlag(reqInfo)){ - fprintf(output, " ReturnedReadLenAI: %d", - sig->variableData[nextPos]); - nextPos++; - printed = true; - } - - const UintR keyLen = LqhKeyReq::getKeyLen(reqInfo); - if(keyLen > 0){ - fprintf(output, " KeyInfo: "); - for(UintR i = 0; ivariableData[nextPos]); - fprintf(output, "\n"); - } - - if (LqhKeyReq::getRowidFlag(reqInfo)) - { - 
fprintf(output, " Rowid: [ page: %d idx: %d ]\n", - sig->variableData[nextPos + 0], - sig->variableData[nextPos + 1]); - nextPos += 2; - } - - if (LqhKeyReq::getGCIFlag(reqInfo)) - { - fprintf(output, " GCI: %u", sig->variableData[nextPos + 0]); - nextPos++; - } - - if(!LqhKeyReq::getInterpretedFlag(reqInfo)){ - fprintf(output, " AttrInfo: "); - for(int i = 0; ivariableData[nextPos]); - fprintf(output, "\n"); - } else { - fprintf(output, " InitialReadSize: %d InterpretedSize: %d " - "FinalUpdateSize: %d FinalReadSize: %d SubroutineSize: %d\n", - sig->variableData[nextPos+0], sig->variableData[nextPos+1], - sig->variableData[nextPos+2], sig->variableData[nextPos+3], - sig->variableData[nextPos+4]); - nextPos += 5; - } - return true; -} - -bool -printLQHKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ -// const LqhKeyConf * const sig = (LqhKeyConf *) theData; - - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} - -bool -printLQHKEYREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ -// const LqhKeyRef * const sig = (LqhKeyRef *) theData; - - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp b/storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp deleted file mode 100644 index 5f72b6a056d..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/LqhTrans.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printLQH_TRANSCONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const LqhTransConf * const sig = (LqhTransConf *)theData; - fprintf(output, " tcRef: %x\n", sig->tcRef); - fprintf(output, " lqhNodeId: %x\n", sig->lqhNodeId); - fprintf(output, " operationStatus: %x\n", sig->operationStatus); - fprintf(output, " transId1: %x\n", sig->transId1); - fprintf(output, " transId2: %x\n", sig->transId2); - fprintf(output, " apiRef: %x\n", sig->apiRef); - fprintf(output, " apiOpRec: %x\n", sig->apiOpRec); - fprintf(output, " lqhConnectPtr: %x\n", sig->lqhConnectPtr); - fprintf(output, " oldTcOpRec: %x\n", sig->oldTcOpRec); - fprintf(output, " requestInfo: %x\n", sig->requestInfo); - fprintf(output, " gci: %x\n", sig->gci); - fprintf(output, " nextNodeId1: %x\n", sig->nextNodeId1); - fprintf(output, " nextNodeId2: %x\n", sig->nextNodeId2); - fprintf(output, " nextNodeId3: %x\n", sig->nextNodeId3); - fprintf(output, " tableId: %x\n", sig->tableId); - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/Makefile.am b/storage/ndb/src/common/debugger/signaldata/Makefile.am deleted file mode 100644 index b6d7dd6736d..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/Makefile.am +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = libsignaldataprint.la - -libsignaldataprint_la_SOURCES = \ - TcKeyReq.cpp TcKeyConf.cpp TcKeyRef.cpp \ - TcRollbackRep.cpp \ - TupKey.cpp TupCommit.cpp LqhKey.cpp \ - FsOpenReq.cpp FsCloseReq.cpp FsRef.cpp FsConf.cpp FsReadWriteReq.cpp\ - SignalDataPrint.cpp SignalNames.cpp \ - ContinueB.cpp DihContinueB.cpp NdbfsContinueB.cpp \ - CloseComReqConf.cpp PackedSignal.cpp PrepFailReqRef.cpp \ - GCPSave.cpp DictTabInfo.cpp \ - AlterTable.cpp AlterTab.cpp \ - CreateTrig.cpp AlterTrig.cpp DropTrig.cpp \ - FireTrigOrd.cpp TrigAttrInfo.cpp \ - CreateIndx.cpp AlterIndx.cpp DropIndx.cpp TcIndx.cpp \ - IndxKeyInfo.cpp IndxAttrInfo.cpp \ - FsAppendReq.cpp ScanTab.cpp \ - BackupImpl.cpp BackupSignalData.cpp \ - UtilSequence.cpp UtilPrepare.cpp UtilDelete.cpp UtilExecute.cpp \ - LqhFrag.cpp DropTab.cpp PrepDropTab.cpp LCP.cpp MasterLCP.cpp \ - CopyGCI.cpp SystemError.cpp StartRec.cpp NFCompleteRep.cpp \ - FailRep.cpp DisconnectRep.cpp SignalDroppedRep.cpp \ - SumaImpl.cpp NdbSttor.cpp CreateFragmentation.cpp \ - UtilLock.cpp TuxMaint.cpp AccLock.cpp \ - LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp \ - ScanFrag.cpp - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am - -windoze-dsp: libsignaldataprint.dsp - -libsignaldataprint.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libsignaldataprint_la_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp b/storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp deleted file mode 100644 index 77f348a63e0..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/MasterLCP.cpp +++ /dev/null @@ -1,87 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -static -void -print(char *buf, size_t buf_len, MasterLCPConf::State s){ - switch(s){ - case MasterLCPConf::LCP_STATUS_IDLE: - BaseString::snprintf(buf, buf_len, "LCP_STATUS_IDLE"); - break; - case MasterLCPConf::LCP_STATUS_ACTIVE: - BaseString::snprintf(buf, buf_len, "LCP_STATUS_ACTIVE"); - break; - case MasterLCPConf::LCP_TAB_COMPLETED: - BaseString::snprintf(buf, buf_len, "LCP_TAB_COMPLETED"); - break; - case MasterLCPConf::LCP_TAB_SAVED: - BaseString::snprintf(buf, buf_len, "LCP_TAB_SAVED"); - break; - } -} - -NdbOut & -operator<<(NdbOut& out, const MasterLCPConf::State& s){ - static char buf[255]; - print(buf, sizeof(buf), s); - out << buf; - return out; -} - -bool -printMASTER_LCP_CONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - - MasterLCPConf * sig = (MasterLCPConf *)&theData[0]; - - static char buf[255]; - print(buf, sizeof(buf), (MasterLCPConf::State)sig->lcpState); - fprintf(output, " senderNode=%d failedNode=%d SenderState=%s\n", - sig->senderNodeId, sig->failedNodeId, buf); - return true; -} - -bool -printMASTER_LCP_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - - MasterLCPReq * sig = (MasterLCPReq *)&theData[0]; - - fprintf(output, " masterRef=(node=%d, block=%d), failedNode=%d\n", - refToNode(sig->masterRef), refToBlock(sig->masterRef), - sig->failedNodeId); - return true; -} - -bool -printMASTER_LCP_REF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - - MasterLCPRef * sig = (MasterLCPRef *)&theData[0]; - fprintf(output, " senderNode=%d failedNode=%d\n", - sig->senderNodeId, sig->failedNodeId); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp b/storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp deleted file mode 100644 index 74735546320..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/NFCompleteRep.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -bool -printNF_COMPLETE_REP(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - - NFCompleteRep * sig = (NFCompleteRep*)theData; - const char * who = getBlockName(sig->blockNo, 0); - - if(who == 0){ - fprintf(output, - " Node: %d has completed failure of node %d\n", - sig->nodeId, sig->failedNodeId); - } else { - fprintf(output, - " Node: %d block: %s has completed failure of node %d\n", - sig->nodeId, who, sig->failedNodeId); - } - - fprintf(output, "Sent from line: %d\n", - sig->from); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp b/storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp deleted file mode 100644 index 480ee078d2e..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/NdbSttor.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printNDB_STTOR(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const NdbSttor * const sig = (NdbSttor *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - fprintf(output, " nodeId: %x\n", sig->nodeId); - fprintf(output, " internalStartPhase: %x\n", sig->internalStartPhase); - fprintf(output, " typeOfStart: %x\n", sig->typeOfStart); - fprintf(output, " masterNodeId: %x\n", sig->masterNodeId); - - int left = len - NdbSttor::SignalLength; - if(left > 0){ - fprintf(output, " config: "); - for(int i = 0; iconfig[i]); - if(((i + 1) % 7) == 0 && (i+1) < left){ - fprintf(output, "\n config: "); - } - } - fprintf(output, "\n"); - } - return true; -} - -bool -printNDB_STTORRY(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const NdbSttorry * const sig = (NdbSttorry *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp b/storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp deleted file mode 100644 index 190e755c731..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -bool -printCONTINUEB_NDBFS(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 not_used){ - - (void)not_used; - - switch (theData[0]) { - case NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY: - fprintf(output, " Scanning the memory channel every 10ms\n"); - return true; - break; - case NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY: - fprintf(output, " Scanning the memory channel again with no delay\n"); - return true; - break; - default: - fprintf(output, " Default system error lab...\n"); - return false; - break; - }//switch - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp b/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp deleted file mode 100644 index 62de2a04f08..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/PackedSignal.cpp +++ /dev/null @@ -1,106 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -bool -printPACKED_SIGNAL(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - fprintf(output, "--------- Begin Packed Signals --------\n"); - // Print each signal separately - for (i = 0; i < len;) { - switch (PackedSignal::getSignalType(theData[i])) { - case ZCOMMIT: { - Uint32 signalLength = 4; - fprintf(output, "--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"COMMIT\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - fprintf(output, "Signal data: "); - for(Uint32 j = 0; j < signalLength; j++) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - break; - } - case ZCOMPLETE: { - Uint32 signalLength = 3; - fprintf(output, "--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"COMPLETE\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - fprintf(output, "Signal data: "); - for(Uint32 j = 0; j < signalLength; j++) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - break; - } - case ZCOMMITTED: { - Uint32 signalLength = 3; - fprintf(output, "--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"COMMITTED\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - fprintf(output, "Signal data: "); - for(Uint32 j = 0; j < signalLength; j++) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - break; - } - case ZCOMPLETED: { - Uint32 signalLength = 3; - fprintf(output, 
"--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"COMPLETED\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - fprintf(output, "Signal data: "); - for(Uint32 j = 0; j < signalLength; j++) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - break; - } - case ZLQHKEYCONF: { - Uint32 signalLength = LqhKeyConf::SignalLength; - - fprintf(output, "--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"LQHKEYCONF\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - printLQHKEYCONF(output, theData + i, signalLength, receiverBlockNo); - i += signalLength; - break; - } - case ZREMOVE_MARKER: { - Uint32 signalLength = 2; - fprintf(output, "--------------- Signal ----------------\n"); - fprintf(output, "r.bn: %u \"%s\", length: %u \"REMOVE_MARKER\"\n", - receiverBlockNo, getBlockName(receiverBlockNo,""), signalLength); - fprintf(output, "Signal data: "); - i++; // Skip first word! - for(Uint32 j = 0; j < signalLength; j++) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - break; - } - default: - fprintf(output, "Unknown signal type\n"); - i = len; // terminate printing - break; - } - }//for - fprintf(output, "--------- End Packed Signals ----------\n"); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp b/storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp deleted file mode 100644 index df2f3323795..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/PrepDropTab.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printPREP_DROP_TAB_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const PrepDropTabReq * const sig = (PrepDropTabReq *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d\n", - sig->senderRef, sig->senderData, sig->tableId); - return true; -} - -bool printPREP_DROP_TAB_CONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const PrepDropTabConf * const sig = (PrepDropTabConf *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d\n", - sig->senderRef, sig->senderData, sig->tableId); - - return true; -} - -bool printPREP_DROP_TAB_REF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo) -{ - const PrepDropTabRef * const sig = (PrepDropTabRef *) theData; - - fprintf(output, - " senderRef: %x senderData: %d TableId: %d errorCode: %d\n", - sig->senderRef, sig->senderData, sig->tableId, sig->errorCode); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp b/storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp deleted file mode 100644 index cf4afae6bc2..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/PrepFailReqRef.cpp +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -bool -printPREPFAILREQREF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 receiverBlockNo){ - - PrepFailReqRef * cc = (PrepFailReqRef*)theData; - - fprintf(output, " xxxBlockRef = (%d, %d) failNo = %d noOfNodes = %d\n", - refToBlock(cc->xxxBlockRef), refToNode(cc->xxxBlockRef), - cc->failNo, cc->noOfNodes); - - int hits = 0; - fprintf(output, " Nodes: "); - for(int i = 0; itheNodes, i)){ - hits++; - fprintf(output, " %d", i); - } - if(hits == 16){ - fprintf(output, "\n Nodes: "); - hits = 0; - } - } - if(hits != 0) - fprintf(output, "\n"); - - return true; -} - - diff --git a/storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp b/storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp deleted file mode 100644 index 6457e2795c9..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/ReadNodesConf.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include - -bool -printREAD_NODES_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const ReadNodesConf * const sig = (ReadNodesConf *)theData; - fprintf(output, " noOfNodes: %x\n", sig->noOfNodes); - fprintf(output, " ndynamicId: %x\n", sig->ndynamicId); - fprintf(output, " masterNodeId: %x\n", sig->masterNodeId); - - char buf[32*NdbNodeBitmask::Size+1]; - fprintf(output, " allNodes(defined): %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->allNodes, buf)); - fprintf(output, " inactiveNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->inactiveNodes, buf)); - fprintf(output, " clusterNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->clusterNodes, buf)); - fprintf(output, " startedNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->startedNodes, buf)); - fprintf(output, " startingNodes: %s\n", - BitmaskImpl::getText(NdbNodeBitmask::Size, sig->startingNodes, buf)); - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp b/storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp deleted file mode 100644 index f4a63dbdf0c..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/ScanFrag.cpp +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include -#include -#include - -bool -printSCAN_FRAGREQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const ScanFragReq * const sig = (ScanFragReq *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " resultRef: %x\n", sig->resultRef); - fprintf(output, " savePointId: %x\n", sig->savePointId); - fprintf(output, " requestInfo: %x\n", sig->requestInfo); - fprintf(output, " tableId: %x\n", sig->tableId); - fprintf(output, " fragmentNo: %x\n", sig->fragmentNoKeyLen & 0xFFFF); - fprintf(output, " keyLen: %x\n", sig->fragmentNoKeyLen >> 16); - fprintf(output, " schemaVersion: %x\n", sig->schemaVersion); - fprintf(output, " transId1: %x\n", sig->transId1); - fprintf(output, " transId2: %x\n", sig->transId2); - fprintf(output, " clientOpPtr: %x\n", sig->clientOpPtr); - fprintf(output, " batch_size_rows: %x\n", sig->batch_size_rows); - fprintf(output, " batch_size_bytes: %x\n", sig->batch_size_bytes); - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp deleted file mode 100644 index ca531c2863c..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright (c) 2003-2005, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include -#include -#include - -bool -printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const ScanTabReq * const sig = (ScanTabReq *) theData; - - const UintR requestInfo = sig->requestInfo; - - fprintf(output, " apiConnectPtr: H\'%.8x", - sig->apiConnectPtr); - fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo); - fprintf(output, " Parallellism: %u Batch: %u LockMode: %u Keyinfo: %u Holdlock: %u RangeScan: %u Descending: %u TupScan: %u\n ReadCommitted: %u DistributionKeyFlag: %u NoDisk: %u", - sig->getParallelism(requestInfo), - sig->getScanBatch(requestInfo), - sig->getLockMode(requestInfo), - sig->getKeyinfoFlag(requestInfo), - sig->getHoldLockFlag(requestInfo), - sig->getRangeScanFlag(requestInfo), - sig->getDescendingFlag(requestInfo), - sig->getTupScanFlag(requestInfo), - sig->getReadCommittedFlag(requestInfo), - sig->getDistributionKeyFlag(requestInfo), - sig->getNoDiskFlag(requestInfo)); - - if(sig->getDistributionKeyFlag(requestInfo)) - fprintf(output, " DKey: %x", sig->distributionKey); - - Uint32 keyLen = (sig->attrLenKeyLen >> 16); - Uint32 attrLen = (sig->attrLenKeyLen & 0xFFFF); - fprintf(output, " attrLen: %d, keyLen: %d tableId: %d, tableSchemaVer: %d\n", - attrLen, keyLen, sig->tableId, sig->tableSchemaVersion); - - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) storedProcId: H\'%.8x\n", - sig->transId1, sig->transId2, sig->storedProcId); - fprintf(output, " batch_byte_size: %d, first_batch_size: %d\n", - sig->batch_byte_size, sig->first_batch_size); - return false; -} - -bool -printSCANTABCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const ScanTabConf * const sig = (ScanTabConf *) theData; - - const UintR requestInfo = sig->requestInfo; - - fprintf(output, " apiConnectPtr: H\'%.8x\n", - sig->apiConnectPtr); - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n", - sig->transId1, sig->transId2); - - fprintf(output, " requestInfo: Eod: %d OpCount: %d\n", - (requestInfo & ScanTabConf::EndOfData) == ScanTabConf::EndOfData, - (requestInfo & (~ScanTabConf::EndOfData))); - size_t op_count= requestInfo & (~ScanTabConf::EndOfData); - if(op_count){ - fprintf(output, " Operation(s) [api tc rows len]:\n"); - ScanTabConf::OpData * op = (ScanTabConf::OpData*) - (theData + ScanTabConf::SignalLength); - for(size_t i = 0; iinfo != ScanTabConf::EndOfData) - fprintf(output, " [0x%x 0x%x %d %d]", - op->apiPtrI, op->tcPtrI, - ScanTabConf::getRows(op->info), - ScanTabConf::getLength(op->info)); - else - fprintf(output, " [0x%x 0x%x eod]", - op->apiPtrI, op->tcPtrI); - - op++; - } - fprintf(output, "\n"); - } - return false; -} - -bool -printSCANTABREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const ScanTabRef * const sig = (ScanTabRef *) theData; - - fprintf(output, " apiConnectPtr: H\'%.8x\n", - sig->apiConnectPtr); - - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n", - sig->transId1, sig->transId2); - - fprintf(output, " Errorcode: %u\n", sig->errorCode); - - fprintf(output, " closeNeeded: %u\n", sig->closeNeeded); - return false; -} - - -bool -printSCANFRAGNEXTREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - const ScanFragNextReq * const sig = (ScanFragNextReq *) theData; - - fprintf(output, " 
senderData: H\'%.8x\n", - sig->senderData); - - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n", - sig->transId1, sig->transId2); - - fprintf(output, " Close scan: %u\n", sig->closeFlag); - - return false; -} - -bool -printSCANNEXTREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - if(receiverBlockNo == DBTC){ - const ScanNextReq * const sig = (ScanNextReq *) theData; - - fprintf(output, " apiConnectPtr: H\'%.8x\n", - sig->apiConnectPtr); - - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) ", - sig->transId1, sig->transId2); - - fprintf(output, " Stop this scan: %u\n", sig->stopScan); - - const Uint32 * ops = theData + ScanNextReq::SignalLength; - if(len > ScanNextReq::SignalLength){ - fprintf(output, " tcFragPtr(s): "); - for(size_t i = ScanNextReq::SignalLength; i -#include -#include - -/** - * This is the register - */ - -const NameFunctionPair -SignalDataPrintFunctions[] = { - { GSN_TCKEYREQ, printTCKEYREQ }, - { GSN_TCINDXREQ, printTCKEYREQ }, - { GSN_TCKEYCONF, printTCKEYCONF }, - { GSN_TCKEYREF, printTCKEYREF }, - { GSN_LQHKEYREQ, printLQHKEYREQ }, - { GSN_LQHKEYCONF, printLQHKEYCONF }, - { GSN_LQHKEYREF, printLQHKEYREF }, - { GSN_TUPKEYREQ, printTUPKEYREQ }, - { GSN_TUPKEYCONF, printTUPKEYCONF }, - { GSN_TUPKEYREF, printTUPKEYREF }, - { GSN_TUP_COMMITREQ, printTUPCOMMITREQ }, - { GSN_CONTINUEB, printCONTINUEB }, - { GSN_FSOPENREQ, printFSOPENREQ }, - { GSN_FSCLOSEREQ, printFSCLOSEREQ }, - { GSN_FSREADREQ, printFSREADWRITEREQ }, - { GSN_FSWRITEREQ, printFSREADWRITEREQ }, - { GSN_FSCLOSEREF, printFSREF }, - { GSN_FSOPENREF, printFSREF }, - { GSN_FSWRITEREF, printFSREF }, - { GSN_FSREADREF, printFSREF }, - { GSN_FSSYNCREF, printFSREF }, - { GSN_FSCLOSECONF, printFSCONF }, - { GSN_FSOPENCONF, printFSCONF }, - { GSN_FSWRITECONF, printFSCONF }, - { GSN_FSREADCONF, printFSCONF }, - { GSN_FSSYNCCONF, printFSCONF }, - { GSN_CLOSE_COMREQ, printCLOSECOMREQCONF }, - { GSN_CLOSE_COMCONF, printCLOSECOMREQCONF }, - { GSN_PACKED_SIGNAL, printPACKED_SIGNAL }, - { GSN_PREP_FAILREQ, printPREPFAILREQREF }, - { GSN_PREP_FAILREF, printPREPFAILREQREF }, - { GSN_ALTER_TABLE_REQ, printALTER_TABLE_REQ }, - { GSN_ALTER_TABLE_CONF, printALTER_TABLE_CONF }, - { GSN_ALTER_TABLE_REF, printALTER_TABLE_REF }, - { GSN_ALTER_TAB_REQ, printALTER_TAB_REQ }, - { GSN_ALTER_TAB_CONF, printALTER_TAB_CONF }, - { GSN_ALTER_TAB_REF, printALTER_TAB_REF }, - { GSN_CREATE_TRIG_REQ, printCREATE_TRIG_REQ }, - { GSN_CREATE_TRIG_CONF, printCREATE_TRIG_CONF }, - { GSN_CREATE_TRIG_REF, printCREATE_TRIG_REF }, - { GSN_ALTER_TRIG_REQ, printALTER_TRIG_REQ }, - { GSN_ALTER_TRIG_CONF, printALTER_TRIG_CONF }, - { GSN_ALTER_TRIG_REF, printALTER_TRIG_REF }, - { GSN_DROP_TRIG_REQ, printDROP_TRIG_REQ }, - { GSN_DROP_TRIG_CONF, printDROP_TRIG_CONF }, - { GSN_DROP_TRIG_REF, printDROP_TRIG_REF }, - { GSN_FIRE_TRIG_ORD, printFIRE_TRIG_ORD }, - { GSN_TRIG_ATTRINFO, printTRIG_ATTRINFO }, - { GSN_CREATE_INDX_REQ, printCREATE_INDX_REQ }, - { GSN_CREATE_INDX_CONF, printCREATE_INDX_CONF }, - { GSN_CREATE_INDX_REF, printCREATE_INDX_REF }, - { GSN_DROP_INDX_REQ, printDROP_INDX_REQ }, - { GSN_DROP_INDX_CONF, printDROP_INDX_CONF }, - { GSN_DROP_INDX_REF, printDROP_INDX_REF }, - { GSN_ALTER_INDX_REQ, printALTER_INDX_REQ }, - { GSN_ALTER_INDX_CONF, printALTER_INDX_CONF }, - { GSN_ALTER_INDX_REF, printALTER_INDX_REF }, - { GSN_TCINDXCONF, printTCINDXCONF }, - { GSN_TCINDXREF, printTCINDXREF }, - { GSN_INDXKEYINFO, printINDXKEYINFO }, - { GSN_INDXATTRINFO, printINDXATTRINFO }, - { GSN_FSAPPENDREQ, printFSAPPENDREQ }, - 
{ GSN_BACKUP_REQ, printBACKUP_REQ }, - { GSN_BACKUP_DATA, printBACKUP_DATA }, - { GSN_BACKUP_REF, printBACKUP_REF }, - { GSN_BACKUP_CONF, printBACKUP_CONF }, - { GSN_ABORT_BACKUP_ORD, printABORT_BACKUP_ORD }, - { GSN_BACKUP_ABORT_REP, printBACKUP_ABORT_REP }, - { GSN_BACKUP_COMPLETE_REP, printBACKUP_COMPLETE_REP }, - { GSN_BACKUP_NF_COMPLETE_REP, printBACKUP_NF_COMPLETE_REP }, - { GSN_DEFINE_BACKUP_REQ, printDEFINE_BACKUP_REQ }, - { GSN_DEFINE_BACKUP_REF, printDEFINE_BACKUP_REF }, - { GSN_DEFINE_BACKUP_CONF, printDEFINE_BACKUP_CONF }, - { GSN_START_BACKUP_REQ, printSTART_BACKUP_REQ }, - { GSN_START_BACKUP_REF, printSTART_BACKUP_REF }, - { GSN_START_BACKUP_CONF, printSTART_BACKUP_CONF }, - { GSN_BACKUP_FRAGMENT_REQ, printBACKUP_FRAGMENT_REQ }, - { GSN_BACKUP_FRAGMENT_REF, printBACKUP_FRAGMENT_REF }, - { GSN_BACKUP_FRAGMENT_CONF, printBACKUP_FRAGMENT_CONF }, - { GSN_STOP_BACKUP_REQ, printSTOP_BACKUP_REQ }, - { GSN_STOP_BACKUP_REF, printSTOP_BACKUP_REF }, - { GSN_STOP_BACKUP_CONF, printSTOP_BACKUP_CONF }, - { GSN_BACKUP_STATUS_REQ, printBACKUP_STATUS_REQ }, - //{ GSN_BACKUP_STATUS_REF, printBACKUP_STATUS_REF }, - { GSN_BACKUP_STATUS_CONF, printBACKUP_STATUS_CONF }, - { GSN_UTIL_SEQUENCE_REQ, printUTIL_SEQUENCE_REQ }, - { GSN_UTIL_SEQUENCE_REF, printUTIL_SEQUENCE_REF }, - { GSN_UTIL_SEQUENCE_CONF, printUTIL_SEQUENCE_CONF }, - { GSN_UTIL_PREPARE_REQ, printUTIL_PREPARE_REQ }, - { GSN_UTIL_PREPARE_REF, printUTIL_PREPARE_REF }, - { GSN_UTIL_PREPARE_CONF, printUTIL_PREPARE_CONF }, - { GSN_UTIL_EXECUTE_REQ, printUTIL_EXECUTE_REQ }, - { GSN_UTIL_EXECUTE_REF, printUTIL_EXECUTE_REF }, - { GSN_UTIL_EXECUTE_CONF, printUTIL_EXECUTE_CONF }, - { GSN_SCAN_TABREQ, printSCANTABREQ }, - { GSN_SCAN_TABCONF, printSCANTABCONF }, - { GSN_SCAN_TABREF, printSCANTABREF }, - { GSN_SCAN_NEXTREQ, printSCANNEXTREQ }, - { GSN_LQHFRAGREQ, printLQH_FRAG_REQ }, - { GSN_LQHFRAGREF, printLQH_FRAG_REF }, - { GSN_LQHFRAGCONF, printLQH_FRAG_CONF }, - { GSN_PREP_DROP_TAB_REQ, printPREP_DROP_TAB_REQ }, - { GSN_PREP_DROP_TAB_REF, printPREP_DROP_TAB_REF }, - { GSN_PREP_DROP_TAB_CONF, printPREP_DROP_TAB_CONF }, - { GSN_DROP_TAB_REQ, printDROP_TAB_REQ }, - { GSN_DROP_TAB_REF, printDROP_TAB_REF }, - { GSN_DROP_TAB_CONF, printDROP_TAB_CONF }, - { GSN_LCP_FRAG_ORD, printLCP_FRAG_ORD }, - { GSN_LCP_FRAG_REP, printLCP_FRAG_REP }, - { GSN_LCP_COMPLETE_REP, printLCP_COMPLETE_REP }, - { GSN_START_LCP_REQ, printSTART_LCP_REQ }, - { GSN_START_LCP_CONF, printSTART_LCP_CONF }, - { GSN_MASTER_LCPREQ, printMASTER_LCP_REQ }, - { GSN_MASTER_LCPREF, printMASTER_LCP_REF }, - { GSN_MASTER_LCPCONF, printMASTER_LCP_CONF }, - { GSN_COPY_GCIREQ, printCOPY_GCI_REQ }, - { GSN_SYSTEM_ERROR, printSYSTEM_ERROR }, - { GSN_START_RECREQ, printSTART_REC_REQ }, - { GSN_START_RECCONF, printSTART_REC_CONF }, - { GSN_START_FRAGREQ, printSTART_FRAG_REQ }, - { GSN_NF_COMPLETEREP, printNF_COMPLETE_REP }, - { GSN_SIGNAL_DROPPED_REP, printSIGNAL_DROPPED_REP }, - { GSN_FAIL_REP, printFAIL_REP }, - { GSN_DISCONNECT_REP, printDISCONNECT_REP }, - - { GSN_SUB_CREATE_REQ, printSUB_CREATE_REQ }, - { GSN_SUB_CREATE_REF, printSUB_CREATE_REF }, - { GSN_SUB_CREATE_CONF, printSUB_CREATE_CONF }, - { GSN_SUB_REMOVE_REQ, printSUB_REMOVE_REQ }, - { GSN_SUB_REMOVE_REF, printSUB_REMOVE_REF }, - { GSN_SUB_REMOVE_CONF, printSUB_REMOVE_CONF }, - { GSN_SUB_START_REQ, printSUB_START_REQ }, - { GSN_SUB_START_REF, printSUB_START_REF }, - { GSN_SUB_START_CONF, printSUB_START_CONF }, - { GSN_SUB_STOP_REQ, printSUB_STOP_REQ }, - { GSN_SUB_STOP_REF, printSUB_STOP_REF }, - { GSN_SUB_STOP_CONF, 
printSUB_STOP_CONF }, - { GSN_SUB_SYNC_REQ, printSUB_SYNC_REQ }, - { GSN_SUB_SYNC_REF, printSUB_SYNC_REF }, - { GSN_SUB_SYNC_CONF, printSUB_SYNC_CONF }, - { GSN_SUB_TABLE_DATA, printSUB_TABLE_DATA }, - { GSN_SUB_SYNC_CONTINUE_REQ, printSUB_SYNC_CONTINUE_REQ }, - { GSN_SUB_SYNC_CONTINUE_REF, printSUB_SYNC_CONTINUE_REF }, - { GSN_SUB_SYNC_CONTINUE_CONF, printSUB_SYNC_CONTINUE_CONF }, - { GSN_SUB_GCP_COMPLETE_REP, printSUB_GCP_COMPLETE_REP } - - ,{ GSN_CREATE_FRAGMENTATION_REQ, printCREATE_FRAGMENTATION_REQ } - ,{ GSN_CREATE_FRAGMENTATION_REF, printCREATE_FRAGMENTATION_REF } - ,{ GSN_CREATE_FRAGMENTATION_CONF, printCREATE_FRAGMENTATION_CONF } - - ,{ GSN_UTIL_CREATE_LOCK_REQ, printUTIL_CREATE_LOCK_REQ } - ,{ GSN_UTIL_CREATE_LOCK_REF, printUTIL_CREATE_LOCK_REF } - ,{ GSN_UTIL_CREATE_LOCK_CONF, printUTIL_CREATE_LOCK_CONF } - ,{ GSN_UTIL_DESTROY_LOCK_REQ, printUTIL_DESTROY_LOCK_REQ } - ,{ GSN_UTIL_DESTROY_LOCK_REF, printUTIL_DESTROY_LOCK_REF } - ,{ GSN_UTIL_DESTROY_LOCK_CONF, printUTIL_DESTROY_LOCK_CONF } - ,{ GSN_UTIL_LOCK_REQ, printUTIL_LOCK_REQ } - ,{ GSN_UTIL_LOCK_REF, printUTIL_LOCK_REF } - ,{ GSN_UTIL_LOCK_CONF, printUTIL_LOCK_CONF } - ,{ GSN_UTIL_UNLOCK_REQ, printUTIL_UNLOCK_REQ } - ,{ GSN_UTIL_UNLOCK_REF, printUTIL_UNLOCK_REF } - ,{ GSN_UTIL_UNLOCK_CONF, printUTIL_UNLOCK_CONF } - ,{ GSN_CNTR_START_REQ, printCNTR_START_REQ } - ,{ GSN_CNTR_START_REF, printCNTR_START_REF } - ,{ GSN_CNTR_START_CONF, printCNTR_START_CONF } - - ,{ GSN_READ_NODESCONF, printREAD_NODES_CONF } - - ,{ GSN_TUX_MAINT_REQ, printTUX_MAINT_REQ } - ,{ GSN_ACC_LOCKREQ, printACC_LOCKREQ } - ,{ GSN_LQH_TRANSCONF, printLQH_TRANSCONF } - ,{ GSN_SCAN_FRAGREQ, printSCAN_FRAGREQ } - ,{ GSN_START_FRAGREQ, printSTART_FRAG_REQ } - ,{ 0, 0 } -}; - -#include - -template struct BitmaskPOD<1>; -template struct BitmaskPOD<2>; -template struct BitmaskPOD<4>; -template class Bitmask<1>; -template class Bitmask<2>; -template class Bitmask<4>; diff --git a/storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp b/storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp deleted file mode 100644 index 2ef807fa28b..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/SignalDroppedRep.cpp +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -bool -printSIGNAL_DROPPED_REP(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - SignalDroppedRep * sig = (SignalDroppedRep*)theData; - - fprintf(output, " originalGsn: %s(%d) Length: %d SectionCount: %d\n", - getSignalName(sig->originalGsn), - sig->originalGsn, - sig->originalLength, - sig->originalSectionCount); - return false; -} diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp deleted file mode 100644 index 789a30931c9..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ /dev/null @@ -1,649 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -const GsnName SignalNames [] = { - { GSN_API_REGCONF, "API_REGCONF" } - ,{ GSN_API_REGREF, "API_REGREF" } - ,{ GSN_API_REGREQ, "API_REGREQ" } - ,{ GSN_ATTRINFO, "ATTRINFO" } - ,{ GSN_SCHEMA_INFO, "SCHEMA_INFO" } - ,{ GSN_SCHEMA_INFOCONF, "SCHEMA_INFOCONF" } - ,{ GSN_GET_SCHEMA_INFOREQ, "GET_SCHEMA_INFOREQ" } - ,{ GSN_DIHNDBTAMPER, "DIHNDBTAMPER" } - ,{ GSN_KEYINFO, "KEYINFO" } - ,{ GSN_KEYINFO20, "KEYINFO20" } - ,{ GSN_KEYINFO20_R, "KEYINFO20_R" } - ,{ GSN_NODE_FAILREP, "NODE_FAILREP" } - ,{ GSN_READCONF, "READCONF" } - ,{ GSN_SCAN_NEXTREQ, "SCAN_NEXTREQ" } - ,{ GSN_SCAN_TABCONF, "SCAN_TABCONF" } - ,{ GSN_SCAN_TABREF, "SCAN_TABREF" } - ,{ GSN_SCAN_TABREQ, "SCAN_TABREQ" } - ,{ GSN_TC_COMMITCONF, "TC_COMMITCONF" } - ,{ GSN_TC_COMMITREF, "TC_COMMITREF" } - ,{ GSN_TC_COMMITREQ, "TC_COMMITREQ" } - ,{ GSN_TCKEY_FAILCONF, "TCKEY_FAILCONF" } - ,{ GSN_TCKEY_FAILREF, "TCKEY_FAILREF" } - ,{ GSN_TCKEYCONF, "TCKEYCONF" } - ,{ GSN_TCKEYREF, "TCKEYREF" } - ,{ GSN_TCKEYREQ, "TCKEYREQ" } - ,{ GSN_TCRELEASECONF, "TCRELEASECONF" } - ,{ GSN_TCRELEASEREF, "TCRELEASEREF" } - ,{ GSN_TCRELEASEREQ, "TCRELEASEREQ" } - ,{ GSN_TCROLLBACKCONF, "TCROLLBACKCONF" } - ,{ GSN_TCROLLBACKREF, "TCROLLBACKREF" } - ,{ GSN_TCROLLBACKREQ, "TCROLLBACKREQ" } - ,{ GSN_TCROLLBACKREP, "TCROLLBACKREP" } - ,{ GSN_TCSEIZECONF, "TCSEIZECONF" } - ,{ GSN_TCSEIZEREF, "TCSEIZEREF" } - ,{ GSN_TCSEIZEREQ, "TCSEIZEREQ" } - ,{ GSN_TRANSID_AI, "TRANSID_AI" } - ,{ GSN_TRANSID_AI_R, "TRANSID_AI_R" } - ,{ GSN_ABORT, "ABORT" } - ,{ GSN_ABORTCONF, "ABORTCONF" } - ,{ GSN_ABORTED, "ABORTED" } - ,{ GSN_ABORTREQ, "ABORTREQ" } - ,{ GSN_ACC_ABORTCONF, "ACC_ABORTCONF" } - ,{ GSN_ACC_ABORTREQ, "ACC_ABORTREQ" } - ,{ GSN_ACC_CHECK_SCAN, "ACC_CHECK_SCAN" } - ,{ GSN_ACC_COMMITCONF, "ACC_COMMITCONF" } - ,{ GSN_ACC_COMMITREQ, "ACC_COMMITREQ" } - ,{ GSN_ACC_OVER_REC, "ACC_OVER_REC" } - ,{ GSN_ACC_SCAN_INFO, "ACC_SCAN_INFO" } - ,{ GSN_ACC_SCAN_INFO24, 
"ACC_SCAN_INFO24" } - ,{ GSN_ACC_SCANCONF, "ACC_SCANCONF" } - ,{ GSN_ACC_SCANREF, "ACC_SCANREF" } - ,{ GSN_ACC_SCANREQ, "ACC_SCANREQ" } - ,{ GSN_ACC_TO_CONF, "ACC_TO_CONF" } - ,{ GSN_ACC_TO_REF, "ACC_TO_REF" } - ,{ GSN_ACC_TO_REQ, "ACC_TO_REQ" } - ,{ GSN_ACCFRAGCONF, "ACCFRAGCONF" } - ,{ GSN_ACCFRAGREF, "ACCFRAGREF" } - ,{ GSN_ACCFRAGREQ, "ACCFRAGREQ" } - ,{ GSN_ACCKEYCONF, "ACCKEYCONF" } - ,{ GSN_ACCKEYREF, "ACCKEYREF" } - ,{ GSN_ACCKEYREQ, "ACCKEYREQ" } - ,{ GSN_ACCMINUPDATE, "ACCMINUPDATE" } - ,{ GSN_ACCSEIZECONF, "ACCSEIZECONF" } - ,{ GSN_ACCSEIZEREF, "ACCSEIZEREF" } - ,{ GSN_ACCSEIZEREQ, "ACCSEIZEREQ" } - ,{ GSN_ACCUPDATECONF, "ACCUPDATECONF" } - ,{ GSN_ACCUPDATEKEY, "ACCUPDATEKEY" } - ,{ GSN_ACCUPDATEREF, "ACCUPDATEREF" } - ,{ GSN_ADD_FRAGCONF, "ADD_FRAGCONF" } - ,{ GSN_ADD_FRAGREF, "ADD_FRAGREF" } - ,{ GSN_ADD_FRAGREQ, "ADD_FRAGREQ" } - ,{ GSN_API_FAILCONF, "API_FAILCONF" } - ,{ GSN_API_FAILREQ, "API_FAILREQ" } - ,{ GSN_CHECK_LCP_STOP, "CHECK_LCP_STOP" } - ,{ GSN_CLOSE_COMCONF, "CLOSE_COMCONF" } - ,{ GSN_CLOSE_COMREQ, "CLOSE_COMREQ" } - ,{ GSN_CM_ACKADD, "CM_ACKADD" } - ,{ GSN_CM_ADD, "CM_ADD" } - ,{ GSN_CM_ADD_REP, "CM_ADD_REP" } - ,{ GSN_CM_HEARTBEAT, "CM_HEARTBEAT" } - ,{ GSN_CM_NODEINFOCONF, "CM_NODEINFOCONF" } - ,{ GSN_CM_NODEINFOREF, "CM_NODEINFOREF" } - ,{ GSN_CM_NODEINFOREQ, "CM_NODEINFOREQ" } - ,{ GSN_CM_REGCONF, "CM_REGCONF" } - ,{ GSN_CM_REGREF, "CM_REGREF" } - ,{ GSN_CM_REGREQ, "CM_REGREQ" } - ,{ GSN_CNTR_START_REQ, "CNTR_START_REQ" } - ,{ GSN_CNTR_START_REF, "CNTR_START_REF" } - ,{ GSN_CNTR_START_CONF, "CNTR_START_CONF" } - ,{ GSN_CNTR_START_REP, "CNTR_START_REP" } - ,{ GSN_CNTR_WAITREP, "CNTR_WAITREP" } - ,{ GSN_COMMIT, "COMMIT" } - ,{ GSN_COMMIT_FAILCONF, "COMMIT_FAILCONF" } - ,{ GSN_COMMIT_FAILREQ, "COMMIT_FAILREQ" } - ,{ GSN_COMMITCONF, "COMMITCONF" } - ,{ GSN_COMMITREQ, "COMMITREQ" } - ,{ GSN_COMMITTED, "COMMITTED" } - ,{ GSN_LCP_FRAG_ORD, "LCP_FRAG_ORD" } - ,{ GSN_LCP_FRAG_REP, "LCP_FRAG_REP" } - ,{ GSN_LCP_COMPLETE_REP, "LCP_COMPLETE_REP" } - ,{ GSN_START_LCP_REQ, "START_LCP_REQ" } - ,{ GSN_START_LCP_CONF, "START_LCP_CONF" } - ,{ GSN_COMPLETE, "COMPLETE" } - ,{ GSN_COMPLETECONF, "COMPLETECONF" } - ,{ GSN_COMPLETED, "COMPLETED" } - ,{ GSN_COMPLETEREQ, "COMPLETEREQ" } - ,{ GSN_CONNECT_REP, "CONNECT_REP" } - ,{ GSN_CONTINUEB, "CONTINUEB" } - ,{ GSN_COPY_ACTIVECONF, "COPY_ACTIVECONF" } - ,{ GSN_COPY_ACTIVEREF, "COPY_ACTIVEREF" } - ,{ GSN_COPY_ACTIVEREQ, "COPY_ACTIVEREQ" } - ,{ GSN_COPY_FRAGCONF, "COPY_FRAGCONF" } - ,{ GSN_COPY_FRAGREF, "COPY_FRAGREF" } - ,{ GSN_COPY_FRAGREQ, "COPY_FRAGREQ" } - ,{ GSN_COPY_GCICONF, "COPY_GCICONF" } - ,{ GSN_COPY_GCIREQ, "COPY_GCIREQ" } - ,{ GSN_COPY_STATECONF, "COPY_STATECONF" } - ,{ GSN_COPY_STATEREQ, "COPY_STATEREQ" } - ,{ GSN_COPY_TABCONF, "COPY_TABCONF" } - ,{ GSN_COPY_TABREQ, "COPY_TABREQ" } - ,{ GSN_CREATE_FRAGCONF, "CREATE_FRAGCONF" } - ,{ GSN_CREATE_FRAGREF, "CREATE_FRAGREF" } - ,{ GSN_CREATE_FRAGREQ, "CREATE_FRAGREQ" } - ,{ GSN_DEBUG_SIG, "DEBUG_SIG" } - ,{ GSN_DI_FCOUNTCONF, "DI_FCOUNTCONF" } - ,{ GSN_DI_FCOUNTREF, "DI_FCOUNTREF" } - ,{ GSN_DI_FCOUNTREQ, "DI_FCOUNTREQ" } - ,{ GSN_DIADDTABCONF, "DIADDTABCONF" } - ,{ GSN_DIADDTABREF, "DIADDTABREF" } - ,{ GSN_DIADDTABREQ, "DIADDTABREQ" } - ,{ GSN_DICTSTARTCONF, "DICTSTARTCONF" } - ,{ GSN_DICTSTARTREQ, "DICTSTARTREQ" } - ,{ GSN_LIST_TABLES_REQ, "LIST_TABLES_REQ" } - ,{ GSN_LIST_TABLES_CONF, "LIST_TABLES_CONF" } - ,{ GSN_DIGETNODESCONF, "DIGETNODESCONF" } - ,{ GSN_DIGETNODESREF, "DIGETNODESREF" } - ,{ GSN_DIGETNODESREQ, "DIGETNODESREQ" } - ,{ GSN_DIGETPRIMCONF, "DIGETPRIMCONF" } 
- ,{ GSN_DIGETPRIMREF, "DIGETPRIMREF" } - ,{ GSN_DIGETPRIMREQ, "DIGETPRIMREQ" } - ,{ GSN_DIH_RESTARTCONF, "DIH_RESTARTCONF" } - ,{ GSN_DIH_RESTARTREF, "DIH_RESTARTREF" } - ,{ GSN_DIH_RESTARTREQ, "DIH_RESTARTREQ" } - - ,{ GSN_DIRELEASECONF, "DIRELEASECONF" } - ,{ GSN_DIRELEASEREF, "DIRELEASEREF" } - ,{ GSN_DIRELEASEREQ, "DIRELEASEREQ" } - ,{ GSN_DISCONNECT_REP, "DISCONNECT_REP" } - ,{ GSN_DISEIZECONF, "DISEIZECONF" } - ,{ GSN_DISEIZEREF, "DISEIZEREF" } - ,{ GSN_DISEIZEREQ, "DISEIZEREQ" } - ,{ GSN_DIVERIFYCONF, "DIVERIFYCONF" } - ,{ GSN_DIVERIFYREF, "DIVERIFYREF" } - ,{ GSN_DIVERIFYREQ, "DIVERIFYREQ" } - ,{ GSN_EMPTY_LCP_REQ, "EMPTY_LCP_REQ" } - ,{ GSN_EMPTY_LCP_CONF, "EMPTY_LCP_CONF" } - ,{ GSN_ENABLE_COMORD, "ENABLE_COMORD" } - ,{ GSN_END_LCPCONF, "END_LCPCONF" } - ,{ GSN_END_LCPREQ, "END_LCPREQ" } - ,{ GSN_END_TOCONF, "END_TOCONF" } - ,{ GSN_END_TOREQ, "END_TOREQ" } - ,{ GSN_EVENT_REP, "EVENT_REP" } - ,{ GSN_EXEC_FRAGCONF, "EXEC_FRAGCONF" } - ,{ GSN_EXEC_FRAGREF, "EXEC_FRAGREF" } - ,{ GSN_EXEC_FRAGREQ, "EXEC_FRAGREQ" } - ,{ GSN_EXEC_SRCONF, "EXEC_SRCONF" } - ,{ GSN_EXEC_SRREQ, "EXEC_SRREQ" } - ,{ GSN_EXPANDCHECK2, "EXPANDCHECK2" } - ,{ GSN_FAIL_REP, "FAIL_REP" } - ,{ GSN_FSCLOSECONF, "FSCLOSECONF" } - ,{ GSN_FSCLOSEREF, "FSCLOSEREF" } - ,{ GSN_FSCLOSEREQ, "FSCLOSEREQ" } - ,{ GSN_FSOPENCONF, "FSOPENCONF" } - ,{ GSN_FSOPENREF, "FSOPENREF" } - ,{ GSN_FSOPENREQ, "FSOPENREQ" } - ,{ GSN_FSREADCONF, "FSREADCONF" } - ,{ GSN_FSREADREF, "FSREADREF" } - ,{ GSN_FSREADREQ, "FSREADREQ" } - ,{ GSN_FSSYNCCONF, "FSSYNCCONF" } - ,{ GSN_FSSYNCREF, "FSSYNCREF" } - ,{ GSN_FSSYNCREQ, "FSSYNCREQ" } - ,{ GSN_FSWRITECONF, "FSWRITECONF" } - ,{ GSN_FSWRITEREF, "FSWRITEREF" } - ,{ GSN_FSWRITEREQ, "FSWRITEREQ" } - ,{ GSN_FSAPPENDCONF, "FSAPPENDCONF" } - ,{ GSN_FSAPPENDREF, "FSAPPENDREF" } - ,{ GSN_FSAPPENDREQ, "FSAPPENDREQ" } - ,{ GSN_FSREMOVECONF, "FSREMOVECONF" } - ,{ GSN_FSREMOVEREF, "FSREMOVEREF" } - ,{ GSN_FSREMOVEREQ, "FSREMOVEREQ" } - ,{ GSN_GCP_ABORT, "GCP_ABORT" } - ,{ GSN_GCP_ABORTED, "GCP_ABORTED" } - ,{ GSN_GCP_COMMIT, "GCP_COMMIT" } - ,{ GSN_GCP_NODEFINISH, "GCP_NODEFINISH" } - ,{ GSN_GCP_NOMORETRANS, "GCP_NOMORETRANS" } - ,{ GSN_GCP_PREPARE, "GCP_PREPARE" } - ,{ GSN_GCP_PREPARECONF, "GCP_PREPARECONF" } - ,{ GSN_GCP_PREPAREREF, "GCP_PREPAREREF" } - ,{ GSN_GCP_SAVECONF, "GCP_SAVECONF" } - ,{ GSN_GCP_SAVEREF, "GCP_SAVEREF" } - ,{ GSN_GCP_SAVEREQ, "GCP_SAVEREQ" } - ,{ GSN_GCP_TCFINISHED, "GCP_TCFINISHED" } - ,{ GSN_GET_TABINFOREF, "GET_TABINFOREF" } - ,{ GSN_GET_TABINFOREQ, "GET_TABINFOREQ" } - ,{ GSN_GET_TABINFO_CONF, "GET_TABINFO_CONF" } - ,{ GSN_GETGCICONF, "GETGCICONF" } - ,{ GSN_GETGCIREQ, "GETGCIREQ" } - ,{ GSN_HOT_SPAREREP, "HOT_SPAREREP" } - ,{ GSN_INCL_NODECONF, "INCL_NODECONF" } - ,{ GSN_INCL_NODEREF, "INCL_NODEREF" } - ,{ GSN_INCL_NODEREQ, "INCL_NODEREQ" } - ,{ GSN_LQH_TRANSCONF, "LQH_TRANSCONF" } - ,{ GSN_LQH_TRANSREQ, "LQH_TRANSREQ" } - ,{ GSN_LQHADDATTCONF, "LQHADDATTCONF" } - ,{ GSN_LQHADDATTREF, "LQHADDATTREF" } - ,{ GSN_LQHADDATTREQ, "LQHADDATTREQ" } - ,{ GSN_LQHFRAGCONF, "LQHFRAGCONF" } - ,{ GSN_LQHFRAGREF, "LQHFRAGREF" } - ,{ GSN_LQHFRAGREQ, "LQHFRAGREQ" } - ,{ GSN_LQHKEYCONF, "LQHKEYCONF" } - ,{ GSN_LQHKEYREF, "LQHKEYREF" } - ,{ GSN_LQHKEYREQ, "LQHKEYREQ" } - ,{ GSN_MASTER_GCPCONF, "MASTER_GCPCONF" } - ,{ GSN_MASTER_GCPREF, "MASTER_GCPREF" } - ,{ GSN_MASTER_GCPREQ, "MASTER_GCPREQ" } - ,{ GSN_MASTER_LCPCONF, "MASTER_LCPCONF" } - ,{ GSN_MASTER_LCPREF, "MASTER_LCPREF" } - ,{ GSN_MASTER_LCPREQ, "MASTER_LCPREQ" } - ,{ GSN_MEMCHECKCONF, "MEMCHECKCONF" } - ,{ GSN_MEMCHECKREQ, "MEMCHECKREQ" } - 
,{ GSN_NDB_FAILCONF, "NDB_FAILCONF" } - ,{ GSN_NDB_STARTCONF, "NDB_STARTCONF" } - ,{ GSN_NDB_STARTREF, "NDB_STARTREF" } - ,{ GSN_NDB_STARTREQ, "NDB_STARTREQ" } - ,{ GSN_NDB_STTOR, "NDB_STTOR" } - ,{ GSN_NDB_STTORRY, "NDB_STTORRY" } - ,{ GSN_NDB_TAMPER, "NDB_TAMPER" } - ,{ GSN_NEXT_SCANCONF, "NEXT_SCANCONF" } - ,{ GSN_NEXT_SCANREF, "NEXT_SCANREF" } - ,{ GSN_NEXT_SCANREQ, "NEXT_SCANREQ" } - ,{ GSN_NEXTOPERATION, "NEXTOPERATION" } - ,{ GSN_NF_COMPLETEREP, "NF_COMPLETEREP" } - ,{ GSN_OPEN_COMCONF, "OPEN_COMCONF" } - ,{ GSN_OPEN_COMREF, "OPEN_COMREF" } - ,{ GSN_OPEN_COMREQ, "OPEN_COMREQ" } - ,{ GSN_PACKED_SIGNAL, "PACKED_SIGNAL" } - ,{ GSN_PREP_FAILCONF, "PREP_FAILCONF" } - ,{ GSN_PREP_FAILREF, "PREP_FAILREF" } - ,{ GSN_PREP_FAILREQ, "PREP_FAILREQ" } - ,{ GSN_PRES_TOCONF, "PRES_TOCONF" } - ,{ GSN_PRES_TOREQ, "PRES_TOREQ" } - ,{ GSN_READ_NODESCONF, "READ_NODESCONF" } - ,{ GSN_READ_NODESREF, "READ_NODESREF" } - ,{ GSN_READ_NODESREQ, "READ_NODESREQ" } - ,{ GSN_SCAN_FRAGCONF, "SCAN_FRAGCONF" } - ,{ GSN_SCAN_FRAGREF, "SCAN_FRAGREF" } - ,{ GSN_SCAN_FRAGREQ, "SCAN_FRAGREQ" } - ,{ GSN_SCAN_HBREP, "SCAN_HBREP" } - ,{ GSN_SCAN_PROCCONF, "SCAN_PROCCONF" } - ,{ GSN_SCAN_PROCREQ, "SCAN_PROCREQ" } - ,{ GSN_SEND_PACKED, "SEND_PACKED" } - ,{ GSN_SET_LOGLEVELORD, "SET_LOGLEVELORD" } - ,{ GSN_SHRINKCHECK2, "SHRINKCHECK2" } - ,{ GSN_READ_CONFIG_REQ, "READ_CONFIG_REQ" } - ,{ GSN_READ_CONFIG_CONF, "READ_CONFIG_CONF" } - ,{ GSN_START_COPYCONF, "START_COPYCONF" } - ,{ GSN_START_COPYREF, "START_COPYREF" } - ,{ GSN_START_COPYREQ, "START_COPYREQ" } - ,{ GSN_START_EXEC_SR, "START_EXEC_SR" } - ,{ GSN_START_FRAGCONF, "START_FRAGCONF" } - ,{ GSN_START_FRAGREF, "START_FRAGREF" } - ,{ GSN_START_FRAGREQ, "START_FRAGREQ" } - ,{ GSN_START_LCP_REF, "START_LCP_REF" } - ,{ GSN_START_LCP_ROUND, "START_LCP_ROUND" } - ,{ GSN_START_MECONF, "START_MECONF" } - ,{ GSN_START_MEREF, "START_MEREF" } - ,{ GSN_START_MEREQ, "START_MEREQ" } - ,{ GSN_START_PERMCONF, "START_PERMCONF" } - ,{ GSN_START_PERMREF, "START_PERMREF" } - ,{ GSN_START_PERMREQ, "START_PERMREQ" } - ,{ GSN_START_RECCONF, "START_RECCONF" } - ,{ GSN_START_RECREF, "START_RECREF" } - ,{ GSN_START_RECREQ, "START_RECREQ" } - ,{ GSN_START_TOCONF, "START_TOCONF" } - ,{ GSN_START_TOREQ, "START_TOREQ" } - ,{ GSN_STORED_PROCCONF, "STORED_PROCCONF" } - ,{ GSN_STORED_PROCREF, "STORED_PROCREF" } - ,{ GSN_STORED_PROCREQ, "STORED_PROCREQ" } - ,{ GSN_STTOR, "STTOR" } - ,{ GSN_STTORRY, "STTORRY" } - ,{ GSN_SYSTEM_ERROR, "SYSTEM_ERROR" } - ,{ GSN_TAB_COMMITCONF, "TAB_COMMITCONF" } - ,{ GSN_TAB_COMMITREF, "TAB_COMMITREF" } - ,{ GSN_TAB_COMMITREQ, "TAB_COMMITREQ" } - ,{ GSN_TAKE_OVERTCCONF, "TAKE_OVERTCCONF" } - ,{ GSN_TAKE_OVERTCREQ, "TAKE_OVERTCREQ" } - ,{ GSN_TC_CLOPSIZECONF, "TC_CLOPSIZECONF" } - ,{ GSN_TC_CLOPSIZEREQ, "TC_CLOPSIZEREQ" } - ,{ GSN_TC_SCHVERCONF, "TC_SCHVERCONF" } - ,{ GSN_TC_SCHVERREQ, "TC_SCHVERREQ" } - ,{ GSN_TCGETOPSIZECONF, "TCGETOPSIZECONF" } - ,{ GSN_TCGETOPSIZEREQ, "TCGETOPSIZEREQ" } - ,{ GSN_TEST_ORD, "TEST_ORD" } - ,{ GSN_TESTSIG, "TESTSIG" } - ,{ GSN_TIME_SIGNAL, "TIME_SIGNAL" } - ,{ GSN_TUP_ABORTREQ, "TUP_ABORTREQ" } - ,{ GSN_TUP_ADD_ATTCONF, "TUP_ADD_ATTCONF" } - ,{ GSN_TUP_ADD_ATTRREF, "TUP_ADD_ATTRREF" } - ,{ GSN_TUP_ADD_ATTRREQ, "TUP_ADD_ATTRREQ" } - ,{ GSN_TUP_ATTRINFO, "TUP_ATTRINFO" } - ,{ GSN_TUP_COMMITREQ, "TUP_COMMITREQ" } - ,{ GSN_TUPFRAGCONF, "TUPFRAGCONF" } - ,{ GSN_TUPFRAGREF, "TUPFRAGREF" } - ,{ GSN_TUPFRAGREQ, "TUPFRAGREQ" } - ,{ GSN_TUPKEYCONF, "TUPKEYCONF" } - ,{ GSN_TUPKEYREF, "TUPKEYREF" } - ,{ GSN_TUPKEYREQ, "TUPKEYREQ" } - ,{ GSN_TUPRELEASECONF, 
"TUPRELEASECONF" } - ,{ GSN_TUPRELEASEREF, "TUPRELEASEREF" } - ,{ GSN_TUPRELEASEREQ, "TUPRELEASEREQ" } - ,{ GSN_TUPSEIZECONF, "TUPSEIZECONF" } - ,{ GSN_TUPSEIZEREF, "TUPSEIZEREF" } - ,{ GSN_TUPSEIZEREQ, "TUPSEIZEREQ" } - ,{ GSN_UNBLO_DICTCONF, "UNBLO_DICTCONF" } - ,{ GSN_UNBLO_DICTREQ, "UNBLO_DICTREQ" } - ,{ GSN_UPDATE_TOCONF, "UPDATE_TOCONF" } - ,{ GSN_UPDATE_TOREF, "UPDATE_TOREF" } - ,{ GSN_UPDATE_TOREQ, "UPDATE_TOREQ" } - ,{ GSN_TUP_ALLOCREQ, "TUP_ALLOCREQ" } - ,{ GSN_LQH_ALLOCREQ, "LQH_ALLOCREQ" } - ,{ GSN_TUP_DEALLOCREQ, "TUP_DEALLOCREQ" } - ,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" } - ,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" } - - ,{ GSN_START_ORD, "START_ORD" } - ,{ GSN_STOP_ORD, "STOP_ORD" } - ,{ GSN_TAMPER_ORD, "TAMPER_ORD" } - - ,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" } - ,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" } - ,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" } - ,{ GSN_DUMP_STATE_ORD, "DUMP_STATE_ORD" } - - ,{ GSN_NODE_START_REP, "NODE_START_REP" } - - ,{ GSN_START_INFOREQ, "START_INFOREQ" } - ,{ GSN_START_INFOREF, "START_INFOREF" } - ,{ GSN_START_INFOCONF, "START_INFOCONF" } - - ,{ GSN_CHECKNODEGROUPSREQ, "CHECKNODEGROUPSREQ" } - ,{ GSN_CHECKNODEGROUPSCONF, "CHECKNODEGROUPSCONF" } - - ,{ GSN_ARBIT_PREPREQ, "ARBIT_PREPREQ" } - ,{ GSN_ARBIT_PREPCONF, "ARBIT_PREPCONF" } - ,{ GSN_ARBIT_PREPREF, "ARBIT_PREPREF" } - ,{ GSN_ARBIT_STARTREQ, "ARBIT_STARTREQ" } - ,{ GSN_ARBIT_STARTCONF, "ARBIT_STARTCONF" } - ,{ GSN_ARBIT_STARTREF, "ARBIT_STARTREF" } - ,{ GSN_ARBIT_CHOOSEREQ, "ARBIT_CHOOSEREQ" } - ,{ GSN_ARBIT_CHOOSECONF, "ARBIT_CHOOSECONF" } - ,{ GSN_ARBIT_CHOOSEREF, "ARBIT_CHOOSEREF" } - ,{ GSN_ARBIT_STOPORD, "ARBIT_STOPORD" } - ,{ GSN_ARBIT_STOPREP, "ARBIT_STOPREP" } - - ,{ GSN_TC_COMMIT_ACK, "TC_COMMIT_ACK" } - ,{ GSN_REMOVE_MARKER_ORD, "REMOVE_MARKER_ORD" } - - ,{ GSN_NODE_STATE_REP, "NODE_STATE_REP" } - ,{ GSN_CHANGE_NODE_STATE_REQ, "CHANGE_NODE_STATE_REQ" } - ,{ GSN_CHANGE_NODE_STATE_CONF, "CHANGE_NODE_STATE_CONF" } - - ,{ GSN_BLOCK_COMMIT_ORD, "BLOCK_COMMIT_ORD" } - ,{ GSN_UNBLOCK_COMMIT_ORD, "UNBLOCK_COMMIT_ORD" } - - ,{ GSN_DIH_SWITCH_REPLICA_REQ, "DIH_SWITCH_REPLICA_REQ" } - ,{ GSN_DIH_SWITCH_REPLICA_REF, "DIH_SWITCH_REPLICA_REF" } - ,{ GSN_DIH_SWITCH_REPLICA_CONF, "DIH_SWITCH_REPLICA_CONF" } - - ,{ GSN_STOP_PERM_REQ, "STOP_PERM_REQ" } - ,{ GSN_STOP_PERM_REF, "STOP_PERM_REF" } - ,{ GSN_STOP_PERM_CONF, "STOP_PERM_CONF" } - - ,{ GSN_STOP_ME_REQ, "STOP_ME_REQ" } - ,{ GSN_STOP_ME_REF, "STOP_ME_REF" } - ,{ GSN_STOP_ME_CONF, "STOP_ME_CONF" } - - ,{ GSN_WAIT_GCP_REQ, "WAIT_GCP_REQ" } - ,{ GSN_WAIT_GCP_REF, "WAIT_GCP_REF" } - ,{ GSN_WAIT_GCP_CONF, "WAIT_GCP_CONF" } - - ,{ GSN_STOP_REQ, "STOP_REQ" } - ,{ GSN_STOP_REF, "STOP_REF" } - ,{ GSN_API_VERSION_REQ, "API_VERSION_REQ" } - ,{ GSN_API_VERSION_CONF, "API_VERSION_CONF" } - - ,{ GSN_ABORT_ALL_REQ, "ABORT_ALL_REQ" } - ,{ GSN_ABORT_ALL_REF, "ABORT_ALL_REF" } - ,{ GSN_ABORT_ALL_CONF, "ABORT_ALL_CONF" } - - ,{ GSN_DROP_TABLE_REQ, "DROP_TABLE_REQ" } - ,{ GSN_DROP_TABLE_REF, "DROP_TABLE_REF" } - ,{ GSN_DROP_TABLE_CONF, "DROP_TABLE_CONF" } - - ,{ GSN_DROP_TAB_REQ, "DROP_TAB_REQ" } - ,{ GSN_DROP_TAB_REF, "DROP_TAB_REF" } - ,{ GSN_DROP_TAB_CONF, "DROP_TAB_CONF" } - - ,{ GSN_PREP_DROP_TAB_REQ, "PREP_DROP_TAB_REQ" } - ,{ GSN_PREP_DROP_TAB_REF, "PREP_DROP_TAB_REF" } - ,{ GSN_PREP_DROP_TAB_CONF, "PREP_DROP_TAB_CONF" } - - ,{ GSN_WAIT_DROP_TAB_REQ, "WAIT_DROP_TAB_REQ" } - ,{ GSN_WAIT_DROP_TAB_REF, "WAIT_DROP_TAB_REF" } - ,{ GSN_WAIT_DROP_TAB_CONF, "WAIT_DROP_TAB_CONF" } - - ,{ GSN_CREATE_TRIG_REQ, "CREATE_TRIG_REQ" } 
- ,{ GSN_CREATE_TRIG_CONF, "CREATE_TRIG_CONF" } - ,{ GSN_CREATE_TRIG_REF, "CREATE_TRIG_REF" } - ,{ GSN_ALTER_TRIG_REQ, "ALTER_TRIG_REQ" } - ,{ GSN_ALTER_TRIG_CONF, "ALTER_TRIG_CONF" } - ,{ GSN_ALTER_TRIG_REF, "ALTER_TRIG_REF" } - ,{ GSN_DROP_TRIG_REQ, "DROP_TRIG_REQ" } - ,{ GSN_DROP_TRIG_CONF, "DROP_TRIG_CONF" } - ,{ GSN_DROP_TRIG_REF, "DROP_TRIG_REF" } - ,{ GSN_FIRE_TRIG_ORD, "FIRE_TRIG_ORD" } - ,{ GSN_TRIG_ATTRINFO, "TRIG_ATTRINFO" } - - ,{ GSN_CREATE_INDX_REQ, "CREATE_INDX_REQ" } - ,{ GSN_CREATE_INDX_CONF, "CREATE_INDX_CONF" } - ,{ GSN_CREATE_INDX_REF, "CREATE_INDX_REF" } - ,{ GSN_DROP_INDX_REQ, "DROP_INDX_REQ" } - ,{ GSN_DROP_INDX_CONF, "DROP_INDX_CONF" } - ,{ GSN_DROP_INDX_REF, "DROP_INDX_REF" } - ,{ GSN_ALTER_INDX_REQ, "ALTER_INDX_REQ" } - ,{ GSN_ALTER_INDX_CONF, "ALTER_INDX_CONF" } - ,{ GSN_ALTER_INDX_REF, "ALTER_INDX_REF" } - ,{ GSN_TCINDXREQ, "TCINDXREQ" } - ,{ GSN_TCINDXCONF, "TCINDXCONF" } - ,{ GSN_TCINDXREF, "TCINDXREF" } - ,{ GSN_INDXKEYINFO, "INDXKEYINFO" } - ,{ GSN_INDXATTRINFO, "INDXATTRINFO" } - ,{ GSN_BUILDINDXREQ, "BUILDINDXREQ" } - ,{ GSN_BUILDINDXCONF, "BUILDINDXCONF" } - ,{ GSN_BUILDINDXREF, "BUILDINDXREF" } - //,{ GSN_TCINDXNEXTREQ, "TCINDXNEXTREQ" } - //,{ GSN_TCINDEXNEXTCONF, "TCINDEXNEXTCONF" } - //,{ GSN_TCINDEXNEXREF, "TCINDEXNEXREF" } - - ,{ GSN_CREATE_EVNT_REQ, "CREATE_EVNT_REQ" } - ,{ GSN_CREATE_EVNT_CONF, "CREATE_EVNT_CONF" } - ,{ GSN_CREATE_EVNT_REF, "CREATE_EVNT_REF" } - - ,{ GSN_SUMA_START_ME_REQ, "SUMA_START_ME_REQ" } - ,{ GSN_SUMA_START_ME_REF, "SUMA_START_ME_REF" } - ,{ GSN_SUMA_START_ME_CONF, "SUMA_START_ME_CONF" } - ,{ GSN_SUMA_HANDOVER_REQ, "SUMA_HANDOVER_REQ"} - ,{ GSN_SUMA_HANDOVER_REF, "SUMA_HANDOVER_REF"} - ,{ GSN_SUMA_HANDOVER_CONF, "SUMA_HANDOVER_CONF"} - - ,{ GSN_DROP_EVNT_REQ, "DROP_EVNT_REQ" } - ,{ GSN_DROP_EVNT_CONF, "DROP_EVNT_CONF" } - ,{ GSN_DROP_EVNT_REF, "DROP_EVNT_REF" } - - ,{ GSN_BACKUP_TRIG_REQ, "BACKUP_TRIG_REQ" } - ,{ GSN_BACKUP_REQ, "BACKUP_REQ" } - ,{ GSN_BACKUP_DATA, "BACKUP_DATA" } - ,{ GSN_BACKUP_REF, "BACKUP_REF" } - ,{ GSN_BACKUP_CONF, "BACKUP_CONF" } - ,{ GSN_ABORT_BACKUP_ORD, "ABORT_BACKUP_ORD" } - ,{ GSN_BACKUP_ABORT_REP, "BACKUP_ABORT_REP" } - ,{ GSN_BACKUP_COMPLETE_REP, "BACKUP_COMPLETE_REP" } - ,{ GSN_BACKUP_NF_COMPLETE_REP, "BACKUP_NF_COMPLETE_REP" } - ,{ GSN_DEFINE_BACKUP_REQ, "DEFINE_BACKUP_REQ" } - ,{ GSN_DEFINE_BACKUP_REF, "DEFINE_BACKUP_REF" } - ,{ GSN_DEFINE_BACKUP_CONF, "DEFINE_BACKUP_CONF" } - ,{ GSN_START_BACKUP_REQ, "START_BACKUP_REQ" } - ,{ GSN_START_BACKUP_REF, "START_BACKUP_REF" } - ,{ GSN_START_BACKUP_CONF, "START_BACKUP_CONF" } - ,{ GSN_BACKUP_FRAGMENT_REQ, "BACKUP_FRAGMENT_REQ" } - ,{ GSN_BACKUP_FRAGMENT_REF, "BACKUP_FRAGMENT_REF" } - ,{ GSN_BACKUP_FRAGMENT_CONF, "BACKUP_FRAGMENT_CONF" } - ,{ GSN_STOP_BACKUP_REQ, "STOP_BACKUP_REQ" } - ,{ GSN_STOP_BACKUP_REF, "STOP_BACKUP_REF" } - ,{ GSN_STOP_BACKUP_CONF, "STOP_BACKUP_CONF" } - ,{ GSN_BACKUP_STATUS_REQ, "BACKUP_STATUS_REQ" } - ,{ GSN_BACKUP_STATUS_REF, "BACKUP_STATUS_REF" } - ,{ GSN_BACKUP_STATUS_CONF, "BACKUP_STATUS_CONF" } - ,{ GSN_SIGNAL_DROPPED_REP, "SIGNAL_DROPPED_REP" } - ,{ GSN_CONTINUE_FRAGMENTED, "CONTINUE_FRAGMENTED" } - - /** Util Block Services **/ - ,{ GSN_UTIL_SEQUENCE_REQ, "UTIL_SEQUENCE_REQ" } - ,{ GSN_UTIL_SEQUENCE_REF, "UTIL_SEQUENCE_REF" } - ,{ GSN_UTIL_SEQUENCE_CONF, "UTIL_SEQUENCE_CONF" } - ,{ GSN_UTIL_PREPARE_REQ, "UTIL_PREPARE_REQ" } - ,{ GSN_UTIL_PREPARE_CONF, "UTIL_PREPARE_CONF" } - ,{ GSN_UTIL_PREPARE_REF, "UTIL_PREPARE_REF" } - ,{ GSN_UTIL_EXECUTE_REQ, "UTIL_EXECUTE_REQ" } - ,{ GSN_UTIL_EXECUTE_CONF, "UTIL_EXECUTE_CONF" } - 
,{ GSN_UTIL_EXECUTE_REF, "UTIL_EXECUTE_REF" } - ,{ GSN_UTIL_RELEASE_REQ, "UTIL_RELEASE_REQ" } - ,{ GSN_UTIL_RELEASE_CONF, "UTIL_RELEASE_CONF" } - ,{ GSN_UTIL_RELEASE_REF, "UTIL_RELASE_REF" } - - /* Suma Block Services **/ - ,{ GSN_SUB_CREATE_REQ, "SUB_CREATE_REQ" } - ,{ GSN_SUB_CREATE_REF, "SUB_CREATE_REF" } - ,{ GSN_SUB_CREATE_CONF, "SUB_CREATE_CONF" } - ,{ GSN_SUB_REMOVE_REQ, "SUB_REMOVE_REQ" } - ,{ GSN_SUB_REMOVE_REF, "SUB_REMOVE_REF" } - ,{ GSN_SUB_REMOVE_CONF, "SUB_REMOVE_CONF" } - ,{ GSN_SUB_START_REQ, "SUB_START_REQ" } - ,{ GSN_SUB_START_REF, "SUB_START_REF" } - ,{ GSN_SUB_START_CONF, "SUB_START_CONF" } - ,{ GSN_SUB_STOP_REQ, "SUB_STOP_REQ" } - ,{ GSN_SUB_STOP_REF, "SUB_STOP_REF" } - ,{ GSN_SUB_STOP_CONF, "SUB_STOP_CONF" } - ,{ GSN_SUB_SYNC_REQ, "SUB_SYNC_REQ" } - ,{ GSN_SUB_SYNC_REF, "SUB_SYNC_REF" } - ,{ GSN_SUB_SYNC_CONF, "SUB_SYNC_CONF" } - ,{ GSN_SUB_TABLE_DATA, "SUB_TABLE_DATA" } - ,{ GSN_SUB_SYNC_CONTINUE_REQ, "SUB_SYNC_CONTINUE_REQ" } - ,{ GSN_SUB_SYNC_CONTINUE_REF, "SUB_SYNC_CONTINUE_REF" } - ,{ GSN_SUB_SYNC_CONTINUE_CONF, "SUB_SYNC_CONTINUE_CONF" } - ,{ GSN_SUB_GCP_COMPLETE_REP, "SUB_GCP_COMPLETE_REP" } - ,{ GSN_SUB_GCP_COMPLETE_ACK, "SUB_GCP_COMPLETE_ACK" } - - ,{ GSN_CREATE_SUBID_REQ, "CREATE_SUBID_REQ" } - ,{ GSN_CREATE_SUBID_REF, "CREATE_SUBID_REF" } - ,{ GSN_CREATE_SUBID_CONF, "CREATE_SUBID_CONF" } - - ,{ GSN_CREATE_TABLE_REQ, "CREATE_TABLE_REQ" } - ,{ GSN_CREATE_TABLE_REF, "CREATE_TABLE_REF" } - ,{ GSN_CREATE_TABLE_CONF, "CREATE_TABLE_CONF" } - - ,{ GSN_CREATE_TAB_REQ, "CREATE_TAB_REQ" } - ,{ GSN_CREATE_TAB_REF, "CREATE_TAB_REF" } - ,{ GSN_CREATE_TAB_CONF, "CREATE_TAB_CONF" } - - ,{ GSN_ALTER_TABLE_REQ, "ALTER_TABLE_REQ" } - ,{ GSN_ALTER_TABLE_REF, "ALTER_TABLE_REF" } - ,{ GSN_ALTER_TABLE_CONF, "ALTER_TABLE_CONF" } - - ,{ GSN_ALTER_TAB_REQ, "ALTER_TAB_REQ" } - ,{ GSN_ALTER_TAB_REF, "ALTER_TAB_REF" } - ,{ GSN_ALTER_TAB_CONF, "ALTER_TAB_CONF" } - - ,{ GSN_CREATE_FRAGMENTATION_REQ, "CREATE_FRAGMENTATION_REQ" } - ,{ GSN_CREATE_FRAGMENTATION_REF, "CREATE_FRAGMENTATION_REF" } - ,{ GSN_CREATE_FRAGMENTATION_CONF, "CREATE_FRAGMENTATION_CONF" } - - ,{ GSN_UTIL_CREATE_LOCK_REQ, "UTIL_CREATE_LOCK_REQ" } - ,{ GSN_UTIL_CREATE_LOCK_REF, "UTIL_CREATE_LOCK_REF" } - ,{ GSN_UTIL_CREATE_LOCK_CONF, "UTIL_CREATE_LOCK_CONF" } - ,{ GSN_UTIL_DESTROY_LOCK_REQ, "UTIL_DESTROY_LOCK_REQ" } - ,{ GSN_UTIL_DESTROY_LOCK_REF, "UTIL_DESTROY_LOCK_REF" } - ,{ GSN_UTIL_DESTROY_LOCK_CONF, "UTIL_DESTROY_LOCK_CONF" } - ,{ GSN_UTIL_LOCK_REQ, "UTIL_LOCK_REQ" } - ,{ GSN_UTIL_LOCK_REF, "UTIL_LOCK_REF" } - ,{ GSN_UTIL_LOCK_CONF, "UTIL_LOCK_CONF" } - ,{ GSN_UTIL_UNLOCK_REQ, "UTIL_UNLOCK_REQ" } - ,{ GSN_UTIL_UNLOCK_REF, "UTIL_UNLOCK_REF" } - ,{ GSN_UTIL_UNLOCK_CONF, "UTIL_UNLOCK_CONF" } - - /* TUX */ - ,{ GSN_TUXFRAGREQ, "TUXFRAGREQ" } - ,{ GSN_TUXFRAGCONF, "TUXFRAGCONF" } - ,{ GSN_TUXFRAGREF, "TUXFRAGREF" } - ,{ GSN_TUX_ADD_ATTRREQ, "TUX_ADD_ATTRREQ" } - ,{ GSN_TUX_ADD_ATTRCONF, "TUX_ADD_ATTRCONF" } - ,{ GSN_TUX_ADD_ATTRREF, "TUX_ADD_ATTRREF" } - ,{ GSN_TUX_MAINT_REQ, "TUX_MAINT_REQ" } - ,{ GSN_TUX_MAINT_CONF, "TUX_MAINT_CONF" } - ,{ GSN_TUX_MAINT_REF, "TUX_MAINT_REF" } - ,{ GSN_TUX_BOUND_INFO, "TUX_BOUND_INFO" } - ,{ GSN_ACC_LOCKREQ, "ACC_LOCKREQ" } - - ,{ GSN_CREATE_FILEGROUP_REQ, "CREATE_FILEGROUP_REQ" } - ,{ GSN_CREATE_FILEGROUP_REF, "CREATE_FILEGROUP_REF" } - ,{ GSN_CREATE_FILEGROUP_CONF, "CREATE_FILEGROUP_CONF" } - - ,{ GSN_CREATE_FILE_REQ, "CREATE_FILE_REQ" } - ,{ GSN_CREATE_FILE_REF, "CREATE_FILE_REF" } - ,{ GSN_CREATE_FILE_CONF, "CREATE_FILE_CONF" } - - ,{ GSN_DROP_FILEGROUP_REQ, "DROP_FILEGROUP_REQ" } - 
,{ GSN_DROP_FILEGROUP_REF, "DROP_FILEGROUP_REF" } - ,{ GSN_DROP_FILEGROUP_CONF, "DROP_FILEGROUP_CONF" } - - ,{ GSN_DROP_FILE_REQ, "DROP_FILE_REQ" } - ,{ GSN_DROP_FILE_REF, "DROP_FILE_REF" } - ,{ GSN_DROP_FILE_CONF, "DROP_FILE_CONF" } - - ,{ GSN_CREATE_OBJ_REQ, "CREATE_OBJ_REQ" } - ,{ GSN_CREATE_OBJ_REF, "CREATE_OBJ_REF" } - ,{ GSN_CREATE_OBJ_CONF, "CREATE_OBJ_CONF" } - - ,{ GSN_DROP_OBJ_REQ, "DROP_OBJ_REQ" } - ,{ GSN_DROP_OBJ_REF, "DROP_OBJ_REF" } - ,{ GSN_DROP_OBJ_CONF, "DROP_OBJ_CONF" } - - ,{ GSN_LCP_PREPARE_REQ, "LCP_PREPARE_REQ" } - ,{ GSN_LCP_PREPARE_REF, "LCP_PREPARE_REF" } - ,{ GSN_LCP_PREPARE_CONF, "LCP_PREPARE_CONF" } - - ,{ GSN_DICT_ABORT_REQ, "DICT_ABORT_REQ" } - ,{ GSN_DICT_ABORT_REF, "DICT_ABORT_REF" } - ,{ GSN_DICT_ABORT_CONF, "DICT_ABORT_CONF" } - - ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ" } - ,{ GSN_DICT_COMMIT_REF, "DICT_COMMIT_REF" } - ,{ GSN_DICT_COMMIT_CONF, "DICT_COMMIT_CONF" } - - /* DICT LOCK */ - ,{ GSN_DICT_LOCK_REQ, "DICT_LOCK_REQ" } - ,{ GSN_DICT_LOCK_CONF, "DICT_LOCK_CONF" } - ,{ GSN_DICT_LOCK_REF, "DICT_LOCK_REF" } - ,{ GSN_DICT_UNLOCK_ORD, "DICT_UNLOCK_ORD" } - - ,{ GSN_UPDATE_FRAG_DIST_KEY_ORD, "UPDATE_FRAG_DIST_KEY_ORD" } - ,{ GSN_DICT_COMMIT_REQ, "DICT_COMMIT_REQ"} - - ,{ GSN_ROUTE_ORD, "ROUTE_ORD" } - ,{ GSN_NODE_VERSION_REP, "NODE_VERSION_REP" } - - ,{ GSN_PREPARE_COPY_FRAG_REQ, "PREPARE_COPY_FRAG_REQ" } - ,{ GSN_PREPARE_COPY_FRAG_REF, "PREPARE_COPY_FRAG_REF" } - ,{ GSN_PREPARE_COPY_FRAG_CONF, "PREPARE_COPY_FRAG_CONF" } -}; -const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName); diff --git a/storage/ndb/src/common/debugger/signaldata/StartRec.cpp b/storage/ndb/src/common/debugger/signaldata/StartRec.cpp deleted file mode 100644 index 387b4085b12..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/StartRec.cpp +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -bool -printSTART_REC_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - StartRecReq * sig = (StartRecReq *) theData; - - fprintf(output, " receivingNodeId: %d senderRef: (%d, %d)\n", - sig->receivingNodeId, - refToNode(sig->senderRef), - refToBlock(sig->senderRef)); - - fprintf(output, " keepGci: %d lastCompletedGci: %d newestGci: %d\n", - sig->keepGci, - sig->lastCompletedGci, - sig->newestGci); - - return true; -} - -bool -printSTART_REC_CONF(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo){ - StartRecConf * sig = (StartRecConf *) theData; - - fprintf(output, " startingNodeId: %d\n", - sig->startingNodeId); - - return true; -} - -bool -printSTART_FRAG_REQ(FILE * output, - const Uint32 * theData, - Uint32 len, - Uint16 recBlockNo) -{ - StartFragReq* sig = (StartFragReq*)theData; - - fprintf(output, " table: %d frag: %d lcpId: %d lcpNo: %d #nodes: %d \n", - sig->tableId, sig->fragId, sig->lcpId, sig->lcpNo, - sig->noOfLogNodes); - - for(Uint32 i = 0; inoOfLogNodes; i++) - { - fprintf(output, " (node: %d startGci: %d lastGci: %d)", - sig->lqhLogNode[i], - sig->startGci[i], - sig->lastGci[i]); - } - - fprintf(output, "\n"); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp deleted file mode 100644 index 34bd8aa41e6..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp +++ /dev/null @@ -1,218 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printSUB_CREATE_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubCreateReq * const sig = (SubCreateReq *)theData; - fprintf(output, " senderRef: %x\n", sig->senderRef); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " subscriptionType: %x\n", sig->subscriptionType); - fprintf(output, " tableId: %x\n", sig->tableId); - return false; -} - -bool -printSUB_CREATE_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubCreateConf * const sig = (SubCreateConf *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_CREATE_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubCreateRef * const sig = (SubCreateRef *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_REMOVE_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const SubRemoveReq * const sig = (SubRemoveReq *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - return false; -} - -bool -printSUB_REMOVE_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const SubRemoveConf * const sig = (SubRemoveConf *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_REMOVE_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const SubRemoveRef * const sig = (SubRemoveRef *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " errorCode: %x\n", sig->errorCode); - return false; -} - -bool -printSUB_START_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStartReq * const sig = (SubStartReq *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_START_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStartRef * const sig = (SubStartRef *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " startPart: %x\n", sig->part); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " errorCode: %x\n", sig->errorCode); - return false; -} - -bool -printSUB_START_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStartConf * const sig = (SubStartConf *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - 
fprintf(output, " startPart: %x\n", sig->part); - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_STOP_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStopReq * const sig = (SubStopReq *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_STOP_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStopRef * const sig = (SubStopRef *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " errorCode: %x\n", sig->errorCode); - return false; -} - -bool -printSUB_STOP_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubStopConf * const sig = (SubStopConf *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_SYNC_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncReq * const sig = (SubSyncReq *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - fprintf(output, " syncPart: %x\n", sig->part); - return false; -} - -bool -printSUB_SYNC_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncRef * const sig = (SubSyncRef *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " errorCode: %x\n", sig->errorCode); - return false; -} - -bool -printSUB_SYNC_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncConf * const sig = (SubSyncConf *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - return false; -} - -bool -printSUB_TABLE_DATA(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubTableData * const sig = (SubTableData *)theData; - fprintf(output, " senderData: %x\n", sig->senderData); - fprintf(output, " gci: %x\n", sig->gci); - fprintf(output, " tableId: %x\n", sig->tableId); - fprintf(output, " operation: %x\n", - SubTableData::getOperation(sig->requestInfo)); - return false; -} - -bool -printSUB_SYNC_CONTINUE_REQ(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncContinueReq * const sig = (SubSyncContinueReq *)theData; - fprintf(output, " subscriberData: %x\n", sig->subscriberData); - fprintf(output, " noOfRowsSent: %x\n", sig->noOfRowsSent); - return false; -} - -bool -printSUB_SYNC_CONTINUE_REF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncContinueRef * const sig = (SubSyncContinueRef *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - return false; -} - -bool -printSUB_SYNC_CONTINUE_CONF(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubSyncContinueConf * const sig = (SubSyncContinueConf *)theData; - fprintf(output, " subscriptionId: %x\n", sig->subscriptionId); - 
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey); - return false; -} - -bool -printSUB_GCP_COMPLETE_REP(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) { - const SubGcpCompleteRep * const sig = (SubGcpCompleteRep *)theData; - fprintf(output, " gci: %x\n", sig->gci); - return false; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/SystemError.cpp b/storage/ndb/src/common/debugger/signaldata/SystemError.cpp deleted file mode 100644 index 175ef091aaf..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/SystemError.cpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -bool -printSYSTEM_ERROR(FILE * output, const Uint32 * theData, Uint32 len, - Uint16 receiverBlockNo){ - - const SystemError * const sig = (SystemError *) theData; - - fprintf(output, "errorRef: H\'%.8x\n", - sig->errorRef); - fprintf(output, "errorCode: %d\n", - sig->errorCode); - fprintf(output, "data1: H\'%.8x\n", - sig->data1); - fprintf(output, "data2: H\'%.8x\n", - sig->data2); - - return true; -} - - diff --git a/storage/ndb/src/common/debugger/signaldata/TcIndx.cpp b/storage/ndb/src/common/debugger/signaldata/TcIndx.cpp deleted file mode 100644 index 0927287d95c..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TcIndx.cpp +++ /dev/null @@ -1,74 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - - -bool -printTCINDXCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - if (receiverBlockNo == API_PACKED) { - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - } - else { - const TcIndxConf * const sig = (TcIndxConf *) theData; - - fprintf(output, "Signal data: "); - Uint32 i = 0; - Uint32 confInfo = sig->confInfo; - Uint32 noOfOp = TcIndxConf::getNoOfOperations(confInfo); - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - fprintf(output, "apiConnectPtr: H'%.8x, gci: %u, transId:(H'%.8x, H'%.8x)\n", - sig->apiConnectPtr, sig->gci, sig->transId1, sig->transId2); - - fprintf(output, "noOfOperations: %u, commitFlag: %s, markerFlag: %s\n", - noOfOp, - (TcIndxConf::getCommitFlag(confInfo) == 0)?"false":"true", - (TcIndxConf::getMarkerFlag(confInfo) == 0)?"false":"true"); - fprintf(output, "Operations:\n"); - for(i = 0; i < noOfOp; i++) { - fprintf(output, - "apiOperationPtr: H'%.8x, attrInfoLen: %u\n", - sig->operations[i].apiOperationPtr, - sig->operations[i].attrInfoLen); - } - } - - return true; -} - -bool -printTCINDXREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - -// const TcIndxRef * const sig = (TcIndxRef *) theData; - - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} - diff --git a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp deleted file mode 100644 index 47695d914cc..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -bool -printTCKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - - if (receiverBlockNo == API_PACKED) { - return false; - Uint32 Theader = * theData++; - Uint32 TpacketLen = (Theader & 0x1F) + 3; - Uint32 TrecBlockNo = Theader >> 16; - - do { - fprintf(output, "Block: %d %d %d\n", TrecBlockNo, len, TpacketLen); - printTCKEYCONF(output, theData, TpacketLen, TrecBlockNo); - assert(len >= (1 + TpacketLen)); - len -= (1 + TpacketLen); - theData += TpacketLen; - } while(len); - return true; - } - else { - const TcKeyConf * const sig = (TcKeyConf *) theData; - - Uint32 i = 0; - Uint32 confInfo = sig->confInfo; - Uint32 noOfOp = TcKeyConf::getNoOfOperations(confInfo); - if (noOfOp > 10) noOfOp = 10; - fprintf(output, " apiConnectPtr: H'%.8x, gci: %u, transId:(H'%.8x, H'%.8x)\n", - sig->apiConnectPtr, sig->gci, sig->transId1, sig->transId2); - - fprintf(output, " noOfOperations: %u, commitFlag: %s, markerFlag: %s\n", - noOfOp, - (TcKeyConf::getCommitFlag(confInfo) == 0)?"false":"true", - (TcKeyConf::getMarkerFlag(confInfo) == 0)?"false":"true"); - fprintf(output, "Operations:\n"); - for(i = 0; i < noOfOp; i++) { - if(sig->operations[i].attrInfoLen > TcKeyConf::DirtyReadBit) - fprintf(output, - " apiOperationPtr: H'%.8x, simplereadnode: %u\n", - sig->operations[i].apiOperationPtr, - sig->operations[i].attrInfoLen & (~TcKeyConf::DirtyReadBit)); - else - fprintf(output, - " apiOperationPtr: H'%.8x, attrInfoLen: %u\n", - sig->operations[i].apiOperationPtr, - sig->operations[i].attrInfoLen); - } - } - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp deleted file mode 100644 index fdfe10e4a30..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TcKeyRef.cpp +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printTCKEYREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp deleted file mode 100644 index 793fa557dd6..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TcKeyReq.cpp +++ /dev/null @@ -1,115 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -bool -printTCKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - - const TcKeyReq * const sig = (TcKeyReq *) theData; - - UintR requestInfo = sig->requestInfo; - - fprintf(output, " apiConnectPtr: H\'%.8x, apiOperationPtr: H\'%.8x\n", - sig->apiConnectPtr, sig->apiOperationPtr); - fprintf(output, " Operation: %s, Flags: ", - sig->getOperationType(requestInfo) == ZREAD ? "Read" : - sig->getOperationType(requestInfo) == ZREAD_EX ? "Read-Ex" : - sig->getOperationType(requestInfo) == ZUPDATE ? "Update" : - sig->getOperationType(requestInfo) == ZINSERT ? "Insert" : - sig->getOperationType(requestInfo) == ZDELETE ? "Delete" : - sig->getOperationType(requestInfo) == ZWRITE ? 
"Write" : - "Unknown"); - { - if(sig->getDirtyFlag(requestInfo)){ - fprintf(output, "Dirty "); - } - if(sig->getStartFlag(requestInfo)){ - fprintf(output, "Start "); - } - if(sig->getExecuteFlag(requestInfo)){ - fprintf(output, "Execute "); - } - if(sig->getCommitFlag(requestInfo)){ - fprintf(output, "Commit "); - } - if (sig->getExecutingTrigger(requestInfo)) { - fprintf(output, "Trigger "); - } - - if (sig->getNoDiskFlag(requestInfo)) { - fprintf(output, "NoDisk "); - } - - UintR TcommitType = sig->getAbortOption(requestInfo); - if (TcommitType == TcKeyReq::AbortOnError) { - fprintf(output, "AbortOnError "); - } else if (TcommitType == TcKeyReq::IgnoreError) { - fprintf(output, "IgnoreError "); - }//if - - if(sig->getSimpleFlag(requestInfo)){ - fprintf(output, "Simple "); - } - if(sig->getScanIndFlag(requestInfo)){ - fprintf(output, "ScanInd "); - } - if(sig->getInterpretedFlag(requestInfo)){ - fprintf(output, "Interpreted "); - } - if(sig->getDistributionKeyFlag(sig->requestInfo)){ - fprintf(output, " d-key"); - } - fprintf(output, "\n"); - } - - const int keyLen = sig->getKeyLength(requestInfo); - const int attrInThis = sig->getAIInTcKeyReq(requestInfo); - const int attrLen = sig->getAttrinfoLen(sig->attrLen); - const int apiVer = sig->getAPIVersion(sig->attrLen); - fprintf(output, - " keyLen: %d, attrLen: %d, AI in this: %d, tableId: %d, " - "tableSchemaVer: %d, API Ver: %d\n", - keyLen, attrLen, attrInThis, - sig->tableId, sig->tableSchemaVersion, apiVer); - - fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n -- Variable Data --\n", - sig->transId1, sig->transId2); - - if (len >= TcKeyReq::StaticLength) { - Uint32 restLen = (len - TcKeyReq::StaticLength); - const Uint32 * rest = &sig->scanInfo; - while(restLen >= 7){ - fprintf(output, - " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n", - rest[0], rest[1], rest[2], rest[3], - rest[4], rest[5], rest[6]); - restLen -= 7; - rest += 7; - } - if(restLen > 0){ - for(Uint32 i = 0; i - -bool -printTCROLLBACKREP(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp b/storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp deleted file mode 100644 index 51abfb5a2f2..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TrigAttrInfo.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -static -const char * -tatype(Uint32 i){ - switch(i){ - case TrigAttrInfo::PRIMARY_KEY: - return "PK"; - break; - case TrigAttrInfo::BEFORE_VALUES: - return "BEFORE"; - break; - case TrigAttrInfo::AFTER_VALUES: - return "AFTER"; - break; - } - return "UNKNOWN"; -} - -bool -printTRIG_ATTRINFO(FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const TrigAttrInfo * const sig = (TrigAttrInfo *) theData; - - fprintf(output, " TriggerId: %d Type: %s ConnectPtr: %x\n", - sig->getTriggerId(), - tatype(sig->getAttrInfoType()), - sig->getConnectionPtr()); - - Uint32 i = 0; - while (i < len - TrigAttrInfo::StaticLength) - fprintf(output, " H\'%.8x", sig->getData()[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TupCommit.cpp b/storage/ndb/src/common/debugger/signaldata/TupCommit.cpp deleted file mode 100644 index e6dcfef5c8f..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TupCommit.cpp +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printTUPCOMMITREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TupKey.cpp b/storage/ndb/src/common/debugger/signaldata/TupKey.cpp deleted file mode 100644 index 9290e942311..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TupKey.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printTUPKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} - -bool -printTUPKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} - -bool -printTUPKEYREF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){ - fprintf(output, "Signal data: "); - Uint32 i = 0; - while (i < len) - fprintf(output, "H\'%.8x ", theData[i++]); - fprintf(output,"\n"); - - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp b/storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp deleted file mode 100644 index 6aa5e2a8d06..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/TuxMaint.cpp +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -bool -printTUX_MAINT_REQ(FILE* output, const Uint32* theData, Uint32 len, Uint16 rbn) -{ - //const bool inOut = rbn & (1 << 15); - const TuxMaintReq* const sig = (const TuxMaintReq*)theData; - fprintf(output, " errorCode=%d\n", sig->errorCode); - fprintf(output, " table: id=%u", sig->tableId); - fprintf(output, " index: id=%u", sig->indexId); - fprintf(output, " fragment: id=%u\n", sig->fragId); - fprintf(output, " tuple: loc=%u.%u version=%u\n", sig->pageId, sig->pageIndex, sig->tupVersion); - const Uint32 opCode = sig->opInfo & 0xFF; - const Uint32 opFlag = sig->opInfo >> 8; - switch (opCode ) { - case TuxMaintReq::OpAdd: - fprintf(output, " opCode=Add opFlag=%u\n", opFlag); - break; - case TuxMaintReq::OpRemove: - fprintf(output, " opCode=Remove opFlag=%u\n", opFlag); - break; - default: - fprintf(output, " opInfo=%x ***invalid***\n", sig->opInfo); - break; - } - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp b/storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp deleted file mode 100644 index 8cdb6fb38b8..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/UtilDelete.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printUTIL_DELETE_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - (void)l; // Don't want compiler warning - (void)b; // Don't want compiler warning - - UtilDeleteReq* sig = (UtilDeleteReq*)data; - fprintf(out, " senderData: %d prepareId: %d totalDataLen: %d\n", - sig->senderData, - sig->prepareId, - sig->totalDataLen); - fprintf(out, - " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n" - " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n" - " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n", - sig->attrData[0], sig->attrData[1], sig->attrData[2], - sig->attrData[3], sig->attrData[4], sig->attrData[5], - sig->attrData[6], sig->attrData[7], sig->attrData[8], - sig->attrData[9], sig->attrData[10], sig->attrData[11], - sig->attrData[12], sig->attrData[13], sig->attrData[14], - sig->attrData[15], sig->attrData[16], sig->attrData[17], - sig->attrData[18], sig->attrData[19], sig->attrData[20], - sig->attrData[21] - ); - - return true; -} - -bool -printUTIL_DELETE_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - (void)l; // Don't want compiler warning - (void)b; // Don't want compiler warning - - UtilDeleteConf* sig = (UtilDeleteConf*)data; - fprintf(out, " senderData: %d\n", sig->senderData); - return true; -} - -bool -printUTIL_DELETE_REF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - (void)l; // Don't want compiler warning - (void)b; // Don't want compiler warning - - UtilDeleteRef* sig = (UtilDeleteRef*)data; - fprintf(out, " senderData: %d\n", sig->senderData); - fprintf(out, " errorCode: %d\n", sig->errorCode); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp b/storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp deleted file mode 100644 index 1e3cf1f255a..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/UtilExecute.cpp +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printUTIL_EXECUTE_REQ(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - const UtilExecuteReq* const sig = (UtilExecuteReq*)data; - fprintf(out, " senderRef: H'%.8x, senderData: H'%.8x prepareId: %d " - " releaseFlag: %d\n", - sig->senderRef, - sig->senderData, - sig->getPrepareId(), - sig->getReleaseFlag()); - return true; -} - -bool -printUTIL_EXECUTE_CONF(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - UtilExecuteConf* sig = (UtilExecuteConf*)data; - fprintf(out, " senderData: H'%.8x\n", - sig->senderData); - return true; -} - -bool -printUTIL_EXECUTE_REF(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - UtilExecuteRef* sig = (UtilExecuteRef*)data; - fprintf(out, " senderData: H'%.8x, ", sig->senderData); - fprintf(out, " errorCode: %s, ", - sig->errorCode == UtilExecuteRef::IllegalKeyNumber ? - "IllegalKeyNumber" : - sig->errorCode == UtilExecuteRef::IllegalAttrNumber ? - "IllegalAttrNumber" : - sig->errorCode == UtilExecuteRef::TCError ? - "TCError" : - sig->errorCode == UtilExecuteRef::AllocationError ? - "AllocationError" : - "Unknown"); - fprintf(out, " TCErrorCode: %d\n", - sig->TCErrorCode); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/UtilLock.cpp b/storage/ndb/src/common/debugger/signaldata/UtilLock.cpp deleted file mode 100644 index a10e490fa53..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/UtilLock.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printUTIL_LOCK_REQ (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilLockReq *const sig = (UtilLockReq *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " requestInfo: %x\n", sig->requestInfo); - return true; -} - -bool -printUTIL_LOCK_CONF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilLockConf *const sig = (UtilLockConf *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " lockKey: %x\n", sig->lockKey); - return true; -} - -bool -printUTIL_LOCK_REF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilLockRef *const sig = (UtilLockRef *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " errorCode: %x\n", sig->errorCode); - return true; -} - -bool -printUTIL_UNLOCK_REQ (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilUnlockReq *const sig = (UtilUnlockReq *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " lockKey: %x\n", sig->lockKey); - return true; -} - -bool -printUTIL_UNLOCK_CONF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilUnlockConf *const sig = (UtilUnlockConf *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - return true; -} - -bool -printUTIL_UNLOCK_REF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilUnlockRef *const sig = (UtilUnlockRef *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " errorCode: %x\n", sig->errorCode); - return true; -} - -bool -printUTIL_CREATE_LOCK_REQ (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilCreateLockReq *const sig = (UtilCreateLockReq *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " lockType: %x\n", sig->lockType); - return true; -} - -bool -printUTIL_CREATE_LOCK_REF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilCreateLockRef *const sig = (UtilCreateLockRef *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " errorCode: %x\n", sig->errorCode); - return true; -} - -bool -printUTIL_CREATE_LOCK_CONF (FILE * output, const Uint32 * theData, - Uint32 
len, Uint16 receiverBlockNo) -{ - const UtilCreateLockConf *const sig = (UtilCreateLockConf *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - return true; -} - -bool -printUTIL_DESTROY_LOCK_REQ (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilDestroyLockReq *const sig = (UtilDestroyLockReq *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " lockKey: %x\n", sig->lockKey); - return true; -} - -bool -printUTIL_DESTROY_LOCK_REF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilDestroyLockRef *const sig = (UtilDestroyLockRef *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - fprintf (output, " errorCode: %x\n", sig->errorCode); - return true; -} - -bool -printUTIL_DESTROY_LOCK_CONF (FILE * output, const Uint32 * theData, - Uint32 len, Uint16 receiverBlockNo) -{ - const UtilDestroyLockConf *const sig = (UtilDestroyLockConf *) theData; - fprintf (output, " senderData: %x\n", sig->senderData); - fprintf (output, " senderRef: %x\n", sig->senderRef); - fprintf (output, " lockId: %x\n", sig->lockId); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp b/storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp deleted file mode 100644 index ee0234561b6..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/UtilPrepare.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -bool -printUTIL_PREPARE_REQ(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - UtilPrepareReq* sig = (UtilPrepareReq*)data; - fprintf(out, " senderRef: H'%.8x senderData: H'%.8x\n", - sig->senderRef, - sig->senderData); - - return true; -} - -bool -printUTIL_PREPARE_CONF(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - UtilPrepareConf* sig = (UtilPrepareConf*)data; - fprintf(out, " senderData: H'%.8x prepareId: %d\n", - sig->senderData, - sig->prepareId); - return true; -} - -bool -printUTIL_PREPARE_REF(FILE* out, const Uint32 * data, Uint32 len, Uint16 rec) -{ - UtilPrepareRef* sig = (UtilPrepareRef*)data; - fprintf(out, " senderData: H'%.8x, ", sig->senderData); - fprintf(out, " error: %d, ", sig->errorCode); - - fprintf(out, " errorMsg: "); - switch(sig->errorCode) { - case UtilPrepareRef::NO_ERROR: - fprintf(out, "No error"); - break; - case UtilPrepareRef::PREPARE_SEIZE_ERROR: - fprintf(out, "Failed to seize Prepare record"); - break; - case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR: - fprintf(out, "Failed to seize PreparedOperation record"); - break; - case UtilPrepareRef::DICT_TAB_INFO_ERROR: - fprintf(out, "Failed to get table info from DICT"); - break; - } - fprintf(out, "\n"); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp b/storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp deleted file mode 100644 index 2533d1b870e..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/UtilSequence.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -inline -const char * -type2string(UtilSequenceReq::RequestType type){ - switch(type){ - case UtilSequenceReq::NextVal: - return "NextVal"; - case UtilSequenceReq::CurrVal: - return "CurrVal"; - case UtilSequenceReq::Create: - return "Create"; - default: - return "Unknown"; - } -} - -bool -printUTIL_SEQUENCE_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - UtilSequenceReq* sig = (UtilSequenceReq*)data; - fprintf(out, " senderData: %d sequenceId: %d RequestType: %s\n", - sig->senderData, - sig->sequenceId, - type2string((UtilSequenceReq::RequestType)sig->requestType)); - return true; -} - -bool -printUTIL_SEQUENCE_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - UtilSequenceConf* sig = (UtilSequenceConf*)data; - fprintf(out, " senderData: %d sequenceId: %d RequestType: %s\n", - sig->senderData, - sig->sequenceId, - type2string((UtilSequenceReq::RequestType)sig->requestType)); - fprintf(out, " val: [ %d %d ]\n", - sig->sequenceValue[0], - sig->sequenceValue[1]); - return true; -} - -bool -printUTIL_SEQUENCE_REF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ - UtilSequenceRef* sig = (UtilSequenceRef*)data; - fprintf(out, " senderData: %d sequenceId: %d RequestType: %s\n", - sig->senderData, - sig->sequenceId, - type2string((UtilSequenceReq::RequestType)sig->requestType)); - fprintf(out, " errorCode: %d, TCErrorCode: %d\n", - sig->errorCode, sig->TCErrorCode); - return true; -} diff --git a/storage/ndb/src/common/debugger/signaldata/print.awk b/storage/ndb/src/common/debugger/signaldata/print.awk deleted file mode 100644 index ac65348170e..00000000000 --- a/storage/ndb/src/common/debugger/signaldata/print.awk +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -BEGIN { - m_curr=""; - m_count=0; - m_level=0; -} -/^[ ]*class[ ]+.*{/ { - if(m_curr != ""){ - print; - print "ERROR: " m_curr; - exit; - } - m_curr = $2; -} -/{/ { - m_level++; -} -/bool print/{ - m_print=$3; - i=index($3, "("); - if(i > 0){ - m_print=substr($3, 0, i-1); - } -} - -/[ ]+Uint32[ ]+[^)]*;/ { - if(m_level >= 0){ - m=$2; - i=index($2, ";"); - if(i > 0){ - m=substr($2, 0, i-1); - } - m_members[m_count]=m; - m_count++; - } -} -/^[ ]*}[ ]*;/ { - m_level--; - if(m_level == 0){ - if(m_count > 0 && m_print != ""){ - print "bool"; - print m_print "(FILE * output, const Uint32 * theData, "; - print "Uint32 len, Uint16 receiverBlockNo) {"; - print "const " m_curr " * const sig = (" m_curr " *)theData;"; - for(i = 0; i" m_members[i] ");"; - } - print "return true;"; - print "}"; - print ""; - } - m_curr=""; - m_print=""; - m_count=0; - } -} diff --git a/storage/ndb/src/common/logger/ConsoleLogHandler.cpp b/storage/ndb/src/common/logger/ConsoleLogHandler.cpp deleted file mode 100644 index ddfc9a85a5b..00000000000 --- a/storage/ndb/src/common/logger/ConsoleLogHandler.cpp +++ /dev/null @@ -1,68 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "ConsoleLogHandler.hpp" - -#include - -ConsoleLogHandler::ConsoleLogHandler() : LogHandler() -{ -} - -ConsoleLogHandler::~ConsoleLogHandler() -{ - -} - -bool -ConsoleLogHandler::open() -{ - return true; -} - -bool -ConsoleLogHandler::close() -{ - return true; -} - -// -// PROTECTED -// -void -ConsoleLogHandler::writeHeader(const char* pCategory, Logger::LoggerLevel level) -{ - char str[LogHandler::MAX_HEADER_LENGTH]; - ndbout << getDefaultHeader(str, pCategory, level); -} - -void -ConsoleLogHandler::writeMessage(const char* pMsg) -{ - ndbout << pMsg; -} - -void -ConsoleLogHandler::writeFooter() -{ - ndbout << getDefaultFooter() << flush; -} - - -bool -ConsoleLogHandler::setParam(const BaseString ¶m, const BaseString &value) { - return false; -} diff --git a/storage/ndb/src/common/logger/FileLogHandler.cpp b/storage/ndb/src/common/logger/FileLogHandler.cpp deleted file mode 100644 index b2b8b7c7b31..00000000000 --- a/storage/ndb/src/common/logger/FileLogHandler.cpp +++ /dev/null @@ -1,251 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -// -// PUBLIC -// - -FileLogHandler::FileLogHandler() : - LogHandler(), - m_maxNoFiles(MAX_NO_FILES), - m_maxFileSize(MAX_FILE_SIZE), - m_maxLogEntries(MAX_LOG_ENTRIES) - -{ - m_pLogFile = new File_class("logger.log", "a+"); -} - -FileLogHandler::FileLogHandler(const char* aFileName, - int maxNoFiles, - long maxFileSize, - unsigned int maxLogEntries) : - LogHandler(), - m_maxNoFiles(maxNoFiles), - m_maxFileSize(maxFileSize), - m_maxLogEntries(maxLogEntries) -{ - m_pLogFile = new File_class(aFileName, "a+"); -} - -FileLogHandler::~FileLogHandler() -{ - delete m_pLogFile; -} - -bool -FileLogHandler::open() -{ - bool rc = true; - - if (m_pLogFile->open()) - { - if (isTimeForNewFile()) - { - if (!createNewFile()) - { - setErrorCode(errno); - rc = false; - } - } - } - else - { - setErrorCode(errno); - rc = false; - } - - return rc; -} - -bool -FileLogHandler::close() -{ - bool rc = true; - if (!m_pLogFile->close()) - { - setErrorCode(errno); - rc = false; - } - - return rc; -} - -void -FileLogHandler::writeHeader(const char* pCategory, Logger::LoggerLevel level) -{ - char str[LogHandler::MAX_HEADER_LENGTH]; - m_pLogFile->writeChar(getDefaultHeader(str, pCategory, level)); -} - -void -FileLogHandler::writeMessage(const char* pMsg) -{ - m_pLogFile->writeChar(pMsg); -} - -void -FileLogHandler::writeFooter() -{ - static int callCount = 0; - m_pLogFile->writeChar(getDefaultFooter()); - /** - * The reason I also check the number of log entries instead of - * only the log size, is that I do not want to check the file size - * after each log entry which requires system calls and is quite slow. - * TODO: Any better way? - */ - if (callCount % m_maxLogEntries != 0) // Check every m_maxLogEntries - { - if (isTimeForNewFile()) - { - if (!createNewFile()) - { - // Baby one more time... 
- createNewFile(); - } - } - callCount = 0; - } - callCount++; - - m_pLogFile->flush(); -} - - -// -// PRIVATE -// - -bool -FileLogHandler::isTimeForNewFile() -{ - return (m_pLogFile->size() >= m_maxFileSize); -} - -bool -FileLogHandler::createNewFile() -{ - bool rc = true; - int fileNo = 1; - char newName[PATH_MAX]; - time_t newMtime, preMtime = 0; - - do - { - if (fileNo >= m_maxNoFiles) - { - fileNo = 1; - BaseString::snprintf(newName, sizeof(newName), - "%s.%d", m_pLogFile->getName(), fileNo); - break; - } - BaseString::snprintf(newName, sizeof(newName), - "%s.%d", m_pLogFile->getName(), fileNo++); - newMtime = File_class::mtime(newName); - if (newMtime < preMtime) - { - break; - } - else - { - preMtime = newMtime; - } - } while (File_class::exists(newName)); - - m_pLogFile->close(); - if (!File_class::rename(m_pLogFile->getName(), newName)) - { - setErrorCode(errno); - rc = false; - } - - // Open again - if (!m_pLogFile->open()) - { - setErrorCode(errno); - rc = false; - } - - return rc; -} - -bool -FileLogHandler::setParam(const BaseString ¶m, const BaseString &value){ - if(param == "filename") - return setFilename(value); - if(param == "maxsize") - return setMaxSize(value); - if(param == "maxfiles") - return setMaxFiles(value); - setErrorStr("Invalid parameter"); - return false; -} - -bool -FileLogHandler::setFilename(const BaseString &filename) { - close(); - if(m_pLogFile) - delete m_pLogFile; - m_pLogFile = new File_class(filename.c_str(), "a+"); - return open(); -} - -bool -FileLogHandler::setMaxSize(const BaseString &size) { - char *end; - long val = strtol(size.c_str(), &end, 0); /* XXX */ - if(size.c_str() == end || val < 0) - { - setErrorStr("Invalid file size"); - return false; - } - if(end[0] == 'M') - val *= 1024*1024; - if(end[0] == 'k') - val *= 1024; - - m_maxFileSize = val; - - return true; -} - -bool -FileLogHandler::setMaxFiles(const BaseString &files) { - char *end; - long val = strtol(files.c_str(), &end, 0); - if(files.c_str() == end || val < 1) - { - setErrorStr("Invalid maximum number of files"); - return false; - } - m_maxNoFiles = val; - - return true; -} - -bool -FileLogHandler::checkParams() { - if(m_pLogFile == NULL) - { - setErrorStr("Log file cannot be null."); - return false; - } - return true; -} diff --git a/storage/ndb/src/common/logger/LogHandler.cpp b/storage/ndb/src/common/logger/LogHandler.cpp deleted file mode 100644 index 4e8ad49d56d..00000000000 --- a/storage/ndb/src/common/logger/LogHandler.cpp +++ /dev/null @@ -1,208 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "LogHandler.hpp" - -#include - -// -// PUBLIC -// -LogHandler::LogHandler() : - m_pDateTimeFormat("%d-%.2d-%.2d %.2d:%.2d:%.2d"), - m_errorCode(0), - m_errorStr(NULL) -{ - m_max_repeat_frequency= 3; // repeat messages maximum every 3 seconds - m_count_repeated_messages= 0; - m_last_category[0]= 0; - m_last_message[0]= 0; - m_last_log_time= 0; - m_now= 0; - m_last_level= (Logger::LoggerLevel)-1; -} - -LogHandler::~LogHandler() -{ -} - -void -LogHandler::append(const char* pCategory, Logger::LoggerLevel level, - const char* pMsg) -{ - time_t now; - now= ::time((time_t*)NULL); - - if (level != m_last_level || - strcmp(pCategory, m_last_category) || - strcmp(pMsg, m_last_message)) - { - if (m_count_repeated_messages > 0) // print that message - append_impl(m_last_category, m_last_level, m_last_message); - - m_last_level= level; - strncpy(m_last_category, pCategory, sizeof(m_last_category)); - strncpy(m_last_message, pMsg, sizeof(m_last_message)); - } - else // repeated message - { - if (now < (time_t) (m_last_log_time+m_max_repeat_frequency)) - { - m_count_repeated_messages++; - m_now= now; - return; - } - } - - m_now= now; - - append_impl(pCategory, level, pMsg); - m_last_log_time= now; -} - -void -LogHandler::append_impl(const char* pCategory, Logger::LoggerLevel level, - const char* pMsg) -{ - writeHeader(pCategory, level); - if (m_count_repeated_messages <= 1) - writeMessage(pMsg); - else - { - BaseString str(pMsg); - str.appfmt(" - Repeated %d times", m_count_repeated_messages); - writeMessage(str.c_str()); - } - m_count_repeated_messages= 0; - writeFooter(); -} - -const char* -LogHandler::getDefaultHeader(char* pStr, const char* pCategory, - Logger::LoggerLevel level) const -{ - char time[MAX_DATE_TIME_HEADER_LENGTH]; - BaseString::snprintf(pStr, MAX_HEADER_LENGTH, "%s [%s] %s -- ", - getTimeAsString((char*)time), - pCategory, - Logger::LoggerLevelNames[level]); - - return pStr; -} - - -const char* -LogHandler::getDefaultFooter() const -{ - return "\n"; -} - -const char* -LogHandler::getDateTimeFormat() const -{ - return m_pDateTimeFormat; -} - -void -LogHandler::setDateTimeFormat(const char* pFormat) -{ - m_pDateTimeFormat = (char*)pFormat; -} - -char* -LogHandler::getTimeAsString(char* pStr) const -{ - struct tm* tm_now; -#ifdef NDB_WIN32 - tm_now = localtime(&m_now); -#else - tm_now = ::localtime(&m_now); //uses the "current" timezone -#endif - - BaseString::snprintf(pStr, MAX_DATE_TIME_HEADER_LENGTH, - m_pDateTimeFormat, - tm_now->tm_year + 1900, - tm_now->tm_mon + 1, //month is [0,11]. 
+1 -> [1,12] - tm_now->tm_mday, - tm_now->tm_hour, - tm_now->tm_min, - tm_now->tm_sec); - - return pStr; -} - -int -LogHandler::getErrorCode() const -{ - return m_errorCode; -} - -void -LogHandler::setErrorCode(int code) -{ - m_errorCode = code; -} - - -char* -LogHandler::getErrorStr() -{ - return m_errorStr; -} - -void -LogHandler::setErrorStr(const char* str) -{ - m_errorStr= (char*) str; -} - -bool -LogHandler::parseParams(const BaseString &_params) { - Vector v_args; - - bool ret = true; - - _params.split(v_args, ","); - for(size_t i=0; i < v_args.size(); i++) { - Vector v_param_value; - if(v_args[i].split(v_param_value, "=", 2) != 2) - { - ret = false; - setErrorStr("Can't find key=value pair."); - } - else - { - v_param_value[0].trim(" \t"); - if (!setParam(v_param_value[0], v_param_value[1])) - { - ret = false; - } - } - } - - if(!checkParams()) - ret = false; - return ret; -} - -bool -LogHandler::checkParams() { - return true; -} - -// -// PRIVATE -// diff --git a/storage/ndb/src/common/logger/LogHandlerList.cpp b/storage/ndb/src/common/logger/LogHandlerList.cpp deleted file mode 100644 index c954d20cb96..00000000000 --- a/storage/ndb/src/common/logger/LogHandlerList.cpp +++ /dev/null @@ -1,181 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "LogHandlerList.hpp" - -#include - -// -// PUBLIC -// - -LogHandlerList::LogHandlerList() : - m_size(0), - m_pHeadNode(NULL), - m_pTailNode(NULL), - m_pCurrNode(NULL) -{ -} - -LogHandlerList::~LogHandlerList() -{ - removeAll(); -} - -void -LogHandlerList::add(LogHandler* pNewHandler) -{ - LogHandlerNode* pNode = new LogHandlerNode(); - - if (m_pHeadNode == NULL) - { - m_pHeadNode = pNode; - pNode->pPrev = NULL; - } - else - { - m_pTailNode->pNext = pNode; - pNode->pPrev = m_pTailNode; - } - m_pTailNode = pNode; - pNode->pNext = NULL; - pNode->pHandler = pNewHandler; - - m_size++; -} - -bool -LogHandlerList::remove(LogHandler* pRemoveHandler) -{ - LogHandlerNode* pNode = m_pHeadNode; - bool removed = false; - do - { - if (pNode->pHandler == pRemoveHandler) - { - removeNode(pNode); - removed = true; - break; - } - } while ( (pNode = next(pNode)) != NULL); - - return removed; -} - -void -LogHandlerList::removeAll() -{ - while (m_pHeadNode != NULL) - { - removeNode(m_pHeadNode); - } -} - -LogHandler* -LogHandlerList::next() -{ - LogHandler* pHandler = NULL; - if (m_pCurrNode == NULL) - { - m_pCurrNode = m_pHeadNode; - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - else - { - m_pCurrNode = next(m_pCurrNode); // Next node - if (m_pCurrNode != NULL) - { - pHandler = m_pCurrNode->pHandler; - } - } - - return pHandler; -} - -int -LogHandlerList::size() const -{ - return m_size; -} - -// -// PRIVATE -// - -LogHandlerList::LogHandlerNode* -LogHandlerList::next(LogHandlerNode* pNode) -{ - LogHandlerNode* pCurr = pNode; - if (pNode->pNext 
!= NULL) - { - pCurr = pNode->pNext; - } - else - { - // Tail - pCurr = NULL; - } - return pCurr; -} - -LogHandlerList::LogHandlerNode* -LogHandlerList::prev(LogHandlerNode* pNode) -{ - LogHandlerNode* pCurr = pNode; - if (pNode->pPrev != NULL) // head - { - pCurr = pNode->pPrev; - } - else - { - // Head - pCurr = NULL; - } - - return pCurr; -} - -void -LogHandlerList::removeNode(LogHandlerNode* pNode) -{ - if (pNode->pPrev == NULL) // If head - { - m_pHeadNode = pNode->pNext; - } - else - { - pNode->pPrev->pNext = pNode->pNext; - } - - if (pNode->pNext == NULL) // if tail - { - m_pTailNode = pNode->pPrev; - } - else - { - pNode->pNext->pPrev = pNode->pPrev; - } - - pNode->pNext = NULL; - pNode->pPrev = NULL; - delete pNode->pHandler; // Delete log handler - delete pNode; - - m_size--; -} diff --git a/storage/ndb/src/common/logger/LogHandlerList.hpp b/storage/ndb/src/common/logger/LogHandlerList.hpp deleted file mode 100644 index 0552521d108..00000000000 --- a/storage/ndb/src/common/logger/LogHandlerList.hpp +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LOGHANDLERLIST_H -#define LOGHANDLERLIST_H - -class LogHandler; -#include - -/** - * Provides a simple linked list of log handlers. - * - * @see LogHandler - * @version #@ $Id: LogHandlerList.hpp,v 1.2 2002/03/14 13:07:21 eyualex Exp $ - */ -class LogHandlerList -{ -public: - /** - * Default Constructor. - */ - LogHandlerList(); - - /** - * Destructor. - */ - ~LogHandlerList(); - - /** - * Adds a new log handler. - * - * @param pNewHandler log handler. - */ - void add(LogHandler* pNewHandler); - - /** - * Removes a log handler from the list and call its destructor. - * - * @param pRemoveHandler the handler to remove - */ - bool remove(LogHandler* pRemoveHandler); - - /** - * Removes all log handlers. - */ - void removeAll(); - - /** - * Returns the next log handler in the list. - * returns a log handler or NULL. - */ - LogHandler* next(); - - /** - * Returns the size of the list. 
- */ - int size() const; -private: - /** List node */ - struct LogHandlerNode - { - LogHandlerNode* pPrev; - LogHandlerNode* pNext; - LogHandler* pHandler; - }; - - LogHandlerNode* next(LogHandlerNode* pNode); - LogHandlerNode* prev(LogHandlerNode* pNode); - - void removeNode(LogHandlerNode* pNode); - - int m_size; - - LogHandlerNode* m_pHeadNode; - LogHandlerNode* m_pTailNode; - LogHandlerNode* m_pCurrNode; -}; - -#endif - - diff --git a/storage/ndb/src/common/logger/Logger.cpp b/storage/ndb/src/common/logger/Logger.cpp deleted file mode 100644 index 0c8b2aefc7b..00000000000 --- a/storage/ndb/src/common/logger/Logger.cpp +++ /dev/null @@ -1,398 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "Logger.hpp" - -#include -#include -#include -#include "LogHandlerList.hpp" - -#if !defined NDB_WIN32 -#include -#endif - -// -// PUBLIC -// -const char* Logger::LoggerLevelNames[] = { "ON ", - "DEBUG ", - "INFO ", - "WARNING ", - "ERROR ", - "CRITICAL", - "ALERT ", - "ALL " - }; -Logger::Logger() : - m_pCategory("Logger"), - m_pConsoleHandler(NULL), - m_pFileHandler(NULL), - m_pSyslogHandler(NULL) -{ - m_pHandlerList = new LogHandlerList(); - m_mutex= NdbMutex_Create(); - m_handler_mutex= NdbMutex_Create(); - disable(LL_ALL); - enable(LL_ON); - enable(LL_INFO); -} - -Logger::~Logger() -{ - removeAllHandlers(); - delete m_pHandlerList; - NdbMutex_Destroy(m_handler_mutex); - NdbMutex_Destroy(m_mutex); -} - -void -Logger::setCategory(const char* pCategory) -{ - Guard g(m_mutex); - m_pCategory = pCategory; -} - -bool -Logger::createConsoleHandler() -{ - Guard g(m_handler_mutex); - bool rc = true; - - if (m_pConsoleHandler == NULL) - { - m_pConsoleHandler = new ConsoleLogHandler(); - if (!addHandler(m_pConsoleHandler)) // TODO: check error code - { - rc = false; - delete m_pConsoleHandler; - m_pConsoleHandler = NULL; - } - } - - return rc; -} - -void -Logger::removeConsoleHandler() -{ - Guard g(m_handler_mutex); - if (removeHandler(m_pConsoleHandler)) - { - m_pConsoleHandler = NULL; - } -} - -bool -Logger::createFileHandler() -{ - Guard g(m_handler_mutex); - bool rc = true; - if (m_pFileHandler == NULL) - { - m_pFileHandler = new FileLogHandler(); - if (!addHandler(m_pFileHandler)) // TODO: check error code - { - rc = false; - delete m_pFileHandler; - m_pFileHandler = NULL; - } - } - - return rc; -} - -void -Logger::removeFileHandler() -{ - Guard g(m_handler_mutex); - if (removeHandler(m_pFileHandler)) - { - m_pFileHandler = NULL; - } -} - -bool -Logger::createSyslogHandler() -{ - Guard g(m_handler_mutex); - bool rc = true; - if (m_pSyslogHandler == NULL) - { -#if defined NDB_WIN32 - m_pSyslogHandler = new ConsoleLogHandler(); -#else - m_pSyslogHandler = new SysLogHandler(); -#endif - if (!addHandler(m_pSyslogHandler)) // TODO: check error code - { - rc = false; - delete m_pSyslogHandler; - 
m_pSyslogHandler = NULL; - } - } - - return rc; -} - -void -Logger::removeSyslogHandler() -{ - Guard g(m_handler_mutex); - if (removeHandler(m_pSyslogHandler)) - { - m_pSyslogHandler = NULL; - } -} - -bool -Logger::addHandler(LogHandler* pHandler) -{ - Guard g(m_mutex); - assert(pHandler != NULL); - - bool rc = pHandler->open(); - if (rc) - { - m_pHandlerList->add(pHandler); - } - else - { - delete pHandler; - } - - return rc; -} - -bool -Logger::addHandler(const BaseString &logstring, int *err, int len, char* errStr) { - size_t i; - Vector logdest; - Vectorloghandlers; - DBUG_ENTER("Logger::addHandler"); - - logstring.split(logdest, ";"); - - for(i = 0; i < logdest.size(); i++) { - DBUG_PRINT("info",("adding: %s",logdest[i].c_str())); - - Vector v_type_args; - logdest[i].split(v_type_args, ":", 2); - - BaseString type(v_type_args[0]); - BaseString params; - if(v_type_args.size() >= 2) - params = v_type_args[1]; - - LogHandler *handler = NULL; - -#ifndef NDB_WIN32 - if(type == "SYSLOG") - { - handler = new SysLogHandler(); - } else -#endif - if(type == "FILE") - handler = new FileLogHandler(); - else if(type == "CONSOLE") - handler = new ConsoleLogHandler(); - - if(handler == NULL) - { - snprintf(errStr,len,"Could not create log destination: %s", - logdest[i].c_str()); - DBUG_RETURN(false); - } - if(!handler->parseParams(params)) - { - *err= handler->getErrorCode(); - if(handler->getErrorStr()) - strncpy(errStr, handler->getErrorStr(), len); - DBUG_RETURN(false); - } - loghandlers.push_back(handler); - } - - for(i = 0; i < loghandlers.size(); i++) - addHandler(loghandlers[i]); - - DBUG_RETURN(true); /* @todo handle errors */ -} - -bool -Logger::removeHandler(LogHandler* pHandler) -{ - Guard g(m_mutex); - int rc = false; - if (pHandler != NULL) - { - rc = m_pHandlerList->remove(pHandler); - } - - return rc; -} - -void -Logger::removeAllHandlers() -{ - Guard g(m_mutex); - m_pHandlerList->removeAll(); -} - -bool -Logger::isEnable(LoggerLevel logLevel) const -{ - Guard g(m_mutex); - if (logLevel == LL_ALL) - { - for (unsigned i = 1; i < MAX_LOG_LEVELS; i++) - if (!m_logLevels[i]) - return false; - return true; - } - return m_logLevels[logLevel]; -} - -void -Logger::enable(LoggerLevel logLevel) -{ - Guard g(m_mutex); - if (logLevel == LL_ALL) - { - for (unsigned i = 0; i < MAX_LOG_LEVELS; i++) - { - m_logLevels[i] = true; - } - } - else - { - m_logLevels[logLevel] = true; - } -} - -void -Logger::enable(LoggerLevel fromLogLevel, LoggerLevel toLogLevel) -{ - Guard g(m_mutex); - if (fromLogLevel > toLogLevel) - { - LoggerLevel tmp = toLogLevel; - toLogLevel = fromLogLevel; - fromLogLevel = tmp; - } - - for (int i = fromLogLevel; i <= toLogLevel; i++) - { - m_logLevels[i] = true; - } -} - -void -Logger::disable(LoggerLevel logLevel) -{ - Guard g(m_mutex); - if (logLevel == LL_ALL) - { - for (unsigned i = 0; i < MAX_LOG_LEVELS; i++) - { - m_logLevels[i] = false; - } - } - else - { - m_logLevels[logLevel] = false; - } -} - -void -Logger::alert(const char* pMsg, ...) const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_ALERT, pMsg, ap); - va_end(ap); -} - -void -Logger::critical(const char* pMsg, ...) const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_CRITICAL, pMsg, ap); - va_end(ap); -} -void -Logger::error(const char* pMsg, ...) const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_ERROR, pMsg, ap); - va_end(ap); -} -void -Logger::warning(const char* pMsg, ...) const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_WARNING, pMsg, ap); - va_end(ap); -} - -void -Logger::info(const char* pMsg, ...) 
const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_INFO, pMsg, ap); - va_end(ap); -} - -void -Logger::debug(const char* pMsg, ...) const -{ - va_list ap; - va_start(ap, pMsg); - log(LL_DEBUG, pMsg, ap); - va_end(ap); -} - -// -// PROTECTED -// - -void -Logger::log(LoggerLevel logLevel, const char* pMsg, va_list ap) const -{ - Guard g(m_mutex); - if (m_logLevels[LL_ON] && m_logLevels[logLevel]) - { - char buf[MAX_LOG_MESSAGE_SIZE]; - BaseString::vsnprintf(buf, sizeof(buf), pMsg, ap); - LogHandler* pHandler = NULL; - while ( (pHandler = m_pHandlerList->next()) != NULL) - { - pHandler->append(m_pCategory, logLevel, buf); - } - } -} - -// -// PRIVATE -// - -template class Vector; diff --git a/storage/ndb/src/common/logger/Makefile.am b/storage/ndb/src/common/logger/Makefile.am deleted file mode 100644 index 5dd1e14b649..00000000000 --- a/storage/ndb/src/common/logger/Makefile.am +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = liblogger.la - -SOURCE_WIN = Logger.cpp LogHandlerList.cpp LogHandler.cpp \ - ConsoleLogHandler.cpp FileLogHandler.cpp -liblogger_la_SOURCES = $(SOURCE_WIN) SysLogHandler.cpp - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am - -windoze-dsp: liblogger.dsp - -liblogger.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(SOURCE_WIN) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/logger/SysLogHandler.cpp b/storage/ndb/src/common/logger/SysLogHandler.cpp deleted file mode 100644 index e4a0de92de5..00000000000 --- a/storage/ndb/src/common/logger/SysLogHandler.cpp +++ /dev/null @@ -1,159 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "SysLogHandler.hpp" - -#include - -// -// PUBLIC -// - -SysLogHandler::SysLogHandler() : - m_severity(LOG_INFO), - m_pIdentity("NDB"), - m_facility(LOG_USER) -{ -} - -SysLogHandler::SysLogHandler(const char* pIdentity, int facility) : - m_severity(LOG_INFO), - m_pIdentity(pIdentity), - m_facility(facility) -{ - -} - -SysLogHandler::~SysLogHandler() -{ -} - -bool -SysLogHandler::open() -{ - ::setlogmask(LOG_UPTO(LOG_DEBUG)); // Log from EMERGENCY down to DEBUG - ::openlog(m_pIdentity, LOG_PID|LOG_CONS|LOG_ODELAY, m_facility); // PID, CONSOLE delay openlog - - return true; -} - -bool -SysLogHandler::close() -{ - ::closelog(); - - return true; -} - -void -SysLogHandler::writeHeader(const char* pCategory, Logger::LoggerLevel level) -{ - // Save category to be used by writeMessage... - m_pCategory = pCategory; - // Map LogLevel to syslog severity - switch (level) - { - case Logger::LL_ALERT: - m_severity = LOG_ALERT; - break; - case Logger::LL_CRITICAL: - m_severity = LOG_CRIT; - break; - case Logger::LL_ERROR: - m_severity = LOG_ERR; - break; - case Logger::LL_WARNING: - m_severity = LOG_WARNING; - break; - case Logger::LL_INFO: - m_severity = LOG_INFO; - break; - case Logger::LL_DEBUG: - m_severity = LOG_DEBUG; - break; - default: - m_severity = LOG_INFO; - break; - } - -} - -void -SysLogHandler::writeMessage(const char* pMsg) -{ - ::syslog(m_facility | m_severity, "[%s] %s", m_pCategory, pMsg); -} - -void -SysLogHandler::writeFooter() -{ - // Need to close it everytime? Do we run out of file descriptors? - //::closelog(); -} - -bool -SysLogHandler::setParam(const BaseString ¶m, const BaseString &value) { - if(param == "facility") { - return setFacility(value); - } - return false; -} - -static const struct syslog_facility { - const char *name; - int value; -} facilitynames[] = { - { "auth", LOG_AUTH }, -#ifdef LOG_AUTHPRIV - { "authpriv", LOG_AUTHPRIV }, -#endif - { "cron", LOG_CRON }, - { "daemon", LOG_DAEMON }, -#ifdef LOG_FTP - { "ftp", LOG_FTP }, -#endif - { "kern", LOG_KERN }, - { "lpr", LOG_LPR }, - { "mail", LOG_MAIL }, - { "news", LOG_NEWS }, - { "syslog", LOG_SYSLOG }, - { "user", LOG_USER }, - { "uucp", LOG_UUCP }, - { "local0", LOG_LOCAL0 }, - { "local1", LOG_LOCAL1 }, - { "local2", LOG_LOCAL2 }, - { "local3", LOG_LOCAL3 }, - { "local4", LOG_LOCAL4 }, - { "local5", LOG_LOCAL5 }, - { "local6", LOG_LOCAL6 }, - { "local7", LOG_LOCAL7 }, - { NULL, -1 } -}; - -bool -SysLogHandler::setFacility(const BaseString &facility) { - const struct syslog_facility *c; - for(c = facilitynames; c->name != NULL; c++) { - if(facility == c->name) { - m_facility = c->value; - close(); - open(); - return true; - } - } - setErrorStr("Invalid syslog facility name"); - return false; -} diff --git a/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp deleted file mode 100644 index 725eca59869..00000000000 --- a/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp +++ /dev/null @@ -1,164 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "LogHandlerListUnitTest.hpp" - -#include -#include -#include - -#include - -typedef bool (*TESTFUNC)(const char*); -typedef struct -{ - const char* name; - TESTFUNC test; -}Tests; - -static Tests testCases[] = { {"Add", &LogHandlerListUnitTest::testAdd}, - {"Remove", &LogHandlerListUnitTest::testRemove}, - {"Traverse Next", &LogHandlerListUnitTest::testTraverseNext} - }; - - -int testFailed = 0; - -int main(int argc, char* argv[]) -{ - char str[256]; - int testCount = (sizeof(testCases) / sizeof(Tests)); - ndbout << "Starting " << testCount << " tests..." << endl; - for (int i = 0; i < testCount; i++) - { - ndbout << "-- " << " Test " << i + 1 - << " [" << testCases[i].name << "] --" << endl; - BaseString::snprintf(str, 256, "%s %s %s %d", "Logging ", - testCases[i].name, " message ", i); - if (testCases[i].test(str)) - { - ndbout << "-- Passed --" << endl; - } - else - { - ndbout << "-- Failed -- " << endl; - } - - } - ndbout << endl << "-- " << testCount - testFailed << " passed, " - << testFailed << " failed --" << endl; - - return 0; -} - -bool -LogHandlerListUnitTest::testAdd(const char* msg) -{ - bool rc = true; - LogHandlerList list; - int size = 10; - for (int i = 0; i < size; i++) - { - list.add(new ConsoleLogHandler()); - } - if (list.size() != size) - { - rc = false; - } - ndbout << "List size: " << list.size() << endl; - - - return rc; -} -bool -LogHandlerListUnitTest::testRemove(const char* msg) -{ - bool rc = true; - - LogHandlerList list; - int size = 10; - LogHandler* pHandlers[10]; - for (int i = 0; i < size; i++) - { - pHandlers[i] = new ConsoleLogHandler(); - list.add(pHandlers[i]); - } - - // Remove - - for (int i = 0; i < size; i++) - { - if (!list.remove(pHandlers[i])) - { - ndbout << "Could not remove handler!" 
<< endl; - } - else - { - ndbout << "List size: " << list.size() << endl; - } - } - - return rc; - -} -bool -LogHandlerListUnitTest::testTraverseNext(const char* msg) -{ - bool rc = true; - LogHandlerList list; - int size = 10; - LogHandler* pHandlers[10]; - - for (int i = 0; i < size; i++) - { - char* str = new char[3]; - pHandlers[i] = new ConsoleLogHandler(); - BaseString::snprintf(str, 3, "%d", i); - pHandlers[i]->setDateTimeFormat(str); - list.add(pHandlers[i]); - } - - ndbout << "List size: " << list.size() << endl; - - LogHandler* pHandler = NULL; - int i = 0; - while ((pHandler = list.next()) != NULL) - { - ndbout << "Handler[" << i++ << "]:dateformat = " - << pHandler->getDateTimeFormat() << endl; - } - - list.removeAll(); - - return rc; - -} - -void -LogHandlerListUnitTest::error(const char* msg) -{ - testFailed++; - ndbout << "Test failed: " << msg << endl; -} - -LogHandlerListUnitTest::LogHandlerListUnitTest() -{ -} -LogHandlerListUnitTest::~LogHandlerListUnitTest() -{ -} diff --git a/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp b/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp deleted file mode 100644 index d2d040c25da..00000000000 --- a/storage/ndb/src/common/logger/listtest/LogHandlerListUnitTest.hpp +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LOGHANDLERLISTUNITTEST_H -#define LOGHANDLERLISTUNITTEST_H - -#include "LogHandlerList.hpp" - -/** - * Unit test of LogHandlerList. 
- * - * @version #@ $Id: LogHandlerListUnitTest.hpp,v 1.1 2002/03/13 17:59:15 eyualex Exp $ - */ -class LogHandlerListUnitTest -{ -public: - - static bool testAdd(const char* msg); - static bool testRemove(const char* msg); - static bool testTraverseNext(const char* msg); - - void error(const char* msg); - - LogHandlerListUnitTest(); - ~LogHandlerListUnitTest(); -}; -#endif diff --git a/storage/ndb/src/common/logger/listtest/Makefile b/storage/ndb/src/common/logger/listtest/Makefile deleted file mode 100644 index 4688a5e5a2f..00000000000 --- a/storage/ndb/src/common/logger/listtest/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -include .defs.mk - -TYPE := - -BIN_TARGET := listtest -BIN_TARGET_ARCHIVES := portlib logger general - -SOURCES := LogHandlerListUnitTest.cpp - -CCFLAGS_LOC += -I../ -I$(NDB_TOP)/include/logger -I$(NDB_TOP)/include/portlib - -include $(NDB_TOP)/Epilogue.mk - - diff --git a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp deleted file mode 100644 index 60ad595c50f..00000000000 --- a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp +++ /dev/null @@ -1,189 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "LoggerUnitTest.hpp" - -#include -#include -#include - -#include - -#include -#include - -typedef bool (*TESTFUNC)(const char*); -typedef struct -{ - const char* name; - TESTFUNC test; -}Tests; - -static Tests testCases[] = { {"Alert", &LoggerUnitTest::testAlert}, - {"Critical", &LoggerUnitTest::testCritical}, - {"Error", &LoggerUnitTest::testError}, - {"Warning", &LoggerUnitTest::testWarning}, - {"Info", &LoggerUnitTest::testInfo}, - {"Debug", &LoggerUnitTest::testDebug}, - {"Info to Critical", &LoggerUnitTest::testInfoCritical}, - {"All", &LoggerUnitTest::testAll}, - {"Off", &LoggerUnitTest::testOff} - }; - -static Logger logger; -int testFailed = 0; - -NDB_COMMAND(loggertest, "loggertest", "loggertest -console | -file", - "loggertest", 16384) -{ - if (argc < 2) - { - ndbout << "Usage: loggertest -console | -file | -syslog" << endl; - return 0; - } - - if (strcmp(argv[1], "-console") == 0) - { - logger.createConsoleHandler(); - } - else if (strcmp(argv[1], "-file") == 0) - { - logger.createFileHandler(); - //logger.addHandler(new FileLogHandler(argv[2])); - } - else if (strcmp(argv[1], "-syslog") == 0) - { - logger.createSyslogHandler(); - } - - logger.disable(Logger::LL_ALL); - - char str[256]; - int testCount = (sizeof(testCases) / sizeof(Tests)); - ndbout << "Starting " << testCount << " tests..." 
<< endl; - for (int i = 0; i < testCount; i++) - { - ndbout << "-- " << " Test " << i + 1 - << " [" << testCases[i].name << "] --" << endl; - BaseString::snprintf(str, 256, "%s %s %s %d", "Logging ", - testCases[i].name, " message ", i); - if (testCases[i].test(str)) - { - ndbout << "-- Passed --" << endl; - } - else - { - ndbout << "-- Failed -- " << endl; - } - - } - ndbout << endl << "-- " << testCount - testFailed << " passed, " - << testFailed << " failed --" << endl; - - logger.removeAllHandlers(); - - return 0; -} - -bool -LoggerUnitTest::logTo(Logger::LoggerLevel from, Logger::LoggerLevel to, const char* msg) -{ - logger.enable(from, to); - return logTo(from, msg); -} - -bool -LoggerUnitTest::logTo(Logger::LoggerLevel level, const char* msg) -{ - logger.enable(level); - logger.alert(msg); - logger.critical(msg); - logger.error(msg); - logger.warning(msg); - logger.info(msg); - logger.debug(msg); - logger.disable(level); - return true; -} - -bool -LoggerUnitTest::testAll(const char* msg) -{ - return logTo(Logger::LL_ALL, msg); -} - -bool -LoggerUnitTest::testOff(const char* msg) -{ - return logTo(Logger::LL_OFF, msg); - -} - -bool -LoggerUnitTest::testAlert(const char* msg) -{ - return logTo(Logger::LL_ALERT, msg); -} - -bool -LoggerUnitTest::testCritical(const char* msg) -{ - return logTo(Logger::LL_CRITICAL, msg); -} - -bool -LoggerUnitTest::testError(const char* msg) -{ - return logTo(Logger::LL_ERROR, msg); -} - -bool -LoggerUnitTest::testWarning(const char* msg) -{ - return logTo(Logger::LL_WARNING, msg); -} - -bool -LoggerUnitTest::testInfo(const char* msg) -{ - return logTo(Logger::LL_INFO, msg); -} - -bool -LoggerUnitTest::testDebug(const char* msg) -{ - return logTo(Logger::LL_DEBUG, msg); -} - -bool -LoggerUnitTest::testInfoCritical(const char* msg) -{ - return logTo(Logger::LL_CRITICAL, Logger::LL_INFO, msg); -} - -void -LoggerUnitTest::error(const char* msg) -{ - testFailed++; - ndbout << "Test failed: " << msg << endl; -} - -LoggerUnitTest::LoggerUnitTest() -{ -} -LoggerUnitTest::~LoggerUnitTest() -{ -} diff --git a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp b/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp deleted file mode 100644 index 0faab97d2c1..00000000000 --- a/storage/ndb/src/common/logger/loggertest/LoggerUnitTest.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LOGGERUNITTEST_H -#define LOGGERUNITTEST_H - -#include "Logger.hpp" - -/** - * Unit test of Logger. 
- * - * @version #@ $Id: LoggerUnitTest.hpp,v 1.1 2002/03/13 17:55:31 eyualex Exp $ - */ -class LoggerUnitTest -{ -public: - - static bool testAll(const char* msg); - static bool testOff(const char* msg); - static bool testAlert(const char* msg); - static bool testCritical(const char* msg); - static bool testError(const char* msg); - static bool testWarning(const char* msg); - static bool testInfo(const char* msg); - static bool testDebug(const char* msg); - static bool testInfoCritical(const char* msg); - - static bool logTo(Logger::LoggerLevel level, const char* msg); - static bool logTo(Logger::LoggerLevel from, Logger::LoggerLevel to, const char* msg); - - void error(const char* msg); - - LoggerUnitTest(); - ~LoggerUnitTest(); -}; -#endif diff --git a/storage/ndb/src/common/logger/loggertest/Makefile b/storage/ndb/src/common/logger/loggertest/Makefile deleted file mode 100644 index 0aef0ca2bce..00000000000 --- a/storage/ndb/src/common/logger/loggertest/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -include .defs.mk - -TYPE := - -BIN_TARGET := loggertest -BIN_TARGET_ARCHIVES := logger portlib general - -SOURCES := LoggerUnitTest.cpp - -CCFLAGS_LOC += -I$(NDB_TOP)/include/logger \ - -I$(NDB_TOP)/include/util \ - -I$(NDB_TOP)/include/portlib - -include $(NDB_TOP)/Epilogue.mk - - diff --git a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp deleted file mode 100644 index b4409c4ff8e..00000000000 --- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ /dev/null @@ -1,391 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include "MgmtErrorReporter.hpp" - -#include -#include - -#include -#include - -#include - -#include -#include -#include -#include -#include - -//**************************************************************************** -//**************************************************************************** - -ConfigRetriever::ConfigRetriever(const char * _connect_string, - Uint32 version, Uint32 node_type, - const char * _bindaddress, - int timeout_ms) -{ - DBUG_ENTER("ConfigRetriever::ConfigRetriever"); - - m_version = version; - m_node_type = node_type; - _ownNodeId= 0; - m_end_session= true; - - m_handle= ndb_mgm_create_handle(); - - if (m_handle == 0) { - setError(CR_ERROR, "Unable to allocate mgm handle"); - DBUG_VOID_RETURN; - } - - ndb_mgm_set_timeout(m_handle, timeout_ms); - - if (ndb_mgm_set_connectstring(m_handle, _connect_string)) - { - BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); - tmp.append(" : "); - tmp.append(ndb_mgm_get_latest_error_desc(m_handle)); - setError(CR_ERROR, tmp.c_str()); - DBUG_VOID_RETURN; - } - - if (_bindaddress) - { - if (ndb_mgm_set_bindaddress(m_handle, _bindaddress)) - { - setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle)); - DBUG_VOID_RETURN; - } - } - resetError(); - DBUG_VOID_RETURN; -} - -ConfigRetriever::~ConfigRetriever() -{ - DBUG_ENTER("ConfigRetriever::~ConfigRetriever"); - if (m_handle) { - if(m_end_session) - ndb_mgm_end_session(m_handle); - ndb_mgm_disconnect(m_handle); - ndb_mgm_destroy_handle(&m_handle); - } - DBUG_VOID_RETURN; -} - -Uint32 -ConfigRetriever::get_configuration_nodeid() const -{ - return ndb_mgm_get_configuration_nodeid(m_handle); -} - -Uint32 ConfigRetriever::get_mgmd_port() const -{ - return ndb_mgm_get_connected_port(m_handle); -} - -const char *ConfigRetriever::get_mgmd_host() const -{ - return ndb_mgm_get_connected_host(m_handle); -} - -const char *ConfigRetriever::get_connectstring(char *buf, int buf_sz) const -{ - return ndb_mgm_get_connectstring(m_handle, buf, buf_sz); -} - -//**************************************************************************** -//**************************************************************************** - -int -ConfigRetriever::do_connect(int no_retries, - int retry_delay_in_seconds, int verbose) -{ - return - (ndb_mgm_connect(m_handle,no_retries,retry_delay_in_seconds,verbose)==0) ? 
- 0 : -1; -} - -int -ConfigRetriever::disconnect() -{ - return ndb_mgm_disconnect(m_handle); -} - -//**************************************************************************** -//**************************************************************************** -//**************************************************************************** -//**************************************************************************** -struct ndb_mgm_configuration* -ConfigRetriever::getConfig() { - - struct ndb_mgm_configuration * p = 0; - - if(m_handle != 0) - p = getConfig(m_handle); - - if(p == 0) - return 0; - - if(!verifyConfig(p, _ownNodeId)){ - free(p); - p= 0; - } - - return p; -} - -ndb_mgm_configuration * -ConfigRetriever::getConfig(NdbMgmHandle m_handle_arg) -{ - ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle_arg, - m_version); - if(conf == 0) - { - BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle_arg)); - tmp.append(" : "); - tmp.append(ndb_mgm_get_latest_error_desc(m_handle_arg)); - setError(CR_ERROR, tmp.c_str()); - return 0; - } - return conf; -} - -ndb_mgm_configuration * -ConfigRetriever::getConfig(const char * filename){ -#ifndef NDB_WIN32 - - struct stat sbuf; - const int res = stat(filename, &sbuf); - if(res != 0){ - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), "Could not find file: \"%s\"", filename); - setError(CR_ERROR, buf); - return 0; - } - const Uint32 bytes = sbuf.st_size; - - Uint32 * buf2 = new Uint32[bytes/4+1]; - - FILE * f = fopen(filename, "rb"); - if(f == 0){ - setError(CR_ERROR, "Failed to open file"); - delete []buf2; - return 0; - } - Uint32 sz = fread(buf2, 1, bytes, f); - fclose(f); - if(sz != bytes){ - setError(CR_ERROR, "Failed to read file"); - delete []buf2; - return 0; - } - - ConfigValuesFactory cvf; - if(!cvf.unpack(buf2, bytes)){ - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), "Error while unpacking"); - setError(CR_ERROR, buf); - delete []buf2; - return 0; - } - delete [] buf2; - return (ndb_mgm_configuration*)cvf.m_cfg; -#else - return 0; -#endif -} - -void -ConfigRetriever::setError(ErrorType et, const char * s){ - errorString.assign(s ? 
s : ""); - latestErrorType = et; -} - -void -ConfigRetriever::resetError(){ - setError(CR_NO_ERROR,0); -} - -int -ConfigRetriever::hasError() -{ - return latestErrorType != CR_NO_ERROR; -} - -const char * -ConfigRetriever::getErrorString(){ - return errorString.c_str(); -} - -bool -ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 nodeid){ - - char buf[255]; - ndb_mgm_configuration_iterator * it; - it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf, - CFG_SECTION_NODE); - - if(it == 0){ - BaseString::snprintf(buf, 255, "Unable to create config iterator"); - setError(CR_ERROR, buf); - return false; - - } - NdbAutoPtr ptr(it); - - if(ndb_mgm_find(it, CFG_NODE_ID, nodeid) != 0){ - BaseString::snprintf(buf, 255, "Unable to find node with id: %d", nodeid); - setError(CR_ERROR, buf); - return false; - } - - const char * hostname; - if(ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &hostname)){ - BaseString::snprintf(buf, 255, "Unable to get hostname(%d) from config",CFG_NODE_HOST); - setError(CR_ERROR, buf); - return false; - } - - const char * datadir; - if(!ndb_mgm_get_string_parameter(it, CFG_NODE_DATADIR, &datadir)){ - NdbConfig_SetPath(datadir); - } - - if (hostname && hostname[0] != 0 && - !SocketServer::tryBind(0,hostname)) { - BaseString::snprintf(buf, 255, "Config hostname(%s) don't match a local interface," - " tried to bind, error = %d - %s", - hostname, errno, strerror(errno)); - setError(CR_ERROR, buf); - return false; - } - - unsigned int _type; - if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){ - BaseString::snprintf(buf, 255, "Unable to get type of node(%d) from config", - CFG_TYPE_OF_SECTION); - setError(CR_ERROR, buf); - return false; - } - - if(_type != m_node_type){ - const char *type_s, *alias_s, *type_s2, *alias_s2; - alias_s= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)m_node_type, - &type_s); - alias_s2= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)_type, - &type_s2); - BaseString::snprintf(buf, 255, "This node type %s(%s) and config " - "node type %s(%s) don't match for nodeid %d", - alias_s, type_s, alias_s2, type_s2, nodeid); - setError(CR_ERROR, buf); - return false; - } - - /** - * Check hostnames - */ - ndb_mgm_configuration_iterator iter(* conf, CFG_SECTION_CONNECTION); - for(iter.first(); iter.valid(); iter.next()){ - - Uint32 type = CONNECTION_TYPE_TCP + 1; - if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; - if(type != CONNECTION_TYPE_TCP) continue; - - Uint32 nodeId1, nodeId2, remoteNodeId; - if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue; - if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue; - - if(nodeId1 != nodeid && nodeId2 != nodeid) continue; - remoteNodeId = (nodeid == nodeId1 ? 
nodeId2 : nodeId1); - - const char * name; - struct in_addr addr; - BaseString tmp; - if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){ - if(Ndb_getInAddr(&addr, name) != 0){ - tmp.assfmt("Unable to lookup/illegal hostname %s, " - "connection from node %d to node %d", - name, nodeid, remoteNodeId); - setError(CR_ERROR, tmp.c_str()); - return false; - } - } - - if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){ - if(Ndb_getInAddr(&addr, name) != 0){ - tmp.assfmt("Unable to lookup/illegal hostname %s, " - "connection from node %d to node %d", - name, nodeid, remoteNodeId); - setError(CR_ERROR, tmp.c_str()); - return false; - } - } - } - return true; -} - -int -ConfigRetriever::setNodeId(Uint32 nodeid) -{ - return ndb_mgm_set_configuration_nodeid(m_handle, nodeid); -} - -Uint32 -ConfigRetriever::allocNodeId(int no_retries, int retry_delay_in_seconds) -{ - int res; - _ownNodeId= 0; - if(m_handle != 0) - { - while (1) - { - if(!ndb_mgm_is_connected(m_handle)) - if(!ndb_mgm_connect(m_handle, 0, 0, 0)) - goto next; - - res= ndb_mgm_alloc_nodeid(m_handle, m_version, m_node_type, - no_retries == 0 /* only log last retry */); - if(res >= 0) - return _ownNodeId= (Uint32)res; - - next: - int error = ndb_mgm_get_latest_error(m_handle); - if (no_retries == 0 || error == NDB_MGM_ALLOCID_CONFIG_MISMATCH) - break; - no_retries--; - NdbSleep_SecSleep(retry_delay_in_seconds); - } - BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); - tmp.append(" : "); - tmp.append(ndb_mgm_get_latest_error_desc(m_handle)); - setError(CR_ERROR, tmp.c_str()); - } else - setError(CR_ERROR, "management server handle not initialized"); - return 0; -} diff --git a/storage/ndb/src/common/mgmcommon/IPCConfig.cpp b/storage/ndb/src/common/mgmcommon/IPCConfig.cpp deleted file mode 100644 index 31444953e70..00000000000 --- a/storage/ndb/src/common/mgmcommon/IPCConfig.cpp +++ /dev/null @@ -1,370 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - -#if defined DEBUG_TRANSPORTER -#define DEBUG(t) ndbout << __FILE__ << ":" << __LINE__ << ":" << t << endl; -#else -#define DEBUG(t) -#endif - -IPCConfig::IPCConfig(Properties * p) -{ - theNoOfRemoteNodes = 0; - the_ownId = 0; - if(p != 0) - props = new Properties(* p); - else - props = 0; -} - - -IPCConfig::~IPCConfig() -{ - if(props != 0){ - delete props; - } -} - -int -IPCConfig::init(){ - Uint32 nodeId; - - if(props == 0) return -1; - if(!props->get("LocalNodeId", &nodeId)) { - DEBUG( "Did not find local node id." ); - return -1; - } - the_ownId = nodeId; - - Uint32 noOfConnections; - if(!props->get("NoOfConnections", &noOfConnections)) { - DEBUG( "Did not find noOfConnections." 
); - return -1; - } - - for(Uint32 i = 0; iget("Connection", i, &tmp)) { - DEBUG( "Did not find Connection." ); - return -1; - } - if(!tmp->get("NodeId1", &node1)) { - DEBUG( "Did not find NodeId1." ); - return -1; - } - if(!tmp->get("NodeId2", &node2)) { - DEBUG( "Did not find NodeId2." ); - return -1; - } - - if(node1 == the_ownId && node2 != the_ownId) - if(!addRemoteNodeId(node2)) { - DEBUG( "addRemoteNodeId(node2) failed." ); - return -1; - } - - if(node1 != the_ownId && node2 == the_ownId) - if(!addRemoteNodeId(node1)) { - DEBUG( "addRemoteNodeId(node2) failed." ); - return -1; - } - } - return 0; -} - -bool -IPCConfig::addRemoteNodeId(NodeId nodeId){ - for(int i = 0; i nodeId){ - if(theRemoteNodeIds[i] < returnNode){ - returnNode = theRemoteNodeIds[i]; - } - } - if(returnNode == (MAX_NODES + 1)) - return false; - nodeId = returnNode; - return true; -} - - -Uint32 -IPCConfig::getREPHBFrequency(NodeId id) const { - const Properties * tmp; - Uint32 out; - - /** - * Todo: Fix correct heartbeat - */ - if (!props->get("Node", id, &tmp) || - !tmp->get("HeartbeatIntervalRepRep", &out)) { - DEBUG("Illegal Node or HeartbeatIntervalRepRep in config."); - out = 10000; - } - - return out; -} - -const char* -IPCConfig::getNodeType(NodeId id) const { - const char * out; - const Properties * tmp; - - if (!props->get("Node", id, &tmp) || !tmp->get("Type", &out)) { - DEBUG("Illegal Node or NodeType in config."); - out = "Unknown"; - } - - return out; -} - -#include -Uint32 -IPCConfig::configureTransporters(Uint32 nodeId, - const class ndb_mgm_configuration & config, - class TransporterRegistry & tr){ - TransporterConfiguration conf; - - DBUG_ENTER("IPCConfig::configureTransporters"); - - /** - * Iterate over all MGM's an construct a connectstring - * create mgm_handle and give it to the Transporter Registry - */ - { - const char *separator= ""; - BaseString connect_string; - ndb_mgm_configuration_iterator iter(config, CFG_SECTION_NODE); - for(iter.first(); iter.valid(); iter.next()) - { - Uint32 type; - if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; - if(type != NODE_TYPE_MGM) continue; - const char* hostname; - Uint32 port; - if(iter.get(CFG_NODE_HOST, &hostname)) continue; - if( strlen(hostname) == 0 ) continue; - if(iter.get(CFG_MGM_PORT, &port)) continue; - connect_string.appfmt("%s%s:%u",separator,hostname,port); - separator= ","; - } - NdbMgmHandle h= ndb_mgm_create_handle(); - if ( h && connect_string.length() > 0 ) - { - ndb_mgm_set_connectstring(h,connect_string.c_str()); - tr.set_mgm_handle(h); - } - } - - Uint32 noOfTransportersCreated= 0; - ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION); - - for(iter.first(); iter.valid(); iter.next()){ - - Uint32 nodeId1, nodeId2, remoteNodeId; - const char * remoteHostName= 0, * localHostName= 0; - if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue; - if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue; - - if(nodeId1 != nodeId && nodeId2 != nodeId) continue; - remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1); - - { - const char * host1= 0, * host2= 0; - iter.get(CFG_CONNECTION_HOSTNAME_1, &host1); - iter.get(CFG_CONNECTION_HOSTNAME_2, &host2); - localHostName = (nodeId == nodeId1 ? host1 : host2); - remoteHostName = (nodeId == nodeId1 ? 
host2 : host1); - } - - Uint32 sendSignalId = 1; - Uint32 checksum = 1; - if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue; - if(iter.get(CFG_CONNECTION_CHECKSUM, &checksum)) continue; - - Uint32 type = ~0; - if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue; - - Uint32 server_port= 0; - if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break; - - Uint32 nodeIdServer= 0; - if(iter.get(CFG_CONNECTION_NODE_ID_SERVER, &nodeIdServer)) break; - - /* - We check the node type. - */ - Uint32 node1type, node2type; - ndb_mgm_configuration_iterator node1iter(config, CFG_SECTION_NODE); - ndb_mgm_configuration_iterator node2iter(config, CFG_SECTION_NODE); - node1iter.find(CFG_NODE_ID,nodeId1); - node2iter.find(CFG_NODE_ID,nodeId2); - node1iter.get(CFG_TYPE_OF_SECTION,&node1type); - node2iter.get(CFG_TYPE_OF_SECTION,&node2type); - - if(node1type==NODE_TYPE_MGM || node2type==NODE_TYPE_MGM) - conf.isMgmConnection= true; - else - conf.isMgmConnection= false; - - if (nodeId == nodeIdServer && !conf.isMgmConnection) { - tr.add_transporter_interface(remoteNodeId, localHostName, server_port); - } - - DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d", - nodeId, remoteNodeId, server_port, sendSignalId, checksum)); - /* - This may be a dynamic port. It depends on when we're getting - our configuration. If we've been restarted, we'll be getting - a configuration with our old dynamic port in it, hence the number - here is negative (and we try the old port number first). - - On a first-run, server_port will be zero (with dynamic ports) - - If we're not using dynamic ports, we don't do anything. - */ - - conf.localNodeId = nodeId; - conf.remoteNodeId = remoteNodeId; - conf.checksum = checksum; - conf.signalId = sendSignalId; - conf.s_port = server_port; - conf.localHostName = localHostName; - conf.remoteHostName = remoteHostName; - conf.serverNodeId = nodeIdServer; - - switch(type){ - case CONNECTION_TYPE_SHM: - if(iter.get(CFG_SHM_KEY, &conf.shm.shmKey)) break; - if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shm.shmSize)) break; - - Uint32 tmp; - if(iter.get(CFG_SHM_SIGNUM, &tmp)) break; - conf.shm.signum= tmp; - - if(!tr.createSHMTransporter(&conf)){ - DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d", - conf.localNodeId, conf.remoteNodeId)); - ndbout << "Failed to create SHM Transporter from: " - << conf.localNodeId << " to: " << conf.remoteNodeId << endl; - } else { - noOfTransportersCreated++; - } - DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, " - "buf size = %d", conf.shm.shmKey, conf.shm.shmSize)); - - break; - - case CONNECTION_TYPE_SCI: - if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sci.sendLimit)) break; - if(iter.get(CFG_SCI_BUFFER_MEM, &conf.sci.bufferSize)) break; - if (nodeId == nodeId1) { - if(iter.get(CFG_SCI_HOST2_ID_0, &conf.sci.remoteSciNodeId0)) break; - if(iter.get(CFG_SCI_HOST2_ID_1, &conf.sci.remoteSciNodeId1)) break; - } else { - if(iter.get(CFG_SCI_HOST1_ID_0, &conf.sci.remoteSciNodeId0)) break; - if(iter.get(CFG_SCI_HOST1_ID_1, &conf.sci.remoteSciNodeId1)) break; - } - if (conf.sci.remoteSciNodeId1 == 0) { - conf.sci.nLocalAdapters = 1; - } else { - conf.sci.nLocalAdapters = 2; - } - if(!tr.createSCITransporter(&conf)){ - DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d", - conf.localNodeId, conf.remoteNodeId)); - ndbout << "Failed to create SCI Transporter from: " - << conf.localNodeId << " to: " << conf.remoteNodeId << endl; - } else { - DBUG_PRINT("info", ("Created 
SCI Transporter: Adapters = %d, " - "remote SCI node id %d", - conf.sci.nLocalAdapters, conf.sci.remoteSciNodeId0)); - DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, " - "buf size = %d", conf.localHostName, - conf.remoteHostName, conf.sci.sendLimit, - conf.sci.bufferSize)); - if (conf.sci.nLocalAdapters > 1) { - DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, " - "second remote SCI node id = %d", - conf.sci.remoteSciNodeId1)); - } - noOfTransportersCreated++; - continue; - } - break; - - case CONNECTION_TYPE_TCP: - if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.tcp.sendBufferSize)) break; - if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.tcp.maxReceiveSize)) break; - - const char * proxy; - if (!iter.get(CFG_TCP_PROXY, &proxy)) { - if (strlen(proxy) > 0 && nodeId2 == nodeId) { - // TODO handle host:port - conf.s_port = atoi(proxy); - } - } - - if(!tr.createTCPTransporter(&conf)){ - ndbout << "Failed to create TCP Transporter from: " - << nodeId << " to: " << remoteNodeId << endl; - } else { - noOfTransportersCreated++; - } - DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, " - "maxReceiveSize = %d", conf.tcp.sendBufferSize, - conf.tcp.maxReceiveSize)); - break; - default: - ndbout << "Unknown transporter type from: " << nodeId << - " to: " << remoteNodeId << endl; - break; - } // switch - } // for - - DBUG_RETURN(noOfTransportersCreated); -} - diff --git a/storage/ndb/src/common/mgmcommon/Makefile.am b/storage/ndb/src/common/mgmcommon/Makefile.am deleted file mode 100644 index 4dc2d367550..00000000000 --- a/storage/ndb/src/common/mgmcommon/Makefile.am +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = libmgmsrvcommon.la - -libmgmsrvcommon_la_SOURCES = \ - ConfigRetriever.cpp \ - IPCConfig.cpp - -INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/mgmapi -I$(top_srcdir)/storage/ndb/src/mgmsrv - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am -include $(top_srcdir)/storage/ndb/config/type_mgmapiclient.mk.am - -windoze-dsp: libmgmsrvcommon.dsp - -libmgmsrvcommon.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libmgmsrvcommon_la_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/mgmcommon/printConfig/Makefile b/storage/ndb/src/common/mgmcommon/printConfig/Makefile deleted file mode 100644 index 77e8943e2c6..00000000000 --- a/storage/ndb/src/common/mgmcommon/printConfig/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -include .defs.mk - -TYPE := ndbapi mgmapiclient - -BIN_TARGET := printConfig -BIN_TARGET_ARCHIVES := general portlib - -CCFLAGS_LOC += -I.. - -SOURCES := printConfig.cpp ../ConfigRetriever.cpp - -SOURCES.c := ../NdbConfig.c ../LocalConfig.c - -CFLAGS_printConfig.cpp := -I$(call fixpath,$(NDB_TOP)/src/mgmapi) - -include $(NDB_TOP)/Epilogue.mk diff --git a/storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp b/storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp deleted file mode 100644 index feabc2168ac..00000000000 --- a/storage/ndb/src/common/mgmcommon/printConfig/printConfig.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include -#include -#include -#include - -void usage(const char * prg){ - ndbout << "Usage " << prg - << " host []" << endl; - - char buf[255]; - for(unsigned i = 0; i []" - << endl; -} - -NDB_COMMAND(printConfig, - "printConfig", "printConfig", "Prints configuration", 16384){ - if(argc < 4){ - usage(argv[0]); - return 0; - } - if(strcmp("file", argv[1]) != 0 && strcmp("host", argv[1]) != 0){ - usage(argv[0]); - return 0; - } - - if(strcmp("host", argv[1]) == 0 && argc < 5){ - usage(argv[0]); - return 0; - } - - ConfigRetriever c; - struct ndb_mgm_configuration * p = 0; - - if(strcmp("host", argv[1]) == 0){ - int verId = 0; - if(argc > 5) - verId = atoi(argv[5]); - - ndbout << "Getting config from: " << argv[2] << ":" << atoi(argv[3]) - << " NodeId =" << atoi(argv[4]) - << " VersionId = " << verId << endl; - - p = c.getConfig(argv[2], - atoi(argv[3]), - verId); - } else if (strcmp("file", argv[1]) == 0){ - int verId = 0; - if(argc > 4) - verId = atoi(argv[4]); - - ndbout << "Getting config from: " << argv[2] - << " NodeId =" << atoi(argv[3]) - << " VersionId = " << verId << endl; - - p = c.getConfig(argv[2], atoi(argv[3]), verId); - } - - if(p != 0){ - // - free(p); - } else { - ndbout << "Configuration not found: " << c.getErrorString() << endl; - } - - return 0; -} diff --git a/storage/ndb/src/common/portlib/Makefile.am b/storage/ndb/src/common/portlib/Makefile.am deleted file mode 100644 index e474764e69f..00000000000 --- a/storage/ndb/src/common/portlib/Makefile.am +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2004, 2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = libportlib.la - -libportlib_la_SOURCES = \ - NdbCondition.c NdbMutex.c NdbSleep.c NdbTick.c \ - NdbEnv.c NdbThread.c NdbHost.c NdbTCP.cpp \ - NdbDaemon.c NdbMem.c \ - NdbConfig.c - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_util.mk.am - -EXTRA_PROGRAMS = memtest PortLibTest munmaptest - -PortLibTest_SOURCES = NdbPortLibTest.cpp -munmaptest_SOURCES = munmaptest.cpp - -WIN_src = win32/NdbCondition.c \ - win32/NdbDaemon.c \ - win32/NdbEnv.c \ - win32/NdbHost.c \ - win32/NdbMem.c \ - win32/NdbMutex.c \ - win32/NdbSleep.c \ - win32/NdbTCP.c \ - win32/NdbThread.c \ - win32/NdbTick.c - -windoze-dsp: libportlib.dsp - -libportlib.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(WIN_src) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/portlib/NdbCondition.c b/storage/ndb/src/common/portlib/NdbCondition.c deleted file mode 100644 index 451e5f8ac8c..00000000000 --- a/storage/ndb/src/common/portlib/NdbCondition.c +++ /dev/null @@ -1,142 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include -#include -#include - -struct NdbCondition -{ - pthread_cond_t cond; -}; - - - -struct NdbCondition* -NdbCondition_Create(void) -{ - struct NdbCondition* tmpCond; - int result; - - tmpCond = (struct NdbCondition*)NdbMem_Allocate(sizeof(struct NdbCondition)); - - if (tmpCond == NULL) - return NULL; - - result = pthread_cond_init(&tmpCond->cond, NULL); - - assert(result==0); - return tmpCond; -} - - - -int -NdbCondition_Wait(struct NdbCondition* p_cond, - NdbMutex* p_mutex) -{ - int result; - - if (p_cond == NULL || p_mutex == NULL) - return 1; - - result = pthread_cond_wait(&p_cond->cond, p_mutex); - - return result; -} - -int -NdbCondition_WaitTimeout(struct NdbCondition* p_cond, - NdbMutex* p_mutex, - int msecs){ - int result; - struct timespec abstime; - int secs = 0; - - if (p_cond == NULL || p_mutex == NULL) - return 1; - -#ifdef HAVE_CLOCK_GETTIME - clock_gettime(CLOCK_REALTIME, &abstime); -#else - { - struct timeval tick_time; - gettimeofday(&tick_time, 0); - abstime.tv_sec = tick_time.tv_sec; - abstime.tv_nsec = tick_time.tv_usec * 1000; - } -#endif - - if(msecs >= 1000){ - secs = msecs / 1000; - msecs = msecs % 1000; - } - - abstime.tv_sec += secs; - abstime.tv_nsec += msecs * 1000000; - if (abstime.tv_nsec >= 1000000000) { - abstime.tv_sec += 1; - abstime.tv_nsec -= 1000000000; - } - - result = pthread_cond_timedwait(&p_cond->cond, p_mutex, &abstime); - - return result; -} - -int -NdbCondition_Signal(struct NdbCondition* p_cond){ - int result; - - if (p_cond == NULL) - return 1; - - result = pthread_cond_signal(&p_cond->cond); - - return result; -} - - -int NdbCondition_Broadcast(struct NdbCondition* p_cond) -{ - int result; - - if (p_cond == NULL) - return 1; - - result = pthread_cond_broadcast(&p_cond->cond); - - return result; -} - - -int NdbCondition_Destroy(struct NdbCondition* p_cond) -{ - int result; - - if (p_cond == NULL) - return 1; - - result = pthread_cond_destroy(&p_cond->cond); - free(p_cond); - - return 0; -} - diff --git a/storage/ndb/src/common/portlib/NdbConfig.c b/storage/ndb/src/common/portlib/NdbConfig.c deleted file mode 100644 index da4fc997750..00000000000 --- a/storage/ndb/src/common/portlib/NdbConfig.c +++ /dev/null @@ -1,145 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include -#include - -static const char *datadir_path= 0; - -const char * -NdbConfig_get_path(int *_len) -{ - const char *path= NdbEnv_GetEnv("NDB_HOME", 0, 0); - int path_len= 0; - if (path) - path_len= strlen(path); - if (path_len == 0 && datadir_path) { - path= datadir_path; - path_len= strlen(path); - } - if (path_len == 0) { - path= "."; - path_len= strlen(path); - } - if (_len) - *_len= path_len; - return path; -} - -static char* -NdbConfig_AllocHomePath(int _len) -{ - int path_len; - const char *path= NdbConfig_get_path(&path_len); - int len= _len+path_len; - char *buf= NdbMem_Allocate(len); - basestring_snprintf(buf, len, "%s%s", path, DIR_SEPARATOR); - return buf; -} - -void -NdbConfig_SetPath(const char* path){ - datadir_path= path; -} - -char* -NdbConfig_NdbCfgName(int with_ndb_home){ - char *buf; - int len= 0; - - if (with_ndb_home) { - buf= NdbConfig_AllocHomePath(PATH_MAX); - len= strlen(buf); - } else - buf= NdbMem_Allocate(PATH_MAX); - basestring_snprintf(buf+len, PATH_MAX, "Ndb.cfg"); - return buf; -} - -static -char *get_prefix_buf(int len, int node_id) -{ - char tmp_buf[sizeof("ndb_pid#############")+1]; - char *buf; - if (node_id > 0) - basestring_snprintf(tmp_buf, sizeof(tmp_buf), "ndb_%u", node_id); - else - basestring_snprintf(tmp_buf, sizeof(tmp_buf), "ndb_pid%u", getpid()); - tmp_buf[sizeof(tmp_buf)-1]= 0; - - buf= NdbConfig_AllocHomePath(len+strlen(tmp_buf)); - strcat(buf, tmp_buf); - return buf; -} - -char* -NdbConfig_ErrorFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_error.log"); - return buf; -} - -char* -NdbConfig_ClusterLogFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_cluster.log"); - return buf; -} - -char* -NdbConfig_SignalLogFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_signal.log"); - return buf; -} - -char* -NdbConfig_TraceFileName(int node_id, int file_no){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_trace.log.%u", file_no); - return buf; -} - -char* -NdbConfig_NextTraceFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_trace.log.next"); - return buf; -} - -char* -NdbConfig_PidFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, ".pid"); - return buf; -} - -char* -NdbConfig_StdoutFileName(int node_id){ - char *buf= get_prefix_buf(PATH_MAX, node_id); - int len= strlen(buf); - basestring_snprintf(buf+len, PATH_MAX, "_out.log"); - return buf; -} diff --git a/storage/ndb/src/common/portlib/NdbDaemon.c b/storage/ndb/src/common/portlib/NdbDaemon.c deleted file mode 100644 index 22d59925db7..00000000000 --- a/storage/ndb/src/common/portlib/NdbDaemon.c +++ /dev/null @@ -1,171 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free 
Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbDaemon.h" - -#define NdbDaemon_ErrorSize 500 -long NdbDaemon_DaemonPid = 0; -int NdbDaemon_ErrorCode = 0; -char NdbDaemon_ErrorText[NdbDaemon_ErrorSize] = ""; - -int -NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags) -{ - int lockfd = -1, logfd = -1, n; - char buf[64]; - - (void)flags; /* remove warning for unused parameter */ - - /* Check that we have write access to lock file */ - assert(lockfile != NULL); - lockfd = open(lockfile, O_CREAT|O_RDWR, 0644); - if (lockfd == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: open for write failed: %s", lockfile, strerror(errno)); - return -1; - } - /* Read any old pid from lock file */ - buf[0] = 0; - n = read(lockfd, buf, sizeof(buf)); - if (n < 0) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: read failed: %s", lockfile, strerror(errno)); - return -1; - } - NdbDaemon_DaemonPid = atol(buf); - if (lseek(lockfd, 0, SEEK_SET) == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: lseek failed: %s", lockfile, strerror(errno)); - return -1; - } -#ifdef F_TLOCK - /* Test for lock before becoming daemon */ - if (lockf(lockfd, F_TLOCK, 0) == -1) - { - if (errno == EACCES || errno == EAGAIN) { /* results may vary */ - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: already locked by pid=%ld", lockfile, NdbDaemon_DaemonPid); - return -1; - } - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: lock test failed: %s", lockfile, strerror(errno)); - return -1; - } -#endif - /* Test open log file before becoming daemon */ - if (logfile != NULL) { - logfd = open(logfile, O_CREAT|O_WRONLY|O_APPEND, 0644); - if (logfd == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: open for write failed: %s", logfile, strerror(errno)); - return -1; - } - } -#ifdef F_TLOCK - if (lockf(lockfd, F_ULOCK, 0) == -1) - { - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: fail to unlock", lockfile); - return -1; - } -#endif - - /* Fork */ - n = fork(); - if (n == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "fork failed: %s", strerror(errno)); - return -1; - } - /* Exit if we are the parent */ - if (n != 0) { - exit(0); - } - /* Running in child process */ - NdbDaemon_DaemonPid = getpid(); - /* Lock the lock file (likely to succeed due to test above) */ - if (lockf(lockfd, F_LOCK, 0) == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: lock failed: %s", lockfile, strerror(errno)); - return -1; - } - /* Become process group leader */ - if (setsid() == -1) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "setsid failed: %s", strerror(errno)); - return -1; - } - /* Write pid to lock file */ - if (ftruncate(lockfd, 0) == -1) { - NdbDaemon_ErrorCode = errno; - 
snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: ftruncate failed: %s", lockfile, strerror(errno)); - return -1; - } - sprintf(buf, "%ld\n", NdbDaemon_DaemonPid); - n = strlen(buf); - if (write(lockfd, buf, n) != n) { - NdbDaemon_ErrorCode = errno; - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "%s: write failed: %s", lockfile, strerror(errno)); - return -1; - } - /* Do input/output redirections (assume fd 0,1,2 not in use) */ - close(0); - open("/dev/null", O_RDONLY); - if (logfile != 0) { - dup2(logfd, 1); - dup2(logfd, 2); - close(logfd); - } - /* Success */ - return 0; -} - -#if 0 -int -NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags) -{ - /* Fail */ - snprintf(NdbDaemon_ErrorText, NdbDaemon_ErrorSize, - "Daemon mode not implemented"); - return -1; -} -#endif - -#ifdef NDB_DAEMON_TEST - -int -main() -{ - if (NdbDaemon_Make("test.pid", "test.log", 0) == -1) { - fprintf(stderr, "NdbDaemon_Make: %s\n", NdbDaemon_ErrorText); - return 1; - } - sleep(10); - return 0; -} - -#endif diff --git a/storage/ndb/src/common/portlib/NdbEnv.c b/storage/ndb/src/common/portlib/NdbEnv.c deleted file mode 100644 index 43a06d8352d..00000000000 --- a/storage/ndb/src/common/portlib/NdbEnv.c +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include - -const char* NdbEnv_GetEnv(const char* name, char * buf, int buflen) -{ - char* p = NULL; - p = getenv(name); - - if (p != NULL && buf != NULL){ - strncpy(buf, p, buflen); - buf[buflen-1] = 0; - } - return p; - -} - diff --git a/storage/ndb/src/common/portlib/NdbHost.c b/storage/ndb/src/common/portlib/NdbHost.c deleted file mode 100644 index 4ac92d121c1..00000000000 --- a/storage/ndb/src/common/portlib/NdbHost.c +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include "NdbHost.h" - -int NdbHost_GetHostName(char* buf) -{ - if (gethostname(buf, MAXHOSTNAMELEN) != 0) - { - return -1; - } - return 0; -} - -int NdbHost_GetProcessId(void) -{ - return getpid(); -} - diff --git a/storage/ndb/src/common/portlib/NdbMem.c b/storage/ndb/src/common/portlib/NdbMem.c deleted file mode 100644 index c89e0747a8c..00000000000 --- a/storage/ndb/src/common/portlib/NdbMem.c +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include - -void NdbMem_Create() -{ - /* Do nothing */ - return; -} - -void NdbMem_Destroy() -{ - /* Do nothing */ - return; -} - - -void* NdbMem_Allocate(size_t size) -{ - void* mem_allocated; - assert(size > 0); - mem_allocated= (void*)malloc(size); - return mem_allocated; -} - -void* NdbMem_AllocateAlign(size_t size, size_t alignment) -{ - (void)alignment; /* remove warning for unused parameter */ - /* - return (void*)memalign(alignment, size); - TEMP fix - */ - return (void*)malloc(size); -} - - -void NdbMem_Free(void* ptr) -{ - free(ptr); -} - - -int NdbMem_MemLockAll(int i){ - if (i == 1) - { -#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) && defined (MCL_FUTURE) - return mlockall(MCL_CURRENT | MCL_FUTURE); -#else - return -1; -#endif - } -#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) - return mlockall(MCL_CURRENT); -#else - return -1; -#endif -} - -int NdbMem_MemUnlockAll(){ -#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) - return munlockall(); -#else - return -1; -#endif -} - diff --git a/storage/ndb/src/common/portlib/NdbMutex.c b/storage/ndb/src/common/portlib/NdbMutex.c deleted file mode 100644 index 77a3e55e0b0..00000000000 --- a/storage/ndb/src/common/portlib/NdbMutex.c +++ /dev/null @@ -1,91 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include -#include - -NdbMutex* NdbMutex_Create(void) -{ - NdbMutex* pNdbMutex; - int result; - - pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex)); - - if (pNdbMutex == NULL) - return NULL; - - result = pthread_mutex_init(pNdbMutex, NULL); - assert(result == 0); - - return pNdbMutex; -} - - -int NdbMutex_Destroy(NdbMutex* p_mutex) -{ - int result; - - if (p_mutex == NULL) - return -1; - - result = pthread_mutex_destroy(p_mutex); - - NdbMem_Free(p_mutex); - - return result; -} - - -int NdbMutex_Lock(NdbMutex* p_mutex) -{ - int result; - - if (p_mutex == NULL) - return -1; - - result = pthread_mutex_lock(p_mutex); - - return result; -} - - -int NdbMutex_Unlock(NdbMutex* p_mutex) -{ - int result; - - if (p_mutex == NULL) - return -1; - - result = pthread_mutex_unlock(p_mutex); - - return result; -} - - -int NdbMutex_Trylock(NdbMutex* p_mutex) -{ - int result = -1; - - if (p_mutex != NULL) { - result = pthread_mutex_trylock(p_mutex); - } - - return result; -} - diff --git a/storage/ndb/src/common/portlib/NdbPortLibTest.cpp b/storage/ndb/src/common/portlib/NdbPortLibTest.cpp deleted file mode 100644 index 87f9246e171..00000000000 --- a/storage/ndb/src/common/portlib/NdbPortLibTest.cpp +++ /dev/null @@ -1,603 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/** - * NdbPortLibTest.cpp - * Test the functionality of portlib - * TODO - Add tests for NdbMem - */ - -#include - -#include "NdbOut.hpp" -#include "NdbThread.h" -#include "NdbMutex.h" -#include "NdbCondition.h" -#include "NdbSleep.h" -#include "NdbTick.h" -#include "NdbEnv.h" -#include "NdbHost.h" -#include "NdbMain.h" - -int TestHasFailed; -int verbose = 0; - -static void fail(const char* test, const char* cause) -{ - TestHasFailed = 1; - ndbout << test << " failed, " << cause << endl; -} - -// test 1 variables and funcs - -extern "C" void* thread1func(void* arg) -{ - int arg1; - int returnvalue = 8; - arg1 = *(int*)arg; - ndbout << "thread1: thread1func called with arg = " << arg1 << endl; - - // delay(1000); - if (arg1 != 7) - fail("TEST1", "Wrong arg"); - - return returnvalue; -} - -// test 2 variables and funcs - -NdbMutex* test2mutex; - -extern "C" void* test2func(void* arg) -{ - - int arg1; - arg1 = *(int*)arg; - ndbout << "thread" << arg1 << " started in test2func" << endl; - - if (NdbMutex_Lock(test2mutex) != 0) - fail("TEST2", "Failed to lock mutex"); - - ndbout << "thread" << arg1 << ", test2func " << endl; - - if (NdbMutex_Unlock(test2mutex) != 0) - fail("TEST2", "Failed to unlock mutex"); - - int returnvalue = arg1; - return returnvalue; -} - - -// test 3 and 7 variables and funcs - -NdbMutex* testmutex; -NdbCondition* testcond; -int testthreadsdone; - -extern "C" void* testfunc(void* arg) -{ - int tmpVar; - int threadno; - int result; - - threadno = *(int*)arg; - - ndbout << "Thread" << threadno << " started in testfunc" << endl; - do - { - - if ((threadno % 2) == 0) - result = NdbSleep_SecSleep(1); - else - result = NdbSleep_MilliSleep(100); - - if (result != 0) - fail("TEST3", "Wrong result from sleep function"); - - if (NdbMutex_Lock(testmutex) != 0) - fail("TEST3", "Wrong result from NdbMutex_Lock function"); - - ndbout << "thread" << threadno << ", testfunc " << endl; - testthreadsdone++; - tmpVar = testthreadsdone; - - if (NdbCondition_Signal(testcond) != 0) - fail("TEST3", "Wrong result from NdbCondition_Signal function"); - - if (NdbMutex_Unlock(testmutex) != 0) - fail("TEST3", "Wrong result from NdbMutex_Unlock function"); - - } - while(tmpVar<100); - - return 0; -} - -extern "C" void* testTryLockfunc(void* arg) -{ - int tmpVar = 0; - int threadno; - int result; - - threadno = *(int*)arg; - - ndbout << "Thread" << threadno << " started" << endl; - do - { - - if ((threadno % 2) == 0) - result = NdbSleep_SecSleep(1); - else - result = NdbSleep_MilliSleep(100); - - if (result != 0) - fail("TEST3", "Wrong result from sleep function"); - - if (NdbMutex_Trylock(testmutex) == 0){ - - ndbout << "thread" << threadno << ", testTryLockfunc locked" << endl; - testthreadsdone++; - tmpVar = testthreadsdone; - - if (NdbCondition_Signal(testcond) != 0) - fail("TEST3", "Wrong result from NdbCondition_Signal function"); - - if (NdbMutex_Unlock(testmutex) != 0) - fail("TEST3", "Wrong result from NdbMutex_Unlock function"); - } - - } - while(tmpVar<100); - - return 0; -} - - - -void testMicros(int count); -Uint64 time_diff(Uint64 s1, Uint64 s2, Uint32 m1, Uint32 m2); - -NDB_COMMAND(PortLibTest, "portlibtest", "portlibtest", "Test the portable function layer", 4096){ - - ndbout << "= TESTING ARGUMENT PASSING ============" << endl; - ndbout << "ARGC: " << argc << endl; - 
for(int i = 1; i < argc; i++){ - ndbout << " ARGV"<= m1) - diff += (m2 - m1); - else { - diff += m2; - diff -= m1; - } - - // if(0) - // ndbout("(s1,m1) = (%d, %d) (s2,m2) = (%d, %d) -> diff = %d\n", - // (Uint32)s1,m1,(Uint32)s2,m2, (Uint32)diff); - - return diff; -}; - -void -testMicros(int count){ - Uint32 avg = 0; - Uint32 sum2 = 0; - - for(int i = 0; i (r*1000)){ - avg += (m - (r*1000)); - sum2 += (m - (r*1000)) * (m - (r*1000)); - } else { - avg += ((r*1000) - m); - sum2 += ((r*1000) - m) * ((r*1000) - m); - } -#if 0 - m /= 1000; - if(m > r && ((m - r) > 10)){ - ndbout << "Difference to big: " << (m - r) << " - Test failed" << endl; - TestHasFailed = 1; - } - if(m < r && ((r - m) > 10)){ - ndbout << "Difference to big: " << (r - m) << " - Test failed" << endl; - TestHasFailed = 1; - } -#endif - } - - Uint32 dev = (avg * avg - sum2) / count; dev /= count; - avg /= count; - - Uint32 t = 0; - while((t*t) -#include -#include - -int -NdbSleep_MilliSleep(int milliseconds){ - my_sleep(milliseconds*1000); - return 0; -#if 0 - int result = 0; - struct timespec sleeptime; - sleeptime.tv_sec = milliseconds / 1000; - sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000; - result = nanosleep(&sleeptime, NULL); - return result; -#endif -} - -int -NdbSleep_SecSleep(int seconds){ - int result = 0; - result = sleep(seconds); - return result; -} - - diff --git a/storage/ndb/src/common/portlib/NdbTCP.cpp b/storage/ndb/src/common/portlib/NdbTCP.cpp deleted file mode 100644 index d2bfa96e009..00000000000 --- a/storage/ndb/src/common/portlib/NdbTCP.cpp +++ /dev/null @@ -1,132 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include - -extern "C" -int -Ndb_getInAddr(struct in_addr * dst, const char *address) { - // DBUG_ENTER("Ndb_getInAddr"); - { - int tmp_errno; - struct hostent tmp_hostent, *hp; - char buff[GETHOSTBYNAME_BUFF_SIZE]; - hp = my_gethostbyname_r(address,&tmp_hostent,buff,sizeof(buff), - &tmp_errno); - if (hp) - { - memcpy(dst, hp->h_addr, min(sizeof(*dst), (size_t) hp->h_length)); - my_gethostbyname_r_free(); - return 0; //DBUG_RETURN(0); - } - my_gethostbyname_r_free(); - } - /* Try it as aaa.bbb.ccc.ddd. 
*/ - dst->s_addr = inet_addr(address); - if (dst->s_addr != -#ifdef INADDR_NONE - INADDR_NONE -#else - -1 -#endif - ) - { - return 0; //DBUG_RETURN(0); - } - // DBUG_PRINT("error",("inet_addr(%s) - %d - %s", - // address, errno, strerror(errno))); - return -1; //DBUG_RETURN(-1); -} - -#ifndef DBUG_OFF -extern "C" -int NDB_CLOSE_SOCKET(int fd) -{ - DBUG_PRINT("info", ("NDB_CLOSE_SOCKET(%d)", fd)); - return _NDB_CLOSE_SOCKET(fd); -} -#endif - -#if 0 -int -Ndb_getInAddr(struct in_addr * dst, const char *address) { - struct hostent host, * hostPtr; - char buf[1024]; - int h_errno; - hostPtr = gethostbyname_r(address, &host, &buf[0], 1024, &h_errno); - if (hostPtr != NULL) { - dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr; - return 0; - } - - /* Try it as aaa.bbb.ccc.ddd. */ - dst->s_addr = inet_addr(address); - if (dst->s_addr != -1) { - return 0; - } - return -1; -} -#endif - -int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock) -{ -#ifdef HAVE_POLL - struct pollfd pfd[1]; - int r; - - pfd[0].fd= sock; - pfd[0].events= POLLHUP | POLLIN | POLLOUT | POLLNVAL; - pfd[0].revents= 0; - r= poll(pfd,1,0); - if(pfd[0].revents & (POLLHUP|POLLERR)) - return 1; - - return 0; -#else /* HAVE_POLL */ - fd_set readfds, writefds, errorfds; - struct timeval tv= {0,0}; - int s_err; - int s_err_size= sizeof(s_err); - - FD_ZERO(&readfds); - FD_ZERO(&writefds); - FD_ZERO(&errorfds); - - FD_SET(sock, &readfds); - FD_SET(sock, &writefds); - FD_SET(sock, &errorfds); - - if(select(1, &readfds, &writefds, &errorfds, &tv)<0) - return 1; - - if(FD_ISSET(sock,&errorfds)) - return 1; - - s_err=0; - if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0) - return(1); - - if (s_err) - { /* getsockopt could succeed */ - return(1); /* but return an error... */ - } - - return 0; -#endif /* HAVE_POLL */ -} diff --git a/storage/ndb/src/common/portlib/NdbThread.c b/storage/ndb/src/common/portlib/NdbThread.c deleted file mode 100644 index 01f08a2505a..00000000000 --- a/storage/ndb/src/common/portlib/NdbThread.c +++ /dev/null @@ -1,193 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include -#include - -#define MAX_THREAD_NAME 16 - -/*#define USE_PTHREAD_EXTRAS*/ - -#ifdef NDB_SHM_TRANSPORTER -int g_ndb_shm_signum= 0; -#endif - -struct NdbThread -{ - pthread_t thread; - char thread_name[MAX_THREAD_NAME]; - NDB_THREAD_FUNC * func; - void * object; -}; - - -#ifdef NDB_SHM_TRANSPORTER -void NdbThread_set_shm_sigmask(my_bool block) -{ - DBUG_ENTER("NdbThread_set_shm_sigmask"); - if (g_ndb_shm_signum) - { - sigset_t mask; - DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum)); - sigemptyset(&mask); - sigaddset(&mask, g_ndb_shm_signum); - if (block) - pthread_sigmask(SIG_BLOCK, &mask, 0); - else - pthread_sigmask(SIG_UNBLOCK, &mask, 0); - } - DBUG_VOID_RETURN; -} -#endif - - -static -void* -ndb_thread_wrapper(void* _ss){ - my_thread_init(); - { - DBUG_ENTER("ndb_thread_wrapper"); -#ifdef NDB_SHM_TRANSPORTER - NdbThread_set_shm_sigmask(TRUE); -#endif - { - /** - * Block all signals to thread by default - * let them go to main process instead - */ - sigset_t mask; - sigfillset(&mask); - pthread_sigmask(SIG_BLOCK, &mask, 0); - } - - { - void *ret; - struct NdbThread * ss = (struct NdbThread *)_ss; - ret= (* ss->func)(ss->object); - DBUG_POP(); - NdbThread_Exit(ret); - } - /* will never be reached */ - DBUG_RETURN(0); - } -} - - -struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func, - NDB_THREAD_ARG *p_thread_arg, - const NDB_THREAD_STACKSIZE _thread_stack_size, - const char* p_thread_name, - NDB_THREAD_PRIO thread_prio) -{ - struct NdbThread* tmpThread; - int result; - pthread_attr_t thread_attr; - NDB_THREAD_STACKSIZE thread_stack_size= _thread_stack_size * SIZEOF_CHARP/4; - - DBUG_ENTER("NdbThread_Create"); - - (void)thread_prio; /* remove warning for unused parameter */ - - if (p_thread_func == NULL) - DBUG_RETURN(NULL); - - tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread)); - if (tmpThread == NULL) - DBUG_RETURN(NULL); - - DBUG_PRINT("info",("thread_name: %s", p_thread_name)); - - strnmov(tmpThread->thread_name,p_thread_name,sizeof(tmpThread->thread_name)); - - pthread_attr_init(&thread_attr); -#ifdef PTHREAD_STACK_MIN - if (thread_stack_size < PTHREAD_STACK_MIN) - thread_stack_size = PTHREAD_STACK_MIN; -#endif - pthread_attr_setstacksize(&thread_attr, thread_stack_size); -#ifdef USE_PTHREAD_EXTRAS - /* Guard stack overflow with a 2k databuffer */ - pthread_attr_setguardsize(&thread_attr, 2048); -#endif - -#ifdef PTHREAD_CREATE_JOINABLE /* needed on SCO */ - pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE); -#endif - tmpThread->func= p_thread_func; - tmpThread->object= p_thread_arg; - result = pthread_create(&tmpThread->thread, - &thread_attr, - ndb_thread_wrapper, - tmpThread); - if (result != 0) - { - NdbMem_Free((char *)tmpThread); - tmpThread = 0; - } - - pthread_attr_destroy(&thread_attr); - DBUG_PRINT("exit",("ret: 0x%lx", (long) tmpThread)); - DBUG_RETURN(tmpThread); -} - - -void NdbThread_Destroy(struct NdbThread** p_thread) -{ - DBUG_ENTER("NdbThread_Destroy"); - if (*p_thread != NULL){ - DBUG_PRINT("enter",("*p_thread: 0x%lx", (long) *p_thread)); - free(* p_thread); - * p_thread = 0; - } - DBUG_VOID_RETURN; -} - - -int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status) -{ - int result; - - if (p_wait_thread == NULL) - return 0; - - if 
(p_wait_thread->thread == 0) - return 0; - - result = pthread_join(p_wait_thread->thread, status); - - return result; -} - - -void NdbThread_Exit(void *status) -{ - my_thread_end(); - pthread_exit(status); -} - - -int NdbThread_SetConcurrencyLevel(int level) -{ -#ifdef USE_PTHREAD_EXTRAS - return pthread_setconcurrency(level); -#else - (void)level; /* remove warning for unused parameter */ - return 0; -#endif -} diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c deleted file mode 100644 index 1e46664b663..00000000000 --- a/storage/ndb/src/common/portlib/NdbTick.c +++ /dev/null @@ -1,104 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -#define NANOSEC_PER_SEC 1000000000 -#define MICROSEC_PER_SEC 1000000 -#define MILLISEC_PER_SEC 1000 -#define MICROSEC_PER_MILLISEC 1000 -#define MILLISEC_PER_NANOSEC 1000000 - - -#ifdef HAVE_CLOCK_GETTIME -NDB_TICKS NdbTick_CurrentMillisecond(void) -{ - struct timespec tick_time; - clock_gettime(CLOCK_REALTIME, &tick_time); - - return - ((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)MILLISEC_PER_SEC) + - ((NDB_TICKS)tick_time.tv_nsec) / ((NDB_TICKS)MILLISEC_PER_NANOSEC); -} - -int -NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ - struct timespec t; - int res = clock_gettime(CLOCK_REALTIME, &t); - * secs = t.tv_sec; - * micros = t.tv_nsec / 1000; - return res; -} -#else -NDB_TICKS NdbTick_CurrentMillisecond(void) -{ - struct timeval tick_time; - gettimeofday(&tick_time, 0); - - return - ((NDB_TICKS)tick_time.tv_sec) * ((NDB_TICKS)MILLISEC_PER_SEC) + - ((NDB_TICKS)tick_time.tv_usec) / ((NDB_TICKS)MICROSEC_PER_MILLISEC); -} - -int -NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ - struct timeval tick_time; - int res = gettimeofday(&tick_time, 0); - - if(secs==0) { - NDB_TICKS local_secs = tick_time.tv_sec; - *micros = tick_time.tv_usec; - *micros = local_secs*1000000+*micros; - } else { - * secs = tick_time.tv_sec; - * micros = tick_time.tv_usec; - } - return res; -} - -#endif -int -NdbTick_getMicroTimer(struct MicroSecondTimer* input_timer) -{ - NDB_TICKS secs; - Uint32 mics; - int ret_value; - ret_value = NdbTick_CurrentMicrosecond(&secs, &mics); - input_timer->seconds = secs; - input_timer->micro_seconds = (NDB_TICKS)mics; - return ret_value; -} - -NDB_TICKS -NdbTick_getMicrosPassed(struct MicroSecondTimer start, - struct MicroSecondTimer stop) -{ - NDB_TICKS ret_value = (NDB_TICKS)0; - if (start.seconds < stop.seconds) { - NDB_TICKS sec_passed = stop.seconds - start.seconds; - ret_value = ((NDB_TICKS)MICROSEC_PER_SEC) * sec_passed; - } else if (start.seconds > stop.seconds) { - return ret_value; - } - if (start.micro_seconds < stop.micro_seconds) { - ret_value += (stop.micro_seconds - start.micro_seconds); - } else if (ret_value != (NDB_TICKS)0) { - ret_value -= (start.micro_seconds 
- stop.micro_seconds); - } - return ret_value; -} diff --git a/storage/ndb/src/common/portlib/memtest.c b/storage/ndb/src/common/portlib/memtest.c deleted file mode 100644 index 37d87de90e7..00000000000 --- a/storage/ndb/src/common/portlib/memtest.c +++ /dev/null @@ -1,243 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include - -long long getMilli(); -long long getMicro(); -void malloctest(int loopcount, int memsize, int touch); -void freetest(int loopcount, int memsize); -void mmaptest(int loopcount, int memsize, int touch); -void unmaptest(int loopcount, int memsize); - - -main(int argc, char ** argv) -{ - - int loopcount; - int memsize; - if(argc < 4) { - printf("Usage: memtest X loopcount memsize(MB)\n"); - printf("where X = \n"); - printf("1 : malloc test \n"); - printf("2 : mmap test \n"); - printf("3 : malloc test + touch pages\n"); - printf("4 : mmap test + touch pages\n"); - printf("5 : malloc/free test \n"); - printf("6 : mmap/munmap test \n"); - printf("loopcount - number of loops\n"); - printf("memsize - memory segment size to allocate in MB.\n"); - exit(1); - } - - - loopcount = atoi(argv[2]); - memsize = atoi(argv[3]); - switch(atoi(argv[1])) { - case 1: malloctest(loopcount, memsize , 0 ); - break; - case 2: mmaptest(loopcount, memsize,0); - break; - case 3: malloctest(loopcount, memsize,1); - break; - case 4: mmaptest(loopcount, memsize,1); - break; - case 5: freetest(loopcount, memsize); - break; - case 6: unmaptest(loopcount, memsize); - break; - default: - break; - } -} - -long long getMilli() { - struct timeval tick_time; - gettimeofday(&tick_time, 0); - - return - ((long long)tick_time.tv_sec) * ((long long)1000) + - ((long long)tick_time.tv_usec) / ((long long)1000); -} - -long long getMicro(){ - struct timeval tick_time; - int res = gettimeofday(&tick_time, 0); - - long long secs = tick_time.tv_sec; - long long micros = tick_time.tv_usec; - - micros = secs*1000000+micros; - return micros; -} - -void malloctest(int loopcount, int memsize, int touch) { - long long start=0; - int total=0; - int i=0, j=0; - int size=memsize*1024*1024; /*bytes*/; - float mean; - char * ptr =0; - - printf("Staring malloctest "); - if(touch) - printf("with touch\n"); - else - printf("\n"); - - start=getMicro(); - - for(i=0; i - -#include -#include "NdbThread.h" -#include -#include - -NDB_COMMAND(ndbmem, "ndbmem", "ndbmem", "Test the ndbmem functionality", 4096){ - - ndbout << "Starting test of NdbMem" << endl; - ndbout << "=======================" << endl; - - ndbout << "Creating NdbMem" << endl; - NdbMem_Create(); - - - ndbout << "NdbMem - test 1" << endl; - if (argc == 2){ - int size1 = atoi(argv[1]); - ndbout << "Allocate and test "<"<< endl; - } - - return NULL; - -} - - - diff --git a/storage/ndb/src/common/portlib/munmaptest.cpp b/storage/ndb/src/common/portlib/munmaptest.cpp deleted file mode 
100644 index 7977dc88634..00000000000 --- a/storage/ndb/src/common/portlib/munmaptest.cpp +++ /dev/null @@ -1,246 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct ThreadData -{ - char * mapAddr; - Uint32 mapSize; - Uint32 chunk; - Uint32 idx; - -}; - -long long getMilli(); -long long getMicro(); - - -void* mapSegment(void * arg); -void* unmapSegment(void * arg); - - -void* mapSegment(void * arg) { - - ThreadData * threadArgs; - long long start=0; - int total=0; - int id = *(int *)arg; - threadArgs = new ThreadData [1]; - Uint32 size=5*1024*1024; - struct NdbThread* unmapthread_var; - void *status = 0; - int run = 1; - int max=0, min =100000000, sum=0; - while(run < 1001) { - start=getMicro(); - char * ptr =(char*) mmap(0, - size, - PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANONYMOUS, - 0, - 0); - - total=(int)(getMicro()-start); - - ndbout << "T" << id << ": mmap took : " << total << " microsecs. " - << " Run: " << run ; - ndbout_c(" mapped @ %p \n", ptr); - - if(total>max) - max = total; - if(totalmapSize; - Uint32 chunk = threadData->chunk; - mapAddr = threadData->mapAddr; - - - - freeAddr = mapAddr+mapSize-chunk; - NdbSleep_MilliSleep(100); - for(Uint32 i=0;i -#include "NdbCondition.h" -#include - -struct NdbCondition -{ - long nWaiters; - NdbMutex* pNdbMutexWaitersLock; - HANDLE hSemaphore; - HANDLE hEventWaitersDone; - int bWasBroadcast; -}; - - -struct NdbCondition* -NdbCondition_Create(void) -{ - int result = 0; - struct NdbCondition* pNdbCondition = (struct NdbCondition*)malloc(sizeof(struct NdbCondition)); - if(!pNdbCondition) - return 0; - - pNdbCondition->nWaiters = 0; - pNdbCondition->bWasBroadcast = 0; - if(!(pNdbCondition->hSemaphore = CreateSemaphore(0, 0, MAXLONG, 0))) - result = -1; - else if(!(pNdbCondition->pNdbMutexWaitersLock = NdbMutex_Create())) - result = -1; - else if(!(pNdbCondition->hEventWaitersDone = CreateEvent(0, 0, 0, 0))) - result = -1; - - assert(!result); - return pNdbCondition; -} - - -int -NdbCondition_Wait(struct NdbCondition* p_cond, - NdbMutex* p_mutex) -{ - int result; - int bLastWaiter; - if(!p_cond || !p_mutex) - return 1; - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - p_cond->nWaiters++; - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - - if(NdbMutex_Unlock(p_mutex)) - return -1; - result = WaitForSingleObject (p_cond->hSemaphore, INFINITE); - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - p_cond->nWaiters--; - bLastWaiter = (p_cond->bWasBroadcast && p_cond->nWaiters==0); - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - - if(result==WAIT_OBJECT_0 && bLastWaiter) - SetEvent(p_cond->hEventWaitersDone); - - NdbMutex_Lock(p_mutex); - return result; -} - - -int -NdbCondition_WaitTimeout(struct NdbCondition* p_cond, - NdbMutex* p_mutex, - 
int msecs) -{ - int result; - int bLastWaiter; - if (!p_cond || !p_mutex) - return 1; - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - p_cond->nWaiters++; - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - if(msecs<0) - msecs = 0; - - if(NdbMutex_Unlock(p_mutex)) - return -1; - result = WaitForSingleObject(p_cond->hSemaphore, msecs); - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - p_cond->nWaiters--; - bLastWaiter = (p_cond->bWasBroadcast && p_cond->nWaiters==0); - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - - if(result!=WAIT_OBJECT_0) - result = -1; - - if(bLastWaiter) - SetEvent(p_cond->hEventWaitersDone); - - NdbMutex_Lock(p_mutex); - return result; -} - - -int -NdbCondition_Signal(struct NdbCondition* p_cond) -{ - int bHaveWaiters; - if(!p_cond) - return 1; - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - bHaveWaiters = (p_cond->nWaiters > 0); - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - - if(bHaveWaiters) - return (ReleaseSemaphore(p_cond->hSemaphore, 1, 0) ? 0 : -1); - else - return 0; -} - - -int NdbCondition_Broadcast(struct NdbCondition* p_cond) -{ - int bHaveWaiters; - int result = 0; - if(!p_cond) - return 1; - - NdbMutex_Lock(p_cond->pNdbMutexWaitersLock); - bHaveWaiters = 0; - if(p_cond->nWaiters > 0) - { - p_cond->bWasBroadcast = !0; - bHaveWaiters = 1; - } - NdbMutex_Unlock(p_cond->pNdbMutexWaitersLock); - if(bHaveWaiters) - { - if(!ReleaseSemaphore(p_cond->hSemaphore, p_cond->nWaiters, 0)) - result = -1; - else if(WaitForSingleObject (p_cond->hEventWaitersDone, INFINITE) != WAIT_OBJECT_0) - result = -1; - p_cond->bWasBroadcast = 0; - } - return result; -} - - -int NdbCondition_Destroy(struct NdbCondition* p_cond) -{ - int result; - if(!p_cond) - return 1; - - CloseHandle(p_cond->hEventWaitersDone); - NdbMutex_Destroy(p_cond->pNdbMutexWaitersLock); - result = (CloseHandle(p_cond->hSemaphore) ? 0 : -1); - - free(p_cond); - return 0; -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbDaemon.c b/storage/ndb/src/common/portlib/win32/NdbDaemon.c deleted file mode 100644 index 00fa8ea4591..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbDaemon.c +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "NdbDaemon.h" - -#define NdbDaemon_ErrorSize 500 -long NdbDaemon_DaemonPid; -int NdbDaemon_ErrorCode; -char NdbDaemon_ErrorText[NdbDaemon_ErrorSize]; - -int -NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags) -{ - // XXX do something - return 0; -} - -#ifdef NDB_DAEMON_TEST - -int -main() -{ - if (NdbDaemon_Make("test.pid", "test.log", 0) == -1) { - fprintf(stderr, "NdbDaemon_Make: %s\n", NdbDaemon_ErrorText); - return 1; - } - sleep(10); - return 0; -} - -#endif diff --git a/storage/ndb/src/common/portlib/win32/NdbEnv.c b/storage/ndb/src/common/portlib/win32/NdbEnv.c deleted file mode 100644 index 7ebee73ae02..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbEnv.c +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbEnv.h" - -const char* NdbEnv_GetEnv(const char* name, char * buf, int buflen) -{ - char* p = NULL; - p = getenv(name); - - if (p != NULL && buf != NULL){ - strncpy(buf, p, buflen); - buf[buflen-1] = 0; - } - return p; -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbHost.c b/storage/ndb/src/common/portlib/win32/NdbHost.c deleted file mode 100644 index 220c080ed4b..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbHost.c +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include "NdbHost.h" - - -int NdbHost_GetHostName(char* buf) -{ - /* We must initialize TCP/IP if we want to call gethostname */ - WORD wVersionRequested; - WSADATA wsaData; - int err; - - wVersionRequested = MAKEWORD( 2, 0 ); - err = WSAStartup( wVersionRequested, &wsaData ); - if ( err != 0 ) { - /** - * Tell the user that we couldn't find a usable - * WinSock DLL. 
- */ - return -1; - } - - /* Get host name */ - if(gethostname(buf, MAXHOSTNAMELEN)) - { - return -1; - } - return 0; -} - - -int NdbHost_GetProcessId(void) -{ - return _getpid(); -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbMem.c b/storage/ndb/src/common/portlib/win32/NdbMem.c deleted file mode 100644 index 0cf1b5f018e..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbMem.c +++ /dev/null @@ -1,283 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbMem.h" - -#if 0 -struct AWEINFO -{ - SIZE_T dwSizeInBytesRequested; - ULONG_PTR nNumberOfPagesRequested; - ULONG_PTR nNumberOfPagesActual; - ULONG_PTR nNumberOfPagesFreed; - ULONG_PTR* pnPhysicalMemoryPageArray; - void* pRegionReserved; -}; - -const size_t cNdbMem_nMaxAWEinfo = 256; -size_t gNdbMem_nAWEinfo = 0; - -struct AWEINFO* gNdbMem_pAWEinfo = 0; - - -void ShowLastError(const char* szContext, const char* szFunction) -{ - DWORD dwError = GetLastError(); - LPVOID lpMsgBuf; - FormatMessage( - FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM | - FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - dwError, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language - (LPTSTR)&lpMsgBuf, - 0, - NULL - ); - printf("%s : %s failed : %lu : %s\n", szContext, szFunction, dwError, (char*)lpMsgBuf); - LocalFree(lpMsgBuf); -} - - - -void NdbMem_Create() -{ - // Address Windowing Extensions - struct PRIVINFO - { - DWORD Count; - LUID_AND_ATTRIBUTES Privilege[1]; - } Info; - - HANDLE hProcess = GetCurrentProcess(); - HANDLE hToken; - if(!OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken)) - { - ShowLastError("NdbMem_Create", "OpenProcessToken"); - } - - Info.Count = 1; - Info.Privilege[0].Attributes = SE_PRIVILEGE_ENABLED; - if(!LookupPrivilegeValue(0, SE_LOCK_MEMORY_NAME, &(Info.Privilege[0].Luid))) - { - ShowLastError("NdbMem_Create", "LookupPrivilegeValue"); - } - - if(!AdjustTokenPrivileges(hToken, FALSE, (PTOKEN_PRIVILEGES)&Info, 0, 0, 0)) - { - ShowLastError("NdbMem_Create", "AdjustTokenPrivileges"); - } - - if(!CloseHandle(hToken)) - { - ShowLastError("NdbMem_Create", "CloseHandle"); - } - - return; -} - -void NdbMem_Destroy() -{ - /* Do nothing */ - return; -} - -void* NdbMem_Allocate(size_t size) -{ - // Address Windowing Extensions - struct AWEINFO* pAWEinfo; - HANDLE hProcess; - SYSTEM_INFO sysinfo; - - if(!gNdbMem_pAWEinfo) - { - gNdbMem_pAWEinfo = VirtualAlloc(0, - sizeof(struct AWEINFO)*cNdbMem_nMaxAWEinfo, - MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE); - } - - assert(gNdbMem_nAWEinfo < cNdbMem_nMaxAWEinfo); - pAWEinfo = gNdbMem_pAWEinfo+gNdbMem_nAWEinfo++; - - hProcess = GetCurrentProcess(); - GetSystemInfo(&sysinfo); - pAWEinfo->nNumberOfPagesRequested = (size+sysinfo.dwPageSize-1)/sysinfo.dwPageSize; - pAWEinfo->pnPhysicalMemoryPageArray = VirtualAlloc(0, - 
sizeof(ULONG_PTR)*pAWEinfo->nNumberOfPagesRequested, - MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE); - pAWEinfo->nNumberOfPagesActual = pAWEinfo->nNumberOfPagesRequested; - if(!AllocateUserPhysicalPages(hProcess, &(pAWEinfo->nNumberOfPagesActual), pAWEinfo->pnPhysicalMemoryPageArray)) - { - ShowLastError("NdbMem_Allocate", "AllocateUserPhysicalPages"); - return 0; - } - if(pAWEinfo->nNumberOfPagesRequested != pAWEinfo->nNumberOfPagesActual) - { - ShowLastError("NdbMem_Allocate", "nNumberOfPagesRequested != nNumberOfPagesActual"); - return 0; - } - - pAWEinfo->dwSizeInBytesRequested = size; - pAWEinfo->pRegionReserved = VirtualAlloc(0, pAWEinfo->dwSizeInBytesRequested, MEM_RESERVE | MEM_PHYSICAL, PAGE_READWRITE); - if(!pAWEinfo->pRegionReserved) - { - ShowLastError("NdbMem_Allocate", "VirtualAlloc"); - return 0; - } - - if(!MapUserPhysicalPages(pAWEinfo->pRegionReserved, pAWEinfo->nNumberOfPagesActual, pAWEinfo->pnPhysicalMemoryPageArray)) - { - ShowLastError("NdbMem_Allocate", "MapUserPhysicalPages"); - return 0; - } - - /* - printf("allocate AWE memory: %lu bytes, %lu pages, address %lx\n", - pAWEinfo->dwSizeInBytesRequested, - pAWEinfo->nNumberOfPagesActual, - pAWEinfo->pRegionReserved); - */ - return pAWEinfo->pRegionReserved; -} - - -void* NdbMem_AllocateAlign(size_t size, size_t alignment) -{ - /* - return (void*)memalign(alignment, size); - TEMP fix - */ - return NdbMem_Allocate(size); -} - - -void NdbMem_Free(void* ptr) -{ - // VirtualFree(ptr, 0, MEM_DECOMMIT|MEM_RELEASE); - - // Address Windowing Extensions - struct AWEINFO* pAWEinfo = 0; - size_t i; - HANDLE hProcess; - - for(i=0; inNumberOfPagesActual, 0)) - { - ShowLastError("NdbMem_Free", "MapUserPhysicalPages"); - } - - if(!VirtualFree(ptr, 0, MEM_RELEASE)) - { - ShowLastError("NdbMem_Free", "VirtualFree"); - } - - pAWEinfo->nNumberOfPagesFreed = pAWEinfo->nNumberOfPagesActual; - if(!FreeUserPhysicalPages(hProcess, &(pAWEinfo->nNumberOfPagesFreed), pAWEinfo->pnPhysicalMemoryPageArray)) - { - ShowLastError("NdbMem_Free", "FreeUserPhysicalPages"); - } - - VirtualFree(pAWEinfo->pnPhysicalMemoryPageArray, 0, MEM_DECOMMIT|MEM_RELEASE); -} - - -int NdbMem_MemLockAll() -{ - /* - HANDLE hProcess = GetCurrentProcess(); - SIZE_T nMinimumWorkingSetSize; - SIZE_T nMaximumWorkingSetSize; - GetProcessWorkingSetSize(hProcess, &nMinimumWorkingSetSize, &nMaximumWorkingSetSize); - ndbout << "nMinimumWorkingSetSize=" << nMinimumWorkingSetSize << ", nMaximumWorkingSetSize=" << nMaximumWorkingSetSize << endl; - - SetProcessWorkingSetSize(hProcess, 50000000, 100000000); - - GetProcessWorkingSetSize(hProcess, &nMinimumWorkingSetSize, &nMaximumWorkingSetSize); - ndbout << "nMinimumWorkingSetSize=" << nMinimumWorkingSetSize << ", nMaximumWorkingSetSize=" << nMaximumWorkingSetSize << endl; - */ - return -1; -} - -int NdbMem_MemUnlockAll() -{ - //VirtualUnlock(); - return -1; -} - -#endif - -void NdbMem_Create() -{ - /* Do nothing */ - return; -} - -void NdbMem_Destroy() -{ - /* Do nothing */ - return; -} - - -void* NdbMem_Allocate(size_t size) -{ - void* mem_allocated; - assert(size > 0); - mem_allocated= (void*)malloc(size); - return mem_allocated; -} - -void* NdbMem_AllocateAlign(size_t size, size_t alignment) -{ - (void)alignment; /* remove warning for unused parameter */ - /* - return (void*)memalign(alignment, size); - TEMP fix - */ - return (void*)malloc(size); -} - - -void NdbMem_Free(void* ptr) -{ - free(ptr); -} - - -int NdbMem_MemLockAll() -{ - return 0; -} - -int NdbMem_MemUnlockAll() -{ - return 0; -} - diff --git 
a/storage/ndb/src/common/portlib/win32/NdbMutex.c b/storage/ndb/src/common/portlib/win32/NdbMutex.c deleted file mode 100644 index 8ddfd43a283..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbMutex.c +++ /dev/null @@ -1,73 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include "NdbMutex.h" - -NdbMutex* NdbMutex_Create(void) -{ - NdbMutex* pNdbMutex = (NdbMutex*)malloc(sizeof(NdbMutex)); - if(!pNdbMutex) - return 0; - - InitializeCriticalSection(pNdbMutex); - return pNdbMutex; -} - - -int NdbMutex_Destroy(NdbMutex* p_mutex) -{ - if(!p_mutex) - return -1; - - DeleteCriticalSection(p_mutex); - free(p_mutex); - return 0; -} - - -int NdbMutex_Lock(NdbMutex* p_mutex) -{ - if(!p_mutex) - return -1; - - EnterCriticalSection (p_mutex); - return 0; -} - - -int NdbMutex_Unlock(NdbMutex* p_mutex) -{ - if(!p_mutex) - return -1; - - LeaveCriticalSection(p_mutex); - return 0; -} - - -int NdbMutex_Trylock(NdbMutex* p_mutex) -{ - int result = -1; - if(p_mutex) - { - result = NdbMutex_Lock(p_mutex); - //(TryEnterCriticalSection(p_mutex) ? 0 : -1); - } - return result; -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbSleep.c b/storage/ndb/src/common/portlib/win32/NdbSleep.c deleted file mode 100644 index 2d87cd88234..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbSleep.c +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbSleep.h" - -int -NdbSleep_MilliSleep(int milliseconds) -{ - Sleep(milliseconds); - return 0; -} - -int -NdbSleep_SecSleep(int seconds) -{ - return NdbSleep_MilliSleep(seconds*1000); -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbTCP.c b/storage/ndb/src/common/portlib/win32/NdbTCP.c deleted file mode 100644 index 75e9345a150..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbTCP.c +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbTCP.h" - -int -Ndb_getInAddr(struct in_addr * dst, const char *address) -{ - struct hostent * hostPtr; - - /* Try it as aaa.bbb.ccc.ddd. */ - dst->s_addr = inet_addr(address); - if (dst->s_addr != -1) { - return 0; - } - - hostPtr = gethostbyname(address); - if (hostPtr != NULL) { - dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr; - return 0; - } - - return -1; -} - -int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock) -{ - fd_set readfds, writefds, errorfds; - struct timeval tv= {0,0}; - int s_err; - int s_err_size= sizeof(s_err); - - FD_ZERO(&readfds); - FD_ZERO(&writefds); - FD_ZERO(&errorfds); - - FD_SET(sock, &readfds); - FD_SET(sock, &writefds); - FD_SET(sock, &errorfds); - - if(select(1, &readfds, &writefds, &errorfds, &tv)==SOCKET_ERROR) - return 1; - - if(FD_ISSET(sock,&errorfds)) - return 1; - - s_err=0; - if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0) - return(1); - - if (s_err) - { /* getsockopt could succeed */ - return(1); /* but return an error... */ - } - - return 0; -} diff --git a/storage/ndb/src/common/portlib/win32/NdbThread.c b/storage/ndb/src/common/portlib/win32/NdbThread.c deleted file mode 100644 index 98c8e472fcd..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbThread.c +++ /dev/null @@ -1,114 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbThread.h" -#include - -#define MAX_THREAD_NAME 16 - -typedef unsigned (WINAPI* NDB_WIN32_THREAD_FUNC)(void*); - - -struct NdbThread -{ - HANDLE hThread; - unsigned nThreadId; - char thread_name[MAX_THREAD_NAME]; -}; - - -struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func, - NDB_THREAD_ARG *p_thread_arg, - const NDB_THREAD_STACKSIZE thread_stack_size, - const char* p_thread_name, - NDB_THREAD_PRIO thread_prio) -{ - struct NdbThread* tmpThread; - unsigned initflag; - int nPriority = 0; - - if(!p_thread_func) - return 0; - - tmpThread = (struct NdbThread*)malloc(sizeof(struct NdbThread)); - if(!tmpThread) - return 0; - - strncpy((char*)&tmpThread->thread_name, p_thread_name, MAX_THREAD_NAME); - - switch(thread_prio) - { - case NDB_THREAD_PRIO_HIGHEST: nPriority=THREAD_PRIORITY_HIGHEST; break; - case NDB_THREAD_PRIO_HIGH: nPriority=THREAD_PRIORITY_ABOVE_NORMAL; break; - case NDB_THREAD_PRIO_MEAN: nPriority=THREAD_PRIORITY_NORMAL; break; - case NDB_THREAD_PRIO_LOW: nPriority=THREAD_PRIORITY_BELOW_NORMAL; break; - case NDB_THREAD_PRIO_LOWEST: nPriority=THREAD_PRIORITY_LOWEST; break; - } - initflag = (nPriority ? CREATE_SUSPENDED : 0); - - tmpThread->hThread = (HANDLE)_beginthreadex(0, thread_stack_size, - (NDB_WIN32_THREAD_FUNC)p_thread_func, p_thread_arg, - initflag, &tmpThread->nThreadId); - - if(nPriority && tmpThread->hThread) - { - SetThreadPriority(tmpThread->hThread, nPriority); - ResumeThread (tmpThread->hThread); - } - - assert(tmpThread->hThread); - return tmpThread; -} - - -void NdbThread_Destroy(struct NdbThread** p_thread) -{ - CloseHandle((*p_thread)->hThread); - (*p_thread)->hThread = 0; - free(*p_thread); - *p_thread = 0; -} - - -int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status) -{ - void *local_status = 0; - if (status == 0) - status = &local_status; - - if(WaitForSingleObject(p_wait_thread->hThread, INFINITE) == WAIT_OBJECT_0 - && GetExitCodeThread(p_wait_thread->hThread, (LPDWORD)status)) - { - CloseHandle(p_wait_thread->hThread); - p_wait_thread->hThread = 0; - return 0; - } - return -1; -} - - -void NdbThread_Exit(int status) -{ - _endthreadex((DWORD) status); -} - - -int NdbThread_SetConcurrencyLevel(int level) -{ - return 0; -} - diff --git a/storage/ndb/src/common/portlib/win32/NdbTick.c b/storage/ndb/src/common/portlib/win32/NdbTick.c deleted file mode 100644 index a4157f14fa4..00000000000 --- a/storage/ndb/src/common/portlib/win32/NdbTick.c +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "NdbTick.h" -//#include - -/* -#define FILETIME_PER_MICROSEC 10 -#define FILETIME_PER_MILLISEC 10000 -#define FILETIME_PER_SEC 10000000 - - -NDB_TICKS NdbTick_CurrentMillisecond(void) -{ - ULONGLONG ullTime; - GetSystemTimeAsFileTime((LPFILETIME)&ullTime); - return (ullTime / FILETIME_PER_MILLISEC); -} - -int -NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros) -{ - ULONGLONG ullTime; - GetSystemTimeAsFileTime((LPFILETIME)&ullTime); - *secs = (ullTime / FILETIME_PER_SEC); - *micros = (Uint32)((ullTime % FILETIME_PER_SEC) / FILETIME_PER_MICROSEC); - return 0; -} -*/ - - -NDB_TICKS NdbTick_CurrentMillisecond(void) -{ - LARGE_INTEGER liCount, liFreq; - QueryPerformanceCounter(&liCount); - QueryPerformanceFrequency(&liFreq); - return (liCount.QuadPart*1000) / liFreq.QuadPart; -} - -int -NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros) -{ - LARGE_INTEGER liCount, liFreq; - QueryPerformanceCounter(&liCount); - QueryPerformanceFrequency(&liFreq); - *secs = liCount.QuadPart / liFreq.QuadPart; - liCount.QuadPart -= *secs * liFreq.QuadPart; - *micros = (liCount.QuadPart*1000000) / liFreq.QuadPart; - return 0; -} diff --git a/storage/ndb/src/common/transporter/Makefile.am b/storage/ndb/src/common/transporter/Makefile.am deleted file mode 100644 index a8374d69662..00000000000 --- a/storage/ndb/src/common/transporter/Makefile.am +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = libtransporter.la - -libtransporter_la_SOURCES = \ - Transporter.cpp \ - SendBuffer.cpp \ - TCP_Transporter.cpp \ - TransporterRegistry.cpp \ - Packer.cpp - -EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp SCI_Transporter.cpp - -libtransporter_la_LIBADD = @ndb_transporter_opt_objs@ -libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@ - -INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi -I$(top_srcdir)/storage/ndb/src/mgmapi -I$(top_srcdir)/storage/ndb/include/debugger -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/include/transporter @NDB_SCI_INCLUDES@ - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_util.mk.am - -windoze-dsp: libtransporter.dsp - -libtransporter.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libtransporter_la_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/transporter/Packer.cpp b/storage/ndb/src/common/transporter/Packer.cpp deleted file mode 100644 index 9fb9f77fc7c..00000000000 --- a/storage/ndb/src/common/transporter/Packer.cpp +++ /dev/null @@ -1,517 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "Packer.hpp" -#include -#include -#include - -#ifdef ERROR_INSERT -Uint32 MAX_RECEIVED_SIGNALS = 1024; -#else -#define MAX_RECEIVED_SIGNALS 1024 -#endif - -Uint32 -TransporterRegistry::unpack(Uint32 * readPtr, - Uint32 sizeOfData, - NodeId remoteNodeId, - IOState state) { - SignalHeader signalHeader; - LinearSectionPtr ptr[3]; - - Uint32 usedData = 0; - Uint32 loop_count = 0; - - if(state == NoHalt || state == HaltOutput){ - while ((sizeOfData >= 4 + sizeof(Protocol6)) && - (loop_count < MAX_RECEIVED_SIGNALS)) { - Uint32 word1 = readPtr[0]; - Uint32 word2 = readPtr[1]; - Uint32 word3 = readPtr[2]; - loop_count++; - -#if 0 - if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ - //Do funky stuff - } -#endif - - const Uint16 messageLen32 = Protocol6::getMessageLength(word1); - const Uint32 messageLenBytes = ((Uint32)messageLen32) << 2; - - if(messageLen32 == 0 || messageLen32 > MAX_MESSAGE_SIZE){ - DEBUG("Message Size = " << messageLenBytes); - reportError(callbackObj, remoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return usedData; - }//if - - if (sizeOfData < messageLenBytes) { - break; - }//if - - if(Protocol6::getCheckSumIncluded(word1)){ - const Uint32 tmpLen = messageLen32 - 1; - const Uint32 checkSumSent = readPtr[tmpLen]; - const Uint32 checkSumComputed = computeChecksum(&readPtr[0], tmpLen); - - if(checkSumComputed != checkSumSent){ - reportError(callbackObj, remoteNodeId, TE_INVALID_CHECKSUM); - return usedData; - }//if - }//if - -#if 0 - if(Protocol6::getCompressed(word1)){ - //Do funky stuff - }//if -#endif - - Protocol6::createSignalHeader(&signalHeader, word1, word2, word3); - - Uint32 sBlockNum = signalHeader.theSendersBlockRef; - sBlockNum = numberToRef(sBlockNum, remoteNodeId); - signalHeader.theSendersBlockRef = sBlockNum; - - Uint8 prio = Protocol6::getPrio(word1); - - Uint32 * signalData = &readPtr[3]; - - if(Protocol6::getSignalIdIncluded(word1) == 0){ - signalHeader.theSendersSignalId = ~0; - } else { - signalHeader.theSendersSignalId = * signalData; - signalData ++; - }//if - signalHeader.theSignalId= ~0; - - Uint32 * sectionPtr = signalData + signalHeader.theLength; - Uint32 * sectionData = sectionPtr + signalHeader.m_noOfSections; - for(Uint32 i = 0; i= 4 + sizeof(Protocol6)) && - (loop_count < MAX_RECEIVED_SIGNALS)) { - Uint32 word1 = readPtr[0]; - Uint32 word2 = readPtr[1]; - Uint32 word3 = readPtr[2]; - loop_count++; - -#if 0 - if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){ - //Do funky stuff - }//if -#endif - - const Uint16 messageLen32 = Protocol6::getMessageLength(word1); - const Uint32 messageLenBytes = ((Uint32)messageLen32) << 2; - if(messageLen32 == 0 || messageLen32 > MAX_MESSAGE_SIZE){ - DEBUG("Message Size = " << messageLenBytes); - reportError(callbackObj, remoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return usedData; - }//if - - if (sizeOfData < messageLenBytes) { - break; - }//if - - if(Protocol6::getCheckSumIncluded(word1)){ - const Uint32 tmpLen = messageLen32 - 1; - const Uint32 checkSumSent = readPtr[tmpLen]; - const Uint32 checkSumComputed = computeChecksum(&readPtr[0], tmpLen); - - if(checkSumComputed != checkSumSent){ - - //theTransporters[remoteNodeId]->disconnect(); - reportError(callbackObj, remoteNodeId, TE_INVALID_CHECKSUM); - return usedData; - }//if - }//if - -#if 0 - 
if(Protocol6::getCompressed(word1)){ - //Do funky stuff - }//if -#endif - - Protocol6::createSignalHeader(&signalHeader, word1, word2, word3); - - Uint32 rBlockNum = signalHeader.theReceiversBlockNumber; - - if(rBlockNum == 252){ - Uint32 sBlockNum = signalHeader.theSendersBlockRef; - sBlockNum = numberToRef(sBlockNum, remoteNodeId); - signalHeader.theSendersBlockRef = sBlockNum; - - Uint8 prio = Protocol6::getPrio(word1); - - Uint32 * signalData = &readPtr[3]; - - if(Protocol6::getSignalIdIncluded(word1) == 0){ - signalHeader.theSendersSignalId = ~0; - } else { - signalHeader.theSendersSignalId = * signalData; - signalData ++; - }//if - - Uint32 * sectionPtr = signalData + signalHeader.theLength; - Uint32 * sectionData = sectionPtr + signalHeader.m_noOfSections; - for(Uint32 i = 0; i MAX_MESSAGE_SIZE){ - DEBUG("Message Size(words) = " << messageLen32); - reportError(callbackObj, remoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return readPtr; - }//if - - if(Protocol6::getCheckSumIncluded(word1)){ - const Uint32 tmpLen = messageLen32 - 1; - const Uint32 checkSumSent = readPtr[tmpLen]; - const Uint32 checkSumComputed = computeChecksum(&readPtr[0], tmpLen); - - if(checkSumComputed != checkSumSent){ - reportError(callbackObj, remoteNodeId, TE_INVALID_CHECKSUM); - return readPtr; - }//if - }//if - -#if 0 - if(Protocol6::getCompressed(word1)){ - //Do funky stuff - }//if -#endif - - Protocol6::createSignalHeader(&signalHeader, word1, word2, word3); - - Uint32 sBlockNum = signalHeader.theSendersBlockRef; - sBlockNum = numberToRef(sBlockNum, remoteNodeId); - signalHeader.theSendersBlockRef = sBlockNum; - - Uint8 prio = Protocol6::getPrio(word1); - - Uint32 * signalData = &readPtr[3]; - - if(Protocol6::getSignalIdIncluded(word1) == 0){ - signalHeader.theSendersSignalId = ~0; - } else { - signalHeader.theSendersSignalId = * signalData; - signalData ++; - }//if - - Uint32 * sectionPtr = signalData + signalHeader.theLength; - Uint32 * sectionData = sectionPtr + signalHeader.m_noOfSections; - for(Uint32 i = 0; i MAX_MESSAGE_SIZE){ - DEBUG("Message Size(words) = " << messageLen32); - reportError(callbackObj, remoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return readPtr; - }//if - - if(Protocol6::getCheckSumIncluded(word1)){ - const Uint32 tmpLen = messageLen32 - 1; - const Uint32 checkSumSent = readPtr[tmpLen]; - const Uint32 checkSumComputed = computeChecksum(&readPtr[0], tmpLen); - - if(checkSumComputed != checkSumSent){ - - //theTransporters[remoteNodeId]->disconnect(); - reportError(callbackObj, remoteNodeId, TE_INVALID_CHECKSUM); - return readPtr; - }//if - }//if - -#if 0 - if(Protocol6::getCompressed(word1)){ - //Do funky stuff - }//if -#endif - - Protocol6::createSignalHeader(&signalHeader, word1, word2, word3); - - Uint32 rBlockNum = signalHeader.theReceiversBlockNumber; - - if(rBlockNum == 252){ - Uint32 sBlockNum = signalHeader.theSendersBlockRef; - sBlockNum = numberToRef(sBlockNum, remoteNodeId); - signalHeader.theSendersBlockRef = sBlockNum; - - Uint8 prio = Protocol6::getPrio(word1); - - Uint32 * signalData = &readPtr[3]; - - if(Protocol6::getSignalIdIncluded(word1) == 0){ - signalHeader.theSendersSignalId = ~0; - } else { - signalHeader.theSendersSignalId = * signalData; - signalData ++; - }//if - - Uint32 * sectionPtr = signalData + signalHeader.theLength; - Uint32 * sectionData = sectionPtr + signalHeader.m_noOfSections; - for(Uint32 i = 0; itheLength; - Uint32 no_segs = header->m_noOfSections; - - Uint32 len32 = - dataLen32 + no_segs + - checksumUsed + signalIdUsed + (sizeof(Protocol6)/4); - - 
- for(i = 0; itheSignalId; - tmpInserPtr++; - } - - memcpy(tmpInserPtr, theData, 4 * dataLen32); - - tmpInserPtr += dataLen32; - for(i = 0; itheLength; - Uint32 no_segs = header->m_noOfSections; - - Uint32 len32 = - dataLen32 + no_segs + - checksumUsed + signalIdUsed + (sizeof(Protocol6)/4); - - for(i = 0; itheSignalId; - tmpInserPtr++; - } - - memcpy(tmpInserPtr, theData, 4 * dataLen32); - - tmpInserPtr += dataLen32; - for(i = 0; i -#include "TransporterInternalDefinitions.hpp" - -class Packer { - Uint32 preComputedWord1; - Uint32 checksumUsed; // Checksum shall be included in the message - Uint32 signalIdUsed; // Senders signal id shall be included in the message -public: - Packer(bool signalId, bool checksum); - - Uint32 getMessageLength(const SignalHeader* header, - const LinearSectionPtr ptr[3]) const ; - - - Uint32 getMessageLength(const SignalHeader* header, - const SegmentedSectionPtr ptr[3]) const ; - - void pack(Uint32 * insertPtr, - Uint32 prio, - const SignalHeader* header, - const Uint32* data, - const LinearSectionPtr ptr[3]) const ; - - void pack(Uint32 * insertPtr, - Uint32 prio, - const SignalHeader* header, - const Uint32* data, - class SectionSegmentPool & thePool, - const SegmentedSectionPtr ptr[3]) const ; -}; - -inline -Uint32 -Packer::getMessageLength(const SignalHeader* header, - const LinearSectionPtr ptr[3]) const { - Uint32 tLen32 = header->theLength; - Uint32 no_seg = header->m_noOfSections; - tLen32 += checksumUsed; - tLen32 += signalIdUsed; - tLen32 += no_seg; - - for(Uint32 i = 0; itheLength; - Uint32 no_seg = header->m_noOfSections; - tLen32 += checksumUsed; - tLen32 += signalIdUsed; - tLen32 += no_seg; - - for(Uint32 i = 0; i - -#include "SCI_Transporter.hpp" -#include -#include -#include -#include - -#include "TransporterInternalDefinitions.hpp" -#include - -#include -#include - -#define FLAGS 0 -#define DEBUG_TRANSPORTER -SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg, - const char *lHostName, - const char *rHostName, - int r_port, - bool isMgmConnection, - Uint32 packetSize, - Uint32 bufferSize, - Uint32 nAdapters, - Uint16 remoteSciNodeId0, - Uint16 remoteSciNodeId1, - NodeId _localNodeId, - NodeId _remoteNodeId, - NodeId serverNodeId, - bool chksm, - bool signalId, - Uint32 reportFreq) : - Transporter(t_reg, tt_SCI_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection, _localNodeId, - _remoteNodeId, serverNodeId, 0, false, chksm, signalId) -{ - DBUG_ENTER("SCI_Transporter::SCI_Transporter"); - m_PacketSize = (packetSize + 3)/4 ; - m_BufferSize = bufferSize; - m_sendBuffer.m_buffer = NULL; - - m_RemoteSciNodeId = remoteSciNodeId0; - - if(remoteSciNodeId0 == 0 || remoteSciNodeId1 == 0) - m_numberOfRemoteNodes=1; - else - m_numberOfRemoteNodes=2; - - m_RemoteSciNodeId1 = remoteSciNodeId1; - - - m_initLocal=false; - m_failCounter=0; - m_remoteNodes[0]=remoteSciNodeId0; - m_remoteNodes[1]=remoteSciNodeId1; - m_adapters = nAdapters; - m_ActiveAdapterId=0; - m_StandbyAdapterId=1; - - m_mapped = false; - m_sciinit=false; - - sciAdapters= new SciAdapter[nAdapters* (sizeof (SciAdapter))]; - if(sciAdapters==NULL) { - } - m_SourceSegm= new sourceSegm[nAdapters* (sizeof (sourceSegm))]; - if(m_SourceSegm==NULL) { - } - m_TargetSegm= new targetSegm[nAdapters* (sizeof (targetSegm))]; - if(m_TargetSegm==NULL) { - } - m_reportFreq= reportFreq; - - //reset all statistic counters. 
-#ifdef DEBUG_TRANSPORTER - i1024=0; - i2048=0; - i2049=0; - i10242048=0; - i20484096=0; - i4096=0; - i4097=0; -#endif - DBUG_VOID_RETURN; -} - -void SCI_Transporter::disconnectImpl() -{ - DBUG_ENTER("SCI_Transporter::disconnectImpl"); - sci_error_t err; - if(m_mapped){ - setDisconnect(); - DBUG_PRINT("info", ("connect status = %d, remote node = %d", - (int)getConnectionStatus(), remoteNodeId)); - disconnectRemote(); - disconnectLocal(); - } - - // Empty send buffer - - m_sendBuffer.m_dataSize = 0; - - m_initLocal=false; - m_mapped = false; - - if(m_sciinit) { - for(Uint32 i=0; i4096: " << i4097 << endl; -#endif - DBUG_VOID_RETURN; -} - - -bool SCI_Transporter::initTransporter() { - DBUG_ENTER("SCI_Transporter::initTransporter"); - if(m_BufferSize < (2*MAX_MESSAGE_SIZE + 4096)){ - m_BufferSize = 2 * MAX_MESSAGE_SIZE + 4096; - } - - // Allocate buffers for sending, send buffer size plus 2048 bytes for avoiding - // the need to send twice when a large message comes around. Send buffer size is - // measured in words. - Uint32 sz = 4 * m_PacketSize + MAX_MESSAGE_SIZE;; - - m_sendBuffer.m_sendBufferSize = 4 * ((sz + 3) / 4); - m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4]; - m_sendBuffer.m_dataSize = 0; - - DBUG_PRINT("info", - ("Created SCI Send Buffer with buffer size %d and packet size %d", - m_sendBuffer.m_sendBufferSize, m_PacketSize * 4)); - if(!getLinkStatus(m_ActiveAdapterId) || - (m_adapters > 1 && - !getLinkStatus(m_StandbyAdapterId))) { - DBUG_PRINT("error", - ("The link is not fully operational. Check the cables and the switches")); - //NDB should terminate - report_error(TE_SCI_LINK_ERROR); - DBUG_RETURN(false); - } - DBUG_RETURN(true); -} // initTransporter() - - - -Uint32 SCI_Transporter::getLocalNodeId(Uint32 adapterNo) -{ - sci_query_adapter_t queryAdapter; - sci_error_t error; - Uint32 _localNodeId; - - queryAdapter.subcommand = SCI_Q_ADAPTER_NODEID; - queryAdapter.localAdapterNo = adapterNo; - queryAdapter.data = &_localNodeId; - - SCIQuery(SCI_Q_ADAPTER,(void*)(&queryAdapter),(Uint32)NULL,&error); - - if(error != SCI_ERR_OK) - return 0; - return _localNodeId; -} - - -bool SCI_Transporter::getLinkStatus(Uint32 adapterNo) -{ - sci_query_adapter_t queryAdapter; - sci_error_t error; - int linkstatus; - queryAdapter.subcommand = SCI_Q_ADAPTER_LINK_OPERATIONAL; - - queryAdapter.localAdapterNo = adapterNo; - queryAdapter.data = &linkstatus; - - SCIQuery(SCI_Q_ADAPTER,(void*)(&queryAdapter),(Uint32)NULL,&error); - - if(error != SCI_ERR_OK) { - DBUG_PRINT("error", ("error %d querying adapter", error)); - return false; - } - if(linkstatus<=0) - return false; - return true; -} - - - -sci_error_t SCI_Transporter::initLocalSegment() { - DBUG_ENTER("SCI_Transporter::initLocalSegment"); - Uint32 segmentSize = m_BufferSize; - Uint32 offset = 0; - sci_error_t err; - if(!m_sciinit) { - for(Uint32 i=0; i 0){ -#ifdef DEBUG_TRANSPORTER - if(sizeToSend < 1024 ) - i1024++; - if(sizeToSend > 1024 && sizeToSend < 2048 ) - i10242048++; - if(sizeToSend==2048) - i2048++; - if(sizeToSend>2048 && sizeToSend < 4096) - i20484096++; - if(sizeToSend==4096) - i4096++; - if(sizeToSend==4097) - i4097++; -#endif - - tryagain: - retry++; - if (retry > 3) { - DBUG_PRINT("error", ("SCI Transfer failed")); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - } - Uint32 * insertPtr = (Uint32 *) - (m_TargetSegm[m_ActiveAdapterId].writer)->getWritePtr(sizeToSend); - - if(insertPtr != 0) { - - const Uint32 remoteOffset=(Uint32) - ((char*)insertPtr - - 
(char*)(m_TargetSegm[m_ActiveAdapterId].mappedMemory)); - - SCIMemCpy(m_TargetSegm[m_ActiveAdapterId].sequence, - (void*)sendPtr, - m_TargetSegm[m_ActiveAdapterId].rhm[m_ActiveAdapterId].map, - remoteOffset, - sizeToSend, - SCI_FLAG_ERROR_CHECK, - &err); - - if (err != SCI_ERR_OK) { - if (err == SCI_ERR_OUT_OF_RANGE || - err == SCI_ERR_SIZE_ALIGNMENT || - err == SCI_ERR_OFFSET_ALIGNMENT) { - DBUG_PRINT("error", ("Data transfer error = %d", err)); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - } - if(err == SCI_ERR_TRANSFER_FAILED) { - if(getLinkStatus(m_ActiveAdapterId)) - goto tryagain; - if (m_adapters == 1) { - DBUG_PRINT("error", ("SCI Transfer failed")); - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - return false; - } - m_failCounter++; - Uint32 temp=m_ActiveAdapterId; - if (getLinkStatus(m_StandbyAdapterId)) { - failoverShmWriter(); - SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0); - m_ActiveAdapterId=m_StandbyAdapterId; - m_StandbyAdapterId=temp; - DBUG_PRINT("error", ("Swapping from adapter %u to %u", - m_StandbyAdapterId, m_ActiveAdapterId)); - } else { - report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR); - DBUG_PRINT("error", ("SCI Transfer failed")); - } - } - } else { - SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer); - writer->updateWritePtr(sizeToSend); - - Uint32 sendLimit = writer->getBufferSize(); - sendLimit -= writer->getWriteIndex(); - - m_sendBuffer.m_dataSize = 0; - m_sendBuffer.m_forceSendLimit = sendLimit; - } - } else { - /** - * If we end up here, the SCI segment is full. - */ - DBUG_PRINT("error", ("the segment is full for some reason")); - return false; - } //if - } - return true; -} // doSend() - - - -void SCI_Transporter::failoverShmWriter() { -#if 0 - (m_TargetSegm[m_StandbyAdapterId].writer) - ->copyIndexes((m_TargetSegm[m_StandbyAdapterId].writer)); -#endif -} //failoverShm - - -void SCI_Transporter::setupLocalSegment() -{ - DBUG_ENTER("SCI_Transporter::setupLocalSegment"); - Uint32 sharedSize = 0; - sharedSize =4096; //start of the buffer is page aligend - - Uint32 sizeOfBuffer = m_BufferSize; - - sizeOfBuffer -= sharedSize; - - Uint32 * localReadIndex = - (Uint32*)m_SourceSegm[m_ActiveAdapterId].mappedMemory; - Uint32 * localWriteIndex = (Uint32*)(localReadIndex+ 1); - m_localStatusFlag = (Uint32*)(localReadIndex + 3); - - char * localStartOfBuf = (char*) - ((char*)m_SourceSegm[m_ActiveAdapterId].mappedMemory+sharedSize); - - * localReadIndex = 0; - * localWriteIndex = 0; - - const Uint32 slack = MAX_MESSAGE_SIZE; - - reader = new SHM_Reader(localStartOfBuf, - sizeOfBuffer, - slack, - localReadIndex, - localWriteIndex); - - reader->clear(); - DBUG_VOID_RETURN; -} //setupLocalSegment - -void SCI_Transporter::setupRemoteSegment() -{ - DBUG_ENTER("SCI_Transporter::setupRemoteSegment"); - Uint32 sharedSize = 0; - sharedSize =4096; //start of the buffer is page aligned - - Uint32 sizeOfBuffer = m_BufferSize; - const Uint32 slack = MAX_MESSAGE_SIZE; - sizeOfBuffer -= sharedSize; - - Uint32 *segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ; - - Uint32 * remoteReadIndex = (Uint32*)segPtr; - Uint32 * remoteWriteIndex = (Uint32*)(segPtr + 1); - m_remoteStatusFlag = (Uint32*)(segPtr + 3); - - char * remoteStartOfBuf = ( char*)((char*)segPtr+(sharedSize)); - - writer = new SHM_Writer(remoteStartOfBuf, - sizeOfBuffer, - slack, - remoteReadIndex, - remoteWriteIndex); - - writer->clear(); - - m_TargetSegm[0].writer=writer; - - m_sendBuffer.m_forceSendLimit = writer->getBufferSize(); - - 
if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - DBUG_PRINT("error", ("Unable to create sequence on active")); - doDisconnect(); - } - if (m_adapters > 1) { - segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ; - - Uint32 * remoteReadIndex2 = (Uint32*)segPtr; - Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1); - m_remoteStatusFlag2 = (Uint32*)(segPtr + 3); - - char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize); - - /** - * setup a writer. writer2 is used to mirror the changes of - * writer on the standby - * segment, so that in the case of a failover, we can switch - * to the stdby seg. quickly.* - */ - writer2 = new SHM_Writer(remoteStartOfBuf2, - sizeOfBuffer, - slack, - remoteReadIndex2, - remoteWriteIndex2); - - * remoteReadIndex = 0; - * remoteWriteIndex = 0; - writer2->clear(); - m_TargetSegm[1].writer=writer2; - if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE); - DBUG_PRINT("error", ("Unable to create sequence on standby")); - doDisconnect(); - } - } - DBUG_VOID_RETURN; -} //setupRemoteSegment - -bool -SCI_Transporter::init_local() -{ - DBUG_ENTER("SCI_Transporter::init_local"); - if(!m_initLocal) { - if(initLocalSegment()!=SCI_ERR_OK){ - NdbSleep_MilliSleep(10); - //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! - report_error(TE_SCI_CANNOT_INIT_LOCALSEGMENT); - DBUG_RETURN(false); - } - m_initLocal=true; - } - DBUG_RETURN(true); -} - -bool -SCI_Transporter::init_remote() -{ - DBUG_ENTER("SCI_Transporter::init_remote"); - sci_error_t err; - Uint32 offset = 0; - if(!m_mapped ) { - DBUG_PRINT("info", ("Map remote segments")); - for(Uint32 i=0; i < m_adapters ; i++) { - m_TargetSegm[i].rhm[i].remoteHandle=0; - SCIConnectSegment(sciAdapters[i].scidesc, - &(m_TargetSegm[i].rhm[i].remoteHandle), - m_remoteNodes[i], - remoteSegmentId(localNodeId, remoteNodeId), - i, - 0, - 0, - 0, - 0, - &err); - - if(err != SCI_ERR_OK) { - NdbSleep_MilliSleep(10); - DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err)); - DBUG_RETURN(false); - } - } - // Map the remote memory segment into program space - for(Uint32 i=0; i < m_adapters ; i++) { - m_TargetSegm[i].mappedMemory = - SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle), - &(m_TargetSegm[i].rhm[i].map), - offset, - m_BufferSize, - NULL, - FLAGS, - &err); - - if(err!= SCI_ERR_OK) { - DBUG_PRINT("error", - ("Cannot map a segment to the remote node %d. Error code 0x%x", - m_RemoteSciNodeId, err)); - //NDB SHOULD TERMINATE AND COMPUTER REBOOTED! 
- report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT); - DBUG_RETURN(false); - } - } - m_mapped=true; - setupRemoteSegment(); - setConnected(); - DBUG_PRINT("info", ("connected and mapped to segment, remoteNode: %d", - remoteNodeId)); - DBUG_PRINT("info", ("remoteSegId: %d", - remoteSegmentId(localNodeId, remoteNodeId))); - DBUG_RETURN(true); - } else { - DBUG_RETURN(getConnectionStatus()); - } -} - -bool -SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) -{ - SocketInputStream s_input(sockfd); - SocketOutputStream s_output(sockfd); - char buf[256]; - DBUG_ENTER("SCI_Transporter::connect_client_impl"); - // Wait for server to create and attach - if (s_input.gets(buf, 256) == 0) { - DBUG_PRINT("error", ("No initial response from server in SCI")); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - if (!init_local()) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - // Send ok to server - s_output.println("sci client 1 ok"); - - if (!init_remote()) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - // Wait for ok from server - if (s_input.gets(buf, 256) == 0) { - DBUG_PRINT("error", ("No second response from server in SCI")); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - // Send ok to server - s_output.println("sci client 2 ok"); - - NDB_CLOSE_SOCKET(sockfd); - DBUG_PRINT("info", ("Successfully connected client to node %d", - remoteNodeId)); - DBUG_RETURN(true); -} - -bool -SCI_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) -{ - SocketOutputStream s_output(sockfd); - SocketInputStream s_input(sockfd); - char buf[256]; - DBUG_ENTER("SCI_Transporter::connect_server_impl"); - - if (!init_local()) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - // Send ok to client - s_output.println("sci server 1 ok"); - - // Wait for ok from client - if (s_input.gets(buf, 256) == 0) { - DBUG_PRINT("error", ("No response from client in SCI")); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - if (!init_remote()) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - // Send ok to client - s_output.println("sci server 2 ok"); - // Wait for ok from client - if (s_input.gets(buf, 256) == 0) { - DBUG_PRINT("error", ("No second response from client in SCI")); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - NDB_CLOSE_SOCKET(sockfd); - DBUG_PRINT("info", ("Successfully connected server to node %d", - remoteNodeId)); - DBUG_RETURN(true); -} - -sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) { - sci_error_t err; - SCICreateMapSequence((m_TargetSegm[adapterid].rhm[adapterid].map), - &(m_TargetSegm[adapterid].sequence), - SCI_FLAG_FAST_BARRIER, - &err); - return err; -} // createSequence() - -bool SCI_Transporter::disconnectLocal() -{ - DBUG_ENTER("SCI_Transporter::disconnectLocal"); - sci_error_t err; - m_ActiveAdapterId=0; - - /** Free resources used by a local segment - */ - - SCIUnmapSegment(m_SourceSegm[0].lhm[0].map,0,&err); - if(err!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT); - DBUG_PRINT("error", ("Unable to unmap segment")); - DBUG_RETURN(false); - } - - SCIRemoveSegment((m_SourceSegm[m_ActiveAdapterId].localHandle), - FLAGS, - &err); - - if(err!=SCI_ERR_OK) { - report_error(TE_SCI_UNABLE_TO_REMOVE_SEGMENT); - DBUG_PRINT("error", ("Unable to remove segment")); - DBUG_RETURN(false); - } - DBUG_PRINT("info", ("Local memory segment is unmapped and removed")); - DBUG_RETURN(true); -} // disconnectLocal() - - -bool SCI_Transporter::disconnectRemote() { - DBUG_ENTER("SCI_Transporter::disconnectRemote"); - 
sci_error_t err; - for(Uint32 i=0; i= send_buf_size) || - (curr_data_size >= sci_buffer_remaining)) { - /** - * The new message will not fit in the send buffer. We need to - * send the send buffer before filling it up with the new - * signal data. If current data size will spill over buffer edge - * we will also send to ensure correct operation. - */ - if (!doSend()) { - /** - * We were not successfull sending, report 0 as meaning buffer full and - * upper levels handle retries and other recovery matters. - */ - return 0; - } - } - /** - * New signal fits, simply fill it up with more data. - */ - Uint32 sz = m_sendBuffer.m_dataSize; - return &m_sendBuffer.m_buffer[sz]; -} - -void -SCI_Transporter::updateWritePtr(Uint32 lenBytes, Uint32 prio){ - - Uint32 sz = m_sendBuffer.m_dataSize; - Uint32 packet_size = m_PacketSize; - sz += ((lenBytes + 3) >> 2); - m_sendBuffer.m_dataSize = sz; - - if(sz > packet_size) { - /**------------------------------------------------- - * Buffer is full and we are ready to send. We will - * not wait since the signal is already in the buffer. - * Force flag set has the same indication that we - * should always send. If it is not possible to send - * we will not worry since we will soon be back for - * a renewed trial. - *------------------------------------------------- - */ - doSend(); - } -} - -enum SciStatus { - SCIDISCONNECT = 1, - SCICONNECTED = 2 -}; - -bool -SCI_Transporter::getConnectionStatus() { - if(*m_localStatusFlag == SCICONNECTED && - (*m_remoteStatusFlag == SCICONNECTED || - ((m_adapters > 1) && - *m_remoteStatusFlag2 == SCICONNECTED))) - return true; - else - return false; -} - -void -SCI_Transporter::setConnected() { - *m_remoteStatusFlag = SCICONNECTED; - if (m_adapters > 1) { - *m_remoteStatusFlag2 = SCICONNECTED; - } - *m_localStatusFlag = SCICONNECTED; -} - -void -SCI_Transporter::setDisconnect() { - if(getLinkStatus(m_ActiveAdapterId)) - *m_remoteStatusFlag = SCIDISCONNECT; - if (m_adapters > 1) { - if(getLinkStatus(m_StandbyAdapterId)) - *m_remoteStatusFlag2 = SCIDISCONNECT; - } -} - -bool -SCI_Transporter::checkConnected() { - if (*m_localStatusFlag == SCIDISCONNECT) { - return false; - } - else - return true; -} - -static bool init = false; - -bool -SCI_Transporter::initSCI() { - DBUG_ENTER("SCI_Transporter::initSCI"); - if(!init){ - sci_error_t error; - // Initialize SISCI library - SCIInitialize(0, &error); - if(error != SCI_ERR_OK) { - DBUG_PRINT("error", ("Cannot initialize SISCI library.")); - DBUG_PRINT("error", - ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x", - error)); - DBUG_RETURN(false); - } - init = true; - } - DBUG_RETURN(true); -} - -Uint32 -SCI_Transporter::get_free_buffer() const -{ - return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer(); -} - diff --git a/storage/ndb/src/common/transporter/SCI_Transporter.hpp b/storage/ndb/src/common/transporter/SCI_Transporter.hpp deleted file mode 100644 index 5135ba7a83b..00000000000 --- a/storage/ndb/src/common/transporter/SCI_Transporter.hpp +++ /dev/null @@ -1,384 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SCI_Transporter_H -#define SCI_Transporter_H -#include "Transporter.hpp" -#include "SHM_Buffer.hpp" - - -#include -#include -#include - -#include - -/** - * The SCI Transporter - * - * The design goal of the SCI transporter is to deliver high performance - * data transfers (low latency, high bandwidth) combined with very high - * availability (failover support). - * High performance is an inherit feature of SCI and the, whereas failover - * support is implemented at the application level. - * In SCI the programming model is similar to the shared memory paradigm. - * A process on one node (A) allocates a memory segment and import the - * segment to its virtual address space. Another node (B) can connect to - * the segment and map this segment into its virtual address space. - * If A writes data to the segment, then B can read it and vice versa, through - * ordinary loads and stores. This is also called PIO (programmable IO), and - * is one thing that distinguish SCI from other interconnects such as, - * ethernet, Gig-e, Myrinet, and Infiniband. By using PIO, lower network - * latency is achieved, compared to the interconnects mentioned above. - * In order for NDB to utilize SCI, the SCI transporter relies on the - * SISCI api. The SISCI api provides a high level abstraction to the low - * level SCI driver called PCISCI driver. - * The SISCI api provides functions to setup, export, and import - * memory segments in a process virtual address space, and also functions to - * guarantee the correctness of data transfers between nodes. Basically, the - * - * In NDB Cluster, each SCI transporter creates a local segment - * that is mapped into the virtual address space. After the creation of the - * local segment, the SCI transporter connects to a segment created by another - * transporter at a remote node, and the maps the remote segment into its - * virtual address space. However, since NDB Cluster relies on redundancy - * at the network level, by using dual SCI adapters communication can be - * maintained even if one of the adapter cards fails (or anything on the - * network this adapter card exists in e.g. an SCI switch failure). - * - */ - -/** - * class SCITransporter - * @brief - main class for the SCI transporter. - */ -class SCI_Transporter : public Transporter { - friend class TransporterRegistry; -public: - - /** - * Init the transporter. Allocate sendbuffers and open a SCI virtual device - * for each adapter. - * @return true if successful, otherwize false - */ - bool initTransporter(); - - - /** - * Creates a sequence for error checking. - * @param adapterid the adapter on which to create a new sequence. - * @return SCI_ERR_OK if ok, otherwize something else. - */ - sci_error_t createSequence(Uint32 adapterid); - - - /** Initiate Local Segment: create a memory segment, - * prepare a memory segment, map the local segment - * into memory space and make segment available. - * @return SCI_ERR_OK if ok, otherwize something else. - */ - sci_error_t initLocalSegment(); - - /** - * Calculate the segment id for the remote segment - * @param localNodeId - local id (e.g. 1 = mgm , 2 = ndb.2 etc.) - * @param remoteNodeId - remote id (e.g. 1 = mgm , 2 = ndb.2 etc.) 
- * @return a segment id - */ - Uint32 remoteSegmentId(Uint16 localNodeId, Uint16 remoteNodeId); - - // Get local segment id (inline) - Uint32 hostSegmentId(Uint16 localNodeId, Uint16 remoteNodeId); - - /** - * closeSCI closes the SCI virtual device - */ - void closeSCI(); - - - /** - * Check the status of the remote node, - * if it is connected or has disconnected - * @return true if connected, otherwize false. - */ - bool checkConnected(); - - /** - * Check if the segment are properly connected to each other (remotely - * and locally). - * @return True if the both the local segment is mapped and the - * remote segment is mapped. Otherwize false. - */ - bool getConnectionStatus(); - - virtual Uint32 get_free_buffer() const; -private: - SCI_Transporter(TransporterRegistry &t_reg, - const char *local_host, - const char *remote_host, - int port, - bool isMgmConnection, - Uint32 packetSize, - Uint32 bufferSize, - Uint32 nAdapters, - Uint16 remoteSciNodeId0, - Uint16 remoteSciNodeId1, - NodeId localNodeID, - NodeId remoteNodeID, - NodeId serverNodeId, - bool checksum, - bool signalId, - Uint32 reportFreq = 4096); - - /** - * Destructor. Disconnects the transporter. - */ - ~SCI_Transporter(); - bool m_mapped; - bool m_initLocal; - bool m_sciinit; - Uint32 m_failCounter; - /** - * For statistics on transfered packets - */ -//#ifdef DEBUG_TRANSPORTER -#if 1 - Uint32 i1024; - Uint32 i2048; - Uint32 i2049; - Uint32 i10242048; - Uint32 i20484096; - Uint32 i4096; - Uint32 i4097; -#endif - - volatile Uint32 * m_localStatusFlag; - volatile Uint32 * m_remoteStatusFlag; - volatile Uint32 * m_remoteStatusFlag2; - - struct { - Uint32 * m_buffer; // The buffer - Uint32 m_dataSize; // No of words in buffer - Uint32 m_sendBufferSize; // Buffer size - Uint32 m_forceSendLimit; // Send when buffer is this full - } m_sendBuffer; - - SHM_Reader * reader; - SHM_Writer * writer; - SHM_Writer * writer2; - - /** - * Statistics - */ - Uint32 m_reportFreq; - - Uint32 m_adapters; - Uint32 m_numberOfRemoteNodes; - - Uint16 m_remoteNodes[2]; - - typedef struct SciAdapter { - sci_desc_t scidesc; - Uint32 localSciNodeId; - bool linkStatus; - } SciAdapter; - - SciAdapter* sciAdapters; - Uint32 m_ActiveAdapterId; - Uint32 m_StandbyAdapterId; - - typedef struct sourceSegm { - sci_local_segment_t localHandle; // Handle to local segment to be mapped - struct localHandleMap { - sci_map_t map; // Handle to the new mapped segment. 
- // 2 = max adapters in one node - } lhm[2]; - - volatile void *mappedMemory; // Used when reading - } sourceSegm; - - typedef struct targetSegm { - struct remoteHandleMap { - sci_remote_segment_t remoteHandle; //Handle to local segment to be mapped - sci_map_t map; //Handle to the new mapped segment - } rhm[2]; - - sci_sequence_status_t m_SequenceStatus; // Used for error checking - sci_sequence_t sequence; - volatile void * mappedMemory; // Used when writing - SHM_Writer * writer; - } targetSegm; - - sci_sequence_status_t m_SequenceStatus; // Used for error checking - - - // Shared between all SCI users active=(either prim or second) - sci_desc_t activeSCIDescriptor; - - sourceSegm* m_SourceSegm; // Local segment reference - targetSegm* m_TargetSegm; // Remote segment reference - - Uint32 m_LocalAdapterId; // Adapter Id - Uint16 m_LocalSciNodeId; // The SCI-node Id of this machine (adapter 0) - Uint16 m_LocalSciNodeId1; // The SCI-node Id of this machine (adapter 1) - Uint16 m_RemoteSciNodeId; // The SCI-node Id of remote machine (adapter 0) - Uint16 m_RemoteSciNodeId1; // The SCI-node Id of remote machine (adapter 1) - - Uint32 m_PacketSize; // The size of each data packet - Uint32 m_BufferSize; // Mapped SCI buffer size - - Uint32 * getWritePtr(Uint32 lenBytes, Uint32 prio); - void updateWritePtr(Uint32 lenBytes, Uint32 prio); - - /** - * doSend. Copies the data from the source (the send buffer) to the - * shared mem. segment. - * Sequences are used for error checking. - * If an error occurs, the transfer is retried. - * If the link that we need to swap to is broken, we will disconnect. - * @return Returns true if datatransfer ok. If not retriable - * then false is returned. - */ - bool doSend(); - - /** - * @param adapterNo the adapter for which to retrieve the node id. - * @return Returns the node id for an adapter. - */ - Uint32 getLocalNodeId(Uint32 adapterNo); - - bool hasDataToRead() const { - return reader->empty() == false; - } - - bool hasDataToSend() const { - return m_sendBuffer.m_dataSize > 0; - } - - /** - * Make the local segment unavailable, no new connections will be accepted. - * @return Returns true if the segment was successfully disconnected. - */ - bool disconnectLocal(); - - /** - * Make the local segment unavailable, no new connections will be accepted. - * @return Returns true if the segment was successfully disconnected. - */ - bool disconnectRemote(); - - void resetToInitialState(); - - /** - * It is always possible to send data with SCI! - * @return True (always) - */ - bool sendIsPossible(struct timeval * timeout); - - void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ - reader->getReadPtr(* ptr, * eod); - } - - void updateReceivePtr(Uint32 *ptr){ - reader->updateReadPtr(ptr); - } - - /** - * Corresponds to SHM_Transporter::setupBuffers() - * Initiates the start pointer of the buffer and read pointers. - * Initiate the localSegment for the SHM reader. - */ - void setupLocalSegment(); - - /** - * Initiate the remoteSegment for the SHM writer - */ - void setupRemoteSegment(); - - /** - * Set the connect flag in the remote memory segment (write through) - */ - void setConnected(); - - /** - * Set the disconnect flag in the remote memory segment (write through) - */ - void setDisconnect(); - - /** - * Check if there is a link between the adapter and the switch - * @param adapterNo the adapter for which to retrieve the link status. - * @return Returns true if there is a link between adapter and switch. 
- * Otherwize false is returned and the cables must be checked. - */ - bool getLinkStatus(Uint32 adapterNo); - - /** - * failoverShmWriter takes the state of the active writer and inserts into - * the standby writer. - */ - void failoverShmWriter(); - - bool init_local(); - bool init_remote(); - -protected: - - /** Perform a connection between segment - * This is a client node, trying to connect to a remote segment. - * @param timeout, the time the connect thread sleeps before - * retrying. - * @return Returns true on success, otherwize falser - */ - bool connect_server_impl(NDB_SOCKET_TYPE sockfd); - bool connect_client_impl(NDB_SOCKET_TYPE sockfd); - - /** - * We will disconnect if: - * -# the other node has disconnected from us - * -# unrecoverable error in transmission, on both adapters - * -# if we are shutdown properly - */ - void disconnectImpl(); - - static bool initSCI(); -}; - - -/** The theLocalAdapterId combined with the theRemoteNodeId constructs - * (SCI ids)* a unique identifier for the local segment - */ -inline -Uint32 -SCI_Transporter::hostSegmentId(Uint16 SciLocalNodeId, - Uint16 SciRemoteNodeId) { - - return (SciLocalNodeId << 16) | SciRemoteNodeId; -} - -/** The theLocalAdapterId combined with the theRemoteNodeId constructs - * (SCI ids)* a unique identifier for the remote segment - */ -inline -Uint32 -SCI_Transporter::remoteSegmentId(Uint16 SciLocalNodeId, - Uint16 SciRemoteNodeId) { - - return (SciRemoteNodeId << 16) | SciLocalNodeId; -} - - -#endif diff --git a/storage/ndb/src/common/transporter/SHM_Buffer.hpp b/storage/ndb/src/common/transporter/SHM_Buffer.hpp deleted file mode 100644 index 5e900170318..00000000000 --- a/storage/ndb/src/common/transporter/SHM_Buffer.hpp +++ /dev/null @@ -1,233 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
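The two inline helpers at the end of the deleted SCI_Transporter.hpp derive a segment id from the pair of SCI node ids, with the shift order reversed between the host and remote variants. A short usage sketch of why that pairing works, i.e. why the id one node exports with hostSegmentId() is exactly the id the peer computes with remoteSegmentId(); the node-id values are arbitrary examples.

#include <cassert>
#include <cstdint>

static uint32_t hostSegmentId(uint16_t localSci, uint16_t remoteSci) {
  return (uint32_t(localSci) << 16) | remoteSci;    // id of the locally exported segment
}
static uint32_t remoteSegmentId(uint16_t localSci, uint16_t remoteSci) {
  return (uint32_t(remoteSci) << 16) | localSci;    // id of the segment to connect to
}

int main() {
  const uint16_t a = 2, b = 3;                      // two example SCI node ids
  // What A exports for B is what B connects to as "remote", and vice versa.
  assert(hostSegmentId(a, b) == remoteSegmentId(b, a));
  assert(hostSegmentId(b, a) == remoteSegmentId(a, b));
  return 0;
}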
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SHM_BUFFER_HPP -#define SHM_BUFFER_HPP - -#include - -#include - -/** - * These classes implement a circular buffer - * - * One reader and one writer - */ - -/** - * SHM_Reader - * - * Use as follows: - * getReadPtr(ptr, sz); - * for(int i = 0; i= m_bufferSize){ - tReadIndex = 0; - } - - m_readIndex = tReadIndex; - * m_sharedReadIndex = tReadIndex; -} - -#define WRITER_SLACK 4 - -class SHM_Writer { -public: - SHM_Writer(char * const _startOfBuffer, - Uint32 _sizeOfBuffer, - Uint32 _slack, - Uint32 * _readIndex, - Uint32 * _writeIndex) : - m_startOfBuffer(_startOfBuffer), - m_totalBufferSize(_sizeOfBuffer), - m_bufferSize(_sizeOfBuffer - _slack), - m_sharedReadIndex(_readIndex), - m_sharedWriteIndex(_writeIndex) - { - } - - void clear() { - m_writeIndex = 0; - } - - inline char * getWritePtr(Uint32 sz); - inline void updateWritePtr(Uint32 sz); - - inline Uint32 getWriteIndex() const { return m_writeIndex;} - inline Uint32 getBufferSize() const { return m_bufferSize;} - inline Uint32 get_free_buffer() const; - - inline void copyIndexes(SHM_Writer * standbyWriter); - -private: - char * const m_startOfBuffer; - Uint32 m_totalBufferSize; - Uint32 m_bufferSize; - - Uint32 m_writeIndex; - - Uint32 * m_sharedReadIndex; - Uint32 * m_sharedWriteIndex; -}; - -inline -char * -SHM_Writer::getWritePtr(Uint32 sz){ - Uint32 tReadIndex = * m_sharedReadIndex; - Uint32 tWriteIndex = m_writeIndex; - - char * ptr = &m_startOfBuffer[tWriteIndex]; - - Uint32 free; - if(tReadIndex <= tWriteIndex){ - free = m_bufferSize + tReadIndex - tWriteIndex; - } else { - free = tReadIndex - tWriteIndex; - } - - sz += 4; - if(sz < free){ - return ptr; - } - - return 0; -} - -inline -void -SHM_Writer::updateWritePtr(Uint32 sz){ - - assert(m_writeIndex == * m_sharedWriteIndex); - - Uint32 tWriteIndex = m_writeIndex; - tWriteIndex += sz; - - assert(tWriteIndex < m_totalBufferSize); - - if(tWriteIndex >= m_bufferSize){ - tWriteIndex = 0; - } - - m_writeIndex = tWriteIndex; - * m_sharedWriteIndex = tWriteIndex; -} - -inline -Uint32 -SHM_Writer::get_free_buffer() const -{ - Uint32 tReadIndex = * m_sharedReadIndex; - Uint32 tWriteIndex = m_writeIndex; - - Uint32 free; - if(tReadIndex <= tWriteIndex){ - free = m_bufferSize + tReadIndex - tWriteIndex; - } else { - free = tReadIndex - tWriteIndex; - } - return free; -} - -#endif diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.cpp deleted file mode 100644 index 649891a8ad7..00000000000 --- a/storage/ndb/src/common/transporter/SHM_Transporter.cpp +++ /dev/null @@ -1,377 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
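The deleted SHM_Buffer.hpp writer above implements one half of a single-producer/single-consumer ring buffer: the only data shared between the two processes are a read index and a write index, the usable size excludes a slack area so a message never has to wrap mid-message, and the write index wraps to zero once it passes the logical end. Below is a minimal, modernized writer-side sketch of that index arithmetic (std::atomic instead of volatile, hypothetical names); it mirrors the deleted getWritePtr()/updateWritePtr() logic but is not the NDB code.

#include <atomic>
#include <cstdint>

class RingWriter {
public:
  RingWriter(char* buf, uint32_t total, uint32_t slack,
             std::atomic<uint32_t>* sharedRead,
             std::atomic<uint32_t>* sharedWrite)
    : m_buf(buf), m_size(total - slack),
      m_read(sharedRead), m_write(sharedWrite) {}

  // Reserve sz bytes of contiguous space; returns nullptr when too full.
  char* getWritePtr(uint32_t sz) const {
    const uint32_t r = m_read->load(std::memory_order_acquire);
    const uint32_t w = m_write->load(std::memory_order_relaxed);
    const uint32_t free = (r <= w) ? m_size + r - w : r - w;
    return (sz + 4 < free) ? m_buf + w : nullptr;   // keep a small margin, as the original does
  }

  // Publish sz bytes; wrap to the start once the logical end is passed
  // (the slack area absorbs the overhanging part of the last message).
  void updateWritePtr(uint32_t sz) {
    uint32_t w = m_write->load(std::memory_order_relaxed) + sz;
    if (w >= m_size) w = 0;
    m_write->store(w, std::memory_order_release);
  }

private:
  char* m_buf;
  uint32_t m_size;                                  // usable size = total - slack
  std::atomic<uint32_t>* m_read;
  std::atomic<uint32_t>* m_write;
};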
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include "SHM_Transporter.hpp" -#include "TransporterInternalDefinitions.hpp" -#include -#include -#include - -#include -#include - -extern int g_ndb_shm_signum; - -SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, - const char *lHostName, - const char *rHostName, - int r_port, - bool isMgmConnection_arg, - NodeId lNodeId, - NodeId rNodeId, - NodeId serverNodeId, - bool checksum, - bool signalId, - key_t _shmKey, - Uint32 _shmSize) : - Transporter(t_reg, tt_SHM_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection_arg, - lNodeId, rNodeId, serverNodeId, - 0, false, checksum, signalId), - shmKey(_shmKey), - shmSize(_shmSize) -{ -#ifndef NDB_WIN32 - shmId= 0; -#endif - _shmSegCreated = false; - _attached = false; - - shmBuf = 0; - reader = 0; - writer = 0; - - setupBuffersDone=false; -#ifdef DEBUG_TRANSPORTER - printf("shm key (%d - %d) = %d\n", lNodeId, rNodeId, shmKey); -#endif - m_signal_threshold = 4096; -} - -SHM_Transporter::~SHM_Transporter(){ - doDisconnect(); -} - -bool -SHM_Transporter::initTransporter(){ - if (g_ndb_shm_signum) - return true; - return false; -} - -void -SHM_Transporter::setupBuffers(){ - Uint32 sharedSize = 0; - sharedSize += 28; //SHM_Reader::getSharedSize(); - sharedSize += 28; //SHM_Writer::getSharedSize(); - - const Uint32 slack = MAX_MESSAGE_SIZE; - - /** - * NOTE: There is 7th shared variable in Win2k (sharedCountAttached). - */ - Uint32 sizeOfBuffer = shmSize; - sizeOfBuffer -= 2*sharedSize; - sizeOfBuffer /= 2; - - Uint32 * base1 = (Uint32*)shmBuf; - - Uint32 * sharedReadIndex1 = base1; - Uint32 * sharedWriteIndex1 = base1 + 1; - serverStatusFlag = base1 + 4; - char * startOfBuf1 = shmBuf+sharedSize; - - Uint32 * base2 = (Uint32*)(shmBuf + sizeOfBuffer + sharedSize); - Uint32 * sharedReadIndex2 = base2; - Uint32 * sharedWriteIndex2 = base2 + 1; - clientStatusFlag = base2 + 4; - char * startOfBuf2 = ((char *)base2)+sharedSize; - - if(isServer){ - * serverStatusFlag = 0; - reader = new SHM_Reader(startOfBuf1, - sizeOfBuffer, - slack, - sharedReadIndex1, - sharedWriteIndex1); - - writer = new SHM_Writer(startOfBuf2, - sizeOfBuffer, - slack, - sharedReadIndex2, - sharedWriteIndex2); - - * sharedReadIndex1 = 0; - * sharedWriteIndex1 = 0; - - * sharedReadIndex2 = 0; - * sharedWriteIndex2 = 0; - - reader->clear(); - writer->clear(); - - * serverStatusFlag = 1; - -#ifdef DEBUG_TRANSPORTER - printf("-- (%d - %d) - Server -\n", localNodeId, remoteNodeId); - printf("Reader at: %d (%p)\n", startOfBuf1 - shmBuf, startOfBuf1); - printf("sharedReadIndex1 at %d (%p) = %d\n", - (char*)sharedReadIndex1-shmBuf, - sharedReadIndex1, *sharedReadIndex1); - printf("sharedWriteIndex1 at %d (%p) = %d\n", - (char*)sharedWriteIndex1-shmBuf, - sharedWriteIndex1, *sharedWriteIndex1); - - printf("Writer at: %d (%p)\n", startOfBuf2 - shmBuf, startOfBuf2); - printf("sharedReadIndex2 at %d (%p) = %d\n", - (char*)sharedReadIndex2-shmBuf, - sharedReadIndex2, *sharedReadIndex2); - printf("sharedWriteIndex2 at %d (%p) = %d\n", - (char*)sharedWriteIndex2-shmBuf, - sharedWriteIndex2, *sharedWriteIndex2); - - printf("sizeOfBuffer = %d\n", sizeOfBuffer); -#endif - } else { - * clientStatusFlag = 0; - reader = new SHM_Reader(startOfBuf2, - sizeOfBuffer, - slack, - sharedReadIndex2, - sharedWriteIndex2); - - writer = new SHM_Writer(startOfBuf1, - 
sizeOfBuffer, - slack, - sharedReadIndex1, - sharedWriteIndex1); - - * sharedReadIndex2 = 0; - * sharedWriteIndex1 = 0; - - reader->clear(); - writer->clear(); - * clientStatusFlag = 1; -#ifdef DEBUG_TRANSPORTER - printf("-- (%d - %d) - Client -\n", localNodeId, remoteNodeId); - printf("Reader at: %d (%p)\n", startOfBuf2 - shmBuf, startOfBuf2); - printf("sharedReadIndex2 at %d (%p) = %d\n", - (char*)sharedReadIndex2-shmBuf, - sharedReadIndex2, *sharedReadIndex2); - printf("sharedWriteIndex2 at %d (%p) = %d\n", - (char*)sharedWriteIndex2-shmBuf, - sharedWriteIndex2, *sharedWriteIndex2); - - printf("Writer at: %d (%p)\n", startOfBuf1 - shmBuf, startOfBuf1); - printf("sharedReadIndex1 at %d (%p) = %d\n", - (char*)sharedReadIndex1-shmBuf, - sharedReadIndex1, *sharedReadIndex1); - printf("sharedWriteIndex1 at %d (%p) = %d\n", - (char*)sharedWriteIndex1-shmBuf, - sharedWriteIndex1, *sharedWriteIndex1); - - printf("sizeOfBuffer = %d\n", sizeOfBuffer); -#endif - } -#ifdef DEBUG_TRANSPORTER - printf("Mapping from %p to %p\n", shmBuf, shmBuf+shmSize); -#endif -} - -bool -SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("SHM_Transporter::connect_server_impl"); - SocketOutputStream s_output(sockfd); - SocketInputStream s_input(sockfd); - char buf[256]; - - // Create - if(!_shmSegCreated){ - if (!ndb_shm_create()) { - make_error_info(buf, sizeof(buf)); - report_error(TE_SHM_UNABLE_TO_CREATE_SEGMENT, buf); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - _shmSegCreated = true; - } - - // Attach - if(!_attached){ - if (!ndb_shm_attach()) { - make_error_info(buf, sizeof(buf)); - report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT, buf); - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - _attached = true; - } - - // Send ok to client - s_output.println("shm server 1 ok: %d", - m_transporter_registry.m_shm_own_pid); - - // Wait for ok from client - DBUG_PRINT("info", ("Wait for ok from client")); - if (s_input.gets(buf, sizeof(buf)) == 0) - { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - if(sscanf(buf, "shm client 1 ok: %d", &m_remote_pid) != 1) - { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - int r= connect_common(sockfd); - - if (r) { - // Send ok to client - s_output.println("shm server 2 ok"); - // Wait for ok from client - if (s_input.gets(buf, 256) == 0) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - DBUG_PRINT("info", ("Successfully connected server to node %d", - remoteNodeId)); - } - - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(r); -} - -bool -SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("SHM_Transporter::connect_client_impl"); - SocketInputStream s_input(sockfd); - SocketOutputStream s_output(sockfd); - char buf[256]; - - // Wait for server to create and attach - DBUG_PRINT("info", ("Wait for server to create and attach")); - if (s_input.gets(buf, 256) == 0) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_PRINT("error", ("Server id %d did not attach", - remoteNodeId)); - DBUG_RETURN(false); - } - - if(sscanf(buf, "shm server 1 ok: %d", &m_remote_pid) != 1) - { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - // Create - if(!_shmSegCreated){ - if (!ndb_shm_get()) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_PRINT("error", ("Failed create of shm seg to node %d", - remoteNodeId)); - DBUG_RETURN(false); - } - _shmSegCreated = true; - } - - // Attach - if(!_attached){ - if (!ndb_shm_attach()) { - make_error_info(buf, sizeof(buf)); - report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT, buf); - NDB_CLOSE_SOCKET(sockfd); - 
DBUG_PRINT("error", ("Failed attach of shm seg to node %d", - remoteNodeId)); - DBUG_RETURN(false); - } - _attached = true; - } - - // Send ok to server - s_output.println("shm client 1 ok: %d", - m_transporter_registry.m_shm_own_pid); - - int r= connect_common(sockfd); - - if (r) { - // Wait for ok from server - DBUG_PRINT("info", ("Wait for ok from server")); - if (s_input.gets(buf, 256) == 0) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_PRINT("error", ("No ok from server node %d", - remoteNodeId)); - DBUG_RETURN(false); - } - // Send ok to server - s_output.println("shm client 2 ok"); - DBUG_PRINT("info", ("Successfully connected client to node %d", - remoteNodeId)); - } - - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(r); -} - -bool -SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) -{ - if (!checkConnected()) { - return false; - } - - if(!setupBuffersDone) - { - setupBuffers(); - setupBuffersDone=true; - } - - if(setupBuffersDone) - { - NdbSleep_MilliSleep(m_timeOutMillis); - if(*serverStatusFlag == 1 && *clientStatusFlag == 1) - { - m_last_signal = 0; - return true; - } - } - - DBUG_PRINT("error", ("Failed to set up buffers to node %d", - remoteNodeId)); - return false; -} - -void -SHM_Transporter::doSend() -{ - if(m_last_signal) - { - m_last_signal = 0; - kill(m_remote_pid, g_ndb_shm_signum); - } -} - -Uint32 -SHM_Transporter::get_free_buffer() const -{ - return writer->get_free_buffer(); -} diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.hpp b/storage/ndb/src/common/transporter/SHM_Transporter.hpp deleted file mode 100644 index d9eef794001..00000000000 --- a/storage/ndb/src/common/transporter/SHM_Transporter.hpp +++ /dev/null @@ -1,177 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SHM_Transporter_H -#define SHM_Transporter_H - -#include "Transporter.hpp" -#include "SHM_Buffer.hpp" - -#ifdef NDB_WIN32 -typedef Uint32 key_t; -#endif - -/** - * class SHMTransporter - * @brief - main class for the SHM transporter. 
- */ - -class SHM_Transporter : public Transporter { - friend class TransporterRegistry; -public: - SHM_Transporter(TransporterRegistry &, - const char *lHostName, - const char *rHostName, - int r_port, - bool isMgmConnection, - NodeId lNodeId, - NodeId rNodeId, - NodeId serverNodeId, - bool checksum, - bool signalId, - key_t shmKey, - Uint32 shmSize); - - /** - * SHM destructor - */ - virtual ~SHM_Transporter(); - - /** - * Do initialization - */ - bool initTransporter(); - - Uint32 * getWritePtr(Uint32 lenBytes, Uint32 prio) - { - return (Uint32 *)writer->getWritePtr(lenBytes); - } - - void updateWritePtr(Uint32 lenBytes, Uint32 prio) - { - writer->updateWritePtr(lenBytes); - m_last_signal += lenBytes; - if(m_last_signal >= m_signal_threshold) - { - doSend(); - } - } - - void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){ - reader->getReadPtr(* ptr, * eod); - } - - void updateReceivePtr(Uint32 * ptr){ - reader->updateReadPtr(ptr); - } - -protected: - /** - * disconnect a segmnet - * -# deletes the shm buffer associated with a segment - * -# marks the segment for removal - */ - void disconnectImpl(); - - /** - * Blocking - * - * -# Create shm segment - * -# Attach to it - * -# Wait for someone to attach (max wait = timeout), then rerun again - * until connection established. - * @param timeOutMillis - the time to sleep before (ms) trying again. - * @returns - True if the server managed to hook up with the client, - * i.e., both agrees that the other one has setup the segment. - * Otherwise false. - */ - virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd); - - /** - * Blocking - * - * -# Attach to shm segment - * -# Check if the segment is setup - * -# Check if the server set it up - * -# If all clear, return. - * @param timeOutMillis - the time to sleep before (ms) trying again. - * @returns - True if the client managed to hook up with the server, - * i.e., both agrees that the other one has setup the segment. - * Otherwise false. - */ - virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd); - - bool connect_common(NDB_SOCKET_TYPE sockfd); - - bool ndb_shm_create(); - bool ndb_shm_get(); - bool ndb_shm_attach(); - - /** - * Check if there are two processes attached to the segment (a connection) - * @return - True if the above holds. Otherwise false. - */ - bool checkConnected(); - - - /** - * Initialises the SHM_Reader and SHM_Writer on the segment - */ - void setupBuffers(); - - /** - * doSend (i.e signal receiver) - */ - void doSend(); - int m_remote_pid; - Uint32 m_last_signal; - Uint32 m_signal_threshold; - - virtual Uint32 get_free_buffer() const; - -private: - bool _shmSegCreated; - bool _attached; - bool m_connected; - - key_t shmKey; - volatile Uint32 * serverStatusFlag; - volatile Uint32 * clientStatusFlag; - bool setupBuffersDone; - -#ifdef NDB_WIN32 - HANDLE hFileMapping; -#else - int shmId; -#endif - - int shmSize; - char * shmBuf; - - SHM_Reader * reader; - SHM_Writer * writer; - - /** - * @return - True if the reader has data to read on its segment. 
- */ - bool hasDataToRead() const { - return reader->empty() == false; - } - - void make_error_info(char info[], int sz); -}; - -#endif diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp deleted file mode 100644 index cc9c5e7cb19..00000000000 --- a/storage/ndb/src/common/transporter/SHM_Transporter.unix.cpp +++ /dev/null @@ -1,126 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include "SHM_Transporter.hpp" -#include "TransporterInternalDefinitions.hpp" -#include -#include -#include - -#include -#include - -void SHM_Transporter::make_error_info(char info[], int sz) -{ - snprintf(info,sz,"Shm key=%d sz=%d id=%d", - shmKey, shmSize, shmId); -} - -bool -SHM_Transporter::ndb_shm_create() -{ - shmId = shmget(shmKey, shmSize, IPC_CREAT | 960); - if(shmId == -1) { - perror("shmget: "); - return false; - } - return true; -} - -bool -SHM_Transporter::ndb_shm_get() -{ - shmId = shmget(shmKey, shmSize, 0); - if(shmId == -1) { - perror("shmget: "); - return false; - } - return true; -} - -bool -SHM_Transporter::ndb_shm_attach() -{ - shmBuf = (char *)shmat(shmId, 0, 0); - if(shmBuf == 0) { - perror("shmat: "); - return false; - } - return true; -} - -bool -SHM_Transporter::checkConnected(){ - struct shmid_ds info; - const int res = shmctl(shmId, IPC_STAT, &info); - if(res == -1){ - char buf[128]; - int r= snprintf(buf, sizeof(buf), - "shmctl(%d, IPC_STAT) errno: %d(%s). 
", shmId, - errno, strerror(errno)); - make_error_info(buf+r, sizeof(buf)-r); - DBUG_PRINT("error",(buf)); - switch (errno) - { - case EACCES: - report_error(TE_SHM_IPC_PERMANENT, buf); - break; - default: - report_error(TE_SHM_IPC_STAT, buf); - break; - } - return false; - } - - if(info.shm_nattch != 2){ - char buf[128]; - make_error_info(buf, sizeof(buf)); - report_error(TE_SHM_DISCONNECT); - DBUG_PRINT("error", ("Already connected to node %d", - remoteNodeId)); - return false; - } - return true; -} - -void -SHM_Transporter::disconnectImpl(){ - if(_attached){ - const int res = shmdt(shmBuf); - if(res == -1){ - perror("shmdelete: "); - return; - } - _attached = false; - if(!isServer && _shmSegCreated) - _shmSegCreated = false; - } - - if(isServer && _shmSegCreated){ - const int res = shmctl(shmId, IPC_RMID, 0); - if(res == -1){ - char buf[64]; - make_error_info(buf, sizeof(buf)); - report_error(TE_SHM_UNABLE_TO_REMOVE_SEGMENT); - return; - } - _shmSegCreated = false; - } - setupBuffersDone=false; -} diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp deleted file mode 100644 index 78b1d367ef5..00000000000 --- a/storage/ndb/src/common/transporter/SHM_Transporter.win32.cpp +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include "SHM_Transporter.hpp" -#include "TransporterInternalDefinitions.hpp" -#include -#include -#include - -#include - - -void SHM_Transporter::make_error_info(char info[], int sz) -{ - snprintf(info,sz,"Shm key=%d sz=%d", - shmKey, shmSize); -} - -bool -SHM_Transporter::connectServer(Uint32 timeOutMillis){ - if(!_shmSegCreated) - { - char szName[32]; - sprintf(szName, "ndb%lu", shmKey); - hFileMapping = CreateFileMapping(INVALID_HANDLE_VALUE, - 0, - PAGE_READWRITE, - 0, - shmSize, - szName); - - if(!hFileMapping) - { - reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_CREATE_SEGMENT); - NdbSleep_MilliSleep(timeOutMillis); - return false; - } - _shmSegCreated = true; - } - - if(!_attached){ - shmBuf = (char*)MapViewOfFile(hFileMapping, FILE_MAP_ALL_ACCESS, 0, 0, 0); - if(shmBuf == 0){ - reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_ATTACH_SEGMENT); - NdbSleep_MilliSleep(timeOutMillis); - return false; - } - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - ++*sharedCountAttached; - _attached = true; - } - - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - - if(*sharedCountAttached == 2 && !setupBuffersDone) { - setupBuffers(); - setupBuffersDone=true; - } - if(*sharedCountAttached > 2) { - reportThreadError(remoteNodeId, TE_SHM_DISCONNECT); - return false; - } - - if(setupBuffersDone) { - NdbSleep_MilliSleep(timeOutMillis); - if(*serverStatusFlag==1 && *clientStatusFlag==1) - return true; - } - - NdbSleep_MilliSleep(timeOutMillis); - return false; -} - -bool -SHM_Transporter::connectClient(Uint32 timeOutMillis){ - if(!_shmSegCreated) - { - char szName[32]; - sprintf(szName, "ndb%lu", shmKey); - hFileMapping = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, szName); - - if(!hFileMapping) - { - NdbSleep_MilliSleep(timeOutMillis); - return false; - } - _shmSegCreated = true; - } - - if(!_attached){ - shmBuf = (char*)MapViewOfFile(hFileMapping, FILE_MAP_ALL_ACCESS, 0, 0, 0); - if(shmBuf == 0){ - reportThreadError(remoteNodeId, TE_SHM_UNABLE_TO_ATTACH_SEGMENT); - NdbSleep_MilliSleep(timeOutMillis); - return false; - } - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - ++*sharedCountAttached; - _attached = true; - } - - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - - if(*sharedCountAttached == 2 && !setupBuffersDone) { - setupBuffers(); - setupBuffersDone=true; - } - - if(setupBuffersDone) { - if(*serverStatusFlag==1 && *clientStatusFlag==1) - return true; - } - NdbSleep_MilliSleep(timeOutMillis); - return false; - -} - - -bool -SHM_Transporter::checkConnected(){ - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - if(*sharedCountAttached != 2) { - reportError(callbackObj, remoteNodeId, TE_SHM_DISCONNECT); - return false; - } - return true; -} - -void -SHM_Transporter::disconnectImpl(){ - if(_attached) { - volatile Uint32 * sharedCountAttached = - (volatile Uint32*)(shmBuf + 6*sizeof(Uint32*)); - - --*sharedCountAttached; - - if(!UnmapViewOfFile(shmBuf)) { - reportError(callbackObj, remoteNodeId, TE_SHM_UNABLE_TO_REMOVE_SEGMENT); - return; - } - - _attached = false; - if(!isServer && _shmSegCreated) - _shmSegCreated = false; - } - - if(_shmSegCreated){ - 
if(!CloseHandle(hFileMapping)) { - reportError(callbackObj, remoteNodeId, TE_SHM_UNABLE_TO_REMOVE_SEGMENT); - return; - } - _shmSegCreated = false; - } - setupBuffersDone=false; - -} - diff --git a/storage/ndb/src/common/transporter/SendBuffer.cpp b/storage/ndb/src/common/transporter/SendBuffer.cpp deleted file mode 100644 index 7e62ebaa91c..00000000000 --- a/storage/ndb/src/common/transporter/SendBuffer.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "SendBuffer.hpp" -#include "TransporterInternalDefinitions.hpp" - -SendBuffer::SendBuffer(Uint32 bufSize) { - - sizeOfBuffer = bufSize; - if(sizeOfBuffer < MAX_MESSAGE_SIZE) - sizeOfBuffer = 2 * MAX_MESSAGE_SIZE; - startOfBuffer = NULL; - - // Initalise pointers - endOfBuffer = NULL; - insertPtr = NULL; - sendPtr = NULL; - sendDataSize = 0; - dataSize = 0; -} - -bool -SendBuffer::initBuffer(Uint32 aRemoteNodeId) { - - // Allocate memory for the buffer -#ifdef DEBUG_TRANSPORTER - ndbout << "Allocating " << sizeOfBuffer << " bytes for send buffer" << endl; -#endif - - startOfBuffer = new Uint32[(sizeOfBuffer >> 2) + 1]; - endOfBuffer = startOfBuffer + (sizeOfBuffer >> 2); - - emptyBuffer(); - theRemoteNodeId = aRemoteNodeId; - return true; -} - -SendBuffer::~SendBuffer() { - // Deallocate the buffer memory - if(startOfBuffer != NULL) - delete[] startOfBuffer; -} - -int -SendBuffer::bufferSize() { - return dataSize; -} - -Uint32 -SendBuffer::bufferSizeRemaining() const { - return (sizeOfBuffer - dataSize); -} - -void -SendBuffer::emptyBuffer() { - insertPtr = startOfBuffer; - sendPtr = (char*)startOfBuffer; - dataSize = 0; - sendDataSize = 0; -} - -#ifdef DEBUG_TRANSPORTER -void -SendBuffer::print() { - - printf("SendBuffer status printouts\n"); - - printf( "sizeOfBuffer: %d\n", sizeOfBuffer); - printf( "startOfBuffer: %.8x\n", startOfBuffer); - printf( "endOfBuffer: %.8x\n", endOfBuffer); - printf( "insertPtr: %.8x\n", insertPtr); - printf( "sendPtr: %.8x\n", sendPtr); - printf( "sendDataSize: %d\n", sendDataSize); - printf( "dataSize: %d\n", dataSize); -} -#endif diff --git a/storage/ndb/src/common/transporter/SendBuffer.hpp b/storage/ndb/src/common/transporter/SendBuffer.hpp deleted file mode 100644 index 8d772aa4dbc..00000000000 --- a/storage/ndb/src/common/transporter/SendBuffer.hpp +++ /dev/null @@ -1,190 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//**************************************************************************** -// -// NAME -// SendBuffer -// -// DESCRIPTION -// The SendBuffer is a circular buffer storing signals waiting to be sent. -// The signals can be of variable size and are copied into the buffer -// in Protocol 6 format. There will be two SendBuffer instances -// (priority level A and B) for each transporter using a buffer for -// sending. The buffering will in most cases be done to send as big -// packages as possible over TCP/IP. -// -//***************************************************************************/ -#ifndef SendBuffer_H -#define SendBuffer_H - -#include "TransporterDefinitions.hpp" -#include - -#ifdef DEBUG_TRANSPORTER -#include -#endif - -class SendBuffer { - friend class TCP_Transporter; -public: - // Set member variables - SendBuffer(Uint32 bufSize); - - // Deallocate the buffer memory - ~SendBuffer(); - - // Allocate memory for the buffer and initialize the buffer pointers - bool initBuffer(Uint32 aRemoteNodeId); - - // Number of bytes remaining in the buffer - Uint32 bufferSizeRemaining() const; - - // Number of bytes of data in the buffer - int bufferSize(); - - // Empty the buffer - void emptyBuffer(); - - /** - * The transporter calls updateBuffer after a retrieve followed by - * a successful send, to update the cirkular buffer pointers. - * updateBuffer is called with the number of bytes really sent, - * it may be that it is less than what was retrived from the buffer. - * If that is the case there will be an incomplete message (slack) - * in the SendBuffer. - * - * Returns 0 if buffer empty - * else ~0 - */ - Uint32 bytesSent(Uint32 len); - -#ifdef DEBUG_TRANSPORTER - // Prints the buffer status on the screen. Can be used for testing purposes. 
- void print(); -#endif - - Uint32* getInsertPtr(Uint32 bytes); - void updateInsertPtr(Uint32 bytes); - -private: - - Uint32 sizeOfBuffer; // Length, in number of bytes, of the buffer memory - Uint32 dataSize; // Number of bytes in buffer - - Uint32 * startOfBuffer; // Pointer to the start of the buffer memory - Uint32 * endOfBuffer; // Pointer to end of buffer - - Uint32 * insertPtr; // Where to insert next - - char * sendPtr; // Where data to send starts - Uint32 sendDataSize; // Num bytes to send - - Uint32 theRemoteNodeId; -}; - -inline -Uint32 -SendBuffer::bytesSent(Uint32 bytes) { - - if(bytes > dataSize){ -#ifdef DEBUG_TRANSPORTER - printf("bytes(%d) > dataSize(%d)\n", bytes, dataSize); -#endif - abort(); - // reportError(0 ,theRemoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return 0; - }//if - - if(bytes > sendDataSize){ -#ifdef DEBUG_TRANSPORTER - printf("bytes(%d) > sendDataSize(%d)\n", bytes, sendDataSize); -#endif - abort(); - //reportError(0,theRemoteNodeId, TE_INVALID_MESSAGE_LENGTH); - return 0; - }//if - - dataSize -= bytes; - sendPtr += bytes; - sendDataSize -= bytes; - - if(sendDataSize == 0){ - if(sendPtr > (char*)insertPtr){ - sendPtr = (char *)startOfBuffer; - sendDataSize = dataSize; - } else { - sendPtr = ((char*)insertPtr) - dataSize; - sendDataSize = dataSize; - } - } - - if(dataSize == 0) - return 0; - return ~0; -} - -inline -Uint32* -SendBuffer::getInsertPtr(Uint32 len){ - if (bufferSizeRemaining() < len){ - return 0; - } - - const char * const tmpInsertPtr = (char *) insertPtr; - - if(tmpInsertPtr >= sendPtr){ - // Is there enough space at the end of the buffer? - if ((tmpInsertPtr + len) < (char*)endOfBuffer){ - sendDataSize += len; - return insertPtr; - } else { - // We have passed the end of the cirkular buffer, - // must start from the beginning - // Is there enough space in the beginning of the buffer? - if ((Uint32)(sendPtr - (char *)startOfBuffer) <= len){ - // Not enough space available, insert failed - return 0; - } else { - // There is space available at the beginning of the buffer - // We start from the beginning, set endOfData and insertPtr - insertPtr = startOfBuffer; - if(sendDataSize != 0){ - return insertPtr; - } - sendPtr = (char *)startOfBuffer; - sendDataSize = len; - return insertPtr; - } - } - } else { - // sendPtr > insertPtr - // Is there enought room - if((tmpInsertPtr + len) < sendPtr){ - return insertPtr; - } - return 0; - } -} - -inline -void -SendBuffer::updateInsertPtr(Uint32 lenBytes){ - dataSize += lenBytes; - insertPtr += (lenBytes / 4); -} - -#endif // Define of SendBuffer_H diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp deleted file mode 100644 index 768b4f4a052..00000000000 --- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp +++ /dev/null @@ -1,436 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include "TCP_Transporter.hpp" -#include -#include - -#include -extern EventLogger g_eventLogger; -// End of stuff to be moved - -#ifdef NDB_WIN32 -class ndbstrerror -{ -public: - ndbstrerror(int iError); - ~ndbstrerror(void); - operator char*(void) { return m_szError; }; - -private: - int m_iError; - char* m_szError; -}; - -ndbstrerror::ndbstrerror(int iError) -: m_iError(iError) -{ - FormatMessage( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - 0, - iError, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPTSTR)&m_szError, - 0, - 0); -} - -ndbstrerror::~ndbstrerror(void) -{ - LocalFree( m_szError ); - m_szError = 0; -} -#else -#define ndbstrerror strerror -#endif - -TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg, - int sendBufSize, int maxRecvSize, - const char *lHostName, - const char *rHostName, - int r_port, - bool isMgmConnection_arg, - NodeId lNodeId, - NodeId rNodeId, - NodeId serverNodeId, - bool chksm, bool signalId, - Uint32 _reportFreq) : - Transporter(t_reg, tt_TCP_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection_arg, - lNodeId, rNodeId, serverNodeId, - 0, false, chksm, signalId), - m_sendBuffer(sendBufSize) -{ - maxReceiveSize = maxRecvSize; - - // Initialize member variables - theSocket = NDB_INVALID_SOCKET; - - sendCount = receiveCount = 0; - sendSize = receiveSize = 0; - reportFreq = _reportFreq; - - sockOptRcvBufSize = 70080; - sockOptSndBufSize = 71540; - sockOptNodelay = 1; - sockOptTcpMaxSeg = 4096; -} - -TCP_Transporter::~TCP_Transporter() { - - // Disconnect - if (theSocket != NDB_INVALID_SOCKET) - doDisconnect(); - - // Delete send buffers - - // Delete receive buffer!! - receiveBuffer.destroy(); -} - -bool TCP_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("TCP_Transpporter::connect_server_impl"); - DBUG_RETURN(connect_common(sockfd)); -} - -bool TCP_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("TCP_Transpporter::connect_client_impl"); - DBUG_RETURN(connect_common(sockfd)); -} - -bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd) -{ - theSocket = sockfd; - setSocketOptions(); - setSocketNonBlocking(theSocket); - DBUG_PRINT("info", ("Successfully set-up TCP transporter to node %d", - remoteNodeId)); - return true; -} - -bool -TCP_Transporter::initTransporter() { - - // Allocate buffer for receiving - // Let it be the maximum size we receive plus 8 kB for any earlier received - // incomplete messages (slack) - Uint32 recBufSize = maxReceiveSize; - if(recBufSize < MAX_MESSAGE_SIZE){ - recBufSize = MAX_MESSAGE_SIZE; - } - - if(!receiveBuffer.init(recBufSize+MAX_MESSAGE_SIZE)){ - return false; - } - - // Allocate buffers for sending - if (!m_sendBuffer.initBuffer(remoteNodeId)) { - // XXX What shall be done here? 
- // The same is valid for the other init-methods - return false; - } - - return true; -} - -void -TCP_Transporter::setSocketOptions(){ - int sockOptKeepAlive = 1; - - if (setsockopt(theSocket, SOL_SOCKET, SO_RCVBUF, - (char*)&sockOptRcvBufSize, sizeof(sockOptRcvBufSize)) < 0) { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("The setsockopt SO_RCVBUF error code = %d", InetErrno); -#endif - }//if - - if (setsockopt(theSocket, SOL_SOCKET, SO_SNDBUF, - (char*)&sockOptSndBufSize, sizeof(sockOptSndBufSize)) < 0) { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("The setsockopt SO_SNDBUF error code = %d", InetErrno); -#endif - }//if - - if (setsockopt(theSocket, SOL_SOCKET, SO_KEEPALIVE, - (char*)&sockOptKeepAlive, sizeof(sockOptKeepAlive)) < 0) { - ndbout_c("The setsockopt SO_KEEPALIVE error code = %d", InetErrno); - }//if - - //----------------------------------------------- - // Set the TCP_NODELAY option so also small packets are sent - // as soon as possible - //----------------------------------------------- - if (setsockopt(theSocket, IPPROTO_TCP, TCP_NODELAY, - (char*)&sockOptNodelay, sizeof(sockOptNodelay)) < 0) { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("The setsockopt TCP_NODELAY error code = %d", InetErrno); -#endif - }//if -} - - -#ifdef NDB_WIN32 - -bool -TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){ - unsigned long ul = 1; - if(ioctlsocket(socket, FIONBIO, &ul)) - { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("Set non-blocking server error3: %d", InetErrno); -#endif - }//if - return true; -} - -#else - -bool -TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){ - int flags; - flags = fcntl(socket, F_GETFL, 0); - if (flags < 0) { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("Set non-blocking server error1: %s", strerror(InetErrno)); -#endif - }//if - flags |= NDB_NONBLOCK; - if (fcntl(socket, F_SETFL, flags) == -1) { -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("Set non-blocking server error2: %s", strerror(InetErrno)); -#endif - }//if - return true; -} - -#endif - -bool -TCP_Transporter::sendIsPossible(struct timeval * timeout) { - if(theSocket != NDB_INVALID_SOCKET){ - fd_set writeset; - FD_ZERO(&writeset); - FD_SET(theSocket, &writeset); - - int selectReply = select(theSocket + 1, NULL, &writeset, NULL, timeout); - - if ((selectReply > 0) && FD_ISSET(theSocket, &writeset)) - return true; - else - return false; - } - return false; -} - -Uint32 -TCP_Transporter::get_free_buffer() const -{ - return m_sendBuffer.bufferSizeRemaining(); -} - -Uint32 * -TCP_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio){ - - Uint32 * insertPtr = m_sendBuffer.getInsertPtr(lenBytes); - - struct timeval timeout = {0, 10000}; - - if (insertPtr == 0) { - //------------------------------------------------- - // Buffer was completely full. We have severe problems. - // We will attempt to wait for a small time - //------------------------------------------------- - if(sendIsPossible(&timeout)) { - //------------------------------------------------- - // Send is possible after the small timeout. - //------------------------------------------------- - if(!doSend()){ - return 0; - } else { - //------------------------------------------------- - // Since send was successful we will make a renewed - // attempt at inserting the signal into the buffer. 
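The deleted setSocketOptions()/setSocketNonBlocking() code above tunes each TCP connection before use: larger kernel buffers, keep-alive, TCP_NODELAY so small signal packets leave immediately, and a non-blocking descriptor. A POSIX-only sketch of the same sequence; the buffer sizes reuse the example values from the deleted constructor, and the helper name is hypothetical.

#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static bool tune_socket(int fd) {
  int rcvbuf = 70080, sndbuf = 71540, keepalive = 1, nodelay = 1;
  if (setsockopt(fd, SOL_SOCKET,  SO_RCVBUF,    &rcvbuf,    sizeof rcvbuf)    < 0) return false;
  if (setsockopt(fd, SOL_SOCKET,  SO_SNDBUF,    &sndbuf,    sizeof sndbuf)    < 0) return false;
  if (setsockopt(fd, SOL_SOCKET,  SO_KEEPALIVE, &keepalive, sizeof keepalive) < 0) return false;
  if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,  &nodelay,   sizeof nodelay)   < 0) return false;

  int flags = fcntl(fd, F_GETFL, 0);       // switch the descriptor to non-blocking mode
  return flags >= 0 && fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0;
}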
- //------------------------------------------------- - insertPtr = m_sendBuffer.getInsertPtr(lenBytes); - }//if - } else { - return 0; - }//if - } - return insertPtr; -} - -void -TCP_Transporter::updateWritePtr(Uint32 lenBytes, Uint32 prio){ - m_sendBuffer.updateInsertPtr(lenBytes); - - const int bufsize = m_sendBuffer.bufferSize(); - if(bufsize > TCP_SEND_LIMIT) { - //------------------------------------------------- - // Buffer is full and we are ready to send. We will - // not wait since the signal is already in the buffer. - // Force flag set has the same indication that we - // should always send. If it is not possible to send - // we will not worry since we will soon be back for - // a renewed trial. - //------------------------------------------------- - struct timeval no_timeout = {0,0}; - if(sendIsPossible(&no_timeout)) { - //------------------------------------------------- - // Send was possible, attempt at a send. - //------------------------------------------------- - doSend(); - }//if - } -} - -#define DISCONNECT_ERRNO(e, sz) ((sz == 0) || \ - (!((sz == -1) && (e == EAGAIN) || (e == EWOULDBLOCK) || (e == EINTR)))) - - -bool -TCP_Transporter::doSend() { - // If no sendbuffers are used nothing is done - // Sends the contents of the SendBuffers until they are empty - // or until select does not select the socket for write. - // Before calling send, the socket must be selected for write - // using "select" - // It writes on the external TCP/IP interface until the send buffer is empty - // and as long as write is possible (test it using select) - - // Empty the SendBuffers - - bool sent_any = true; - while (m_sendBuffer.dataSize > 0) - { - const char * const sendPtr = m_sendBuffer.sendPtr; - const Uint32 sizeToSend = m_sendBuffer.sendDataSize; - const int nBytesSent = send(theSocket, sendPtr, sizeToSend, 0); - - if (nBytesSent > 0) - { - sent_any = true; - m_sendBuffer.bytesSent(nBytesSent); - - sendCount ++; - sendSize += nBytesSent; - if(sendCount == reportFreq) - { - reportSendLen(get_callback_obj(), remoteNodeId, sendCount, sendSize); - sendCount = 0; - sendSize = 0; - } - } - else - { - if (nBytesSent < 0 && InetErrno == EAGAIN && sent_any) - break; - - // Send failed -#if defined DEBUG_TRANSPORTER - g_eventLogger.error("Send Failure(disconnect==%d) to node = %d nBytesSent = %d " - "errno = %d strerror = %s", - DISCONNECT_ERRNO(InetErrno, nBytesSent), - remoteNodeId, nBytesSent, InetErrno, - (char*)ndbstrerror(InetErrno)); -#endif - if(DISCONNECT_ERRNO(InetErrno, nBytesSent)){ - doDisconnect(); - report_disconnect(InetErrno); - } - - return false; - } - } - return true; -} - -int -TCP_Transporter::doReceive() { - // Select-function must return the socket for read - // before this method is called - // It reads the external TCP/IP interface once - Uint32 size = receiveBuffer.sizeOfBuffer - receiveBuffer.sizeOfData; - if(size > 0){ - const int nBytesRead = recv(theSocket, - receiveBuffer.insertPtr, - size < maxReceiveSize ? 
size : maxReceiveSize, - 0); - - if (nBytesRead > 0) { - receiveBuffer.sizeOfData += nBytesRead; - receiveBuffer.insertPtr += nBytesRead; - - if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){ -#ifdef DEBUG_TRANSPORTER - g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", - receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); - g_eventLogger.error("nBytesRead = %d", nBytesRead); -#endif - g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)", - receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer); - report_error(TE_INVALID_MESSAGE_LENGTH); - return 0; - } - - receiveCount ++; - receiveSize += nBytesRead; - - if(receiveCount == reportFreq){ - reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize); - receiveCount = 0; - receiveSize = 0; - } - return nBytesRead; - } else { -#if defined DEBUG_TRANSPORTER - g_eventLogger.error("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d " - "errno = %d strerror = %s", - DISCONNECT_ERRNO(InetErrno, nBytesRead), - remoteNodeId, nBytesRead, InetErrno, - (char*)ndbstrerror(InetErrno)); -#endif - if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){ - // The remote node has closed down - doDisconnect(); - report_disconnect(InetErrno); - } - } - return nBytesRead; - } else { - return 0; - } -} - -void -TCP_Transporter::disconnectImpl() { - if(theSocket != NDB_INVALID_SOCKET){ - if(NDB_CLOSE_SOCKET(theSocket) < 0){ - report_error(TE_ERROR_CLOSING_SOCKET); - } - } - - // Empty send och receive buffers - receiveBuffer.clear(); - m_sendBuffer.emptyBuffer(); - - theSocket = NDB_INVALID_SOCKET; -} diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp deleted file mode 100644 index 64b48d741a6..00000000000 --- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TCP_TRANSPORTER_HPP -#define TCP_TRANSPORTER_HPP - -#include "Transporter.hpp" -#include "SendBuffer.hpp" - -#include - -struct ReceiveBuffer { - Uint32 *startOfBuffer; // Pointer to start of the receive buffer - Uint32 *readPtr; // Pointer to start reading data - - char *insertPtr; // Pointer to first position in the receiveBuffer - // in which to insert received data. 
Earlier - // received incomplete messages (slack) are - // copied into the first part of the receiveBuffer - - Uint32 sizeOfData; // In bytes - Uint32 sizeOfBuffer; - - ReceiveBuffer() {} - bool init(int bytes); - void destroy(); - - void clear(); - void incompleteMessage(); -}; - -class TCP_Transporter : public Transporter { - friend class TransporterRegistry; -private: - // Initialize member variables - TCP_Transporter(TransporterRegistry&, - int sendBufferSize, int maxReceiveSize, - const char *lHostName, - const char *rHostName, - int r_port, - bool isMgmConnection, - NodeId lHostId, - NodeId rHostId, - NodeId serverNodeId, - bool checksum, bool signalId, - Uint32 reportFreq = 4096); - - // Disconnect, delete send buffers and receive buffer - virtual ~TCP_Transporter(); - - /** - * Allocate buffers for sending and receiving - */ - bool initTransporter(); - - Uint32 * getWritePtr(Uint32 lenBytes, Uint32 prio); - void updateWritePtr(Uint32 lenBytes, Uint32 prio); - - bool hasDataToSend() const ; - - /** - * Retrieves the contents of the send buffers and writes it on - * the external TCP/IP interface until the send buffers are empty - * and as long as write is possible. - */ - bool doSend(); - - /** - * It reads the external TCP/IP interface once - * and puts the data in the receiveBuffer - */ - int doReceive(); - - /** - * Returns socket (used for select) - */ - NDB_SOCKET_TYPE getSocket() const; - - /** - * Get Receive Data - * - * Returns - no of bytes to read - * and set ptr - */ - virtual Uint32 getReceiveData(Uint32 ** ptr); - - /** - * Update receive data ptr - */ - virtual void updateReceiveDataPtr(Uint32 bytesRead); - - virtual Uint32 get_free_buffer() const; - - inline bool hasReceiveData () const { - return receiveBuffer.sizeOfData > 0; - } -protected: - /** - * Setup client/server and perform connect/accept - * Is used both by clients and servers - * A client connects to the remote server - * A server accepts any new connections - */ - virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd); - virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd); - bool connect_common(NDB_SOCKET_TYPE sockfd); - - /** - * Disconnects a TCP/IP node. Empty send and receivebuffer. 
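/* Illustrative sketch, not part of the original file: the "slack" mentioned in the
   ReceiveBuffer comments above is handled by ReceiveBuffer::incompleteMessage() (defined
   further down in this header) - a partially received message is moved back to the start of
   the buffer so the next recv() can append to it.  Stripped of the NDB types, the compaction
   step is: */
#include <cstring>
#include <cstdint>

struct SimpleRecvBuffer {            // simplified stand-in for ReceiveBuffer (assumption)
  char         *start;               // start of the allocated buffer
  char         *readPtr;             // first unconsumed byte
  std::uint32_t sizeOfData;          // unconsumed bytes starting at readPtr

  void compactIncompleteMessage()
  {
    if (readPtr != start) {
      if (sizeOfData != 0)
        std::memmove(start, readPtr, sizeOfData);   // regions may overlap, hence memmove
      readPtr = start;                              // new insert position is start + sizeOfData
    }
  }
};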
- */ - virtual void disconnectImpl(); - -private: - /** - * Send buffers - */ - SendBuffer m_sendBuffer; - - // Sending/Receiving socket used by both client and server - NDB_SOCKET_TYPE theSocket; - - Uint32 maxReceiveSize; - - /** - * Socket options - */ - int sockOptRcvBufSize; - int sockOptSndBufSize; - int sockOptNodelay; - int sockOptTcpMaxSeg; - - void setSocketOptions(); - - static bool setSocketNonBlocking(NDB_SOCKET_TYPE aSocket); - - bool sendIsPossible(struct timeval * timeout); - - /** - * Statistics - */ - Uint32 reportFreq; - Uint32 receiveCount; - Uint64 receiveSize; - Uint32 sendCount; - Uint64 sendSize; - - ReceiveBuffer receiveBuffer; -}; - -inline -NDB_SOCKET_TYPE -TCP_Transporter::getSocket() const { - return theSocket; -} - -inline -Uint32 -TCP_Transporter::getReceiveData(Uint32 ** ptr){ - (* ptr) = receiveBuffer.readPtr; - return receiveBuffer.sizeOfData; -} - -inline -void -TCP_Transporter::updateReceiveDataPtr(Uint32 bytesRead){ - char * ptr = (char *)receiveBuffer.readPtr; - ptr += bytesRead; - receiveBuffer.readPtr = (Uint32*)ptr; - receiveBuffer.sizeOfData -= bytesRead; - receiveBuffer.incompleteMessage(); -} - -inline -bool -TCP_Transporter::hasDataToSend() const { - return m_sendBuffer.dataSize > 0; -} - -inline -bool -ReceiveBuffer::init(int bytes){ -#ifdef DEBUG_TRANSPORTER - ndbout << "Allocating " << bytes << " bytes as receivebuffer" << endl; -#endif - - startOfBuffer = new Uint32[((bytes + 0) >> 2) + 1]; - sizeOfBuffer = bytes + sizeof(Uint32); - clear(); - return true; -} - -inline -void -ReceiveBuffer::destroy(){ - delete[] startOfBuffer; - sizeOfBuffer = 0; - startOfBuffer = 0; - clear(); -} - -inline -void -ReceiveBuffer::clear(){ - readPtr = startOfBuffer; - insertPtr = (char *)startOfBuffer; - sizeOfData = 0; -} - -inline -void -ReceiveBuffer::incompleteMessage() { - if(startOfBuffer != readPtr){ - if(sizeOfData != 0) - memmove(startOfBuffer, readPtr, sizeOfData); - readPtr = startOfBuffer; - insertPtr = ((char *)startOfBuffer) + sizeOfData; - } -} - - -#endif // Define of TCP_Transporter_H diff --git a/storage/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp deleted file mode 100644 index fe43124ad86..00000000000 --- a/storage/ndb/src/common/transporter/Transporter.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include "Transporter.hpp" -#include "TransporterInternalDefinitions.hpp" -#include -#include -#include -#include - -#include -extern EventLogger g_eventLogger; - -Transporter::Transporter(TransporterRegistry &t_reg, - TransporterType _type, - const char *lHostName, - const char *rHostName, - int s_port, - bool _isMgmConnection, - NodeId lNodeId, - NodeId rNodeId, - NodeId serverNodeId, - int _byteorder, - bool _compression, bool _checksum, bool _signalId) - : m_s_port(s_port), remoteNodeId(rNodeId), localNodeId(lNodeId), - isServer(lNodeId==serverNodeId), - m_packer(_signalId, _checksum), isMgmConnection(_isMgmConnection), - m_type(_type), - m_transporter_registry(t_reg) -{ - DBUG_ENTER("Transporter::Transporter"); - if (rHostName && strlen(rHostName) > 0){ - strncpy(remoteHostName, rHostName, sizeof(remoteHostName)); - Ndb_getInAddr(&remoteHostAddress, rHostName); - } - else - { - if (!isServer) { - ndbout << "Unable to setup transporter. Node " << rNodeId - << " must have hostname. Update configuration." << endl; - exit(-1); - } - remoteHostName[0]= 0; - } - strncpy(localHostName, lHostName, sizeof(localHostName)); - - DBUG_PRINT("info",("rId=%d lId=%d isServer=%d rHost=%s lHost=%s s_port=%d", - remoteNodeId, localNodeId, isServer, - remoteHostName, localHostName, - s_port)); - - byteOrder = _byteorder; - compressionUsed = _compression; - checksumUsed = _checksum; - signalIdUsed = _signalId; - - m_connected = false; - m_timeOutMillis = 30000; - - m_connect_address.s_addr= 0; - if(s_port<0) - s_port= -s_port; // was dynamic - - if (isServer) - m_socket_client= 0; - else - { - m_socket_client= new SocketClient(remoteHostName, s_port, - new SocketAuthSimple("ndbd", - "ndbd passwd")); - - m_socket_client->set_connect_timeout((m_timeOutMillis+999)/1000); - } - DBUG_VOID_RETURN; -} - -Transporter::~Transporter(){ - if (m_socket_client) - delete m_socket_client; -} - -bool -Transporter::connect_server(NDB_SOCKET_TYPE sockfd) { - // all initial negotiation is done in TransporterRegistry::connect_server - DBUG_ENTER("Transporter::connect_server"); - - if(m_connected) - { - DBUG_RETURN(false); // TODO assert(0); - } - - { - struct sockaddr_in addr; - SOCKET_SIZE_TYPE addrlen= sizeof(addr); - getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); - m_connect_address= (&addr)->sin_addr; - } - - bool res = connect_server_impl(sockfd); - if(res){ - m_connected = true; - m_errorCount = 0; - } - - DBUG_RETURN(res); -} - -bool -Transporter::connect_client() { - NDB_SOCKET_TYPE sockfd; - - if(m_connected) - return true; - - if(isMgmConnection) - { - sockfd= m_transporter_registry.connect_ndb_mgmd(m_socket_client); - } - else - { - if (!m_socket_client->init()) - { - return false; - } - if (strlen(localHostName) > 0) - { - if (m_socket_client->bind(localHostName, 0) != 0) - return false; - } - sockfd= m_socket_client->connect(); - } - - return connect_client(sockfd); -} - -bool -Transporter::connect_client(NDB_SOCKET_TYPE sockfd) { - - if(m_connected) - return true; - - if (sockfd == NDB_INVALID_SOCKET) - return false; - - DBUG_ENTER("Transporter::connect_client"); - - DBUG_PRINT("info",("port %d isMgmConnection=%d",m_s_port,isMgmConnection)); - - SocketOutputStream s_output(sockfd); - SocketInputStream s_input(sockfd); - - // send info about own id - // send info 
about own transporter type - - s_output.println("%d %d", localNodeId, m_type); - // get remote id - int nodeId, remote_transporter_type= -1; - - char buf[256]; - if (s_input.gets(buf, 256) == 0) { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - int r= sscanf(buf, "%d %d", &nodeId, &remote_transporter_type); - switch (r) { - case 2: - break; - case 1: - // we're running version prior to 4.1.9 - // ok, but with no checks on transporter configuration compatability - break; - default: - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(false); - } - - DBUG_PRINT("info", ("nodeId=%d remote_transporter_type=%d", - nodeId, remote_transporter_type)); - - if (remote_transporter_type != -1) - { - if (remote_transporter_type != m_type) - { - DBUG_PRINT("error", ("Transporter types mismatch this=%d remote=%d", - m_type, remote_transporter_type)); - NDB_CLOSE_SOCKET(sockfd); - g_eventLogger.error("Incompatible configuration: transporter type " - "mismatch with node %d", nodeId); - DBUG_RETURN(false); - } - } - else if (m_type == tt_SHM_TRANSPORTER) - { - g_eventLogger.warning("Unable to verify transporter compatability with node %d", nodeId); - } - - { - struct sockaddr_in addr; - SOCKET_SIZE_TYPE addrlen= sizeof(addr); - getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); - m_connect_address= (&addr)->sin_addr; - } - - bool res = connect_client_impl(sockfd); - if(res){ - m_connected = true; - m_errorCount = 0; - } - DBUG_RETURN(res); -} - -void -Transporter::doDisconnect() { - - if(!m_connected) - return; //assert(0); TODO will fail - - m_connected= false; - disconnectImpl(); -} diff --git a/storage/ndb/src/common/transporter/Transporter.hpp b/storage/ndb/src/common/transporter/Transporter.hpp deleted file mode 100644 index 28f99e9170d..00000000000 --- a/storage/ndb/src/common/transporter/Transporter.hpp +++ /dev/null @@ -1,193 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
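/* Illustrative sketch, not part of the original file: the connect handshake above exchanges
   a single text line "<node id> <transporter type>" in each direction, and sscanf() accepting
   either one or two fields is what keeps pre-4.1.9 peers (which sent only the node id)
   connectable, at the cost of skipping the transporter-type check.  Parsing that greeting in
   isolation: */
#include <cstdio>

// Returns true if the greeting parsed; type stays -1 for an old peer that sent no type.
static bool parse_transporter_greeting(const char *line, int &nodeId, int &type)
{
  type = -1;
  const int n = std::sscanf(line, "%d %d", &nodeId, &type);
  return n == 1 || n == 2;
}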
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef Transporter_H -#define Transporter_H - -#include - -#include - -#include -#include -#include "TransporterDefinitions.hpp" -#include "Packer.hpp" - -#include -#include - -class Transporter { - friend class TransporterRegistry; -public: - virtual bool initTransporter() = 0; - - /** - * Destructor - */ - virtual ~Transporter(); - - /** - * None blocking - * Use isConnected() to check status - */ - bool connect_client(); - bool connect_client(NDB_SOCKET_TYPE sockfd); - bool connect_server(NDB_SOCKET_TYPE socket); - - /** - * Blocking - */ - virtual void doDisconnect(); - - virtual Uint32 * getWritePtr(Uint32 lenBytes, Uint32 prio) = 0; - virtual void updateWritePtr(Uint32 lenBytes, Uint32 prio) = 0; - - /** - * Are we currently connected - */ - bool isConnected() const; - - /** - * Remote Node Id - */ - NodeId getRemoteNodeId() const; - - /** - * Local (own) Node Id - */ - NodeId getLocalNodeId() const; - - /** - * Get port we're connecting to (signed) - */ - int get_s_port() { return m_s_port; }; - - /** - * Set port to connect to (signed) - */ - void set_s_port(int port) { - m_s_port = port; - if(port<0) - port= -port; - if(m_socket_client) - m_socket_client->set_port(port); - }; - - virtual Uint32 get_free_buffer() const = 0; - -protected: - Transporter(TransporterRegistry &, - TransporterType, - const char *lHostName, - const char *rHostName, - int s_port, - bool isMgmConnection, - NodeId lNodeId, - NodeId rNodeId, - NodeId serverNodeId, - int byteorder, - bool compression, - bool checksum, - bool signalId); - - /** - * Blocking, for max timeOut milli seconds - * Returns true if connect succeded - */ - virtual bool connect_server_impl(NDB_SOCKET_TYPE sockfd) = 0; - virtual bool connect_client_impl(NDB_SOCKET_TYPE sockfd) = 0; - - /** - * Blocking - */ - virtual void disconnectImpl() = 0; - - /** - * Remote host name/and address - */ - char remoteHostName[256]; - char localHostName[256]; - struct in_addr remoteHostAddress; - struct in_addr localHostAddress; - - int m_s_port; - - const NodeId remoteNodeId; - const NodeId localNodeId; - - const bool isServer; - - unsigned createIndex; - - int byteOrder; - bool compressionUsed; - bool checksumUsed; - bool signalIdUsed; - Packer m_packer; - -private: - - /** - * means that we transform an MGM connection into - * a transporter connection - */ - bool isMgmConnection; - - SocketClient *m_socket_client; - struct in_addr m_connect_address; - -protected: - Uint32 getErrorCount(); - Uint32 m_errorCount; - Uint32 m_timeOutMillis; - -protected: - bool m_connected; // Are we connected - TransporterType m_type; - - TransporterRegistry &m_transporter_registry; - void *get_callback_obj() { return m_transporter_registry.callbackObj; }; - void report_disconnect(int err){m_transporter_registry.report_disconnect(remoteNodeId,err);}; - void report_error(enum TransporterError err, const char *info = 0) - { reportError(get_callback_obj(), remoteNodeId, err, info); }; -}; - -inline -bool -Transporter::isConnected() const { - return m_connected; -} - -inline -NodeId -Transporter::getRemoteNodeId() const { - return remoteNodeId; -} - -inline -NodeId -Transporter::getLocalNodeId() const { - return localNodeId; -} - -inline -Uint32 -Transporter::getErrorCount() -{ - return m_errorCount; -} - -#endif // Define of Transporter_H diff --git 
a/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp b/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp deleted file mode 100644 index 251e46a6f16..00000000000 --- a/storage/ndb/src/common/transporter/TransporterInternalDefinitions.hpp +++ /dev/null @@ -1,298 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef TransporterInternalDefinitions_H -#define TransporterInternalDefinitions_H - -#if defined DEBUG_TRANSPORTER || defined VM_TRACE -#include -#endif - -#define NDB_TCP_TRANSPORTER - -#ifdef HAVE_NDB_SHM -#define NDB_SHM_TRANSPORTER -#endif - -#ifdef HAVE_NDB_SCI -#define NDB_SCI_TRANSPORTER -#endif - -#ifdef DEBUG_TRANSPORTER -#define DEBUG(x) ndbout << x << endl -#else -#define DEBUG(x) -#endif - -#if defined VM_TRACE || defined DEBUG_TRANSPORTER -#define WARNING(X) ndbout << X << endl; -#else -#define WARNING(X) -#endif - -// Calculate a checksum -inline -Uint32 -computeChecksum(const Uint32 * const startOfData, int nWords) { - Uint32 chksum = startOfData[0]; - for (int i=1; i < nWords; i++) - chksum ^= startOfData[i]; - return chksum; -} - -struct Protocol6 { - Uint32 word1; - Uint32 word2; - Uint32 word3; - -/** - * - * b = Byte order - 4 Bits (Note 1 significant bit) - * g = GSN - 16 Bits - * p = Prio - 2 Bits - * c = Checksum included - 1 Bit - * z = Compression - 1 Bit - * v = Version id - 4 Bits - * i = Signal id included - 1 Bit - * m = Message length - 16 Bits (0-65536) (In word -> 0-256k bytes) - * d = Signal data length - 5 Bits (0-31) - * t = trace - 6 Bits (0-63) - * r = Recievers block no - 16 Bits - * s = Senders block no - 16 Bits - * u = Unused - 7 Bits - * f = FragmentInfo1 - 1 Bit - * h = FragmentInfo2 - 1 bit - * n = No of segments - 2 Bits - - * Word 1 - * - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * bfizcppbmmmmmmmmmmmmmmmmbhdddddb - - ** - * Word 2 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * ggggggggggggggggvvvvttttttnn - - ** - * Word 3 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * rrrrrrrrrrrrrrrrssssssssssssssss - - ** - * Word 4 (Optional Signal Id) - */ - - /** - * 0 = Big endian (Sparc), 1 = Little endian (Intel) - */ - static Uint32 getByteOrder (const Uint32 & word1); - static Uint32 getCompressed (const Uint32 & word1); - static Uint32 getSignalIdIncluded(const Uint32 & word1); - static Uint32 getCheckSumIncluded(const Uint32 & word1); - static Uint32 getPrio (const Uint32 & word1); - static Uint32 getMessageLength (const Uint32 & word1); - - static void setByteOrder (Uint32 & word1, Uint32 byteOrder); - static void setCompressed (Uint32 & word1, Uint32 compressed); - static void setSignalIdIncluded(Uint32 & word1, Uint32 signalId); - static void setCheckSumIncluded(Uint32 & word1, Uint32 checkSum); - static void setPrio (Uint32 & word1, Uint32 prio); 
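/* Illustrative sketch, not part of the original file: every Protocol6 getter/setter declared
   above is the same mask-and-shift pattern over the three header words.  Taking the 16-bit
   message-length field of word 1 (bit offset 8, per the layout comment), a round trip with
   the WORD1_* values defined below behaves like this: */
#include <cstdint>
#include <cassert>

int main()
{
  const std::uint32_t MESSAGELEN_MASK  = 0x00FFFF00;   // == WORD1_MESSAGELEN_MASK
  const std::uint32_t MESSAGELEN_SHIFT = 8;            // == WORD1_MESSAGELEN_SHIFT

  std::uint32_t word1 = 0;
  const std::uint32_t messageLenWords = 300;           // message length is carried in 32-bit words

  word1 |= (messageLenWords << MESSAGELEN_SHIFT) & MESSAGELEN_MASK;  // Protocol6::setMessageLength
  assert(((word1 & MESSAGELEN_MASK) >> MESSAGELEN_SHIFT) == 300);    // Protocol6::getMessageLength
  return 0;
}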
- static void setMessageLength (Uint32 & word1, Uint32 messageLen); - - static void createSignalHeader(SignalHeader * const dst, - const Uint32 & word1, - const Uint32 & word2, - const Uint32 & word3); - - static void createProtocol6Header(Uint32 & word1, - Uint32 & word2, - Uint32 & word3, - const SignalHeader * const src); -}; - -#define WORD1_BYTEORDER_MASK (0x81000081) -#define WORD1_SIGNALID_MASK (0x00000004) -#define WORD1_COMPRESSED_MASK (0x00000008) -#define WORD1_CHECKSUM_MASK (0x00000010) -#define WORD1_PRIO_MASK (0x00000060) -#define WORD1_MESSAGELEN_MASK (0x00FFFF00) -#define WORD1_SIGNAL_LEN_MASK (0x7C000000) -#define WORD1_FRAG_INF_MASK (0x00000002) -#define WORD1_FRAG_INF2_MASK (0x02000000) - -#define WORD1_FRAG_INF_SHIFT (1) -#define WORD1_SIGNALID_SHIFT (2) -#define WORD1_COMPRESSED_SHIFT (3) -#define WORD1_CHECKSUM_SHIFT (4) -#define WORD1_PRIO_SHIFT (5) -#define WORD1_MESSAGELEN_SHIFT (8) -#define WORD1_FRAG_INF2_SHIFT (25) -#define WORD1_SIGNAL_LEN_SHIFT (26) - -#define WORD2_VERID_GSN_MASK (0x000FFFFF) -#define WORD2_TRACE_MASK (0x03f00000) -#define WORD2_SEC_COUNT_MASK (0x0c000000) - -#define WORD2_TRACE_SHIFT (20) -#define WORD2_SEC_COUNT_SHIFT (26) - -#define WORD3_SENDER_MASK (0x0000FFFF) -#define WORD3_RECEIVER_MASK (0xFFFF0000) - -#define WORD3_RECEIVER_SHIFT (16) - -inline -Uint32 -Protocol6::getByteOrder(const Uint32 & word1){ - return word1 & 1; -} - -inline -Uint32 -Protocol6::getCompressed(const Uint32 & word1){ - return (word1 & WORD1_COMPRESSED_MASK) >> WORD1_COMPRESSED_SHIFT; -} - -inline -Uint32 -Protocol6::getSignalIdIncluded(const Uint32 & word1){ - return (word1 & WORD1_SIGNALID_MASK) >> WORD1_SIGNALID_SHIFT; -} - -inline -Uint32 -Protocol6::getCheckSumIncluded(const Uint32 & word1){ - return (word1 & WORD1_CHECKSUM_MASK) >> WORD1_CHECKSUM_SHIFT; -} - -inline -Uint32 -Protocol6::getMessageLength(const Uint32 & word1){ - return (word1 & WORD1_MESSAGELEN_MASK) >> WORD1_MESSAGELEN_SHIFT; -} - -inline -Uint32 -Protocol6::getPrio(const Uint32 & word1){ - return (word1 & WORD1_PRIO_MASK) >> WORD1_PRIO_SHIFT; -} - -inline -void -Protocol6::setByteOrder(Uint32 & word1, Uint32 byteOrder){ - Uint32 tmp = byteOrder; - tmp |= (tmp << 7); - tmp |= (tmp << 24); - word1 |= (tmp & WORD1_BYTEORDER_MASK); -} - -inline -void -Protocol6::setCompressed(Uint32 & word1, Uint32 compressed){ - word1 |= ((compressed << WORD1_COMPRESSED_SHIFT) & WORD1_COMPRESSED_MASK); -} - -inline -void -Protocol6::setSignalIdIncluded(Uint32 & word1, Uint32 signalId){ - word1 |= ((signalId << WORD1_SIGNALID_SHIFT) & WORD1_SIGNALID_MASK); -} - -inline -void -Protocol6::setCheckSumIncluded(Uint32 & word1, Uint32 checkSum){ - word1 |= ((checkSum << WORD1_CHECKSUM_SHIFT) & WORD1_CHECKSUM_MASK); -} - -inline -void -Protocol6::setMessageLength(Uint32 & word1, Uint32 messageLen){ - word1 |= ((messageLen << WORD1_MESSAGELEN_SHIFT) & WORD1_MESSAGELEN_MASK); -} - -inline -void -Protocol6::setPrio(Uint32 & word1, Uint32 prio){ - word1 |= ((prio << WORD1_PRIO_SHIFT) & WORD1_PRIO_MASK); -} - -inline -void -Protocol6::createSignalHeader(SignalHeader * const dst, - const Uint32 & word1, - const Uint32 & word2, - const Uint32 & word3){ - - Uint32 signal_len = (word1 & WORD1_SIGNAL_LEN_MASK)>> WORD1_SIGNAL_LEN_SHIFT; - Uint32 fragInfo1 = (word1 & WORD1_FRAG_INF_MASK) >> (WORD1_FRAG_INF_SHIFT-1); - Uint32 fragInfo2 = (word1 & WORD1_FRAG_INF2_MASK) >> (WORD1_FRAG_INF2_SHIFT); - Uint32 trace = (word2 & WORD2_TRACE_MASK) >> WORD2_TRACE_SHIFT; - Uint32 verid_gsn = (word2 & WORD2_VERID_GSN_MASK); - Uint32 secCount = 
(word2 & WORD2_SEC_COUNT_MASK) >> WORD2_SEC_COUNT_SHIFT; - - dst->theTrace = trace; - dst->m_noOfSections = secCount; - dst->m_fragmentInfo = fragInfo1 | fragInfo2; - - dst->theLength = signal_len; - dst->theVerId_signalNumber = verid_gsn; - - Uint32 sBlockNum = (word3 & WORD3_SENDER_MASK); - Uint32 rBlockNum = (word3 & WORD3_RECEIVER_MASK) >> WORD3_RECEIVER_SHIFT; - - dst->theSendersBlockRef = sBlockNum; - dst->theReceiversBlockNumber = rBlockNum; -} - -inline -void -Protocol6::createProtocol6Header(Uint32 & word1, - Uint32 & word2, - Uint32 & word3, - const SignalHeader * const src){ - const Uint32 signal_len = src->theLength; - const Uint32 fragInfo = src->m_fragmentInfo; - const Uint32 fragInfo1 = (fragInfo & 2); - const Uint32 fragInfo2 = (fragInfo & 1); - - const Uint32 trace = src->theTrace; - const Uint32 verid_gsn = src->theVerId_signalNumber; - const Uint32 secCount = src->m_noOfSections; - - word1 |= ((signal_len << WORD1_SIGNAL_LEN_SHIFT) & WORD1_SIGNAL_LEN_MASK); - word1 |= ((fragInfo1 << (WORD1_FRAG_INF_SHIFT-1)) & WORD1_FRAG_INF_MASK); - word1 |= ((fragInfo2 << WORD1_FRAG_INF2_SHIFT) & WORD1_FRAG_INF2_MASK); - - word2 |= ((trace << WORD2_TRACE_SHIFT) & WORD2_TRACE_MASK); - word2 |= (verid_gsn & WORD2_VERID_GSN_MASK); - word2 |= ((secCount << WORD2_SEC_COUNT_SHIFT) & WORD2_SEC_COUNT_MASK); - - Uint32 sBlockNum = src->theSendersBlockRef ; - Uint32 rBlockNum = src->theReceiversBlockNumber ; - - word3 |= (sBlockNum & WORD3_SENDER_MASK); - word3 |= ((rBlockNum << WORD3_RECEIVER_SHIFT) & WORD3_RECEIVER_MASK); -} - -// Define of TransporterInternalDefinitions_H -#endif diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp deleted file mode 100644 index e820322f96e..00000000000 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ /dev/null @@ -1,1448 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
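/* Illustrative sketch, not part of the original file: a detail worth noting in
   createProtocol6Header()/createSignalHeader() above is that the 2-bit fragment-info field is
   split across two non-adjacent bits of word 1 (bit 1 and bit 25).  A stand-alone round trip
   of just that field, using the same masks and shifts, behaves like this: */
#include <cstdint>
#include <cassert>

int main()
{
  const std::uint32_t FRAG_INF_MASK  = 0x00000002, FRAG_INF_SHIFT  = 1;   // carries bit 1 of fragInfo
  const std::uint32_t FRAG_INF2_MASK = 0x02000000, FRAG_INF2_SHIFT = 25;  // carries bit 0 of fragInfo

  for (std::uint32_t fragInfo = 0; fragInfo < 4; fragInfo++) {
    std::uint32_t word1 = 0;
    word1 |= (((fragInfo & 2) << (FRAG_INF_SHIFT - 1)) & FRAG_INF_MASK);  // as in createProtocol6Header
    word1 |= (((fragInfo & 1) << FRAG_INF2_SHIFT) & FRAG_INF2_MASK);

    const std::uint32_t fragInfo1 = (word1 & FRAG_INF_MASK)  >> (FRAG_INF_SHIFT - 1); // as in createSignalHeader
    const std::uint32_t fragInfo2 = (word1 & FRAG_INF2_MASK) >> FRAG_INF2_SHIFT;
    assert((fragInfo1 | fragInfo2) == fragInfo);
  }
  return 0;
}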
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -#include -#include "TransporterInternalDefinitions.hpp" - -#include "Transporter.hpp" -#include - -#ifdef NDB_TCP_TRANSPORTER -#include "TCP_Transporter.hpp" -#endif - -#ifdef NDB_SCI_TRANSPORTER -#include "SCI_Transporter.hpp" -#endif - -#ifdef NDB_SHM_TRANSPORTER -#include "SHM_Transporter.hpp" -extern int g_ndb_shm_signum; -#endif - -#include "TransporterCallback.hpp" -#include "NdbOut.hpp" -#include -#include -#include -#include - -#include -#include -#include - -#include -extern EventLogger g_eventLogger; - -struct in_addr -TransporterRegistry::get_connect_address(NodeId node_id) const -{ - return theTransporters[node_id]->m_connect_address; -} - -SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("SocketServer::Session * TransporterService::newSession"); - if (m_auth && !m_auth->server_authenticate(sockfd)){ - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(0); - } - - if (!m_transporter_registry->connect_server(sockfd)) - { - NDB_CLOSE_SOCKET(sockfd); - DBUG_RETURN(0); - } - - DBUG_RETURN(0); -} - -TransporterRegistry::TransporterRegistry(void * callback, - unsigned _maxTransporters, - unsigned sizeOfLongSignalMemory) : - m_mgm_handle(0), - m_transp_count(0) -{ - DBUG_ENTER("TransporterRegistry::TransporterRegistry"); - - nodeIdSpecified = false; - maxTransporters = _maxTransporters; - sendCounter = 1; - - callbackObj=callback; - - theTCPTransporters = new TCP_Transporter * [maxTransporters]; - theSCITransporters = new SCI_Transporter * [maxTransporters]; - theSHMTransporters = new SHM_Transporter * [maxTransporters]; - theTransporterTypes = new TransporterType [maxTransporters]; - theTransporters = new Transporter * [maxTransporters]; - performStates = new PerformState [maxTransporters]; - ioStates = new IOState [maxTransporters]; - - // Initialize member variables - nTransporters = 0; - nTCPTransporters = 0; - nSCITransporters = 0; - nSHMTransporters = 0; - - // Initialize the transporter arrays - for (unsigned i=0; igetRemoteNodeId()); - } -} - -void -TransporterRegistry::disconnectAll(){ - for(unsigned i = 0; idoDisconnect(); - } -} - -bool -TransporterRegistry::init(NodeId nodeId) { - DBUG_ENTER("TransporterRegistry::init"); - nodeIdSpecified = true; - localNodeId = nodeId; - - DEBUG("TransporterRegistry started node: " << localNodeId); - - DBUG_RETURN(true); -} - -bool -TransporterRegistry::connect_server(NDB_SOCKET_TYPE sockfd) -{ - DBUG_ENTER("TransporterRegistry::connect_server"); - - // read node id from client - // read transporter type - int nodeId, remote_transporter_type= -1; - SocketInputStream s_input(sockfd); - char buf[256]; - if (s_input.gets(buf, 256) == 0) { - DBUG_PRINT("error", ("Could not get node id from client")); - DBUG_RETURN(false); - } - int r= sscanf(buf, "%d %d", &nodeId, &remote_transporter_type); - switch (r) { - case 2: - break; - case 1: - // we're running version prior to 4.1.9 - // ok, but with no checks on transporter configuration compatability - break; - default: - DBUG_PRINT("error", ("Error in node id from client")); - DBUG_RETURN(false); - } - - DBUG_PRINT("info", ("nodeId=%d remote_transporter_type=%d", - nodeId,remote_transporter_type)); - - //check that nodeid is valid and that there is an allocated transporter - if ( nodeId < 0 || nodeId >= (int)maxTransporters) { 
- DBUG_PRINT("error", ("Node id out of range from client")); - DBUG_RETURN(false); - } - if (theTransporters[nodeId] == 0) { - DBUG_PRINT("error", ("No transporter for this node id from client")); - DBUG_RETURN(false); - } - - //check that the transporter should be connected - if (performStates[nodeId] != TransporterRegistry::CONNECTING) { - DBUG_PRINT("error", ("Transporter in wrong state for this node id from client")); - DBUG_RETURN(false); - } - - Transporter *t= theTransporters[nodeId]; - - // send info about own id (just as response to acknowledge connection) - // send info on own transporter type - SocketOutputStream s_output(sockfd); - s_output.println("%d %d", t->getLocalNodeId(), t->m_type); - - if (remote_transporter_type != -1) - { - if (remote_transporter_type != t->m_type) - { - DBUG_PRINT("error", ("Transporter types mismatch this=%d remote=%d", - t->m_type, remote_transporter_type)); - g_eventLogger.error("Incompatible configuration: Transporter type " - "mismatch with node %d", nodeId); - - // wait for socket close for 1 second to let message arrive at client - { - fd_set a_set; - FD_ZERO(&a_set); - FD_SET(sockfd, &a_set); - struct timeval timeout; - timeout.tv_sec = 1; timeout.tv_usec = 0; - select(sockfd+1, &a_set, 0, 0, &timeout); - } - DBUG_RETURN(false); - } - } - else if (t->m_type == tt_SHM_TRANSPORTER) - { - g_eventLogger.warning("Unable to verify transporter compatability with node %d", nodeId); - } - - // setup transporter (transporter responsible for closing sockfd) - t->connect_server(sockfd); - - DBUG_RETURN(true); -} - -bool -TransporterRegistry::createTCPTransporter(TransporterConfiguration *config) { -#ifdef NDB_TCP_TRANSPORTER - - if(!nodeIdSpecified){ - init(config->localNodeId); - } - - if(config->localNodeId != localNodeId) - return false; - - if(theTransporters[config->remoteNodeId] != NULL) - return false; - - TCP_Transporter * t = new TCP_Transporter(*this, - config->tcp.sendBufferSize, - config->tcp.maxReceiveSize, - config->localHostName, - config->remoteHostName, - config->s_port, - config->isMgmConnection, - localNodeId, - config->remoteNodeId, - config->serverNodeId, - config->checksum, - config->signalId); - if (t == NULL) - return false; - else if (!t->initTransporter()) { - delete t; - return false; - } - - // Put the transporter in the transporter arrays - theTCPTransporters[nTCPTransporters] = t; - theTransporters[t->getRemoteNodeId()] = t; - theTransporterTypes[t->getRemoteNodeId()] = tt_TCP_TRANSPORTER; - performStates[t->getRemoteNodeId()] = DISCONNECTED; - nTransporters++; - nTCPTransporters++; - - return true; -#else - return false; -#endif -} - -bool -TransporterRegistry::createSCITransporter(TransporterConfiguration *config) { -#ifdef NDB_SCI_TRANSPORTER - - if(!SCI_Transporter::initSCI()) - abort(); - - if(!nodeIdSpecified){ - init(config->localNodeId); - } - - if(config->localNodeId != localNodeId) - return false; - - if(theTransporters[config->remoteNodeId] != NULL) - return false; - - SCI_Transporter * t = new SCI_Transporter(*this, - config->localHostName, - config->remoteHostName, - config->s_port, - config->isMgmConnection, - config->sci.sendLimit, - config->sci.bufferSize, - config->sci.nLocalAdapters, - config->sci.remoteSciNodeId0, - config->sci.remoteSciNodeId1, - localNodeId, - config->remoteNodeId, - config->serverNodeId, - config->checksum, - config->signalId); - - if (t == NULL) - return false; - else if (!t->initTransporter()) { - delete t; - return false; - } - // Put the transporter in the transporter arrays - 
theSCITransporters[nSCITransporters] = t; - theTransporters[t->getRemoteNodeId()] = t; - theTransporterTypes[t->getRemoteNodeId()] = tt_SCI_TRANSPORTER; - performStates[t->getRemoteNodeId()] = DISCONNECTED; - nTransporters++; - nSCITransporters++; - - return true; -#else - return false; -#endif -} - -bool -TransporterRegistry::createSHMTransporter(TransporterConfiguration *config) { - DBUG_ENTER("TransporterRegistry::createTransporter SHM"); -#ifdef NDB_SHM_TRANSPORTER - if(!nodeIdSpecified){ - init(config->localNodeId); - } - - if(config->localNodeId != localNodeId) - return false; - - if (!g_ndb_shm_signum) { - g_ndb_shm_signum= config->shm.signum; - DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum)); - /** - * Make sure to block g_ndb_shm_signum - * TransporterRegistry::init is run from "main" thread - */ - NdbThread_set_shm_sigmask(TRUE); - } - - if(config->shm.signum != g_ndb_shm_signum) - return false; - - if(theTransporters[config->remoteNodeId] != NULL) - return false; - - SHM_Transporter * t = new SHM_Transporter(*this, - config->localHostName, - config->remoteHostName, - config->s_port, - config->isMgmConnection, - localNodeId, - config->remoteNodeId, - config->serverNodeId, - config->checksum, - config->signalId, - config->shm.shmKey, - config->shm.shmSize - ); - if (t == NULL) - return false; - else if (!t->initTransporter()) { - delete t; - return false; - } - // Put the transporter in the transporter arrays - theSHMTransporters[nSHMTransporters] = t; - theTransporters[t->getRemoteNodeId()] = t; - theTransporterTypes[t->getRemoteNodeId()] = tt_SHM_TRANSPORTER; - performStates[t->getRemoteNodeId()] = DISCONNECTED; - - nTransporters++; - nSHMTransporters++; - - DBUG_RETURN(true); -#else - DBUG_RETURN(false); -#endif -} - - -void -TransporterRegistry::removeTransporter(NodeId nodeId) { - - DEBUG("Removing transporter from " << localNodeId - << " to " << nodeId); - - if(theTransporters[nodeId] == NULL) - return; - - theTransporters[nodeId]->doDisconnect(); - - const TransporterType type = theTransporterTypes[nodeId]; - - int ind = 0; - switch(type){ - case tt_TCP_TRANSPORTER: -#ifdef NDB_TCP_TRANSPORTER - for(; ind < nTCPTransporters; ind++) - if(theTCPTransporters[ind]->getRemoteNodeId() == nodeId) - break; - ind++; - for(; indgetRemoteNodeId() == nodeId) - break; - ind++; - for(; indgetRemoteNodeId() == nodeId) - break; - ind++; - for(; indget_free_buffer(); - } - return 0; -} - - -SendStatus -TransporterRegistry::prepareSend(const SignalHeader * const signalHeader, - Uint8 prio, - const Uint32 * const signalData, - NodeId nodeId, - const LinearSectionPtr ptr[3]){ - - - Transporter *t = theTransporters[nodeId]; - if(t != NULL && - (((ioStates[nodeId] != HaltOutput) && (ioStates[nodeId] != HaltIO)) || - ((signalHeader->theReceiversBlockNumber == 252) || - (signalHeader->theReceiversBlockNumber == 4002)))) { - - if(t->isConnected()){ - Uint32 lenBytes = t->m_packer.getMessageLength(signalHeader, ptr); - if(lenBytes <= MAX_MESSAGE_SIZE){ - Uint32 * insertPtr = t->getWritePtr(lenBytes, prio); - if(insertPtr != 0){ - t->m_packer.pack(insertPtr, prio, signalHeader, signalData, ptr); - t->updateWritePtr(lenBytes, prio); - return SEND_OK; - } - - int sleepTime = 2; - - /** - * @note: on linux/i386 the granularity is 10ms - * so sleepTime = 2 generates a 10 ms sleep. 
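/* Illustrative sketch, not part of the original file: the loop that follows this note is a
   bounded retry - when the send buffer is full, prepareSend() retries getWritePtr() up to 50
   times, sleeping between attempts only when no busy-polled (SHM/SCI) transporters exist, and
   drops the signal (TE_SIGNAL_LOST_SEND_BUFFER_FULL) only if every retry fails.  The pattern
   in isolation, with hypothetical try_reserve/sleep_ms callables, is roughly: */
#include <cstdint>

template <class TryReserve, class SleepMs>
static std::uint32_t *reserve_with_retry(TryReserve try_reserve, SleepMs sleep_ms,
                                         bool may_sleep, int max_retries = 50)
{
  std::uint32_t *ptr = try_reserve();             // fast path: buffer already has room
  for (int i = 0; ptr == nullptr && i < max_retries; i++) {
    if (may_sleep)
      sleep_ms(10);                               // ~10 ms per attempt, as the note above explains
    ptr = try_reserve();
  }
  return ptr;                                     // nullptr here means the caller must drop the signal
}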
- */ - for(int i = 0; i<50; i++){ - if((nSHMTransporters+nSCITransporters) == 0) - NdbSleep_MilliSleep(sleepTime); - insertPtr = t->getWritePtr(lenBytes, prio); - if(insertPtr != 0){ - t->m_packer.pack(insertPtr, prio, signalHeader, signalData, ptr); - t->updateWritePtr(lenBytes, prio); - break; - } - } - - if(insertPtr != 0){ - /** - * Send buffer full, but resend works - */ - reportError(callbackObj, nodeId, TE_SEND_BUFFER_FULL); - return SEND_OK; - } - - WARNING("Signal to " << nodeId << " lost(buffer)"); - reportError(callbackObj, nodeId, TE_SIGNAL_LOST_SEND_BUFFER_FULL); - return SEND_BUFFER_FULL; - } else { - return SEND_MESSAGE_TOO_BIG; - } - } else { - DEBUG("Signal to " << nodeId << " lost(disconnect) "); - return SEND_DISCONNECTED; - } - } else { - DEBUG("Discarding message to block: " - << signalHeader->theReceiversBlockNumber - << " node: " << nodeId); - - if(t == NULL) - return SEND_UNKNOWN_NODE; - - return SEND_BLOCKED; - } -} - -SendStatus -TransporterRegistry::prepareSend(const SignalHeader * const signalHeader, - Uint8 prio, - const Uint32 * const signalData, - NodeId nodeId, - class SectionSegmentPool & thePool, - const SegmentedSectionPtr ptr[3]){ - - - Transporter *t = theTransporters[nodeId]; - if(t != NULL && - (((ioStates[nodeId] != HaltOutput) && (ioStates[nodeId] != HaltIO)) || - ((signalHeader->theReceiversBlockNumber == 252)|| - (signalHeader->theReceiversBlockNumber == 4002)))) { - - if(t->isConnected()){ - Uint32 lenBytes = t->m_packer.getMessageLength(signalHeader, ptr); - if(lenBytes <= MAX_MESSAGE_SIZE){ - Uint32 * insertPtr = t->getWritePtr(lenBytes, prio); - if(insertPtr != 0){ - t->m_packer.pack(insertPtr, prio, signalHeader, signalData, thePool, ptr); - t->updateWritePtr(lenBytes, prio); - return SEND_OK; - } - - - /** - * @note: on linux/i386 the granularity is 10ms - * so sleepTime = 2 generates a 10 ms sleep. - */ - int sleepTime = 2; - for(int i = 0; i<50; i++){ - if((nSHMTransporters+nSCITransporters) == 0) - NdbSleep_MilliSleep(sleepTime); - insertPtr = t->getWritePtr(lenBytes, prio); - if(insertPtr != 0){ - t->m_packer.pack(insertPtr, prio, signalHeader, signalData, thePool, ptr); - t->updateWritePtr(lenBytes, prio); - break; - } - } - - if(insertPtr != 0){ - /** - * Send buffer full, but resend works - */ - reportError(callbackObj, nodeId, TE_SEND_BUFFER_FULL); - return SEND_OK; - } - - WARNING("Signal to " << nodeId << " lost(buffer)"); - reportError(callbackObj, nodeId, TE_SIGNAL_LOST_SEND_BUFFER_FULL); - return SEND_BUFFER_FULL; - } else { - return SEND_MESSAGE_TOO_BIG; - } - } else { - DEBUG("Signal to " << nodeId << " lost(disconnect) "); - return SEND_DISCONNECTED; - } - } else { - DEBUG("Discarding message to block: " - << signalHeader->theReceiversBlockNumber - << " node: " << nodeId); - - if(t == NULL) - return SEND_UNKNOWN_NODE; - - return SEND_BLOCKED; - } -} - -void -TransporterRegistry::external_IO(Uint32 timeOutMillis) { - //----------------------------------------------------------- - // Most of the time we will send the buffers here and then wait - // for new signals. Thus we start by sending without timeout - // followed by the receive part where we expect to sleep for - // a while. 
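/* Illustrative sketch, not part of the original file: external_IO() here is the usual
   poll-then-drain shape - wait in pollReceive() for at most timeOutMillis, unpack whatever
   arrived, then flush pending sends.  Over a hypothetical driver interface the same step
   looks like this: */
struct IoDriver {                                   // hypothetical stand-in for TransporterRegistry
  virtual unsigned pollReceive(unsigned timeOutMillis) = 0;   // non-zero when data is readable
  virtual void performReceive() = 0;                          // recv() and unpack signals
  virtual void performSend() = 0;                             // flush buffered signals
  virtual ~IoDriver() {}
};

static void io_once(IoDriver &d, unsigned timeOutMillis)
{
  if (d.pollReceive(timeOutMillis))                 // this is where the thread sleeps
    d.performReceive();
  d.performSend();                                  // always give pending sends a chance afterwards
}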
- //----------------------------------------------------------- - if(pollReceive(timeOutMillis)){ - performReceive(); - } - performSend(); -} - -Uint32 -TransporterRegistry::pollReceive(Uint32 timeOutMillis){ - Uint32 retVal = 0; - - if((nSCITransporters) > 0) - { - timeOutMillis=0; - } - -#ifdef NDB_SHM_TRANSPORTER - if(nSHMTransporters > 0) - { - Uint32 res = poll_SHM(0); - if(res) - { - retVal |= res; - timeOutMillis = 0; - } - } -#endif - -#ifdef NDB_TCP_TRANSPORTER - if(nTCPTransporters > 0 || retVal == 0) - { - retVal |= poll_TCP(timeOutMillis); - } - else - tcpReadSelectReply = 0; -#endif -#ifdef NDB_SCI_TRANSPORTER - if(nSCITransporters > 0) - retVal |= poll_SCI(timeOutMillis); -#endif -#ifdef NDB_SHM_TRANSPORTER - if(nSHMTransporters > 0 && retVal == 0) - { - int res = poll_SHM(0); - retVal |= res; - } -#endif - return retVal; -} - - -#ifdef NDB_SCI_TRANSPORTER -Uint32 -TransporterRegistry::poll_SCI(Uint32 timeOutMillis) -{ - for (int i=0; iisConnected()) { - if(t->hasDataToRead()) - return 1; - } - } - return 0; -} -#endif - - -#ifdef NDB_SHM_TRANSPORTER -static int g_shm_counter = 0; -Uint32 -TransporterRegistry::poll_SHM(Uint32 timeOutMillis) -{ - for(int j=0; j < 100; j++) - { - for (int i=0; iisConnected()) { - if(t->hasDataToRead()) { - return 1; - } - } - } - } - return 0; -} -#endif - -#ifdef NDB_TCP_TRANSPORTER -Uint32 -TransporterRegistry::poll_TCP(Uint32 timeOutMillis) -{ - bool hasdata = false; - if (false && nTCPTransporters == 0) - { - tcpReadSelectReply = 0; - return 0; - } - - NDB_SOCKET_TYPE maxSocketValue = -1; - - // Needed for TCP/IP connections - // The read- and writeset are used by select - - FD_ZERO(&tcpReadset); - - // Prepare for sending and receiving - for (int i = 0; i < nTCPTransporters; i++) { - TCP_Transporter * t = theTCPTransporters[i]; - - // If the transporter is connected - NodeId nodeId = t->getRemoteNodeId(); - if (is_connected(nodeId) && t->isConnected()) { - - const NDB_SOCKET_TYPE socket = t->getSocket(); - // Find the highest socket value. It will be used by select - if (socket > maxSocketValue) - maxSocketValue = socket; - - // Put the connected transporters in the socket read-set - FD_SET(socket, &tcpReadset); - } - hasdata |= t->hasReceiveData(); - } - - timeOutMillis = hasdata ? 
0 : timeOutMillis; - - struct timeval timeout; - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - - // The highest socket value plus one - maxSocketValue++; - - tcpReadSelectReply = select(maxSocketValue, &tcpReadset, 0, 0, &timeout); - if(false && tcpReadSelectReply == -1 && errno == EINTR) - g_eventLogger.info("woke-up by signal"); - -#ifdef NDB_WIN32 - if(tcpReadSelectReply == SOCKET_ERROR) - { - NdbSleep_MilliSleep(timeOutMillis); - } -#endif - - return tcpReadSelectReply || hasdata; -} -#endif - - -void -TransporterRegistry::performReceive() -{ -#ifdef NDB_TCP_TRANSPORTER - for (int i=0; igetRemoteNodeId(); - const NDB_SOCKET_TYPE socket = t->getSocket(); - if(is_connected(nodeId)){ - if(t->isConnected()) - { - if (FD_ISSET(socket, &tcpReadset)) - { - t->doReceive(); - } - - if (t->hasReceiveData()) - { - Uint32 * ptr; - Uint32 sz = t->getReceiveData(&ptr); - transporter_recv_from(callbackObj, nodeId); - Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]); - t->updateReceiveDataPtr(szUsed); - } - } - } - } -#endif - -#ifdef NDB_SCI_TRANSPORTER - //performReceive - //do prepareReceive on the SCI transporters (prepareReceive(t,,,,)) - for (int i=0; igetRemoteNodeId(); - if(is_connected(nodeId)) - { - if(t->isConnected() && t->checkConnected()) - { - Uint32 * readPtr, * eodPtr; - t->getReceivePtr(&readPtr, &eodPtr); - transporter_recv_from(callbackObj, nodeId); - Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); - t->updateReceivePtr(newPtr); - } - } - } -#endif -#ifdef NDB_SHM_TRANSPORTER - for (int i=0; igetRemoteNodeId(); - if(is_connected(nodeId)){ - if(t->isConnected() && t->checkConnected()) - { - Uint32 * readPtr, * eodPtr; - t->getReceivePtr(&readPtr, &eodPtr); - transporter_recv_from(callbackObj, nodeId); - Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]); - t->updateReceivePtr(newPtr); - } - } - } -#endif -} - -void -TransporterRegistry::performSend() -{ - int i; - sendCounter = 1; - -#ifdef NDB_TCP_TRANSPORTER - for (i = m_transp_count; i < nTCPTransporters; i++) - { - TCP_Transporter *t = theTCPTransporters[i]; - if (t && t->hasDataToSend() && t->isConnected() && - is_connected(t->getRemoteNodeId())) - { - t->doSend(); - } - } - for (i = 0; i < m_transp_count && i < nTCPTransporters; i++) - { - TCP_Transporter *t = theTCPTransporters[i]; - if (t && t->hasDataToSend() && t->isConnected() && - is_connected(t->getRemoteNodeId())) - { - t->doSend(); - } - } - m_transp_count++; - if (m_transp_count == nTCPTransporters) m_transp_count = 0; -#endif -#ifdef NDB_SCI_TRANSPORTER - //scroll through the SCI transporters, - // get each transporter, check if connected, send data - for (i=0; igetRemoteNodeId(); - - if(is_connected(nodeId)) - { - if(t->isConnected() && t->hasDataToSend()) { - t->doSend(); - } //if - } //if - } -#endif - -#ifdef NDB_SHM_TRANSPORTER - for (i=0; igetRemoteNodeId(); - if(is_connected(nodeId)) - { - if(t->isConnected()) - { - t->doSend(); - } - } - } -#endif -} - -int -TransporterRegistry::forceSendCheck(int sendLimit){ - int tSendCounter = sendCounter; - sendCounter = tSendCounter + 1; - if (tSendCounter >= sendLimit) { - performSend(); - sendCounter = 1; - return 1; - }//if - return 0; -}//TransporterRegistry::forceSendCheck() - -#ifdef DEBUG_TRANSPORTER -void -TransporterRegistry::printState(){ - ndbout << "-- TransporterRegistry -- " << endl << endl - << "Transporters = " << nTransporters << endl; - for(int i = 0; igetRemoteNodeId(); - ndbout << "Transporter: " << 
remoteNodeId - << " PerformState: " << performStates[remoteNodeId] - << " IOState: " << ioStates[remoteNodeId] << endl; - } -} -#endif - -IOState -TransporterRegistry::ioState(NodeId nodeId) { - return ioStates[nodeId]; -} - -void -TransporterRegistry::setIOState(NodeId nodeId, IOState state) { - DEBUG("TransporterRegistry::setIOState(" - << nodeId << ", " << state << ")"); - ioStates[nodeId] = state; -} - -static void * -run_start_clients_C(void * me) -{ - ((TransporterRegistry*) me)->start_clients_thread(); - return 0; -} - -// Run by kernel thread -void -TransporterRegistry::do_connect(NodeId node_id) -{ - PerformState &curr_state = performStates[node_id]; - switch(curr_state){ - case DISCONNECTED: - break; - case CONNECTED: - return; - case CONNECTING: - return; - case DISCONNECTING: - break; - } - DBUG_ENTER("TransporterRegistry::do_connect"); - DBUG_PRINT("info",("performStates[%d]=CONNECTING",node_id)); - curr_state= CONNECTING; - DBUG_VOID_RETURN; -} -void -TransporterRegistry::do_disconnect(NodeId node_id) -{ - PerformState &curr_state = performStates[node_id]; - switch(curr_state){ - case DISCONNECTED: - return; - case CONNECTED: - break; - case CONNECTING: - break; - case DISCONNECTING: - return; - } - DBUG_ENTER("TransporterRegistry::do_disconnect"); - DBUG_PRINT("info",("performStates[%d]=DISCONNECTING",node_id)); - curr_state= DISCONNECTING; - DBUG_VOID_RETURN; -} - -void -TransporterRegistry::report_connect(NodeId node_id) -{ - DBUG_ENTER("TransporterRegistry::report_connect"); - DBUG_PRINT("info",("performStates[%d]=CONNECTED",node_id)); - performStates[node_id] = CONNECTED; - reportConnect(callbackObj, node_id); - DBUG_VOID_RETURN; -} - -void -TransporterRegistry::report_disconnect(NodeId node_id, int errnum) -{ - DBUG_ENTER("TransporterRegistry::report_disconnect"); - DBUG_PRINT("info",("performStates[%d]=DISCONNECTED",node_id)); - performStates[node_id] = DISCONNECTED; - reportDisconnect(callbackObj, node_id, errnum); - DBUG_VOID_RETURN; -} - -void -TransporterRegistry::update_connections() -{ - for (int i= 0, n= 0; n < nTransporters; i++){ - Transporter * t = theTransporters[i]; - if (!t) - continue; - n++; - - const NodeId nodeId = t->getRemoteNodeId(); - switch(performStates[nodeId]){ - case CONNECTED: - case DISCONNECTED: - break; - case CONNECTING: - if(t->isConnected()) - report_connect(nodeId); - break; - case DISCONNECTING: - if(!t->isConnected()) - report_disconnect(nodeId, 0); - break; - } - } -} - -// run as own thread -void -TransporterRegistry::start_clients_thread() -{ - int persist_mgm_count= 0; - DBUG_ENTER("TransporterRegistry::start_clients_thread"); - while (m_run_start_clients_thread) { - NdbSleep_MilliSleep(100); - persist_mgm_count++; - if(persist_mgm_count==50) - { - ndb_mgm_check_connection(m_mgm_handle); - persist_mgm_count= 0; - } - for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){ - Transporter * t = theTransporters[i]; - if (!t) - continue; - n++; - - const NodeId nodeId = t->getRemoteNodeId(); - switch(performStates[nodeId]){ - case CONNECTING: - if(!t->isConnected() && !t->isServer) { - bool connected= false; - /** - * First, we try to connect (if we have a port number). 
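/* Illustrative sketch, not part of the original file: the per-node PerformState machine used
   above has four states.  do_connect()/do_disconnect() only request a transition (CONNECTING
   or DISCONNECTING); update_connections() and this start_clients thread then observe the
   transporter, and report_connect()/report_disconnect() commit the final state.  Reduced to
   its transition rules: */
enum class PerformState { DISCONNECTED, CONNECTING, CONNECTED, DISCONNECTING };

// do_connect(): only a node that is not already connected/connecting moves to CONNECTING.
static PerformState request_connect(PerformState s)
{
  return (s == PerformState::CONNECTED || s == PerformState::CONNECTING)
             ? s : PerformState::CONNECTING;
}

// do_disconnect(): the mirror image of the above.
static PerformState request_disconnect(PerformState s)
{
  return (s == PerformState::DISCONNECTED || s == PerformState::DISCONNECTING)
             ? s : PerformState::DISCONNECTING;
}

// update_connections(): commit once the transporter is observed in the requested state.
static PerformState observe(PerformState s, bool transporter_connected)
{
  if (s == PerformState::CONNECTING && transporter_connected)      return PerformState::CONNECTED;
  if (s == PerformState::DISCONNECTING && !transporter_connected)  return PerformState::DISCONNECTED;
  return s;
}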
- */ - if (t->get_s_port()) - connected= t->connect_client(); - - /** - * If dynamic, get the port for connecting from the management server - */ - if( !connected && t->get_s_port() <= 0) { // Port is dynamic - int server_port= 0; - struct ndb_mgm_reply mgm_reply; - - if(!ndb_mgm_is_connected(m_mgm_handle)) - ndb_mgm_connect(m_mgm_handle, 0, 0, 0); - - if(ndb_mgm_is_connected(m_mgm_handle)) - { - int res= - ndb_mgm_get_connection_int_parameter(m_mgm_handle, - t->getRemoteNodeId(), - t->getLocalNodeId(), - CFG_CONNECTION_SERVER_PORT, - &server_port, - &mgm_reply); - DBUG_PRINT("info",("Got dynamic port %d for %d -> %d (ret: %d)", - server_port,t->getRemoteNodeId(), - t->getLocalNodeId(),res)); - if( res >= 0 ) - { - /** - * Server_port == 0 just means that that a mgmt server - * has not received a new port yet. Keep the old. - */ - if (server_port) - t->set_s_port(server_port); - } - else if(ndb_mgm_is_connected(m_mgm_handle)) - { - g_eventLogger.info("Failed to get dynamic port to connect to: %d", res); - ndb_mgm_disconnect(m_mgm_handle); - } - else - { - g_eventLogger.info("Management server closed connection early. " - "It is probably being shut down (or has problems). " - "We will retry the connection. %d %s %s line: %d", - ndb_mgm_get_latest_error(m_mgm_handle), - ndb_mgm_get_latest_error_desc(m_mgm_handle), - ndb_mgm_get_latest_error_msg(m_mgm_handle), - ndb_mgm_get_latest_error_line(m_mgm_handle) - ); - } - } - /** else - * We will not be able to get a new port unless - * the m_mgm_handle is connected. Note that not - * being connected is an ok state, just continue - * until it is able to connect. Continue using the - * old port until we can connect again and get a - * new port. - */ - } - } - break; - case DISCONNECTING: - if(t->isConnected()) - t->doDisconnect(); - break; - default: - break; - } - } - } - DBUG_VOID_RETURN; -} - -bool -TransporterRegistry::start_clients() -{ - m_run_start_clients_thread= true; - m_start_clients_thread= NdbThread_Create(run_start_clients_C, - (void**)this, - 32768, - "ndb_start_clients", - NDB_THREAD_PRIO_LOW); - if (m_start_clients_thread == 0) { - m_run_start_clients_thread= false; - return false; - } - return true; -} - -bool -TransporterRegistry::stop_clients() -{ - if (m_start_clients_thread) { - m_run_start_clients_thread= false; - void* status; - NdbThread_WaitFor(m_start_clients_thread, &status); - NdbThread_Destroy(&m_start_clients_thread); - } - return true; -} - -void -TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, - const char *interf, - int s_port) -{ - DBUG_ENTER("TransporterRegistry::add_transporter_interface"); - DBUG_PRINT("enter",("interface=%s, s_port= %d", interf, s_port)); - if (interf && strlen(interf) == 0) - interf= 0; - - for (unsigned i= 0; i < m_transporter_interface.size(); i++) - { - Transporter_interface &tmp= m_transporter_interface[i]; - if (s_port != tmp.m_s_service_port || tmp.m_s_service_port==0) - continue; - if (interf != 0 && tmp.m_interface != 0 && - strcmp(interf, tmp.m_interface) == 0) - { - DBUG_VOID_RETURN; // found match, no need to insert - } - if (interf == 0 && tmp.m_interface == 0) - { - DBUG_VOID_RETURN; // found match, no need to insert - } - } - Transporter_interface t; - t.m_remote_nodeId= remoteNodeId; - t.m_s_service_port= s_port; - t.m_interface= interf; - m_transporter_interface.push_back(t); - DBUG_PRINT("exit",("interface and port added")); - DBUG_VOID_RETURN; -} - -bool -TransporterRegistry::start_service(SocketServer& socket_server) -{ - 
DBUG_ENTER("TransporterRegistry::start_service"); - if (m_transporter_interface.size() > 0 && !nodeIdSpecified) - { - g_eventLogger.error("TransporterRegistry::startReceiving: localNodeId not specified"); - DBUG_RETURN(false); - } - - for (unsigned i= 0; i < m_transporter_interface.size(); i++) - { - Transporter_interface &t= m_transporter_interface[i]; - - unsigned short port= (unsigned short)t.m_s_service_port; - if(t.m_s_service_port<0) - port= -t.m_s_service_port; // is a dynamic port - TransporterService *transporter_service = - new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd")); - if(!socket_server.setup(transporter_service, - &port, t.m_interface)) - { - DBUG_PRINT("info", ("Trying new port")); - port= 0; - if(t.m_s_service_port>0 - || !socket_server.setup(transporter_service, - &port, t.m_interface)) - { - /* - * If it wasn't a dynamically allocated port, or - * our attempts at getting a new dynamic port failed - */ - g_eventLogger.error("Unable to setup transporter service port: %s:%d!\n" - "Please check if the port is already used,\n" - "(perhaps the node is already running)", - t.m_interface ? t.m_interface : "*", t.m_s_service_port); - delete transporter_service; - DBUG_RETURN(false); - } - } - t.m_s_service_port= (t.m_s_service_port<=0)?-port:port; // -`ve if dynamic - DBUG_PRINT("info", ("t.m_s_service_port = %d",t.m_s_service_port)); - transporter_service->setTransporterRegistry(this); - } - DBUG_RETURN(true); -} - -#ifdef NDB_SHM_TRANSPORTER -static -RETSIGTYPE -shm_sig_handler(int signo) -{ - g_shm_counter++; -} -#endif - -void -TransporterRegistry::startReceiving() -{ - DBUG_ENTER("TransporterRegistry::startReceiving"); - -#ifdef NDB_SHM_TRANSPORTER - m_shm_own_pid = getpid(); - if (g_ndb_shm_signum) - { - DBUG_PRINT("info",("Install signal handler for signum %d", - g_ndb_shm_signum)); - struct sigaction sa; - NdbThread_set_shm_sigmask(FALSE); - sigemptyset(&sa.sa_mask); - sa.sa_handler = shm_sig_handler; - sa.sa_flags = 0; - int ret; - while((ret = sigaction(g_ndb_shm_signum, &sa, 0)) == -1 && errno == EINTR); - if(ret != 0) - { - DBUG_PRINT("error",("Install failed")); - g_eventLogger.error("Failed to install signal handler for" - " SHM transporter, signum %d, errno: %d (%s)", - g_ndb_shm_signum, errno, strerror(errno)); - } - } -#endif // NDB_SHM_TRANSPORTER - DBUG_VOID_RETURN; -} - -void -TransporterRegistry::stopReceiving(){ - /** - * Disconnect all transporters, this includes detach from remote node - * and since that must be done from the same process that called attach - * it's done here in the receive thread - */ - disconnectAll(); -} - -void -TransporterRegistry::startSending(){ -} - -void -TransporterRegistry::stopSending(){ -} - -NdbOut & operator <<(NdbOut & out, SignalHeader & sh){ - out << "-- Signal Header --" << endl; - out << "theLength: " << sh.theLength << endl; - out << "gsn: " << sh.theVerId_signalNumber << endl; - out << "recBlockNo: " << sh.theReceiversBlockNumber << endl; - out << "sendBlockRef: " << sh.theSendersBlockRef << endl; - out << "sendersSig: " << sh.theSendersSignalId << endl; - out << "theSignalId: " << sh.theSignalId << endl; - out << "trace: " << (int)sh.theTrace << endl; - return out; -} - -Transporter* -TransporterRegistry::get_transporter(NodeId nodeId) { - return theTransporters[nodeId]; -} - -bool TransporterRegistry::connect_client(NdbMgmHandle *h) -{ - DBUG_ENTER("TransporterRegistry::connect_client(NdbMgmHandle)"); - - Uint32 mgm_nodeid= ndb_mgm_get_mgmd_nodeid(*h); - - if(!mgm_nodeid) - { - 
g_eventLogger.error("%s: %d", __FILE__, __LINE__); - return false; - } - Transporter * t = theTransporters[mgm_nodeid]; - if (!t) - { - g_eventLogger.error("%s: %d", __FILE__, __LINE__); - return false; - } - DBUG_RETURN(t->connect_client(connect_ndb_mgmd(h))); -} - -/** - * Given a connected NdbMgmHandle, turns it into a transporter - * and returns the socket. - */ -NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h) -{ - struct ndb_mgm_reply mgm_reply; - - if ( h==NULL || *h == NULL ) - { - g_eventLogger.error("%s: %d", __FILE__, __LINE__); - return NDB_INVALID_SOCKET; - } - - for(unsigned int i=0;i < m_transporter_interface.size();i++) - if (m_transporter_interface[i].m_s_service_port < 0 - && ndb_mgm_set_connection_int_parameter(*h, - get_localNodeId(), - m_transporter_interface[i].m_remote_nodeId, - CFG_CONNECTION_SERVER_PORT, - m_transporter_interface[i].m_s_service_port, - &mgm_reply) < 0) - { - g_eventLogger.error("Error: %s: %d", - ndb_mgm_get_latest_error_desc(*h), - ndb_mgm_get_latest_error(*h)); - g_eventLogger.error("%s: %d", __FILE__, __LINE__); - ndb_mgm_destroy_handle(h); - return NDB_INVALID_SOCKET; - } - - /** - * convert_to_transporter also disposes of the handle (i.e. we don't leak - * memory here. - */ - NDB_SOCKET_TYPE sockfd= ndb_mgm_convert_to_transporter(h); - if ( sockfd == NDB_INVALID_SOCKET) - { - g_eventLogger.error("Error: %s: %d", - ndb_mgm_get_latest_error_desc(*h), - ndb_mgm_get_latest_error(*h)); - g_eventLogger.error("%s: %d", __FILE__, __LINE__); - ndb_mgm_destroy_handle(h); - } - return sockfd; -} - -/** - * Given a SocketClient, creates a NdbMgmHandle, turns it into a transporter - * and returns the socket. - */ -NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(SocketClient *sc) -{ - NdbMgmHandle h= ndb_mgm_create_handle(); - - if ( h == NULL ) - { - return NDB_INVALID_SOCKET; - } - - /** - * Set connectstring - */ - { - BaseString cs; - cs.assfmt("%s:%u",sc->get_server_name(),sc->get_port()); - ndb_mgm_set_connectstring(h, cs.c_str()); - } - - if(ndb_mgm_connect(h, 0, 0, 0)<0) - { - ndb_mgm_destroy_handle(&h); - return NDB_INVALID_SOCKET; - } - - return connect_ndb_mgmd(&h); -} - -template class Vector; diff --git a/storage/ndb/src/common/transporter/basictest/Makefile b/storage/ndb/src/common/transporter/basictest/Makefile deleted file mode 100644 index d86af360408..00000000000 --- a/storage/ndb/src/common/transporter/basictest/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := basicTransporterTest -BIN_TARGET_ARCHIVES := transporter portlib general - -SOURCES = basicTransporterTest.cpp - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp b/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp deleted file mode 100644 index b4b3e638935..00000000000 --- a/storage/ndb/src/common/transporter/basictest/basicTransporterTest.cpp +++ /dev/null @@ -1,512 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "TransporterRegistry.hpp" -#include "TransporterDefinitions.hpp" -#include "TransporterCallback.hpp" -#include - -#include -#include -#include -#include - -int basePortTCP = 17000; - -SCI_TransporterConfiguration sciTemplate = { - 8000, - // Packet size - 2500000, // Buffer size - 2, // number of adapters - 1, // remote node id SCI - 2, // Remote node Id SCI - 0, // local ndb node id (server) - 0, // remote ndb node id (client) - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - -TCP_TransporterConfiguration tcpTemplate = { - 17000, // port; - "", // remoteHostName; - "", // localhostname - 2, // remoteNodeId; - 1, // localNodeId; - 10000, // sendBufferSize - Size of SendBuffer of priority B - 10000, // maxReceiveSize - Maximum no of bytes to receive - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - -SHM_TransporterConfiguration shmTemplate = { - 0, //remoteNodeId - 0, //localNodeId; - false, //compression - true, //checksum; - true, //signalId; - 0, //byteOrder; - 123, //shmKey; - 2500000 //shmSize; -}; - -TransporterRegistry *tReg = 0; - -#include - -extern "C" -void -signalHandler(int signo){ - ::signal(13, signalHandler); - char buf[255]; - sprintf(buf,"Signal: %d\n", signo); - ndbout << buf << endl; -} - -void -usage(const char * progName){ - ndbout << "Usage: " << progName << " localNodeId localHostName" - << " remoteHostName1 remoteHostName2" << endl; - ndbout << " type = shm tcp ose sci" << endl; - ndbout << " localNodeId - 1 to 3" << endl; -} - -typedef void (* CreateTransporterFunc)(void * conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName); - -void createSCITransporter(void *, NodeId, NodeId, const char *, const char *); -void createTCPTransporter(void *, NodeId, NodeId, const char *, const char *); -void createSHMTransporter(void *, NodeId, NodeId, const char *, const char *); - -int signalReceived[4]; - -int -main(int argc, const char **argv){ - - signalHandler(0); - - for(int i = 0; i<4; i++) - signalReceived[i] = 0; - - if(argc < 5){ - usage(argv[0]); - return 0; - } - - Uint32 noOfConnections = 0; - const char * progName = argv[0]; - const char * type = argv[1]; - const NodeId localNodeId = atoi(argv[2]); - const char * localHostName = argv[3]; - const char * remoteHost1 = argv[4]; - const char * remoteHost2 = NULL; - - if(argc == 5) - noOfConnections = 1; - else { - noOfConnections = 2; - remoteHost2 = argv[5]; - } - - if(localNodeId < 1 || localNodeId > 3){ - ndbout << "localNodeId = " << localNodeId << endl << endl; - usage(progName); - return 0; - } - - ndbout << "-----------------" << endl; - ndbout << "localNodeId: " << localNodeId << endl; - ndbout << "localHostName: " << localHostName << endl; - ndbout << "remoteHost1 (node " << (localNodeId == 1?2:1) << "): " - << remoteHost1 << endl; - if(noOfConnections == 2){ - ndbout << "remoteHost2 (node " << (localNodeId == 3?2:3) << "): " - << remoteHost2 << endl; - } - ndbout << "-----------------" << endl; - - void * confTemplate = 0; - CreateTransporterFunc func = 0; - - if(strcasecmp(type, "tcp") == 0){ - func = createTCPTransporter; - confTemplate = &tcpTemplate; - } else if(strcasecmp(type, "sci") == 0){ - func = createSCITransporter; - 
confTemplate = &sciTemplate; - } else if(strcasecmp(type, "shm") == 0){ - func = createSHMTransporter; - confTemplate = &shmTemplate; - } else { - ndbout << "Unsupported transporter type" << endl; - return 0; - } - - ndbout << "Creating transporter registry" << endl; - tReg = new TransporterRegistry; - tReg->init(localNodeId); - - switch(localNodeId){ - case 1: - (* func)(confTemplate, 1, 2, localHostName, remoteHost1); - if(noOfConnections == 2) - (* func)(confTemplate, 1, 3, localHostName, remoteHost2); - break; - case 2: - (* func)(confTemplate, 2, 1, localHostName, remoteHost1); - if(noOfConnections == 2) - (* func)(confTemplate, 2, 3, localHostName, remoteHost2); - break; - case 3: - (* func)(confTemplate, 3, 1, localHostName, remoteHost1); - if(noOfConnections == 2) - (* func)(confTemplate, 3, 2, localHostName, remoteHost2); - break; - } - - ndbout << "Doing startSending/startReceiving" << endl; - tReg->startSending(); - tReg->startReceiving(); - - ndbout << "Connecting" << endl; - tReg->setPerformState(PerformConnect); - tReg->checkConnections(); - - unsigned sum = 0; - do { - sum = 0; - for(int i = 0; i<4; i++) - sum += signalReceived[i]; - - tReg->checkConnections(); - - tReg->external_IO(500); - NdbSleep_MilliSleep(500); - - ndbout << "In main loop" << endl; - } while(sum != 2*noOfConnections); - - ndbout << "Doing setPerformState(Disconnect)" << endl; - tReg->setPerformState(PerformDisconnect); - - ndbout << "Doing checkConnections()" << endl; - tReg->checkConnections(); - - ndbout << "Sleeping 3 secs" << endl; - NdbSleep_SecSleep(3); - - ndbout << "Deleting transporter registry" << endl; - delete tReg; tReg = 0; - - return 0; -} - -void -checkData(SignalHeader * const header, Uint8 prio, Uint32 * const theData, - LinearSectionPtr ptr[3]){ - Uint32 expectedLength = 0; - if(prio == 0) - expectedLength = 17; - else - expectedLength = 19; - - if(header->theLength != expectedLength){ - ndbout << "Unexpected signal length: " << header->theLength - << " expected: " << expectedLength << endl; - abort(); - } - - if(header->theVerId_signalNumber != expectedLength + 1) - abort(); - - if(header->theReceiversBlockNumber != expectedLength + 2) - abort(); - - if(refToBlock(header->theSendersBlockRef) != expectedLength + 3) - abort(); - - if(header->theSendersSignalId != expectedLength + 5) - abort(); - - if(header->theTrace != expectedLength + 6) - abort(); - - if(header->m_noOfSections != (prio == 0 ? 0 : 1)) - abort(); - - if(header->m_fragmentInfo != (prio + 1)) - abort(); - - Uint32 dataWordStart = header->theLength ; - for(unsigned i = 0; itheLength; i++){ - if(theData[i] != i){ //dataWordStart){ - ndbout << "data corrupt!\n" << endl; - abort(); - } - dataWordStart ^= (~i*i); - } - - if(prio != 0){ - ndbout_c("Found section"); - if(ptr[0].sz != header->theLength) - abort(); - - if(memcmp(ptr[0].p, theData, (ptr[0].sz * 4)) != 0) - abort(); - } -} - -void -sendSignalTo(NodeId nodeId, int prio){ - SignalHeader sh; - sh.theLength = (prio == 0 ? 17 : 19); - sh.theVerId_signalNumber = sh.theLength + 1; - sh.theReceiversBlockNumber = sh.theLength + 2; - sh.theSendersBlockRef = sh.theLength + 3; - sh.theSendersSignalId = sh.theLength + 4; - sh.theSignalId = sh.theLength + 5; - sh.theTrace = sh.theLength + 6; - sh.m_noOfSections = (prio == 0 ? 
0 : 1); - sh.m_fragmentInfo = prio + 1; - - Uint32 theData[25]; - - Uint32 dataWordStart = sh.theLength; - for(unsigned i = 0; itheSendersBlockRef); - - ndbout << "Recieved prio " << (int)prio << " signal from node: " - << nodeId - << " gsn = " << header->theVerId_signalNumber << endl; - checkData(header, prio, theData, ptr); - ndbout << " Data is ok!\n" << endl; - - signalReceived[nodeId]++; - - if(prio == 0) - sendSignalTo(nodeId, 1); - else - tReg->setPerformState(nodeId, PerformDisconnect); -} - -void -copy(Uint32 * & insertPtr, - class SectionSegmentPool & thePool, const SegmentedSectionPtr & _ptr){ - abort(); -} - -void -reportError(void* callbackObj, NodeId nodeId, TransporterError errorCode){ - char buf[255]; - sprintf(buf, "reportError (%d, %x)", nodeId, errorCode); - ndbout << buf << endl; - if(errorCode & 0x8000){ - tReg->setPerformState(nodeId, PerformDisconnect); - abort(); - } -} - -/** - * Report average send theLength in bytes (4096 last sends) - */ -void -reportSendLen(void* callbackObj, NodeId nodeId, Uint32 count, Uint64 bytes){ - char buf[255]; - sprintf(buf, "reportSendLen(%d, %d)", nodeId, (Uint32)(bytes/count)); - ndbout << buf << endl; -} - -/** - * Report average receive theLength in bytes (4096 last receives) - */ -void -reportReceiveLen(void* callbackObj, NodeId nodeId, Uint32 count, Uint64 bytes){ - char buf[255]; - sprintf(buf, "reportReceiveLen(%d, %d)", nodeId, (Uint32)(bytes/count)); - ndbout << buf << endl; -} - -/** - * Report connection established - */ -void -reportConnect(void* callbackObj, NodeId nodeId){ - char buf[255]; - sprintf(buf, "reportConnect(%d)", nodeId); - ndbout << buf << endl; - tReg->setPerformState(nodeId, PerformIO); - - sendSignalTo(nodeId, 0); -} - -/** - * Report connection broken - */ -void -reportDisconnect(void* callbackObj, NodeId nodeId, Uint32 errNo){ - char buf[255]; - sprintf(buf, "reportDisconnect(%d)", nodeId); - ndbout << buf << endl; - if(signalReceived[nodeId] < 2) - tReg->setPerformState(nodeId, PerformConnect); -} - -int -checkJobBuffer() { - /** - * Check to see if jobbbuffers are starting to get full - * and if so call doJob - */ - return 0; -} - -void -createOSETransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName){ - ndbout << "Creating OSE transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - OSE_TransporterConfiguration * conf = (OSE_TransporterConfiguration*)_conf; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -void -createTCPTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName){ - ndbout << "Creating TCP transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." 
<< endl;; - - TCP_TransporterConfiguration * conf = (TCP_TransporterConfiguration*)_conf; - - int port; - if(localNodeId == 1 && remoteNodeId == 2) port = basePortTCP + 0; - if(localNodeId == 1 && remoteNodeId == 3) port = basePortTCP + 1; - if(localNodeId == 2 && remoteNodeId == 1) port = basePortTCP + 0; - if(localNodeId == 2 && remoteNodeId == 3) port = basePortTCP + 2; - if(localNodeId == 3 && remoteNodeId == 1) port = basePortTCP + 1; - if(localNodeId == 3 && remoteNodeId == 2) port = basePortTCP + 2; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - conf->port = port; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -void -createSCITransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName){ - - - ndbout << "Creating SCI transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SCI_TransporterConfiguration * conf = (SCI_TransporterConfiguration*)_conf; - - conf->remoteSciNodeId0= (Uint16)atoi(localHostName); - conf->remoteSciNodeId1= (Uint16)atoi(remoteHostName); - - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -void -createSHMTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName){ - - - ndbout << "Creating SHM transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SHM_TransporterConfiguration * conf = (SHM_TransporterConfiguration*)_conf; - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} diff --git a/storage/ndb/src/common/transporter/buddy.cpp b/storage/ndb/src/common/transporter/buddy.cpp deleted file mode 100644 index 342ef88a6d2..00000000000 --- a/storage/ndb/src/common/transporter/buddy.cpp +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "buddy.hpp" - -void Chunk256::setFree(bool free){ - // Bit 0 of allocationTimeStamp represents if the segment is free or not - Uint32 offMask = 0x0; // A mask to set the 0 bit to 0 - allocationTimeStamp = 0x0; - if(free) - // Set this bit to 0, if segment should be free - allocationTimeStamp = allocationTimeStamp & offMask; -} - -bool Chunk256::getFree(){ - Uint32 offMask = 0x0; - return ((allocationTimeStamp | offMask) == offMask ? true : false); -} - -void Chunk256::setAllocationTimeStamp(Uint32 cTime){ - // Bits 1-31 of allocationTimeStamp represent the allocation time for segment - - // printf("\nSet allocation time. Current time %d", cTime); - Uint32 onMask = 0x80000000; // A mask to set the 0 bit to 1 - allocationTimeStamp = 0x0; - allocationTimeStamp = onMask | cTime; -} - -Uint32 Chunk256::getAllocationTimeStamp(){ - Uint32 onMask = 0x80000000; - allocationTimeStamp = allocationTimeStamp ^ onMask; - printf("\nGet allocation time. Time is %d", allocationTimeStamp); - return allocationTimeStamp; -}; - -bool BuddyMemory::allocate(int nChunksToAllocate) { - - // Allocate the memory block needed. This memory is deallocated in the - // destructor of TransporterRegistry. - - printf("\nAllocating %d chunks...", nChunksToAllocate); - - startOfMemoryBlock = (Uint32*) malloc(256 * nChunksToAllocate); - - if (startOfMemoryBlock == NULL) - return false; - - // Allocate the array of 256-byte chunks - chunk = new Chunk256[nChunksToAllocate]; - - // Initialize the chunk-array. Every 8 kB segment consists of 32 chunks. - // Set all chunks to free and set the prev and next pointer - for (int i=0; i < nChunksToAllocate; i++) { - chunk[i].setFree(true); - if (i%32 == 0) { - // The first chunk in every segment will point to the prev and next segment - chunk[i].prevSegmentOfSameSize = i-32; - chunk[i].nextSegmentOfSameSize = i + 32; - chunk[0].prevSegmentOfSameSize = END_OF_CHUNK_LIST; - chunk[totalNoOfChunks-32].nextSegmentOfSameSize = END_OF_CHUNK_LIST; - } else { - // The rest of the chunks in the segments have undefined prev and next pointers - chunk[i].prevSegmentOfSameSize = UNDEFINED_CHUNK; - chunk[i].nextSegmentOfSameSize = UNDEFINED_CHUNK; - } - } - - // Initialize the freeSegment-pointers - for (int i=0; i 256) - segmSize ++; - printf("\nSegment size: %f", pow(2,int(8+segmSize))); - - while ((segmSize <= sz_GET_MAX) && (freeSegment[segmSize] == UNDEFINED_CHUNK)) - segmSize++; - - segm = freeSegment[segmSize]; - if (segm != UNDEFINED_CHUNK){ - // Free segment of asked size or larger is found - - // Remove the found segment from the freeSegment-list - removeFromFreeSegmentList(segmSize, segm); - - // Set all chunks to allocated (not free) and set the allocation time - // for the segment we are about to allocate - for (int i = segm; i <= segm+nChunksToAllocate; i++) { - chunk[i].setFree(false); - chunk[i].setAllocationTimeStamp(currentTime); - } - - // Before returning the segment, check if it is larger than the segment asked for - if (nChunksAskedFor < nChunksToAllocate) - release(nChunksAskedFor, nChunksToAllocate - nChunksAskedFor - 1); - - Segment segment; - segment.segmentAddress = startOfMemoryBlock+(segm * 256); - segment.segmentSize = 256 * nChunksAskedFor; - segment.releaseId = segm; - - printf("\nSegment: segment address = %d, segment size = %d, release Id = %d", 
- segment.segmentAddress, segment.segmentSize, segment.releaseId); - - return true; - } - printf("\nNo segments of asked size or larger are found"); - return false; -} - -void BuddyMemory::removeFromFreeSegmentList(int sz, int index) { - // Remove the segment from the freeSegment list - - printf("\nRemoving segment from list..."); - if (index != UNDEFINED_CHUNK) { - Chunk256 prevChunk; - Chunk256 nextChunk; - int prevChunkIndex = chunk[index].prevSegmentOfSameSize; - int nextChunkIndex = chunk[index].nextSegmentOfSameSize; - - if (prevChunkIndex == END_OF_CHUNK_LIST) { - if (nextChunkIndex == END_OF_CHUNK_LIST) - // We are about to remove the only element in the list - freeSegment[sz] = UNDEFINED_CHUNK; - else { - // We are about to remove the first element in the list - nextChunk = chunk[nextChunkIndex]; - nextChunk.prevSegmentOfSameSize = END_OF_CHUNK_LIST; - freeSegment[sz] = nextChunkIndex; - } - } else { - if (nextChunkIndex == END_OF_CHUNK_LIST) { - // We are about to remove the last element in the list - prevChunk = chunk[prevChunkIndex]; - prevChunk.nextSegmentOfSameSize = END_OF_CHUNK_LIST; - } else { - // We are about to remove an element in the middle of the list - prevChunk = chunk[prevChunkIndex]; - nextChunk = chunk[nextChunkIndex]; - prevChunk.nextSegmentOfSameSize = nextChunkIndex; - nextChunk.prevSegmentOfSameSize = prevChunkIndex; - } - } - } - for (int i=0; i= 0; i--) { - if (!chunk[i].getFree()) - break; - else { - startChunk = i; - nChunksToRelease++; - // Look at the next-pointer. If it is valid, we have a - // chunk that is the start of a free segment. Remove it - // from the freeSegment-list. - if (chunk[i].nextSegmentOfSameSize != UNDEFINED_CHUNK) - removeFromFreeSegmentList(size, i); - } - } - - // Look at the chunks after the segment we are about to release - for (int i = endChunk+1; i <= totalNoOfChunks; i++) { - if (!chunk[i].getFree()) - break; - else { - endChunk = i; - nChunksToRelease++; - // Look at the next-pointer. If it is valid, we have a - // chunk that is the start of a free segment. Remove it - // from the free segment list - if (chunk[i].nextSegmentOfSameSize != UNDEFINED_CHUNK) - removeFromFreeSegmentList(size, i); - } - } - - // We have the start and end indexes and total no of free chunks. - // Separate the chunks into segments that can be added to the - // freeSegments-list. 
- int restChunk = 0; - int segmSize; - - printf("\n%d chunks to release (finally)", nChunksToRelease); - - segmSize = logTwoPlus(nChunksToRelease) - 1; - if (segmSize > sz_MAX) { - segmSize = sz_MAX; - } - - nChunksToRelease = pow(2,segmSize); - addToFreeSegmentList(nChunksToRelease*256, startChunk); -} - -void BuddyMemory::addToFreeSegmentList(int sz, int index) { - // Add a segment to the freeSegment list - - printf("\nAsked to add segment of size %d", sz); - - // Get an index in freeSegment list corresponding to sz size - int segmSize = logTwoPlus(sz) - 1; - if (sz - pow(2,segmSize) >= 256) - segmSize ++; - sz = segmSize - 8; - - int nextSegm = freeSegment[sz]; - - printf("\nAdding a segment of size %f", pow(2,(8 + sz))); - - freeSegment[sz] = index; - if (nextSegm == UNDEFINED_CHUNK) { - // We are about to add a segment to an empty list - chunk[index].prevSegmentOfSameSize = END_OF_CHUNK_LIST; - chunk[index].nextSegmentOfSameSize = END_OF_CHUNK_LIST; - } - else { - // Add the segment first in the list - chunk[index].prevSegmentOfSameSize = END_OF_CHUNK_LIST; - chunk[index].nextSegmentOfSameSize = nextSegm; - chunk[nextSegm].prevSegmentOfSameSize = index; - } - - for (int i=0; i> 8); - arg = arg | (arg >> 4); - arg = arg | (arg >> 2); - arg = arg | (arg >> 1); - resValue = (arg & 0x5555) + ((arg >> 1) & 0x5555); - resValue = (resValue & 0x3333) + ((resValue >> 2) & 0x3333); - resValue = resValue + (resValue >> 4); - resValue = (resValue & 0xf) + ((resValue >> 8) & 0xf); - - return resValue; -} - -bool BuddyMemory::memoryAvailable() { - // Return true if there is at least 8 kB memory available - for (int i = sz_8192; i < sz_MAX; i++) - if (freeSegment[i] != UNDEFINED_CHUNK) - return true; - return false; -} - - -void BuddyMemory::refreshTime(Uint32 time) { - if (time - currentTime > 1000) { - // Update current time - currentTime = time; - // Go through the chunk-list every second and release - // any chunks that have been allocated for too long - for (int i=0; i ALLOCATION_TIMEOUT)) { - release(i, 256); - printf("\nChunks hve been allocated for too long"); - } - } - } -} diff --git a/storage/ndb/src/common/transporter/buddy.hpp b/storage/ndb/src/common/transporter/buddy.hpp deleted file mode 100644 index 2494a874f92..00000000000 --- a/storage/ndb/src/common/transporter/buddy.hpp +++ /dev/null @@ -1,172 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BUDDY_H -#define BUDDY_H - -#include - -typedef unsigned int Uint32; -typedef unsigned short Uint16; -typedef unsigned long long Uint64; - -// -const int UNDEFINED_CHUNK = -2; // XXX Set to hex - -// -const int END_OF_CHUNK_LIST = -1; // XXX Set to hex - -// A timeout (no of seconds) for the memory segments in the TransporterRegistry -// memory pool. 
If a segment has been occupied (free=false) for a longer period -// than this timeout, it will be released. -const int ALLOCATION_TIMEOUT = 10000; - -// Free segments should always be as large as possible -// and are only allowed to be in any of these sizes -enum FreeSegmentSize { - sz_256 = 0, - sz_512 = 1, - sz_1024 = 2, - sz_2048 = 3, - sz_4096 = 4, - sz_8192 = 5, - sz_16384 = 6, - sz_32768 = 7, - sz_65536 = 8, - sz_131072 = 9, - sz_GET_MAX = 5, - sz_MAX = 9 -}; - -struct Segment; - -class BuddyMemory { -public: - - // Return true if there is at least 8 kB memory available - bool memoryAvailable(); - - // - bool allocate(int nChunksToAllocate); - - // Remove the segment from the freeSegment list - void removeFromFreeSegmentList(int sz, int index); - - // Release the segment of size - void release(int releaseId, int size); - - // Add a segment to the freeSegment list - void addToFreeSegmentList(int sz, int index); - - bool getSegment(Uint32 size, Segment * dst); - - void refreshTime(Uint32 time); - - //Calculate log2(arg) + 1 - Uint32 logTwoPlus(Uint32 arg); - - // The current time - Uint32 currentTime; - - // Pointer to the first free segment of size FreeSegmentSize - Uint32 freeSegment[sz_MAX]; - - // Start address of the memory block allocated - Uint32* startOfMemoryBlock; - - // Total number of 256 byte chunks. - Uint32 totalNoOfChunks; - - // Array of 256-byte chunks - struct Chunk256* chunk; -}; - -struct Segment { - Uint32 segmentSize; // Size of the segment in no of words - Uint16 index; // Index in the array of SegmentListElements - Uint16 releaseId; // Unique no used when releasing the segment - // Undefined if Long_signal.deallocIndicator==0 - union { - Uint32* segmentAddress; // Address to the memory segment - Uint64 _padding_NOT_TO_BE_USED_; - }; -}; - -struct Chunk256 { - Uint32 allocationTimeStamp; // Bit 0 represents if the segment is free or not - // Bit 1-31 is the allocation time for the segment - // Bit 1-31 are undefined if the segment is free - Uint32 nextSegmentOfSameSize; // Undefined if allocated. - // The first chunk in a free segment has a valid - // next-pointer. In the rest of the chunks - // belonging to the segment it is UNDEFINED_CHUNK. - Uint32 prevSegmentOfSameSize; // Undefined if allocated - // The first chunk in a free segment has a valid - // prev-pointer. In the rest of the chunks - // belonging to the segment it is UNDEFINED_CHUNK. - - void setFree(bool free); - - bool getFree(); - - void setAllocationTimeStamp(Uint32 cTime); - - Uint32 getAllocationTimeStamp(); -}; - -// inline void Chunk256::setFree(bool free){ -// // Bit 0 of allocationTimeStamp represents if the segment is free or not -// allocationTimeStamp = 0x0; - -// printf("\nSet free segment"); -// Uint32 offMask = 0x0; // A mask to set the 0 bit to 0 -// if(free) -// // Set this bit to 0, if segment should be free -// allocationTimeStamp = allocationTimeStamp & offMask; -// } - -// inline bool Chunk256::getFree(){ -// // Get free segment - -// allocationTimeStamp = 0x0; -// Uint32 offMask = 0x0; - -// printf("\nGet free segment"); -// return ((allocationTimeStamp | offMask) == offMask ? 
true : false); -// } - -// inline void Chunk256::setAllocationTimeStamp(Uint32 cTime){ -// // Bits 1-31 of allocationTimeStamp represent the allocation time for segment - -// Uint32 onMask = 0x80000000; // A mask to set the 0 bit to 1 -// allocationTimeStamp = 0x0; - -// printf("\nSet allocation time"); - -// allocationTimeStamp = onMask | cTime; -// } - -// inline Uint32 Chunk256::getAllocationTimeStamp(){ - -// Uint32 onMask = 0x80000000; // A mask to set the 0 bit to 1 -// allocationTimeStamp = 0x0; - -// printf("\nGet allocation time"); -// allocationTimeStamp = allocationTimeStamp ^ onMask; -// return allocationTimeStamp; -// }; - -#endif diff --git a/storage/ndb/src/common/transporter/failoverSCI/Makefile b/storage/ndb/src/common/transporter/failoverSCI/Makefile deleted file mode 100644 index 1e3d5f4a4b7..00000000000 --- a/storage/ndb/src/common/transporter/failoverSCI/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := failoverSCI -BIN_TARGET_LIBS := sisci -BIN_TARGET_ARCHIVES := portlib - -CCFLAGS_LOC += -I.. - -SOURCES = failoverSCI.cpp - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp b/storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp deleted file mode 100644 index b32c839c3b4..00000000000 --- a/storage/ndb/src/common/transporter/failoverSCI/failoverSCI.cpp +++ /dev/null @@ -1,863 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "sisci_types.h" -#include "sisci_api.h" -#include "sisci_error.h" -//#include "sisci_demolib.h" -#include -#include -#define NO_CALLBACK NULL -#define NO_FLAGS 0 -#define DATA_TRANSFER_READY 8 - -sci_error_t error; -sci_desc_t sdOne; -sci_desc_t sdTwo; -sci_local_segment_t localSegmentOne; -sci_local_segment_t localSegmentTwo; -sci_remote_segment_t remoteSegmentOne; -sci_remote_segment_t remoteSegmentTwo; -sci_map_t localMapOne; -sci_map_t localMapTwo; -sci_map_t remoteMapOne; -sci_map_t remoteMapTwo; -unsigned int localAdapterNo = 0; -unsigned int standbyAdapterNo = 1; -unsigned int localNodeId1; -unsigned int localNodeId2; -unsigned int remoteNodeId1 = 0; -unsigned int remoteNodeId2 = 0; -unsigned int localSegmentId; -unsigned int remoteSegmentId1; -unsigned int remoteSegmentId2; -unsigned int segmentSize = 8192; -unsigned int offset = 0; -unsigned int client = 0; -unsigned int server = 0; -unsigned int *localbufferPtr; -static int data; -static int interruptConnected=0; - -/*********************************************************************************/ -/* U S A G E */ -/* */ -/*********************************************************************************/ - -void Usage() -{ - printf("Usage of shmem\n"); - printf("shmem -rn -client/server [ -adapterno -size ] \n\n"); - printf(" -rn : Remote node-id\n"); - printf(" -client : The local node is client\n"); - printf(" -server : The local node is server\n"); - printf(" -adapterno : Local adapter number (default %d)\n", localAdapterNo); - printf(" -size : Segment block size (default %d)\n", segmentSize); - printf(" -help : This helpscreen\n"); - - printf("\n"); -} - - -/*********************************************************************************/ -/* P R I N T P A R A M E T E R S */ -/* */ -/*********************************************************************************/ -void PrintParameters(void) -{ - - printf("Test parameters for %s \n",(client) ? "client" : "server" ); - printf("----------------------------\n\n"); - printf("Local node-id1 : %d\n",localNodeId1); - printf("Local node-id2 : %d\n",localNodeId2); - // printf("Remote node-id : %d\n",remoteNodeId); - printf("Local adapter no. 
: %d\n",localAdapterNo); - printf("Segment size : %d\n",segmentSize); - printf("----------------------------\n\n"); - -} - - -/*********************************************************************************/ -/* F I L L S E G M E N T W I T H D A T A */ -/* */ -/*********************************************************************************/ - -sci_error_t GetLocalNodeId(Uint32 localAdapterNo, Uint32* localNodeId) -{ - sci_query_adapter_t queryAdapter; - sci_error_t error; - unsigned int _localNodeId; - - queryAdapter.subcommand = SCI_Q_ADAPTER_NODEID; - queryAdapter.localAdapterNo = localAdapterNo; - queryAdapter.data = &_localNodeId; - - SCIQuery(SCI_Q_ADAPTER,&queryAdapter,NO_FLAGS,&error); - - *localNodeId=_localNodeId; - - return error; -} - - - - - - -sci_error_t SendInterrupt(sci_desc_t sd, - Uint32 localAdapterNo, - Uint32 localSciNodeId, - Uint32 remoteSciNodeId, - Uint32 interruptNo){ - - sci_error_t error; - sci_remote_interrupt_t remoteInterrupt; - Uint32 timeOut = SCI_INFINITE_TIMEOUT; - - // Now connect to the other sides interrupt flag - do { - SCIConnectInterrupt(sd, &remoteInterrupt, remoteSciNodeId, localAdapterNo, - interruptNo, timeOut, NO_FLAGS, &error); - } while (error != SCI_ERR_OK); - - if (error != SCI_ERR_OK) { - fprintf(stderr, "SCIConnectInterrupt failed - Error code 0x%x\n", error); - return error; - } - - // Trigger interrupt - printf("\nNode %u sent interrupt (0x%x) to node %d\n",localSciNodeId, interruptNo, remoteSciNodeId); - SCITriggerInterrupt(remoteInterrupt, NO_FLAGS, &error); - if (error != SCI_ERR_OK) { - fprintf(stderr, "SCITriggerInterrupt failed - Error code 0x%x\n", error); - return error; - } - - - // Disconnect and remove interrupts - SCIDisconnectInterrupt(remoteInterrupt, NO_FLAGS, &error); - if (error != SCI_ERR_OK) { - fprintf(stderr, "SCIDisconnectInterrupt failed - Error code 0x%x\n", error); - return error; - } - - return error; -} - - -sci_error_t ReceiveInterrupt(sci_desc_t sd, - Uint32 localAdapterNo, - Uint32 localSciNodeId, - Uint32 interruptNo, - Uint32 timeout) { - - sci_error_t error; - sci_local_interrupt_t localInterrupt; - Uint32 timeOut = SCI_INFINITE_TIMEOUT; - - // Create an interrupt - SCICreateInterrupt(sd, &localInterrupt, localAdapterNo, - &interruptNo, 0, NULL, SCI_FLAG_FIXED_INTNO, &error); - if (error != SCI_ERR_OK) { - fprintf(stderr, "SCICreateInterrupt failed - Error code 0x%x\n", error); - return error; - } - - - // Wait for an interrupt - SCIWaitForInterrupt(localInterrupt, timeOut, NO_FLAGS, &error); - - printf("\nNode %u received interrupt (0x%x)\n", localSciNodeId, interruptNo); - - // Remove interrupt - - SCIRemoveInterrupt(localInterrupt, NO_FLAGS, &error); - if (error != SCI_ERR_OK) { - fprintf(stderr, "SCIRemoveInterrupt failed - Error code 0x%x\n", error); - return error; - } - return error; -} - - -sci_error_t FillSegmentWithData(unsigned int segmentSize, int reverse) -{ - unsigned int i; - unsigned int nostores; - - - nostores = (segmentSize) / sizeof(unsigned int); - - /* Allocate buffer */ - - localbufferPtr = (unsigned int*)malloc( segmentSize ); - if ( localbufferPtr == NULL ) { - /* - * Unable to create local buffer - Insufficient memory available - */ - return SCI_ERR_NOSPC; - } - if(reverse) { - /* Fill in the data into a local buffer */ - printf("Filling forward order \n"); - for (i=0;i\n"); - // exit(-1); - //} - - if (server == 0 && client == 0) { - fprintf(stderr,"You must specify a client node or a server node\n"); - exit(-1); - } - - if (server == 1 && client == 1) { - 
fprintf(stderr,"Both server node and client node is selected.\n"); - fprintf(stderr,"You must specify either a client or a server node\n"); - exit(-1); - } - - - /* Initialize the SISCI library */ - SCIInitialize(NO_FLAGS, &error); - if (error != SCI_ERR_OK) { - fprintf(stderr,"SCIInitialize failed - Error code: 0x%x\n",error); - exit(error); - } - - - /* Open a file descriptor */ - SCIOpen(&sdOne,NO_FLAGS,&error); - if (error != SCI_ERR_OK) { - if (error == SCI_ERR_INCONSISTENT_VERSIONS) { - fprintf(stderr,"Version mismatch between SISCI user library and SISCI driver\n"); - } - fprintf(stderr,"SCIOpen failed - Error code 0x%x\n",error); - exit(error); - } - - /* Open a file descriptor */ - SCIOpen(&sdTwo,NO_FLAGS,&error); - if (error != SCI_ERR_OK) { - if (error == SCI_ERR_INCONSISTENT_VERSIONS) { - fprintf(stderr,"Version mismatch between SISCI user library and SISCI driver\n"); - } - fprintf(stderr,"SCIOpen failed - Error code 0x%x\n",error); - exit(error); - } - - - /* Get local node-id */ - error = GetLocalNodeId(localAdapterNo, &localNodeId1); - error = GetLocalNodeId(standbyAdapterNo, &localNodeId2); - if (error != SCI_ERR_OK) { - fprintf(stderr,"Could not find the local adapter %d\n", localAdapterNo); - SCIClose(sdOne,NO_FLAGS,&error); - SCIClose(sdTwo,NO_FLAGS,&error); - exit(-1); - } - - - /* Print parameters */ - PrintParameters(); - - if (client) { - remoteNodeId1=324; - remoteNodeId2=328; - ShmemClientNode(); - } else { - remoteNodeId1=452; - remoteNodeId2=456; - ShmemServerNode(); - } - - /* Close the file descriptor */ - SCIClose(sdOne,NO_FLAGS,&error); - SCIClose(sdTwo,NO_FLAGS,&error); - if (error != SCI_ERR_OK) { - fprintf(stderr,"SCIClose failed - Error code: 0x%x\n",error); - } - - - /* Free allocated resources */ - SCITerminate(); - - return SCI_ERR_OK; -} - - - - - - - - - - - - - - - - - diff --git a/storage/ndb/src/common/transporter/perftest/Makefile b/storage/ndb/src/common/transporter/perftest/Makefile deleted file mode 100644 index 01869e1acf9..00000000000 --- a/storage/ndb/src/common/transporter/perftest/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := perfTransporterTest -BIN_TARGET_ARCHIVES := transporter portlib general - -SOURCES = perfTransporterTest.cpp - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp b/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp deleted file mode 100644 index 3aca596ea70..00000000000 --- a/storage/ndb/src/common/transporter/perftest/perfTransporterTest.cpp +++ /dev/null @@ -1,712 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "TransporterRegistry.hpp" -#include "TransporterDefinitions.hpp" -#include "TransporterCallback.hpp" -#include - -#include -#include -#include -#include - -int basePortTCP = 17000; - -SCI_TransporterConfiguration sciTemplate = { - 2000, - // Packet size - 2000000, // Buffer size - 2, // number of adapters - 1, // remote node id SCI - 2, // Remote node Id SCI - 0, // local ndb node id (server) - 0, // remote ndb node id (client) - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - - -SHM_TransporterConfiguration shmTemplate = { - 0, //remoteNodeId - 0, //localNodeId; - false, //compression - true, //checksum; - true, //signalId; - 0, //byteOrder; - 123, //shmKey; - 25000000 //shmSize; -}; - - -TCP_TransporterConfiguration tcpTemplate = { - 17000, // port; - "", // remoteHostName; - "", // localhostname - 2, // remoteNodeId; - 1, // localNodeId; - 25000000, // sendBufferSize - Size of SendBuffer of priority B - 5000000, // maxReceiveSize - Maximum no of bytes to receive - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - -TransporterRegistry *tReg = 0; - -#include - -extern "C" -void -signalHandler(int signo){ - ::signal(13, signalHandler); - char buf[255]; - sprintf(buf,"Signal: %d\n", signo); - ndbout << buf << endl; -} - -void -usage(const char * progName){ - ndbout << "Usage: " << progName << " localNodeId localHostName" - << " remoteHostName" - << " [] [] []" << endl; - ndbout << " type = shm tcp ose sci" << endl; - ndbout << " localNodeId - {1,2}" << endl; -} - -typedef void (* CreateTransporterFunc)(void * conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf); - -void -createTCPTransporter(void*, NodeId, NodeId, const char*, const char*, int, int); -void -createSHMTransporter(void*, NodeId, NodeId, const char*, const char*, int, int); -void -createSCITransporter(void*, NodeId, NodeId, const char*, const char*, int, int); - -struct TestPhase { - int signalSize; - int noOfSignals; - int noOfSignalSent; - int noOfSignalReceived; - NDB_TICKS startTime; - NDB_TICKS stopTime; - NDB_TICKS accTime; - int loopCount; - Uint64 sendLenBytes, sendCount; - Uint64 recvLenBytes, recvCount; -}; - -TestPhase testSpec[] = { - { 1, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 1, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 1, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 1, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 8, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 8, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 8, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 8, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 16, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 16, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 16, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 16, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 24, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 24, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 24, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 24, 10000, 
0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 0, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of random size - ,{ 0, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of random size - ,{ 0, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of random size - ,{ 0, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of random size - - ,{ 100, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals - ,{ 100, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals - ,{ 100, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals - ,{ 100, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals - - ,{ 500, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals - ,{ 500, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals - ,{ 500, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals - ,{ 500, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals - - ,{ 1000, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals - ,{ 1000, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals - ,{ 1000, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals - ,{ 1000, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals -}; - -const int noOfTests = sizeof(testSpec)/sizeof(TestPhase); - -Uint32 StaticBuffer[1000]; - -SendStatus -sendSignalTo(NodeId nodeId, int signalSize, Uint32 count){ - if(signalSize == 0) - signalSize = (rand() % 25) + 1; - - SignalHeader sh; - sh.theLength = (signalSize > 25 ? 25 : signalSize); - sh.theVerId_signalNumber = count; - sh.theReceiversBlockNumber = rand(); - sh.theSendersBlockRef = rand(); - sh.theSendersSignalId = rand(); - sh.theSignalId = rand(); - sh.theTrace = rand(); - - Uint32 theData[25]; - for(int i = 0; i<25; i++) - theData[i] = (i+1) * (Uint32)(&theData[i]); - - theData[0] = count; - LinearSectionPtr ptr[3]; - - if(signalSize <= 25){ - sh.m_noOfSections = 0; - } else { - sh.m_noOfSections = 1; - ptr[0].sz = signalSize - 25; - ptr[0].p = &StaticBuffer[0]; - } - - return tReg->prepareSend(&sh, 1, theData, nodeId, ptr); -} - -void -reportHeader(){ - ndbout << "#Sigs\tSz\tTime\tSig/sec\tBps\tBps-tot\t" - << "s len\tr len" << endl; -} - -void -print(char * dst, int i){ - if(i > 1000000){ - const int d = i / 1000000; - const int r = (i - (d * 1000000)) / 100000; - if(d < 100) - sprintf(dst, "%d.%dM", d, r); - else - sprintf(dst, "%dM", d); - } else if(i > 1000){ - const int d = i / 1000; - const int r = (i - (d * 1000)) / 100; - if(d < 100) - sprintf(dst, "%d.%dk", d, r); - else - sprintf(dst, "%dk", d); - } else { - sprintf(dst, "%d", i); - } -} - -void -printReport(TestPhase & p){ - if(p.accTime > 0) { - Uint32 secs = (p.accTime/p.loopCount)/1000; - Uint32 mill = (p.accTime/p.loopCount)%1000; - char st[255]; - if(secs > 0){ - sprintf(st, "%d.%.2ds", secs, (mill/10)); - } else { - sprintf(st, "%dms", mill); - } - - Uint32 sps = (1000*p.noOfSignals*p.loopCount)/p.accTime; - Uint32 dps = ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*p.signalSize); - Uint32 bps = ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*(p.signalSize+3)); - if(p.signalSize == 0){ - dps = ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*(13)); - bps = ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*(13+3)); - } - char ssps[255]; - char sbps[255]; - char sdps[255]; - - print(ssps, sps); - print(sbps, bps); - print(sdps, dps); - - - char buf[255]; - if(p.signalSize != 0){ - BaseString::snprintf(buf, 255, - "%d\t%d\t%s\t%s\t%s\t%s\t%d\t%d", - p.noOfSignals, - 4*p.signalSize, - st, - ssps, - sdps, - sbps, - (int)(p.sendLenBytes / (p.sendCount == 0 ? 1 : p.sendCount)), - (int)(p.recvLenBytes / (p.recvCount == 0 ? 
1 : p.recvCount))); - } else { - BaseString::snprintf(buf, 255, - "%d\trand\t%s\t%s\t%s\t%s\t%d\t%d", - p.noOfSignals, - st, - ssps, - sdps, - sbps, - (int)(p.sendLenBytes / (p.sendCount == 0 ? 1 : p.sendCount)), - (int)(p.recvLenBytes / (p.recvCount == 0 ? 1 : p.recvCount))); - - } - ndbout << buf << endl; - } -} - -int loopCount = 1; -int sendBufSz = -1; -int recvBufSz = -1; - -bool isClient = false; -bool isConnected = false; -bool isStarted = false; -int currentPhase = 0; -TestPhase allPhases[noOfTests]; -Uint32 signalToEcho; -Uint32 signalsEchoed; -NDB_TICKS startTime, stopTime; - -void -client(NodeId remoteNodeId){ - isClient = true; - - currentPhase = 0; - memcpy(allPhases, testSpec, sizeof(testSpec)); - - int counter = 0; - int sigCounter = 0; - - while(true){ - TestPhase * current = &allPhases[currentPhase]; - if(current->noOfSignals == current->noOfSignalSent && - current->noOfSignals == current->noOfSignalReceived){ - - /** - * Test phase done - */ - current->stopTime = NdbTick_CurrentMillisecond(); - current->accTime += (current->stopTime - current->startTime); - - NdbSleep_MilliSleep(500 / loopCount); - - current->startTime = NdbTick_CurrentMillisecond(); - - current->noOfSignalSent = 0; - current->noOfSignalReceived = 0; - - current->loopCount ++; - if(current->loopCount == loopCount){ - - printReport(allPhases[currentPhase]); - - currentPhase ++; - if(currentPhase == noOfTests){ - /** - * Now we are done - */ - break; - } - NdbSleep_MilliSleep(500); - current = &allPhases[currentPhase]; - current->startTime = NdbTick_CurrentMillisecond(); - } - } - - int signalsLeft = current->noOfSignals - current->noOfSignalSent; - if(signalsLeft > 0){ - for(; signalsLeft > 0; signalsLeft--){ - if(sendSignalTo(remoteNodeId,current->signalSize,sigCounter)== SEND_OK){ - current->noOfSignalSent++; - sigCounter++; - } else { - ndbout << "Failed to send: " << sigCounter << endl; - tReg->external_IO(10); - break; - } - } - } - if(counter % 10 == 0) - tReg->checkConnections(); - tReg->external_IO(0); - counter++; - } -} - -void -server(){ - isClient = false; - - signalToEcho = 0; - signalsEchoed = 0; - for(int i = 0; i signalsEchoed){ - tReg->checkConnections(); - for(int i = 0; i<10; i++) - tReg->external_IO(10); - } -} - -int -main(int argc, const char **argv){ - - const char * progName = argv[0]; - - loopCount = 100; - sendBufSz = -1; - recvBufSz = -1; - - isClient = false; - isConnected = false; - isStarted = false; - currentPhase = 0; - - signalHandler(0); - - if(argc < 5){ - usage(progName); - return 0; - } - - const char * type = argv[1]; - const NodeId localNodeId = atoi(argv[2]); - const char * localHostName = argv[3]; - const char * remoteHost1 = argv[4]; - - if(argc >= 6) - loopCount = atoi(argv[5]); - if(argc >= 7) - sendBufSz = atoi(argv[6]); - if(argc >= 8) - recvBufSz = atoi(argv[7]); - - if(localNodeId < 1 || localNodeId > 2){ - ndbout << "localNodeId = " << localNodeId << endl << endl; - usage(progName); - return 0; - } - - if(localNodeId == 1) - ndbout << "-- ECHO CLIENT --" << endl; - else - ndbout << "-- ECHO SERVER --" << endl; - - ndbout << "localNodeId: " << localNodeId << endl; - ndbout << "localHostName: " << localHostName << endl; - ndbout << "remoteHost1 (node " << (localNodeId == 1?2:1) << "): " - << remoteHost1 << endl; - ndbout << "Loop count: " << loopCount << endl; - ndbout << "-----------------" << endl; - - void * confTemplate = 0; - CreateTransporterFunc func = 0; - if(strcasecmp(type, "tcp") == 0){ - func = createTCPTransporter; - confTemplate = &tcpTemplate; - 
} else if(strcasecmp(type, "sci") == 0){ - func = createSCITransporter; - confTemplate = &sciTemplate; - } else if(strcasecmp(type, "shm") == 0){ - func = createSHMTransporter; - confTemplate = &shmTemplate; - } else { - ndbout << "Unsupported transporter type" << endl; - return 0; - } - - ndbout << "Creating transporter registry" << endl; - tReg = new TransporterRegistry; - tReg->init(localNodeId); - - switch(localNodeId){ - case 1: - (* func)(confTemplate, 1, 2, localHostName, remoteHost1, - sendBufSz, recvBufSz); - break; - case 2: - (* func)(confTemplate, 2, 1, localHostName, remoteHost1, - sendBufSz, recvBufSz); - break; - } - - ndbout << "Doing startSending/startReceiving" << endl; - tReg->startSending(); - tReg->startReceiving(); - - ndbout << "Connecting" << endl; - tReg->setPerformState(PerformConnect); - tReg->checkConnections(); - - if(localNodeId == 1) - client(2); - else - server(); - - isStarted = false; - - ndbout << "Sleep 3 secs" << endl; - NdbSleep_SecSleep(3); - - ndbout << "Doing setPerformState(Disconnect)" << endl; - tReg->setPerformState(PerformDisconnect); - - ndbout << "Doing checkConnections()" << endl; - tReg->checkConnections(); - - ndbout << "Deleting transporter registry" << endl; - delete tReg; tReg = 0; - - return 0; -} - -void -execute(void* callbackObj, SignalHeader * const header, Uint8 prio, - Uint32 * const theData, - LinearSectionPtr ptr[3]){ - const NodeId nodeId = refToNode(header->theSendersBlockRef); - - if(isClient){ - allPhases[currentPhase].noOfSignalReceived++; - } else { - int sleepTime = 10; - if(theData[0] != signalsEchoed){ - ndbout << "Missing signal theData[0] = " << theData[0] - << " signalsEchoed = " << signalsEchoed << endl; - ndbout << (* header) << endl; - abort(); - } - while(tReg->prepareSend(header, prio, theData, nodeId, ptr) != SEND_OK){ - ndbout << "Failed to echo " << theData[0] << endl; - NdbSleep_MilliSleep(sleepTime); - // sleepTime += 10; - } - signalsEchoed++; - } -} - -void -copy(Uint32 * & insertPtr, - class SectionSegmentPool & thePool, const SegmentedSectionPtr & _ptr){ - abort(); -} - -void -reportError(void* callbackObj, NodeId nodeId, TransporterError errorCode){ - char buf[255]; - sprintf(buf, "reportError (%d, %x) in perfTest", nodeId, errorCode); - ndbout << buf << endl; - if(errorCode & 0x8000 && errorCode != 0x8014){ - abort(); //tReg->setPerformState(nodeId, PerformDisconnect); - } -} - -/** - * Report average send theLength in bytes (4096 last sends) - */ -void -reportSendLen(void* callbackObj, NodeId nodeId, Uint32 count, Uint64 bytes){ - allPhases[currentPhase].sendCount += count; - allPhases[currentPhase].sendLenBytes += bytes; - - if(!isClient){ - ndbout << "reportSendLen(" << nodeId << ", " - << (bytes/count) << ")" << endl; - } -} - -/** - * Report average receive theLength in bytes (4096 last receives) - */ -void -reportReceiveLen(void* callbackObj, NodeId nodeId, Uint32 count, Uint64 bytes){ - allPhases[currentPhase].recvCount += count; - allPhases[currentPhase].recvLenBytes += bytes; - - if(!isClient){ - ndbout << "reportReceiveLen(" << nodeId << ", " - << (bytes/count) << ")" << endl; - } -} - -/** - * Report connection established - */ -void -reportConnect(void* callbackObj, NodeId nodeId){ - char buf[255]; - sprintf(buf, "reportConnect(%d)", nodeId); - ndbout << buf << endl; - tReg->setPerformState(nodeId, PerformIO); - - if(!isStarted){ - isStarted = true; - startTime = NdbTick_CurrentMillisecond(); - if(isClient){ - reportHeader(); - allPhases[0].startTime = startTime; - } - } - else{ - // 
Resend signals that were lost when connection failed - TestPhase * current = &allPhases[currentPhase]; - current->noOfSignalSent = current->noOfSignalReceived; - } -} - -/** - * Report connection broken - */ -void -reportDisconnect(void* callbackObj, NodeId nodeId, Uint32 errNo){ - char buf[255]; - sprintf(buf, "reportDisconnect(%d)", nodeId); - ndbout << buf << endl; - - if(isStarted) - tReg->setPerformState(nodeId, PerformConnect); -} - - -int -checkJobBuffer() { - /** - * Check to see if jobbbuffers are starting to get full - * and if so call doJob - */ - return 0; -} - -void -createSCITransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendbuf, - int recvbuf) { - - - ndbout << "Creating SCI transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SCI_TransporterConfiguration * conf = (SCI_TransporterConfiguration*)_conf; - - conf->remoteSciNodeId0= (Uint16)atoi(localHostName); - conf->remoteSciNodeId1= (Uint16)atoi(remoteHostName); - - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -void -createSHMTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendbuf, - int recvbuf) { - - - ndbout << "Creating SHM transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SHM_TransporterConfiguration * conf = (SHM_TransporterConfiguration*)_conf; - - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - - -void -createTCPTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf){ - ndbout << "Creating TCP transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - TCP_TransporterConfiguration * conf = (TCP_TransporterConfiguration*)_conf; - - int port; - if(localNodeId == 1 && remoteNodeId == 2) port = basePortTCP + 0; - if(localNodeId == 1 && remoteNodeId == 3) port = basePortTCP + 1; - if(localNodeId == 2 && remoteNodeId == 1) port = basePortTCP + 0; - if(localNodeId == 2 && remoteNodeId == 3) port = basePortTCP + 2; - if(localNodeId == 3 && remoteNodeId == 1) port = basePortTCP + 1; - if(localNodeId == 3 && remoteNodeId == 2) port = basePortTCP + 2; - - if(sendBuf != -1){ - conf->sendBufferSize = sendBuf; - } - if(recvBuf != -1){ - conf->maxReceiveSize = recvBuf; - } - - ndbout << "\tSendBufferSize: " << conf->sendBufferSize << endl; - ndbout << "\tReceiveBufferSize: " << conf->maxReceiveSize << endl; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - conf->port = port; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... 
-- Failure " << endl; -} diff --git a/storage/ndb/src/common/transporter/priotest/Makefile b/storage/ndb/src/common/transporter/priotest/Makefile deleted file mode 100644 index 483fc0f1f07..00000000000 --- a/storage/ndb/src/common/transporter/priotest/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -SOURCES = prioTransporterTest.cpp -ARCHIVE_TARGET := libpriotransportertest.a - -DIRS := prioTCP prioSHM prioSCI - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/priotest/prioSCI/Makefile b/storage/ndb/src/common/transporter/priotest/prioSCI/Makefile deleted file mode 100644 index 7d403539bf3..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioSCI/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -include .defs.mk - -TYPE := ndbapi -BIN_TARGET := prioSCI -BIN_TARGET_LIBS := sisci -BIN_TARGET_ARCHIVES := priotransportertest transporter portlib general - -CCFLAGS_LOC += -I.. - -SOURCES = prioSCI.cpp - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp b/storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp deleted file mode 100644 index 2d8ee8de979..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioSCI/prioSCI.cpp +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -NDB_COMMAND(prioSCI, "prioSCI", "prioSCI", "Test the SCI Transporter", 65535) -{ - basePortTCP = 17000; - return prioTransporterTest(TestSCI, "prioSCI", argc, argv); -} - - - - diff --git a/storage/ndb/src/common/transporter/priotest/prioSHM/Makefile b/storage/ndb/src/common/transporter/priotest/prioSHM/Makefile deleted file mode 100644 index a827c6e3f1e..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioSHM/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := prioSHM -BIN_TARGET_ARCHIVES := priotransportertest transporter portlib general - -CCFLAGS_LOC += -I.. - -SOURCES = prioSHM.cpp - -include $(NDB_TOP)/Epilogue.mk - diff --git a/storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp b/storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp deleted file mode 100644 index 4447576987b..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioSHM/prioSHM.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -NDB_COMMAND(prioSHM, "prioSHM", "prioSHM", "Test the SHM Transporter", 65535) -{ - basePortTCP = 17000; - return prioTransporterTest(TestSHM, "prioSHM", argc, argv); -} - diff --git a/storage/ndb/src/common/transporter/priotest/prioTCP/Makefile b/storage/ndb/src/common/transporter/priotest/prioTCP/Makefile deleted file mode 100644 index 92abf3e7424..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioTCP/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include .defs.mk - -TYPE := ndbapi - -BIN_TARGET := prioTCP -BIN_TARGET_ARCHIVES := priotransportertest transporter portlib general - -CCFLAGS_LOC += -I.. - -SOURCES = prioTCP.cpp - -include $(NDB_TOP)/Epilogue.mk - diff --git a/storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp b/storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp deleted file mode 100644 index ddd549b9818..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioTCP/prioTCP.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -NDB_COMMAND(prioTCP, "prioTCP", "prioTCP", "Test the TCP Transporter", 65535) -{ - basePortTCP = 17000; - return prioTransporterTest(TestTCP, "prioTCP", argc, argv); -} - diff --git a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp deleted file mode 100644 index 581bc88ac83..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.cpp +++ /dev/null @@ -1,708 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "TransporterRegistry.hpp" -#include "TransporterDefinitions.hpp" -#include "TransporterCallback.hpp" -#include - -#include "prioTransporterTest.hpp" - -#include -#include -#include -#include - -int basePortTCP = 17000; - -SCI_TransporterConfiguration sciTemplate = { - 2000, - // Packet size - 2000000, // Buffer size - 2, // number of adapters - 1, // remote node id SCI - 2, // Remote node Id SCI - 0, // local ndb node id (server) - 0, // remote ndb node id (client) - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - - -SHM_TransporterConfiguration shmTemplate = { - 100000, // shmSize - 0, // shmKey - 1, // local ndb node id (server) - 2, // remote ndb node id (client) - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - -TCP_TransporterConfiguration tcpTemplate = { - 17000, // port; - "", // remoteHostName; - "", // localhostname - 2, // remoteNodeId; - 1, // localNodeId; - 2000000, // sendBufferSize - Size of SendBuffer of priority B - 2000, // maxReceiveSize - Maximum no of bytes to receive - 0, // byteOrder; - false, // compression; - true, // checksum; - true // signalId; -}; - -TransporterRegistry *tReg = 0; - -#include - -extern "C" -void -signalHandler(int signo){ - ::signal(13, signalHandler); - char buf[255]; - sprintf(buf,"Signal: %d\n", signo); - ndbout << buf << endl; -} - -void -usage(const char * progName){ - ndbout << "Usage: " << progName << " localNodeId localHostName" - << " remoteHostName" - << " [] [] []" << endl; - ndbout << " localNodeId - {1,2}" << endl; -} - -typedef void (* CreateTransporterFunc)(void * conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf); - -void -createSCITransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendbuf, - int recvbuf) { - - - ndbout << "Creating SCI transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SCI_TransporterConfiguration * conf = (SCI_TransporterConfiguration*)_conf; - - conf->remoteSciNodeId0= (Uint16)atoi(localHostName); - conf->remoteSciNodeId1= (Uint16)atoi(remoteHostName); - - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -void -createSHMTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendbuf, - int recvbuf) { - - - ndbout << "Creating SHM transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - - SHM_TransporterConfiguration * conf = (SHM_TransporterConfiguration*)_conf; - - - conf->localNodeId = localNodeId; - conf->remoteNodeId = remoteNodeId; - - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... 
-- Failure " << endl; -} - - -void -createTCPTransporter(void * _conf, - NodeId localNodeId, - NodeId remoteNodeId, - const char * localHostName, - const char * remoteHostName, - int sendBuf, - int recvBuf){ - ndbout << "Creating TCP transporter from node " - << localNodeId << "(" << localHostName << ") to " - << remoteNodeId << "(" << remoteHostName << ")..." << endl;; - - TCP_TransporterConfiguration * conf = (TCP_TransporterConfiguration*)_conf; - - int port; - if(localNodeId == 1 && remoteNodeId == 2) port = basePortTCP + 0; - if(localNodeId == 1 && remoteNodeId == 3) port = basePortTCP + 1; - if(localNodeId == 2 && remoteNodeId == 1) port = basePortTCP + 0; - if(localNodeId == 2 && remoteNodeId == 3) port = basePortTCP + 2; - if(localNodeId == 3 && remoteNodeId == 1) port = basePortTCP + 1; - if(localNodeId == 3 && remoteNodeId == 2) port = basePortTCP + 2; - - if(sendBuf != -1){ - conf->sendBufferSize = sendBuf; - } - if(recvBuf != -1){ - conf->maxReceiveSize = recvBuf; - } - - ndbout << "\tSendBufferSize: " << conf->sendBufferSize << endl; - ndbout << "\tReceiveBufferSize: " << conf->maxReceiveSize << endl; - - conf->localNodeId = localNodeId; - conf->localHostName = localHostName; - conf->remoteNodeId = remoteNodeId; - conf->remoteHostName = remoteHostName; - conf->port = port; - bool res = tReg->createTransporter(conf); - if(res) - ndbout << "... -- Success " << endl; - else - ndbout << "... -- Failure " << endl; -} - -struct TestPhase { - int signalSize; - int noOfSignals; - int noOfSignalSent; - int noOfSignalReceived; - NDB_TICKS startTime; - NDB_TICKS stopTime; - - NDB_TICKS startTimePrioA; - NDB_TICKS stopTimePrioA; - NDB_TICKS totTimePrioA; - int bytesSentBeforePrioA; - NDB_TICKS accTime; - int loopCount; - Uint64 sendLenBytes, sendCount; - Uint64 recvLenBytes, recvCount; -}; - -TestPhase testSpec[] = { - { 1, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 1, 10000, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 1, 10000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 1, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 8, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 8, 10000, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 8, 10000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 8, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 16, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 16, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 16, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 16, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 24, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of size 1 word - ,{ 24, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of size 1 word - ,{ 24, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of size 1 word - ,{ 24, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of size 1 word - - ,{ 0, 10, 0,0, 0,0,0,0,0,0,0 } // 10 signals of random size - ,{ 0, 100, 0,0, 0,0,0,0,0,0,0 } // 100 signals of random size - ,{ 0, 1000, 0,0, 0,0,0,0,0,0,0 } // 1000 signals of random size - ,{ 0, 10000, 0,0, 0,0,0,0,0,0,0 } // 10000 signals of random size -}; - -const int noOfTests = sizeof(testSpec)/sizeof(TestPhase); - -SendStatus -sendSignalTo(NodeId nodeId, int signalSize, int prio){ - if(signalSize == 0) - signalSize = (rand() % 25) + 1; - - SignalHeader sh; - sh.theLength = signalSize; - sh.theVerId_signalNumber = rand(); - sh.theReceiversBlockNumber = rand(); - sh.theSendersBlockRef = rand(); - sh.theSendersSignalId = 
rand(); - sh.theSignalId = rand(); - sh.theTrace = rand(); - - Uint32 theData[25]; - for(int i = 0; iprepareSend(&sh, prio, theData, nodeId); -} - -void -reportHeader(){ - ndbout << "#Sigs\tSz\tPayload\tTime\tSig/sec\tBps\t" - << "s len\tr len\tprioAtime\tbytesb4pA" << endl; -} - -void -printReport(TestPhase & p){ - if(p.accTime > 0) { - Uint32 secs = (p.accTime/p.loopCount)/1000; - Uint32 mill = (p.accTime/p.loopCount)%1000; - char st[255]; - if(secs > 0){ - sprintf(st, "%d.%.2ds", secs, (mill/10)); - } else { - sprintf(st, "%dms", mill); - } - - Uint32 sps = (1000*p.noOfSignals*p.loopCount)/p.accTime; - Uint32 bps = ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*(p.signalSize+3)); - if(p.signalSize == 0) - ((4000*p.noOfSignals)/p.accTime)*(p.loopCount*(13+3)); - - char ssps[255]; - if(sps > 1000000){ - sps /= 1000000; - sprintf(ssps, "%dM", (int)sps); - } else if(sps > 1000){ - sps /= 1000; - sprintf(ssps, "%dk", (int)sps); - } else { - sprintf(ssps, "%d", (int)sps); - } - - char sbps[255]; - if(bps > 1000000){ - bps /= 1000000; - sprintf(sbps, "%dM", bps); - } else if(bps>1000){ - bps /= 1000; - sprintf(sbps, "%dk", bps); - } else { - sprintf(sbps, "%d", bps); - } - - char buf[255]; - if(p.signalSize != 0){ - BaseString::snprintf(buf, 255, - "%d\t%d\t%d\t%s\t%s\t%s\t%d\t%d\t%d\t%d", - p.noOfSignals, - p.signalSize, - (4*p.signalSize), - st, - ssps, - sbps, - (int)(p.sendLenBytes / (p.sendCount == 0 ? 1 : p.sendCount)), - (int)(p.recvLenBytes / (p.recvCount == 0 ? 1 : p.recvCount)), - (int)(p.totTimePrioA / p.loopCount), - (int)(p.bytesSentBeforePrioA)); - } else { - BaseString::snprintf(buf, 255, - "%d\trand\t4*rand\t%s\t%s\t%s\t%d\t%d\t%d\t%d", - p.noOfSignals, - st, - ssps, - sbps, - (int)(p.sendLenBytes / (p.sendCount == 0 ? 1 : p.sendCount)), - (int)(p.recvLenBytes / (p.recvCount == 0 ? 
1 : p.recvCount)), - (int)(p.totTimePrioA / p.loopCount), - (int)(p.bytesSentBeforePrioA)); - - } - ndbout << buf << endl; - } -} - -int loopCount = 1; -int sendBufSz = -1; -int recvBufSz = -1; - -NDB_TICKS startSec=0; -NDB_TICKS stopSec=0; -Uint32 startMicro=0; -Uint32 stopMicro=0; -int timerStarted; -int timerStopped; - -bool isClient = false; -bool isConnected = false; -bool isStarted = false; -int currentPhase = 0; -TestPhase allPhases[noOfTests]; -Uint32 signalToEcho; -NDB_TICKS startTime, stopTime; - -void -client(NodeId remoteNodeId){ - isClient = true; - - currentPhase = 0; - memcpy(allPhases, testSpec, sizeof(testSpec)); - - int counter = 0; - - while(true){ - TestPhase * current = &allPhases[currentPhase]; - if(current->noOfSignals == current->noOfSignalSent && - current->noOfSignals == current->noOfSignalReceived){ - - /** - * Test phase done - */ - current->stopTime = NdbTick_CurrentMillisecond(); - current->accTime += (current->stopTime - current->startTime); - - NdbSleep_MilliSleep(500 / loopCount); - - current->startTime = NdbTick_CurrentMillisecond(); - - current->noOfSignalSent = 0; - current->noOfSignalReceived = 0; - - current->loopCount ++; - if(current->loopCount == loopCount){ - - printReport(allPhases[currentPhase]); - - currentPhase ++; - if(currentPhase == noOfTests){ - /** - * Now we are done - */ - break; - } - NdbSleep_MilliSleep(500); - current = &allPhases[currentPhase]; - current->startTime = NdbTick_CurrentMillisecond(); - } - } - int signalsLeft = current->noOfSignals - current->noOfSignalSent; - if(signalsLeft > 0){ - for(; signalsLeft > 1; signalsLeft--){ - if(sendSignalTo(remoteNodeId, current->signalSize, 1) == SEND_OK) { - current->noOfSignalSent++; - // ndbout << "sent prio b" << endl; - current->bytesSentBeforePrioA += (current->signalSize << 2); - } - else { - tReg->external_IO(10); - break; - } - } - //prio A - if(signalsLeft==1) { - NDB_TICKS sec = 0; - Uint32 micro=0; - int ret = NdbTick_CurrentMicrosecond(&sec,µ); - if(ret==0) - current->startTimePrioA = micro + sec*1000000; - if(sendSignalTo(remoteNodeId, current->signalSize, 0) == SEND_OK) { - current->noOfSignalSent++; - signalsLeft--; - } - else { - tReg->external_IO(10); - break; - } - } - } - - if(counter % 10 == 0) - tReg->checkConnections(); - tReg->external_IO(0); - counter++; - } -} - -void -server(){ - isClient = false; - - signalToEcho = 0; - for(int i = 0; i 0){ - tReg->checkConnections(); - for(int i = 0; i<10; i++) - tReg->external_IO(10); - } -} - -int -prioTransporterTest(TestType tt, const char * progName, - int argc, const char **argv){ - - loopCount = 100; - sendBufSz = -1; - recvBufSz = -1; - - isClient = false; - isConnected = false; - isStarted = false; - currentPhase = 0; - - signalHandler(0); - - if(argc < 4){ - usage(progName); - return 0; - } - - const NodeId localNodeId = atoi(argv[1]); - const char * localHostName = argv[2]; - const char * remoteHost1 = argv[3]; - - if(argc >= 5) - loopCount = atoi(argv[4]); - if(argc >= 6) - sendBufSz = atoi(argv[5]); - if(argc >= 7) - recvBufSz = atoi(argv[6]); - - if(localNodeId < 1 || localNodeId > 2){ - ndbout << "localNodeId = " << localNodeId << endl << endl; - usage(progName); - return 0; - } - - if(localNodeId == 1) - ndbout << "-- ECHO CLIENT --" << endl; - else - ndbout << "-- ECHO SERVER --" << endl; - - ndbout << "localNodeId: " << localNodeId << endl; - ndbout << "localHostName: " << localHostName << endl; - ndbout << "remoteHost1 (node " << (localNodeId == 1?2:1) << "): " - << remoteHost1 << endl; - ndbout << "Loop 
count: " << loopCount << endl; - ndbout << "-----------------" << endl; - - void * confTemplate = 0; - CreateTransporterFunc func = 0; - switch(tt){ - case TestTCP: - func = createTCPTransporter; - confTemplate = &tcpTemplate; - break; - case TestSCI: - func = createSCITransporter; - confTemplate = &sciTemplate; - break; - case TestSHM: - func = createSHMTransporter; - confTemplate = &shmTemplate; - break; - default: - ndbout << "Unsupported transporter type" << endl; - return 0; - } - - ndbout << "Creating transporter registry" << endl; - tReg = new TransporterRegistry; - tReg->init(localNodeId); - - switch(localNodeId){ - case 1: - (* func)(confTemplate, 1, 2, localHostName, remoteHost1, - sendBufSz, recvBufSz); - break; - case 2: - (* func)(confTemplate, 2, 1, localHostName, remoteHost1, - sendBufSz, recvBufSz); - break; - } - - ndbout << "Doing startSending/startReceiving" << endl; - tReg->startSending(); - tReg->startReceiving(); - - ndbout << "Connecting" << endl; - tReg->setPerformState(PerformConnect); - tReg->checkConnections(); - - if(localNodeId == 1) - client(2); - else - server(); - - isStarted = false; - - ndbout << "Sleep 3 secs" << endl; - NdbSleep_SecSleep(3); - - ndbout << "Doing setPerformState(Disconnect)" << endl; - tReg->setPerformState(PerformDisconnect); - - ndbout << "Doing checkConnections()" << endl; - tReg->checkConnections(); - - ndbout << "Deleting transporter registry" << endl; - delete tReg; tReg = 0; - - return 0; -} - -NdbOut & operator <<(NdbOut & out, SignalHeader & sh){ - out << "-- Signal Header --" << endl; - out << "theLength: " << sh.theLength << endl; - out << "gsn: " << sh.theVerId_signalNumber << endl; - out << "recBlockNo: " << sh.theReceiversBlockNumber << endl; - out << "sendBlockRef: " << sh.theSendersBlockRef << endl; - out << "sendersSig: " << sh.theSendersSignalId << endl; - out << "theSignalId: " << sh.theSignalId << endl; - out << "trace: " << (int)sh.theTrace << endl; - return out; -} - -void -execute(SignalHeader * const header, Uint8 prio, Uint32 * const theData){ - const NodeId nodeId = refToNode(header->theSendersBlockRef); - NDB_TICKS sec = 0; - Uint32 micro=0; - int ret = NdbTick_CurrentMicrosecond(&sec,µ); - if(prio == 0 && isClient && ret == 0) { - allPhases[currentPhase].stopTimePrioA = micro + sec*1000000; - allPhases[currentPhase].totTimePrioA += - allPhases[currentPhase].stopTimePrioA - - allPhases[currentPhase].startTimePrioA; - } - if(ret!=0) - allPhases[currentPhase].totTimePrioA = -1; - - if(isClient){ - allPhases[currentPhase].noOfSignalReceived++; - } else { - int sleepTime = 10; - while(tReg->prepareSend(header, prio, theData, nodeId) != SEND_OK){ - ndbout << "Failed to echo" << sleepTime << endl; - NdbSleep_MilliSleep(sleepTime); - // sleepTime += 10; - } - - signalToEcho--; - } -} - -void -reportError(NodeId nodeId, TransporterError errorCode){ - char buf[255]; - sprintf(buf, "reportError (%d, %x) in perfTest", nodeId, errorCode); - ndbout << buf << endl; - if(errorCode & 0x8000){ - tReg->setPerformState(nodeId, PerformDisconnect); - } -} - -/** - * Report average send theLength in bytes (4096 last sends) - */ -void -reportSendLen(NodeId nodeId, Uint32 count, Uint64 bytes){ - allPhases[currentPhase].sendCount += count; - allPhases[currentPhase].sendLenBytes += bytes; - - if(!isClient){ - ndbout << "reportSendLen(" << nodeId << ", " - << (bytes/count) << ")" << endl; - } -} - -/** - * Report average receive theLength in bytes (4096 last receives) - */ -void -reportReceiveLen(NodeId nodeId, Uint32 count, Uint64 
bytes){ - allPhases[currentPhase].recvCount += count; - allPhases[currentPhase].recvLenBytes += bytes; - - if(!isClient){ - ndbout << "reportReceiveLen(" << nodeId << ", " - << (bytes/count) << ")" << endl; - } -} - -/** - * Report connection established - */ -void -reportConnect(NodeId nodeId){ - char buf[255]; - sprintf(buf, "reportConnect(%d)", nodeId); - ndbout << buf << endl; - tReg->setPerformState(nodeId, PerformIO); - - if(!isStarted){ - isStarted = true; - startTime = NdbTick_CurrentMillisecond(); - if(isClient){ - reportHeader(); - allPhases[0].startTime = startTime; - } - } - else{ - // Resend signals that were lost when connection failed - TestPhase * current = &allPhases[currentPhase]; - current->noOfSignalSent = current->noOfSignalReceived; - } -} - -/** - * Report connection broken - */ -void -reportDisconnect(NodeId nodeId, Uint32 errNo){ - char buf[255]; - sprintf(buf, "reportDisconnect(%d)", nodeId); - ndbout << buf << endl; - - if(isStarted) - tReg->setPerformState(nodeId, PerformConnect); -} - - -int -checkJobBuffer() { - /** - * Check to see if jobbbuffers are starting to get full - * and if so call doJob - */ - return 0; -} diff --git a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp b/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp deleted file mode 100644 index 518367b69ef..00000000000 --- a/storage/ndb/src/common/transporter/priotest/prioTransporterTest.hpp +++ /dev/null @@ -1,34 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef PRIO_TRANSPORTER_TEST_HPP -#define PRIO_TRANSPORTER_TEST_HPP - - -enum TestType { - TestTCP, - TestSCI, - TestSHM -}; - -extern int basePortTCP; - -int prioTransporterTest(TestType tt, const char * pName, - int argc, const char **argv); - - - -#endif diff --git a/storage/ndb/src/common/util/BaseString.cpp b/storage/ndb/src/common/util/BaseString.cpp deleted file mode 100644 index 019906c9ec6..00000000000 --- a/storage/ndb/src/common/util/BaseString.cpp +++ /dev/null @@ -1,553 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/* -*- c-basic-offset: 4; -*- */ -#include -#include -#include "basestring_vsnprintf.h" - -BaseString::BaseString() -{ - m_chr = new char[1]; - if (m_chr == NULL) - { - errno = ENOMEM; - m_len = 0; - return; - } - m_chr[0] = 0; - m_len = 0; -} - -BaseString::BaseString(const char* s) -{ - if (s == NULL) - { - m_chr = NULL; - m_len = 0; - } - const size_t n = strlen(s); - m_chr = new char[n + 1]; - if (m_chr == NULL) - { - errno = ENOMEM; - m_len = 0; - return; - } - memcpy(m_chr, s, n + 1); - m_len = n; -} - -BaseString::BaseString(const BaseString& str) -{ - const char* const s = str.m_chr; - const size_t n = str.m_len; - if (s == NULL) - { - m_chr = NULL; - m_len = 0; - return; - } - char* t = new char[n + 1]; - if (t == NULL) - { - errno = ENOMEM; - m_chr = NULL; - m_len = 0; - return; - } - memcpy(t, s, n + 1); - m_chr = t; - m_len = n; -} - -BaseString::~BaseString() -{ - delete[] m_chr; -} - -BaseString& -BaseString::assign(const char* s) -{ - if (s == NULL) - { - m_chr = NULL; - m_len = 0; - return *this; - } - size_t n = strlen(s); - char* t = new char[n + 1]; - if (t) - { - memcpy(t, s, n + 1); - } - else - { - errno = ENOMEM; - n = 0; - } - delete[] m_chr; - m_chr = t; - m_len = n; - return *this; -} - -BaseString& -BaseString::assign(const char* s, size_t n) -{ - char* t = new char[n + 1]; - if (t) - { - memcpy(t, s, n); - t[n] = 0; - } - else - { - errno = ENOMEM; - n = 0; - } - delete[] m_chr; - m_chr = t; - m_len = n; - return *this; -} - -BaseString& -BaseString::assign(const BaseString& str, size_t n) -{ - if (n > str.m_len) - n = str.m_len; - return assign(str.m_chr, n); -} - -BaseString& -BaseString::append(const char* s) -{ - size_t n = strlen(s); - char* t = new char[m_len + n + 1]; - if (t) - { - memcpy(t, m_chr, m_len); - memcpy(t + m_len, s, n + 1); - } - else - { - errno = ENOMEM; - m_len = 0; - n = 0; - } - delete[] m_chr; - m_chr = t; - m_len += n; - return *this; -} - -BaseString& -BaseString::append(char c) { - return appfmt("%c", c); -} - -BaseString& -BaseString::append(const BaseString& str) -{ - return append(str.m_chr); -} - -BaseString& -BaseString::append(const Vector &vector, - const BaseString &separator) { - for(size_t i=0;i (int)m_len) { - char *t = new char[l]; - if (t == NULL) - { - errno = ENOMEM; - return *this; - } - delete[] m_chr; - m_chr = t; - } - va_start(ap, fmt); - basestring_vsnprintf(m_chr, l, fmt, ap); - va_end(ap); - m_len = strlen(m_chr); - return *this; -} - -BaseString& -BaseString::appfmt(const char *fmt, ...) -{ - char buf[1]; - va_list ap; - int l; - - /* Figure out how long the formatted string will be. A small temporary - * buffer is used, because I don't trust all implementations to work - * when called as vsnprintf(NULL, 0, ...). 
- */ - va_start(ap, fmt); - l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1; - va_end(ap); - char *tmp = new char[l]; - if (tmp == NULL) - { - errno = ENOMEM; - return *this; - } - va_start(ap, fmt); - basestring_vsnprintf(tmp, l, fmt, ap); - va_end(ap); - append(tmp); - delete[] tmp; - return *this; -} - -BaseString& -BaseString::operator=(const BaseString& str) -{ - if (this != &str) { - this->assign(str); - } - return *this; -} - -int -BaseString::split(Vector &v, - const BaseString &separator, - int maxSize) const { - char *str = strdup(m_chr); - int i, start, len, num = 0; - len = strlen(str); - for(start = i = 0; - (i <= len) && ( (maxSize<0) || ((int)v.size()<=maxSize-1) ); - i++) { - if(strchr(separator.c_str(), str[i]) || i == len) { - if(maxSize < 0 || (int)v.size() < maxSize-1) - str[i] = '\0'; - v.push_back(BaseString(str+start)); - num++; - start = i+1; - } - } - free(str); - - return num; -} - -ssize_t -BaseString::indexOf(char c) { - char *p; - p = strchr(m_chr, c); - if(p == NULL) - return -1; - return (ssize_t)(p-m_chr); -} - -ssize_t -BaseString::lastIndexOf(char c) { - char *p; - p = strrchr(m_chr, c); - if(p == NULL) - return -1; - return (ssize_t)(p-m_chr); -} - -BaseString -BaseString::substr(ssize_t start, ssize_t stop) { - if(stop < 0) - stop = length(); - ssize_t len = stop-start; - if(len <= 0) - return BaseString(""); - BaseString s; - s.assign(m_chr+start, len); - return s; -} - -static bool -iswhite(char c) { - switch(c) { - case ' ': - case '\t': - return true; - default: - return false; - } - /* NOTREACHED */ -} - -char ** -BaseString::argify(const char *argv0, const char *src) { - Vector vargv; - - if(argv0 != NULL) - { - char *t = strdup(argv0); - if (t == NULL) - { - errno = ENOMEM; - return NULL; - } - if (vargv.push_back(t)) - { - free(t); - return NULL; - } - } - - char *tmp = new char[strlen(src)+1]; - if (tmp == NULL) - { - for(size_t i = 0; i < vargv.size(); i++) - free(vargv[i]); - errno = ENOMEM; - return NULL; - } - char *dst = tmp; - const char *end = src + strlen(src); - /* Copy characters from src to destination, while compacting them - * so that all whitespace is compacted and replaced by a NUL-byte. - * At the same time, add pointers to strings in the vargv vector. - * When whitespace is detected, the characters '"' and '\' are honored, - * to make it possible to give arguments containing whitespace. - * The semantics of '"' and '\' match that of most Unix shells. 
- */ - while(src < end && *src) { - /* Skip initial whitespace */ - while(src < end && *src && iswhite(*src)) - src++; - - char *begin = dst; - while(src < end && *src) { - /* Handle '"' quotation */ - if(*src == '"') { - src++; - while(src < end && *src && *src != '"') { - if(*src == '\\') - src++; - *dst++ = *src++; - } - src++; - if(src >= end) - goto end; - } - - /* Handle '\' */ - if(*src == '\\') - src++; - else if(iswhite(*src)) - break; - - /* Actually copy characters */ - *dst++ = *src++; - } - - /* Make sure the string is properly terminated */ - *dst++ = '\0'; - src++; - - { - char *t = strdup(begin); - if (t == NULL) - { - delete[] tmp; - for(size_t i = 0; i < vargv.size(); i++) - free(vargv[i]); - errno = ENOMEM; - return NULL; - } - if (vargv.push_back(t)) - { - free(t); - delete[] tmp; - for(size_t i = 0; i < vargv.size(); i++) - free(vargv[i]); - return NULL; - } - } - } - end: - - delete[] tmp; - if (vargv.push_back(NULL)) - { - for(size_t i = 0; i < vargv.size(); i++) - free(vargv[i]); - return NULL; - } - - /* Convert the C++ Vector into a C-vector of strings, suitable for - * calling execv(). - */ - char **argv = (char **)malloc(sizeof(*argv) * (vargv.size())); - if(argv == NULL) - { - for(size_t i = 0; i < vargv.size(); i++) - free(vargv[i]); - errno = ENOMEM; - return NULL; - } - - for(size_t i = 0; i < vargv.size(); i++){ - argv[i] = vargv[i]; - } - - return argv; -} - -BaseString& -BaseString::trim(const char * delim){ - trim(m_chr, delim); - m_len = strlen(m_chr); - return * this; -} - -char* -BaseString::trim(char * str, const char * delim){ - int len = strlen(str) - 1; - for(; len > 0 && strchr(delim, str[len]); len--); - - int pos = 0; - for(; pos <= len && strchr(delim, str[pos]); pos++); - - if(pos > len){ - str[0] = 0; - return 0; - } else { - memmove(str, &str[pos], len - pos + 1); - str[len-pos+1] = 0; - } - - return str; -} - -int -BaseString::vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - return(basestring_vsnprintf(str, size, format, ap)); -} - -int -BaseString::snprintf(char *str, size_t size, const char *format, ...) 
-{ - va_list ap; - va_start(ap, format); - int ret= basestring_vsnprintf(str, size, format, ap); - va_end(ap); - return(ret); -} - - -#ifdef TEST_BASE_STRING - -/* -g++ -g -Wall -o tbs -DTEST_BASE_STRING -I$NDB_TOP/include/util \ - -I$NDB_TOP/include/portlib BaseString.cpp -valgrind ./tbs -*/ - -int main() -{ - BaseString s("abc"); - BaseString t(s); - s.assign("def"); - t.append("123"); - assert(s == "def"); - assert(t == "abc123"); - s.assign(""); - t.assign(""); - for (unsigned i = 0; i < 1000; i++) { - s.append("xyz"); - t.assign(s); - assert(strlen(t.c_str()) % 3 == 0); - } - - { - BaseString s(":123:abc:;:foo:"); - Vector v; - assert(s.split(v, ":;") == 7); - - assert(v[0] == ""); - assert(v[1] == "123"); - assert(v[2] == "abc"); - assert(v[3] == ""); - assert(v[4] == ""); - assert(v[5] == "foo"); - assert(v[6] == ""); - } - - { - BaseString s(":123:abc:foo:bar"); - Vector v; - assert(s.split(v, ":;", 4) == 4); - - assert(v[0] == ""); - assert(v[1] == "123"); - assert(v[2] == "abc"); - assert(v[3] == "foo:bar"); - - BaseString n; - n.append(v, "()"); - assert(n == "()123()abc()foo:bar"); - n = ""; - n.append(v); - assert(n == " 123 abc foo:bar"); - } - - { - assert(BaseString("hamburger").substr(4,2) == ""); - assert(BaseString("hamburger").substr(3) == "burger"); - assert(BaseString("hamburger").substr(4,8) == "urge"); - assert(BaseString("smiles").substr(1,5) == "mile"); - assert(BaseString("012345").indexOf('2') == 2); - assert(BaseString("hej").indexOf('X') == -1); - } - - { - assert(BaseString(" 1").trim(" ") == "1"); - assert(BaseString("1 ").trim(" ") == "1"); - assert(BaseString(" 1 ").trim(" ") == "1"); - assert(BaseString("abc\t\n\r kalleabc\t\r\n").trim("abc\t\r\n ") == "kalle"); - assert(BaseString(" ").trim(" ") == ""); - } - return 0; -} - -#endif - -template class Vector; -template class Vector; diff --git a/storage/ndb/src/common/util/Bitmask.cpp b/storage/ndb/src/common/util/Bitmask.cpp deleted file mode 100644 index 7a73276b0a9..00000000000 --- a/storage/ndb/src/common/util/Bitmask.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright (C) 2004-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include - -void -BitmaskImpl::getFieldImpl(const Uint32 src[], - unsigned shiftL, unsigned len, Uint32 dst[]) -{ - /* Copy whole words of src to dst, shifting src left - * by shiftL. Undefined bits of the last written dst word - * should be zeroed. - */ - assert(shiftL < 32); - - unsigned shiftR = 32 - shiftL; - unsigned undefined = shiftL ? ~0 : 0; - - /* Merge first word with previously set bits if there's a shift */ - * dst = shiftL ? 
* dst : 0; - - /* Treat the zero-shift case separately to avoid - * trampling or reading past the end of src - */ - if (shiftL == 0) - { - while(len >= 32) - { - * dst++ = * src++; - len -=32; - } - - if (len != 0) - { - /* Last word has some bits set */ - Uint32 mask= ((1 << len) -1); // 0000111 - * dst = (* src) & mask; - } - } - else // shiftL !=0, need to build each word from two words shifted - { - while(len >= 32) - { - * dst++ |= (* src) << shiftL; - * dst = ((* src++) >> shiftR) & undefined; - len -= 32; - } - - /* Have space for shiftR more bits in the current dst word - * is that enough? - */ - if(len <= shiftR) - { - /* Fit the remaining bits in the current dst word */ - * dst |= ((* src) & ((1 << len) - 1)) << shiftL; - } - else - { - /* Need to write to two dst words */ - * dst++ |= ((* src) << shiftL); - * dst = ((* src) >> shiftR) & ((1 << (len - shiftR)) - 1) & undefined; - } - } -} - -void -BitmaskImpl::setFieldImpl(Uint32 dst[], - unsigned shiftL, unsigned len, const Uint32 src[]) -{ - /** - * - * abcd ef00 - * 00ab cdef - */ - assert(shiftL < 32); - unsigned shiftR = 32 - shiftL; - unsigned undefined = shiftL ? ~0 : 0; - while(len >= 32) - { - * dst = (* src++) >> shiftL; - * dst++ |= ((* src) << shiftR) & undefined; - len -= 32; - } - - /* Copy last bits */ - Uint32 mask = ((1 << len) -1); - * dst = (* dst & ~mask); - if(len <= shiftR) - { - /* Remaining bits fit in current word */ - * dst |= ((* src++) >> shiftL) & mask; - } - else - { - /* Remaining bits update 2 words */ - * dst |= ((* src++) >> shiftL); - * dst |= ((* src) & ((1 << (len - shiftR)) - 1)) << shiftR ; - } -} - -/* Bitmask testcase code moved from here to - * storage/ndb/test/ndbapi/testBitfield.cpp - * to get coverage from automated testing - */ diff --git a/storage/ndb/src/common/util/ConfigValues.cpp b/storage/ndb/src/common/util/ConfigValues.cpp deleted file mode 100644 index 97ce4cccb5b..00000000000 --- a/storage/ndb/src/common/util/ConfigValues.cpp +++ /dev/null @@ -1,803 +0,0 @@ -/* Copyright (C) 2004, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include -#include -#include - -static bool findKey(const Uint32 * vals, Uint32 sz, Uint32 key, Uint32 * pos); - -/** - * Key - * - * t = Type - 4 bits 0-15 - * s = Section - 14 bits 0-16383 - * k = Key value - 14 bits 0-16383 - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * kkkkkkkkkkkkkkssssssssssssssoooo - */ -#define KP_TYPE_MASK (15) -#define KP_TYPE_SHIFT (28) -#define KP_SECTION_MASK (0x3FFF) -#define KP_SECTION_SHIFT (14) -#define KP_KEYVAL_MASK (0x3FFF) -#define KP_KEYVAL_SHIFT (0) -#define KP_MASK (0x0FFFFFFF) - -static const Uint32 CFV_KEY_PARENT = (KP_KEYVAL_MASK - 1); -static const Uint32 CFV_KEY_FREE = ~0; - -static const char Magic[] = { 'N', 'D', 'B', 'C', 'O', 'N', 'F', 'V' }; - -//#define DEBUG_CV -#ifdef DEBUG_CV -#define DEBUG if(getenv("CV_DEBUG")) -#else -#define DEBUG if(0) -#endif - -inline -ConfigValues::ValueType -getTypeOf(Uint32 k) { - return (ConfigValues::ValueType)((k >> KP_TYPE_SHIFT) & KP_TYPE_MASK); -} - -ConfigValues::ConfigValues(Uint32 sz, Uint32 dsz){ - m_size = sz; - m_dataSize = dsz; - m_stringCount = 0; - m_int64Count = 0; - for(Uint32 i = 0; im_key = key; - return m_cfg.getByPos(pos, result); -} - -bool -ConfigValues::getByPos(Uint32 pos, Entry * result) const { - assert(pos < (2 * m_size)); - Uint32 keypart = m_values[pos]; - Uint32 val2 = m_values[pos+1]; - - switch(::getTypeOf(keypart)){ - case IntType: - case SectionType: - result->m_int = val2; - break; - case StringType: - result->m_string = * getString(val2); - break; - case Int64Type: - result->m_int64 = * get64(val2); - break; - case InvalidType: - default: - return false; - } - - result->m_type = ::getTypeOf(keypart); - - return true; -} - -Uint64 * -ConfigValues::get64(Uint32 index) const { - assert(index < m_int64Count); - const Uint32 * data = m_values + (m_size << 1); - Uint64 * ptr = (Uint64*)data; - ptr += index; - return ptr; -} - -char ** -ConfigValues::getString(Uint32 index) const { - assert(index < m_stringCount); - const Uint32 * data = m_values + (m_size << 1); - char * ptr = (char*)data; - ptr += m_dataSize; - ptr -= (index * sizeof(char *)); - return (char**)ptr; -} - -bool -ConfigValues::ConstIterator::openSection(Uint32 key, Uint32 no){ - Uint32 curr = m_currentSection; - - Entry tmp; - if(get(key, &tmp) && tmp.m_type == SectionType){ - m_currentSection = tmp.m_int; - if(get(no, &tmp) && tmp.m_type == IntType){ - m_currentSection = tmp.m_int; - /** - * Validate - */ - if(get(CFV_KEY_PARENT, &tmp)){ - return true; - } - } - } - - m_currentSection = curr; - return false; -} - -bool -ConfigValues::ConstIterator::closeSection() { - - Entry tmp; - if(get(CFV_KEY_PARENT, &tmp) && tmp.m_type == IntType){ - m_currentSection = tmp.m_int; - return true; - } - - return false; -} - -bool -ConfigValues::Iterator::set(Uint32 key, Uint32 value){ - Uint32 pos; - if(!findKey(m_cfg.m_values, m_cfg.m_size, key | m_currentSection, &pos)){ - return false; - } - - if(::getTypeOf(m_cfg.m_values[pos]) != IntType){ - return false; - } - - m_cfg.m_values[pos+1] = value; - return true; -} - -bool -ConfigValues::Iterator::set(Uint32 key, Uint64 value){ - Uint32 pos; - if(!findKey(m_cfg.m_values, m_cfg.m_size, key | m_currentSection, &pos)){ - return false; - } - - if(::getTypeOf(m_cfg.m_values[pos]) != Int64Type){ - return false; - } - - * 
m_cfg.get64(m_cfg.m_values[pos+1]) = value; - return true; -} - -bool -ConfigValues::Iterator::set(Uint32 key, const char * value){ - Uint32 pos; - if(!findKey(m_cfg.m_values, m_cfg.m_size, key | m_currentSection, &pos)){ - return false; - } - - if(::getTypeOf(m_cfg.m_values[pos]) != StringType){ - return false; - } - - char ** str = m_cfg.getString(m_cfg.m_values[pos+1]); - free(* str); - * str = strdup(value ? value : ""); - return true; -} - -static -bool -findKey(const Uint32 * values, Uint32 sz, Uint32 key, Uint32 * _pos){ - Uint32 lo = 0; - Uint32 hi = sz; - Uint32 pos = (hi + lo) >> 1; - - DEBUG printf("findKey(H'%.8x %d)", key, sz); - - if (sz == 0) - { - DEBUG ndbout_c(" -> false, 0"); - * _pos = 0; - return false; - } - - Uint32 val = 0; - Uint32 oldpos = pos + 1; - while (pos != oldpos) - { - DEBUG printf(" [ %d %d %d ] ", lo, pos, hi); - assert(pos < hi); - assert(pos >= lo); - val = values[2*pos] & KP_MASK; - if (key > val) - { - lo = pos; - } - else if (key < val) - { - hi = pos; - } - else - { - * _pos = 2*pos; - DEBUG ndbout_c(" -> true, %d", pos); - return true; - } - oldpos = pos; - pos = (hi + lo) >> 1; - } - - DEBUG printf(" pos: %d (key %.8x val: %.8x values[pos]: %x) key>val: %d ", - pos, key, val, values[2*pos] & KP_MASK, - key > val); - - pos += (key > val) ? 1 : 0; - - * _pos = 2*pos; - DEBUG ndbout_c(" -> false, %d", pos); - return false; -} - - -ConfigValuesFactory::ConfigValuesFactory(Uint32 keys, Uint32 data){ - m_sectionCounter = (1 << KP_SECTION_SHIFT); - m_freeKeys = keys; - m_freeData = (data + 7) & ~7; - m_currentSection = 0; - m_cfg = create(m_freeKeys, m_freeData); -} - -ConfigValuesFactory::ConfigValuesFactory(ConfigValues * cfg){ - m_cfg = cfg; - m_freeKeys = 0; - m_freeData = m_cfg->m_dataSize; - m_sectionCounter = (1 << KP_SECTION_SHIFT); - m_currentSection = 0; - const Uint32 sz = 2 * m_cfg->m_size; - for(Uint32 i = 0; im_values[i]; - if(key == CFV_KEY_FREE){ - m_freeKeys++; - } else { - switch(::getTypeOf(key)){ - case ConfigValues::IntType: - case ConfigValues::SectionType: - break; - case ConfigValues::Int64Type: - m_freeData -= sizeof(Uint64); - break; - case ConfigValues::StringType: - m_freeData -= sizeof(char *); - break; - case ConfigValues::InvalidType: - abort(); - } - Uint32 sec = key & (KP_SECTION_MASK << KP_SECTION_SHIFT); - m_sectionCounter = (sec > m_sectionCounter ? sec : m_sectionCounter); - } - } -} - -ConfigValuesFactory::~ConfigValuesFactory() -{ - if(m_cfg) - free(m_cfg); -} - -ConfigValues * -ConfigValuesFactory::create(Uint32 keys, Uint32 data){ - Uint32 sz = sizeof(ConfigValues); - sz += (2 * keys * sizeof(Uint32)); - sz += data; - - void * tmp = malloc(sz); - return new (tmp) ConfigValues(keys, data); -} - -void -ConfigValuesFactory::expand(Uint32 fk, Uint32 fs){ - if(m_freeKeys >= fk && m_freeData >= fs){ - return ; - } - - DEBUG printf("[ fk fd ] : [ %d %d ]", m_freeKeys, m_freeData); - - m_freeKeys = (m_freeKeys >= fk ? m_cfg->m_size : fk + m_cfg->m_size); - m_freeData = (m_freeData >= fs ? 
m_cfg->m_dataSize : fs + m_cfg->m_dataSize); - m_freeData = (m_freeData + 7) & ~7; - - DEBUG ndbout_c(" [ %d %d ]", m_freeKeys, m_freeData); - - ConfigValues * m_tmp = m_cfg; - m_cfg = create(m_freeKeys, m_freeData); - put(* m_tmp); - m_tmp->~ConfigValues(); - free(m_tmp); -} - -void -ConfigValuesFactory::shrink(){ - if(m_freeKeys == 0 && m_freeData == 0){ - return ; - } - - m_freeKeys = m_cfg->m_size - m_freeKeys; - m_freeData = m_cfg->m_dataSize - m_freeData; - m_freeData = (m_freeData + 7) & ~7; - - ConfigValues * m_tmp = m_cfg; - m_cfg = create(m_freeKeys, m_freeData); - put(* m_tmp); - m_tmp->~ConfigValues(); - free(m_tmp); -} - -bool -ConfigValuesFactory::openSection(Uint32 key, Uint32 no){ - ConfigValues::Entry tmp; - const Uint32 parent = m_currentSection; - - ConfigValues::ConstIterator iter(* m_cfg); - iter.m_currentSection = m_currentSection; - if(!iter.get(key, &tmp)){ - - tmp.m_key = key; - tmp.m_type = ConfigValues::SectionType; - tmp.m_int = m_sectionCounter; - m_sectionCounter += (1 << KP_SECTION_SHIFT); - - if(!put(tmp)){ - return false; - } - } - - if(tmp.m_type != ConfigValues::SectionType){ - return false; - } - - m_currentSection = tmp.m_int; - - tmp.m_key = no; - tmp.m_type = ConfigValues::IntType; - tmp.m_int = m_sectionCounter; - if(!put(tmp)){ - m_currentSection = parent; - return false; - } - m_sectionCounter += (1 << KP_SECTION_SHIFT); - - m_currentSection = tmp.m_int; - tmp.m_type = ConfigValues::IntType; - tmp.m_key = CFV_KEY_PARENT; - tmp.m_int = parent; - if(!put(tmp)){ - m_currentSection = parent; - return false; - } - - return true; -} - -bool -ConfigValuesFactory::closeSection(){ - ConfigValues::ConstIterator iter(* m_cfg); - iter.m_currentSection = m_currentSection; - const bool b = iter.closeSection(); - m_currentSection = iter.m_currentSection; - return b; -} - -bool -ConfigValuesFactory::put(const ConfigValues::Entry & entry){ - - if(m_freeKeys == 0 || - (entry.m_type == ConfigValues::StringType && m_freeData < sizeof(char *)) - || (entry.m_type == ConfigValues::Int64Type && m_freeData < 8 )){ - - DEBUG ndbout_c("m_freeKeys = %d, m_freeData = %d -> expand", - m_freeKeys, m_freeData); - - expand(31, 20); - } - - const Uint32 tmp = entry.m_key | m_currentSection; - const Uint32 sz = m_cfg->m_size - m_freeKeys; - - Uint32 pos; - if (findKey(m_cfg->m_values, sz, tmp, &pos)) - { - DEBUG ndbout_c("key %x already found at pos: %d", tmp, pos); - return false; - } - - DEBUG { - printf("H'before "); - Uint32 prev = 0; - for (Uint32 i = 0; im_values[2*i] & KP_MASK; - ndbout_c("%.8x", val); - assert(val >= prev); - prev = val; - } - } - - if (pos != 2*sz) - { - DEBUG ndbout_c("pos: %d sz: %d", pos, sz); - memmove(m_cfg->m_values + pos + 2, m_cfg->m_values + pos, - 4 * (2*sz - pos)); - } - - - Uint32 key = tmp; - key |= (entry.m_type << KP_TYPE_SHIFT); - m_cfg->m_values[pos] = key; - - DEBUG { - printf("H'after "); - Uint32 prev = 0; - for (Uint32 i = 0; i<=sz; i++) - { - Uint32 val = m_cfg->m_values[2*i] & KP_MASK; - ndbout_c("%.8x", val); - assert(val >= prev); - prev = val; - } - } - - switch(entry.m_type){ - case ConfigValues::IntType: - case ConfigValues::SectionType: - m_cfg->m_values[pos+1] = entry.m_int; - m_freeKeys--; - DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value: %d\n", - pos, sz, 0, - (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, - entry.m_int); - return true; - case ConfigValues::StringType:{ - Uint32 index = m_cfg->m_stringCount++; - m_cfg->m_values[pos+1] = index; - char ** ref = m_cfg->getString(index); - * ref = strdup(entry.m_string ? 
entry.m_string : ""); - m_freeKeys--; - m_freeData -= sizeof(char *); - DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value(%d): %s\n", - pos, sz, 0, - (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, - index, - entry.m_string); - return true; - } - case ConfigValues::Int64Type:{ - Uint32 index = m_cfg->m_int64Count++; - m_cfg->m_values[pos+1] = index; - * m_cfg->get64(index) = entry.m_int64; - m_freeKeys--; - m_freeData -= 8; - DEBUG printf("Putting at: %d(%d) (loop = %d) key: %d value64(%d): %lld\n", - pos, sz, 0, - (key >> KP_KEYVAL_SHIFT) & KP_KEYVAL_MASK, - index, - entry.m_int64); - return true; - } - case ConfigValues::InvalidType: - default: - return false; - } - return false; -} - -void -ConfigValuesFactory::put(const ConfigValues & cfg){ - - Uint32 curr = m_currentSection; - m_currentSection = 0; - - ConfigValues::Entry tmp; - for(Uint32 i = 0; i < 2 * cfg.m_size; i += 2){ - if(cfg.m_values[i] != CFV_KEY_FREE){ - tmp.m_key = cfg.m_values[i]; - cfg.getByPos(i, &tmp); - put(tmp); - } - } - - m_currentSection = curr; -} - -ConfigValues * -ConfigValuesFactory::extractCurrentSection(const ConfigValues::ConstIterator & cfg){ - ConfigValuesFactory * fac = new ConfigValuesFactory(20, 20); - Uint32 curr = cfg.m_currentSection; - - ConfigValues::Entry tmp; - for(Uint32 i = 0; i < 2 * cfg.m_cfg.m_size; i += 2){ - Uint32 keypart = cfg.m_cfg.m_values[i]; - const Uint32 sec = keypart & (KP_SECTION_MASK << KP_SECTION_SHIFT); - const Uint32 key = keypart & KP_KEYVAL_MASK; - if(sec == curr && key != CFV_KEY_PARENT){ - tmp.m_key = cfg.m_cfg.m_values[i]; - cfg.m_cfg.getByPos(i, &tmp); - tmp.m_key = key; - fac->put(tmp); - } - } - - ConfigValues * ret = fac->getConfigValues(); - delete fac; - return ret; -} - -ConfigValues * -ConfigValuesFactory::getConfigValues(){ - ConfigValues * ret = m_cfg; - m_cfg = create(10, 10); - return ret; -} - -static int -mod4(unsigned int i){ - int res = i + (4 - (i % 4)); - return res; -} - -Uint32 -ConfigValues::getPackedSize() const { - - Uint32 size = 0; - for(Uint32 i = 0; i < 2 * m_size; i += 2){ - Uint32 key = m_values[i]; - if(key != CFV_KEY_FREE){ - switch(::getTypeOf(key)){ - case IntType: - case SectionType: - size += 8; - break; - case Int64Type: - size += 12; - break; - case StringType: - size += 8; // key + len - size += mod4(strlen(* getString(m_values[i+1])) + 1); - break; - case InvalidType: - default: - abort(); - } - } - } - - return size + sizeof(Magic) + 4; // checksum also -} - -Uint32 -ConfigValues::pack(void * _dst, Uint32 _len) const { - Uint32 i; - char * dst = (char*)_dst; - memcpy(dst, Magic, sizeof(Magic)); dst += sizeof(Magic); - - for(i = 0; i < 2 * m_size; i += 2){ - Uint32 key = m_values[i]; - Uint32 val = m_values[i+1]; - if(key != CFV_KEY_FREE){ - switch(::getTypeOf(key)){ - case IntType: - case SectionType: - * (Uint32*)dst = htonl(key); dst += 4; - * (Uint32*)dst = htonl(val); dst += 4; - break; - case Int64Type:{ - Uint64 i64 = * get64(val); - Uint32 hi = (i64 >> 32); - Uint32 lo = (i64 & 0xFFFFFFFF); - * (Uint32*)dst = htonl(key); dst += 4; - * (Uint32*)dst = htonl(hi); dst += 4; - * (Uint32*)dst = htonl(lo); dst += 4; - } - break; - case StringType:{ - const char * str = * getString(val); - Uint32 len = strlen(str) + 1; - * (Uint32*)dst = htonl(key); dst += 4; - * (Uint32*)dst = htonl(len); dst += 4; - memcpy(dst, str, len); - memset(dst+len, 0, mod4(len) - len); - dst += mod4(len); - } - break; - case InvalidType: - default: - abort(); - } - } - } - - const Uint32 * sum = (Uint32*)_dst; - const Uint32 len = ((Uint32*)dst) - 
sum; - Uint32 chk = 0; - for(i = 0; i> 2); - const Uint32 * tmp = (const Uint32*)_src; - Uint32 chk = 0; - for(Uint32 i = 0; (i+1) 4){ - Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4; - keys++; - switch(::getTypeOf(tmp)){ - case ConfigValues::IntType: - case ConfigValues::SectionType: - src += 4; - break; - case ConfigValues::Int64Type: - src += 8; - data += 8; - break; - case ConfigValues::StringType:{ - Uint32 s_len = ntohl(* (const Uint32 *)src); - src += 4 + mod4(s_len); - data += sizeof(char*); - break; - } - default: - break; - } - } - expand(keys, data); - } - - src = save; - ConfigValues::Entry entry; - while(end - src > 4){ - Uint32 tmp = ntohl(* (const Uint32 *)src); src += 4; - entry.m_key = tmp & KP_MASK; - entry.m_type = ::getTypeOf(tmp); - switch(entry.m_type){ - case ConfigValues::IntType: - case ConfigValues::SectionType: - entry.m_int = ntohl(* (const Uint32 *)src); src += 4; - break; - case ConfigValues::Int64Type:{ - Uint64 hi = ntohl(* (const Uint32 *)src); src += 4; - Uint64 lo = ntohl(* (const Uint32 *)src); src += 4; - entry.m_int64 = (hi <<32) | lo; - } - break; - case ConfigValues::StringType:{ - Uint32 s_len = ntohl(* (const Uint32 *)src); src += 4; - size_t s_len2 = strlen((const char*)src); - if(s_len2 + 1 != s_len){ - DEBUG abort(); - return false; - } - - entry.m_string = (const char*)src; src+= mod4(s_len); - } - break; - case ConfigValues::InvalidType: - default: - DEBUG abort(); - return false; - } - if(!put(entry)){ - DEBUG abort(); - return false; - } - } - if(src != end){ - DEBUG abort(); - return false; - } - return true; -} - -#ifdef __TEST_CV_HASH_HPP - -int -main(void){ - srand(time(0)); - for(int t = 0; t<100; t++){ - const size_t len = directory(rand() % 1000); - - printf("size = %d\n", len); - unsigned * buf = new unsigned[len]; - for(size_t key = 0; key 0) - printf("size=%d key=%d pos(%d)=%d buf[%d]=%d\n", len, key, j, pos, k, buf[k]); - unique ++; - } - } - if(unique > 1){ - printf("key = %d size = %d not uniqe!!\n", key, len); - for(size_t k = 0; k - -#include - -#include -#include - -// -// PUBLIC -// -time_t -File_class::mtime(const char* aFileName) -{ - MY_STAT stmp; - time_t rc = 0; - - if (my_stat(aFileName, &stmp, MYF(0)) != NULL) { - rc = stmp.st_mtime; - } - - return rc; -} - -bool -File_class::exists(const char* aFileName) -{ - MY_STAT stmp; - - return (my_stat(aFileName, &stmp, MYF(0))!=NULL); -} - -off_t -File_class::size(FILE* f) -{ - MY_STAT s; - - // Note that my_fstat behaves *differently* than my_stat. ARGGGHH! - if (my_fstat(fileno(f), &s, MYF(0))) - return 0; - - return s.st_size; -} - -bool -File_class::rename(const char* currFileName, const char* newFileName) -{ - return ::rename(currFileName, newFileName) == 0 ? true : false; -} -bool -File_class::remove(const char* aFileName) -{ - return ::remove(aFileName) == 0 ? 
true : false; -} - -File_class::File_class() : - m_file(NULL), - m_fileMode("r") -{ -} - -File_class::File_class(const char* aFileName, const char* mode) : - m_file(NULL), - m_fileMode(mode) -{ - BaseString::snprintf(m_fileName, PATH_MAX, aFileName); -} - -bool -File_class::open() -{ - return open(m_fileName, m_fileMode); -} - -bool -File_class::open(const char* aFileName, const char* mode) -{ - if(m_fileName != aFileName){ - /** - * Only copy if it's not the same string - */ - BaseString::snprintf(m_fileName, PATH_MAX, aFileName); - } - m_fileMode = mode; - bool rc = true; - if ((m_file = ::fopen(m_fileName, m_fileMode))== NULL) - { - rc = false; - } - - return rc; -} -File_class::~File_class() -{ - close(); -} - -bool -File_class::remove() -{ - // Close the file first! - close(); - return File_class::remove(m_fileName); -} - -bool -File_class::close() -{ - bool rc = true; - int retval = 0; - - if (m_file != NULL) - { - ::fflush(m_file); - retval = ::fclose(m_file); - while ( (retval != 0) && (errno == EINTR) ){ - retval = ::fclose(m_file); - } - if( retval == 0){ - rc = true; - } - else { - rc = false; - ndbout_c("ERROR: Close file error in File.cpp for %s",strerror(errno)); - } - } - m_file = NULL; - - return rc; -} - -int -File_class::read(void* buf, size_t itemSize, size_t nitems) const -{ - return ::fread(buf, itemSize, nitems, m_file); -} - -int -File_class::readChar(char* buf, long start, long length) const -{ - return ::fread((void*)&buf[start], 1, length, m_file); -} - -int -File_class::readChar(char* buf) -{ - return readChar(buf, 0, strlen(buf)); -} - -int -File_class::write(const void* buf, size_t size_arg, size_t nitems) -{ - return ::fwrite(buf, size_arg, nitems, m_file); -} - -int -File_class::writeChar(const char* buf, long start, long length) -{ - return ::fwrite((const void*)&buf[start], sizeof(char), length, m_file); -} - -int -File_class::writeChar(const char* buf) -{ - return writeChar(buf, 0, ::strlen(buf)); -} - -off_t -File_class::size() const -{ - return File_class::size(m_file); -} - -const char* -File_class::getName() const -{ - return m_fileName; -} - -int -File_class::flush() const -{ - return ::fflush(m_file);; -} diff --git a/storage/ndb/src/common/util/InputStream.cpp b/storage/ndb/src/common/util/InputStream.cpp deleted file mode 100644 index 2033cc6fb0c..00000000000 --- a/storage/ndb/src/common/util/InputStream.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include "InputStream.hpp" -#include - -FileInputStream Stdin(stdin); - -FileInputStream::FileInputStream(FILE * file) - : f(file) { -} - -char* -FileInputStream::gets(char * buf, int bufLen){ - if(!feof(f)){ - return fgets(buf, bufLen, f); - } - return 0; -} - -SocketInputStream::SocketInputStream(NDB_SOCKET_TYPE socket, - unsigned read_timeout_ms) - : m_socket(socket) { - m_startover= true; - m_timeout_remain= m_timeout_ms = read_timeout_ms; - - m_timedout= false; -} - -char* -SocketInputStream::gets(char * buf, int bufLen) { - if(timedout()) - return 0; - assert(bufLen >= 2); - int offset= 0; - if(m_startover) - { - buf[0]= '\0'; - m_startover= false; - } - else - offset= strlen(buf); - - int time= 0; - int res = readln_socket(m_socket, m_timeout_remain, &time, - buf+offset, bufLen-offset, m_mutex); - - if(res >= 0) - m_timeout_remain-=time; - if(res == 0 || m_timeout_remain<=0) - { - m_timedout= true; - buf[0]=0; - return buf; - } - - m_startover= true; - - if(res == -1) - { - return 0; - } - - return buf; -} diff --git a/storage/ndb/src/common/util/Makefile.am b/storage/ndb/src/common/util/Makefile.am deleted file mode 100644 index 4c469187645..00000000000 --- a/storage/ndb/src/common/util/Makefile.am +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -noinst_LTLIBRARIES = libgeneral.la - -libgeneral_la_SOURCES = \ - File.cpp md5_hash.cpp Properties.cpp socket_io.cpp \ - SimpleProperties.cpp Parser.cpp InputStream.cpp \ - SocketServer.cpp SocketClient.cpp SocketAuthenticator.cpp\ - OutputStream.cpp NdbOut.cpp BaseString.cpp \ - NdbSqlUtil.cpp new.cpp \ - uucode.c random.c version.c \ - strdup.c \ - ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \ - Bitmask.cpp \ - ndb_rand.c - -EXTRA_PROGRAMS = testBitmask -testBitmask_SOURCES = testBitmask.cpp -testBitmask_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a - -testBitmask.cpp : Bitmask.cpp - rm -f testBitmask.cpp - @LN_CP_F@ Bitmask.cpp testBitmask.cpp - -testBitmask.o: $(testBitmask_SOURCES) - $(CXXCOMPILE) -c $(INCLUDES) -D__TEST_BITMASK__ $< - - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_util.mk.am - -windoze-dsp: libgeneral.dsp - -libgeneral.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-lib.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-lib.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(noinst_LTLIBRARIES) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(libgeneral_la_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LIB $(LDADD) diff --git a/storage/ndb/src/common/util/NdbOut.cpp b/storage/ndb/src/common/util/NdbOut.cpp deleted file mode 100644 index 8c6f94cec6c..00000000000 --- a/storage/ndb/src/common/util/NdbOut.cpp +++ /dev/null @@ -1,173 +0,0 @@ -/* Copyright (c) 2003-2005, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include - -static FileOutputStream ndbouts_fileoutputstream(stdout); -NdbOut ndbout(ndbouts_fileoutputstream); - -static const char * fms[] = { - "%d", "0x%02x", // Int8 - "%u", "0x%02x", // Uint8 - "%d", "0x%04x", // Int16 - "%u", "0x%04x", // Uint16 - "%d", "0x%08x", // Int32 - "%u", "0x%08x", // Uint32 - "%lld", "0x%016llx", // Int64 - "%llu", "0x%016llx", // Uint64 - "%llu", "0x%016llx" // UintPtr -}; - -NdbOut& -NdbOut::operator<<(Int8 v) { m_out->print(fms[0+isHex],(int)v); return *this;} -NdbOut& -NdbOut::operator<<(Uint8 v) { m_out->print(fms[2+isHex],(int)v); return *this;} -NdbOut& -NdbOut::operator<<(Int16 v) { m_out->print(fms[4+isHex],(int)v); return *this;} -NdbOut& -NdbOut::operator<<(Uint16 v) { m_out->print(fms[6+isHex],(int)v); return *this;} -NdbOut& -NdbOut::operator<<(Int32 v) { m_out->print(fms[8+isHex], v); return *this;} -NdbOut& -NdbOut::operator<<(Uint32 v) { m_out->print(fms[10+isHex], v); return *this;} -NdbOut& -NdbOut::operator<<(Int64 v) { m_out->print(fms[12+isHex], v); return *this;} -NdbOut& -NdbOut::operator<<(Uint64 v) { m_out->print(fms[14+isHex], v); return *this;} -NdbOut& -NdbOut::operator<<(unsigned long int v) { return *this << (Uint64) v; } - -NdbOut& -NdbOut::operator<<(const char* val){ m_out->print("%s", val ? val : "(null)"); return * this; } -NdbOut& -NdbOut::operator<<(const void* val){ m_out->print("%p", val); return * this; } -NdbOut& -NdbOut::operator<<(BaseString &val){ return *this << val.c_str(); } - -NdbOut& -NdbOut::operator<<(float val){ m_out->print("%f", (double)val); return * this;} -NdbOut& -NdbOut::operator<<(double val){ m_out->print("%f", val); return * this; } - -NdbOut& NdbOut::endline() -{ - isHex = 0; // Reset hex to normal, if user forgot this - m_out->println(""); - m_out->flush(); - return *this; -} - -NdbOut& NdbOut::flushline() -{ - m_out->flush(); - return *this; -} - -NdbOut& NdbOut::setHexFormat(int _format) -{ - isHex = (_format == 0 ? 
0 : 1); - return *this; -} - -NdbOut::NdbOut(OutputStream & out) - : m_out(& out) -{ - isHex = 0; -} - -NdbOut::~NdbOut() -{ -} - -void -NdbOut::print(const char * fmt, ...){ - va_list ap; - char buf[1000]; - - va_start(ap, fmt); - if (fmt != 0) - BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); - ndbout << buf; - va_end(ap); -} - -void -NdbOut::println(const char * fmt, ...){ - va_list ap; - char buf[1000]; - - va_start(ap, fmt); - if (fmt != 0) - BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); - ndbout << buf << endl; - va_end(ap); -} - -extern "C" -void -ndbout_c(const char * fmt, ...){ - va_list ap; - char buf[1000]; - - va_start(ap, fmt); - if (fmt != 0) - BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap); - ndbout << buf << endl; - va_end(ap); -} - -FilteredNdbOut::FilteredNdbOut(OutputStream & out, - int threshold, int level) - : NdbOut(out) { - m_level = level; - m_threshold = threshold; - m_org = &out; - m_null = new NullOutputStream(); - setLevel(level); -} - -FilteredNdbOut::~FilteredNdbOut(){ - delete m_null; -} - -void -FilteredNdbOut::setLevel(int i){ - m_level = i; - if(m_level >= m_threshold){ - m_out = m_org; - } else { - m_out = m_null; - } -} - -void -FilteredNdbOut::setThreshold(int i){ - m_threshold = i; - setLevel(m_level); -} - -int -FilteredNdbOut::getLevel() const { - return m_level; -} -int -FilteredNdbOut::getThreshold() const { - return m_threshold; -} - diff --git a/storage/ndb/src/common/util/NdbSqlUtil.cpp b/storage/ndb/src/common/util/NdbSqlUtil.cpp deleted file mode 100644 index c50d3a93e3c..00000000000 --- a/storage/ndb/src/common/util/NdbSqlUtil.cpp +++ /dev/null @@ -1,1016 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -/* - * Data types. The entries must be in the numerical order. 
- */ - -const NdbSqlUtil::Type -NdbSqlUtil::m_typeList[] = { - { // 0 - Type::Undefined, - NULL, - NULL - }, - { // 1 - Type::Tinyint, - cmpTinyint, - NULL - }, - { // 2 - Type::Tinyunsigned, - cmpTinyunsigned, - NULL - }, - { // 3 - Type::Smallint, - cmpSmallint, - NULL - }, - { // 4 - Type::Smallunsigned, - cmpSmallunsigned, - NULL - }, - { // 5 - Type::Mediumint, - cmpMediumint, - NULL - }, - { // 6 - Type::Mediumunsigned, - cmpMediumunsigned, - NULL - }, - { // 7 - Type::Int, - cmpInt, - NULL - }, - { // 8 - Type::Unsigned, - cmpUnsigned, - NULL - }, - { // 9 - Type::Bigint, - cmpBigint, - NULL - }, - { // 10 - Type::Bigunsigned, - cmpBigunsigned, - NULL - }, - { // 11 - Type::Float, - cmpFloat, - NULL - }, - { // 12 - Type::Double, - cmpDouble, - NULL - }, - { // 13 - Type::Olddecimal, - cmpOlddecimal, - NULL - }, - { // 14 - Type::Char, - cmpChar, - likeChar - }, - { // 15 - Type::Varchar, - cmpVarchar, - likeVarchar - }, - { // 16 - Type::Binary, - cmpBinary, - likeBinary - }, - { // 17 - Type::Varbinary, - cmpVarbinary, - likeVarbinary - }, - { // 18 - Type::Datetime, - cmpDatetime, - NULL - }, - { // 19 - Type::Date, - cmpDate, - NULL - }, - { // 20 - Type::Blob, - NULL, - NULL - }, - { // 21 - Type::Text, - NULL, - NULL - }, - { // 22 - Type::Bit, - cmpBit, - NULL - }, - { // 23 - Type::Longvarchar, - cmpLongvarchar, - likeLongvarchar - }, - { // 24 - Type::Longvarbinary, - cmpLongvarbinary, - likeLongvarbinary - }, - { // 25 - Type::Time, - cmpTime, - NULL - }, - { // 26 - Type::Year, - cmpYear, - NULL - }, - { // 27 - Type::Timestamp, - cmpTimestamp, - NULL - }, - { // 28 - Type::Olddecimalunsigned, - cmpOlddecimalunsigned, - NULL - }, - { // 29 - Type::Decimal, - cmpDecimal, - NULL - }, - { // 30 - Type::Decimalunsigned, - cmpDecimalunsigned, - NULL - } -}; - -const NdbSqlUtil::Type& -NdbSqlUtil::getType(Uint32 typeId) -{ - if (typeId < sizeof(m_typeList) / sizeof(m_typeList[0]) && - m_typeList[typeId].m_typeId != Type::Undefined) { - return m_typeList[typeId]; - } - return m_typeList[Type::Undefined]; -} - -const NdbSqlUtil::Type& -NdbSqlUtil::getTypeBinary(Uint32 typeId) -{ - switch (typeId) { - case Type::Char: - case Type::Varchar: - case Type::Binary: - case Type::Varbinary: - case Type::Longvarchar: - case Type::Longvarbinary: - typeId = Type::Binary; - break; - case Type::Text: - typeId = Type::Blob; - break; - default: - break; - } - return getType(typeId); -} - -/* - * Comparison functions. - */ - -int -NdbSqlUtil::cmpTinyint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Int8)) { - Int8 v1, v2; - memcpy(&v1, p1, sizeof(Int8)); - memcpy(&v2, p2, sizeof(Int8)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpTinyunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint8)) { - Uint8 v1, v2; - memcpy(&v1, p1, sizeof(Uint8)); - memcpy(&v2, p2, sizeof(Uint8)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpSmallint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Int16)) { - Int16 v1, v2; - memcpy(&v1, p1, sizeof(Int16)); - memcpy(&v2, p2, sizeof(Int16)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! 
full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpSmallunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint16)) { - Uint16 v1, v2; - memcpy(&v1, p1, sizeof(Uint16)); - memcpy(&v2, p2, sizeof(Uint16)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpMediumint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= 3) { - Int32 v1, v2; - v1 = sint3korr((const uchar*)p1); - v2 = sint3korr((const uchar*)p2); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpMediumunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= 3) { - Uint32 v1, v2; - v1 = uint3korr((const uchar*)p1); - v2 = uint3korr((const uchar*)p2); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpInt(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Int32)) { - Int32 v1, v2; - memcpy(&v1, p1, sizeof(Int32)); - memcpy(&v2, p2, sizeof(Int32)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpUnsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint32)) { - Uint32 v1, v2; - memcpy(&v1, p1, sizeof(Uint32)); - memcpy(&v2, p2, sizeof(Uint32)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpBigint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Int64)) { - Int64 v1, v2; - memcpy(&v1, p1, sizeof(Int64)); - memcpy(&v2, p2, sizeof(Int64)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpBigunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint64)) { - Uint64 v1, v2; - memcpy(&v1, p1, sizeof(Uint64)); - memcpy(&v2, p2, sizeof(Uint64)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpFloat(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(float)) { - float v1, v2; - memcpy(&v1, p1, sizeof(float)); - memcpy(&v2, p2, sizeof(float)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpDouble(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(double)) { - double v1, v2; - memcpy(&v1, p1, sizeof(double)); - memcpy(&v2, p2, sizeof(double)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! 
full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmp_olddecimal(const uchar* s1, const uchar* s2, unsigned n) -{ - int sgn = +1; - unsigned i = 0; - while (i < n) { - int c1 = s1[i]; - int c2 = s2[i]; - if (c1 == c2) { - if (c1 == '-') - sgn = -1; - } else if (c1 == '-') { - return -1; - } else if (c2 == '-') { - return +1; - } else if (c1 < c2) { - return -1 * sgn; - } else { - return +1 * sgn; - } - i++; - } - return 0; -} - -int -NdbSqlUtil::cmpOlddecimal(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (full) { - assert(n1 == n2); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - return cmp_olddecimal(v1, v2, n1); - } - return CmpUnknown; -} - -int -NdbSqlUtil::cmpOlddecimalunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (full) { - assert(n1 == n2); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - return cmp_olddecimal(v1, v2, n1); - } - return CmpUnknown; -} - -int -NdbSqlUtil::cmpDecimal(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // compare as binary strings - unsigned n = (n1 <= n2 ? n1 : n2); - int k = memcmp(v1, v2, n); - if (k == 0) { - k = (full ? n1 : n) - n2; - } - return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; -} - -int -NdbSqlUtil::cmpDecimalunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // compare as binary strings - unsigned n = (n1 <= n2 ? n1 : n2); - int k = memcmp(v1, v2, n); - if (k == 0) { - k = (full ? n1 : n) - n2; - } - return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; -} - -int -NdbSqlUtil::cmpChar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - // collation does not work on prefix for some charsets - assert(full); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // not const in MySQL - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - // compare with space padding - int k = (*cs->coll->strnncollsp)(cs, v1, n1, v2, n2, false); - return k < 0 ? -1 : k > 0 ? +1 : 0; -} - -int -NdbSqlUtil::cmpVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const unsigned lb = 1; - // collation does not work on prefix for some charsets - assert(full && n1 >= lb && n2 >= lb); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = *v1; - unsigned m2 = *v2; - if (m1 <= n1 - lb && m2 <= n2 - lb) { - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - // compare with space padding - int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false); - return k < 0 ? -1 : k > 0 ? +1 : 0; - } - // treat bad data as NULL - if (m1 > n1 - lb && m2 <= n2 - lb) - return -1; - if (m1 <= n1 - lb && m2 > n2 - lb) - return +1; - return 0; -} - -int -NdbSqlUtil::cmpBinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // compare as binary strings - unsigned n = (n1 <= n2 ? n1 : n2); - int k = memcmp(v1, v2, n); - if (k == 0) { - k = (full ? n1 : n) - n2; - } - return k < 0 ? -1 : k > 0 ? +1 : full ? 
0 : CmpUnknown; -} - -int -NdbSqlUtil::cmpVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const unsigned lb = 1; - if (n2 >= lb) { - assert(n1 >= lb); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = *v1; - unsigned m2 = *v2; - if (m1 <= n1 - lb && m2 <= n2 - lb) { - // compare as binary strings - unsigned m = (m1 <= m2 ? m1 : m2); - int k = memcmp(v1 + lb, v2 + lb, m); - if (k == 0) { - k = (full ? m1 : m) - m2; - } - return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; - } - // treat bad data as NULL - if (m1 > n1 - lb && m2 <= n2 - lb) - return -1; - if (m1 <= n1 - lb && m2 > n2 - lb) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpDatetime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Int64)) { - Int64 v1, v2; - memcpy(&v1, p1, sizeof(Int64)); - memcpy(&v2, p2, sizeof(Int64)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpDate(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ -#ifdef ndb_date_is_4_byte_native_int - if (n2 >= sizeof(Int32)) { - Int32 v1, v2; - memcpy(&v1, p1, sizeof(Int32)); - memcpy(&v2, p2, sizeof(Int32)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } -#else -#ifdef ndb_date_sol9x86_cc_xO3_madness - if (n2 >= 3) { - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // from Field_newdate::val_int - Uint64 j1 = uint3korr(v1); - Uint64 j2 = uint3korr(v2); - j1 = (j1 % 32L)+(j1 / 32L % 16L)*100L + (j1/(16L*32L))*10000L; - j2 = (j2 % 32L)+(j2 / 32L % 16L)*100L + (j2/(16L*32L))*10000L; - if (j1 < j2) - return -1; - if (j1 > j2) - return +1; - return 0; - } -#else - if (n2 >= 3) { - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - uint j1 = uint3korr(v1); - uint j2 = uint3korr(v2); - uint d1 = (j1 & 31); - uint d2 = (j2 & 31); - j1 = (j1 >> 5); - j2 = (j2 >> 5); - uint m1 = (j1 & 15); - uint m2 = (j2 & 15); - j1 = (j1 >> 4); - j2 = (j2 >> 4); - uint y1 = j1; - uint y2 = j2; - if (y1 < y2) - return -1; - if (y1 > y2) - return +1; - if (m1 < m2) - return -1; - if (m1 > m2) - return +1; - if (d1 < d2) - return -1; - if (d1 > d2) - return +1; - return 0; - } -#endif -#endif - assert(! full); - return CmpUnknown; -} - -// not supported -int -NdbSqlUtil::cmpBlob(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - assert(false); - return 0; -} - -// not supported -int -NdbSqlUtil::cmpText(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - assert(false); - return 0; -} - -int -NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - Uint32 n = (n1 < n2) ? n1 : n2; - int ret = memcmp(p1, p2, n); - return ret; -} - - -int -NdbSqlUtil::cmpTime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= 3) { - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - // from Field_time::val_int - Int32 j1 = sint3korr(v1); - Int32 j2 = sint3korr(v2); - if (j1 < j2) - return -1; - if (j1 > j2) - return +1; - return 0; - } - assert(! 
full); - return CmpUnknown; -} - -// not yet - -int -NdbSqlUtil::cmpLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const unsigned lb = 2; - // collation does not work on prefix for some charsets - assert(full && n1 >= lb && n2 >= lb); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = uint2korr(v1); - unsigned m2 = uint2korr(v2); - if (m1 <= n1 - lb && m2 <= n2 - lb) { - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - // compare with space padding - int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false); - return k < 0 ? -1 : k > 0 ? +1 : 0; - } - // treat bad data as NULL - if (m1 > n1 - lb && m2 <= n2 - lb) - return -1; - if (m1 <= n1 - lb && m2 > n2 - lb) - return +1; - return 0; -} - -int -NdbSqlUtil::cmpLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - const unsigned lb = 2; - if (n2 >= lb) { - assert(n1 >= lb); - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = uint2korr(v1); - unsigned m2 = uint2korr(v2); - if (m1 <= n1 - lb && m2 <= n2 - lb) { - // compare as binary strings - unsigned m = (m1 <= m2 ? m1 : m2); - int k = memcmp(v1 + lb, v2 + lb, m); - if (k == 0) { - k = (full ? m1 : m) - m2; - } - return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; - } - // treat bad data as NULL - if (m1 > n1 - lb && m2 <= n2 - lb) - return -1; - if (m1 <= n1 - lb && m2 > n2 - lb) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpYear(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint8)) { - Uint8 v1, v2; - memcpy(&v1, p1, sizeof(Uint8)); - memcpy(&v2, p2, sizeof(Uint8)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -int -NdbSqlUtil::cmpTimestamp(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) -{ - if (n2 >= sizeof(Uint32)) { - Uint32 v1, v2; - memcpy(&v1, p1, sizeof(Uint32)); - memcpy(&v2, p2, sizeof(Uint32)); - if (v1 < v2) - return -1; - if (v1 > v2) - return +1; - return 0; - } - assert(! full); - return CmpUnknown; -} - -// like - -static const int ndb_wild_prefix = '\\'; -static const int ndb_wild_one = '_'; -static const int ndb_wild_many = '%'; - -int -NdbSqlUtil::likeChar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - const char* v1 = (const char*)p1; - const char* v2 = (const char*)p2; - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - // strip end spaces to match (incorrect) MySQL behaviour - n1 = (*cs->cset->lengthsp)(cs, v1, n1); - int k = (*cs->coll->wildcmp)(cs, v1, v1 + n1, v2, v2 + n2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); - return k == 0 ? 
0 : +1; -} - -int -NdbSqlUtil::likeBinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - assert(info == 0); - return likeChar(&my_charset_bin, p1, n1, p2, n2); -} - -int -NdbSqlUtil::likeVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - const unsigned lb = 1; - if (n1 >= lb) { - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = *v1; - unsigned m2 = n2; - if (lb + m1 <= n1) { - const char* w1 = (const char*)v1 + lb; - const char* w2 = (const char*)v2; - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - int k = (*cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); - return k == 0 ? 0 : +1; - } - } - return -1; -} - -int -NdbSqlUtil::likeVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - assert(info == 0); - return likeVarchar(&my_charset_bin, p1, n1, p2, n2); -} - -int -NdbSqlUtil::likeLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - const unsigned lb = 2; - if (n1 >= lb) { - const uchar* v1 = (const uchar*)p1; - const uchar* v2 = (const uchar*)p2; - unsigned m1 = uint2korr(v1); - unsigned m2 = n2; - if (lb + m1 <= n1) { - const char* w1 = (const char*)v1 + lb; - const char* w2 = (const char*)v2; - CHARSET_INFO* cs = (CHARSET_INFO*)(info); - int k = (*cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many); - return k == 0 ? 0 : +1; - } - } - return -1; -} - -int -NdbSqlUtil::likeLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2) -{ - assert(info == 0); - return likeLongvarchar(&my_charset_bin, p1, n1, p2, n2); -} - -// check charset - -uint -NdbSqlUtil::check_column_for_pk(Uint32 typeId, const void* info) -{ - const Type& type = getType(typeId); - switch (type.m_typeId) { - case Type::Char: - case Type::Varchar: - case Type::Longvarchar: - { - const CHARSET_INFO *cs = (const CHARSET_INFO*)info; - if(cs != 0 && - cs->cset != 0 && - cs->coll != 0 && - cs->coll->strnxfrm != 0 && - cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY) - return 0; - else - return 743; - } - break; - case Type::Undefined: - case Type::Blob: - case Type::Text: - case Type::Bit: - break; - default: - return 0; - } - return 906; -} - -uint -NdbSqlUtil::check_column_for_hash_index(Uint32 typeId, const void* info) -{ - return check_column_for_pk(typeId, info); -} - -uint -NdbSqlUtil::check_column_for_ordered_index(Uint32 typeId, const void* info) -{ - const Type& type = getType(typeId); - if (type.m_cmp == NULL) - return false; - switch (type.m_typeId) { - case Type::Char: - case Type::Varchar: - case Type::Longvarchar: - { - const CHARSET_INFO *cs = (const CHARSET_INFO*)info; - if (cs != 0 && - cs->cset != 0 && - cs->coll != 0 && - cs->coll->strnxfrm != 0 && - cs->coll->strnncollsp != 0 && - cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY) - return 0; - else - return 743; - } - break; - case Type::Undefined: - case Type::Blob: - case Type::Text: - case Type::Bit: // can be fixed - break; - default: - return 0; - } - return 906; -} - -// utilities - -bool -NdbSqlUtil::get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len) -{ - const unsigned char* const src = (const unsigned char*)p; - switch (typeId) { - case NdbSqlUtil::Type::Varchar: - case NdbSqlUtil::Type::Varbinary: - lb = 1; - if (attrlen >= lb) { - len = src[0]; - if (attrlen >= lb + len) - return true; - } - break; - case 
NdbSqlUtil::Type::Longvarchar: - case NdbSqlUtil::Type::Longvarbinary: - lb = 2; - if (attrlen >= lb) { - len = src[0] + (src[1] << 8); - if (attrlen >= lb + len) - return true; - } - break; - default: - lb = 0; - len = attrlen; - return true; - break; - } - return false; -} - -// workaround - -int -NdbSqlUtil::strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen) -{ - unsigned char nsp[20]; // native space char - unsigned char xsp[20]; // strxfrm-ed space char -#ifdef VM_TRACE - memset(nsp, 0x1f, sizeof(nsp)); - memset(xsp, 0x1f, sizeof(xsp)); -#endif - // convert from unicode codepoint for space - int n1 = (*cs->cset->wc_mb)(cs, (my_wc_t)0x20, nsp, nsp + sizeof(nsp)); - if (n1 <= 0) - return -1; - // strxfrm to binary - int n2 = (*cs->coll->strnxfrm)(cs, xsp, sizeof(xsp), nsp, n1); - if (n2 <= 0) - return -1; - // XXX bug workaround - strnxfrm may not write full string - memset(dst, 0x0, dstLen); - // strxfrm argument string - returns no error indication - int n3 = (*cs->coll->strnxfrm)(cs, dst, dstLen, src, srcLen); - // pad with strxfrm-ed space chars - int n4 = n3; - while (n4 < (int)dstLen) { - dst[n4] = xsp[(n4 - n3) % n2]; - n4++; - } - // no check for partial last - return dstLen; -} diff --git a/storage/ndb/src/common/util/OutputStream.cpp b/storage/ndb/src/common/util/OutputStream.cpp deleted file mode 100644 index 37487be29f0..00000000000 --- a/storage/ndb/src/common/util/OutputStream.cpp +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include - -FileOutputStream::FileOutputStream(FILE * file){ - f = file; -} - -int -FileOutputStream::print(const char * fmt, ...){ - va_list ap; - va_start(ap, fmt); - const int ret = vfprintf(f, fmt, ap); - va_end(ap); - return ret; -} - -int -FileOutputStream::println(const char * fmt, ...){ - va_list ap; - va_start(ap, fmt); - const int ret = vfprintf(f, fmt, ap); - va_end(ap); - return ret + fprintf(f, "\n"); -} - -SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket, - unsigned write_timeout_ms){ - m_socket = socket; - m_timeout_remain= m_timeout_ms = write_timeout_ms; - m_timedout= false; -} - -int -SocketOutputStream::print(const char * fmt, ...){ - va_list ap; - - if(timedout()) - return -1; - - int time= 0; - va_start(ap, fmt); - int ret = vprint_socket(m_socket, m_timeout_ms, &time, fmt, ap); - va_end(ap); - - if(ret >= 0) - m_timeout_remain-=time; - if((ret < 0 && errno==ETIMEDOUT) || m_timeout_remain<=0) - { - m_timedout= true; - ret= -1; - } - - return ret; -} -int -SocketOutputStream::println(const char * fmt, ...){ - va_list ap; - - if(timedout()) - return -1; - - int time= 0; - va_start(ap, fmt); - int ret = vprintln_socket(m_socket, m_timeout_ms, &time, fmt, ap); - va_end(ap); - - if(ret >= 0) - m_timeout_remain-=time; - if ((ret < 0 && errno==ETIMEDOUT) || m_timeout_remain<=0) - { - m_timedout= true; - ret= -1; - } - - return ret; -} diff --git a/storage/ndb/src/common/util/Parser.cpp b/storage/ndb/src/common/util/Parser.cpp deleted file mode 100644 index b497b320a36..00000000000 --- a/storage/ndb/src/common/util/Parser.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include "Parser.hpp" -#include -#include - -#undef DEBUG -#define DEBUG(x) ndbout << x << endl; - -static void trim(char * str); - -class ParseInputStream : public InputStream { -public: - ParseInputStream(InputStream & in, bool trim = true, char eofComment = '#'); - - char* gets(char * buf, int bufLen); - void push_back(const char *); - void set_mutex(NdbMutex *m) { in.set_mutex(m); }; -private: - InputStream & in; - char * buffer; -}; - -ParseInputStream::ParseInputStream(InputStream & _in, - bool /* unused */, - char /* unused */) - : in(_in){ - buffer = 0; -} - -char* -ParseInputStream::gets(char * buf, int bufLen){ - if(buffer != 0){ - strncpy(buf, buffer, bufLen); - free(buffer); - buffer = 0; - return buf; - } - char *t = in.gets(buf, bufLen); - return t; -} - -void -ParseInputStream::push_back(const char * str){ - if(buffer != 0) - abort(); - buffer = strdup(str); -} - -ParserImpl::ParserImpl(const DummyRow * rows, InputStream & in, - bool b_cmd, bool b_empty, bool b_iarg) - : m_rows(rows), input(* new ParseInputStream(in)) -{ - m_breakOnCmd = b_cmd; - m_breakOnEmpty = b_empty; - m_breakOnInvalidArg = b_iarg; -} - -ParserImpl::~ParserImpl(){ - delete & input; -} - -static -bool -Empty(const char * str){ - if(str == 0) - return true; - const int len = strlen(str); - if(len == 0) - return false; - for(int i = 0; im_status = Parser::NoLine; - ctx->m_tokenBuffer[0]= '\0'; - DBUG_RETURN(false); - } - - if(Empty(ctx->m_currentToken)){ - ctx->m_status = Parser::EmptyLine; - DBUG_RETURN(false); - } - - trim(ctx->m_currentToken); - ctx->m_currentCmd = matchCommand(ctx, ctx->m_currentToken, m_rows); - if(ctx->m_currentCmd == 0){ - ctx->m_status = Parser::UnknownCommand; - DBUG_RETURN(false); - } - - Properties * p = new Properties(); - - bool invalidArgument = false; - ctx->m_currentToken = input.gets(ctx->m_tokenBuffer, sz); - - while((! * stop) && - !Eof(ctx->m_currentToken) && - !Empty(ctx->m_currentToken)){ - if(ctx->m_currentToken[0] != 0){ - trim(ctx->m_currentToken); - if(!parseArg(ctx, ctx->m_currentToken, ctx->m_currentCmd + 1, p)){ - delete p; - invalidArgument = true; - break; - } - } - ctx->m_currentToken = input.gets(ctx->m_tokenBuffer, sz); - } - - if(invalidArgument){ - char buf[sz]; - char * tmp; - if(!m_breakOnInvalidArg){ - do { - tmp = input.gets(buf, sz); - } while((! 
* stop) && !Eof(tmp) && !Empty(tmp)); - } - DBUG_RETURN(false); - } - - if(* stop){ - delete p; - ctx->m_status = Parser::ExternalStop; - DBUG_RETURN(false); - } - - if(!checkMandatory(ctx, p)){ - ctx->m_status = Parser::MissingMandatoryArgument; - delete p; - DBUG_RETURN(false); - } - - /** - * Add alias to properties - */ - for(unsigned i = 0; im_aliasUsed.size(); i++){ - const ParserRow * alias = ctx->m_aliasUsed[i]; - Properties tmp; - tmp.put("name", alias->name); - tmp.put("realName", alias->realName); - p->put("$ALIAS", i, &tmp); - } - p->put("$ALIAS", ctx->m_aliasUsed.size()); - - ctx->m_status = Parser::Ok; - * pDst = p; - DBUG_RETURN(true); -} - -const ParserImpl::DummyRow* -ParserImpl::matchCommand(Context* ctx, const char* buf, const DummyRow rows[]){ - const char * name = buf; - const DummyRow * tmp = &rows[0]; - while(tmp->name != 0 && name != 0){ - if(strcmp(tmp->name, name) == 0){ - if(tmp->type == DummyRow::Cmd) - return tmp; - if(tmp->type == DummyRow::CmdAlias){ - if(ctx != 0) - ctx->m_aliasUsed.push_back(tmp); - name = tmp->realName; - tmp = &rows[0]; - continue; - } - } - tmp++; - } - return 0; -} - -const ParserImpl::DummyRow* -ParserImpl::matchArg(Context* ctx, const char * buf, const DummyRow rows[]){ - const char * name = buf; - const DummyRow * tmp = &rows[0]; - while(tmp->name != 0){ - const DummyRow::Type t = tmp->type; - if(t != DummyRow::Arg && t != DummyRow::ArgAlias && t !=DummyRow::CmdAlias) - break; - if(t !=DummyRow::CmdAlias && strcmp(tmp->name, name) == 0){ - if(tmp->type == DummyRow::Arg){ - return tmp; - } - if(tmp->type == DummyRow::ArgAlias){ - if(ctx != 0) - ctx->m_aliasUsed.push_back(tmp); - name = tmp->realName; - tmp = &rows[0]; - continue; - } - } - tmp++; - } - return 0; -} - -bool -ParserImpl::parseArg(Context * ctx, - char * buf, - const DummyRow * rows, - Properties * p){ - char * name; - char * value; - if(!split(buf, &name, &value)){ - ctx->m_status = Parser::InvalidArgumentFormat; - return false; - } - const DummyRow * arg = matchArg(ctx, name, rows); - if(arg == 0){ - ctx->m_status = Parser::UnknownArgument; - return false; - } - - switch(arg->argType){ - case DummyRow::String: - if(p->put(arg->name, value)) - return true; - break; - case DummyRow::Int:{ - Uint32 i; - int c = sscanf(value, "%u", &i); - if(c != 1){ - ctx->m_status = Parser::TypeMismatch; - return false; - } - if(p->put(arg->name, i)) - return true; - break; - } - - case DummyRow::Properties: { - abort(); - break; - } - default: - ctx->m_status = Parser::UnknownArgumentType; - return false; - } - if(p->getPropertiesErrno() == E_PROPERTIES_ELEMENT_ALREADY_EXISTS){ - ctx->m_status = Parser::ArgumentGivenTwice; - return false; - } - - abort(); -} - -bool -ParserImpl::checkMandatory(Context* ctx, const Properties* props){ - const DummyRow * tmp = &ctx->m_currentCmd[1]; - while(tmp->name != 0 && tmp->type == DummyRow::Arg){ - if(tmp->argRequired == ParserRow::Mandatory && - !props->contains(tmp->name)){ - ctx->m_status = Parser::MissingMandatoryArgument; - ctx->m_currentArg = tmp; - return false; - } - tmp++; - } - return true; -} - -template class Vector*>; diff --git a/storage/ndb/src/common/util/Properties.cpp b/storage/ndb/src/common/util/Properties.cpp deleted file mode 100644 index 8e36062dcea..00000000000 --- a/storage/ndb/src/common/util/Properties.cpp +++ /dev/null @@ -1,1136 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General 
Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include - -#include -#include - -static -char * f_strdup(const char * s){ - if(!s) return 0; - return strdup(s); -} - -/** - * Note has to be a multiple of 4 bytes - */ -const char Properties::version[] = { 2, 0, 0, 1, 1, 1, 1, 4 }; -const char Properties::delimiter = ':'; - -/** - * PropertyImpl - */ -struct PropertyImpl{ - PropertiesType valueType; - const char * name; - void * value; - - ~PropertyImpl(); - PropertyImpl(const char * name, Uint32 value); - PropertyImpl(const char * name, Uint64 value); - PropertyImpl(const char * name, const char * value); - PropertyImpl(const char * name, const Properties * value); - - static PropertyImpl * copyPropertyImpl(const PropertyImpl &); -}; - -/** - * PropertiesImpl - */ -class PropertiesImpl { - PropertiesImpl(const PropertiesImpl &); // Not implemented - PropertiesImpl& operator=(const PropertiesImpl&); // Not implemented -public: - PropertiesImpl(Properties *, bool case_insensitive); - PropertiesImpl(Properties *, const PropertiesImpl &); - ~PropertiesImpl(); - - Properties * properties; - - Uint32 size; - Uint32 items; - PropertyImpl **content; - - bool m_insensitive; - int (* compare)(const char *s1, const char *s2); - - void setCaseInsensitiveNames(bool value); - void grow(int sizeToAdd); - - PropertyImpl * get(const char * name) const; - PropertyImpl * put(PropertyImpl *); - void remove(const char * name); - - Uint32 getPackedSize(Uint32 pLen) const; - bool pack(Uint32 *& buf, const char * prefix, Uint32 prefixLen) const; - bool unpack(const Uint32 * buf, Uint32 &bufLen, Properties * top, int items); - - Uint32 getTotalItems() const; - - void setErrno(Uint32 pErr, Uint32 osErr = 0){ - properties->setErrno(pErr, osErr); - } - - const char * getProps(const char * name, const PropertiesImpl ** impl) const; - const char * getPropsPut(const char * name, PropertiesImpl ** impl); -}; - -/** - * Methods for Property - */ -Property::Property(const char * name, Uint32 value){ - impl = new PropertyImpl(name, value); -} - -Property::Property(const char * name, const char * value){ - impl = new PropertyImpl(name, value); -} - -Property::Property(const char * name, const class Properties * value){ - impl = new PropertyImpl(name, value); - - ((Properties*)impl->value)->setCaseInsensitiveNames(value->getCaseInsensitiveNames()); -} - -Property::~Property(){ - delete impl; -} - -/** - * Methods for Properties - */ -Properties::Properties(bool case_insensitive){ - parent = 0; - impl = new PropertiesImpl(this, case_insensitive); -} - -Properties::Properties(const Properties & org){ - parent = 0; - impl = new PropertiesImpl(this, * org.impl); -} - -Properties::Properties(const Property * anArray, int arrayLen){ - impl = new PropertiesImpl(this, false); - - put(anArray, arrayLen); -} - -Properties::~Properties(){ - clear(); - delete impl; -} - -void -Properties::put(const Property * anArray, int arrayLen){ - if(anArray == 0) - return; - for(int i = 0; iput(anArray[i].impl); -} - -template -bool 
-put(PropertiesImpl * impl, const char * name, T value, bool replace){ - if(name == 0){ - impl->setErrno(E_PROPERTIES_INVALID_NAME); - return false; - } - - PropertiesImpl * tmp = 0; - const char * short_name = impl->getPropsPut(name, &tmp); - - if(tmp == 0){ - impl->setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(tmp->get(short_name) != 0){ - if(replace){ - tmp->remove(short_name); - } else { - impl->setErrno(E_PROPERTIES_ELEMENT_ALREADY_EXISTS); - return false; - } - } - return tmp->put(new PropertyImpl(short_name, value)); -} - - -bool -Properties::put(const char * name, Uint32 value, bool replace){ - return ::put(impl, name, value, replace); -} - -bool -Properties::put64(const char * name, Uint64 value, bool replace){ - return ::put(impl, name, value, replace); -} - -bool -Properties::put(const char * name, const char * value, bool replace){ - return ::put(impl, name, value, replace); -} - -bool -Properties::put(const char * name, const Properties * value, bool replace){ - return ::put(impl, name, value, replace); -} - -bool -Properties::getTypeOf(const char * name, PropertiesType * type) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - setErrno(E_PROPERTIES_OK); - * type = nvp->valueType; - return true; -} - -bool -Properties::contains(const char * name) const { - PropertyImpl * nvp = impl->get(name); - return nvp != 0; -} - -bool -Properties::get(const char * name, Uint32 * value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(nvp->valueType == PropertiesType_Uint32){ - * value = * (Uint32 *)nvp->value; - setErrno(E_PROPERTIES_OK); - return true; - } - - if(nvp->valueType == PropertiesType_Uint64){ - Uint64 tmp = * (Uint64 *)nvp->value; - Uint64 max = 1; max <<= 32; - if(tmp < max){ - * value = (Uint32)tmp; - setErrno(E_PROPERTIES_OK); - return true; - } - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -bool -Properties::get(const char * name, Uint64 * value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(nvp->valueType == PropertiesType_Uint32){ - Uint32 tmp = * (Uint32 *)nvp->value; - * value = (Uint64)tmp; - setErrno(E_PROPERTIES_OK); - return true; - } - - if(nvp->valueType == PropertiesType_Uint64){ - * value = * (Uint64 *)nvp->value; - setErrno(E_PROPERTIES_OK); - return true; - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -bool -Properties::get(const char * name, const char ** value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(nvp->valueType == PropertiesType_char){ - * value = (const char *)nvp->value; - setErrno(E_PROPERTIES_OK); - return true; - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -bool -Properties::get(const char * name, BaseString& value) const { - const char *tmp = ""; - bool ret; - ret = get(name, &tmp); - value.assign(tmp); - return ret; -} - -bool -Properties::get(const char * name, const Properties ** value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - if(nvp->valueType == PropertiesType_Properties){ - * value = (const Properties *)nvp->value; - setErrno(E_PROPERTIES_OK); - return true; - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -bool 
-Properties::getCopy(const char * name, char ** value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(nvp->valueType == PropertiesType_char){ - * value = f_strdup((const char *)nvp->value); - setErrno(E_PROPERTIES_OK); - return true; - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -bool -Properties::getCopy(const char * name, Properties ** value) const { - PropertyImpl * nvp = impl->get(name); - if(nvp == 0){ - setErrno(E_PROPERTIES_NO_SUCH_ELEMENT); - return false; - } - - if(nvp->valueType == PropertiesType_Properties){ - * value = new Properties(* (const Properties *)nvp->value); - setErrno(E_PROPERTIES_OK); - return true; - } - setErrno(E_PROPERTIES_INVALID_TYPE); - return false; -} - -void -Properties::clear(){ - while(impl->items > 0) - impl->remove(impl->content[0]->name); -} - -void -Properties::remove(const char * name) { - impl->remove(name); -} - -void -Properties::print(FILE * out, const char * prefix) const{ - char buf[1024]; - if(prefix == 0) - buf[0] = 0; - else - strncpy(buf, prefix, 1024); - - for(unsigned int i = 0; iitems; i++){ - switch(impl->content[i]->valueType){ - case PropertiesType_Uint32: - fprintf(out, "%s%s = (Uint32) %d\n", buf, impl->content[i]->name, - *(Uint32 *)impl->content[i]->value); - break; - case PropertiesType_Uint64: - fprintf(out, "%s%s = (Uint64) %lld\n", buf, impl->content[i]->name, - *(Uint64 *)impl->content[i]->value); - break; - case PropertiesType_char: - fprintf(out, "%s%s = (char*) \"%s\"\n", buf, impl->content[i]->name, - (char *)impl->content[i]->value); - break; - case PropertiesType_Properties: - char buf2 [1024]; - BaseString::snprintf(buf2, sizeof(buf2), "%s%s%c",buf, impl->content[i]->name, - Properties::delimiter); - ((Properties *)impl->content[i]->value)->print(out, buf2); - break; - } - } -} - -Properties::Iterator::Iterator(const Properties* prop) : - m_prop(prop), - m_iterator(0) { -} - -const char* -Properties::Iterator::first() { - m_iterator = 0; - return next(); -} - -const char* -Properties::Iterator::next() { - if (m_iterator < m_prop->impl->items) - return m_prop->impl->content[m_iterator++]->name; - else - return NULL; -} - -Uint32 -Properties::getPackedSize() const { - Uint32 sz = 0; - - sz += sizeof(version); // Version id of properties object - sz += 4; // No Of Items - sz += 4; // Checksum - - return sz + impl->getPackedSize(0); -} - -static -Uint32 -computeChecksum(const Uint32 * buf, Uint32 words){ - Uint32 sum = 0; - for(unsigned int i = 0; igetTotalItems()); - buf++; - bool res = impl->pack(buf, "", 0); - if(!res) - return res; - - * buf = htonl(computeChecksum(bufStart, (buf - bufStart))); - - return true; -} - -bool -Properties::unpack(const Uint32 * buf, Uint32 bufLen){ - const Uint32 * bufStart = buf; - Uint32 bufLenOrg = bufLen; - - if(bufLen < sizeof(version)){ - setErrno(E_PROPERTIES_INVALID_BUFFER_TO_SHORT); - return false; - } - - if(memcmp(buf, version, sizeof(version)) != 0){ - setErrno(E_PROPERTIES_INVALID_VERSION_WHILE_UNPACKING); - return false; - } - bufLen -= sizeof(version); - - // Note that version must be a multiple of 4 - buf += (sizeof(version) / 4); - - if(bufLen < 4){ - setErrno(E_PROPERTIES_INVALID_BUFFER_TO_SHORT); - return false; - } - - Uint32 totalItems = ntohl(* buf); - buf++; bufLen -= 4; - bool res = impl->unpack(buf, bufLen, this, totalItems); - if(!res) - return res; - - Uint32 sum = computeChecksum(bufStart, (bufLenOrg-bufLen)/4); - if(sum != 
ntohl(bufStart[(bufLenOrg-bufLen)/4])){ - setErrno(E_PROPERTIES_INVALID_CHECKSUM); - return false; - } - return true; -} - -/** - * Methods for PropertiesImpl - */ -PropertiesImpl::PropertiesImpl(Properties * p, bool case_insensitive){ - this->properties = p; - items = 0; - size = 25; - content = new PropertyImpl * [size]; - setCaseInsensitiveNames(case_insensitive); -} - -PropertiesImpl::PropertiesImpl(Properties * p, const PropertiesImpl & org){ - this->properties = p; - this->size = org.size; - this->items = org.items; - this->m_insensitive = org.m_insensitive; - this->compare = org.compare; - content = new PropertyImpl * [size]; - for(unsigned int i = 0; iitems; i++) { - if((* compare)(tmp->content[i]->name, short_name) == 0) - return tmp->content[i]; - } - - return 0; -} - -PropertyImpl * -PropertiesImpl::put(PropertyImpl * nvp){ - if(items == size) - grow(size); - content[items] = nvp; - - items ++; - - if(nvp->valueType == PropertiesType_Properties){ - ((Properties*)nvp->value)->parent = properties; - } - return nvp; -} - -void -PropertiesImpl::remove(const char * name){ - for(unsigned int i = 0; iname, name) == 0){ - delete content[i]; - memmove(&content[i], &content[i+1], (items-i-1)*sizeof(PropertyImpl *)); - items --; - return; - } - } -} - -Uint32 -PropertiesImpl::getTotalItems() const { - int ret = 0; - for(unsigned int i = 0; ivalueType == PropertiesType_Properties){ - ret += ((Properties*)content[i]->value)->impl->getTotalItems(); - } else { - ret ++; - } - return ret; -} - -const char * -PropertiesImpl::getProps(const char * name, - const PropertiesImpl ** impl) const { - const char * ret = name; - const char * tmp = strchr(name, Properties::delimiter); - if(tmp == 0){ - * impl = this; - return ret; - } else { - Uint32 sz = tmp - name; - char * tmp2 = (char*)malloc(sz + 1); - memcpy(tmp2, name, sz); - tmp2[sz] = 0; - - PropertyImpl * nvp = get(tmp2); - - free(tmp2); - - if(nvp == 0){ - * impl = 0; - return 0; - } - if(nvp->valueType != PropertiesType_Properties){ - * impl = 0; - return name; - } - return ((Properties*)nvp->value)->impl->getProps(tmp+1, impl); - } -} - -const char * -PropertiesImpl::getPropsPut(const char * name, - PropertiesImpl ** impl) { - const char * ret = name; - const char * tmp = strchr(name, Properties::delimiter); - if(tmp == 0){ - * impl = this; - return ret; - } else { - Uint32 sz = tmp - name; - char * tmp2 = (char*)malloc(sz + 1); - memcpy(tmp2, name, sz); - tmp2[sz] = 0; - - PropertyImpl * nvp = get(tmp2); - - if(nvp == 0){ - Properties * tmpP = new Properties(); - PropertyImpl * tmpPI = new PropertyImpl(tmp2, tmpP); - PropertyImpl * nvp2 = put(tmpPI); - - delete tmpP; - free(tmp2); - return ((Properties*)nvp2->value)->impl->getPropsPut(tmp+1, impl); - } - free(tmp2); - if(nvp->valueType != PropertiesType_Properties){ - * impl = 0; - return name; - } - return ((Properties*)nvp->value)->impl->getPropsPut(tmp+1, impl); - } -} - -int -mod4(unsigned int i){ - int res = i + (4 - (i % 4)); - return res; -} - -Uint32 -PropertiesImpl::getPackedSize(Uint32 pLen) const { - Uint32 sz = 0; - for(unsigned int i = 0; ivalueType == PropertiesType_Properties){ - Properties * p = (Properties*)content[i]->value; - sz += p->impl->getPackedSize(pLen+strlen(content[i]->name)+1); - } else { - sz += 4; // Type - sz += 4; // Name Len - sz += 4; // Value Len - sz += mod4(pLen + strlen(content[i]->name)); // Name - switch(content[i]->valueType){ - case PropertiesType_char: - sz += mod4(strlen((char *)content[i]->value)); - break; - case PropertiesType_Uint32: - sz += 
mod4(4); - break; - case PropertiesType_Uint64: - sz += mod4(8); - break; - case PropertiesType_Properties: - default: - assert(0); - } - } - } - return sz; -} - -struct CharBuf { - char * buffer; - Uint32 bufLen; - Uint32 contentLen; - - CharBuf(){ - buffer = 0; - bufLen = 0; - contentLen = 0; - } - - ~CharBuf(){ - free(buffer); - } - - void clear() { contentLen = 0;} - bool add(const char * str, Uint32 strLen){ - if(!expand(contentLen + strLen + 1)) - return false; - memcpy(&buffer[contentLen], str, strLen); - contentLen += strLen; - buffer[contentLen] = 0; - return true; - } - - bool add(char c){ - return add(&c, 1); - } - - bool expand(Uint32 newSize){ - if(newSize >= bufLen){ - - char * tmp = (char*)malloc(newSize + 1024); - memset(tmp, 0, newSize + 1024); - if(tmp == 0) - return false; - if(contentLen > 0) - memcpy(tmp, buffer, contentLen); - if(buffer != 0) - free(buffer); - buffer = tmp; - bufLen = newSize + 1024; - } - return true; - } -}; - -bool -PropertiesImpl::pack(Uint32 *& buf, const char * prefix, Uint32 pLen) const { - CharBuf charBuf; - - for(unsigned int i = 0; iname); - - if(content[i]->valueType == PropertiesType_Properties){ - charBuf.clear(); - if(!charBuf.add(prefix, pLen)){ - properties->setErrno(E_PROPERTIES_ERROR_MALLOC_WHILE_PACKING, - errno); - return false; - } - - if(!charBuf.add(content[i]->name, strLenName)){ - properties->setErrno(E_PROPERTIES_ERROR_MALLOC_WHILE_PACKING, - errno); - return false; - } - - if(!charBuf.add(Properties::delimiter)){ - properties->setErrno(E_PROPERTIES_ERROR_MALLOC_WHILE_PACKING, - errno); - return false; - } - - if(!((Properties*)(content[i]->value))->impl->pack(buf, - charBuf.buffer, - charBuf.contentLen)){ - - return false; - } - continue; - } - - Uint32 valLenData = 0; - Uint32 valLenWrite = 0; - Uint32 sz = 4 + 4 + 4 + mod4(pLen + strLenName); - switch(content[i]->valueType){ - case PropertiesType_Uint32: - valLenData = 4; - break; - case PropertiesType_Uint64: - valLenData = 8; - break; - case PropertiesType_char: - valLenData = strlen((char *)content[i]->value); - break; - case PropertiesType_Properties: - assert(0); - } - valLenWrite = mod4(valLenData); - sz += valLenWrite; - - * (buf + 0) = htonl(content[i]->valueType); - * (buf + 1) = htonl(pLen + strLenName); - * (buf + 2) = htonl(valLenData); - - char * valBuf = (char*)(buf + 3); - char * nameBuf = (char*)(buf + 3 + (valLenWrite / 4)); - - memset(valBuf, 0, sz-12); - - switch(content[i]->valueType){ - case PropertiesType_Uint32: - * (Uint32 *)valBuf = htonl(* (Uint32 *)content[i]->value); - break; - case PropertiesType_Uint64:{ - Uint64 val = * (Uint64 *)content[i]->value; - Uint32 hi = (val >> 32); - Uint32 lo = (val & 0xFFFFFFFF); - * (Uint32 *)valBuf = htonl(hi); - * (Uint32 *)(valBuf + 4) = htonl(lo); - } - break; - case PropertiesType_char: - memcpy(valBuf, content[i]->value, strlen((char*)content[i]->value)); - break; - case PropertiesType_Properties: - assert(0); - } - if(pLen > 0) - memcpy(nameBuf, prefix, pLen); - memcpy(nameBuf + pLen, content[i]->name, strLenName); - - buf += (sz / 4); - } - - return true; -} - -bool -PropertiesImpl::unpack(const Uint32 * buf, Uint32 &bufLen, Properties * top, - int _items){ - CharBuf charBuf; - while(_items > 0){ - Uint32 tmp[3]; - - if(bufLen <= 12){ - top->setErrno(E_PROPERTIES_BUFFER_TO_SMALL_WHILE_UNPACKING); - return false; - } - - tmp[0] = ntohl(buf[0]); - tmp[1] = ntohl(buf[1]); - tmp[2] = ntohl(buf[2]); - buf += 3; - bufLen -= 12; - - PropertiesType pt = (PropertiesType)tmp[0]; - Uint32 nameLen = tmp[1]; - Uint32 
valueLen = tmp[2]; - Uint32 nameLenRead = mod4(nameLen); - Uint32 valueLenRead = mod4(valueLen); - - Uint32 sz = nameLenRead + valueLenRead; - if(bufLen < sz){ - top->setErrno(E_PROPERTIES_BUFFER_TO_SMALL_WHILE_UNPACKING); - return false; - } - - if(!charBuf.expand(sz)){ - top->setErrno(E_PROPERTIES_ERROR_MALLOC_WHILE_UNPACKING, errno); - return false; - } - - memcpy(charBuf.buffer, buf, sz); - buf += (sz / 4); - bufLen -= sz ; - - char * valBuf = charBuf.buffer; - char * nameBuf = charBuf.buffer + valueLenRead; - - nameBuf[nameLen] = 0; - valBuf[valueLen] = 0; - - bool res3 = false; - switch(pt){ - case PropertiesType_Uint32: - res3 = top->put(nameBuf, ntohl(* (Uint32 *)valBuf), true); - break; - case PropertiesType_Uint64:{ - Uint64 hi = ntohl(* (Uint32 *)valBuf); - Uint64 lo = ntohl(* (Uint32 *)(valBuf + 4)); - res3 = top->put64(nameBuf, (hi << 32) + lo, true); - } - break; - case PropertiesType_char: - res3 = top->put(nameBuf, valBuf, true); - break; - case PropertiesType_Properties: - assert(0); - } - if(!res3){ - return false; - } - _items--; - } - return true; -} - -PropertyImpl::~PropertyImpl(){ - free((char*)name); - switch(valueType){ - case PropertiesType_Uint32: - delete (Uint32 *)value; - break; - case PropertiesType_Uint64: - delete (Uint64 *)value; - break; - case PropertiesType_char: - free((char *)value); - break; - case PropertiesType_Properties: - delete (Properties *)value; - break; - } -} - -PropertyImpl * -PropertyImpl::copyPropertyImpl(const PropertyImpl & org){ - switch(org.valueType){ - case PropertiesType_Uint32: - return new PropertyImpl(org.name, * (Uint32 *)org.value); - case PropertiesType_Uint64: - return new PropertyImpl(org.name, * (Uint64 *)org.value); - break; - case PropertiesType_char: - return new PropertyImpl(org.name, (char *)org.value); - break; - case PropertiesType_Properties: - return new PropertyImpl(org.name, (Properties *)org.value); - break; - default: - assert(0); - } - return 0; -} - -PropertyImpl::PropertyImpl(const char * _name, Uint32 _value){ - this->name = f_strdup(_name); - this->value = new Uint32; - * ((Uint32 *)this->value) = _value; - this->valueType = PropertiesType_Uint32; -} - -PropertyImpl::PropertyImpl(const char * _name, Uint64 _value){ - this->name = f_strdup(_name); - this->value = new Uint64; - * ((Uint64 *)this->value) = _value; - this->valueType = PropertiesType_Uint64; -} - -PropertyImpl::PropertyImpl(const char * _name, const char * _value){ - this->name = f_strdup(_name); - this->value = f_strdup(_value); - this->valueType = PropertiesType_char; - -} - -PropertyImpl::PropertyImpl(const char * _name, const Properties * _value){ - this->name = f_strdup(_name); - this->value = new Properties(* _value); - this->valueType = PropertiesType_Properties; -} - -const Uint32 E_PROPERTIES_OK = 0; -const Uint32 E_PROPERTIES_INVALID_NAME = 1; -const Uint32 E_PROPERTIES_NO_SUCH_ELEMENT = 2; -const Uint32 E_PROPERTIES_INVALID_TYPE = 3; -const Uint32 E_PROPERTIES_ELEMENT_ALREADY_EXISTS = 4; - -const Uint32 E_PROPERTIES_ERROR_MALLOC_WHILE_PACKING = 5; -const Uint32 E_PROPERTIES_INVALID_VERSION_WHILE_UNPACKING = 6; -const Uint32 E_PROPERTIES_INVALID_BUFFER_TO_SHORT = 7; -const Uint32 E_PROPERTIES_ERROR_MALLOC_WHILE_UNPACKING = 8; -const Uint32 E_PROPERTIES_INVALID_CHECKSUM = 9; -const Uint32 E_PROPERTIES_BUFFER_TO_SMALL_WHILE_UNPACKING = 10; - -/** - * These are methods that used to be inline - * - * But Diab 4.1f could not compile -release with to many inlines - */ -void -Properties::setErrno(Uint32 pErr, Uint32 osErr) const { - 
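/*
 * A minimal, hypothetical sketch (not part of the original file) of the
 * buffer layout that the pack()/unpack() routines above agree on: three
 * 32-bit header words in network byte order (type, name length, value
 * length), followed by the value rounded up to a 4-byte boundary, then
 * the prefix+name bytes, also rounded up (what mod4() appears to do).
 */
#include <arpa/inet.h>   /* ntohl */
#include <stdint.h>

struct packed_entry_view {
  uint32_t type;       /* PropertiesType_* */
  uint32_t name_len;   /* strlen(prefix) + strlen(name) */
  uint32_t value_len;  /* payload bytes before padding */
  const char *value;   /* starts right after the three header words */
  const char *name;    /* starts after the padded value */
};

/* Round up to the next multiple of 4 bytes. */
static uint32_t round4(uint32_t n) { return (n + 3) & ~(uint32_t)3; }

/* Parse one entry from buf; returns the number of 32-bit words consumed. */
static uint32_t parse_entry(const uint32_t *buf, struct packed_entry_view *out)
{
  out->type      = ntohl(buf[0]);
  out->name_len  = ntohl(buf[1]);
  out->value_len = ntohl(buf[2]);
  out->value     = (const char *)(buf + 3);
  out->name      = out->value + round4(out->value_len);
  return 3 + (round4(out->value_len) + round4(out->name_len)) / 4;
}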
if(parent != 0){ - parent->setErrno(pErr, osErr); - return ; - } - - /** - * propErrno & osErrno used to be mutable, - * but diab didn't know what mutable meant. - */ - *((Uint32*)&propErrno) = pErr; - *((Uint32*)&osErrno) = osErr; -} - -/** - * Inlined get/put(name, no, ...) - methods - */ - -bool -Properties::put(const char * name, Uint32 no, Uint32 val, bool replace){ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = put(tmp, val, replace); - free(tmp); - return res; -} - -bool -Properties::put64(const char * name, Uint32 no, Uint64 val, bool replace){ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = put(tmp, val, replace); - free(tmp); - return res; -} - - -bool -Properties::put(const char * name, Uint32 no, const char * val, bool replace){ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = put(tmp, val, replace); - free(tmp); - return res; -} - - -bool -Properties::put(const char * name, Uint32 no, const Properties * val, - bool replace){ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = put(tmp, val, replace); - free(tmp); - return res; -} - - -bool -Properties::getTypeOf(const char * name, Uint32 no, - PropertiesType * type) const { - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = getTypeOf(tmp, type); - free(tmp); - return res; -} - -bool -Properties::contains(const char * name, Uint32 no) const { - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = contains(tmp); - free(tmp); - return res; -} - -bool -Properties::get(const char * name, Uint32 no, Uint32 * value) const{ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = get(tmp, value); - free(tmp); - return res; -} - -bool -Properties::get(const char * name, Uint32 no, Uint64 * value) const{ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = get(tmp, value); - free(tmp); - return res; -} - - -bool -Properties::get(const char * name, Uint32 no, const char ** value) const { - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = get(tmp, value); - free(tmp); - return res; -} - - -bool -Properties::get(const char * name, Uint32 no, const Properties ** value) const{ - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = get(tmp, value); - free(tmp); - return res; -} - - -bool -Properties::getCopy(const char * name, Uint32 no, char ** value) const { - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = getCopy(tmp, value); - free(tmp); - return res; -} - - -bool -Properties::getCopy(const char * name, Uint32 no, Properties ** value) const { - size_t tmp_len = strlen(name)+20; - char * tmp = (char*)malloc(tmp_len); - 
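/*
 * All of the put()/get()/contains() overloads taking a (name, no) pair
 * reduce to the same idea: format the numeric suffix into a temporary
 * "name_no" key and delegate to the plain-string overload. A stand-alone
 * sketch of that convention (hypothetical helper, not in the original):
 */
#include <cstdio>
#include <string>

static std::string indexed_key(const char *name, unsigned no)
{
  /* The original allocates strlen(name) + 20 bytes, enough for the '_'
     separator, a 32-bit decimal value and the trailing NUL. */
  char suffix[16];
  std::snprintf(suffix, sizeof(suffix), "_%u", no);
  return std::string(name) + suffix;
}

/* e.g. indexed_key("node", 3) yields "node_3", which is then handed to
   put(const char *, Uint32, bool), get(const char *, Uint32 *), etc. */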
BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no); - bool res = getCopy(tmp, value); - free(tmp); - return res; -} - -void -Properties::setCaseInsensitiveNames(bool value){ - impl->setCaseInsensitiveNames(value); -} - -bool -Properties::getCaseInsensitiveNames() const { - return impl->m_insensitive; -} - -template bool put(PropertiesImpl *, const char *, Uint32, bool); -template bool put(PropertiesImpl *, const char *, Uint64, bool); -template bool put(PropertiesImpl *, const char *, const char *, bool); -template bool put(PropertiesImpl *, const char *, const Properties*, bool); diff --git a/storage/ndb/src/common/util/SimpleProperties.cpp b/storage/ndb/src/common/util/SimpleProperties.cpp deleted file mode 100644 index bb8c5821cd9..00000000000 --- a/storage/ndb/src/common/util/SimpleProperties.cpp +++ /dev/null @@ -1,530 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include -#include - -bool -SimpleProperties::Writer::first(){ - return reset(); -} - -bool -SimpleProperties::Writer::add(Uint16 key, Uint32 value){ - Uint32 head = Uint32Value; - head <<= 16; - head += key; - if(!putWord(htonl(head))) - return false; - - return putWord(htonl(value)); -} - -bool -SimpleProperties::Writer::add(const char * value, int len){ - const Uint32 valLen = (len + 3) / 4; - - if ((len % 4) == 0) - return putWords((Uint32*)value, valLen); - - const Uint32 putLen= valLen - 1; - if (!putWords((Uint32*)value, putLen)) - return false; - - // Special handling of last bytes - union { - Uint32 lastWord; - char lastBytes[4]; - } tmp; - tmp.lastWord =0 ; - memcpy(tmp.lastBytes, - value + putLen*4, - len - putLen*4); - return putWord(tmp.lastWord); -} - -bool -SimpleProperties::Writer::add(Uint16 key, const char * value){ - Uint32 head = StringValue; - head <<= 16; - head += key; - if(!putWord(htonl(head))) - return false; - Uint32 strLen = strlen(value) + 1; // Including NULL-byte - if(!putWord(htonl(strLen))) - return false; - - return add(value, (int)strLen); - -} - -bool -SimpleProperties::Writer::add(Uint16 key, const void* value, int len){ - Uint32 head = BinaryValue; - head <<= 16; - head += key; - if(!putWord(htonl(head))) - return false; - if(!putWord(htonl(len))) - return false; - - return add((const char*)value, len); -} - -SimpleProperties::Reader::Reader(){ - m_itemLen = 0; -} - -bool -SimpleProperties::Reader::first(){ - reset(); - m_itemLen = 0; - return readValue(); -} - -bool -SimpleProperties::Reader::next(){ - return readValue(); -} - -bool -SimpleProperties::Reader::valid() const { - return m_type != InvalidValue; -} - -Uint16 -SimpleProperties::Reader::getKey() const{ - return m_key; -} - -Uint16 -SimpleProperties::Reader::getValueLen() const { - switch(m_type){ - case Uint32Value: - return 4; - case StringValue: - case BinaryValue: - return m_strLen; - case InvalidValue: - 
return 0; - } - return 0; -} - -SimpleProperties::ValueType -SimpleProperties::Reader::getValueType() const { - return m_type; -} - -Uint32 -SimpleProperties::Reader::getUint32() const { - return m_ui32_value; -} - -char * -SimpleProperties::Reader::getString(char * dst) const { - if(peekWords((Uint32*)dst, m_itemLen)) - return dst; - return 0; -} - -bool -SimpleProperties::Reader::readValue(){ - if(!step(m_itemLen)){ - m_type = InvalidValue; - return false; - } - - Uint32 tmp; - if(!getWord(&tmp)){ - m_type = InvalidValue; - return false; - } - - tmp = ntohl(tmp); - m_key = tmp & 0xFFFF; - m_type = (SimpleProperties::ValueType)(tmp >> 16); - switch(m_type){ - case Uint32Value: - m_itemLen = 1; - if(!peekWord(&m_ui32_value)) - return false; - m_ui32_value = ntohl(m_ui32_value); - return true; - case StringValue: - case BinaryValue: - if(!getWord(&tmp)) - return false; - m_strLen = ntohl(tmp); - m_itemLen = (m_strLen + 3)/4; - return true; - default: - m_itemLen = 0; - m_type = InvalidValue; - return false; - } -} - -SimpleProperties::UnpackStatus -SimpleProperties::unpack(Reader & it, void * dst, - const SP2StructMapping _map[], Uint32 mapSz, - bool ignoreMinMax, - bool ignoreUnknownKeys){ - do { - if(!it.valid()) - break; - - bool found = false; - Uint16 key = it.getKey(); - for(Uint32 i = 0; i _map[i].maxValue) - return ValueTooHigh; - } - * ((Uint32 *)_dst) = val; - break; - } - case BinaryValue: - case StringValue:{ - unsigned len = it.getValueLen(); - if(len < _map[i].minValue) - return ValueTooLow; - if(len > _map[i].maxValue) - return ValueTooHigh; - it.getString(_dst); - break; - } - default: - abort(); - } - break; - } - } - if(!found && !ignoreUnknownKeys) - return UnknownKey; - } while(it.next()); - - return Eof; -} - -SimpleProperties::UnpackStatus -SimpleProperties::pack(Writer & it, const void * __src, - const SP2StructMapping _map[], Uint32 mapSz, - bool ignoreMinMax){ - - const char * _src = (const char *)__src; - - for(Uint32 i = 0; i _map[i].maxValue) - return ValueTooHigh; - } - ok = it.add(_map[i].Key, val); - } - break; - case SimpleProperties::BinaryValue:{ - const char * src_len = _src + _map[i].Length_Offset; - Uint32 len = *((Uint32*)src_len); - if(!ignoreMinMax){ - if(len > _map[i].maxValue) - return ValueTooHigh; - } - ok = it.add(_map[i].Key, src, len); - break; - } - case SimpleProperties::StringValue: - if(!ignoreMinMax){ - size_t len = strlen(src); - if(len > _map[i].maxValue) - return ValueTooHigh; - } - ok = it.add(_map[i].Key, src); - break; - } - if(!ok) - return OutOfMemory; - } - - return Eof; -} - -void -SimpleProperties::Reader::printAll(NdbOut& ndbout){ - char tmp[1024]; - for(first(); valid(); next()){ - switch(getValueType()){ - case SimpleProperties::Uint32Value: - ndbout << "Key: " << getKey() - << " value(" << getValueLen() << ") : " - << getUint32() << endl; - break; - case SimpleProperties::BinaryValue: - case SimpleProperties::StringValue: - if(getValueLen() < 1024){ - getString(tmp); - ndbout << "Key: " << getKey() - << " value(" << getValueLen() << ") : " - << "\"" << tmp << "\"" << endl; - } else { - ndbout << "Key: " << getKey() - << " value(" << getValueLen() << ") : " - << "\"" << "" << "\"" << endl; - - } - break; - default: - ndbout << "Unknown type for key: " << getKey() - << " type: " << (Uint32)getValueType() << endl; - } - } -} - -SimplePropertiesLinearReader::SimplePropertiesLinearReader -(const Uint32 * src, Uint32 len){ - m_src = src; - m_len = len; - m_pos = 0; - first(); -} - -void -SimplePropertiesLinearReader::reset() { - 
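/*
 * A small, hypothetical sketch (not part of the original file) of the
 * head-word convention the Writer and Reader above share: the value type
 * occupies the upper 16 bits, the key the lower 16, and the word is
 * stored in network byte order.
 */
#include <arpa/inet.h>
#include <stdint.h>

static uint32_t sp_make_head(uint16_t type, uint16_t key)
{
  return htonl(((uint32_t)type << 16) | key);
}

static void sp_split_head(uint32_t head, uint16_t *type, uint16_t *key)
{
  uint32_t h = ntohl(head);
  *key  = (uint16_t)(h & 0xFFFF);
  *type = (uint16_t)(h >> 16);
}

/* Uint32Value payloads follow as a single htonl()'d word; String and
   Binary values are preceded by a length word and padded to whole words. */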
m_pos = 0; -} - -bool -SimplePropertiesLinearReader::step(Uint32 len){ - m_pos += len; - return m_pos < m_len; -} - -bool -SimplePropertiesLinearReader::getWord(Uint32 * dst) { - if(m_pos 0;} - -bool -LinearWriter::putWord(Uint32 val){ - if(m_pos < m_len){ - m_src[m_pos++] = val; - return true; - } - return false; -} - -bool -LinearWriter::putWords(const Uint32 * src, Uint32 len){ - if(m_pos + len <= m_len){ - memcpy(&m_src[m_pos], src, 4 * len); - m_pos += len; - return true; - } - return false; -} - -Uint32 -LinearWriter::getWordsUsed() const { return m_pos;} - -UtilBufferWriter::UtilBufferWriter(UtilBuffer & b) - : m_buf(b) -{ - reset(); -} - -bool UtilBufferWriter::reset() { m_buf.clear(); return true;} - -bool -UtilBufferWriter::putWord(Uint32 val){ - return (m_buf.append(&val, 4) == 0); -} - -bool -UtilBufferWriter::putWords(const Uint32 * src, Uint32 len){ - return (m_buf.append(src, 4 * len) == 0); -} - - -Uint32 -UtilBufferWriter::getWordsUsed() const { return m_buf.length() / 4;} - -#if 0 -LinearPagesReader::LinearPagesReader(const Uint32 * base, - Uint32 pageSize, - Uint32 headerSize, - Uint32 noOfPages, - Uint32 len){ - m_base = base; - m_pageSz = pageSize; - m_noOfPages = noOfPages; - m_pageHeaderSz = headerSize; - m_len = len; - reset(); -} - -void -LinearPagesReader::reset() { m_pos = 0;} - -bool -LinearPagesReader::step(Uint32 len){ - m_pos += len; - return m_pos < m_len; -} - -bool -LinearPagesReader::getWord(Uint32 * dst) { - if(m_pos - -#include -#include -#include -#include -#include - -SocketAuthSimple::SocketAuthSimple(const char *username, const char *passwd) { - if (username) - m_username= strdup(username); - else - m_username= 0; - if (passwd) - m_passwd= strdup(passwd); - else - m_passwd= 0; -} - -SocketAuthSimple::~SocketAuthSimple() -{ - if (m_passwd) - free((void*)m_passwd); - if (m_username) - free((void*)m_username); -} - -bool SocketAuthSimple::client_authenticate(int sockfd) -{ - SocketOutputStream s_output(sockfd); - SocketInputStream s_input(sockfd); - - if (m_username) - s_output.println("%s", m_username); - else - s_output.println(""); - - if (m_passwd) - s_output.println("%s", m_passwd); - else - s_output.println(""); - - char buf[16]; - if (s_input.gets(buf, 16) == 0) return false; - if (strncmp("ok", buf, 2) == 0) - return true; - - return false; -} - -bool SocketAuthSimple::server_authenticate(int sockfd) -{ - - SocketOutputStream s_output(sockfd); - SocketInputStream s_input(sockfd); - - char buf[256]; - - if (s_input.gets(buf, 256) == 0) return false; - buf[255]= 0; - if (m_username) - free((void*)m_username); - m_username= strdup(buf); - - if (s_input.gets(buf, 256) == 0) return false; - buf[255]= 0; - if (m_passwd) - free((void*)m_passwd); - m_passwd= strdup(buf); - - s_output.println("ok"); - - return true; -} diff --git a/storage/ndb/src/common/util/SocketClient.cpp b/storage/ndb/src/common/util/SocketClient.cpp deleted file mode 100644 index 2c2a39a4b01..00000000000 --- a/storage/ndb/src/common/util/SocketClient.cpp +++ /dev/null @@ -1,203 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -#include -#include - -SocketClient::SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa) -{ - m_auth= sa; - m_port= port; - m_server_name= server_name ? strdup(server_name) : 0; - m_sockfd= NDB_INVALID_SOCKET; - m_connect_timeout_sec= 0; -} - -SocketClient::~SocketClient() -{ - if (m_server_name) - free(m_server_name); - if (m_sockfd != NDB_INVALID_SOCKET) - NDB_CLOSE_SOCKET(m_sockfd); - if (m_auth) - delete m_auth; -} - -bool -SocketClient::init() -{ - if (m_sockfd != NDB_INVALID_SOCKET) - NDB_CLOSE_SOCKET(m_sockfd); - - if (m_server_name) - { - memset(&m_servaddr, 0, sizeof(m_servaddr)); - m_servaddr.sin_family = AF_INET; - m_servaddr.sin_port = htons(m_port); - // Convert ip address presentation format to numeric format - if (Ndb_getInAddr(&m_servaddr.sin_addr, m_server_name)) - return false; - } - - m_sockfd= socket(AF_INET, SOCK_STREAM, 0); - if (m_sockfd == NDB_INVALID_SOCKET) { - return false; - } - - DBUG_PRINT("info",("NDB_SOCKET: %d", m_sockfd)); - - return true; -} - -int -SocketClient::bind(const char* bindaddress, unsigned short localport) -{ - if (m_sockfd == NDB_INVALID_SOCKET) - return -1; - - struct sockaddr_in local; - memset(&local, 0, sizeof(local)); - local.sin_family = AF_INET; - local.sin_port = htons(localport); - // Convert ip address presentation format to numeric format - if (Ndb_getInAddr(&local.sin_addr, bindaddress)) - { - return errno ? errno : EINVAL; - } - - const int on = 1; - if (setsockopt(m_sockfd, SOL_SOCKET, SO_REUSEADDR, - (const char*)&on, sizeof(on)) == -1) { - - int ret = errno; - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return ret; - } - - if (::bind(m_sockfd, (struct sockaddr*)&local, sizeof(local)) == -1) - { - int ret = errno; - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return ret; - } - - return 0; -} - -NDB_SOCKET_TYPE -SocketClient::connect(const char *toaddress, unsigned short toport) -{ - fd_set rset, wset; - struct timeval tval; - int r; - bool use_timeout; - SOCKOPT_OPTLEN_TYPE len; - int flags; - - if (m_sockfd == NDB_INVALID_SOCKET) - { - if (!init()) { -#ifdef VM_TRACE - ndbout << "SocketClient::connect() failed " << m_server_name << " " << m_port << endl; -#endif - return NDB_INVALID_SOCKET; - } - } - - if (toaddress) - { - if (m_server_name) - free(m_server_name); - m_server_name = strdup(toaddress); - m_port = toport; - memset(&m_servaddr, 0, sizeof(m_servaddr)); - m_servaddr.sin_family = AF_INET; - m_servaddr.sin_port = htons(toport); - // Convert ip address presentation format to numeric format - if (Ndb_getInAddr(&m_servaddr.sin_addr, m_server_name)) - return NDB_INVALID_SOCKET; - } - - flags= fcntl(m_sockfd, F_GETFL, 0); - fcntl(m_sockfd, F_SETFL, flags | O_NONBLOCK); - - r= ::connect(m_sockfd, (struct sockaddr*) &m_servaddr, sizeof(m_servaddr)); - - if (r == 0) - goto done; // connected immediately. - - if (r < 0 && (errno != EINPROGRESS)) { - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return NDB_INVALID_SOCKET; - } - - FD_ZERO(&rset); - FD_SET(m_sockfd, &rset); - wset= rset; - tval.tv_sec= m_connect_timeout_sec; - tval.tv_usec= 0; - use_timeout= m_connect_timeout_sec; - - if ((r= select(m_sockfd+1, &rset, &wset, NULL, - use_timeout? 
&tval : NULL)) == 0) - { - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return NDB_INVALID_SOCKET; - } - - if (FD_ISSET(m_sockfd, &rset) || FD_ISSET(m_sockfd, &wset)) - { - len= sizeof(r); - if (getsockopt(m_sockfd, SOL_SOCKET, SO_ERROR, &r, &len) < 0 || r) - { - // Solaris got an error... different than others - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return NDB_INVALID_SOCKET; - } - } - else - { - // select error, probably m_sockfd not set. - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return NDB_INVALID_SOCKET; - } - -done: - fcntl(m_sockfd, F_SETFL, flags); - - if (m_auth) { - if (!m_auth->client_authenticate(m_sockfd)) - { - NDB_CLOSE_SOCKET(m_sockfd); - m_sockfd= NDB_INVALID_SOCKET; - return NDB_INVALID_SOCKET; - } - } - NDB_SOCKET_TYPE sockfd= m_sockfd; - m_sockfd= NDB_INVALID_SOCKET; - - return sockfd; -} diff --git a/storage/ndb/src/common/util/SocketServer.cpp b/storage/ndb/src/common/util/SocketServer.cpp deleted file mode 100644 index 6c634886ad8..00000000000 --- a/storage/ndb/src/common/util/SocketServer.cpp +++ /dev/null @@ -1,357 +0,0 @@ -/* Copyright (c) 2003-2006, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -#include - -#include -#include -#include -#include - -#define DEBUG(x) ndbout << x << endl; - -SocketServer::SocketServer(unsigned maxSessions) : - m_sessions(10), - m_services(5) -{ - m_thread = 0; - m_stopThread = false; - m_maxSessions = maxSessions; -} - -SocketServer::~SocketServer() { - unsigned i; - for(i = 0; i 32 ? 32 : m_maxSessions) == -1){ - DBUG_PRINT("error",("listen() - %d - %s", - errno, strerror(errno))); - NDB_CLOSE_SOCKET(sock); - DBUG_RETURN(false); - } - - ServiceInstance i; - i.m_socket = sock; - i.m_service = service; - m_services.push_back(i); - - *port = ntohs(servaddr.sin_port); - - DBUG_RETURN(true); -} - -void -SocketServer::doAccept(){ - fd_set readSet, exceptionSet; - FD_ZERO(&readSet); - FD_ZERO(&exceptionSet); - - m_services.lock(); - int maxSock = 0; - for (unsigned i = 0; i < m_services.size(); i++){ - const NDB_SOCKET_TYPE s = m_services[i].m_socket; - FD_SET(s, &readSet); - FD_SET(s, &exceptionSet); - maxSock = (maxSock > s ? 
maxSock : s); - } - struct timeval timeout; - timeout.tv_sec = 1; - timeout.tv_usec = 0; - - if(select(maxSock + 1, &readSet, 0, &exceptionSet, &timeout) > 0){ - for (unsigned i = 0; i < m_services.size(); i++){ - ServiceInstance & si = m_services[i]; - - if(FD_ISSET(si.m_socket, &readSet)){ - NDB_SOCKET_TYPE childSock = accept(si.m_socket, 0, 0); - if(childSock == NDB_INVALID_SOCKET){ - continue; - } - - SessionInstance s; - s.m_service = si.m_service; - s.m_session = si.m_service->newSession(childSock); - if(s.m_session != 0) - { - m_session_mutex.lock(); - m_sessions.push_back(s); - startSession(m_sessions.back()); - m_session_mutex.unlock(); - } - - continue; - } - - if(FD_ISSET(si.m_socket, &exceptionSet)){ - DEBUG("socket in the exceptionSet"); - continue; - } - } - } - m_services.unlock(); -} - -extern "C" -void* -socketServerThread_C(void* _ss){ - SocketServer * ss = (SocketServer *)_ss; - ss->doRun(); - return 0; -} - -void -SocketServer::startServer(){ - m_threadLock.lock(); - if(m_thread == 0 && m_stopThread == false){ - m_thread = NdbThread_Create(socketServerThread_C, - (void**)this, - 32768, - "NdbSockServ", - NDB_THREAD_PRIO_LOW); - } - m_threadLock.unlock(); -} - -void -SocketServer::stopServer(){ - m_threadLock.lock(); - if(m_thread != 0){ - m_stopThread = true; - - void * res; - NdbThread_WaitFor(m_thread, &res); - NdbThread_Destroy(&m_thread); - m_thread = 0; - } - m_threadLock.unlock(); -} - -void -SocketServer::doRun(){ - - while(!m_stopThread){ - m_session_mutex.lock(); - checkSessionsImpl(); - if(m_sessions.size() < m_maxSessions){ - m_session_mutex.unlock(); - doAccept(); - } else { - m_session_mutex.unlock(); - NdbSleep_MilliSleep(200); - } - } -} - -void -SocketServer::startSession(SessionInstance & si){ - si.m_thread = NdbThread_Create(sessionThread_C, - (void**)si.m_session, - 32768, - "NdbSock_Session", - NDB_THREAD_PRIO_LOW); -} - -void -SocketServer::foreachSession(void (*func)(SocketServer::Session*, void *), void *data) -{ - m_session_mutex.lock(); - for(int i = m_sessions.size() - 1; i >= 0; i--){ - (*func)(m_sessions[i].m_session, data); - } - m_session_mutex.unlock(); -} - -void -SocketServer::checkSessions() -{ - m_session_mutex.lock(); - checkSessionsImpl(); - m_session_mutex.unlock(); -} - -void -SocketServer::checkSessionsImpl() -{ - for(int i = m_sessions.size() - 1; i >= 0; i--) - { - if(m_sessions[i].m_session->m_stopped) - { - if(m_sessions[i].m_thread != 0) - { - void* ret; - NdbThread_WaitFor(m_sessions[i].m_thread, &ret); - NdbThread_Destroy(&m_sessions[i].m_thread); - } - m_sessions[i].m_session->stopSession(); - delete m_sessions[i].m_session; - m_sessions.erase(i); - } - } -} - -void -SocketServer::stopSessions(bool wait){ - int i; - m_session_mutex.lock(); - for(i = m_sessions.size() - 1; i>=0; i--) - { - m_sessions[i].m_session->stopSession(); - m_sessions[i].m_session->m_stop = true; // to make sure - } - m_session_mutex.unlock(); - - for(i = m_services.size() - 1; i>=0; i--) - m_services[i].m_service->stopSessions(); - - if(wait){ - m_session_mutex.lock(); - while(m_sessions.size() > 0){ - checkSessionsImpl(); - m_session_mutex.unlock(); - NdbSleep_MilliSleep(100); - m_session_mutex.lock(); - } - m_session_mutex.unlock(); - } -} - -/***** Session code ******/ - -extern "C" -void* -sessionThread_C(void* _sc){ - SocketServer::Session * si = (SocketServer::Session *)_sc; - - /** - * may have m_stopped set if we're transforming a mgm - * connection into a transporter connection. 
- */ - if(!si->m_stopped) - { - if(!si->m_stop){ - si->m_stopped = false; - si->runSession(); - } else { - NDB_CLOSE_SOCKET(si->m_socket); - } - } - - si->m_stopped = true; - return 0; -} - -template class MutexVector; -template class Vector; diff --git a/storage/ndb/src/common/util/basestring_vsnprintf.c b/storage/ndb/src/common/util/basestring_vsnprintf.c deleted file mode 100644 index bfe7d7ca7a2..00000000000 --- a/storage/ndb/src/common/util/basestring_vsnprintf.c +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifdef __sgi -/* define on IRIX to get posix compliant vsnprintf */ -#define _XOPEN_SOURCE 500 -#endif -#include -#include -#include - -#ifdef _WINDOWS -#define SNPRINTF_RETURN_TRUNC -#define snprintf _snprintf -#define vsnprintf _vsnprintf -#endif - -int -basestring_snprintf(char *str, size_t size, const char *format, ...) -{ - int ret; - va_list ap; - va_start(ap, format); - ret= basestring_vsnprintf(str, size, format, ap); - va_end(ap); - return(ret); -} - -#ifdef SNPRINTF_RETURN_TRUNC -static char basestring_vsnprintf_buf[16*1024]; -#endif -int -basestring_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - if (size == 0) - { -#ifdef SNPRINTF_RETURN_TRUNC - return vsnprintf(basestring_vsnprintf_buf, - sizeof(basestring_vsnprintf_buf), - format, ap); -#else - char buf[1]; - return vsnprintf(buf, 1, format, ap); -#endif - } - { - int ret= vsnprintf(str, size, format, ap); -#ifdef SNPRINTF_RETURN_TRUNC - if (ret == size-1 || ret == -1) - { - ret= vsnprintf(basestring_vsnprintf_buf, - sizeof(basestring_vsnprintf_buf), - format, ap); - } -#endif - return ret; - } -} diff --git a/storage/ndb/src/common/util/filetest/FileUnitTest.cpp b/storage/ndb/src/common/util/filetest/FileUnitTest.cpp deleted file mode 100644 index 8549fa26c7c..00000000000 --- a/storage/ndb/src/common/util/filetest/FileUnitTest.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "FileUnitTest.hpp" -#include - -#include - -typedef bool (*TESTFUNC)(const char*); - -typedef const char TESTNAME; -typedef struct -{ - const char* name; - TESTFUNC test; -}Tests; - -static Tests testCases[] = { {"Create/Write", &FileUnitTest::testWrite}, - {"Read", &FileUnitTest::testRead}, - {"Exists", &FileUnitTest::testExists}, - {"File Size", &FileUnitTest::testSize}, - {"Rename", &FileUnitTest::testRename}, - {"Remove", &FileUnitTest::testRemove} }; - -static int testFailed = 0; - -int main(int argc, char* argv[]) -{ - if (argc < 2) - { - ndbout << "Usage: filetest " << endl; - return 0; - } - const char* fileName = argv[1]; - - int testCount = (sizeof(testCases) / sizeof(Tests)); - ndbout << "Starting " << testCount << " tests..." << endl; - for (int i = 0; i < testCount; i++) - { - ndbout << "-- " << " Test " << i + 1 - << " [" << testCases[i].name << "] --" << endl; - if (testCases[i].test(fileName)) - { - ndbout << "-- Passed --" << endl; - } - else - { - ndbout << "-- Failed -- " << endl; - } - - } - ndbout << endl << "-- " << testCount - testFailed << " passed, " - << testFailed << " failed --" << endl; - return 0; -} - - -bool -FileUnitTest::testWrite(const char* aFileName) -{ - bool rc = true; - File f; - if (f.open(aFileName, "w")) - { - f.writeChar("ABABABABABAB ABBABAB ABBABA ABAB JKH KJHA JHHAHAH..."); - f.writeChar("12129791242 1298371923 912738912 378129837128371128132...\n"); - f.close(); - } - else - { - error("testWrite failed: "); - rc = false; - } - return rc; -} - -bool -FileUnitTest::testRead(const char* aFileName) -{ - bool rc = true; - // Read file - File f; - if (f.open(aFileName, "r")) - { - long size = f.size(); - ndbout << "File size = " << size << endl; - ndbout << "Allocating buf of " << size << " bytes" << endl; - char* buf = new char[size]; - buf[size - 1] = '\0'; - int r = 0; - while ((r = f.readChar(buf, r, size)) > 0) - { - ndbout << "Read(" << r << "):" << buf << endl; - } - f.close(); - delete buf; - } - else - { - error("readTest failed: "); - rc = false; - } - return rc; -} - -bool -FileUnitTest::testExists(const char* aFileName) -{ - bool rc = true; - if (File::exists(aFileName)) - { - if (File::exists("ThisFileShouldnotbe.txt")) - { - rc = false; - error("testExists failed, the file should NOT be found."); - } - } - else - { - rc = false; - error("testExists failed, the file should exist."); - } - - return rc; -} - - -bool -FileUnitTest::testSize(const char* aFileName) -{ - bool rc = true; - File f; - if (f.open(aFileName, "r")) - { - long size = f.size(); - if (size <= 0) - { - rc = false; - error("testSize failed, size is <= 0"); - } - ndbout << "File size = " << size << endl; - } - else - { - rc = false; - error("testSize failed, could no open file."); - } - f.close(); - return rc; -} - -bool -FileUnitTest::testRename(const char* aFileName) -{ - bool rc = true; - if (File::rename(aFileName, "filetest_new.txt")) - { - if (!File::exists("filetest_new.txt")) - { - rc = false; - error("testRename failed, new file does not exists."); - } - else - { - ndbout << "Renamed " << aFileName << " to filetest_new.txt" << endl; - } - } - else - { - rc = false; - error("testRename failed, unable to rename file."); - } - - return rc; -} - -bool -FileUnitTest::testRemove(const char* aFileName) -{ - bool rc = true; - File f; - if 
(f.open("filetest_new.txt", "r")) - { - if (!f.remove()) - { - rc = false; - error("testRemove failed, could not remove file."); - } - else - { - if (File::exists("filetest_new")) - { - rc = false; - error("testRemove failed, file was not removed, it still exists."); - } - } - } // (f.open("filetest_new", "r")) - else - { - rc = false; - error("testRemove failed, could not read the file."); - } - - return rc; -} - -void -FileUnitTest::error(const char* msg) -{ - testFailed++; - ndbout << "Test failed: " << msg << endl; - perror("Errno msg"); -} - - -FileUnitTest::FileUnitTest() -{ - -} - -FileUnitTest::~FileUnitTest() -{ - -} diff --git a/storage/ndb/src/common/util/filetest/FileUnitTest.hpp b/storage/ndb/src/common/util/filetest/FileUnitTest.hpp deleted file mode 100644 index dc88c8984af..00000000000 --- a/storage/ndb/src/common/util/filetest/FileUnitTest.hpp +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FILEUNITTEST_H -#define FILEUNITTEST_H - -/** - * Unit test of File. - * - * @version #@ $Id: FileUnitTest.hpp,v 1.1 2002/03/13 18:09:03 eyualex Exp $ - */ -class FileUnitTest -{ -public: - static bool testWrite(const char* aFileName); - static bool testRead(const char* aFileName); - static bool testExists(const char* aFileName); - static bool testSize(const char* aFileName); - static bool testRename(const char* aFileName); - static bool testRemove(const char* aFileName); - - static void error(const char* msg); -private: - FileUnitTest(); - ~FileUnitTest(); - -}; -#endif diff --git a/storage/ndb/src/common/util/filetest/Makefile b/storage/ndb/src/common/util/filetest/Makefile deleted file mode 100644 index fe1842921f9..00000000000 --- a/storage/ndb/src/common/util/filetest/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -include .defs.mk - -TYPE := - -BIN_TARGET := filetest -BIN_TARGET_ARCHIVES := portlib general - -SOURCES := FileUnitTest.cpp - -CCFLAGS_LOC += -I$(NDB_TOP)/include/logger -I$(NDB_TOP)/include/portlib - -include $(NDB_TOP)/Epilogue.mk - - diff --git a/storage/ndb/src/common/util/getarg.cat3 b/storage/ndb/src/common/util/getarg.cat3 deleted file mode 100644 index 31685510537..00000000000 --- a/storage/ndb/src/common/util/getarg.cat3 +++ /dev/null @@ -1,237 +0,0 @@ -GETARG(3) OpenBSD Programmer's Manual GETARG(3) - -NNAAMMEE - ggeettaarrgg, aarrgg__pprriinnttuussaaggee - collect command line options - -SSYYNNOOPPSSIISS - ##iinncclluuddee <> - - - _i_n_t - ggeettaarrgg(_s_t_r_u_c_t _g_e_t_a_r_g_s _*_a_r_g_s, _s_i_z_e___t _n_u_m___a_r_g_s, _i_n_t _a_r_g_c, _c_h_a_r _*_*_a_r_g_v, - _i_n_t _*_o_p_t_i_n_d); - - - _v_o_i_d - aarrgg__pprriinnttuussaaggee(_s_t_r_u_c_t _g_e_t_a_r_g_s _*_a_r_g_s, _s_i_z_e___t _n_u_m___a_r_g_s, - _c_o_n_s_t _c_h_a_r _*_p_r_o_g_n_a_m_e, _c_o_n_s_t _c_h_a_r _*_e_x_t_r_a___s_t_r_i_n_g); - - -DDEESSCCRRIIPPTTIIOONN - ggeettaarrgg() collects any 
command line options given to a program in an easi­ - ly used way. aarrgg__pprriinnttuussaaggee() pretty-prints the available options, with - a short help text. - - _a_r_g_s is the option specification to use, and it's an array of _s_t_r_u_c_t - _g_e_t_a_r_g_s elements. _n_u_m___a_r_g_s is the size of _a_r_g_s (in elements). _a_r_g_c and - _a_r_g_v are the argument count and argument vector to extract option from. - _o_p_t_i_n_d is a pointer to an integer where the index to the last processed - argument is stored, it must be initialised to the first index (minus one) - to process (normally 0) before the first call. - - _a_r_g___p_r_i_n_t_u_s_a_g_e take the same _a_r_g_s and _n_u_m___a_r_g_s as getarg; _p_r_o_g_n_a_m_e _i_s _t_h_e - _n_a_m_e _o_f _t_h_e _p_r_o_g_r_a_m _(_t_o _b_e progname0 _0progname1 _1progname2 _2progname3 - _3progname4 _4progname5 _e_x_t_r_a___s_t_r_i_n_g is a string to print after the actual - options to indicate more arguments. The usefulness of this function is - realised only be people who has used programs that has help strings that - doesn't match what the code does. - - The _g_e_t_a_r_g_s struct has the following elements. - - - struct getargs{ - const char *long_name; - char short_name; - enum { arg_integer, - arg_string, - arg_flag, - arg_negative_flag, - arg_strings, - arg_double, - arg_collect - } type; - void *value; - const char *help; - const char *arg_help; - }; - - _l_o_n_g___n_a_m_e is the long name of the option, it can be NULL, if you don't - want a long name. _s_h_o_r_t___n_a_m_e is the characted to use as short option, it - can be zero. If the option has a value the _v_a_l_u_e field gets filled in - with that value interpreted as specified by the _t_y_p_e field. _h_e_l_p is a - longer help string for the option as a whole, if it's NULL the help text - for the option is omitted (but it's still displayed in the synopsis). - _a_r_g___h_e_l_p is a description of the argument, if NULL a default value will - be used, depending on the type of the option: - - - arg_integer the argument is a signed integer, and _v_a_l_u_e should - point to an _i_n_t. - - _a_r_g___s_t_r_i_n_g the argument is a string, and _v_a_l_u_e should point to a - _c_h_a_r_*. - - _a_r_g___f_l_a_g the argument is a flag, and _v_a_l_u_e should point to a - _i_n_t. It gets filled in with either zero or one, de­ - pending on how the option is given, the normal case - beeing one. Note that if the option isn't given, the - value isn't altered, so it should be initialised to - some useful default. - - _a_r_g___n_e_g_a_t_i_v_e___f_l_a_g this is the same as _a_r_g___f_l_a_g but it reverses the mean­ - ing of the flag (a given short option clears the - flag), and the synopsis of a long option is negated. - - _a_r_g___s_t_r_i_n_g_s the argument can be given multiple times, and the val­ - ues are collected in an array; _v_a_l_u_e should be a - pointer to a _s_t_r_u_c_t _g_e_t_a_r_g___s_t_r_i_n_g_s structure, which - holds a length and a string pointer. - - _a_r_g___d_o_u_b_l_e argument is a double precision floating point value, - and _v_a_l_u_e should point to a _d_o_u_b_l_e. - - _a_r_g___c_o_l_l_e_c_t allows more fine-grained control of the option parsing - process. 
_v_a_l_u_e should be a pointer to a - _g_e_t_a_r_g___c_o_l_l_e_c_t___i_n_f_o structure: - - typedef int (*getarg_collect_func)(int short_opt, - int argc, - char **argv, - int *optind, - int *optarg, - void *data); - - typedef struct getarg_collect_info { - getarg_collect_func func; - void *data; - } getarg_collect_info; - - With the _f_u_n_c member set to a function to call, and - _d_a_t_a to some application specific data. The parameters - to the collect function are: - - _s_h_o_r_t___f_l_a_g non-zero if this call is via a short option - flag, zero otherwise - - _a_r_g_c, _a_r_g_v the whole argument list - - _o_p_t_i_n_d pointer to the index in argv where the flag is - - _o_p_t_a_r_g pointer to the index in argv[*optind] where the - flag name starts - - _d_a_t_a application specific data - - You can modify _*_o_p_t_i_n_d, and _*_o_p_t_a_r_g, but to do this - correct you (more or less) have to know about the in­ - ner workings of getarg. - - You can skip parts of arguments by increasing _*_o_p_t_a_r_g - (you could implement the --zz_3 set of flags from ggzziipp - with this), or whole argument strings by increasing - _*_o_p_t_i_n_d (let's say you want a flag --cc _x _y _z to specify - a coordinate); if you also have to set _*_o_p_t_a_r_g to a - sane value. - - The collect function should return one of - ARG_ERR_NO_MATCH, ARG_ERR_BAD_ARG, ARG_ERR_NO_ARG on - error, zero otherwise. - - For your convenience there is a function, - ggeettaarrgg__ooppttaarrgg(), that returns the traditional argument - string, and you pass it all arguments, sans data, that - where given to the collection function. - - Don't use this more this unless you absolutely have - to. - - Option parsing is similar to what getopt uses. Short options without ar­ - guments can be compressed (--xxyyzz is the same as --xx --yy --zz), and short op­ - tions with arguments take these as either the rest of the argv-string or - as the next option (--oo_f_o_o, or --oo _f_o_o). - - Long option names are prefixed with -- (double dash), and the value with - a = (equal), ----ffoooo==_b_a_r. Long option flags can either be specified as they - are (----hheellpp), or with an (boolean parsable) option (----hheellpp==_y_e_s, - ----hheellpp==_t_r_u_e, or similar), or they can also be negated (----nnoo--hheellpp is the - same as ----hheellpp==no), and if you're really confused you can do it multiple - times (----nnoo--nnoo--hheellpp==_f_a_l_s_e, or even ----nnoo--nnoo--hheellpp==_m_a_y_b_e). 
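   A minimal sketch of an arg_collect callback, built only from the typedefs
   and error codes quoted above (the option, its meaning, and every name
   below are made up for illustration; the getarg header include is left
   out because its name is elided in the synopsis):

       /* Collect a "-z3"-style flag: read the digit that follows the
          option character and consume it by advancing *optarg. */
       static int collect_level(int short_opt, int argc, char **argv,
                                int *optind, int *optarg, void *data)
       {
           int *level = (int *)data;
           char c = argv[*optind][*optarg + 1];
           (void)short_opt; (void)argc;
           if (c < '0' || c > '9')
               return ARG_ERR_BAD_ARG;   /* error codes described above */
           *level = c - '0';
           (*optarg)++;                  /* skip the digit we consumed */
           return 0;
       }

       static int level;
       static getarg_collect_info level_info = { collect_level, &level };
       /* referenced from the option table as:
          { "level", 'z', arg_collect, &level_info, "compression level" } */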
- -EEXXAAMMPPLLEE - #include - #include - #include - - char *source = "Ouagadougou"; - char *destination; - int weight; - int include_catalog = 1; - int help_flag; - - struct getargs args[] = { - { "source", 's', arg_string, &source, - "source of shippment", "city" }, - { "destination", 'd', arg_string, &destination, - "destination of shippment", "city" }, - { "weight", 'w', arg_integer, &weight, - "weight of shippment", "tons" }, - { "catalog", 'c', arg_negative_flag, &include_catalog, - "include product catalog" }, - { "help", 'h', arg_flag, &help_flag } - }; - - int num_args = sizeof(args) / sizeof(args[0]); /* number of elements in args */ - - const char *progname = "ship++"; - - int - main(int argc, char **argv) - { - int optind = 0; - if (getarg(args, num_args, argc, argv, &optind)) { - arg_printusage(args, num_args, progname, "stuff..."); - exit (1); - } - if (help_flag) { - arg_printusage(args, num_args, progname, "stuff..."); - exit (0); - } - if (destination == NULL) { - fprintf(stderr, "%s: must specify destination0, progname); - exit(1); - } - if (strcmp(source, destination) == 0) { - fprintf(stderr, "%s: destination must be different from source0); - exit(1); - } - /* include more stuff here ... */ - exit(2); - } - - The output help output from this program looks like this: - - $ ship++ --help - Usage: ship++ [--source=city] [-s city] [--destination=city] [-d city] - [--weight=tons] [-w tons] [--no-catalog] [-c] [--help] [-h] stuff... - -s city, --source=city source of shippment - -d city, --destination=city destination of shippment - -w tons, --weight=tons weight of shippment - -c, --no-catalog include product catalog - - -BBUUGGSS - It should be more flexible, so it would be possible to use other more - complicated option syntaxes, such as what ps(1), and tar(1), uses, or the - AFS model where you can skip the flag names as long as the options come - in the correct order. - - Options with multiple arguments should be handled better. - - Should be integreated with SL. - - It's very confusing that the struct you pass in is called getargS. - -SSEEEE AALLSSOO - getopt(3) - - ROKEN September 24, 1999 4 diff --git a/storage/ndb/src/common/util/md5_hash.cpp b/storage/ndb/src/common/util/md5_hash.cpp deleted file mode 100644 index cf8c1bb86f0..00000000000 --- a/storage/ndb/src/common/util/md5_hash.cpp +++ /dev/null @@ -1,239 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#include "md5_hash.hpp" - -#ifdef WORDS_BIGENDIAN -#define HIGHFIRST 1 -#endif - -/* - * This code implements the MD5 message-digest algorithm. - * The algorithm is due to Ron Rivest. This code was - * written by Colin Plumb in 1993, no copyright is claimed. - * This code is in the public domain; do with it what you wish. - * - * Equivalent code is available from RSA Data Security, Inc. 
- * This code has been tested against that, and is equivalent, - * except that you don't need to include two pages of legalese - * with every copy. - * - * The code has been modified by Mikael Ronstroem to handle - * calculating a hash value of a key that is always a multiple - * of 4 bytes long. Word 0 of the calculated 4-word hash value - * is returned as the hash value. - */ - -#ifndef HIGHFIRST -#define byteReverse(buf, len) /* Nothing */ -#else -void byteReverse(unsigned char *buf, unsigned longs); -/* - * Note: this code is harmless on little-endian machines. - */ -void byteReverse(unsigned char *buf, unsigned longs) -{ - Uint32 t; - do { - t = (Uint32) ((unsigned) buf[3] << 8 | buf[2]) << 16 | - ((unsigned) buf[1] << 8 | buf[0]); - *(Uint32 *) buf = t; - buf += 4; - } while (--longs); -} -#endif - -/* The four core functions - F1 is optimized somewhat */ - -/* #define F1(x, y, z) (x & y | ~x & z) */ -#define F1(x, y, z) (z ^ (x & (y ^ z))) -#define F2(x, y, z) F1(z, x, y) -#define F3(x, y, z) (x ^ y ^ z) -#define F4(x, y, z) (y ^ (x | ~z)) - -/* This is the central step in the MD5 algorithm. */ -#define MD5STEP(f, w, x, y, z, data, s) \ - ( w += f(x, y, z) + data, w = w<>(32-s), w += x ) - -/* - * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. MD5Update blocks - * the data and converts bytes into longwords for this routine. - */ -static void MD5Transform(Uint32 buf[4], Uint32 const in[16]) -{ - register Uint32 a, b, c, d; - - a = buf[0]; - b = buf[1]; - c = buf[2]; - d = buf[3]; - - MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); - MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); - MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); - MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); - MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); - MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); - MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); - MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); - MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); - MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); - MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); - MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); - MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); - MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); - MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); - MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); - - MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); - MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); - MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); - MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); - MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); - MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); - MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); - MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); - MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); - MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); - MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); - MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); - MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); - MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); - MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); - MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); - - MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); - MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); - MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); - MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); - MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); - MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); - MD5STEP(F3, c, d, a, b, 
in[7] + 0xf6bb4b60, 16); - MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); - MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); - MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); - MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); - MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); - MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); - MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); - MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); - MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); - - MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); - MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); - MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); - MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); - MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); - MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); - MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); - MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); - MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); - MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); - MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); - MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); - MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); - MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); - MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); - MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); - - buf[0] += a; - buf[1] += b; - buf[2] += c; - buf[3] += d; -} - -/* - * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious - * initialization constants. - */ -void md5_hash(Uint32 result[4], const Uint64* keybuf, Uint32 no_of_32_words) -{ - /** - * This is the external interface of the module - * It is assumed that keybuf is placed on 8 byte - * alignment. - */ - Uint32 i; - Uint32 buf[4]; - Uint64 transform64_buf[8]; - Uint32* transform32_buf; - Uint32 len = no_of_32_words << 2; - const Uint64* key64buf = (const Uint64*)keybuf; - const Uint32* key32buf = (const Uint32*)keybuf; - - transform32_buf = (Uint32*)&transform64_buf[0]; - buf[0] = 0x67452301; - buf[1] = 0xefcdab89; - buf[2] = 0x98badcfe; - buf[3] = 0x10325476; - - while (no_of_32_words >= 16) { - transform64_buf[0] = key64buf[0]; - transform64_buf[1] = key64buf[1]; - transform64_buf[2] = key64buf[2]; - transform64_buf[3] = key64buf[3]; - transform64_buf[4] = key64buf[4]; - transform64_buf[5] = key64buf[5]; - transform64_buf[6] = key64buf[6]; - transform64_buf[7] = key64buf[7]; - no_of_32_words -= 16; - key64buf += 8; - byteReverse((unsigned char *)transform32_buf, 16); - MD5Transform(buf, transform32_buf); - } - - key32buf = (const Uint32*)key64buf; - transform64_buf[0] = 0; - transform64_buf[1] = 0; - transform64_buf[2] = 0; - transform64_buf[3] = 0; - transform64_buf[4] = 0; - transform64_buf[5] = 0; - transform64_buf[6] = 0; - transform64_buf[7] = (Uint64)len; - - for (i = 0; i < no_of_32_words; i++) - transform32_buf[i] = key32buf[i]; - transform32_buf[no_of_32_words] = 0x80000000; - - if (no_of_32_words < 14) { - byteReverse((unsigned char *)transform32_buf, 16); - MD5Transform(buf, transform32_buf); - } else { - if (no_of_32_words == 14) - transform32_buf[15] = 0; - MD5Transform(buf, transform32_buf); - transform64_buf[0] = 0; - transform64_buf[1] = 0; - transform64_buf[2] = 0; - transform64_buf[3] = 0; - transform64_buf[4] = 0; - transform64_buf[5] = 0; - transform64_buf[6] = 0; - transform64_buf[7] = (Uint64)len; - byteReverse((unsigned char *)transform32_buf, 16); - MD5Transform(buf, transform32_buf); - } - - result[0] = buf[0]; - result[1] = buf[1]; - result[2] = buf[2]; - result[3] = buf[3]; -} - diff --git 
a/storage/ndb/src/common/util/ndb_init.c b/storage/ndb/src/common/util/ndb_init.c deleted file mode 100644 index 4cf924840ff..00000000000 --- a/storage/ndb/src/common/util/ndb_init.c +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -NdbMutex *g_ndb_connection_mutex = NULL; - -void -ndb_init_internal() -{ - if (!g_ndb_connection_mutex) - g_ndb_connection_mutex = NdbMutex_Create(); -} - -int -ndb_init() -{ - if (my_init()) { - const char* err = "my_init() failed - exit\n"; - write(2, err, strlen(err)); - exit(1); - } - ndb_init_internal(); - return 0; -} - -void -ndb_end_internal() -{ - if (g_ndb_connection_mutex) - NdbMutex_Destroy(g_ndb_connection_mutex); -} - -void -ndb_end(int flags) -{ - my_end(flags); - ndb_end_internal(); -} diff --git a/storage/ndb/src/common/util/ndb_rand.c b/storage/ndb/src/common/util/ndb_rand.c deleted file mode 100644 index a8e922e8bc7..00000000000 --- a/storage/ndb/src/common/util/ndb_rand.c +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2003, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -static unsigned long next= 1; - -/** - * ndb_rand - * - * constant time, cheap, pseudo-random number generator. - * - * NDB_RAND_MAX assumed to be 32767 - * - * This is the POSIX example for "generating the same sequence on - * different machines". Although that is not one of our requirements. - */ -int ndb_rand(void) -{ - next= next * 1103515245 + 12345; - return((unsigned)(next/65536) % 32768); -} - -void ndb_srand(unsigned seed) -{ - next= seed; -} - diff --git a/storage/ndb/src/common/util/new.cpp b/storage/ndb/src/common/util/new.cpp deleted file mode 100644 index e307e00d510..00000000000 --- a/storage/ndb/src/common/util/new.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (C) 2004-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include - -extern "C" { - void (* ndb_new_handler)() = 0; -} - -#if 0 - -void *operator new (size_t sz) -{ - void * p = NdbMem_Allocate(sz ? sz : 1); - if(p) - return p; - if(ndb_new_handler) - (* ndb_new_handler)(); - abort(); -} - -void *operator new[] (size_t sz) -{ - void * p = (void *) NdbMem_Allocate(sz ? sz : 1); - if(p) - return p; - if(ndb_new_handler) - (* ndb_new_handler)(); - abort(); -} - -void operator delete (void *ptr) -{ - if (ptr) - NdbMem_Free(ptr); -} - -void operator delete[] (void *ptr) throw () -{ - if (ptr) - NdbMem_Free(ptr); -} - -#endif // USE_MYSYS_NEW diff --git a/storage/ndb/src/common/util/random.c b/storage/ndb/src/common/util/random.c deleted file mode 100644 index e53501507c6..00000000000 --- a/storage/ndb/src/common/util/random.c +++ /dev/null @@ -1,284 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -/*************************************************************** -* I N C L U D E D F I L E S * -***************************************************************/ - -#include - -#include - -#include - -/*************************************************************** -* L O C A L C O N S T A N T S * -***************************************************************/ - -/*************************************************************** -* L O C A L D A T A S T R U C T U R E S * -***************************************************************/ - -typedef struct { - unsigned short int x[3]; /* Current state. */ - unsigned short int a[3]; /* Factor in congruential formula. */ - unsigned short int c; /* Additive const. in congruential formula. */ - int init; /* Flag for initializing. 
*/ -}DRand48Data; - -/*************************************************************** -* L O C A L F U N C T I O N S * -***************************************************************/ - -static void shuffleSequence(RandomSequence *seq); - -/*************************************************************** -* L O C A L D A T A * -***************************************************************/ - -static DRand48Data dRand48Data; - -/*************************************************************** -* P U B L I C D A T A * -***************************************************************/ - - -/*************************************************************** -**************************************************************** -* L O C A L F U N C T I O N S C O D E S E C T I O N * -**************************************************************** -***************************************************************/ - -static void localRandom48Init(long int seedval, DRand48Data *buffer) -{ - /* The standards say we only have 32 bits. */ - if (sizeof (long int) > 4) - seedval &= 0xffffffffl; - -#if USHRT_MAX == 0xffffU - buffer->x[2] = seedval >> 16; - buffer->x[1] = seedval & 0xffffl; - buffer->x[0] = 0x330e; - - buffer->a[2] = 0x5; - buffer->a[1] = 0xdeec; - buffer->a[0] = 0xe66d; -#else - buffer->x[2] = seedval; - buffer->x[1] = 0x330e0000UL; - buffer->x[0] = 0; - - buffer->a[2] = 0x5deecUL; - buffer->a[1] = 0xe66d0000UL; - buffer->a[0] = 0; -#endif - - buffer->c = 0xb; - buffer->init = 1; -} - -static void localRandom48(DRand48Data *buffer, long int *result) -{ - Uint64 X; - Uint64 a; - Uint64 loc_result; - - /*--------------------------------------*/ - /* Initialize buffer, if not yet done. */ - /*--------------------------------------*/ - if (!buffer->init) { -#if (USHRT_MAX == 0xffffU) - buffer->a[2] = 0x5; - buffer->a[1] = 0xdeec; - buffer->a[0] = 0xe66d; -#else - buffer->a[2] = 0x5deecUL; - buffer->a[1] = 0xe66d0000UL; - buffer->a[0] = 0; -#endif - buffer->c = 0xb; - buffer->init = 1; - } - - /* Do the real work. We choose a data type which contains at least - 48 bits. Because we compute the modulus it does not care how - many bits really are computed. */ - - if (sizeof (unsigned short int) == 2) { - X = (Uint64)buffer->x[2] << 32 | - (Uint64)buffer->x[1] << 16 | - buffer->x[0]; - a = ((Uint64)buffer->a[2] << 32 | - (Uint64)buffer->a[1] << 16 | - buffer->a[0]); - - loc_result = X * a + buffer->c; - - buffer->x[0] = loc_result & 0xffff; - buffer->x[1] = (loc_result >> 16) & 0xffff; - buffer->x[2] = (loc_result >> 32) & 0xffff; - } - else { - X = (Uint64)buffer->x[2] << 16 | - buffer->x[1] >> 16; - a = (Uint64)buffer->a[2] << 16 | - buffer->a[1] >> 16; - - loc_result = X * a + buffer->c; - - buffer->x[0] = loc_result >> 16 & 0xffffffffl; - buffer->x[1] = loc_result << 16 & 0xffff0000l; - } - - /*--------------------*/ - /* Store the result. 
*/ - /*--------------------*/ - if (sizeof (unsigned short int) == 2) - *result = buffer->x[2] << 15 | buffer->x[1] >> 1; - else - *result = buffer->x[2] >> 1; -} - -static void shuffleSequence(RandomSequence *seq) -{ - unsigned int i; - unsigned int j; - unsigned int tmp; - - if( !seq ) return; - - for(i = 0; i < seq->length; i++ ) { - j = myRandom48(seq->length); - if( i != j ) { - tmp = seq->values[i]; - seq->values[i] = seq->values[j]; - seq->values[j] = tmp; - } - } -} - - -/*************************************************************** -**************************************************************** -* P U B L I C F U N C T I O N S C O D E S E C T I O N * -**************************************************************** -***************************************************************/ - - -double getTps(unsigned int count, double timeValue) -{ - double f; - - if( timeValue != 0.0 ) - f = count / timeValue; - else - f = 0.0; - - return(f); -} - -/*----------------------------*/ -/* Random Sequences Functions */ -/*----------------------------*/ -int initSequence(RandomSequence *seq, SequenceValues *inputValues) -{ - unsigned int i; - unsigned int j; - unsigned int totalLength; - unsigned int idx; - - if( !seq || !inputValues ) return(-1); - - /*------------------------------------*/ - /* Find the total length of the array */ - /*------------------------------------*/ - totalLength = 0; - - for(i = 0; inputValues[i].length != 0; i++) - totalLength += inputValues[i].length; - - if( totalLength == 0 ) return(-1); - - seq->length = totalLength; - seq->values = calloc(totalLength, sizeof(unsigned int)); - - if( seq->values == 0 ) return(-1); - - /*----------------------*/ - /* set the array values */ - /*----------------------*/ - idx = 0; - - for(i = 0; inputValues[i].length != 0; i++) { - for(j = 0; j < inputValues[i].length; j++ ) { - seq->values[idx] = inputValues[i].value; - idx++; - } - } - - shuffleSequence(seq); - - seq->currentIndex = 0; - - return(0); -} - -unsigned int getNextRandom(RandomSequence *seq) -{ - unsigned int nextValue; - - nextValue = seq->values[seq->currentIndex]; - - seq->currentIndex++; - - if(seq->currentIndex == seq->length){ - seq->currentIndex = 0; - shuffleSequence(seq); - } - - return nextValue; -} - -void printSequence(RandomSequence *seq, unsigned int numPerRow) -{ - unsigned int i; - - if( !seq ) return; - - for(i = 0; ilength; i++) { - ndbout_c("%d ", seq->values[i]); - - if((i+1) % numPerRow == 0) - ndbout_c(""); - } - - if(i % numPerRow != 0) - ndbout_c(""); -} - -void myRandom48Init(long int seedval) -{ - localRandom48Init(seedval, &dRand48Data); -} - -long int myRandom48(unsigned int maxValue) -{ - long int result; - - localRandom48(&dRand48Data, &result); - - return(result % maxValue); -} diff --git a/storage/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp deleted file mode 100644 index 2e0d9828fa0..00000000000 --- a/storage/ndb/src/common/util/socket_io.cpp +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
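/* Reviewer sketch (not from the tree): the sequence shuffle used by the
 * deleted random.c, extracted into a standalone helper.  `rnd` stands in for
 * myRandom48(); the swap-with-random-index loop is copied as-is. */
static void shuffle_u32(unsigned int* values, unsigned int length,
                        long int (*rnd)(unsigned int maxValue))
{
  unsigned int i;
  for (i = 0; i < length; i++) {
    unsigned int j = (unsigned int)rnd(length);
    if (i != j) {
      unsigned int tmp = values[i];
      values[i] = values[j];
      values[j] = tmp;
    }
  }
}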
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include -#include -#include - -extern "C" -int -read_socket(NDB_SOCKET_TYPE socket, int timeout_millis, - char * buf, int buflen){ - if(buflen < 1) - return 0; - - fd_set readset; - FD_ZERO(&readset); - FD_SET(socket, &readset); - - struct timeval timeout; - timeout.tv_sec = (timeout_millis / 1000); - timeout.tv_usec = (timeout_millis % 1000) * 1000; - - const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); - if(selectRes == 0) - return 0; - - if(selectRes == -1){ - return -1; - } - - return recv(socket, &buf[0], buflen, 0); -} - -extern "C" -int -readln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - char * buf, int buflen, NdbMutex *mutex){ - if(buflen <= 1) - return 0; - - fd_set readset; - FD_ZERO(&readset); - FD_SET(socket, &readset); - - struct timeval timeout; - timeout.tv_sec = (timeout_millis / 1000); - timeout.tv_usec = (timeout_millis % 1000) * 1000; - - if(mutex) - NdbMutex_Unlock(mutex); - Uint64 tick= NdbTick_CurrentMillisecond(); - const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); - - *time= NdbTick_CurrentMillisecond() - tick; - if(mutex) - NdbMutex_Lock(mutex); - - if(selectRes == 0){ - return 0; - } - - if(selectRes == -1){ - return -1; - } - - char* ptr = buf; - int len = buflen; - do - { - int t; - while((t = recv(socket, ptr, len, MSG_PEEK)) == -1 && errno == EINTR); - - if(t < 1) - { - return -1; - } - - - for(int i = 0; i 0 && buf[i-1] == '\r') - { - buf[i-1] = '\n'; - ptr--; - } - ptr[0]= 0; - return ptr - buf; - } - } - - for (int tmp = t; tmp; ) - { - while ((t = recv(socket, ptr, tmp, 0)) == -1 && errno == EINTR); - if (t < 1) - { - return -1; - } - ptr += t; - len -= t; - tmp -= t; - } - - FD_ZERO(&readset); - FD_SET(socket, &readset); - timeout.tv_sec = ((timeout_millis - *time) / 1000); - timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; - - tick= NdbTick_CurrentMillisecond(); - const int selectRes = select(socket + 1, &readset, 0, 0, &timeout); - *time= NdbTick_CurrentMillisecond() - tick; - - if(selectRes != 1){ - return -1; - } - } while (len > 0); - - return -1; -} - -extern "C" -int -write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - const char buf[], int len){ - fd_set writeset; - FD_ZERO(&writeset); - FD_SET(socket, &writeset); - struct timeval timeout; - timeout.tv_sec = (timeout_millis / 1000); - timeout.tv_usec = (timeout_millis % 1000) * 1000; - - - Uint64 tick= NdbTick_CurrentMillisecond(); - const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout); - *time= NdbTick_CurrentMillisecond() - tick; - - if(selectRes != 1){ - return -1; - } - - const char * tmp = &buf[0]; - while(len > 0){ - const int w = send(socket, tmp, len, 0); - if(w == -1){ - return -1; - } - len -= w; - tmp += w; - - if(len == 0) - break; - - FD_ZERO(&writeset); - FD_SET(socket, &writeset); - timeout.tv_sec = ((timeout_millis - *time) / 1000); - timeout.tv_usec = ((timeout_millis - *time) % 1000) * 1000; - - Uint64 tick= NdbTick_CurrentMillisecond(); - const int selectRes2 = select(socket + 1, 0, &writeset, 0, &timeout); - *time= NdbTick_CurrentMillisecond() - tick; - - if(selectRes2 != 1){ - return -1; - } - } - - return 0; -} - -extern "C" -int -print_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - const char * fmt, ...){ - va_list ap; - 
va_start(ap, fmt); - int ret = vprint_socket(socket, timeout_millis, time, fmt, ap); - va_end(ap); - - return ret; -} - -extern "C" -int -println_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - const char * fmt, ...){ - va_list ap; - va_start(ap, fmt); - int ret = vprintln_socket(socket, timeout_millis, time, fmt, ap); - va_end(ap); - return ret; -} - -extern "C" -int -vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - const char * fmt, va_list ap){ - char buf[1000]; - char *buf2 = buf; - size_t size; - - if (fmt != 0 && fmt[0] != 0) { - size = BaseString::vsnprintf(buf, sizeof(buf), fmt, ap); - /* Check if the output was truncated */ - if(size > sizeof(buf)) { - buf2 = (char *)malloc(size); - if(buf2 == NULL) - return -1; - BaseString::vsnprintf(buf2, size, fmt, ap); - } - } else - return 0; - - int ret = write_socket(socket, timeout_millis, time, buf2, size); - if(buf2 != buf) - free(buf2); - return ret; -} - -extern "C" -int -vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis, int *time, - const char * fmt, va_list ap){ - char buf[1000]; - char *buf2 = buf; - size_t size; - - if (fmt != 0 && fmt[0] != 0) { - size = BaseString::vsnprintf(buf, sizeof(buf), fmt, ap)+1;// extra byte for '/n' - /* Check if the output was truncated */ - if(size > sizeof(buf)) { - buf2 = (char *)malloc(size); - if(buf2 == NULL) - return -1; - BaseString::vsnprintf(buf2, size, fmt, ap); - } - } else { - size = 1; - } - buf2[size-1]='\n'; - - int ret = write_socket(socket, timeout_millis, time, buf2, size); - if(buf2 != buf) - free(buf2); - return ret; -} - -#ifdef NDB_WIN32 - -class INIT_WINSOCK2 -{ -public: - INIT_WINSOCK2(void); - ~INIT_WINSOCK2(void); - -private: - bool m_bAcceptable; -}; - -INIT_WINSOCK2 g_init_winsock2; - -INIT_WINSOCK2::INIT_WINSOCK2(void) -: m_bAcceptable(false) -{ - WORD wVersionRequested; - WSADATA wsaData; - int err; - - wVersionRequested = MAKEWORD( 2, 2 ); - - err = WSAStartup( wVersionRequested, &wsaData ); - if ( err != 0 ) { - /* Tell the user that we could not find a usable */ - /* WinSock DLL. */ - m_bAcceptable = false; - } - - /* Confirm that the WinSock DLL supports 2.2.*/ - /* Note that if the DLL supports versions greater */ - /* than 2.2 in addition to 2.2, it will still return */ - /* 2.2 in wVersion since that is the version we */ - /* requested. */ - - if ( LOBYTE( wsaData.wVersion ) != 2 || - HIBYTE( wsaData.wVersion ) != 2 ) { - /* Tell the user that we could not find a usable */ - /* WinSock DLL. */ - WSACleanup( ); - m_bAcceptable = false; - } - - /* The WinSock DLL is acceptable. Proceed. */ - m_bAcceptable = true; -} - -INIT_WINSOCK2::~INIT_WINSOCK2(void) -{ - if(m_bAcceptable) - { - m_bAcceptable = false; - WSACleanup(); - } -} - -#endif - diff --git a/storage/ndb/src/common/util/strdup.c b/storage/ndb/src/common/util/strdup.c deleted file mode 100644 index 96b6df37712..00000000000 --- a/storage/ndb/src/common/util/strdup.c +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
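/* Reviewer sketch (not from the tree): the select()+recv() timeout pattern
 * repeated throughout the deleted socket_io.cpp (read/readln/write).  A plain
 * POSIX int socket is assumed here instead of NDB_SOCKET_TYPE. */
#include <sys/select.h>
#include <sys/socket.h>

static int read_with_timeout(int fd, int timeout_millis, char* buf, int buflen)
{
  fd_set readset;
  struct timeval timeout;
  int selectRes;

  if (buflen < 1)
    return 0;

  FD_ZERO(&readset);
  FD_SET(fd, &readset);
  timeout.tv_sec  = timeout_millis / 1000;
  timeout.tv_usec = (timeout_millis % 1000) * 1000;

  selectRes = select(fd + 1, &readset, NULL, NULL, &timeout);
  if (selectRes <= 0)
    return selectRes;                 /* 0 on timeout, -1 on error */
  return (int)recv(fd, buf, buflen, 0);
}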
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#ifndef HAVE_STRDUP -char * -strdup(const char *s){ - void *p2; - if ((p2 = malloc(strlen(s)+1))) - strcpy(p2, s); - return p2; -} -#endif diff --git a/storage/ndb/src/common/util/testConfigValues/Makefile b/storage/ndb/src/common/util/testConfigValues/Makefile deleted file mode 100644 index 5b7400f5ee3..00000000000 --- a/storage/ndb/src/common/util/testConfigValues/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -include .defs.mk - -TYPE := util - -BIN_TARGET := testConfigValues -BIN_TARGET_ARCHIVES := portlib general - -SOURCES := testConfigValues.cpp - -CCFLAGS_LOC += -I$(call fixpath,$(NDB_TOP)/include/util) - -include $(NDB_TOP)/Epilogue.mk diff --git a/storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp b/storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp deleted file mode 100644 index b21133a438a..00000000000 --- a/storage/ndb/src/common/util/testConfigValues/testConfigValues.cpp +++ /dev/null @@ -1,138 +0,0 @@ -/* Copyright (C) 2004 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include -#include -#include - -#define CF_NODES 1 -#define CF_LOG_PAGES 2 -#define CF_MEM_PAGES 3 -#define CF_START_TO 4 -#define CF_STOP_TO 5 - -void print(Uint32 i, ConfigValues::ConstIterator & cf){ - ndbout_c("---"); - for(Uint32 j = 2; j<=7; j++){ - switch(cf.getTypeOf(j)){ - case ConfigValues::IntType: - ndbout_c("Node %d : CFG(%d) : %d", - i, j, cf.get(j, 999)); - break; - case ConfigValues::Int64Type: - ndbout_c("Node %d : CFG(%d) : %lld (64)", - i, j, cf.get64(j, 999)); - break; - case ConfigValues::StringType: - ndbout_c("Node %d : CFG(%d) : %s", - i, j, cf.get(j, "")); - break; - default: - ndbout_c("Node %d : CFG(%d) : TYPE: %d", - i, j, cf.getTypeOf(j)); - } - } -} - -void print(Uint32 i, ConfigValues & _cf){ - ConfigValues::ConstIterator cf(_cf); - print(i, cf); -} - -void -print(ConfigValues & _cf){ - ConfigValues::ConstIterator cf(_cf); - Uint32 i = 0; - while(cf.openSection(CF_NODES, i)){ - print(i, cf); - cf.closeSection(); - i++; - } -} - -inline -void -require(bool b){ - if(!b) - abort(); -} - -int -main(void){ - - { - ConfigValuesFactory cvf(10, 20); - cvf.openSection(1, 0); - cvf.put(2, 12); - cvf.put64(3, 13); - cvf.put(4, 14); - cvf.put64(5, 15); - cvf.put(6, "Keso"); - cvf.put(7, "Kent"); - cvf.closeSection(); - - cvf.openSection(1, 1); - cvf.put(2, 22); - cvf.put64(3, 23); - cvf.put(4, 24); - cvf.put64(5, 25); - cvf.put(6, "Kalle"); - cvf.put(7, "Anka"); - cvf.closeSection(); - - ndbout_c("-- print --"); - print(* cvf.m_cfg); - - cvf.shrink(); - ndbout_c("shrink\n-- print --"); - print(* cvf.m_cfg); - cvf.expand(10, 10); - ndbout_c("expand\n-- print --"); - print(* cvf.m_cfg); - - 
ndbout_c("packed size: %d", cvf.m_cfg->getPackedSize()); - - ConfigValues::ConstIterator iter(* cvf.m_cfg); - iter.openSection(CF_NODES, 0); - ConfigValues * cfg2 = ConfigValuesFactory::extractCurrentSection(iter); - print(99, * cfg2); - - cvf.shrink(); - ndbout_c("packed size: %d", cfg2->getPackedSize()); - - UtilBuffer buf; - Uint32 l1 = cvf.m_cfg->pack(buf); - Uint32 l2 = cvf.m_cfg->getPackedSize(); - require(l1 == l2); - - ConfigValuesFactory cvf2; - require(cvf2.unpack(buf)); - UtilBuffer buf2; - cvf2.shrink(); - Uint32 l3 = cvf2.m_cfg->pack(buf2); - require(l1 == l3); - - ndbout_c("unpack\n-- print --"); - print(* cvf2.m_cfg); - - cfg2->~ConfigValues();; - cvf.m_cfg->~ConfigValues(); - free(cfg2); - free(cvf.m_cfg); - } - return 0; -} diff --git a/storage/ndb/src/common/util/testProperties/Makefile b/storage/ndb/src/common/util/testProperties/Makefile deleted file mode 100644 index 343c07a49e7..00000000000 --- a/storage/ndb/src/common/util/testProperties/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -include .defs.mk - -TYPE := util - -BIN_TARGET := keso - -SOURCES := testProperties.cpp - -include $(NDB_TOP)/Epilogue.mk diff --git a/storage/ndb/src/common/util/testProperties/testProperties.cpp b/storage/ndb/src/common/util/testProperties/testProperties.cpp deleted file mode 100644 index b6f7fcb84d5..00000000000 --- a/storage/ndb/src/common/util/testProperties/testProperties.cpp +++ /dev/null @@ -1,195 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "Properties.hpp" -#include - -#include "uucode.h" - -bool -writeToFile(const Properties & p, const char * fname, bool uu = true){ - Uint32 sz = p.getPackedSize(); - char * buffer = (char*)malloc(sz); - - FILE * f = fopen(fname, "wb"); - bool res = p.pack((Uint32*)buffer); - if(res != true){ - ndbout << "Error packing" << endl; - ndbout << "p.getPropertiesErrno() = " << p.getPropertiesErrno() << endl; - ndbout << "p.getOSErrno() = " << p.getOSErrno() << endl; - } - if(uu) - uuencode(buffer, sz, f); - else { - fwrite(buffer, 1, sz, f); - } - - fclose(f); - free(buffer); - return res; -} - -bool -readFromFile(Properties & p, const char *fname, bool uu = true){ - Uint32 sz = 30000; - char * buffer = (char*)malloc(sz); - FILE * f = fopen(fname, "rb"); - if(uu) - uudecode(f, buffer, sz); - else - fread(buffer, 1, sz, f); - fclose(f); - bool res = p.unpack((Uint32*)buffer, sz); - if(res != true){ - ndbout << "Error unpacking" << endl; - ndbout << "p.getPropertiesErrno() = " << p.getPropertiesErrno() << endl; - ndbout << "p.getOSErrno() = " << p.getOSErrno() << endl; - } - free(buffer); - return res; -} - -void putALot(Properties & tmp){ - int i = 123; - tmp.put("LockPagesInMainMemory", i++); - tmp.put("SleepWhenIdle", i++); - tmp.put("NoOfSignalsToExecuteBetweenCommunicationInterfacePoll", i++); - tmp.put("TimeBetweenWatchDogCheck", i++); - tmp.put("StopOnError", i++); - - tmp.put("MaxNoOfConcurrentOperations", i++); - tmp.put("MaxNoOfConcurrentTransactions", i++); - tmp.put("MemorySpaceIndexes", i++); - tmp.put("MemorySpaceTuples", i++); - tmp.put("MemoryDiskPages", i++); - tmp.put("NoOfFreeDiskClusters", i++); - tmp.put("NoOfDiskClusters", i++); - - tmp.put("TimeToWaitAlive", i++); - tmp.put("HeartbeatIntervalDbDb", i++); - tmp.put("HeartbeatIntervalDbApi", i++); - tmp.put("TimeBetweenInactiveTransactionAbortCheck", i++); - - tmp.put("TimeBetweenLocalCheckpoints", i++); - tmp.put("TimeBetweenGlobalCheckpoints", i++); - tmp.put("NoOfFragmentLogFiles", i++); - tmp.put("NoOfConcurrentCheckpointsDuringRestart", i++); - tmp.put("TransactionInactiveTimeBeforeAbort", i++); - tmp.put("NoOfConcurrentProcessesHandleTakeover", i++); - - tmp.put("NoOfConcurrentCheckpointsAfterRestart", i++); - - tmp.put("NoOfDiskPagesToDiskDuringRestartTUP", i++); - tmp.put("NoOfDiskPagesToDiskAfterRestartTUP", i++); - tmp.put("NoOfDiskPagesToDiskDuringRestartACC", i++); - tmp.put("NoOfDiskPagesToDiskAfterRestartACC", i++); - - tmp.put("NoOfDiskClustersPerDiskFile", i++); - tmp.put("NoOfDiskFiles", i++); - - // Always found - tmp.put("NoOfReplicas", 33); - tmp.put("MaxNoOfAttributes", 34); - tmp.put("MaxNoOfTables", 35); -} - -int -main(void){ - Properties p; - - p.put("Kalle", 1); - p.put("Ank1", "anka"); - p.put("Ank2", "anka"); - p.put("Ank3", "anka"); - p.put("Ank4", "anka"); - putALot(p); - - Properties tmp; - tmp.put("Type", "TCP"); - tmp.put("OwnNodeId", 1); - tmp.put("RemoteNodeId", 2); - tmp.put("OwnHostName", "local"); - tmp.put("RemoteHostName", "remote"); - - tmp.put("SendSignalId", 1); - tmp.put("Compression", (Uint32)false); - tmp.put("Checksum", 1); - - tmp.put64("SendBufferSize", 2000); - tmp.put64("MaxReceiveSize", 1000); - - tmp.put("PortNumber", 1233); - putALot(tmp); - - p.put("Connection", 1, &tmp); - - p.put("NoOfConnections", 2); - p.put("NoOfConnection2", 2); - - 
p.put("kalle", 3); - p.put("anka", "kalle"); - - Properties p2; - p2.put("kalle", "anka"); - - p.put("prop", &p2); - - p.put("Connection", 2, &tmp); - - p.put("Connection", 3, &tmp); - - p.put("Connection", 4, &tmp); - /* - */ - - Uint32 a = 99; - const char * b; - const Properties * p3; - Properties * p4; - - bool bb = p.get("kalle", &a); - bool cc = p.get("anka", &b); - bool dd = p.get("prop", &p3); - if(p.getCopy("prop", &p4)) - delete p4; - - p2.put("p2", &p2); - - p.put("prop2", &p2); - /* */ - - p.print(stdout, "testing 1: "); - - writeToFile(p, "A_1"); - writeToFile(p, "B_1", false); - - Properties r1; - readFromFile(r1, "A_1"); - writeToFile(r1, "A_3"); - - //r1.print(stdout, "testing 2: "); - Properties r2; - readFromFile(r2, "A_1"); - writeToFile(r2, "A_4"); - - Properties r3; - readFromFile(r3, "B_1", false); - writeToFile(r3, "A_5"); - r3.print(stdout, "testing 3: "); - - return 0; -} diff --git a/storage/ndb/src/common/util/testSimpleProperties/Makefile b/storage/ndb/src/common/util/testSimpleProperties/Makefile deleted file mode 100644 index 89d33fa8dd8..00000000000 --- a/storage/ndb/src/common/util/testSimpleProperties/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -include .defs.mk - -TYPE := util - -BIN_TARGET := sp_test -BIN_TARGET_ARCHIVES := portlib general - -SOURCES := sp_test.cpp - -CCFLAGS_LOC += -I$(call fixpath,$(NDB_TOP)/include/util) - -include $(NDB_TOP)/Epilogue.mk diff --git a/storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp b/storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp deleted file mode 100644 index d3278c63b9d..00000000000 --- a/storage/ndb/src/common/util/testSimpleProperties/sp_test.cpp +++ /dev/null @@ -1,95 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "SimpleProperties.hpp" -#include - -Uint32 page[8192]; - -int writer(); -int reader(Uint32 *, Uint32 len); -int unpack(Uint32 *, Uint32 len); - -int main(){ - int len = writer(); - reader(page, len); - unpack(page, len); - - return 0; -} - -int -writer(){ - LinearWriter w(&page[0], 8192); - - w.first(); - w.add(1, 2); - w.add(7, 3); - w.add(3, "jonas"); - w.add(5, "0123456789"); - w.add(7, 4); - w.add(3, "e cool"); - w.add(5, "9876543210"); - - ndbout_c("WordsUsed = %d", w.getWordsUsed()); - - return w.getWordsUsed(); -} - -int -reader(Uint32 * pages, Uint32 len){ - SimplePropertiesLinearReader it(pages, len); - - it.printAll(ndbout); - return 0; -} - -struct Test { - Uint32 val1; - Uint32 val7; - char val3[100]; - Test() : val1(0xFFFFFFFF), val7(0xFFFFFFFF) { sprintf(val3, "bad");} -}; - -static const -SimpleProperties::SP2StructMapping -test_map [] = { - { 1, offsetof(Test, val1), SimpleProperties::Uint32Value, 0, ~0 }, - { 7, offsetof(Test, val7), SimpleProperties::Uint32Value, 0, ~0 }, - { 3, offsetof(Test, val3), SimpleProperties::StringValue, 0, sizeof(100) }, - { 5, 0, SimpleProperties::InvalidValue, 0, 0 } -}; - -static unsigned -test_map_sz = sizeof(test_map)/sizeof(test_map[0]); - -int -unpack(Uint32 * pages, Uint32 len){ - Test test; - SimplePropertiesLinearReader it(pages, len); - SimpleProperties::UnpackStatus status; - while((status = SimpleProperties::unpack(it, &test, test_map, test_map_sz, - true, false)) == SimpleProperties::Break){ - ndbout << "test.val1 = " << test.val1 << endl; - ndbout << "test.val7 = " << test.val7 << endl; - ndbout << "test.val3 = " << test.val3 << endl; - it.next(); - } - assert(status == SimpleProperties::Eof); - return 0; -} diff --git a/storage/ndb/src/common/util/uucode.c b/storage/ndb/src/common/util/uucode.c deleted file mode 100644 index b861e040a5e..00000000000 --- a/storage/ndb/src/common/util/uucode.c +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -/* ENC is the basic 1 character encoding function to make a char printing */ -/* DEC is single character decode */ -#define ENC(c) ((c) ? ((c) & 077) + ' ': '`') -#define DEC(c) (((c) - ' ') & 077) - -/* - * copy from in to out, encoding as you go along. - */ -void -uuencode(const char * data, int dataLen, FILE * out) -{ - int ch, n; - const char *p = data; - - fprintf(out, "begin\n"); - - while (dataLen > 0){ - n = dataLen > 45 ? 
45 : dataLen; - dataLen -= n; - ch = ENC(n); - if (putc(ch, out) == EOF) - break; - for (; n > 0; n -= 3, p += 3) { - char p_0 = * p; - char p_1 = 0; - char p_2 = 0; - - if(n >= 2){ - p_1 = p[1]; - } - if(n >= 3){ - p_2 = p[2]; - } - - ch = p_0 >> 2; - ch = ENC(ch); - if (putc(ch, out) == EOF) - break; - ch = ((p_0 << 4) & 060) | ((p_1 >> 4) & 017); - ch = ENC(ch); - if (putc(ch, out) == EOF) - break; - ch = ((p_1 << 2) & 074) | ((p_2 >> 6) & 03); - ch = ENC(ch); - if (putc(ch, out) == EOF) - break; - ch = p_2 & 077; - ch = ENC(ch); - if (putc(ch, out) == EOF) - break; - } - if (putc('\n', out) == EOF) - break; - } - ch = ENC('\0'); - putc(ch, out); - putc('\n', out); - fprintf(out, "end\n"); -} - -int -uudecode(FILE * input, char * outBuf, int bufLen){ - int n; - char ch, *p, returnCode; - char buf[255]; - - returnCode = 0; - /* search for header line */ - do { - if (!fgets(buf, sizeof(buf), input)) { - return 1; - } - } while (strncmp(buf, "begin", 5)); - - /* for each input line */ - for (;;) { - if (!fgets(p = buf, sizeof(buf), input)) { - return 1; - } - /* - * `n' is used to avoid writing out all the characters - * at the end of the file. - */ - if ((n = DEC(*p)) <= 0) - break; - if(n >= bufLen){ - returnCode = 1; - break; - } - for (++p; n > 0; p += 4, n -= 3) - if (n >= 3) { - ch = DEC(p[0]) << 2 | DEC(p[1]) >> 4; - * outBuf = ch; outBuf++; bufLen--; - ch = DEC(p[1]) << 4 | DEC(p[2]) >> 2; - * outBuf = ch; outBuf++; bufLen--; - ch = DEC(p[2]) << 6 | DEC(p[3]); - * outBuf = ch; outBuf++; bufLen--; - } else { - if (n >= 1) { - ch = DEC(p[0]) << 2 | DEC(p[1]) >> 4; - * outBuf = ch; outBuf++; bufLen--; - } - if (n >= 2) { - ch = DEC(p[1]) << 4 | DEC(p[2]) >> 2; - * outBuf = ch; outBuf++; bufLen--; - } - if (n >= 3) { - ch = DEC(p[2]) << 6 | DEC(p[3]); - * outBuf = ch; outBuf++; bufLen--; - } - } - } - if (!fgets(buf, sizeof(buf), input) || strcmp(buf, "end\n")) { - return 1; - } - return returnCode; -} - -int -uuencode_mem(char * dst, const char * data, int dataLen) -{ - int sz = 0; - - int ch, n; - const char *p = data; - - while (dataLen > 0){ - n = dataLen > 45 ? 45 : dataLen; - dataLen -= n; - ch = ENC(n); - * dst = ch; dst++; sz++; - for (; n > 0; n -= 3, p += 3) { - char p_0 = * p; - char p_1 = 0; - char p_2 = 0; - - if(n >= 2){ - p_1 = p[1]; - } - if(n >= 3){ - p_2 = p[2]; - } - - ch = p_0 >> 2; - ch = ENC(ch); - * dst = ch; dst++; sz++; - - ch = ((p_0 << 4) & 060) | ((p_1 >> 4) & 017); - ch = ENC(ch); - * dst = ch; dst++; sz++; - - ch = ((p_1 << 2) & 074) | ((p_2 >> 6) & 03); - ch = ENC(ch); - * dst = ch; dst++; sz++; - - ch = p_2 & 077; - ch = ENC(ch); - * dst = ch; dst++; sz++; - } - - * dst = '\n'; dst++; sz++; - } - ch = ENC('\0'); - * dst = ch; dst++; sz++; - - * dst = '\n'; dst++; sz++; - * dst = 0; dst++; sz++; - - return sz; -} - -int -uudecode_mem(char * outBuf, int bufLen, const char * src){ - int n; - char ch; - int sz = 0; - const char * p = src; - - /* - * `n' is used to avoid writing out all the characters - * at the end of the file. 
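/* Reviewer sketch (not from the tree): the 3-byte to 4-character step at the
 * core of the uucode.c being removed; ENC is copied from that file.  Each
 * encoded line starts with ENC(n) for n data bytes, at most 45 per line. */
#define ENC(c) ((c) ? ((c) & 077) + ' ' : '`')

static void uu_encode_triplet(const unsigned char in[3], char out[4])
{
  out[0] = ENC(in[0] >> 2);
  out[1] = ENC(((in[0] << 4) & 060) | ((in[1] >> 4) & 017));
  out[2] = ENC(((in[1] << 2) & 074) | ((in[2] >> 6) & 03));
  out[3] = ENC(in[2] & 077);
}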
- */ - if ((n = DEC(*p)) <= 0) - return 0; - if(n >= bufLen){ - return -1; - } - for (++p; n > 0; p += 4, n -= 3){ - if (n >= 3) { - ch = DEC(p[0]) << 2 | DEC(p[1]) >> 4; - * outBuf = ch; outBuf++; bufLen--; sz++; - ch = DEC(p[1]) << 4 | DEC(p[2]) >> 2; - * outBuf = ch; outBuf++; bufLen--; sz++; - ch = DEC(p[2]) << 6 | DEC(p[3]); - * outBuf = ch; outBuf++; bufLen--; sz++; - } else { - if (n >= 1) { - ch = DEC(p[0]) << 2 | DEC(p[1]) >> 4; - * outBuf = ch; outBuf++; bufLen--; sz++; - } - if (n >= 2) { - ch = DEC(p[1]) << 4 | DEC(p[2]) >> 2; - * outBuf = ch; outBuf++; bufLen--; sz++; - } - if (n >= 3) { - ch = DEC(p[2]) << 6 | DEC(p[3]); - * outBuf = ch; outBuf++; bufLen--; sz++; - } - } - } - return sz; -} - - - diff --git a/storage/ndb/src/common/util/version.c b/storage/ndb/src/common/util/version.c deleted file mode 100644 index 241d071b603..00000000000 --- a/storage/ndb/src/common/util/version.c +++ /dev/null @@ -1,255 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include -#include -#include -#include - -Uint32 ndbGetMajor(Uint32 version) { - return (version >> 16) & 0xFF; -} - -Uint32 ndbGetMinor(Uint32 version) { - return (version >> 8) & 0xFF; -} - -Uint32 ndbGetBuild(Uint32 version) { - return (version >> 0) & 0xFF; -} - -Uint32 ndbMakeVersion(Uint32 major, Uint32 minor, Uint32 build) { - return NDB_MAKE_VERSION(major, minor, build); - -} - -const char * ndbGetOwnVersionString() -{ - static char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; - return ndbGetVersionString(NDB_VERSION, NDB_VERSION_STATUS, - ndb_version_string_buf, - sizeof(ndb_version_string_buf)); -} - -const char * ndbGetVersionString(Uint32 version, const char * status, - char *buf, unsigned sz) -{ - if (status && status[0] != 0) - basestring_snprintf(buf, sz, - "Version %d.%d.%d (%s)", - getMajor(version), - getMinor(version), - getBuild(version), - status); - else - basestring_snprintf(buf, sz, - "Version %d.%d.%d", - getMajor(version), - getMinor(version), - getBuild(version)); - return buf; -} - -typedef enum { - UG_Null, - UG_Range, - UG_Exact -} UG_MatchType; - -struct NdbUpGradeCompatible { - Uint32 ownVersion; - Uint32 otherVersion; - UG_MatchType matchType; -}; - -/*#define TEST_VERSION*/ - -#define HAVE_NDB_SETVERSION -#ifdef HAVE_NDB_SETVERSION -Uint32 ndbOwnVersionTesting = 0; -void -ndbSetOwnVersion() { - char buf[256]; - if (NdbEnv_GetEnv("NDB_SETVERSION", buf, sizeof(buf))) { - Uint32 _v1,_v2,_v3; - if (sscanf(buf, "%u.%u.%u", &_v1, &_v2, &_v3) == 3) { - ndbOwnVersionTesting = MAKE_VERSION(_v1,_v2,_v3); - ndbout_c("Testing: Version set to 0x%x", ndbOwnVersionTesting); - } - } -} -#else -void ndbSetOwnVersion() {} -#endif - -#ifndef TEST_VERSION -struct NdbUpGradeCompatible ndbCompatibleTable_full[] = { - { MAKE_VERSION(5,4,NDB_VERSION_BUILD), 
MAKE_VERSION(5,1,18), UG_Range}, - { MAKE_VERSION(5,1,17), MAKE_VERSION(5,1,0), UG_Range}, - { MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,12), UG_Range}, - { MAKE_VERSION(5,0,11), MAKE_VERSION(5,0,2), UG_Range}, - { MAKE_VERSION(4,1,NDB_VERSION_BUILD), MAKE_VERSION(4,1,15), UG_Range }, - { MAKE_VERSION(4,1,14), MAKE_VERSION(4,1,10), UG_Range }, - { MAKE_VERSION(4,1,10), MAKE_VERSION(4,1,9), UG_Exact }, - { MAKE_VERSION(4,1,9), MAKE_VERSION(4,1,8), UG_Exact }, - { MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact }, - { 0, 0, UG_Null } -}; - -struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = { - { MAKE_VERSION(5,0,12), MAKE_VERSION(5,0,11), UG_Exact }, - { MAKE_VERSION(5,0,2), MAKE_VERSION(4,1,8), UG_Exact }, - { MAKE_VERSION(4,1,15), MAKE_VERSION(4,1,14), UG_Exact }, - { MAKE_VERSION(3,5,4), MAKE_VERSION(3,5,3), UG_Exact }, - { 0, 0, UG_Null } -}; - -#else /* testing purposes */ - -struct NdbUpGradeCompatible ndbCompatibleTable_full[] = { - { MAKE_VERSION(4,1,5), MAKE_VERSION(4,1,0), UG_Range }, - { MAKE_VERSION(3,6,9), MAKE_VERSION(3,6,1), UG_Range }, - { MAKE_VERSION(3,6,2), MAKE_VERSION(3,6,1), UG_Range }, - { MAKE_VERSION(3,5,7), MAKE_VERSION(3,5,0), UG_Range }, - { MAKE_VERSION(3,5,1), MAKE_VERSION(3,5,0), UG_Range }, - { NDB_VERSION_D , MAKE_VERSION(NDB_VERSION_MAJOR,NDB_VERSION_MINOR,2), UG_Range }, - { 0, 0, UG_Null } -}; - -struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = { - { MAKE_VERSION(4,1,5), MAKE_VERSION(3,6,9), UG_Exact }, - { MAKE_VERSION(3,6,2), MAKE_VERSION(3,5,7), UG_Exact }, - { MAKE_VERSION(3,5,1), NDB_VERSION_D , UG_Exact }, - { 0, 0, UG_Null } -}; - - -#endif - -void ndbPrintVersion() -{ - printf("Version: %u.%u.%u\n", - getMajor(ndbGetOwnVersion()), - getMinor(ndbGetOwnVersion()), - getBuild(ndbGetOwnVersion())); -} - -Uint32 -ndbGetOwnVersion() -{ -#ifdef HAVE_NDB_SETVERSION - if (ndbOwnVersionTesting == 0) - return NDB_VERSION_D; - else - return ndbOwnVersionTesting; -#else - return NDB_VERSION_D; -#endif -} - -int -ndbSearchUpgradeCompatibleTable(Uint32 ownVersion, Uint32 otherVersion, - struct NdbUpGradeCompatible table[]) -{ - int i; - for (i = 0; table[i].ownVersion != 0 && table[i].otherVersion != 0; i++) { - if (table[i].ownVersion == ownVersion || - table[i].ownVersion == (Uint32) ~0) { - switch (table[i].matchType) { - case UG_Range: - if (otherVersion >= table[i].otherVersion){ - return 1; - } - break; - case UG_Exact: - if (otherVersion == table[i].otherVersion){ - return 1; - } - break; - default: - break; - } - } - } - return 0; -} - -int -ndbCompatible(Uint32 ownVersion, Uint32 otherVersion, struct NdbUpGradeCompatible table[]) -{ - if (otherVersion >= ownVersion) { - return 1; - } - return ndbSearchUpgradeCompatibleTable(ownVersion, otherVersion, table); -} - -int -ndbCompatible_full(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_full); -} - -int -ndbCompatible_upgrade(Uint32 ownVersion, Uint32 otherVersion) -{ - if (ndbCompatible_full(ownVersion, otherVersion)) - return 1; - return ndbCompatible(ownVersion, otherVersion, ndbCompatibleTable_upgrade); -} - -int -ndbCompatible_mgmt_ndb(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_upgrade(ownVersion, otherVersion); -} - -int -ndbCompatible_mgmt_api(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_upgrade(ownVersion, otherVersion); -} - -int -ndbCompatible_ndb_mgmt(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_full(ownVersion, otherVersion); -} - -int 
-ndbCompatible_api_mgmt(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_full(ownVersion, otherVersion); -} - -int -ndbCompatible_api_ndb(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_full(ownVersion, otherVersion); -} - -int -ndbCompatible_ndb_api(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_upgrade(ownVersion, otherVersion); -} - -int -ndbCompatible_ndb_ndb(Uint32 ownVersion, Uint32 otherVersion) -{ - return ndbCompatible_upgrade(ownVersion, otherVersion); -} diff --git a/storage/ndb/src/cw/Makefile.am b/storage/ndb/src/cw/Makefile.am deleted file mode 100644 index f354846a5b6..00000000000 --- a/storage/ndb/src/cw/Makefile.am +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = cpcd - -windoze-dsp: diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp deleted file mode 100644 index 068416b2f15..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.cpp +++ /dev/null @@ -1,215 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
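/* Reviewer sketch (not from the tree): the version word layout assumed by the
 * ndbGetMajor/Minor/Build helpers deleted above; major, minor and build are
 * packed one byte each into a 32-bit value. */
#include <stdint.h>

#define SKETCH_MAKE_VERSION(M, m, b) \
  ((uint32_t)((((M) & 0xFF) << 16) | (((m) & 0xFF) << 8) | ((b) & 0xFF)))

static uint32_t sketch_major(uint32_t v) { return (v >> 16) & 0xFF; }
static uint32_t sketch_minor(uint32_t v) { return (v >> 8) & 0xFF; }
static uint32_t sketch_build(uint32_t v) { return v & 0xFF; }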
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "stdafx.h" - -HINSTANCE hInst ; -TCHAR szTitle[MAX_LOADSTRING] ; -TCHAR szWindowClass[MAX_LOADSTRING] ; - -static CNdbControls controls ; - -int APIENTRY WinMain(HINSTANCE hInstance, - HINSTANCE hPrevInstance, - LPSTR lpCmdLine, - int nCmdShow){ - MSG msg; - HACCEL hAccelTable; - - LoadString(hInstance, IDS_APP_TITLE, szTitle, MAX_LOADSTRING) ; - LoadString(hInstance, IDC_CPC_GUI, szWindowClass, MAX_LOADSTRING) ; - NdbRegisterClass(hInstance); - - if (!InitInstance (hInstance, nCmdShow)) { - return FALSE; - } - - hAccelTable = LoadAccelerators(hInstance, (LPCTSTR)IDC_CPC_GUI); - - while (GetMessage(&msg, NULL, 0, 0)){ - - if (!TranslateAccelerator(msg.hwnd, hAccelTable, &msg)){ - - TranslateMessage(&msg); - DispatchMessage(&msg); - - } - - } - - return msg.wParam; -} - - -ATOM NdbRegisterClass(HINSTANCE hInstance){ - WNDCLASSEX wcex; - - wcex.cbSize = sizeof(WNDCLASSEX); - - wcex.style = CS_HREDRAW | CS_VREDRAW ; - wcex.lpfnWndProc = (WNDPROC)WndProc; - wcex.cbClsExtra = 0; - wcex.cbWndExtra = 0; - wcex.hInstance = hInstance; - wcex.hIcon = LoadIcon(hInstance, (LPCTSTR)IDI_CPC_GUI); - wcex.hCursor = LoadCursor(NULL, IDC_ARROW); - wcex.hbrBackground = (HBRUSH)(COLOR_WINDOW); - wcex.lpszMenuName = (LPCSTR)IDC_CPC_GUI; - wcex.lpszClassName = szWindowClass; - wcex.hIconSm = LoadIcon(wcex.hInstance, (LPCTSTR)IDI_SMALL); - - return RegisterClassEx(&wcex); -} - - -BOOL InitInstance(HINSTANCE hInstance, int nCmdShow){ - - HWND hWnd; - - hInst = hInstance; - - hWnd = CreateWindow(szWindowClass, szTitle, WS_OVERLAPPEDWINDOW, - CW_USEDEFAULT, 0, CW_USEDEFAULT, 0, NULL, NULL, hInstance, NULL); - - InitCommonControls(); - - if (!hWnd) return FALSE ; - - ShowWindow(hWnd, nCmdShow) ; - UpdateWindow(hWnd) ; - - return TRUE; -} - -LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam){ - - int wmId, wmEvent; - PAINTSTRUCT ps; - HDC hdc; - int c = 0 ; - - switch (message) - { - - case WM_CREATE: - _assert(controls.Create(hInst, hWnd)) ; - return 0 ; - - case WM_COMMAND: - wmId = LOWORD(wParam); - wmEvent = HIWORD(wParam); - - switch (wmId){ - case IDM_ABOUT: - DialogBox(hInst, (LPCTSTR)IDD_ABOUTBOX, hWnd, (DLGPROC)About); - break; - case IDM_EXIT: - DestroyWindow(hWnd); - break; - default: - return DefWindowProc(hWnd, message, wParam, lParam); - } - break; - - case WM_NOTIFY: - switch (((LPNMHDR) lParam)->code) { - case TTN_GETDISPINFO: { - - LPTOOLTIPTEXT lpttt; - lpttt = (LPTOOLTIPTEXT) lParam; - lpttt->hinst = hInst; - - int idButton = lpttt->hdr.idFrom; - - switch (idButton){ - case IDM_NEW: - lpttt->lpszText = MAKEINTRESOURCE(IDS_TIP_NEW); - break; - case IDM_DELETE: - lpttt->lpszText = MAKEINTRESOURCE(IDS_TIP_DELETE); - break; - case IDM_PROPS: - lpttt->lpszText = MAKEINTRESOURCE(IDS_TIP_PROPS); - break; - } - break; - } - case TVN_SELCHANGED: { - LPNMTREEVIEW pnmtv ; - - pnmtv = (LPNMTREEVIEW) lParam ; - controls.ToggleListViews(pnmtv) ; - - break ; - } - - case NM_RCLICK: { - LPNMHDR lpnmh ; - lpnmh = (LPNMHDR) lParam ; - switch(lpnmh->idFrom){ - case ID_TREEVIEW: - break; - default: - break ; - } - } - - default: - break; - } - - - case WM_PAINT: - hdc = BeginPaint(hWnd, &ps) ; - EndPaint(hWnd, &ps); - break; - - case WM_SIZE: - controls.Resize() ; - return 0 ; - - case WM_DESTROY: - PostQuitMessage(0); - break; - - default: - return 
DefWindowProc(hWnd, message, wParam, lParam); - } - return 0; -} - - -LRESULT CALLBACK About(HWND hDlg, UINT message, WPARAM wParam, LPARAM lParam){ - - switch (message){ - - case WM_INITDIALOG: - return TRUE; - - case WM_COMMAND: - if (LOWORD(wParam) == IDOK || LOWORD(wParam) == IDCANCEL){ - EndDialog(hDlg, LOWORD(wParam)); - return TRUE; - } - break; - } - return FALSE; -} - - - - - diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp deleted file mode 100644 index 91007b0a47e..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsp +++ /dev/null @@ -1,216 +0,0 @@ -# Microsoft Developer Studio Project File - Name="CPC_GUI" - Package Owner=<4> -# Microsoft Developer Studio Generated Build File, Format Version 6.00 -# ** DO NOT EDIT ** - -# TARGTYPE "Win32 (x86) Application" 0x0101 - -CFG=CPC_GUI - Win32 Debug -!MESSAGE This is not a valid makefile. To build this project using NMAKE, -!MESSAGE use the Export Makefile command and run -!MESSAGE -!MESSAGE NMAKE /f "CPC_GUI.mak". -!MESSAGE -!MESSAGE You can specify a configuration when running NMAKE -!MESSAGE by defining the macro CFG on the command line. For example: -!MESSAGE -!MESSAGE NMAKE /f "CPC_GUI.mak" CFG="CPC_GUI - Win32 Debug" -!MESSAGE -!MESSAGE Possible choices for configuration are: -!MESSAGE -!MESSAGE "CPC_GUI - Win32 Release" (based on "Win32 (x86) Application") -!MESSAGE "CPC_GUI - Win32 Debug" (based on "Win32 (x86) Application") -!MESSAGE - -# Begin Project -# PROP AllowPerConfigDependencies 0 -# PROP Scc_ProjName "" -# PROP Scc_LocalPath "" -CPP=cl.exe -MTL=midl.exe -RSC=rc.exe - -!IF "$(CFG)" == "CPC_GUI - Win32 Release" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 0 -# PROP BASE Output_Dir "Release" -# PROP BASE Intermediate_Dir "Release" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 0 -# PROP Output_Dir "Release" -# PROP Intermediate_Dir "Release" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /Yu"stdafx.h" /FD /c -# ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /D "_MBCS" /Yu"stdafx.h" /FD /c -# ADD BASE MTL /nologo /D "NDEBUG" /mktyplib203 /win32 -# ADD MTL /nologo /D "NDEBUG" /mktyplib203 /win32 -# ADD BASE RSC /l 0x409 /d "NDEBUG" -# ADD RSC /l 0x409 /d "NDEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /machine:I386 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib mfc42.lib /nologo /subsystem:windows /machine:I386 - -!ELSEIF "$(CFG)" == "CPC_GUI - Win32 Debug" - -# PROP BASE Use_MFC 0 -# PROP BASE Use_Debug_Libraries 1 -# PROP BASE Output_Dir "Debug" -# PROP BASE Intermediate_Dir "Debug" -# PROP BASE Target_Dir "" -# PROP Use_MFC 0 -# PROP Use_Debug_Libraries 1 -# PROP Output_Dir "Debug" -# PROP Intermediate_Dir "Debug" -# PROP Ignore_Export_Lib 0 -# PROP Target_Dir "" -# ADD BASE CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /Yu"stdafx.h" /FD /GZ /c -# ADD CPP /nologo /W3 /Gm /GX /ZI /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /D "_MBCS" /FR /Yu"stdafx.h" /FD /GZ /c -# ADD BASE MTL /nologo /D "_DEBUG" /mktyplib203 /win32 -# ADD MTL /nologo /D "_DEBUG" /mktyplib203 /win32 -# ADD BASE RSC /l 0x409 /d "_DEBUG" 
-# ADD RSC /l 0x409 /d "_DEBUG" -BSC32=bscmake.exe -# ADD BASE BSC32 /nologo -# ADD BSC32 /nologo -LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /debug /machine:I386 /pdbtype:sept -# ADD LINK32 kernel32.lib user32.lib gdi32.lib comdlg32.lib advapi32.lib shell32.lib comctl32.lib mfc42d.lib /nologo /subsystem:windows /debug /machine:I386 /pdbtype:sept - -!ENDIF - -# Begin Target - -# Name "CPC_GUI - Win32 Release" -# Name "CPC_GUI - Win32 Debug" -# Begin Group "Source Files" - -# PROP Default_Filter "cpp;c;cxx;rc;def;r;odl;idl;hpj;bat" -# Begin Source File - -SOURCE=.\CPC_GUI.cpp -# End Source File -# Begin Source File - -SOURCE=.\CPC_GUI.rc -# End Source File -# Begin Source File - -SOURCE=.\NdbControls.cpp -# End Source File -# Begin Source File - -SOURCE=.\StdAfx.cpp -# ADD CPP /Yc"stdafx.h" -# End Source File -# End Group -# Begin Group "Header Files" - -# PROP Default_Filter "h;hpp;hxx;hm;inl" -# Begin Source File - -SOURCE=.\CPC_GUI.h -# End Source File -# Begin Source File - -SOURCE=.\NdbControls.h -# End Source File -# Begin Source File - -SOURCE=.\resource.h -# End Source File -# Begin Source File - -SOURCE=.\StdAfx.h -# End Source File -# End Group -# Begin Group "Resource Files" - -# PROP Default_Filter "ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe" -# Begin Source File - -SOURCE=.\bitmap1.bmp -# End Source File -# Begin Source File - -SOURCE=.\bmp00001.bmp -# End Source File -# Begin Source File - -SOURCE=.\C.bmp -# End Source File -# Begin Source File - -SOURCE=.\Closed.BMP -# End Source File -# Begin Source File - -SOURCE=.\Closed.ICO -# End Source File -# Begin Source File - -SOURCE=.\Closed24.bmp -# End Source File -# Begin Source File - -SOURCE=.\Computer24.BMP -# End Source File -# Begin Source File - -SOURCE=.\CPC_GUI.ico -# End Source File -# Begin Source File - -SOURCE=.\Db.bmp -# End Source File -# Begin Source File - -SOURCE=.\icon1.ico -# End Source File -# Begin Source File - -SOURCE=.\O.bmp -# End Source File -# Begin Source File - -SOURCE=.\Open.BMP -# End Source File -# Begin Source File - -SOURCE=.\Open.ICO -# End Source File -# Begin Source File - -SOURCE=.\Open24.bmp -# End Source File -# Begin Source File - -SOURCE=.\small.ico -# End Source File -# Begin Source File - -SOURCE=.\toolbar.bmp -# End Source File -# Begin Source File - -SOURCE=.\toolbar1.bmp -# End Source File -# Begin Source File - -SOURCE=.\Tower2.ICO -# End Source File -# Begin Source File - -SOURCE=.\TowerIC1.BMP -# End Source File -# End Group -# Begin Source File - -SOURCE=.\ReadMe.txt -# End Source File -# End Target -# End Project diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw deleted file mode 100644 index 1f163a31662..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.dsw +++ /dev/null @@ -1,29 +0,0 @@ -Microsoft Developer Studio Workspace File, Format Version 6.00 -# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE! 
- -############################################################################### - -Project: "CPC_GUI"=.\CPC_GUI.dsp - Package Owner=<4> - -Package=<5> -{{{ -}}} - -Package=<4> -{{{ -}}} - -############################################################################### - -Global: - -Package=<5> -{{{ -}}} - -Package=<3> -{{{ -}}} - -############################################################################### - diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h deleted file mode 100644 index bbf5115510a..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.h +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#if !defined(AFX_CPC_GUI_H__EA01C861_C56D_48F1_856F_4935E20620B1__INCLUDED_) -#define AFX_CPC_GUI_H__EA01C861_C56D_48F1_856F_4935E20620B1__INCLUDED_ - -#if _MSC_VER > 1000 -#pragma once -#endif // _MSC_VER > 1000 - - -#define MAX_LOADSTRING 100 - - - -#define TV_ROOT_ITEMS 2 - - -// Global Variables - -ATOM NdbRegisterClass(HINSTANCE) ; -BOOL InitInstance(HINSTANCE, int) ; -LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM) ; -LRESULT CALLBACK About(HWND, UINT, WPARAM, LPARAM); - -#endif // !defined(AFX_CPC_GUI_H__EA01C861_C56D_48F1_856F_4935E20620B1__INCLUDED_) diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico deleted file mode 100644 index 386883523bc..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.ico and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc deleted file mode 100644 index 41d75b2b282..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.rc +++ /dev/null @@ -1,193 +0,0 @@ -//Microsoft Developer Studio generated resource script. -// -#include "resource.h" - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. -// -#define APSTUDIO_HIDDEN_SYMBOLS -#include "windows.h" -#undef APSTUDIO_HIDDEN_SYMBOLS -#include "resource.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// English (U.S.) resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) -#ifdef _WIN32 -LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// -// Icon -// - -// Icon with lowest ID value placed first to ensure application icon -// remains consistent on all systems. 
-IDI_CPC_GUI ICON DISCARDABLE "CPC_GUI.ICO" -IDI_SMALL ICON DISCARDABLE "SMALL.ICO" - -///////////////////////////////////////////////////////////////////////////// -// -// Menu -// - -IDM_CPC_GUI MENU DISCARDABLE -BEGIN - POPUP "&File" - BEGIN - MENUITEM "E&xit", IDM_EXIT - END - POPUP "&Actions" - BEGIN - MENUITEM "&Insert...", ID_ACTIONS_INSERT - MENUITEM "&Delete", ID_ACTIONS_DELETE - MENUITEM "&Properties", ID_ACTIONS_PROPERTIES - END - POPUP "&Help" - BEGIN - MENUITEM "&About ...", IDM_ABOUT - END -END - -IDM_TREEVIEW MENU DISCARDABLE -BEGIN - MENUITEM "&Insert", ID_TREEVIEW1 - MENUITEM "&Delete", ID_DELETE - MENUITEM "&Properties", ID_PROPERTIES -END - - -///////////////////////////////////////////////////////////////////////////// -// -// Accelerator -// - -IDC_CPC_GUI ACCELERATORS MOVEABLE PURE -BEGIN - "?", IDM_ABOUT, ASCII, ALT - "/", IDM_ABOUT, ASCII, ALT -END - - -///////////////////////////////////////////////////////////////////////////// -// -// Dialog -// - -IDD_ABOUTBOX DIALOG DISCARDABLE 22, 17, 230, 75 -STYLE DS_MODALFRAME | WS_CAPTION | WS_SYSMENU -CAPTION "About" -FONT 8, "MS Sans Serif" -BEGIN - LTEXT "NDB Cluster Process Control Applet v1.0",IDC_STATIC,7,8, - 213,8,SS_NOPREFIX - LTEXT "Copyright (C) 2003 MySQL AB", - IDC_STATIC,7,20,213,20 - DEFPUSHBUTTON "OK",IDOK,185,55,41,16,WS_GROUP -END - - -#ifdef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// TEXTINCLUDE -// - -2 TEXTINCLUDE MOVEABLE PURE -BEGIN - "#define APSTUDIO_HIDDEN_SYMBOLS\r\n" - "#include ""windows.h""\r\n" - "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n" - "#include ""resource.h""\r\n" - "\0" -END - -3 TEXTINCLUDE MOVEABLE PURE -BEGIN - "\r\n" - "\0" -END - -1 TEXTINCLUDE MOVEABLE PURE -BEGIN - "resource.h\0" -END - -#endif // APSTUDIO_INVOKED - - -///////////////////////////////////////////////////////////////////////////// -// -// Bitmap -// - -IDR_TOOLBAR BITMAP MOVEABLE PURE "toolbar.bmp" -IDB_TOOLBAR BITMAP MOVEABLE PURE "bitmap1.bmp" -IDB_COMPUTER BITMAP MOVEABLE PURE "TowerIC1.BMP" -IDB_OPEN BITMAP MOVEABLE PURE "Open.BMP" -IDB_CLOSED BITMAP MOVEABLE PURE "Closed.BMP" -IDB_DATABASE BITMAP MOVEABLE PURE "DB.bmp" - -///////////////////////////////////////////////////////////////////////////// -// -// Toolbar -// - -IDR_TOOLBAR TOOLBAR MOVEABLE PURE 18, 18 -BEGIN - BUTTON ID_BUTTON32773 - BUTTON ID_BUTTON32783 - BUTTON ID_BUTTON32784 -END - - -///////////////////////////////////////////////////////////////////////////// -// -// String Table -// - -STRINGTABLE DISCARDABLE -BEGIN - IDS_APP_TITLE "NDB Cluster Process Control Applet" - IDS_TV_ROOT_COMPUTERS "Computers" - IDS_TV_ROOT_DATABASES "Databases" - IDS_LV_COMPUTER_HEADER_1 "Computer" - IDS_LV_COMPUTER_HEADER_2 "Hostname" - IDS_LV_PROCESS_HEADER_1 "Process" - IDC_CPC_GUI "CPC_GUI" - IDS_TIP_NEW "Add new computer or database" - IDS_TIP_DELETE "Delete selected computer or database" -END - -STRINGTABLE DISCARDABLE -BEGIN - IDS_TIP_PROPS "Display properties for selected computer or database" - IDS_LV_PROCESS_HEADER_2 "Name" - IDS_LV_PROCESS_HEADER_3 "Owner" - IDS_LV_PROCESS_HEADER_4 "Status" - IDS_LV_COMPUTER_HEADER_3 "Status" -END - -#endif // English (U.S.) resources -///////////////////////////////////////////////////////////////////////////// - - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. 
-// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln deleted file mode 100644 index 86b574d851d..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.sln +++ /dev/null @@ -1,21 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 7.00 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "CPC_GUI", "CPC_GUI.vcproj", "{F5FADD9D-4353-4A73-88DC-474A4D17B485}" -EndProject -Global - GlobalSection(SolutionConfiguration) = preSolution - ConfigName.0 = Debug - ConfigName.1 = Release - EndGlobalSection - GlobalSection(ProjectDependencies) = postSolution - EndGlobalSection - GlobalSection(ProjectConfiguration) = postSolution - {F5FADD9D-4353-4A73-88DC-474A4D17B485}.Debug.ActiveCfg = Debug|Win32 - {F5FADD9D-4353-4A73-88DC-474A4D17B485}.Debug.Build.0 = Debug|Win32 - {F5FADD9D-4353-4A73-88DC-474A4D17B485}.Release.ActiveCfg = Release|Win32 - {F5FADD9D-4353-4A73-88DC-474A4D17B485}.Release.Build.0 = Release|Win32 - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - EndGlobalSection - GlobalSection(ExtensibilityAddIns) = postSolution - EndGlobalSection -EndGlobal diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo deleted file mode 100644 index e7d178f04c3..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.suo and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj b/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj deleted file mode 100644 index fb1e2fd601c..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj +++ /dev/null @@ -1,240 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO b/storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO deleted file mode 100644 index 044042b42fb..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/Closed.ICO and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp b/storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp deleted file mode 100644 index 63383ad0990..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/NdbControls.cpp +++ /dev/null @@ -1,436 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "stdafx.h" -#include "NdbControls.h" - - -/** -* CNdbControl implementation -*/ - -BOOL CNdbControl::GetRect(LPRECT lprc) const { - - _ASSERT(this) ; - - return GetClientRect(m_hControl, lprc) ; - -} - -BOOL CNdbControl::Resize(LONG x, LONG y, LONG w, LONG h) const { - - _ASSERT(this) ; - - if(!MoveWindow(m_hControl, x, y, w, h, TRUE)) - return FALSE ; - if(m_bVisible){ - ShowWindow(m_hControl, SW_SHOW) ; - UpdateWindow(m_hControl) ; - } - return TRUE ; - -} - -BOOL CNdbControl::Show(BOOL bShow) { - - _ASSERT(this) ; - - if(bShow){ - ShowWindow(m_hControl, SW_SHOW); - m_bVisible = TRUE ; - }else{ - ShowWindow(m_hControl, SW_HIDE); - m_bVisible = FALSE ; - } - EnableWindow(m_hControl, bShow) ; - UpdateWindow(m_hControl) ; - - return TRUE ; -} - - - -CNdbControl::~CNdbControl(){ - - DestroyWindow(m_hControl) ; - if(m_hMenu) - DestroyMenu(m_hMenu) ; - -} - - -/** -* CNdbListView implementation -*/ - -BOOL CNdbListView::Create(HINSTANCE hInst, HWND hParent, DWORD dwId, NDB_ITEM_TYPE enType, PNDB_LV pstH, DWORD dwWidth) { - - if(!pstH) - return FALSE ; - - LV_COLUMN lvC ; - m_hInstance = hInst ; - m_hParent = hParent ; - m_dwId = dwId ; - m_dwWidth = dwWidth ; - m_dwWidth = 100 ; - m_enType = enType; - char* szLabels[MAX_LV_HEADERS] ; - int count = 0 ; - - m_hControl = CreateWindowEx(WS_EX_OVERLAPPEDWINDOW, WC_LISTVIEW, TEXT(""), - WS_VISIBLE | WS_CHILD | WS_BORDER | LVS_REPORT, - 0, 0, 0, 0, m_hParent, (HMENU)m_dwId, hInst, NULL ); - - if(!m_hControl) - return FALSE ; - - lvC.mask = LVCF_FMT | LVCF_WIDTH | LVCF_TEXT | LVCF_SUBITEM; - lvC.fmt = LVCFMT_LEFT; - - switch(enType){ - case ITEM_COMPR_ROOT: - szLabels[0] = pstH->szComputer ; - szLabels[1] = pstH->szHostname ; - szLabels[2] = pstH->szStatus ; - count = 3 ; - break ; - case ITEM_DB_ROOT: - szLabels[0] = pstH->szDatabase ; - szLabels[1] = pstH->szStatus ; - count = 2 ; - break ; - case ITEM_COMPR: - szLabels[0] = pstH->szProcess ; - szLabels[1] = pstH->szDatabase; - szLabels[2] = pstH->szOwner ; - szLabels[3] = pstH->szStatus ; - count = 4 ; - case ITEM_DB: - szLabels[0] = pstH->szProcess ; - szLabels[1] = pstH->szComputer; - szLabels[2] = pstH->szOwner ; - szLabels[3] = pstH->szStatus ; - count = 4 ; - break ; - NDB_DEFAULT_UNREACHABLE ; - } - - for(int j = 0 ; j < count ; ++j){ - lvC.iSubItem = j ; - lvC.cx = m_dwWidth ; - lvC.pszText = szLabels[j] ; - if(0xFFFFFFFF == ListView_InsertColumn(m_hControl, j, &lvC)) - return FALSE ; - } - - SendMessage(m_hControl, LVM_SETEXTENDEDLISTVIEWSTYLE, LVS_EX_FULLROWSELECT, - LVS_EX_FULLROWSELECT ); - - ShowWindow(m_hControl, SW_SHOW) ; - - return TRUE ; - -} - - - -/** -* CNdbToolBar implementation -*/ - - - -/** -* CNdbTreeView implementation -*/ - -BOOL CNdbTreeView::Create(HINSTANCE hInst, HWND hParent, DWORD dwMenuId, DWORD dwId){ - - if(!CreateTreeView(hInst, hParent, dwId)) - return FALSE ; - - m_hMenu = LoadMenu(m_hInstance,MAKEINTRESOURCE(dwMenuId)) ; - if(!m_hMenu) - return FALSE ; - - return TRUE ; -} - - -BOOL CNdbTreeView::CreateTreeView(HINSTANCE hInst, HWND hParent, DWORD dwId){ - - - m_hInstance = hInst ; - m_hParent = hParent ; - m_dwId = dwId ; - HIMAGELIST himl ; - HBITMAP hbmp ; - DWORD dwCount = 0 ; - - m_hControl = CreateWindowEx(WS_EX_OVERLAPPEDWINDOW, WC_TREEVIEW, "Tree View", - WS_VISIBLE | WS_CHILD | WS_BORDER | TVS_HASLINES | - TVS_HASBUTTONS | 
TVS_LINESATROOT | TVS_SINGLEEXPAND, - 0, 0, 0, 0, m_hParent, (HMENU)m_dwId, m_hInstance, NULL) ; - - if(!m_hControl) - return FALSE ; - - if((himl = ImageList_Create(nX, nY, ILC_MASK | ILC_COLOR8, 4, 0)) == NULL) - return FALSE ; - - hbmp = LoadBitmap(m_hInstance, MAKEINTRESOURCE(IDI_OPEN)); - hbmp = (HBITMAP)LoadImage(m_hInstance, MAKEINTRESOURCE(IDB_OPEN), IMAGE_BITMAP, nX, 0, LR_DEFAULTSIZE); - m_nOpen = ImageList_AddMasked(himl, hbmp, clr); - DeleteObject(hbmp); - hbmp = (HBITMAP)LoadImage(m_hInstance, MAKEINTRESOURCE(IDB_CLOSED), IMAGE_BITMAP, 0, 0, LR_DEFAULTSIZE); - m_nClosed = ImageList_AddMasked(himl, hbmp, clr); - DeleteObject(hbmp); - hbmp = (HBITMAP)LoadImage(m_hInstance, MAKEINTRESOURCE(IDB_COMPUTER),IMAGE_BITMAP, 0, 0, LR_DEFAULTSIZE); - m_nComputer = ImageList_AddMasked(himl, hbmp, clr); - DeleteObject(hbmp); - hbmp = (HBITMAP)LoadImage(m_hInstance, MAKEINTRESOURCE(IDB_DATABASE), IMAGE_BITMAP, 0, 0, LR_DEFAULTSIZE); - m_nDatabase = ImageList_AddMasked(himl, hbmp, clr); - DeleteObject(hbmp); - - if(ImageList_GetImageCount(himl) < 4) - return FALSE ; - - TreeView_SetImageList(m_hControl, himl, TVSIL_NORMAL); - - ShowWindow(m_hControl, SW_SHOW) ; - - return TRUE ; - -} - - - -HTREEITEM CNdbTreeView::AddItem(LPSTR szText, NDB_ITEM_TYPE enType, DWORD dwLVId){ - - TVITEM tvi ; - TVINSERTSTRUCT tvins ; - HTREEITEM hti ; - HTREEITEM hTemp ; - int nImage = m_nClosed ; - - tvi.mask = TVIF_TEXT | TVIF_IMAGE - | TVIF_SELECTEDIMAGE | TVIF_PARAM; - - tvi.pszText = szText; - tvi.cchTextMax = lstrlen(szText); - - switch(enType){ - - case ITEM_COMPR_ROOT: - nImage = m_nClosed ; - if(!m_hPrevRoot) - tvins.hParent = TVI_ROOT; - else - tvins.hInsertAfter = m_hPrevRoot ; - break ; - - case ITEM_DB_ROOT: - if(!m_hPrevRoot) - tvins.hParent = TVI_ROOT; - else - tvins.hInsertAfter = m_hPrevRoot ; - break ; - - case ITEM_COMPR: - nImage = m_nComputer ; - if(!m_hPrevComputersChild || !m_hComputersRoot) - return 0 ; - else - tvins.hInsertAfter = m_hPrevComputersChild ; - tvins.hParent = m_hComputersRoot ; - break ; - - case ITEM_DB: - nImage = m_nDatabase ; - if(!m_hPrevComputersChild || !m_hComputersRoot) - return 0 ; - else - tvins.hInsertAfter = m_hPrevDatabasesChild ; - tvins.hParent = m_hDatabasesRoot ; - break ; - - NDB_DEFAULT_UNREACHABLE ; - - } - - tvi.iImage = nImage ; - tvi.iSelectedImage = nImage ; - tvi.lParam = (LPARAM) dwLVId ; - tvins.item = tvi ; - - hTemp = TreeView_InsertItem(m_hControl, &tvins); - if(!hTemp) - return NULL ; - - switch(enType){ - - case ITEM_COMPR_ROOT: - m_hComputersRoot = hTemp ; - break ; - - case ITEM_DB_ROOT: - m_hDatabasesRoot = hTemp ; - break ; - - case ITEM_COMPR: - m_hPrevComputersChild = hTemp ; - break ; - - case ITEM_DB: - m_hPrevComputersChild = hTemp ; - break ; - - NDB_DEFAULT_UNREACHABLE ; - - } - - if (ITEM_COMPR_ROOT != enType && ITEM_DB_ROOT != enType) { - - hti = TreeView_GetParent(m_hControl, hTemp); - tvi.mask = TVIF_IMAGE | TVIF_SELECTEDIMAGE; - tvi.hItem = hti; - tvi.iImage = m_nClosed; - tvi.iSelectedImage = m_nClosed; - TreeView_SetItem(m_hControl, &tvi); - - } - - return hTemp ; -} - - -BOOL CNdbControls::Create(HINSTANCE hInst, HWND hParent){ - - m_hInstance = hInst ; - m_hParent = hParent ; - m_tb.Create(m_hInstance, m_hParent, ID_TOOLBAR, IDB_TOOLBAR) ; - m_sb.Create(m_hInstance, m_hParent, ID_STATUSBAR) ; - m_tv.Create(m_hInstance, m_hParent, IDM_TREEVIEW, ID_TREEVIEW) ; - _assert(AddView("Computers", ITEM_COMPR_ROOT)) ; - _assert(AddView("Databases", ITEM_DB_ROOT)) ; - - return TRUE ; -} - -BOOL CNdbControls::AddListView(NDB_ITEM_TYPE 
enType, DWORD dwId){ - - int count ; - CNdbListView* plv ; - PNDB_LV pst ; - - plv = new CNdbListView ; - - if(!plv) - return FALSE ; - - count = m_map_lvc.GetCount() + m_dwFirstId_lv ; - - switch(enType){ - case ITEM_COMPR_ROOT: - pst = &m_stlvcRoot ; - break ; - case ITEM_DB_ROOT: - pst = &m_stlvdRoot ; - break ; - case ITEM_COMPR: - pst = &m_stlvc ; - break ; - case ITEM_DB: - pst = &m_stlvd ; - break ; - NDB_DEFAULT_UNREACHABLE ; - } - - plv->Create(m_hInstance, m_hParent, dwId, enType, pst, LV_HEADER_WIDTH) ; - - m_map_lvc[count] = plv ; - - return TRUE ; -} - -BOOL CNdbControls::AddView(LPSTR szText, NDB_ITEM_TYPE enType){ - - DWORD dwId_lv = m_dwNextId_lv ; - - if(AddListView(enType, dwId_lv) && m_tv.AddItem(szText, enType, dwId_lv)) - m_dwNextId_lv++ ; - else - return FALSE ; - - return TRUE ; -}; - - -VOID CNdbControls::ToggleListViews(LPNMTREEVIEW pnmtv){ - - CNdbListView* plv ; - int count = m_map_lvc.GetCount() + m_dwFirstId_lv ; - - for(int c = FIRST_ID_LV ; c < count; ++c){ - _assert(m_map_lvc.Lookup(c, plv)) ; - if(pnmtv->itemNew.lParam == (c)) - plv->Show(TRUE) ; - else - plv->Show(FALSE) ; - } -} - - - -VOID CNdbControls::Resize(){ - - RECT rc, rcTB, rcSB ; - LONG tw, sw, lx, ly, lw, lh, tvw, tvh ; - CNdbListView* plv ; - int count ; //, id ; - - GetClientRect(m_hParent, &rc) ; - m_tb.GetRect(&rcTB) ; - m_sb.GetRect(&rcSB) ; - - sw = rcSB.bottom ; - tw = rcTB.bottom ; - - m_tb.Resize(0, 0, rc.right, tw) ; - - tvw = rc.right / 4 ; - tvh = rc.bottom - sw - tw - BORDER ; - - m_tv.Resize(0, tw + BORDER, tvw, tvh) ; - - m_sb.Resize(0, tvh, rc.left, sw) ; - - lx = tvw + BORDER - 2 ; - ly = tw + BORDER ; - lw = rc.right - tvw - BORDER + 1 ; - lh = tvh ; - - count = m_map_lvc.GetCount() + FIRST_ID_LV ; - - for(int c = FIRST_ID_LV ; c < count; ++c){ - _assert(m_map_lvc.Lookup(c, plv)) ; - plv->Resize(lx, ly, lw, lh) ; - } - - return ; - -} diff --git a/storage/ndb/src/cw/cpcc-win32/C++/Open.ICO b/storage/ndb/src/cw/cpcc-win32/C++/Open.ICO deleted file mode 100644 index ab7b05d9df7..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/Open.ICO and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp deleted file mode 100644 index d2b002c3d90..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.cpp +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -// stdafx.cpp : source file that includes just the standard includes -// CPC_GUI.pch will be the pre-compiled header -// stdafx.obj will contain the pre-compiled type information - -#include "stdafx.h" - -// TODO: reference any additional headers you need in STDAFX.H -// and not in this file diff --git a/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h b/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h deleted file mode 100644 index aedd535b205..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/StdAfx.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -// stdafx.h : include file for standard system include files, -// or project specific include files that are used frequently, but -// are changed infrequently -// - -#if !defined(AFX_STDAFX_H__A9DB83DB_A9FD_11D0_BFD1_444553540000__INCLUDED_) -#define AFX_STDAFX_H__A9DB83DB_A9FD_11D0_BFD1_444553540000__INCLUDED_ - -#if _MSC_VER > 1000 -#pragma once -#endif // _MSC_VER > 1000 - -#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers - -#ifdef _DEBUG -#define NDB_DEFAULT_UNREACHABLE default: _ASSERT(0); break -#elif _MSC_VER >= 1200 -#define NDB_DEFAULT_UNREACHABLE default: __assume(0); break -#else -#define NDB_DEFAULT_UNREACHABLE default: break -#endif; - - -#ifdef _DEBUG -#define _assert _ASSERT -#else -#define _assert(expr) expr -#endif - - -#include -#include - -// C RunTime Header Files -#include -#include -#include -#include -#include -#include - -// Local Header Files -#include "resource.h" -#include "NdbControls.h" -#include "CPC_GUI.h" - - -// TODO: reference additional headers your program requires here - -//{{AFX_INSERT_LOCATION}} -// Microsoft Visual C++ will insert additional declarations immediately before the previous line. - -#endif // !defined(AFX_STDAFX_H__A9DB83DB_A9FD_11D0_BFD1_444553540000__INCLUDED_) diff --git a/storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp deleted file mode 100644 index bcc7e67d06f..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/TreeView.cpp +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "StdAfx.h" -#include "resource.h" -#include "CPC_GUI.h" diff --git a/storage/ndb/src/cw/cpcc-win32/C++/TreeView.h b/storage/ndb/src/cw/cpcc-win32/C++/TreeView.h deleted file mode 100644 index 68859693228..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/TreeView.h +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - - diff --git a/storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp b/storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp deleted file mode 100644 index e50af403eda..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/bmp00001.bmp and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/resource.h b/storage/ndb/src/cw/cpcc-win32/C++/resource.h deleted file mode 100644 index 8270a3e9962..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/C++/resource.h +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//{{NO_DEPENDENCIES}} -// Microsoft Developer Studio generated include file. 
-// Used by CPC_GUI.rc -// -#define IDC_MYICON 2 -#define IDD_CPC_GUI_DIALOG 102 -#define IDD_ABOUTBOX 103 -#define IDS_APP_TITLE 103 -#define IDM_ABOUT 104 -#define IDS_LV_ROOT_COMPUTERS 104 -#define IDS_TV_ROOT_COMPUTERS 104 -#define IDM_EXIT 105 -#define IDS_LV_ROOT_DATABASES 105 -#define IDS_TV_ROOT_DATABASES 105 -#define IDS_HELLO 106 -#define IDS_LV_COMPUTER_HEADER_1 106 -#define IDI_CPC_GUI 107 -#define IDS_LV_COMPUTER_HEADER_2 107 -#define IDI_SMALL 108 -#define IDS_LV_PROCESS_HEADER_1 108 -#define IDC_CPC_GUI 109 -#define IDM_CPC_GUI 109 -#define IDS_TIP_NEW 110 -#define IDS_TIP_DELETE 111 -#define IDS_TIP_PROPS 112 -#define IDS_LV_PROCESS_HEADER_2 113 -#define IDS_LV_PROCESS_HEADER_3 114 -#define IDS_LV_PROCESS_HEADER_4 115 -#define IDS_LV_COMPUTER_HEADER_3 116 -#define IDR_MAINFRAME 128 -#define ID_TREEVIEW 130 -#define IDM_TREEVIEW 130 -#define IDB_TOOLBAR 137 -#define ID_TOOLBAR 158 -#define IDB_COMPUTER 168 -#define IDB_CLOSED 169 -#define IDB_OPEN 170 -#define IDI_DATABASE 172 -#define IDI_CLOSED 175 -#define IDI_OPEN 176 -#define IDI_COMPUTER 177 -#define IDB_MASK 178 -#define IDB_DATABASE 182 -#define IDM_TV 183 -#define ID_TREEVIEW1 32771 -#define ID_BUTTON32773 32773 -#define IDM_NEW 32774 -#define IDM_DELETE 32775 -#define IDM_PROPS 32776 -#define ID_LIST_C 32777 -#define ID_ACTIONS_INSERT 32778 -#define ID_ACTIONS_DELETE 32779 -#define ID_DELETE 32780 -#define ID_PROPERTIES 32781 -#define ID_ACTIONS_PROPERTIES 32782 -#define ID_BUTTON32783 32783 -#define ID_BUTTON32784 32784 -#define ID_LIST_P 32785 -#define ID_STATUSBAR 32786 -#define ID_LIST_C_ROOT 32787 -#define ID_LIST_D_ROOT 32788 -#define IDM_ADDNEW 32793 -#define IDC_STATIC -1 - -// Next default values for new objects -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NEXT_RESOURCE_VALUE 184 -#define _APS_NEXT_COMMAND_VALUE 32796 -#define _APS_NEXT_CONTROL_VALUE 1000 -#define _APS_NEXT_SYMED_VALUE 110 -#endif -#endif diff --git a/storage/ndb/src/cw/cpcc-win32/C++/small.ico b/storage/ndb/src/cw/cpcc-win32/C++/small.ico deleted file mode 100644 index 8f94d9aa828..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/small.ico and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp b/storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp deleted file mode 100644 index a1059352c66..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/C++/toolbar.bmp and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/App.ico b/storage/ndb/src/cw/cpcc-win32/csharp/App.ico deleted file mode 100644 index 3a5525fd794..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/csharp/App.ico and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs b/storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs deleted file mode 100644 index c29998ff178..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/CPC_Form.cs +++ /dev/null @@ -1,1416 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using System.Data; -using System.Threading; - -namespace NDB_CPC -{ - /// - /// Summary description for Form1. - /// - public class CPC : System.Windows.Forms.Form - { - private System.Windows.Forms.TreeView tvComputerCluster; - private System.Windows.Forms.ContextMenu ctxTreeViewMenu; - private System.Windows.Forms.ColumnHeader chComputer; - private System.Windows.Forms.ColumnHeader chProcessName; - private System.Windows.Forms.ContextMenu ctxListViewMenu; - private System.Windows.Forms.MenuItem mainMenuItem; - private System.Windows.Forms.ColumnHeader chProcesses; - private System.Windows.Forms.MainMenu mainMenu; - private System.Windows.Forms.Panel panel1; - private System.Windows.Forms.MenuItem menuItem7; - private System.Windows.Forms.MenuItem menuItem10; - private System.Windows.Forms.MenuItem mainMenuFile; - private System.Windows.Forms.MenuItem mainMenuComputer; - private System.Windows.Forms.MenuItem subMenuComputerAdd; - private System.Windows.Forms.MenuItem subMenuComputerRemove; - private System.Windows.Forms.MenuItem subMenuComputerDisconnect; - private System.Windows.Forms.MenuItem subMenuComputerProperties; - private System.ComponentModel.IContainer components; - - private System.Windows.Forms.MenuItem menuItem3; - private System.Windows.Forms.MenuItem computerMenuAdd; - private System.Windows.Forms.MenuItem computerMenuRemove; - private System.Windows.Forms.MenuItem menuItem5; - private System.Windows.Forms.MenuItem computerMenuDisconnect; - private System.Windows.Forms.MenuItem computerMenuConnect; - private System.Windows.Forms.MenuItem computerMenuProperties; - private System.Windows.Forms.MenuItem menuItem11; - private System.Windows.Forms.MenuItem tvCtxMenuComputerAdd; - private System.Windows.Forms.MenuItem tvCtxMenuComputerRemove; - private System.Windows.Forms.MenuItem tvCtxMenuComputerConnect; - private System.Windows.Forms.MenuItem tvCtxMenuComputerDisconnect; - private System.Windows.Forms.MenuItem tvCtxMenuComputerDefine; - private System.Windows.Forms.MenuItem tvCtxMenuDatabaseNew; - private System.Windows.Forms.MenuItem menuItem1; - private System.Windows.Forms.MenuItem menuItem2; - private System.Windows.Forms.MenuItem mainMenuDatabase; - private System.Windows.Forms.MenuItem subMenuDatabaseCreate; - private System.Windows.Forms.MenuItem menuItem8; - private System.Windows.Forms.MenuItem tvCtxMenuProperties; - private System.Windows.Forms.ImageList imageTV; - - private ComputerMgmt computerMgmt; - private System.Windows.Forms.MenuItem computerMenuRefresh; - private System.Windows.Forms.ListView listView; - private System.Windows.Forms.ColumnHeader chComputerIP; - private System.Windows.Forms.ColumnHeader chDatabase; - private System.Windows.Forms.ColumnHeader chName; - private System.Windows.Forms.ColumnHeader chOwner; - private System.Windows.Forms.ColumnHeader chStatus; - private System.Windows.Forms.Splitter splitter2; - private System.Windows.Forms.Splitter splitterVertical; - private System.Windows.Forms.Splitter splitterHorizont; - private Thread guiThread; - private float resizeWidthRatio; - private System.Windows.Forms.MenuItem menuItem6; - private System.Windows.Forms.MenuItem menuGetStatus; - private 
System.Windows.Forms.MenuItem menuStartProcess; - private System.Windows.Forms.MenuItem menuRestartProcess; - private System.Windows.Forms.MenuItem menuStopProcess; - private System.Windows.Forms.MenuItem menuRemoveProcess; - private System.Windows.Forms.MenuItem menuRefresh; - private System.Windows.Forms.OpenFileDialog openHostFileDialog; - private System.Windows.Forms.SaveFileDialog saveHostFileDialog; - private float resizeHeightRatio; - private System.Windows.Forms.TextBox mgmConsole; - int i; - public CPC() - { - // - // Required for Windows Form Designer support - // - InitializeComponent(); - - // TODO: Add any constructor code after InitializeComponent call - // - computerMgmt = new ComputerMgmt(); - guiThread = new Thread(new ThreadStart(updateGuiThread)); - - // guiThread.Start(); - } - - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if (components != null) - { - components.Dispose(); - } - } - //guiThread.Abort(); - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. - /// - private void InitializeComponent() - { - this.components = new System.ComponentModel.Container(); - System.Resources.ResourceManager resources = new System.Resources.ResourceManager(typeof(CPC)); - this.tvComputerCluster = new System.Windows.Forms.TreeView(); - this.ctxTreeViewMenu = new System.Windows.Forms.ContextMenu(); - this.tvCtxMenuComputerAdd = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuComputerRemove = new System.Windows.Forms.MenuItem(); - this.menuGetStatus = new System.Windows.Forms.MenuItem(); - this.menuItem6 = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuComputerConnect = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuComputerDisconnect = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuDatabaseNew = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuComputerDefine = new System.Windows.Forms.MenuItem(); - this.menuItem8 = new System.Windows.Forms.MenuItem(); - this.tvCtxMenuProperties = new System.Windows.Forms.MenuItem(); - this.imageTV = new System.Windows.Forms.ImageList(this.components); - this.ctxListViewMenu = new System.Windows.Forms.ContextMenu(); - this.menuStartProcess = new System.Windows.Forms.MenuItem(); - this.menuRestartProcess = new System.Windows.Forms.MenuItem(); - this.menuStopProcess = new System.Windows.Forms.MenuItem(); - this.menuRemoveProcess = new System.Windows.Forms.MenuItem(); - this.menuRefresh = new System.Windows.Forms.MenuItem(); - this.computerMenuAdd = new System.Windows.Forms.MenuItem(); - this.menuItem3 = new System.Windows.Forms.MenuItem(); - this.computerMenuRemove = new System.Windows.Forms.MenuItem(); - this.menuItem5 = new System.Windows.Forms.MenuItem(); - this.computerMenuDisconnect = new System.Windows.Forms.MenuItem(); - this.computerMenuConnect = new System.Windows.Forms.MenuItem(); - this.menuItem11 = new System.Windows.Forms.MenuItem(); - this.computerMenuProperties = new System.Windows.Forms.MenuItem(); - this.computerMenuRefresh = new System.Windows.Forms.MenuItem(); - this.chComputer = new System.Windows.Forms.ColumnHeader(); - this.chProcessName = new System.Windows.Forms.ColumnHeader(); - this.mainMenuItem = new System.Windows.Forms.MenuItem(); - this.chProcesses = new System.Windows.Forms.ColumnHeader(); - this.mainMenu = new System.Windows.Forms.MainMenu(); - this.mainMenuFile = 
new System.Windows.Forms.MenuItem(); - this.menuItem2 = new System.Windows.Forms.MenuItem(); - this.menuItem1 = new System.Windows.Forms.MenuItem(); - this.mainMenuComputer = new System.Windows.Forms.MenuItem(); - this.subMenuComputerAdd = new System.Windows.Forms.MenuItem(); - this.menuItem7 = new System.Windows.Forms.MenuItem(); - this.subMenuComputerDisconnect = new System.Windows.Forms.MenuItem(); - this.subMenuComputerRemove = new System.Windows.Forms.MenuItem(); - this.menuItem10 = new System.Windows.Forms.MenuItem(); - this.subMenuComputerProperties = new System.Windows.Forms.MenuItem(); - this.mainMenuDatabase = new System.Windows.Forms.MenuItem(); - this.subMenuDatabaseCreate = new System.Windows.Forms.MenuItem(); - this.panel1 = new System.Windows.Forms.Panel(); - this.mgmConsole = new System.Windows.Forms.TextBox(); - this.splitterHorizont = new System.Windows.Forms.Splitter(); - this.splitter2 = new System.Windows.Forms.Splitter(); - this.listView = new System.Windows.Forms.ListView(); - this.chComputerIP = new System.Windows.Forms.ColumnHeader(); - this.chStatus = new System.Windows.Forms.ColumnHeader(); - this.chDatabase = new System.Windows.Forms.ColumnHeader(); - this.chName = new System.Windows.Forms.ColumnHeader(); - this.chOwner = new System.Windows.Forms.ColumnHeader(); - this.splitterVertical = new System.Windows.Forms.Splitter(); - this.openHostFileDialog = new System.Windows.Forms.OpenFileDialog(); - this.saveHostFileDialog = new System.Windows.Forms.SaveFileDialog(); - this.panel1.SuspendLayout(); - this.SuspendLayout(); - // - // tvComputerCluster - // - this.tvComputerCluster.CausesValidation = false; - this.tvComputerCluster.ContextMenu = this.ctxTreeViewMenu; - this.tvComputerCluster.Dock = System.Windows.Forms.DockStyle.Left; - this.tvComputerCluster.ImageList = this.imageTV; - this.tvComputerCluster.Name = "tvComputerCluster"; - this.tvComputerCluster.Nodes.AddRange(new System.Windows.Forms.TreeNode[] { - new System.Windows.Forms.TreeNode("Computer", 0, 0), - new System.Windows.Forms.TreeNode("Database", 5, 5)}); - this.tvComputerCluster.Size = new System.Drawing.Size(104, 333); - this.tvComputerCluster.TabIndex = 5; - this.tvComputerCluster.MouseDown += new System.Windows.Forms.MouseEventHandler(this.tvComputerCluster_MouseDown); - this.tvComputerCluster.AfterSelect += new System.Windows.Forms.TreeViewEventHandler(this.tvComputerCluster_AfterSelect); - this.tvComputerCluster.BeforeCollapse += new System.Windows.Forms.TreeViewCancelEventHandler(this.tvComputerCluster_BeforeCollapse); - this.tvComputerCluster.BeforeExpand += new System.Windows.Forms.TreeViewCancelEventHandler(this.tvComputerCluster_BeforeExpand); - // - // ctxTreeViewMenu - // - this.ctxTreeViewMenu.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.tvCtxMenuComputerAdd, - this.tvCtxMenuComputerRemove, - this.menuGetStatus, - this.menuItem6, - this.tvCtxMenuComputerConnect, - this.tvCtxMenuComputerDisconnect, - this.tvCtxMenuDatabaseNew, - this.tvCtxMenuComputerDefine, - this.menuItem8, - this.tvCtxMenuProperties}); - this.ctxTreeViewMenu.Popup += new System.EventHandler(this.ctxTreeViewMenu_Popup); - // - // tvCtxMenuComputerAdd - // - this.tvCtxMenuComputerAdd.Index = 0; - this.tvCtxMenuComputerAdd.Text = "Add computer"; - this.tvCtxMenuComputerAdd.Click += new System.EventHandler(this.computerMenuAdd_Click); - // - // tvCtxMenuComputerRemove - // - this.tvCtxMenuComputerRemove.Index = 1; - this.tvCtxMenuComputerRemove.Text = "Remove computer"; - 
this.tvCtxMenuComputerRemove.Click += new System.EventHandler(this.computerMenuRemove_Click); - // - // menuGetStatus - // - this.menuGetStatus.Index = 2; - this.menuGetStatus.Text = "Get Status"; - this.menuGetStatus.Click += new System.EventHandler(this.menuGetStatus_Click); - // - // menuItem6 - // - this.menuItem6.Index = 3; - this.menuItem6.Text = "-"; - // - // tvCtxMenuComputerConnect - // - this.tvCtxMenuComputerConnect.Index = 4; - this.tvCtxMenuComputerConnect.Text = "Connect"; - // - // tvCtxMenuComputerDisconnect - // - this.tvCtxMenuComputerDisconnect.Index = 5; - this.tvCtxMenuComputerDisconnect.Text = "Disconnect"; - // - // tvCtxMenuDatabaseNew - // - this.tvCtxMenuDatabaseNew.Index = 6; - this.tvCtxMenuDatabaseNew.Text = "Create database..."; - this.tvCtxMenuDatabaseNew.Click += new System.EventHandler(this.subMenuDatabaseCreate_Click); - // - // tvCtxMenuComputerDefine - // - this.tvCtxMenuComputerDefine.Index = 7; - this.tvCtxMenuComputerDefine.Text = "Define process..."; - this.tvCtxMenuComputerDefine.Click += new System.EventHandler(this.tvCtxMenuComputerDefine_Click); - // - // menuItem8 - // - this.menuItem8.Index = 8; - this.menuItem8.Text = "-"; - // - // tvCtxMenuProperties - // - this.tvCtxMenuProperties.Index = 9; - this.tvCtxMenuProperties.Text = "Properties"; - // - // imageTV - // - this.imageTV.ColorDepth = System.Windows.Forms.ColorDepth.Depth8Bit; - this.imageTV.ImageSize = new System.Drawing.Size(16, 16); - this.imageTV.ImageStream = ((System.Windows.Forms.ImageListStreamer)(resources.GetObject("imageTV.ImageStream"))); - this.imageTV.TransparentColor = System.Drawing.Color.Transparent; - // - // ctxListViewMenu - // - this.ctxListViewMenu.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.menuStartProcess, - this.menuRestartProcess, - this.menuStopProcess, - this.menuRemoveProcess, - this.menuRefresh}); - this.ctxListViewMenu.Popup += new System.EventHandler(this.ctxListViewMenu_Popup); - // - // menuStartProcess - // - this.menuStartProcess.Index = 0; - this.menuStartProcess.Text = "Start process"; - this.menuStartProcess.Click += new System.EventHandler(this.startProcess); - // - // menuRestartProcess - // - this.menuRestartProcess.Index = 1; - this.menuRestartProcess.Text = "Restart process"; - this.menuRestartProcess.Click += new System.EventHandler(this.restartProcess); - // - // menuStopProcess - // - this.menuStopProcess.Index = 2; - this.menuStopProcess.Text = "Stop process"; - this.menuStopProcess.Click += new System.EventHandler(this.stopProcess); - // - // menuRemoveProcess - // - this.menuRemoveProcess.Index = 3; - this.menuRemoveProcess.Text = "Remove process"; - this.menuRemoveProcess.Click += new System.EventHandler(this.removeProcess); - // - // menuRefresh - // - this.menuRefresh.Index = 4; - this.menuRefresh.Text = "Refresh"; - this.menuRefresh.Click += new System.EventHandler(this.menuRefresh_Click); - // - // computerMenuAdd - // - this.computerMenuAdd.Index = -1; - this.computerMenuAdd.Text = "Add"; - this.computerMenuAdd.Click += new System.EventHandler(this.computerMenuAdd_Click); - // - // menuItem3 - // - this.menuItem3.Index = -1; - this.menuItem3.Text = "-"; - // - // computerMenuRemove - // - this.computerMenuRemove.Index = -1; - this.computerMenuRemove.Text = "Remove"; - this.computerMenuRemove.Click += new System.EventHandler(this.computerMenuRemove_Click); - // - // menuItem5 - // - this.menuItem5.Index = -1; - this.menuItem5.Text = "-"; - // - // computerMenuDisconnect - // - this.computerMenuDisconnect.Index 
= -1; - this.computerMenuDisconnect.Text = "Disconnect"; - // - // computerMenuConnect - // - this.computerMenuConnect.Index = -1; - this.computerMenuConnect.Text = "Connect"; - // - // menuItem11 - // - this.menuItem11.Index = -1; - this.menuItem11.Text = "-"; - // - // computerMenuProperties - // - this.computerMenuProperties.Index = -1; - this.computerMenuProperties.Text = "Properties"; - // - // computerMenuRefresh - // - this.computerMenuRefresh.Index = -1; - this.computerMenuRefresh.Text = "Refresh"; - this.computerMenuRefresh.Click += new System.EventHandler(this.computerMenuRefresh_Click); - // - // chComputer - // - this.chComputer.Text = "Computer"; - // - // chProcessName - // - this.chProcessName.Text = "Name"; - // - // mainMenuItem - // - this.mainMenuItem.Index = -1; - this.mainMenuItem.Text = "File"; - // - // chProcesses - // - this.chProcesses.Text = "Id"; - // - // mainMenu - // - this.mainMenu.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.mainMenuFile, - this.mainMenuComputer, - this.mainMenuDatabase}); - // - // mainMenuFile - // - this.mainMenuFile.Index = 0; - this.mainMenuFile.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.menuItem2, - this.menuItem1}); - this.mainMenuFile.Text = "&File"; - // - // menuItem2 - // - this.menuItem2.Index = 0; - this.menuItem2.Text = "&Import..."; - this.menuItem2.Click += new System.EventHandler(this.importHostFile); - // - // menuItem1 - // - this.menuItem1.Index = 1; - this.menuItem1.Text = "&Export..."; - this.menuItem1.Click += new System.EventHandler(this.exportHostFile); - // - // mainMenuComputer - // - this.mainMenuComputer.Index = 1; - this.mainMenuComputer.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.subMenuComputerAdd, - this.menuItem7, - this.subMenuComputerDisconnect, - this.subMenuComputerRemove, - this.menuItem10, - this.subMenuComputerProperties}); - this.mainMenuComputer.Text = "&Computer"; - // - // subMenuComputerAdd - // - this.subMenuComputerAdd.Index = 0; - this.subMenuComputerAdd.Text = "&Add Computer"; - this.subMenuComputerAdd.Click += new System.EventHandler(this.computerMenuAdd_Click); - // - // menuItem7 - // - this.menuItem7.Index = 1; - this.menuItem7.Text = "-"; - // - // subMenuComputerDisconnect - // - this.subMenuComputerDisconnect.Index = 2; - this.subMenuComputerDisconnect.Text = "&Disconnect"; - // - // subMenuComputerRemove - // - this.subMenuComputerRemove.Index = 3; - this.subMenuComputerRemove.Text = "&Remove Computer"; - this.subMenuComputerRemove.Click += new System.EventHandler(this.computerMenuRemove_Click); - // - // menuItem10 - // - this.menuItem10.Index = 4; - this.menuItem10.Text = "-"; - // - // subMenuComputerProperties - // - this.subMenuComputerProperties.Index = 5; - this.subMenuComputerProperties.Text = "&Properties"; - // - // mainMenuDatabase - // - this.mainMenuDatabase.Index = 2; - this.mainMenuDatabase.MenuItems.AddRange(new System.Windows.Forms.MenuItem[] { - this.subMenuDatabaseCreate}); - this.mainMenuDatabase.Text = "&Database"; - this.mainMenuDatabase.Click += new System.EventHandler(this.subMenuDatabaseCreate_Click); - // - // subMenuDatabaseCreate - // - this.subMenuDatabaseCreate.Index = 0; - this.subMenuDatabaseCreate.Text = "&Create database..."; - this.subMenuDatabaseCreate.Click += new System.EventHandler(this.subMenuDatabaseCreate_Click); - // - // panel1 - // - this.panel1.Controls.AddRange(new System.Windows.Forms.Control[] { - this.mgmConsole, - this.splitterHorizont, - this.splitter2, - this.listView}); - 
this.panel1.Dock = System.Windows.Forms.DockStyle.Fill; - this.panel1.Location = new System.Drawing.Point(104, 0); - this.panel1.Name = "panel1"; - this.panel1.Size = new System.Drawing.Size(384, 333); - this.panel1.TabIndex = 6; - // - // mgmConsole - // - this.mgmConsole.AccessibleRole = System.Windows.Forms.AccessibleRole.StaticText; - this.mgmConsole.Dock = System.Windows.Forms.DockStyle.Bottom; - this.mgmConsole.Location = new System.Drawing.Point(0, 231); - this.mgmConsole.Multiline = true; - this.mgmConsole.Name = "mgmConsole"; - this.mgmConsole.Size = new System.Drawing.Size(384, 96); - this.mgmConsole.TabIndex = 5; - this.mgmConsole.Text = "textBox1"; - this.mgmConsole.TextChanged += new System.EventHandler(this.mgmConsole_TextChanged); - this.mgmConsole.Enter += new System.EventHandler(this.mgmConsole_Enter); - // - // splitterHorizont - // - this.splitterHorizont.Dock = System.Windows.Forms.DockStyle.Bottom; - this.splitterHorizont.Location = new System.Drawing.Point(0, 327); - this.splitterHorizont.MinExtra = 100; - this.splitterHorizont.MinSize = 100; - this.splitterHorizont.Name = "splitterHorizont"; - this.splitterHorizont.Size = new System.Drawing.Size(384, 3); - this.splitterHorizont.TabIndex = 4; - this.splitterHorizont.TabStop = false; - // - // splitter2 - // - this.splitter2.Dock = System.Windows.Forms.DockStyle.Bottom; - this.splitter2.Location = new System.Drawing.Point(0, 330); - this.splitter2.Name = "splitter2"; - this.splitter2.Size = new System.Drawing.Size(384, 3); - this.splitter2.TabIndex = 2; - this.splitter2.TabStop = false; - // - // listView - // - this.listView.Columns.AddRange(new System.Windows.Forms.ColumnHeader[] { - this.chComputerIP, - this.chStatus, - this.chDatabase, - this.chName, - this.chOwner}); - this.listView.ContextMenu = this.ctxListViewMenu; - this.listView.Dock = System.Windows.Forms.DockStyle.Fill; - this.listView.FullRowSelect = true; - this.listView.Name = "listView"; - this.listView.Size = new System.Drawing.Size(384, 333); - this.listView.TabIndex = 0; - this.listView.View = System.Windows.Forms.View.Details; - this.listView.ColumnClick += new System.Windows.Forms.ColumnClickEventHandler(this.listView_ColumnClick_1); - this.listView.SelectedIndexChanged += new System.EventHandler(this.listView_SelectedIndexChanged); - // - // chComputerIP - // - this.chComputerIP.Text = "IP Adress"; - // - // chStatus - // - this.chStatus.Text = "Status"; - // - // chDatabase - // - this.chDatabase.Text = "Database"; - // - // chName - // - this.chName.Text = "Name"; - // - // chOwner - // - this.chOwner.Text = "Owner"; - // - // splitterVertical - // - this.splitterVertical.Location = new System.Drawing.Point(104, 0); - this.splitterVertical.MinSize = 100; - this.splitterVertical.Name = "splitterVertical"; - this.splitterVertical.Size = new System.Drawing.Size(3, 333); - this.splitterVertical.TabIndex = 7; - this.splitterVertical.TabStop = false; - this.splitterVertical.SplitterMoved += new System.Windows.Forms.SplitterEventHandler(this.splitterVertical_SplitterMoved); - // - // openHostFileDialog - // - this.openHostFileDialog.DefaultExt = "cpc"; - this.openHostFileDialog.Filter = "CPCd configuration files (*.cpc)|*.cpc| All Files (*.*)|*.*"; - this.openHostFileDialog.Title = "Import a CPCd configuration file"; - this.openHostFileDialog.FileOk += new System.ComponentModel.CancelEventHandler(this.openHostFileDialog_FileOk); - // - // saveHostFileDialog - // - this.saveHostFileDialog.Filter = "CPCd configuration files (*.cpc)|*.cpc| All Files 
(*.*)|*.*"; - this.saveHostFileDialog.Title = "Export a CPCd configuration file"; - this.saveHostFileDialog.FileOk += new System.ComponentModel.CancelEventHandler(this.saveHostFileDialog_FileOk); - // - // CPC - // - this.AutoScaleBaseSize = new System.Drawing.Size(5, 13); - this.ClientSize = new System.Drawing.Size(488, 333); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.splitterVertical, - this.panel1, - this.tvComputerCluster}); - this.Menu = this.mainMenu; - this.Name = "CPC"; - this.Text = "CPC"; - this.Resize += new System.EventHandler(this.CPC_Resize); - this.MouseDown += new System.Windows.Forms.MouseEventHandler(this.CPC_MouseDown); - this.Closing += new System.ComponentModel.CancelEventHandler(this.CPC_Closing); - this.Load += new System.EventHandler(this.CPC_Load); - this.Activated += new System.EventHandler(this.CPC_Activated); - this.Paint += new System.Windows.Forms.PaintEventHandler(this.CPC_Paint); - this.panel1.ResumeLayout(false); - this.ResumeLayout(false); - - } - #endregion - - /// - /// The main entry point for the application. - /// - [STAThread] - static void Main() - { - Application.Run(new CPC()); - - } - - private void tvComputerCluster_AfterSelect(object sender, System.Windows.Forms.TreeViewEventArgs e) - { - if(e.Node.Text.ToString().Equals("Database")) - { - updateListViews("Database"); - - return; - } - if(e.Node.Text.ToString().Equals("Computer")) - { - //updateListViews(); - - updateListViews("Computer"); - return; - } - if(e.Node.Parent.Text.ToString().Equals("Database")) - { - //updateListViews(); - listView.Columns.Clear(); - listView.Columns.Add(this.chName); - listView.Columns.Add(this.chDatabase); - listView.Columns.Add(this.chStatus); - listView.Columns.Add(this.chOwner); - updateDatabaseView(e.Node.Text.ToString()); - } - - if(e.Node.Parent.Text=="Computer") - { - //updateListViews(); - - Computer c=computerMgmt.getComputer(e.Node.Text.ToString()); - string [] processcols= new string[5]; - ArrayList processes; - processes = c.getProcesses(); - listView.Items.Clear(); - listView.Columns.Clear(); - listView.Columns.Add(this.chComputer); - listView.Columns.Add(this.chDatabase); - listView.Columns.Add(this.chName); - listView.Columns.Add(this.chStatus); - listView.Columns.Add(this.chOwner); - if(processes != null ) - { - - listView.BeginUpdate(); - foreach(Process p in processes) - { - processcols[0]=p.getComputer().getName(); - processcols[1]=p.getDatabase(); - processcols[2]=p.getName(); - processcols[3]=p.getStatusString(); - processcols[4]=p.getOwner(); - ListViewItem lvp= new ListViewItem(processcols); - listView.Items.Add(lvp); - } - - listView.EndUpdate(); - } - - - listView.Show(); - } - - } - - - - private void ctxTreeViewMenu_Popup(object sender, System.EventArgs e) - { - tvCtxMenuComputerAdd.Enabled=true; - tvCtxMenuComputerRemove.Enabled=true; - tvCtxMenuComputerConnect.Enabled=true; - tvCtxMenuComputerDisconnect.Enabled=true; - tvCtxMenuComputerDefine.Enabled=true; - menuGetStatus.Enabled=true; - tvCtxMenuDatabaseNew.Enabled=true; - tvCtxMenuComputerAdd.Visible=true; - tvCtxMenuComputerRemove.Visible=true; - tvCtxMenuComputerConnect.Visible=true; - tvCtxMenuComputerDisconnect.Visible=true; - tvCtxMenuComputerDefine.Visible=true; - tvCtxMenuDatabaseNew.Visible=true; - tvCtxMenuProperties.Visible=true; - menuGetStatus.Visible=true; - - if(tvComputerCluster.SelectedNode.Text.Equals("Computer")) - { - tvCtxMenuComputerAdd.Enabled=true; - tvCtxMenuComputerRemove.Enabled=false; - tvCtxMenuComputerConnect.Enabled=false; - 
tvCtxMenuComputerDisconnect.Enabled=false; - tvCtxMenuComputerDefine.Enabled=false; - tvCtxMenuDatabaseNew.Visible=false; - menuGetStatus.Visible=false; - return; - } - - if(tvComputerCluster.SelectedNode.Text.Equals("Database")) - { - // ctxTreeViewMenu.MenuItems.Add(menuDatabaseItem1); - tvCtxMenuComputerAdd.Visible=false; - tvCtxMenuComputerRemove.Visible=false; - tvCtxMenuComputerConnect.Visible=false; - tvCtxMenuComputerDisconnect.Visible=false; - tvCtxMenuComputerDefine.Visible=false; - tvCtxMenuDatabaseNew.Visible=true; - tvCtxMenuDatabaseNew.Enabled=true; - menuGetStatus.Visible=false; - menuItem6.Visible=false; - return; - } - if(tvComputerCluster.SelectedNode.Parent.Text.Equals("Computer")) - { - - Computer c= computerMgmt.getComputer(tvComputerCluster.SelectedNode.Text.ToString()); - if(c.getStatus().Equals(Computer.Status.Disconnected)) - { - tvCtxMenuComputerConnect.Enabled=true; - tvCtxMenuComputerDisconnect.Enabled=false; - } - else - { - tvCtxMenuComputerDisconnect.Enabled=true; - tvCtxMenuComputerConnect.Enabled=false; - } - - tvCtxMenuComputerAdd.Enabled=false; - tvCtxMenuComputerRemove.Enabled=true; - menuGetStatus.Visible=false; - - tvCtxMenuComputerDefine.Enabled=true; - tvCtxMenuDatabaseNew.Visible=false; - return; - } - - if(tvComputerCluster.SelectedNode.Parent.Text.Equals("Database")) - { - tvCtxMenuComputerAdd.Enabled=true; - tvCtxMenuComputerRemove.Enabled=false; - tvCtxMenuComputerConnect.Enabled=false; - tvCtxMenuComputerDisconnect.Enabled=false; - tvCtxMenuComputerDefine.Enabled=false; - tvCtxMenuDatabaseNew.Visible=true; - menuGetStatus.Visible=true; - return; - } - - - } - - - private void listView_SelectedIndexChanged(object sender, System.EventArgs e) - { - //MessageBox.Show(listView.SelectedItems[0].Text); - } - - - private void tvComputerCluster_MouseDown(object sender, System.Windows.Forms.MouseEventArgs e) - { /* - TreeNode node = tvComputerCluster.GetNodeAt(e.X,e.Y); - if(node==null) - { - return; - } - tvComputerCluster.SelectedNode=node; -// updateListViews(); - tvComputerCluster.SelectedNode.Expand(); - */ - } - - - private void subMenuComputerRemove_Click(object sender, System.EventArgs e) - { - //ComputerRemoveDialog crd=new ComputerRemoveDialog(computerMgmt); - //crd.Show(); - //updateListViews(); -/* string computer = tvComputerCluster.SelectedNode.Text.ToString(); - if(MessageBox.Show(this,"Are you sure you want to remove: " +computer+ "?","Remove computer",MessageBoxButtons.YesNo)==DialogResult.Yes) - { - computerMgmt.RemoveComputer(computer); - } -*/ - } - - private void subMenuComputerAdd_Click(object sender, System.EventArgs e) - { - ComputerAddDialog cad=new ComputerAddDialog(computerMgmt); - cad.ShowDialog(); - cad.Dispose(); -/// updateListViews(tvComputerCluster.SelectedNode.Text.ToString()); - } - - - - private void updateListViews(string node) - { - if(node.Equals("Computer")) - { - listView.Columns.Clear(); - listView.Items.Clear(); - ArrayList list= computerMgmt.getComputerCollection(); - string [] computercols= new string[2]; - - - listView.BeginUpdate(); - listView.Columns.Add(this.chComputer); - listView.Columns.Add(this.chStatus); - foreach (Computer computer in list) - { - computercols[0]=computer.getName(); - computercols[1]=computer.getStatusString(); - - ListViewItem lvc= new ListViewItem(computercols); - - listView.Items.Add(lvc); - - } - listView.EndUpdate(); - listView.Show(); - } - - if(node.Equals("Database")) - { - - ArrayList databases= computerMgmt.getDatabaseCollection(); - string [] dbcols= new string[3]; - - - 
listView.BeginUpdate(); - listView.Items.Clear(); - listView.Columns.Clear(); - listView.Columns.Add(this.chDatabase); - listView.Columns.Add(this.chStatus); - listView.Columns.Add(this.chOwner); - foreach (Database db in databases) - { - dbcols[0]=db.getName(); - dbcols[1]=db.getStatusString(); - dbcols[2]=db.getOwner(); - - ListViewItem lvc= new ListViewItem(dbcols); - - listView.Items.Add(lvc); - - } - listView.EndUpdate(); - - listView.Show(); - } - - } - - public void updateDatabaseView(string database) - { - Database d=computerMgmt.getDatabase(database); - string [] processcols= new string[5]; - ArrayList processes = d.getProcesses(); - listView.Items.Clear(); - if(processes != null ) - { - - listView.BeginUpdate(); - listView.Columns.Clear(); - listView.Columns.Add(this.chComputer); - listView.Columns.Add(this.chDatabase); - listView.Columns.Add(this.chName); - listView.Columns.Add(this.chStatus); - listView.Columns.Add(this.chOwner); - - foreach(Process p in processes) - { - processcols[0]=p.getComputer().getName(); - processcols[1]=p.getDatabase(); - processcols[2]=p.getName(); - processcols[3]=p.getStatusString(); - processcols[4]=p.getOwner(); - ListViewItem lvp= new ListViewItem(processcols); - listView.Items.Add(lvp); - } - - listView.EndUpdate(); - } - - listView.Show(); - } - - private void updateTreeViews() - { - //tvComputerCluster.Nodes.Clear(); - ArrayList computers= computerMgmt.getComputerCollection(); - - ArrayList databases= computerMgmt.getDatabaseCollection(); - - tvComputerCluster.BeginUpdate(); - tvComputerCluster.Nodes[0].Nodes.Clear(); - tvComputerCluster.Nodes[1].Nodes.Clear(); - if(computers != null) - { - foreach (Computer computer in computers) - { - tvComputerCluster.Nodes[0].Nodes.Add(new TreeNode(computer.getName().ToString())); - } - } - if(databases != null) - { - foreach (Database db in databases) - { - tvComputerCluster.Nodes[1].Nodes.Add(new TreeNode(db.getName().ToString())); - } - } - - tvComputerCluster.EndUpdate(); - } - - - private void CPC_MouseDown(object sender, System.Windows.Forms.MouseEventArgs e) - { - //updateListViews(); - //updateTreeViews(); - - } - - private void CPC_Paint(object sender, System.Windows.Forms.PaintEventArgs e) - { - if(tvComputerCluster.SelectedNode!=null) - { - if(tvComputerCluster.SelectedNode.Text.ToString().Equals("Computer")) - updateListViews("Computer"); - } - - //updateListViews(); - //updateTreeViews(); - } - - private void CPC_Activated(object sender, System.EventArgs e) - { - updateListViews(tvComputerCluster.SelectedNode.Text.ToString()); - //updateListViews(); - updateTreeViews(); - } - - - private void computerMenuAdd_Click(object sender, System.EventArgs e) - { - ComputerAddDialog cad=new ComputerAddDialog(computerMgmt); - cad.ShowDialog(); - cad.Dispose(); - - } - - private void computerMenuRemove_Click(object sender, System.EventArgs e) - { - - string computer = tvComputerCluster.SelectedNode.Text.ToString(); - if(MessageBox.Show("Are you sure you want to remove: " + computer +"?\n" + "This will remove all processes on the computer!" 
,"Remove selected computer",MessageBoxButtons.YesNo, MessageBoxIcon.Question)== DialogResult.Yes) - { - removeComputer(computer); - } - } - - private void removeComputer(string computer) - { - ArrayList processes; - Computer c=computerMgmt.getComputer(computer); - processes = c.getProcesses(); - - /*foreach(Process p in processes) - { - removeProcess(computer,p.getName()); - processes=c.getProcesses(); - } -*/ - if(computerMgmt.RemoveComputer(computer)) - { - tvComputerCluster.SelectedNode=tvComputerCluster.SelectedNode.PrevVisibleNode; - this.updateTreeViews(); - this.updateListViews("Computer"); - - if(tvComputerCluster.SelectedNode!=null) - this.updateListViews(tvComputerCluster.SelectedNode.Text.ToString()); - //updateListViews(); - } - } - - private void listView_ColumnClick(object sender, System.Windows.Forms.ColumnClickEventArgs e) - { - - if(listView.Sorting.Equals(SortOrder.Ascending)) - listView.Sorting=SortOrder.Descending; - else - listView.Sorting=SortOrder.Ascending; - - } - - - private void subMenuDatabaseCreate_Click(object sender, System.EventArgs e) - { - PanelWizard p = new PanelWizard(this.computerMgmt); - p.ShowDialog(); - } - - private void tvCtxMenuComputerDefine_Click(object sender, System.EventArgs e) - { - ProcessDefineDialog pdd = new ProcessDefineDialog(this.computerMgmt, - tvComputerCluster.SelectedNode.Text.ToString()); - pdd.Show(); - } - - private void listView_ItemActivate(object sender, System.EventArgs e) - { - updateDatabaseView(listView.SelectedItems[0].Text.ToString()); - for(int i=0;i0) - selectedItem=listView.FocusedItem.Text.ToString(); - - - if(selectedItem.Equals("")) - { - computerMenuAdd.Enabled=true; - computerMenuRemove.Enabled=false; - computerMenuConnect.Enabled=false; - computerMenuDisconnect.Enabled=false; - return; - } - else - { - computerMenuAdd.Enabled=false; - if(computerMgmt.getStatus(selectedItem).Equals(Computer.Status.Connected)) - { - computerMenuConnect.Enabled=false; - computerMenuRemove.Enabled=true; - } - if(computerMgmt.getStatus(selectedItem).Equals(Computer.Status.Disconnected)) - computerMenuDisconnect.Enabled=false; - } - - - } - - private void startProcess(object sender, System.EventArgs e) - { - if(listView.SelectedItems.Count==0) - return; - - string computer = listView.SelectedItems[0].SubItems[0].Text.ToString(); - string process = listView.SelectedItems[0].SubItems[2].Text.ToString(); - - if(computerMgmt.getComputer(computer).getProcessByName(process).getStatus()==Process.Status.Running) - { - MessageBox.Show(this,"The process is already started!" ,"Process failed to start",MessageBoxButtons.OK); - return; - } - - int status = startProcess(listView.SelectedItems[0].SubItems[0].Text.ToString(),listView.SelectedItems[0].SubItems[2].Text.ToString()); - - - if(status < 0) - MessageBox.Show(this,"Either the link is not OK, or the process is misconfigured! 
Status : " + status,"Process failed to start",MessageBoxButtons.OK); - else - MessageBox.Show(this,"The process was sucessfully started!","Process started",MessageBoxButtons.OK); - - } - - private int startProcess(string computer, string process) - { - Computer c=computerMgmt.getComputer(computer); - int status = c.startProcess(c.getProcessByName(process)); - return status; - } - - private void listView_ColumnClick_1(object sender, System.Windows.Forms.ColumnClickEventArgs e) - { - // if(listView.Columns[e.Column].Text.Equals("Computer")) - // { - if(listView.Sorting.Equals(SortOrder.Ascending)) - { - listView.Sorting=SortOrder.Descending; - } - else - { - listView.Sorting=SortOrder.Ascending; - } - // } - } - - private void removeProcess(object sender, System.EventArgs e) - { - if(listView.SelectedItems.Count==0) - return; - string process = listView.SelectedItems[0].SubItems[2].Text.ToString(); - string computer = listView.SelectedItems[0].SubItems[0].Text.ToString(); - - if(MessageBox.Show("Are you sure that you want to remove " + process + " permanently?","Remove process",MessageBoxButtons.YesNo) == DialogResult.No) - return; - removeProcess(computer,process); - MessageBox.Show(this,"The process was sucessfully removed!","Remove process",MessageBoxButtons.OK); - } - - private void removeProcess(string computer, string process) - { - - Computer c=computerMgmt.getComputer(computer); - stopProcess(computer,process); - int status = c.undefineProcess(c.getProcessByName(process)); - //if(status < 0) - // MessageBox.Show(this,"The process could not be removed!","Failed to remove process",MessageBoxButtons.OK); - // else - // { - Database db = computerMgmt.getDatabase((c.getProcessByName(process).getDatabase())); - db.removeProcess(process); - c.removeProcess(process,db.getName()); - updateListViews("Database"); - // } - } - - private void stopProcess(object sender, System.EventArgs e) - { - if(listView.SelectedItems.Count==0) - return; - string computer = listView.SelectedItems[0].SubItems[0].Text.ToString(); - string process = listView.SelectedItems[0].SubItems[2].Text.ToString(); - if(computerMgmt.getComputer(computer).getProcessByName(process).getStatus()==Process.Status.Stopped) - { - MessageBox.Show(this,"The process is already stopped!" ,"Process failed to stop",MessageBoxButtons.OK); - return; - } - - if(DialogResult.No==MessageBox.Show(this,"Are you sure you want to stop the " + process + " process?","Stop process!", MessageBoxButtons.YesNo)) - return; - - int status = stopProcess(computer, process); - if(status < 0) - MessageBox.Show(this,"The process could not be stopped. 
Status: " + status ,"Process failed to stop",MessageBoxButtons.OK); - else - MessageBox.Show(this,"The process was sucessfully stopped!","Process stopped",MessageBoxButtons.OK); - } - - private int stopProcess(string computer, string process) - { - Computer c=computerMgmt.getComputer(computer); - int status = c.stopProcess(c.getProcessByName(process)); - return status; - } - - private void restartProcess(object sender, System.EventArgs e) - { - if(listView.SelectedItems.Count==0) - return; - string computer = listView.SelectedItems[0].SubItems[0].Text.ToString(); - string process = listView.SelectedItems[0].SubItems[2].Text.ToString(); - if(stopProcess(computer, process)<0) - { - MessageBox.Show("Restart process failed!!!", "Restart process"); - return; - } - if(startProcess(computer, process)<0) - { - MessageBox.Show("Restart process failed!!!", "Restart process"); - return; - } - MessageBox.Show("Succesfully restarted the process!","Restart process"); - } - - private void menuRefresh_Click(object sender, System.EventArgs e) - { - //string computer = tvComputerCluster.SelectedNode.Text; - - this.listProcesses(); - } - - private void importHostFile(object sender, System.EventArgs e) - { - openHostFileDialog.ShowDialog(); - } - - private void exportHostFile(object sender, System.EventArgs e) - { - saveHostFileDialog.ShowDialog(); - } - - private void listProcesses() - { - /* add process in computer list*/ - ArrayList computers = computerMgmt.getComputerCollection(); - foreach(Computer c in computers) - { - c.listProcesses(); - ArrayList processes = c.getProcesses(); - if(processes!=null) - { - foreach(Process p in processes) - { - Database db = computerMgmt.getDatabase(p.getDatabase()); - if(db!=null) - { - p.setDefined(true); - db.addProcessCheck(p); - } - } - } - } - updateListViews("Computer"); - updateListViews("Database"); - } - - private void openHostFileDialog_FileOk(object sender, System.ComponentModel.CancelEventArgs e) - { - computerMgmt.importHostFile(openHostFileDialog.FileName); - this.updateTreeViews(); - openHostFileDialog.Dispose(); - listProcesses(); - } - - private void saveHostFileDialog_FileOk(object sender, System.ComponentModel.CancelEventArgs e) - { - computerMgmt.exportHostFile(saveHostFileDialog.FileName); - saveHostFileDialog.Dispose(); - } - - private void mgmConsole_Enter(object sender, System.EventArgs e) - {/* - //telnetclient.telnetClient tc= new telnetclient.telnetClient("10.0.13.1",10000,mgmConsole); - socketcomm.SocketComm sc = new socketcomm.SocketComm("10.0.13.1",10000); - sc.doConnect(); - while(!sc.isConnected()) - { - Thread.Sleep(100); - } - sc.writeMessage("get status\r"); - string line = sc.readLine(); - while(!line.Equals("")) - { - MessageBox.Show(line); - line=sc.readLine(); - } -*/ - } - - private void mgmConsole_TextChanged(object sender, System.EventArgs e) - { - - } - - - - - - - - - - } - -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs deleted file mode 100644 index 879605dbd23..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/Computer.cs +++ /dev/null @@ -1,272 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using System.Data; -using System.IO; -using NDB_CPC.socketcomm; -using NDB_CPC.simpleparser; - - -namespace NDB_CPC -{ - /// - /// Summary description for Computer. - /// - public class Computer - { - public enum Status {Disconnected=1,Connected=2, Unknown=3} - private string m_ip; - private int m_cpcdPort; - private string m_name; - private Status m_status; - private ArrayList m_processes; - private SocketComm m_socket; - public Computer(string name, int port) - { - m_name = name; - m_status = Status.Disconnected; - m_processes = new ArrayList(); - m_cpcdPort=port; - m_socket = new SocketComm(m_name,m_cpcdPort); - } - - public Computer(string name, string ip) - { - m_ip = ip; - m_name = name; - m_status = Status.Disconnected; - m_processes = new ArrayList(); - m_cpcdPort=1234; //default port - m_socket = new SocketComm(m_ip,m_cpcdPort); - } - - public void connectToCpcd() - { - m_socket.doConnect(); - } - - private bool sendMessage(string str) - { - return m_socket.writeMessage(str); - - } - - public string getName() {return m_name;} - public string getIp() {return m_ip;} - public ArrayList getProcesses() - { - if(m_processes.Count>0) - return m_processes; - else - return null; - } - public string getStatusString() - { - try - { - if(m_socket.isConnected()) - return "Connected"; - else - return "Disconnected"; - } - catch(Exception e) - { - return "Unknown"; - } - } - - - public bool isConnected() - { - if(m_socket.isConnected()) - return true; - return false; - } - - public Status getStatus() - { - try - { - if(m_socket.isConnected()) - return Status.Connected; - else - return Status.Disconnected; - } - catch(Exception e) - { - return Status.Unknown; - } - } - - public void setStatus(Status status) - { - m_status=status; - } - - public void addProcess(Process process) - { - m_processes.Add(process); - } - - public Process getProcessByName(string name) - { - foreach(Process process in m_processes) - { - if(process.getName().Equals(name)) - return process; - } - return null; - } - - - public bool removeProcess(string name, string database) - { - foreach(Process p in m_processes) - { - if(p.getName().Equals(name) && p.getDatabase().Equals(database)) - { - m_processes.Remove(p); - return true; - } - } - return false; - } - - public void disconnect() - { - m_socket.disconnect(); - } - public Process getProcess(string id) - { - foreach(Process process in m_processes) - { - if(process.getId().Equals(id)) - return process; - } - return null; - } - - public int listProcesses() - { - string list = "list processes\n\n"; - - if(!sendMessage(list)) - return -2; - - SimpleCPCParser.parse(m_processes, this, m_socket); - return 1; - } - - public int defineProcess(Process p) - { - string define = "define process \n"; - define = define + "name:" + p.getName() + "\n"; - define = define + "group:" + p.getDatabase() + "\n"; - define = define + "env:" + "NDB_CONNECTSTRING="+p.getConnectString() ; - if(p.getEnv().Equals("")) - define = 
define + "\n"; - else - define = define + " " + p.getEnv() + "\n"; - - //if(p.getPath().EndsWith("\\")) - // define = define + "path:" + p.getPath()+ "ndb" + "\n"; - //else - define = define + "path:" + p.getPath() + "\n"; - define = define + "args:" + p.getArgs() + "\n"; - define = define + "type:" + "permanent" + "\n"; - define = define + "cwd:" + p.getCwd() + "\n"; - define = define + "owner:" + "ejohson" + "\n\n"; - - if(!sendMessage(define)) - return -2; - - SimpleCPCParser.parse(p, m_socket); - if(p.isDefined()) - return 1; - else - return -1; - - } - - public int startProcess(Process p) - { - if(!p.isDefined()) - { - this.defineProcess(p); - if(!p.isDefined()) - return -4; //process misconfigured - - } - string start= "start process \n"; - start = start + "id:" + p.getId() + "\n\n"; - if(!sendMessage(start)) - return -2; - SimpleCPCParser.parse(p, m_socket); - if(p.getStatus().Equals(Process.Status.Running)) - return 1; - else - return -1; - } - - public int stopProcess(Process p) - { - if(!p.isDefined()) - { - return -4; //process not defined - } - string stop= "stop process \n"; - stop = stop + "id:" + p.getId() + "\n\n"; - if(!sendMessage(stop)) - return -2; - SimpleCPCParser.parse(p, m_socket); - - if(p.getStatus().Equals(Process.Status.Stopped)) - return 1; - else - return -1; - } - - public int undefineProcess(Process p) - { - if(!p.isDefined()) - { - return -4; //process not defined - } - string undefine= "undefine process \n"; - undefine = undefine + "id:" + p.getId() + "\n\n"; - if(!sendMessage(undefine)) - return -2; - SimpleCPCParser.parse(p, m_socket); - if(!p.isDefined()) - { - return 1; - - } - return -1; - } - - public int getCpcdPort() - { - return this.m_cpcdPort; - } - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs deleted file mode 100644 index bdd8b04c0ae..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/ComputerAddDialog.cs +++ /dev/null @@ -1,258 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; - -namespace NDB_CPC -{ - /// - /// Summary description for ComputerAddDialog. - /// - public class ComputerAddDialog : System.Windows.Forms.Form - { - private System.Windows.Forms.Label label1; - private System.Windows.Forms.TextBox textboxComputerName; - private System.Windows.Forms.Button btnAdd; - private System.Windows.Forms.Button btnCancel; - private System.Windows.Forms.Label label2; - /// - /// Required designer variable. 
- /// - private System.ComponentModel.Container components = null; - private System.Windows.Forms.Label label6; - private System.Windows.Forms.CheckBox checkBoxDefault; - private System.Windows.Forms.TextBox textBoxPort; - - private ComputerMgmt mgmt; - public ComputerAddDialog(ComputerMgmt mgmt) - { - // - // Required for Windows Form Designer support - // - InitializeComponent(); - - // - // TODO: Add any constructor code after InitializeComponent call - // - this.mgmt=mgmt; - } - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if(components != null) - { - components.Dispose(); - } - } - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. - /// - private void InitializeComponent() - { - this.textboxComputerName = new System.Windows.Forms.TextBox(); - this.label1 = new System.Windows.Forms.Label(); - this.btnAdd = new System.Windows.Forms.Button(); - this.btnCancel = new System.Windows.Forms.Button(); - this.label2 = new System.Windows.Forms.Label(); - this.label6 = new System.Windows.Forms.Label(); - this.textBoxPort = new System.Windows.Forms.TextBox(); - this.checkBoxDefault = new System.Windows.Forms.CheckBox(); - this.SuspendLayout(); - // - // textboxComputerName - // - this.textboxComputerName.Location = new System.Drawing.Point(128, 16); - this.textboxComputerName.Name = "textboxComputerName"; - this.textboxComputerName.Size = new System.Drawing.Size(136, 20); - this.textboxComputerName.TabIndex = 0; - this.textboxComputerName.Text = ""; - // - // label1 - // - this.label1.Location = new System.Drawing.Point(40, 16); - this.label1.Name = "label1"; - this.label1.Size = new System.Drawing.Size(88, 23); - this.label1.TabIndex = 1; - this.label1.Text = "Computer name:"; - this.label1.TextAlign = System.Drawing.ContentAlignment.MiddleRight; - // - // btnAdd - // - this.btnAdd.Location = new System.Drawing.Point(112, 128); - this.btnAdd.Name = "btnAdd"; - this.btnAdd.Size = new System.Drawing.Size(80, 24); - this.btnAdd.TabIndex = 4; - this.btnAdd.Text = "Add"; - this.btnAdd.Click += new System.EventHandler(this.btnAdd_Click); - // - // btnCancel - // - this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel; - this.btnCancel.Location = new System.Drawing.Point(200, 128); - this.btnCancel.Name = "btnCancel"; - this.btnCancel.Size = new System.Drawing.Size(80, 24); - this.btnCancel.TabIndex = 5; - this.btnCancel.Text = "Cancel"; - this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click); - // - // label2 - // - this.label2.Location = new System.Drawing.Point(128, 40); - this.label2.Name = "label2"; - this.label2.Size = new System.Drawing.Size(136, 16); - this.label2.TabIndex = 4; - this.label2.Text = "(e.g. 
Ndb01 or 10.0.1.1)"; - // - // label6 - // - this.label6.Location = new System.Drawing.Point(48, 64); - this.label6.Name = "label6"; - this.label6.Size = new System.Drawing.Size(80, 24); - this.label6.TabIndex = 9; - this.label6.Text = "CPCd port:"; - this.label6.TextAlign = System.Drawing.ContentAlignment.MiddleRight; - // - // textBoxPort - // - this.textBoxPort.Enabled = false; - this.textBoxPort.Location = new System.Drawing.Point(128, 64); - this.textBoxPort.Name = "textBoxPort"; - this.textBoxPort.Size = new System.Drawing.Size(136, 20); - this.textBoxPort.TabIndex = 2; - this.textBoxPort.TabStop = false; - this.textBoxPort.Text = ""; - // - // checkBoxDefault - // - this.checkBoxDefault.Checked = true; - this.checkBoxDefault.CheckState = System.Windows.Forms.CheckState.Checked; - this.checkBoxDefault.Location = new System.Drawing.Point(96, 96); - this.checkBoxDefault.Name = "checkBoxDefault"; - this.checkBoxDefault.Size = new System.Drawing.Size(168, 16); - this.checkBoxDefault.TabIndex = 3; - this.checkBoxDefault.Text = "Use default port (1234)?"; - this.checkBoxDefault.CheckedChanged += new System.EventHandler(this.checkBoxDefault_CheckedChanged); - // - // ComputerAddDialog - // - this.AcceptButton = this.btnAdd; - this.AutoScaleBaseSize = new System.Drawing.Size(5, 13); - this.CancelButton = this.btnCancel; - this.ClientSize = new System.Drawing.Size(298, 159); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.checkBoxDefault, - this.label6, - this.textBoxPort, - this.label2, - this.btnCancel, - this.btnAdd, - this.label1, - this.textboxComputerName}); - this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog; - this.MaximizeBox = false; - this.MinimizeBox = false; - this.Name = "ComputerAddDialog"; - this.StartPosition = System.Windows.Forms.FormStartPosition.CenterParent; - this.Text = "Add a computer"; - this.Load += new System.EventHandler(this.ComputerAddDialog_Load); - this.ResumeLayout(false); - - } - #endregion - - private void btnCancel_Click(object sender, System.EventArgs e) - { - this.Close(); - this.Dispose(); - } - - private void btnAdd_Click(object sender, System.EventArgs e) - { - int port; - if(this.textboxComputerName.Text.Equals("")) - { - MessageBox.Show(this,"A computer must have an IP address or a host name","Warning!",MessageBoxButtons.OK); - return; - } - if(this.checkBoxDefault.Checked) - { - port=1234; - } - else - { - if(this.textBoxPort.Text.Equals("")) - { - MessageBox.Show(this,"You must specify a port number!!!","Warning!",MessageBoxButtons.OK); - return; - } - else - { - try - { - port=Convert.ToInt32(this.textBoxPort.Text.ToString()); - - } - catch (Exception exception) - { - MessageBox.Show(this,"Port number must be numeric!!!","Warning!",MessageBoxButtons.OK); - return; - } - } - } - - if(mgmt.getComputer(this.textboxComputerName.Text)==null) - { - mgmt.AddComputer(this.textboxComputerName.Text.ToString(),port);} - else - { - MessageBox.Show("This computer does already exist!", "Add computer"); - return; - } - - this.Dispose(); - } - - private void ComputerAddDialog_Load(object sender, System.EventArgs e) - { - - } - - private void checkBoxDefault_CheckedChanged(object sender, System.EventArgs e) - { - if(checkBoxDefault.Checked) - textBoxPort.Enabled=false; - else - textBoxPort.Enabled=true; - } - - - - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs deleted file mode 100644 index e2f32637a8c..00000000000 --- 
a/storage/ndb/src/cw/cpcc-win32/csharp/ComputerRemoveDialog.cs +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; - -namespace NDB_CPC -{ - /// - /// Summary description for ComputerRemoveDialog. - /// - public class ComputerRemoveDialog : System.Windows.Forms.Form - { - private System.Windows.Forms.Label label1; - private System.Windows.Forms.ComboBox comboComputer; - private System.Windows.Forms.Button btnCancel; - private System.Windows.Forms.Button btnRemove; - /// - /// Required designer variable. - /// - private System.ComponentModel.Container components = null; - private ComputerMgmt mgmt; - - public ComputerRemoveDialog(ComputerMgmt mgmt) - { - // - // Required for Windows Form Designer support - // - InitializeComponent(); - - // - // TODO: Add any constructor code after InitializeComponent call - // - this.mgmt=mgmt; - } - - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if(components != null) - { - components.Dispose(); - } - } - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. 
- /// - private void InitializeComponent() - { - System.Resources.ResourceManager resources = new System.Resources.ResourceManager(typeof(ComputerRemoveDialog)); - this.label1 = new System.Windows.Forms.Label(); - this.comboComputer = new System.Windows.Forms.ComboBox(); - this.btnCancel = new System.Windows.Forms.Button(); - this.btnRemove = new System.Windows.Forms.Button(); - this.SuspendLayout(); - // - // label1 - // - this.label1.AccessibleDescription = ((string)(resources.GetObject("label1.AccessibleDescription"))); - this.label1.AccessibleName = ((string)(resources.GetObject("label1.AccessibleName"))); - this.label1.Anchor = ((System.Windows.Forms.AnchorStyles)(resources.GetObject("label1.Anchor"))); - this.label1.AutoSize = ((bool)(resources.GetObject("label1.AutoSize"))); - this.label1.Dock = ((System.Windows.Forms.DockStyle)(resources.GetObject("label1.Dock"))); - this.label1.Enabled = ((bool)(resources.GetObject("label1.Enabled"))); - this.label1.Font = ((System.Drawing.Font)(resources.GetObject("label1.Font"))); - this.label1.Image = ((System.Drawing.Image)(resources.GetObject("label1.Image"))); - this.label1.ImageAlign = ((System.Drawing.ContentAlignment)(resources.GetObject("label1.ImageAlign"))); - this.label1.ImageIndex = ((int)(resources.GetObject("label1.ImageIndex"))); - this.label1.ImeMode = ((System.Windows.Forms.ImeMode)(resources.GetObject("label1.ImeMode"))); - this.label1.Location = ((System.Drawing.Point)(resources.GetObject("label1.Location"))); - this.label1.Name = "label1"; - this.label1.RightToLeft = ((System.Windows.Forms.RightToLeft)(resources.GetObject("label1.RightToLeft"))); - this.label1.Size = ((System.Drawing.Size)(resources.GetObject("label1.Size"))); - this.label1.TabIndex = ((int)(resources.GetObject("label1.TabIndex"))); - this.label1.Text = resources.GetString("label1.Text"); - this.label1.TextAlign = ((System.Drawing.ContentAlignment)(resources.GetObject("label1.TextAlign"))); - this.label1.Visible = ((bool)(resources.GetObject("label1.Visible"))); - // - // comboComputer - // - this.comboComputer.AccessibleDescription = ((string)(resources.GetObject("comboComputer.AccessibleDescription"))); - this.comboComputer.AccessibleName = ((string)(resources.GetObject("comboComputer.AccessibleName"))); - this.comboComputer.Anchor = ((System.Windows.Forms.AnchorStyles)(resources.GetObject("comboComputer.Anchor"))); - this.comboComputer.BackgroundImage = ((System.Drawing.Image)(resources.GetObject("comboComputer.BackgroundImage"))); - this.comboComputer.Dock = ((System.Windows.Forms.DockStyle)(resources.GetObject("comboComputer.Dock"))); - this.comboComputer.Enabled = ((bool)(resources.GetObject("comboComputer.Enabled"))); - this.comboComputer.Font = ((System.Drawing.Font)(resources.GetObject("comboComputer.Font"))); - this.comboComputer.ImeMode = ((System.Windows.Forms.ImeMode)(resources.GetObject("comboComputer.ImeMode"))); - this.comboComputer.IntegralHeight = ((bool)(resources.GetObject("comboComputer.IntegralHeight"))); - this.comboComputer.ItemHeight = ((int)(resources.GetObject("comboComputer.ItemHeight"))); - this.comboComputer.Location = ((System.Drawing.Point)(resources.GetObject("comboComputer.Location"))); - this.comboComputer.MaxDropDownItems = ((int)(resources.GetObject("comboComputer.MaxDropDownItems"))); - this.comboComputer.MaxLength = ((int)(resources.GetObject("comboComputer.MaxLength"))); - this.comboComputer.Name = "comboComputer"; - this.comboComputer.RightToLeft = 
((System.Windows.Forms.RightToLeft)(resources.GetObject("comboComputer.RightToLeft"))); - this.comboComputer.Size = ((System.Drawing.Size)(resources.GetObject("comboComputer.Size"))); - this.comboComputer.Sorted = true; - this.comboComputer.TabIndex = ((int)(resources.GetObject("comboComputer.TabIndex"))); - this.comboComputer.Text = resources.GetString("comboComputer.Text"); - this.comboComputer.Visible = ((bool)(resources.GetObject("comboComputer.Visible"))); - this.comboComputer.SelectedIndexChanged += new System.EventHandler(this.comboComputer_SelectedIndexChanged); - // - // btnCancel - // - this.btnCancel.AccessibleDescription = ((string)(resources.GetObject("btnCancel.AccessibleDescription"))); - this.btnCancel.AccessibleName = ((string)(resources.GetObject("btnCancel.AccessibleName"))); - this.btnCancel.Anchor = ((System.Windows.Forms.AnchorStyles)(resources.GetObject("btnCancel.Anchor"))); - this.btnCancel.BackgroundImage = ((System.Drawing.Image)(resources.GetObject("btnCancel.BackgroundImage"))); - this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel; - this.btnCancel.Dock = ((System.Windows.Forms.DockStyle)(resources.GetObject("btnCancel.Dock"))); - this.btnCancel.Enabled = ((bool)(resources.GetObject("btnCancel.Enabled"))); - this.btnCancel.FlatStyle = ((System.Windows.Forms.FlatStyle)(resources.GetObject("btnCancel.FlatStyle"))); - this.btnCancel.Font = ((System.Drawing.Font)(resources.GetObject("btnCancel.Font"))); - this.btnCancel.Image = ((System.Drawing.Image)(resources.GetObject("btnCancel.Image"))); - this.btnCancel.ImageAlign = ((System.Drawing.ContentAlignment)(resources.GetObject("btnCancel.ImageAlign"))); - this.btnCancel.ImageIndex = ((int)(resources.GetObject("btnCancel.ImageIndex"))); - this.btnCancel.ImeMode = ((System.Windows.Forms.ImeMode)(resources.GetObject("btnCancel.ImeMode"))); - this.btnCancel.Location = ((System.Drawing.Point)(resources.GetObject("btnCancel.Location"))); - this.btnCancel.Name = "btnCancel"; - this.btnCancel.RightToLeft = ((System.Windows.Forms.RightToLeft)(resources.GetObject("btnCancel.RightToLeft"))); - this.btnCancel.Size = ((System.Drawing.Size)(resources.GetObject("btnCancel.Size"))); - this.btnCancel.TabIndex = ((int)(resources.GetObject("btnCancel.TabIndex"))); - this.btnCancel.Text = resources.GetString("btnCancel.Text"); - this.btnCancel.TextAlign = ((System.Drawing.ContentAlignment)(resources.GetObject("btnCancel.TextAlign"))); - this.btnCancel.Visible = ((bool)(resources.GetObject("btnCancel.Visible"))); - this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click); - // - // btnRemove - // - this.btnRemove.AccessibleDescription = ((string)(resources.GetObject("btnRemove.AccessibleDescription"))); - this.btnRemove.AccessibleName = ((string)(resources.GetObject("btnRemove.AccessibleName"))); - this.btnRemove.Anchor = ((System.Windows.Forms.AnchorStyles)(resources.GetObject("btnRemove.Anchor"))); - this.btnRemove.BackgroundImage = ((System.Drawing.Image)(resources.GetObject("btnRemove.BackgroundImage"))); - this.btnRemove.Dock = ((System.Windows.Forms.DockStyle)(resources.GetObject("btnRemove.Dock"))); - this.btnRemove.Enabled = ((bool)(resources.GetObject("btnRemove.Enabled"))); - this.btnRemove.FlatStyle = ((System.Windows.Forms.FlatStyle)(resources.GetObject("btnRemove.FlatStyle"))); - this.btnRemove.Font = ((System.Drawing.Font)(resources.GetObject("btnRemove.Font"))); - this.btnRemove.Image = ((System.Drawing.Image)(resources.GetObject("btnRemove.Image"))); - this.btnRemove.ImageAlign = 
((System.Drawing.ContentAlignment)(resources.GetObject("btnRemove.ImageAlign"))); - this.btnRemove.ImageIndex = ((int)(resources.GetObject("btnRemove.ImageIndex"))); - this.btnRemove.ImeMode = ((System.Windows.Forms.ImeMode)(resources.GetObject("btnRemove.ImeMode"))); - this.btnRemove.Location = ((System.Drawing.Point)(resources.GetObject("btnRemove.Location"))); - this.btnRemove.Name = "btnRemove"; - this.btnRemove.RightToLeft = ((System.Windows.Forms.RightToLeft)(resources.GetObject("btnRemove.RightToLeft"))); - this.btnRemove.Size = ((System.Drawing.Size)(resources.GetObject("btnRemove.Size"))); - this.btnRemove.TabIndex = ((int)(resources.GetObject("btnRemove.TabIndex"))); - this.btnRemove.Text = resources.GetString("btnRemove.Text"); - this.btnRemove.TextAlign = ((System.Drawing.ContentAlignment)(resources.GetObject("btnRemove.TextAlign"))); - this.btnRemove.Visible = ((bool)(resources.GetObject("btnRemove.Visible"))); - this.btnRemove.Click += new System.EventHandler(this.btnRemove_Click); - // - // ComputerRemoveDialog - // - this.AcceptButton = this.btnRemove; - this.AccessibleDescription = ((string)(resources.GetObject("$this.AccessibleDescription"))); - this.AccessibleName = ((string)(resources.GetObject("$this.AccessibleName"))); - this.Anchor = ((System.Windows.Forms.AnchorStyles)(resources.GetObject("$this.Anchor"))); - this.AutoScaleBaseSize = ((System.Drawing.Size)(resources.GetObject("$this.AutoScaleBaseSize"))); - this.AutoScroll = ((bool)(resources.GetObject("$this.AutoScroll"))); - this.AutoScrollMargin = ((System.Drawing.Size)(resources.GetObject("$this.AutoScrollMargin"))); - this.AutoScrollMinSize = ((System.Drawing.Size)(resources.GetObject("$this.AutoScrollMinSize"))); - this.BackgroundImage = ((System.Drawing.Image)(resources.GetObject("$this.BackgroundImage"))); - this.CancelButton = this.btnCancel; - this.ClientSize = ((System.Drawing.Size)(resources.GetObject("$this.ClientSize"))); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.btnRemove, - this.btnCancel, - this.comboComputer, - this.label1}); - this.Dock = ((System.Windows.Forms.DockStyle)(resources.GetObject("$this.Dock"))); - this.Enabled = ((bool)(resources.GetObject("$this.Enabled"))); - this.Font = ((System.Drawing.Font)(resources.GetObject("$this.Font"))); - this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog; - this.Icon = ((System.Drawing.Icon)(resources.GetObject("$this.Icon"))); - this.ImeMode = ((System.Windows.Forms.ImeMode)(resources.GetObject("$this.ImeMode"))); - this.Location = ((System.Drawing.Point)(resources.GetObject("$this.Location"))); - this.MaximizeBox = false; - this.MaximumSize = ((System.Drawing.Size)(resources.GetObject("$this.MaximumSize"))); - this.MinimizeBox = false; - this.MinimumSize = ((System.Drawing.Size)(resources.GetObject("$this.MinimumSize"))); - this.Name = "ComputerRemoveDialog"; - this.RightToLeft = ((System.Windows.Forms.RightToLeft)(resources.GetObject("$this.RightToLeft"))); - this.StartPosition = ((System.Windows.Forms.FormStartPosition)(resources.GetObject("$this.StartPosition"))); - this.Text = resources.GetString("$this.Text"); - this.Visible = ((bool)(resources.GetObject("$this.Visible"))); - this.Load += new System.EventHandler(this.ComputerRemoveDialog_Load); - this.ResumeLayout(false); - - } - #endregion - - private void btnRemove_Click(object sender, System.EventArgs e) - { - mgmt.RemoveComputer(comboComputer.SelectedItem.ToString()); - this.Dispose(); - } - - private void ComputerRemoveDialog_Load(object sender, 
System.EventArgs e) - { - ArrayList list = mgmt.getComputerCollection(); - foreach (Computer computer in list) - { - comboComputer.Items.Add(computer.getName()); - } - } - - private void btnCancel_Click(object sender, System.EventArgs e) - { - this.Close(); - this.Dispose(); - } - - private void comboComputer_SelectedIndexChanged(object sender, System.EventArgs e) - { - } - - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO b/storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO deleted file mode 100644 index 9689aa88361..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/csharp/DATABASE.ICO and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/Database.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Database.cs deleted file mode 100644 index d08d3464b18..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/Database.cs +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using System.Data; - -namespace NDB_CPC -{ - /// - /// Summary description for Database. 
- /// - public class Database - { - public enum Status {Disconnected=1,Connected=2, Unknown=3} - private string m_name; - private string m_owner; - private int m_mgmtPort; - private Status m_status; - private ArrayList m_processes; - public Database(string name) - { - m_name=name; - m_processes = new ArrayList(); - } - public Database(string name, string owner) - { - m_name=name; - m_owner=owner; - m_processes = new ArrayList(); - } - public Database() - { - m_processes = new ArrayList(); - } - - public string getName() - { - return m_name; - } - - public void setName(string name) - { - m_name=name; - } - - public void setMgmtPort(int port) - { - m_mgmtPort=port; - } - - public string getOwner() - { - return m_owner; - } - - public void setOwner(string name) - { - m_owner=name; - } - - - public Status getStatus() - { - return m_status; - } - - public string getStatusString() - { - if(m_status.Equals(Status.Connected)) - return "Connected"; - if(m_status.Equals(Status.Disconnected)) - return "Disconnected"; - if(m_status.Equals(Status.Unknown)) - return "Unknown"; - return "Unknown"; - } - public void setStatus(Status status) - { - m_status=status; - } - - public void addProcess(Process process) - { - /*if(check) - { - if(m_processes==null) - return; - if(m_processes.Count>0) - { - foreach (Process p in m_processes) - { - if(process.getId().Equals(p.getId())) - return; - } - } - } - */ - m_processes.Add(process); - } - public void addProcessCheck(Process process) - { - - if(m_processes==null) - return; - if(m_processes.Count>0) - { - foreach (Process p in m_processes) - { - if(process.getId().Equals(p.getId())) - return; - } - } - m_processes.Add(process); - } - - public Process getProcess(string id) - { - foreach(Process process in m_processes) - { - if(process.getId().Equals(id)) - return process; - } - return null; - } - - public Process getProcessByName(string name) - { - foreach(Process process in m_processes) - { - if(process.getName().Equals(name)) - return process; - } - return null; - } - - public void removeProcess( string processName) - { - Process p = this.getProcessByName(processName); - m_processes.Remove(p); - } - - public void removeAllProcesses() - { - Computer c; - foreach(Process p in m_processes) - { - c=p.getComputer(); - if(c.removeProcess(p.getName(),m_name).Equals(false)) - { - - } - } - m_processes.Clear(); - } - - public ArrayList getProcesses() - { - return m_processes; - } - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj deleted file mode 100644 index 6384eff8329..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj +++ /dev/null @@ -1,240 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user deleted file mode 100644 index 68937906d93..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.csproj.user +++ /dev/null @@ -1,48 +0,0 @@ - - - - - - - - - - - - diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb deleted file mode 100644 index ed3460476b0..00000000000 Binary files a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.ncb and /dev/null differ diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln b/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln deleted file mode 100644 index ef18b5e94ce..00000000000 --- 
a/storage/ndb/src/cw/cpcc-win32/csharp/NDB_CPC.sln +++ /dev/null @@ -1,21 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 7.00 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NDB_CPC", "NDB_CPC.csproj", "{B78F6720-D36C-43DD-B442-F583718D0286}" -EndProject -Global - GlobalSection(SolutionConfiguration) = preSolution - ConfigName.0 = Debug - ConfigName.1 = Release - EndGlobalSection - GlobalSection(ProjectDependencies) = postSolution - EndGlobalSection - GlobalSection(ProjectConfiguration) = postSolution - {B78F6720-D36C-43DD-B442-F583718D0286}.Debug.ActiveCfg = Debug|.NET - {B78F6720-D36C-43DD-B442-F583718D0286}.Debug.Build.0 = Debug|.NET - {B78F6720-D36C-43DD-B442-F583718D0286}.Release.ActiveCfg = Release|.NET - {B78F6720-D36C-43DD-B442-F583718D0286}.Release.Build.0 = Release|.NET - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - EndGlobalSection - GlobalSection(ExtensibilityAddIns) = postSolution - EndGlobalSection -EndGlobal diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs b/storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs deleted file mode 100644 index e4cfc37e850..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/PanelWizard.cs +++ /dev/null @@ -1,1899 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//author:Arun -//date:Nov 13,2002 -//Wizard using panel -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; - -namespace NDB_CPC -{ - /// - /// Summary description for MDXQueryBuilderWizard. 
- /// - public class PanelWizard : System.Windows.Forms.Form - { - private System.Windows.Forms.Button btnCancel; - private System.Windows.Forms.Button btnback; - private System.Windows.Forms.Button btnNext; - private System.Windows.Forms.Button btnFinish; - - //---enabling and disabling the buttons - private bool cancelEnabled; - private bool backEnabled; - private bool nextEnabled; - private bool finishEnabled; - //-------- - //--set the next and back panel - private Panel nextPanel; - private Panel backPanel; - private Panel presentPanel; - // - private Panel[] arrayPanel; - private System.Windows.Forms.Panel panel1; - private System.Windows.Forms.Panel panel2; - private System.Windows.Forms.Panel panel3; - private System.Windows.Forms.RadioButton radioBtnYes; - private System.Windows.Forms.RadioButton radioBtnNo; - private System.Windows.Forms.ListBox listBoxComputers; - private System.Windows.Forms.Label label5; - private System.Windows.Forms.Label label1; - private System.ComponentModel.IContainer components; - private System.Windows.Forms.Button buttonComputerAdd; - private System.Windows.Forms.Label label2; - private System.Windows.Forms.Label label6; - private System.Windows.Forms.Label label7; - private System.Windows.Forms.ComboBox comboNDB; - private System.Windows.Forms.ComboBox comboAPI; - private System.Windows.Forms.Label label8; - private System.Windows.Forms.ComboBox comboMGM; - private System.Windows.Forms.Button btnTransferNodeToComp; - private System.Windows.Forms.TreeView tvComputer; - private System.Windows.Forms.ListView lvNode; - private System.Windows.Forms.Button btnTransferCompToNode; - private System.Windows.Forms.Label label3; - private System.Windows.Forms.Label label9; - private System.Windows.Forms.Label label10; - private int m_nMGM; - private ComputerMgmt mgmt; - private int m_nNDB; - private int m_nAPI; - private Database m_db; - private System.Windows.Forms.Label label11; - private System.Windows.Forms.TextBox textDbName; - private System.Windows.Forms.Label label31; - private System.Windows.Forms.Label label32; - private System.Windows.Forms.Label label33; - private System.Windows.Forms.Label label18; - private System.Windows.Forms.Label labelTitle; - private System.Windows.Forms.Label labelCwd; - private System.Windows.Forms.Label labelArgs; - private System.Windows.Forms.Label labelOther; - private System.Windows.Forms.Label labelPath; - private int m_noOfConfiguredNodes; - private int m_noOfConfiguredMgmt; - private int m_noOfConfiguredNdb; - private string m_mgmHost; - private string m_mgmPort; - private System.Windows.Forms.TextBox textCwd; - private System.Windows.Forms.TextBox textArgs; - private System.Windows.Forms.TextBox textOther; - private System.Windows.Forms.TextBox textPath; - private System.Windows.Forms.TextBox textComputer; - private System.Windows.Forms.TextBox textDatabase; - private System.Windows.Forms.TextBox textName; - private int m_noOfConfiguredApi; - private bool m_bMgmt; - private System.Windows.Forms.Button buttonSave; - private System.Windows.Forms.CheckBox checkBoxReuse; - private System.Windows.Forms.Label label4; - private System.Windows.Forms.Panel panel4; - private System.Windows.Forms.CheckBox checkBoxLater; - private System.Windows.Forms.RadioButton radioYes; - private System.Windows.Forms.RadioButton radioNo; - private System.Windows.Forms.Panel panel6; - private System.Windows.Forms.Panel panel5; - private System.Windows.Forms.RadioButton radioStartNo; - private System.Windows.Forms.RadioButton radioStartYes; - 
private System.Windows.Forms.ImageList imageListComp; - private System.Windows.Forms.Label label12; - private System.Windows.Forms.TextBox textOwner; - private System.Windows.Forms.Label label13; - private System.Windows.Forms.TextBox textEnv; - private bool m_bNdb; - public PanelWizard(ComputerMgmt comp) - { - mgmt=comp; - m_noOfConfiguredNodes=0; - m_noOfConfiguredMgmt=0; - m_noOfConfiguredNdb=0; - m_noOfConfiguredApi=0; - Size panelSize= new Size(350,300); - Size s= new Size(355,360); - Point cancel= new Point(8,310); - Point back= new Point(96,310); - Point next = new Point(184,310); - Point finish= new Point(272,310); - InitializeComponent(); - this.Size=s; - this.btnCancel.Location=cancel; - - this.btnback.Location=back; - this.btnNext.Location=next; - this.btnFinish.Location=finish; - - arrayPanel=new Panel[]{panel1,panel2,panel3,panel4,panel5,panel6};//,panel5, panel6}; - panel1.Size=panelSize; - - comboNDB.SelectedIndex=0; - comboAPI.SelectedIndex=0; - comboMGM.SelectedIndex=0; - m_bMgmt=false; - m_bNdb=false; - - m_db = new Database(); - if(listBoxComputers.Items.Count.Equals(0)) - btnNext.Enabled=false; - } - - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if(components != null) - { - components.Dispose(); - } - } - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. - /// - private void InitializeComponent() - { - this.components = new System.ComponentModel.Container(); - System.Resources.ResourceManager resources = new System.Resources.ResourceManager(typeof(PanelWizard)); - this.panel1 = new System.Windows.Forms.Panel(); - this.buttonComputerAdd = new System.Windows.Forms.Button(); - this.label1 = new System.Windows.Forms.Label(); - this.label5 = new System.Windows.Forms.Label(); - this.listBoxComputers = new System.Windows.Forms.ListBox(); - this.radioBtnNo = new System.Windows.Forms.RadioButton(); - this.radioBtnYes = new System.Windows.Forms.RadioButton(); - this.panel2 = new System.Windows.Forms.Panel(); - this.label12 = new System.Windows.Forms.Label(); - this.textOwner = new System.Windows.Forms.TextBox(); - this.label11 = new System.Windows.Forms.Label(); - this.textDbName = new System.Windows.Forms.TextBox(); - this.label8 = new System.Windows.Forms.Label(); - this.label7 = new System.Windows.Forms.Label(); - this.label6 = new System.Windows.Forms.Label(); - this.comboMGM = new System.Windows.Forms.ComboBox(); - this.comboAPI = new System.Windows.Forms.ComboBox(); - this.comboNDB = new System.Windows.Forms.ComboBox(); - this.label2 = new System.Windows.Forms.Label(); - this.panel3 = new System.Windows.Forms.Panel(); - this.checkBoxLater = new System.Windows.Forms.CheckBox(); - this.label10 = new System.Windows.Forms.Label(); - this.label9 = new System.Windows.Forms.Label(); - this.label3 = new System.Windows.Forms.Label(); - this.btnTransferCompToNode = new System.Windows.Forms.Button(); - this.btnTransferNodeToComp = new System.Windows.Forms.Button(); - this.lvNode = new System.Windows.Forms.ListView(); - this.tvComputer = new System.Windows.Forms.TreeView(); - this.imageListComp = new System.Windows.Forms.ImageList(this.components); - this.panel6 = new System.Windows.Forms.Panel(); - this.radioStartNo = new System.Windows.Forms.RadioButton(); - this.radioStartYes = new System.Windows.Forms.RadioButton(); - this.label18 = new 
System.Windows.Forms.Label(); - this.btnCancel = new System.Windows.Forms.Button(); - this.btnback = new System.Windows.Forms.Button(); - this.btnNext = new System.Windows.Forms.Button(); - this.btnFinish = new System.Windows.Forms.Button(); - this.panel4 = new System.Windows.Forms.Panel(); - this.textEnv = new System.Windows.Forms.TextBox(); - this.label13 = new System.Windows.Forms.Label(); - this.checkBoxReuse = new System.Windows.Forms.CheckBox(); - this.buttonSave = new System.Windows.Forms.Button(); - this.labelTitle = new System.Windows.Forms.Label(); - this.textComputer = new System.Windows.Forms.TextBox(); - this.textCwd = new System.Windows.Forms.TextBox(); - this.textArgs = new System.Windows.Forms.TextBox(); - this.textOther = new System.Windows.Forms.TextBox(); - this.textPath = new System.Windows.Forms.TextBox(); - this.textDatabase = new System.Windows.Forms.TextBox(); - this.textName = new System.Windows.Forms.TextBox(); - this.labelCwd = new System.Windows.Forms.Label(); - this.labelArgs = new System.Windows.Forms.Label(); - this.labelOther = new System.Windows.Forms.Label(); - this.labelPath = new System.Windows.Forms.Label(); - this.label31 = new System.Windows.Forms.Label(); - this.label32 = new System.Windows.Forms.Label(); - this.label33 = new System.Windows.Forms.Label(); - this.panel5 = new System.Windows.Forms.Panel(); - this.radioNo = new System.Windows.Forms.RadioButton(); - this.radioYes = new System.Windows.Forms.RadioButton(); - this.label4 = new System.Windows.Forms.Label(); - this.panel1.SuspendLayout(); - this.panel2.SuspendLayout(); - this.panel3.SuspendLayout(); - this.panel6.SuspendLayout(); - this.panel4.SuspendLayout(); - this.panel5.SuspendLayout(); - this.SuspendLayout(); - // - // panel1 - // - this.panel1.Controls.AddRange(new System.Windows.Forms.Control[] { - this.buttonComputerAdd, - this.label1, - this.label5, - this.listBoxComputers, - this.radioBtnNo, - this.radioBtnYes}); - this.panel1.Name = "panel1"; - this.panel1.Size = new System.Drawing.Size(344, 312); - this.panel1.TabIndex = 0; - this.panel1.Paint += new System.Windows.Forms.PaintEventHandler(this.panel1_Paint); - // - // buttonComputerAdd - // - this.buttonComputerAdd.Enabled = false; - this.buttonComputerAdd.Location = new System.Drawing.Point(192, 232); - this.buttonComputerAdd.Name = "buttonComputerAdd"; - this.buttonComputerAdd.Size = new System.Drawing.Size(96, 24); - this.buttonComputerAdd.TabIndex = 3; - this.buttonComputerAdd.Text = "Add computer..."; - this.buttonComputerAdd.Click += new System.EventHandler(this.buttonComputerAdd_Click); - // - // label1 - // - this.label1.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.label1.Location = new System.Drawing.Point(80, 8); - this.label1.Name = "label1"; - this.label1.Size = new System.Drawing.Size(200, 23); - this.label1.TabIndex = 5; - this.label1.Text = "Configure computers"; - this.label1.TextAlign = System.Drawing.ContentAlignment.MiddleCenter; - // - // label5 - // - this.label5.Location = new System.Drawing.Point(24, 40); - this.label5.Name = "label5"; - this.label5.Size = new System.Drawing.Size(128, 23); - this.label5.TabIndex = 4; - this.label5.Text = "Available computers:"; - this.label5.TextAlign = System.Drawing.ContentAlignment.MiddleCenter; - // - // listBoxComputers - // - this.listBoxComputers.Location = new System.Drawing.Point(24, 64); - this.listBoxComputers.Name = "listBoxComputers"; - 
this.listBoxComputers.Size = new System.Drawing.Size(128, 212); - this.listBoxComputers.TabIndex = 3; - this.listBoxComputers.SelectedIndexChanged += new System.EventHandler(this.listBoxComputers_SelectedIndexChanged); - // - // radioBtnNo - // - this.radioBtnNo.AutoCheck = false; - this.radioBtnNo.Location = new System.Drawing.Point(168, 168); - this.radioBtnNo.Name = "radioBtnNo"; - this.radioBtnNo.Size = new System.Drawing.Size(152, 64); - this.radioBtnNo.TabIndex = 2; - this.radioBtnNo.Text = "No, I have to add more computers in order to deploy NDB Cluster. "; - this.radioBtnNo.Click += new System.EventHandler(this.radioBtnNo_Click); - // - // radioBtnYes - // - this.radioBtnYes.AutoCheck = false; - this.radioBtnYes.Location = new System.Drawing.Point(168, 72); - this.radioBtnYes.Name = "radioBtnYes"; - this.radioBtnYes.Size = new System.Drawing.Size(152, 80); - this.radioBtnYes.TabIndex = 1; - this.radioBtnYes.Text = "Yes, all the computers that I need to deploy NDB Cluster exists in the list \"Avai" + - "lable computers\""; - this.radioBtnYes.Click += new System.EventHandler(this.radioBtnYes_Click); - // - // panel2 - // - this.panel2.Controls.AddRange(new System.Windows.Forms.Control[] { - this.label12, - this.textOwner, - this.label11, - this.textDbName, - this.label8, - this.label7, - this.label6, - this.comboMGM, - this.comboAPI, - this.comboNDB, - this.label2}); - this.panel2.Location = new System.Drawing.Point(0, 320); - this.panel2.Name = "panel2"; - this.panel2.Size = new System.Drawing.Size(344, 312); - this.panel2.TabIndex = 1; - this.panel2.Validating += new System.ComponentModel.CancelEventHandler(this.panel2_Validating); - this.panel2.Paint += new System.Windows.Forms.PaintEventHandler(this.panel2_Paint); - // - // label12 - // - this.label12.Location = new System.Drawing.Point(72, 216); - this.label12.Name = "label12"; - this.label12.Size = new System.Drawing.Size(112, 24); - this.label12.TabIndex = 16; - this.label12.Text = "Database owner:"; - this.label12.TextAlign = System.Drawing.ContentAlignment.MiddleRight; - // - // textOwner - // - this.textOwner.Location = new System.Drawing.Point(192, 216); - this.textOwner.Name = "textOwner"; - this.textOwner.TabIndex = 5; - this.textOwner.Text = ""; - this.textOwner.TextChanged += new System.EventHandler(this.textOwner_TextChanged); - // - // label11 - // - this.label11.Location = new System.Drawing.Point(72, 184); - this.label11.Name = "label11"; - this.label11.Size = new System.Drawing.Size(112, 24); - this.label11.TabIndex = 14; - this.label11.Text = "Database name:"; - this.label11.TextAlign = System.Drawing.ContentAlignment.MiddleRight; - this.label11.Click += new System.EventHandler(this.label11_Click); - // - // textDbName - // - this.textDbName.Location = new System.Drawing.Point(192, 184); - this.textDbName.Name = "textDbName"; - this.textDbName.TabIndex = 4; - this.textDbName.Text = ""; - - this.textDbName.TextChanged += new System.EventHandler(this.textDbName_TextChanged); - // - // label8 - // - this.label8.Location = new System.Drawing.Point(16, 120); - this.label8.Name = "label8"; - this.label8.Size = new System.Drawing.Size(176, 24); - this.label8.TabIndex = 12; - this.label8.Text = "Number of management servers:"; - this.label8.TextAlign = System.Drawing.ContentAlignment.MiddleLeft; - // - // label7 - // - this.label7.Location = new System.Drawing.Point(16, 88); - this.label7.Name = "label7"; - this.label7.Size = new System.Drawing.Size(120, 24); - this.label7.TabIndex = 11; - this.label7.Text = "Number of 
API nodes:"; - this.label7.TextAlign = System.Drawing.ContentAlignment.MiddleLeft; - // - // label6 - // - this.label6.Location = new System.Drawing.Point(16, 56); - this.label6.Name = "label6"; - this.label6.Size = new System.Drawing.Size(144, 24); - this.label6.TabIndex = 10; - this.label6.Text = "Number of database nodes:"; - this.label6.TextAlign = System.Drawing.ContentAlignment.MiddleLeft; - // - // comboMGM - // - this.comboMGM.DisplayMember = "0"; - this.comboMGM.Items.AddRange(new object[] { - "1"}); - this.comboMGM.Location = new System.Drawing.Point(192, 120); - this.comboMGM.Name = "comboMGM"; - this.comboMGM.Size = new System.Drawing.Size(104, 21); - this.comboMGM.TabIndex = 3; - this.comboMGM.Text = "comboBox3"; - // - // comboAPI - // - this.comboAPI.DisplayMember = "0"; - this.comboAPI.Items.AddRange(new object[] { - "1", - "2", - "3", - "4", - "5", - "6", - "7", - "8", - "9", - "10"}); - this.comboAPI.Location = new System.Drawing.Point(192, 88); - this.comboAPI.Name = "comboAPI"; - this.comboAPI.Size = new System.Drawing.Size(104, 21); - this.comboAPI.TabIndex = 2; - this.comboAPI.Text = "comboBox2"; - // - // comboNDB - // - this.comboNDB.DisplayMember = "0"; - this.comboNDB.Items.AddRange(new object[] { - "1", - "2", - "4", - "8"}); - this.comboNDB.Location = new System.Drawing.Point(192, 56); - this.comboNDB.Name = "comboNDB"; - this.comboNDB.Size = new System.Drawing.Size(104, 21); - this.comboNDB.TabIndex = 1; - this.comboNDB.Text = "comboBox1"; - // - // label2 - // - this.label2.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.label2.Location = new System.Drawing.Point(80, 8); - this.label2.Name = "label2"; - this.label2.Size = new System.Drawing.Size(208, 23); - this.label2.TabIndex = 6; - this.label2.Text = "Setup NDB Cluster nodes"; - this.label2.TextAlign = System.Drawing.ContentAlignment.MiddleCenter; - // - // panel3 - // - this.panel3.Controls.AddRange(new System.Windows.Forms.Control[] { - this.checkBoxLater, - this.label10, - this.label9, - this.label3, - this.btnTransferCompToNode, - this.btnTransferNodeToComp, - this.lvNode, - this.tvComputer}); - this.panel3.Location = new System.Drawing.Point(360, 8); - this.panel3.Name = "panel3"; - this.panel3.Size = new System.Drawing.Size(320, 312); - this.panel3.TabIndex = 2; - // - // checkBoxLater - // - this.checkBoxLater.Location = new System.Drawing.Point(40, 256); - this.checkBoxLater.Name = "checkBoxLater"; - this.checkBoxLater.Size = new System.Drawing.Size(240, 16); - this.checkBoxLater.TabIndex = 9; - this.checkBoxLater.Text = "I will configure these nodes manually, later."; - // - // label10 - // - this.label10.Location = new System.Drawing.Point(16, 40); - this.label10.Name = "label10"; - this.label10.Size = new System.Drawing.Size(104, 16); - this.label10.TabIndex = 8; - this.label10.Text = "NDB Cluster nodes:"; - this.label10.TextAlign = System.Drawing.ContentAlignment.MiddleLeft; - // - // label9 - // - this.label9.Location = new System.Drawing.Point(192, 40); - this.label9.Name = "label9"; - this.label9.Size = new System.Drawing.Size(100, 16); - this.label9.TabIndex = 7; - this.label9.Text = "Computers:"; - this.label9.TextAlign = System.Drawing.ContentAlignment.MiddleLeft; - // - // label3 - // - this.label3.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.label3.Location = new 
System.Drawing.Point(40, 8); - this.label3.Name = "label3"; - this.label3.Size = new System.Drawing.Size(280, 23); - this.label3.TabIndex = 6; - this.label3.Text = "Assign NDB nodes to computers"; - this.label3.TextAlign = System.Drawing.ContentAlignment.MiddleCenter; - // - // btnTransferCompToNode - // - this.btnTransferCompToNode.Location = new System.Drawing.Point(144, 160); - this.btnTransferCompToNode.Name = "btnTransferCompToNode"; - this.btnTransferCompToNode.Size = new System.Drawing.Size(40, 24); - this.btnTransferCompToNode.TabIndex = 4; - this.btnTransferCompToNode.Text = "<---"; - // - // btnTransferNodeToComp - // - this.btnTransferNodeToComp.Location = new System.Drawing.Point(144, 128); - this.btnTransferNodeToComp.Name = "btnTransferNodeToComp"; - this.btnTransferNodeToComp.Size = new System.Drawing.Size(40, 24); - this.btnTransferNodeToComp.TabIndex = 3; - this.btnTransferNodeToComp.Text = "--->"; - this.btnTransferNodeToComp.Click += new System.EventHandler(this.btnTransferNodeToComp_Click); - // - // lvNode - // - this.lvNode.HideSelection = false; - this.lvNode.Location = new System.Drawing.Point(16, 56); - this.lvNode.Name = "lvNode"; - this.lvNode.Size = new System.Drawing.Size(112, 192); - this.lvNode.TabIndex = 2; - this.lvNode.View = System.Windows.Forms.View.List; - this.lvNode.SelectedIndexChanged += new System.EventHandler(this.lvNode_SelectedIndexChanged); - // - // tvComputer - // - this.tvComputer.HideSelection = false; - this.tvComputer.ImageList = this.imageListComp; - this.tvComputer.Location = new System.Drawing.Point(192, 56); - this.tvComputer.Name = "tvComputer"; - this.tvComputer.Size = new System.Drawing.Size(120, 192); - this.tvComputer.TabIndex = 1; - this.tvComputer.MouseDown += new System.Windows.Forms.MouseEventHandler(this.tvComputer_MouseDown); - this.tvComputer.AfterSelect += new System.Windows.Forms.TreeViewEventHandler(this.tvComputer_AfterSelect); - this.tvComputer.MouseLeave += new System.EventHandler(this.tvComputer_MouseLeave); - this.tvComputer.DragDrop += new System.Windows.Forms.DragEventHandler(this.tvComputer_DragDrop); - // - // imageListComp - // - this.imageListComp.ColorDepth = System.Windows.Forms.ColorDepth.Depth8Bit; - this.imageListComp.ImageSize = new System.Drawing.Size(16, 16); - this.imageListComp.ImageStream = ((System.Windows.Forms.ImageListStreamer)(resources.GetObject("imageListComp.ImageStream"))); - this.imageListComp.TransparentColor = System.Drawing.Color.Transparent; - // - // panel6 - // - this.panel6.Controls.AddRange(new System.Windows.Forms.Control[] { - this.radioStartNo, - this.radioStartYes, - this.label18}); - this.panel6.Location = new System.Drawing.Point(344, 336); - this.panel6.Name = "panel6"; - this.panel6.Size = new System.Drawing.Size(344, 312); - this.panel6.TabIndex = 3; - this.panel6.Paint += new System.Windows.Forms.PaintEventHandler(this.panel4_Paint); - // - // radioStartNo - // - this.radioStartNo.Location = new System.Drawing.Point(40, 144); - this.radioStartNo.Name = "radioStartNo"; - this.radioStartNo.Size = new System.Drawing.Size(272, 48); - this.radioStartNo.TabIndex = 81; - this.radioStartNo.Text = "Manually start NDB Cluster. 
The Magician will exit and you must start NDB Cluster" + - " manually."; - this.radioStartNo.CheckedChanged += new System.EventHandler(this.radioStartNo_CheckedChanged); - // - // radioStartYes - // - this.radioStartYes.Location = new System.Drawing.Point(40, 40); - this.radioStartYes.Name = "radioStartYes"; - this.radioStartYes.Size = new System.Drawing.Size(272, 88); - this.radioStartYes.TabIndex = 80; - this.radioStartYes.Text = "Start NDB Cluster now. The Magician will start NDB Cluster and exit. MAKE SURE YO" + - "U HAVE STARTED THE MGMTSRVR WITH THE CORRECT CONFIGURATION FILE!!!"; - this.radioStartYes.CheckedChanged += new System.EventHandler(this.radioStartYes_CheckedChanged); - // - // label18 - // - this.label18.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.label18.Location = new System.Drawing.Point(56, 8); - this.label18.Name = "label18"; - this.label18.Size = new System.Drawing.Size(224, 24); - this.label18.TabIndex = 79; - this.label18.Text = "Start NDB Cluster and finish"; - // - // btnCancel - // - this.btnCancel.Location = new System.Drawing.Point(8, 656); - this.btnCancel.Name = "btnCancel"; - this.btnCancel.Size = new System.Drawing.Size(70, 23); - this.btnCancel.TabIndex = 10; - this.btnCancel.Text = "Cancel"; - this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click); - // - // btnback - // - this.btnback.Location = new System.Drawing.Point(96, 656); - this.btnback.Name = "btnback"; - this.btnback.Size = new System.Drawing.Size(70, 23); - this.btnback.TabIndex = 11; - this.btnback.Text = "< Back"; - this.btnback.Click += new System.EventHandler(this.btnback_Click); - // - // btnNext - // - this.btnNext.Location = new System.Drawing.Point(184, 656); - this.btnNext.Name = "btnNext"; - this.btnNext.Size = new System.Drawing.Size(70, 23); - this.btnNext.TabIndex = 12; - this.btnNext.Text = "Next >"; - this.btnNext.Click += new System.EventHandler(this.btnNext_Click); - // - // btnFinish - // - this.btnFinish.Location = new System.Drawing.Point(272, 656); - this.btnFinish.Name = "btnFinish"; - this.btnFinish.Size = new System.Drawing.Size(70, 23); - this.btnFinish.TabIndex = 13; - this.btnFinish.Text = "Finish"; - this.btnFinish.Click += new System.EventHandler(this.btnFinish_Click); - // - // panel4 - // - this.panel4.Controls.AddRange(new System.Windows.Forms.Control[] { - this.textEnv, - this.label13, - this.checkBoxReuse, - this.buttonSave, - this.labelTitle, - this.textComputer, - this.textCwd, - this.textArgs, - this.textOther, - this.textPath, - this.textDatabase, - this.textName, - this.labelCwd, - this.labelArgs, - this.labelOther, - this.labelPath, - this.label31, - this.label32, - this.label33}); - this.panel4.Location = new System.Drawing.Point(672, 8); - this.panel4.Name = "panel4"; - this.panel4.Size = new System.Drawing.Size(344, 312); - this.panel4.TabIndex = 62; - this.panel4.Paint += new System.Windows.Forms.PaintEventHandler(this.panel5_Paint); - // - // textEnv - // - this.textEnv.Location = new System.Drawing.Point(136, 136); - this.textEnv.Name = "textEnv"; - this.textEnv.Size = new System.Drawing.Size(184, 20); - this.textEnv.TabIndex = 2; - this.textEnv.TabStop = false; - this.textEnv.Text = ""; - // - // label13 - // - this.label13.Location = new System.Drawing.Point(8, 136); - this.label13.Name = "label13"; - this.label13.Size = new System.Drawing.Size(136, 24); - this.label13.TabIndex = 81; - this.label13.Text = "Environment 
variables:"; - // - // checkBoxReuse - // - this.checkBoxReuse.Location = new System.Drawing.Point(88, 232); - this.checkBoxReuse.Name = "checkBoxReuse"; - this.checkBoxReuse.Size = new System.Drawing.Size(240, 32); - this.checkBoxReuse.TabIndex = 5; - this.checkBoxReuse.TabStop = false; - this.checkBoxReuse.Text = "Use the same configuration for ALL NDB nodes?"; - // - // buttonSave - // - this.buttonSave.Location = new System.Drawing.Point(184, 264); - this.buttonSave.Name = "buttonSave"; - this.buttonSave.Size = new System.Drawing.Size(88, 24); - this.buttonSave.TabIndex = 6; - this.buttonSave.Text = "Save"; - this.buttonSave.Click += new System.EventHandler(this.buttonSave_Click); - // - // labelTitle - // - this.labelTitle.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.labelTitle.Location = new System.Drawing.Point(80, 16); - this.labelTitle.Name = "labelTitle"; - this.labelTitle.Size = new System.Drawing.Size(192, 23); - this.labelTitle.TabIndex = 79; - this.labelTitle.Text = "Mgmtsrvr configuration"; - // - // textComputer - // - this.textComputer.Location = new System.Drawing.Point(136, 40); - this.textComputer.Name = "textComputer"; - this.textComputer.ReadOnly = true; - this.textComputer.Size = new System.Drawing.Size(184, 20); - this.textComputer.TabIndex = 77; - this.textComputer.TabStop = false; - this.textComputer.Text = ""; - // - // textCwd - // - this.textCwd.Location = new System.Drawing.Point(136, 208); - this.textCwd.Name = "textCwd"; - this.textCwd.Size = new System.Drawing.Size(184, 20); - this.textCwd.TabIndex = 5; - this.textCwd.TabStop = false; - this.textCwd.Text = ""; - // - // textArgs - // - this.textArgs.Location = new System.Drawing.Point(136, 184); - this.textArgs.Name = "textArgs"; - this.textArgs.Size = new System.Drawing.Size(184, 20); - this.textArgs.TabIndex = 4; - this.textArgs.TabStop = false; - this.textArgs.Text = ""; - // - // textOther - // - this.textOther.Location = new System.Drawing.Point(136, 160); - this.textOther.Name = "textOther"; - this.textOther.Size = new System.Drawing.Size(184, 20); - this.textOther.TabIndex = 3; - this.textOther.TabStop = false; - this.textOther.Text = ""; - // - // textPath - // - this.textPath.Location = new System.Drawing.Point(136, 112); - this.textPath.Name = "textPath"; - this.textPath.Size = new System.Drawing.Size(184, 20); - this.textPath.TabIndex = 1; - this.textPath.TabStop = false; - this.textPath.Text = ""; - this.textPath.TextChanged += new System.EventHandler(this.textPath_TextChanged); - // - // textDatabase - // - this.textDatabase.Location = new System.Drawing.Point(136, 88); - this.textDatabase.Name = "textDatabase"; - this.textDatabase.ReadOnly = true; - this.textDatabase.Size = new System.Drawing.Size(184, 20); - this.textDatabase.TabIndex = 62; - this.textDatabase.TabStop = false; - this.textDatabase.Text = ""; - // - // textName - // - this.textName.Location = new System.Drawing.Point(136, 64); - this.textName.Name = "textName"; - this.textName.ReadOnly = true; - this.textName.Size = new System.Drawing.Size(184, 20); - this.textName.TabIndex = 60; - this.textName.TabStop = false; - this.textName.Text = ""; - // - // labelCwd - // - this.labelCwd.Location = new System.Drawing.Point(8, 208); - this.labelCwd.Name = "labelCwd"; - this.labelCwd.Size = new System.Drawing.Size(112, 24); - this.labelCwd.TabIndex = 72; - this.labelCwd.Text = "Current working dir.:"; - // - // labelArgs - // - 
this.labelArgs.Location = new System.Drawing.Point(8, 184); - this.labelArgs.Name = "labelArgs"; - this.labelArgs.Size = new System.Drawing.Size(128, 24); - this.labelArgs.TabIndex = 70; - this.labelArgs.Text = "Arguments to mgmtsrvr:"; - // - // labelOther - // - this.labelOther.Location = new System.Drawing.Point(8, 160); - this.labelOther.Name = "labelOther"; - this.labelOther.Size = new System.Drawing.Size(136, 24); - this.labelOther.TabIndex = 69; - this.labelOther.Text = "Mgmtsrvr port:"; - // - // labelPath - // - this.labelPath.Location = new System.Drawing.Point(8, 112); - this.labelPath.Name = "labelPath"; - this.labelPath.Size = new System.Drawing.Size(128, 24); - this.labelPath.TabIndex = 67; - this.labelPath.Text = "Path to mgmtsrvr binary:"; - // - // label31 - // - this.label31.Location = new System.Drawing.Point(8, 88); - this.label31.Name = "label31"; - this.label31.Size = new System.Drawing.Size(88, 24); - this.label31.TabIndex = 65; - this.label31.Text = "Database:"; - // - // label32 - // - this.label32.Location = new System.Drawing.Point(8, 64); - this.label32.Name = "label32"; - this.label32.Size = new System.Drawing.Size(88, 24); - this.label32.TabIndex = 63; - this.label32.Text = "Process name:"; - // - // label33 - // - this.label33.Location = new System.Drawing.Point(8, 40); - this.label33.Name = "label33"; - this.label33.Size = new System.Drawing.Size(64, 24); - this.label33.TabIndex = 61; - this.label33.Text = "Computer:"; - // - // panel5 - // - this.panel5.Controls.AddRange(new System.Windows.Forms.Control[] { - this.radioNo, - this.radioYes, - this.label4}); - this.panel5.Location = new System.Drawing.Point(672, 328); - this.panel5.Name = "panel5"; - this.panel5.Size = new System.Drawing.Size(344, 312); - this.panel5.TabIndex = 63; - // - // radioNo - // - this.radioNo.Location = new System.Drawing.Point(72, 160); - this.radioNo.Name = "radioNo"; - this.radioNo.Size = new System.Drawing.Size(240, 48); - this.radioNo.TabIndex = 1; - this.radioNo.Text = "I already have a configuration file that I want to use for this configuration."; - this.radioNo.CheckedChanged += new System.EventHandler(this.radioNo_CheckedChanged); - // - // radioYes - // - this.radioYes.Checked = true; - this.radioYes.Location = new System.Drawing.Point(72, 56); - this.radioYes.Name = "radioYes"; - this.radioYes.Size = new System.Drawing.Size(240, 88); - this.radioYes.TabIndex = 0; - this.radioYes.TabStop = true; - this.radioYes.Text = "Generate a configuration file template (initconfig.txt) for the mgmtsrvr based on" + - " the specified configuration? 
Notepad will be started with a template that you m" + - "ust complete and save in the cwd of the mgmtsrvr."; - this.radioYes.CheckedChanged += new System.EventHandler(this.radioYes_CheckedChanged); - // - // label4 - // - this.label4.Font = new System.Drawing.Font("Microsoft Sans Serif", 12F, System.Drawing.FontStyle.Bold, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0))); - this.label4.Location = new System.Drawing.Point(88, 8); - this.label4.Name = "label4"; - this.label4.Size = new System.Drawing.Size(192, 40); - this.label4.TabIndex = 79; - this.label4.Text = "Tying up the configuration"; - this.label4.TextAlign = System.Drawing.ContentAlignment.TopCenter; - // - // PanelWizard - // - this.AutoScaleBaseSize = new System.Drawing.Size(5, 13); - this.ClientSize = new System.Drawing.Size(1030, 755); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.panel5, - this.panel4, - this.panel1, - this.btnFinish, - this.btnNext, - this.btnback, - this.btnCancel, - this.panel6, - this.panel3, - this.panel2}); - this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog; - this.MaximizeBox = false; - this.MinimizeBox = false; - this.Name = "PanelWizard"; - this.SizeGripStyle = System.Windows.Forms.SizeGripStyle.Hide; - this.Text = "Create Database Magician"; - this.Load += new System.EventHandler(this.MDXQueryBuilderWizard_Load); - this.Activated += new System.EventHandler(this.PanelWizard_Activated); - this.panel1.ResumeLayout(false); - this.panel2.ResumeLayout(false); - this.panel3.ResumeLayout(false); - this.panel6.ResumeLayout(false); - this.panel4.ResumeLayout(false); - this.panel5.ResumeLayout(false); - this.ResumeLayout(false); - - } - #endregion - - private void MDXQueryBuilderWizard_Load(object sender, System.EventArgs e) - { - - foreach(Control ct in this.Controls) - { - if(ct.GetType().Name=="Panel") - { - ct.Left=0; - ct.Top=0; - ct.Visible=false; - } - - } - presentPanel=arrayPanel[0]; - //--set the properties - setBtnPanProperty(getPosition(presentPanel)); - //------ - refreshLook(); - } - - //-set the buttons and panel - private void refreshLook() - { - if(cancelEnabled) - btnCancel.Enabled=true; - else - btnCancel.Enabled=false; - - if(backEnabled) - btnback.Enabled=true; - else - btnback.Enabled=false; - - if(nextEnabled) - btnNext.Enabled=true; - else - btnNext.Enabled=false; - - if(finishEnabled) - btnFinish.Enabled=true; - else - btnFinish.Enabled=false; - - if(presentPanel!=null) - { - presentPanel.Show(); - presentPanel.BringToFront(); - } - } - //-------- - private int getPosition(Panel p) - { - int result=-1; - for(int i=0;i 0) - { - btnNext.Enabled=true; - } - this.listBoxComputers.EndUpdate(); - this.listBoxComputers.Refresh(); - } - - - private void tvComputer_AfterSelect(object sender, System.Windows.Forms.TreeViewEventArgs e) - { - tvComputer.SelectedNode.Expand(); - - } - - private void tvComputer_DragDrop(object sender, System.Windows.Forms.DragEventArgs e) - { - - } - - private void tvComputer_MouseLeave(object sender, System.EventArgs e) - { - - } - - private void tvComputer_MouseDown(object sender, System.Windows.Forms.MouseEventArgs e) - { - TreeNode prevNode = tvComputer.SelectedNode; - if(prevNode!=null) - { - prevNode.BackColor=Color.White; - } - TreeNode node = tvComputer.GetNodeAt(e.X,e.Y); - if(node==null) - { - return; - } - - tvComputer.SelectedNode=node; - tvComputer.SelectedNode.BackColor=Color.LightGray; - - } - - private void btnTransferNodeToComp_Click(object sender, System.EventArgs e) - { - - 
if(tvComputer.SelectedNode==null) - return; - if(lvNode.SelectedItems.Equals(null)) - return; - int itemCount=lvNode.SelectedItems.Count; - lvNode.BeginUpdate(); - tvComputer.BeginUpdate(); - for(int i=0;i < itemCount;i++) - { - tvComputer.SelectedNode.Nodes.Add(lvNode.SelectedItems[i].Text.ToString()); - } - - for(int i=0;i < itemCount;i++) - { - lvNode.Items.RemoveAt(lvNode.SelectedIndices[0]); - - } - if(lvNode.Items.Count.Equals(0)) - btnNext.Enabled=true; - else - btnNext.Enabled=false; - tvComputer.SelectedNode.Expand(); - lvNode.EndUpdate(); - tvComputer.EndUpdate(); - } - - private void lvNode_SelectedIndexChanged(object sender, System.EventArgs e) - { - } - - private void prepareNodeAssignmentPanel() - { - ArrayList computers = mgmt.getComputerCollection(); - m_nNDB=Convert.ToInt32(comboNDB.SelectedItem.ToString()); - m_nAPI=Convert.ToInt32(comboAPI.SelectedItem.ToString()); - m_nMGM=Convert.ToInt32(comboMGM.SelectedItem.ToString()); - - lvNode.Items.Clear(); - tvComputer.Nodes.Clear(); - for (int i=1;i<=m_nMGM;i++) - lvNode.Items.Add("mgm."+i); - - for (int i=m_nMGM+1;i<=(m_nNDB+m_nMGM);i++) - lvNode.Items.Add("ndb."+i); - - for (int i=m_nMGM+m_nNDB+1;i<=(m_nNDB+m_nMGM+m_nAPI);i++) - lvNode.Items.Add("api."+i); - - foreach(Computer c in computers) - { - if(c.getStatus() == Computer.Status.Connected) - tvComputer.Nodes.Add(c.getName()); - } - - } - private void prepareNodeConfigurationPanel() - { - Computer c; - for(int i=0;i0 && textDbName.TextLength > 0) - nextEnabled=true; - else - nextEnabled=false; - - refreshLook(); - - } - - private void checkBoxLater_CheckedChanged(object sender, System.EventArgs e) - { - if(checkBoxLater.Checked.Equals(true)) - { - this.finishEnabled=true; - this.nextEnabled=false; - } - else - { - this.finishEnabled=false; - this.nextEnabled=true; - } - this.refreshLook(); - } - - private void btnFinish_Click(object sender, System.EventArgs e) - { - mgmt.AddDatabase(this.m_db); - - if(radioStartYes.Checked==true) - startDatabase(); - this.Dispose(); - } - - private void panel4_Paint(object sender, System.Windows.Forms.PaintEventArgs e) - { - - // Point location= new Point(8,40); - // Size s= new Size(panel4.Size.Width-8,panel4.Size.Height-120); - // lvConfig.Location=location; - // lvConfig.Size=s; - - - } - - private void configureMgmt() - { - //clear old - textOther.Text=""; - textArgs.Text=""; - textCwd.Text=""; - textPath.Text=""; - - textPath.Clear(); - textEnv.Clear(); - textOther.Clear(); - textCwd.Clear(); - textArgs.Clear(); - - textPath.ClearUndo(); - textEnv.ClearUndo(); - textOther.ClearUndo(); - textCwd.ClearUndo(); - textArgs.ClearUndo(); - - - textOther.Enabled=true; - textArgs.Enabled=true; - textCwd.Enabled=true; - textPath.Enabled=true; - - textPath.TabStop=true; - textOther.TabStop=true; - textArgs.TabStop=true; - textCwd.TabStop=true; - textEnv.TabStop=true; - - labelTitle.Text="Mgmtsrvr configuration"; - labelPath.Text="Path to mgmtsrvr binary:"; - labelArgs.Text="Arguments to mgmtsrvr:"; - labelOther.Text="Mgmtsrvr port (-p X):"; - - //get new - String process="mgm." 
+ Convert.ToString(m_noOfConfiguredMgmt+1); - Process mgmt=m_db.getProcessByName(process); - textComputer.Text=mgmt.getComputer().getName(); - textName.Text=mgmt.getName().ToString(); - textDatabase.Text=mgmt.getDatabase().ToString(); - m_mgmHost=mgmt.getComputer().getName(); - textPath.Focus(); - } - private void configureApi() - { - checkBoxReuse.Text="Use the same configuration for ALL API nodes?"; - if(m_nAPI > 1) - { - checkBoxReuse.Visible=true; - checkBoxReuse.Enabled=true; - - } - else - { - checkBoxReuse.Enabled=false; - checkBoxReuse.Visible=true; - } - - // clear previous and get a new api - - textOther.Text=""; - textArgs.Text=""; - //textCwd.Text=""; - //textPath.Text=""; - //get new api - textOther.Enabled=false; - textArgs.Enabled=true; - labelTitle.Text="API node configuration"; - labelPath.Text="Path to api binary:"; - labelArgs.Text="Arguments to api:"; - labelOther.Text="NDB_CONNECTSTRING"; - String process="api." + Convert.ToString(m_noOfConfiguredApi+m_nMGM+m_nNDB+1); - Process api=m_db.getProcessByName(process); - textComputer.Text=api.getComputer().getName(); - textName.Text=api.getName().ToString(); - textOther.Text="nodeid=" + Convert.ToString(m_noOfConfiguredApi+m_nMGM+m_nNDB+1) + ";host="+this.m_mgmHost + ":" + this.m_mgmPort; - textDatabase.Text=api.getDatabase().ToString(); - textPath.Focus(); - } - - private void configureNdb() - { - - - checkBoxReuse.Text="Use the same configuration for ALL NDB nodes?"; - - - if(this.m_nNDB > 1) - { - checkBoxReuse.Visible=true; - checkBoxReuse.Enabled=true; - - } - else - { - checkBoxReuse.Enabled=false; - checkBoxReuse.Visible=true; - } - - - - labelPath.Text="Path to ndb binary:"; - labelArgs.Text="Arguments to ndb:"; - - // clear previous and get a new ndb - - labelOther.Text="NDB_CONNECTSTRING"; - textArgs.Text="-i"; - textOther.Enabled=false; - textArgs.Enabled=false; - - textPath.TabStop=true; - textEnv.TabStop=true; - textOther.TabStop=false; - textArgs.TabStop=false; - textCwd.TabStop=true; - - //textCwd.Text=""; - //textPath.Text=""; - //get new - - String process="ndb." + Convert.ToString(m_noOfConfiguredNdb+m_nMGM+1); - textOther.Text="nodeid=" + Convert.ToString(m_noOfConfiguredNdb+m_nMGM+1) + ";host="+this.m_mgmHost + ":" + this.m_mgmPort; - Process ndb=m_db.getProcessByName(process); - textComputer.Text=ndb.getComputer().getName(); - textName.Text=ndb.getName().ToString(); - textDatabase.Text=ndb.getDatabase().ToString(); - textPath.Focus(); - } - - - public void saveMgm() - { - String process="mgm." 
+ Convert.ToString(m_noOfConfiguredMgmt+1); - Process mgmt=m_db.getProcessByName(process); - mgmt.setOther(textOther.Text.ToString()); - mgmt.setEnv(textEnv.Text.ToString()); - m_mgmPort = textOther.Text.ToString(); - try - { - m_db.setMgmtPort(Convert.ToInt32(m_mgmPort)); - } - catch(Exception e) - { - MessageBox.Show("Port number must be numeric!!!", "Error",MessageBoxButtons.OK); - this.configureMgmt(); - return; - } - mgmt.setPath(textPath.Text.ToString()); - mgmt.setCwd(textCwd.Text.ToString()); - mgmt.setProcessType("permanent"); - mgmt.setArgs("-i initconfig.txt"); - mgmt.setConnectString("nodeid=" + Convert.ToString(m_noOfConfiguredMgmt+1)+";host="+m_mgmHost+":" + m_mgmPort); - this.m_noOfConfiguredMgmt++; - } - - public void saveApi() - { - if(checkBoxReuse.Checked) - { - for(;m_noOfConfiguredApi 0 && textOwner.TextLength > 0) - nextEnabled=true; - else - nextEnabled=false; - - refreshLook(); - } - - private void panel2_Paint(object sender, System.Windows.Forms.PaintEventArgs e) - { - textOwner.Text=System.Environment.UserName; - this.Validate(); - if(textDbName.TextLength > 0 && textOwner.TextLength>0) - { - nextEnabled=true; - } - else - { - nextEnabled=false; - } - refreshLook(); - } - - private void textPath_TextChanged(object sender, System.EventArgs e) - { - try - { - - } - catch (Exception exc) - { - MessageBox.Show(exc.ToString()); - } - } - - private void panel2_Validating(object sender, System.ComponentModel.CancelEventArgs e) - { - if(textOwner.TextLength>0 && textDbName.TextLength > 0) - nextEnabled=true; - else - nextEnabled=false; - } - - - - - - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/Process.cs b/storage/ndb/src/cw/cpcc-win32/csharp/Process.cs deleted file mode 100644 index d861781c737..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/Process.cs +++ /dev/null @@ -1,160 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using System.Data; - -namespace NDB_CPC -{ - /// - /// Summary description for Process. 
- /// - public class Process - { - public enum Status {Running, Stopped, Unknown} - private string m_id; - protected string m_name; - private Status m_status; - private Computer m_computer; - private string m_owner; - private string m_cwd; - private string m_type; - private string m_path; - private string m_other; - private string m_args; - private string m_env; - private string m_database; - private string m_connectString; - private bool m_defined; - public Process( string name, - string owner, string database, - Computer computer) - { - m_name=name; - m_owner=owner; - m_computer=computer; - m_status=Status.Unknown; - m_database=database; - m_defined=false; - m_path=""; - m_cwd=""; - m_args=""; - m_other=""; - } - public Process() - { - - } - public Process(string id) - { - m_id=id; - } - - public Process( string name, - string database, - Computer computer) - { - m_name=name; - m_computer=computer; - m_status=Status.Unknown; - m_database=database; - m_defined=false; - } - - public Process( string name, - Computer computer) - { - m_name=name; - m_computer=computer; - m_status=Status.Unknown; - m_defined=false; - } - - - public string getStatusString() - { - if(m_status.Equals(Status.Running)) - return "Running"; - if(m_status.Equals(Status.Stopped)) - return "Stopped"; - return "Unknown"; - } - - public Computer getComputer() {return m_computer;} - public string getName() {return m_name;} - public string getDatabase() {return m_database;} - public string getOwner() {return m_owner;} - public string getId() {return m_id;} - public void setId(string id) {m_id=id;} - - public void setCwd(string cwd) {m_cwd=cwd;} - public void setPath(string path) {m_path=path;} - public void setArgs(string args) {m_args=args;} - public void setOther(string other) {m_other=other;} - public void setEnv(string env) {m_env=env;} - public void setName(string name) {m_name=name;} - public void setOwner(string owner) {m_owner=owner;} - public void setDatabase(string db) {m_database=db;} - public void setComputer(Computer c) {m_computer=c;} - - - public string getCwd() {return m_cwd;} - public string getPath() {return m_path;} - public string getArgs() {return m_args;} - public string getOther() {return m_other;} - public string getEnv() {return m_env;} - - public bool isDefined() {return m_defined;} - public void setDefined(bool defined) - { - m_defined=defined; - } - - public Status getStatus() - { - return m_status; - } - - public void setConnectString(string cs) - { - m_connectString=cs; - } - - public string getConnectString() - { - return m_connectString; - } - public void setStatus(Status status) - { - m_status=status; - } - - - public void setProcessType(string type) - { - m_type=type; - } - public string getProcessType() - { - return m_type; - } - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs b/storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs deleted file mode 100644 index 814b820c53d..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/ProcessDefineDialog.cs +++ /dev/null @@ -1,451 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; - -namespace NDB_CPC -{ - /// - /// Summary description for ProcessDefineDialog. - /// - public class ProcessDefineDialog : System.Windows.Forms.Form - { - private System.Windows.Forms.ComboBox comboComputer; - private System.Windows.Forms.Label label1; - private System.Windows.Forms.Label label2; - private System.Windows.Forms.Label label3; - private System.Windows.Forms.Label label4; - private System.Windows.Forms.Label label5; - private System.Windows.Forms.Label label6; - private System.Windows.Forms.Label label7; - private System.Windows.Forms.Label label8; - private System.Windows.Forms.Label label9; - private System.Windows.Forms.TextBox textProcessName; - private System.Windows.Forms.TextBox textProcessGroup; - private System.Windows.Forms.TextBox textProcessEnv; - private System.Windows.Forms.TextBox textProcessPath; - private System.Windows.Forms.TextBox textProcessArgs; - private System.Windows.Forms.TextBox textProcessCWD; - private System.Windows.Forms.TextBox textProcessOwner; - private System.Windows.Forms.ComboBox comboType; - private System.Windows.Forms.Label label10; - private System.Windows.Forms.Label label11; - private System.Windows.Forms.Label label12; - private System.Windows.Forms.Label label13; - private System.Windows.Forms.Label label15; - private System.Windows.Forms.Label label16; - private System.Windows.Forms.Label label14; - private System.Windows.Forms.Label label17; - private System.Windows.Forms.Label label18; - private System.Windows.Forms.Button btnAdd; - private System.Windows.Forms.Button btnCancel; - /// - /// Required designer variable. - /// - private System.ComponentModel.Container components = null; - private ComputerMgmt c_mgmt; - private string m_selComputer; - public ProcessDefineDialog(ComputerMgmt mgmt, string computer) - { - - // Required for Windows Form Designer support - // - InitializeComponent(); - - // - // TODO: Add any constructor code after InitializeComponent call - // - m_selComputer =computer; //the selected computer in the TreeView - c_mgmt=mgmt; - } - - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if(components != null) - { - components.Dispose(); - } - } - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. 
- /// - private void InitializeComponent() - { - this.comboComputer = new System.Windows.Forms.ComboBox(); - this.label1 = new System.Windows.Forms.Label(); - this.label2 = new System.Windows.Forms.Label(); - this.label3 = new System.Windows.Forms.Label(); - this.label4 = new System.Windows.Forms.Label(); - this.label5 = new System.Windows.Forms.Label(); - this.label6 = new System.Windows.Forms.Label(); - this.label7 = new System.Windows.Forms.Label(); - this.label8 = new System.Windows.Forms.Label(); - this.label9 = new System.Windows.Forms.Label(); - this.textProcessName = new System.Windows.Forms.TextBox(); - this.textProcessGroup = new System.Windows.Forms.TextBox(); - this.textProcessEnv = new System.Windows.Forms.TextBox(); - this.textProcessPath = new System.Windows.Forms.TextBox(); - this.textProcessArgs = new System.Windows.Forms.TextBox(); - this.textProcessCWD = new System.Windows.Forms.TextBox(); - this.textProcessOwner = new System.Windows.Forms.TextBox(); - this.comboType = new System.Windows.Forms.ComboBox(); - this.label10 = new System.Windows.Forms.Label(); - this.label11 = new System.Windows.Forms.Label(); - this.label12 = new System.Windows.Forms.Label(); - this.label13 = new System.Windows.Forms.Label(); - this.label15 = new System.Windows.Forms.Label(); - this.label16 = new System.Windows.Forms.Label(); - this.label14 = new System.Windows.Forms.Label(); - this.label17 = new System.Windows.Forms.Label(); - this.label18 = new System.Windows.Forms.Label(); - this.btnAdd = new System.Windows.Forms.Button(); - this.btnCancel = new System.Windows.Forms.Button(); - this.SuspendLayout(); - // - // comboComputer - // - this.comboComputer.ItemHeight = 13; - this.comboComputer.Location = new System.Drawing.Point(152, 24); - this.comboComputer.Name = "comboComputer"; - this.comboComputer.Size = new System.Drawing.Size(112, 21); - this.comboComputer.TabIndex = 0; - // - // label1 - // - this.label1.Location = new System.Drawing.Point(24, 24); - this.label1.Name = "label1"; - this.label1.Size = new System.Drawing.Size(64, 24); - this.label1.TabIndex = 1; - this.label1.Text = "Computer:"; - // - // label2 - // - this.label2.Location = new System.Drawing.Point(24, 48); - this.label2.Name = "label2"; - this.label2.Size = new System.Drawing.Size(88, 24); - this.label2.TabIndex = 2; - this.label2.Text = "Process name:"; - // - // label3 - // - this.label3.Location = new System.Drawing.Point(24, 72); - this.label3.Name = "label3"; - this.label3.Size = new System.Drawing.Size(88, 24); - this.label3.TabIndex = 3; - this.label3.Text = "Group:"; - // - // label4 - // - this.label4.Location = new System.Drawing.Point(24, 96); - this.label4.Name = "label4"; - this.label4.Size = new System.Drawing.Size(88, 24); - this.label4.TabIndex = 4; - this.label4.Text = "Env. 
variables:"; - // - // label5 - // - this.label5.Location = new System.Drawing.Point(24, 120); - this.label5.Name = "label5"; - this.label5.Size = new System.Drawing.Size(88, 24); - this.label5.TabIndex = 5; - this.label5.Text = "Path to binary:"; - // - // label6 - // - this.label6.Location = new System.Drawing.Point(24, 144); - this.label6.Name = "label6"; - this.label6.Size = new System.Drawing.Size(112, 24); - this.label6.TabIndex = 6; - this.label6.Text = "Arguments to binary:"; - // - // label7 - // - this.label7.Location = new System.Drawing.Point(24, 168); - this.label7.Name = "label7"; - this.label7.Size = new System.Drawing.Size(112, 24); - this.label7.TabIndex = 7; - this.label7.Text = "Type of process:"; - // - // label8 - // - this.label8.Location = new System.Drawing.Point(24, 192); - this.label8.Name = "label8"; - this.label8.Size = new System.Drawing.Size(112, 24); - this.label8.TabIndex = 8; - this.label8.Text = "Current working dir.:"; - // - // label9 - // - this.label9.Location = new System.Drawing.Point(24, 216); - this.label9.Name = "label9"; - this.label9.Size = new System.Drawing.Size(112, 24); - this.label9.TabIndex = 9; - this.label9.Text = "Owner:"; - // - // textProcessName - // - this.textProcessName.Location = new System.Drawing.Point(152, 48); - this.textProcessName.Name = "textProcessName"; - this.textProcessName.Size = new System.Drawing.Size(112, 20); - this.textProcessName.TabIndex = 1; - this.textProcessName.Text = ""; - // - // textProcessGroup - // - this.textProcessGroup.Location = new System.Drawing.Point(152, 72); - this.textProcessGroup.Name = "textProcessGroup"; - this.textProcessGroup.Size = new System.Drawing.Size(112, 20); - this.textProcessGroup.TabIndex = 2; - this.textProcessGroup.Text = ""; - // - // textProcessEnv - // - this.textProcessEnv.Location = new System.Drawing.Point(152, 96); - this.textProcessEnv.Name = "textProcessEnv"; - this.textProcessEnv.Size = new System.Drawing.Size(112, 20); - this.textProcessEnv.TabIndex = 3; - this.textProcessEnv.Text = ""; - // - // textProcessPath - // - this.textProcessPath.Location = new System.Drawing.Point(152, 120); - this.textProcessPath.Name = "textProcessPath"; - this.textProcessPath.Size = new System.Drawing.Size(112, 20); - this.textProcessPath.TabIndex = 4; - this.textProcessPath.Text = ""; - // - // textProcessArgs - // - this.textProcessArgs.Location = new System.Drawing.Point(152, 144); - this.textProcessArgs.Name = "textProcessArgs"; - this.textProcessArgs.Size = new System.Drawing.Size(112, 20); - this.textProcessArgs.TabIndex = 5; - this.textProcessArgs.Text = ""; - // - // textProcessCWD - // - this.textProcessCWD.Location = new System.Drawing.Point(152, 192); - this.textProcessCWD.Name = "textProcessCWD"; - this.textProcessCWD.Size = new System.Drawing.Size(112, 20); - this.textProcessCWD.TabIndex = 7; - this.textProcessCWD.Text = ""; - // - // textProcessOwner - // - this.textProcessOwner.Location = new System.Drawing.Point(152, 216); - this.textProcessOwner.Name = "textProcessOwner"; - this.textProcessOwner.Size = new System.Drawing.Size(112, 20); - this.textProcessOwner.TabIndex = 8; - this.textProcessOwner.Text = ""; - // - // comboType - // - this.comboType.ItemHeight = 13; - this.comboType.Items.AddRange(new object[] { - "Permanent", - "Interactive"}); - this.comboType.Location = new System.Drawing.Point(152, 168); - this.comboType.Name = "comboType"; - this.comboType.Size = new System.Drawing.Size(112, 21); - this.comboType.TabIndex = 6; - // - // label10 - // - 
this.label10.Location = new System.Drawing.Point(272, 32); - this.label10.Name = "label10"; - this.label10.Size = new System.Drawing.Size(88, 16); - this.label10.TabIndex = 19; - this.label10.Text = "(Mandatory)"; - // - // label11 - // - this.label11.Location = new System.Drawing.Point(272, 56); - this.label11.Name = "label11"; - this.label11.Size = new System.Drawing.Size(88, 16); - this.label11.TabIndex = 20; - this.label11.Text = "(Mandatory)"; - // - // label12 - // - this.label12.Location = new System.Drawing.Point(272, 80); - this.label12.Name = "label12"; - this.label12.Size = new System.Drawing.Size(88, 16); - this.label12.TabIndex = 21; - this.label12.Text = "(Mandatory)"; - // - // label13 - // - this.label13.Location = new System.Drawing.Point(272, 127); - this.label13.Name = "label13"; - this.label13.Size = new System.Drawing.Size(88, 16); - this.label13.TabIndex = 22; - this.label13.Text = "(Mandatory)"; - // - // label15 - // - this.label15.Location = new System.Drawing.Point(272, 176); - this.label15.Name = "label15"; - this.label15.Size = new System.Drawing.Size(88, 16); - this.label15.TabIndex = 24; - this.label15.Text = "(Mandatory)"; - // - // label16 - // - this.label16.Location = new System.Drawing.Point(272, 200); - this.label16.Name = "label16"; - this.label16.Size = new System.Drawing.Size(88, 16); - this.label16.TabIndex = 25; - this.label16.Text = "(Mandatory)"; - // - // label14 - // - this.label14.Location = new System.Drawing.Point(272, 224); - this.label14.Name = "label14"; - this.label14.Size = new System.Drawing.Size(88, 16); - this.label14.TabIndex = 26; - this.label14.Text = "(Mandatory)"; - // - // label17 - // - this.label17.Location = new System.Drawing.Point(272, 104); - this.label17.Name = "label17"; - this.label17.Size = new System.Drawing.Size(88, 16); - this.label17.TabIndex = 27; - this.label17.Text = "(Optional)"; - // - // label18 - // - this.label18.Location = new System.Drawing.Point(272, 152); - this.label18.Name = "label18"; - this.label18.Size = new System.Drawing.Size(88, 16); - this.label18.TabIndex = 28; - this.label18.Text = "(Optional)"; - // - // btnAdd - // - this.btnAdd.Location = new System.Drawing.Point(288, 248); - this.btnAdd.Name = "btnAdd"; - this.btnAdd.TabIndex = 9; - this.btnAdd.Text = "Define..."; - this.btnAdd.Click += new System.EventHandler(this.btnAdd_Click); - // - // btnCancel - // - this.btnCancel.Location = new System.Drawing.Point(152, 248); - this.btnCancel.Name = "btnCancel"; - this.btnCancel.TabIndex = 10; - this.btnCancel.Text = "Cancel"; - this.btnCancel.Click += new System.EventHandler(this.btnCancel_Click); - // - // ProcessDefineDialog - // - this.AutoScaleBaseSize = new System.Drawing.Size(5, 13); - this.ClientSize = new System.Drawing.Size(370, 279); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.btnCancel, - this.btnAdd, - this.label18, - this.label17, - this.label14, - this.label16, - this.label15, - this.label13, - this.label12, - this.label11, - this.label10, - this.comboType, - this.textProcessOwner, - this.textProcessCWD, - this.textProcessArgs, - this.textProcessPath, - this.textProcessEnv, - this.textProcessGroup, - this.textProcessName, - this.label9, - this.label8, - this.label7, - this.label6, - this.label5, - this.label4, - this.label3, - this.label2, - this.label1, - this.comboComputer}); - this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedDialog; - this.MaximizeBox = false; - this.MinimizeBox = false; - this.Name = "ProcessDefineDialog"; - 
this.StartPosition = System.Windows.Forms.FormStartPosition.CenterParent; - this.Text = "Define Process"; - this.Load += new System.EventHandler(this.ProcessDefineDialog_Load); - this.ResumeLayout(false); - - } - #endregion - - private void btnCancel_Click(object sender, System.EventArgs e) - { - this.Dispose(); - this.Close(); - } - - private void btnAdd_Click(object sender, System.EventArgs e) - { - //TODO: ERROR CHECK - - Computer c; - c=c_mgmt.getComputer(this.m_selComputer); - - c.addProcess(new Process(this.textProcessName.Text.ToString(), - this.textProcessOwner.Text.ToString(), - this.textProcessGroup.Text.ToString(), - c)); - this.Close(); - this.Dispose(); - } - - private void ProcessDefineDialog_Load(object sender, System.EventArgs e) - { - comboType.SelectedIndex=0; - ArrayList list = c_mgmt.getComputerCollection(); - int i=0, selIndex=0; - foreach(Computer computer in list) - { - this.comboComputer.Items.Add(computer.getName()); - if(computer.getName().Equals(m_selComputer)) - selIndex=i; - i++; - } - comboComputer.SelectedIndex=selIndex; - } - - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs b/storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs deleted file mode 100644 index 246c108c661..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/fileaccess/FileMgmt.cs +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Text; -using System.Collections.Specialized; -using System.IO; -using System.Windows.Forms; -namespace NDB_CPC.fileaccess -{ - /// - /// Summary description for FileMgmt. - /// - public class FileMgmt - { - public FileMgmt() - { - } - - public StringCollection importHostFile(string filename) - { - StringCollection sc = new StringCollection(); - StreamReader SR = new StreamReader(filename); - string line =""; - line = SR.ReadLine(); - while(!line.Equals("")) - { - sc.Add(line); - line = SR.ReadLine(); - } - return sc; - } - - public void exportHostFile(string filename, string content) - { - StreamWriter SW = new StreamWriter(filename,false); - SW.Write(content); - SW.WriteLine(""); - SW.WriteLine(""); - SW.Close(); - } - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs b/storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs deleted file mode 100644 index 1b55d2c2ab8..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/simpleparser/SimpleCPCParser.cs +++ /dev/null @@ -1,376 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Collections; -using System.IO; -using System.Windows.Forms; -using NDB_CPC; -using NDB_CPC.socketcomm; - -namespace NDB_CPC.simpleparser -{ - /// - /// Summary description for SimpleCPCParser. - /// - public class SimpleCPCParser - { - public SimpleCPCParser() - { - // - // TODO: Add constructor logic here - // - } - - public static void parse(Process p, SocketComm comm) - { - - string line=comm.readLine();//reader.ReadLine(); - while(line.Equals("")) - { - line=comm.readLine(); - } - if(line.Equals("define process")) - { - defineProcess(p, comm); - line=""; - return; - } - if(line.Equals("start process")) - { - startProcess(p,comm); - line=""; - return; - } - if(line.Equals("stop process")) - { - stopProcess(p,comm); - line=""; - return; - } - if(line.Equals("undefine process")) - { - undefineProcess(p,comm); - line=""; - return; - } - - } - - public static void parse(ArrayList processes, Computer c, SocketComm comm) - { - - string line=comm.readLine();//reader.ReadLine(); - while(line.Equals("")) - { - line=comm.readLine(); - } - - if(line.Equals("start processes")) - { - listProcesses(processes, c, comm); - line=""; - return; - } - - } - - private static void defineProcess(Process p, SocketComm comm) - { - string line=comm.readLine();//reader.ReadLine(); - while(!line.Equals("")) - { - if(line.StartsWith("status:")) - { - line=line.Remove(0,7); - line=line.Trim(); - if(line.Equals("1")) - { - p.setDefined(true); - p.setStatus(Process.Status.Stopped); - } - else - p.setDefined(false); - } - if(line.StartsWith("id:")) - { - line=line.Remove(0,3); - line=line.Trim(); - p.setId(line); - } - line=comm.readLine(); - } - } - - - private static void startProcess(Process p, SocketComm comm) - { - string line=comm.readLine();//reader.ReadLine(); - while(!line.Equals("")) - { - if(line.StartsWith("status:")) - { - line=line.Remove(0,7); - line=line.Trim(); - if(line.Equals("1")) - p.setStatus(NDB_CPC.Process.Status.Running); - else - p.setStatus(NDB_CPC.Process.Status.Unknown); - - } - if(line.StartsWith("id:")) - { - line=line.Remove(0,3); - line=line.Trim(); - if(p.getId().Equals(line)) - { - ; - } - else - { - //damn something is wrong - p.setStatus(NDB_CPC.Process.Status.Unknown); - } - - } - line=comm.readLine(); - } - } - private static void undefineProcess(Process p, SocketComm comm) - { - string line=comm.readLine();//reader.ReadLine(); - while(!line.Equals("")) - { - if(line.StartsWith("status:")) - { - - line=line.Remove(0,7); - line=line.Trim(); - if(line.Equals("1")) - p.setDefined(false); - else - p.setDefined(true); - - } - if(line.StartsWith("id:")) - { - line=line.Remove(0,3); - line=line.Trim(); - } - line=comm.readLine(); - } - } - - private static void stopProcess(Process p, SocketComm comm) - { - string line=comm.readLine();//reader.ReadLine(); - while(!line.Equals("")) - { - if(line.StartsWith("status:")) - { - line=line.Remove(0,7); - line=line.Trim(); - if(line.Equals("1")) - p.setStatus(NDB_CPC.Process.Status.Stopped); - else - p.setStatus(NDB_CPC.Process.Status.Unknown); - - } - 
if(line.StartsWith("id:")) - { - line=line.Remove(0,3); - line=line.Trim(); - if(p.getId().Equals(line)) - { - ; - } - else - { - //damn something is wrong - p.setStatus(NDB_CPC.Process.Status.Unknown); - } - - } - line=comm.readLine(); - } - } - private static void listProcesses(ArrayList processes, Computer c, SocketComm comm) - { - bool processExist = false; - - string line=comm.readLine();//reader.ReadLine(); - while(!line.Equals("end processes")) - { - if(line.Equals("process")) - { - line=comm.readLine(); - Process p = new Process(); - - while(!line.Equals("")) - { - if(line.StartsWith("id:")) - { - string pid; - line=line.Remove(0,3); - pid=line.Trim(); - /*check if process already exist*/ - processExist=findProcess(processes,pid); - if(!processExist) - { - p.setId(pid); - } - } - - if(line.StartsWith("name:")) - { - - line=line.Remove(0,5); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setName(line); - } - } - - if(line.StartsWith("path:")) - { - - line=line.Remove(0,5); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setPath(line); - } - } - - if(line.StartsWith("args:")) - { - - line=line.Remove(0,5); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setArgs(line); - } - } - - if(line.StartsWith("type:")) - { - - line=line.Remove(0,5); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - - } - } - - if(line.StartsWith("cwd:")) - { - - line=line.Remove(0,4); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setCwd(line); - } - } - - if(line.StartsWith("env:")) - { - - line=line.Remove(0,4); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setEnv(line); - } - } - - if(line.StartsWith("owner:")) - { - - line=line.Remove(0,6); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setOwner(line); - } - } - if(line.StartsWith("group:")) - { - - line=line.Remove(0,6); - line=line.Trim(); - /*check if process already exist*/ - if(!processExist) - { - p.setDatabase(line); - } - } - - if(line.StartsWith("status:")) - { - - line=line.Remove(0,7); - line=line.Trim(); - /*check if process already exist*/ - //if(!processExist) - //{ - if(line.Equals("0")) - p.setStatus(Process.Status.Stopped); - if(line.Equals("1")) - p.setStatus(Process.Status.Running); - if(line.Equals("2")) - p.setStatus(Process.Status.Unknown); - //} - } - - - line=comm.readLine(); - } - if(!processExist) - { - p.setComputer(c); - p.setDefined(true); - processes.Add(p); - } - processExist=false; - } - line=comm.readLine(); - - } - } - - private static bool findProcess(ArrayList processes, string pid) - { - foreach (Process p in processes) - { - if(p.getId().Equals(pid)) - return true; - } - return false; - - } - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs deleted file mode 100644 index 34678086057..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/SocketComm.cs +++ /dev/null @@ -1,223 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Net; -using System.Net.Sockets; -using System.Text; -using System.Windows.Forms; -using System.Threading; -using System.IO; - -namespace NDB_CPC.socketcomm -{ - /// - /// Summary description for SocketComm. - /// - public class SocketComm - { - private myTcpClient sender; - private StreamWriter writer; - private StreamReader reader; - private string m_host; - private int m_port; - private bool m_connected; - private bool m_connecting; - private Thread connectThread; - public SocketComm(string host, int port) - { - - m_host=host; - m_port=port; - m_connected=false; - m_connecting=false; - } - - - - public bool isConnected() - { - return m_connected; - } - - public void doConnect() - { - if(!m_connecting && !m_connected) - { - connectThread= new Thread(new ThreadStart(connect)); - connectThread.Start(); - } - - } - - private void connect() - { - m_connecting=true; - while(true) - { - if(!m_connected) - { - try - { - // Establish the remote endpoint for the socket. - // The name of the - // remote device is "host.contoso.com". - - // Create a TCP/IP socket. - sender = new myTcpClient(); - // Connect the socket to the remote endpoint. Catch any errors. - try - { - /* - IPAddress ipAddress = Dns.Resolve(host).AddressList[0]; - IPEndPoint ipLocalEndPoint = new IPEndPoint(ipAddress, 11000); -*/ - - - sender.Connect(m_host,m_port);; - - writer = new StreamWriter(sender.GetStream(), Encoding.ASCII); - reader = new StreamReader(sender.GetStream(), Encoding.ASCII); - m_connected=true; - m_connecting=false; - // break; - Console.WriteLine("Socket connected to {0}", - sender.ToString()); - - } - catch (ArgumentNullException ane) - { - Console.WriteLine("ArgumentNullException : {0}",ane.ToString()); - m_connected=false; - } - catch (SocketException se) - { - Console.WriteLine("SocketException : {0}",se.ToString()); - m_connected=false; - } - } - catch (Exception e) - { - Console.WriteLine("Unexpected exception : {0}", e.ToString()); - m_connected=false; - } - - } - - Thread.Sleep(200); - } - } - - public bool disconnect() - { - try - { - this.m_connected=false; - this.m_connecting=false; - sender.GetUnderlyingSocket().Shutdown(SocketShutdown.Both); - sender.GetUnderlyingSocket().Close(); - writer.Close(); - reader.Close(); - sender.Close(); - - } - catch (ArgumentNullException ane) - { - Console.WriteLine("ArgumentNullException : {0}",ane.ToString()); - connectThread.Abort(); - return false; - } - catch (SocketException se) - { - Console.WriteLine("SocketException : {0}",se.ToString()); - connectThread.Abort(); - return false; - } - catch (Exception e) - { - Console.WriteLine("Unexpected exception : {0}", e.ToString()); - connectThread.Abort(); - return false; - } - connectThread.Abort(); - return true; - } - - public bool writeMessage(string message) - { - int attempts=0; - while (attempts < 10) - { - try - { - writer.WriteLine(message); - writer.Flush(); - message=""; - return true; - } - catch(IOException e) - { - this.disconnect(); - this.doConnect(); - Thread.Sleep(200); - attempts++; - } - 
catch(System.NullReferenceException) - { - this.disconnect(); - this.doConnect(); - - Thread.Sleep(200); - attempts++; - } - } - return false; - } - - public string readLine() - { - int attempts=0; - string line=""; - while (attempts < 10){ - try - { - line = reader.ReadLine(); - if(line==null) - line=""; - return line; - } - catch(IOException e) - { - this.disconnect(); - this.doConnect(); - Thread.Sleep(400); - attempts++; - } - catch(System.NullReferenceException) - { - this.disconnect(); - this.doConnect(); - Thread.Sleep(400); - attempts++; - } - } - return ""; - - } - - } -} - diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs b/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs deleted file mode 100644 index 20d86477b3a..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/socketcomm/myTcpClient.cs +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Net; -using System.Net.Sockets; -using System.Text; -using System.Threading; -using System.IO; - - -namespace NDB_CPC.socketcomm -{ - public class myTcpClient : TcpClient - { - private Socket s; - public myTcpClient(): base() - { - if(this.Active) - { - s = this.Client; - } - } - public Socket GetUnderlyingSocket() - { - return s; - } - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs b/storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs deleted file mode 100644 index 9a702b9fc9e..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/startDatabaseDlg.cs +++ /dev/null @@ -1,267 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using NDB_CPC.simpleparser; - -namespace NDB_CPC -{ - /// - /// Summary description for startDatabase. - /// - public class startDatabaseDlg : System.Windows.Forms.Form - { - private System.Windows.Forms.TextBox textAction; - private System.Windows.Forms.Label label1; - /// - /// Required designer variable. 
- /// - private System.ComponentModel.Container components = null; - private System.Windows.Forms.ProgressBar progressBar; - private System.Windows.Forms.Label label2; - private System.Windows.Forms.Button buttonGo; - private Database m_db; - public startDatabaseDlg(Database db) - { - - // - // Required for Windows Form Designer support - // - InitializeComponent(); - - // - // TODO: Add any constructor code after InitializeComponent call - // - m_db=db; - } - - /// - /// Clean up any resources being used. - /// - protected override void Dispose( bool disposing ) - { - if( disposing ) - { - if(components != null) - { - components.Dispose(); - } - } - base.Dispose( disposing ); - } - - #region Windows Form Designer generated code - /// - /// Required method for Designer support - do not modify - /// the contents of this method with the code editor. - /// - private void InitializeComponent() - { - this.textAction = new System.Windows.Forms.TextBox(); - this.label1 = new System.Windows.Forms.Label(); - this.progressBar = new System.Windows.Forms.ProgressBar(); - this.label2 = new System.Windows.Forms.Label(); - this.buttonGo = new System.Windows.Forms.Button(); - this.SuspendLayout(); - // - // textAction - // - this.textAction.Location = new System.Drawing.Point(104, 40); - this.textAction.Name = "textAction"; - this.textAction.ReadOnly = true; - this.textAction.Size = new System.Drawing.Size(256, 20); - this.textAction.TabIndex = 0; - this.textAction.Text = ""; - // - // label1 - // - this.label1.Location = new System.Drawing.Point(8, 40); - this.label1.Name = "label1"; - this.label1.Size = new System.Drawing.Size(96, 16); - this.label1.TabIndex = 1; - this.label1.Text = "Current activity:"; - this.label1.TextAlign = System.Drawing.ContentAlignment.MiddleRight; - // - // progressBar - // - this.progressBar.Location = new System.Drawing.Point(104, 88); - this.progressBar.Name = "progressBar"; - this.progressBar.Size = new System.Drawing.Size(152, 16); - this.progressBar.TabIndex = 2; - // - // label2 - // - this.label2.Location = new System.Drawing.Point(8, 88); - this.label2.Name = "label2"; - this.label2.Size = new System.Drawing.Size(96, 16); - this.label2.TabIndex = 3; - this.label2.Text = "Activity progress:"; - this.label2.TextAlign = System.Drawing.ContentAlignment.MiddleCenter; - // - // buttonGo - // - this.buttonGo.Location = new System.Drawing.Point(152, 136); - this.buttonGo.Name = "buttonGo"; - this.buttonGo.Size = new System.Drawing.Size(96, 24); - this.buttonGo.TabIndex = 4; - this.buttonGo.Text = "Go!"; - this.buttonGo.Click += new System.EventHandler(this.buttonGo_Click); - // - // startDatabaseDlg - // - this.AutoScaleBaseSize = new System.Drawing.Size(5, 13); - this.ClientSize = new System.Drawing.Size(378, 167); - this.Controls.AddRange(new System.Windows.Forms.Control[] { - this.buttonGo, - this.label2, - this.progressBar, - this.label1, - this.textAction}); - this.FormBorderStyle = System.Windows.Forms.FormBorderStyle.FixedSingle; - this.MaximizeBox = false; - this.MinimizeBox = false; - this.Name = "startDatabaseDlg"; - this.Text = "Starting database"; - this.Load += new System.EventHandler(this.startDatabase_Load); - this.Paint += new System.Windows.Forms.PaintEventHandler(this.startDatabase_Paint); - this.ResumeLayout(false); - - } - #endregion - - private void startDatabase_Load(object sender, System.EventArgs e) - { - - } - - private void startDatabase_Paint(object sender, System.Windows.Forms.PaintEventArgs e) - { - - - - } - private void defineProcesses() - { - 
ArrayList processes = m_db.getProcesses(); - progressBar.Maximum = processes.Count; - progressBar.Minimum = 0; - - int retry=0; - //sc.connect("130.100.232.7"); - foreach (Process p in processes) - { - Computer comp; - retry=0; - //if(p.getName().StartsWith("ndb") || p.getName().StartsWith("mgm")) - //{ - textAction.Text="Defining process " + p.getName(); - textAction.Refresh(); - comp=p.getComputer(); - while(retry<10) - { - if(!comp.isConnected()) - { - comp.connectToCpcd(); - - } - else - { - if(comp.defineProcess(p)<0) - { - ; - } - else - break; - } - if(retry==9) - { - if(MessageBox.Show(this,"Failed to define process. Try again?","Warning!!!",MessageBoxButtons.YesNo)==DialogResult.Yes) - retry=0; - } - retry++; - //comp.undefineProcess(p); - } - //} - progressBar.PerformStep(); - } - } - - private void startProcesses() - { - - ArrayList processes = m_db.getProcesses(); - progressBar.Maximum = processes.Count; - progressBar.Minimum = 0; - string start = "start process \n"; - - int retry=0; - //sc.connect("130.100.232.7"); - foreach (Process p in processes) - { - Computer comp; - if((p.getName().StartsWith("ndb")) || (p.getName().StartsWith("mgm"))) - { - textAction.Text="Starting process " + p.getName(); - textAction.Refresh(); - start = start + "id:" + p.getId() + "\n\n"; - comp=p.getComputer(); - while(retry<10) - { - if(!comp.isConnected()) - { - comp.connectToCpcd(); - } - else - { - if(comp.startProcess(p)<0) - { - ; - } - else - break; - } - if(retry==9) - { - if(MessageBox.Show(this,"Failed to start process. Retry again?","Warning!!!",MessageBoxButtons.YesNo)==DialogResult.Yes) - retry=0; - } - - retry++; - } - } - progressBar.PerformStep(); - - } - - } - - private void buttonGo_Click(object sender, System.EventArgs e) - { - buttonGo.Enabled=false; - progressBar.Step=1; - defineProcesses(); - progressBar.Value=0; - startProcesses(); - - } - - - } -} diff --git a/storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs b/storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs deleted file mode 100644 index fda0fc937e1..00000000000 --- a/storage/ndb/src/cw/cpcc-win32/csharp/telnetclient/telnetClient.cs +++ /dev/null @@ -1,424 +0,0 @@ -/* Copyright (c) 2004, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -using System; -using System.Drawing; -using System.Collections; -using System.ComponentModel; -using System.Windows.Forms; -using System.Data; -using System.Net; -using System.Net.Sockets; -using System.Text; -using System.IO; -using System.Threading ; - -namespace NDB_CPC.telnetclient -{ - /// - /// Summary description for telnetClient. 
- /// - public class telnetClient - { - Char IAC = Convert.ToChar(255); - Char DO = Convert.ToChar(253); - Char DONT = Convert.ToChar(254); - Char WILL = Convert.ToChar(251); - Char WONT = Convert.ToChar(252); - Char SB = Convert.ToChar(250); - Char SE = Convert.ToChar(240); - const Char IS = '0'; - const Char SEND = '1'; - const Char INFO = '2'; - const Char VAR = '0'; - const Char VALUE = '1'; - const Char ESC = '2'; - const Char USERVAR = '3'; - string m_strResp; - - private ArrayList m_ListOptions = new ArrayList(); - private IPEndPoint iep ; - private AsyncCallback callbackProc ; - private string address ; - private int port ; - private Socket s ; - private TextBox textBox1; - Byte[] m_byBuff = new Byte[32767]; - - - public telnetClient(string ip, int p, TextBox tb) - { - - address = ip; - port = p; - textBox1=tb; - IPHostEntry IPHost = Dns.Resolve(address); - string []aliases = IPHost.Aliases; - IPAddress[] addr = IPHost.AddressList; - - try - { - // Create New Socket - s = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); - // Create New EndPoint - iep = new IPEndPoint(addr[0],port); - // This is a non blocking IO - s.Blocking = false ; - // Assign Callback function to read from Asyncronous Socket - callbackProc = new AsyncCallback(ConnectCallback); - // Begin Asyncronous Connection - s.BeginConnect(iep , callbackProc, s ) ; - - } - catch(Exception eeeee ) - { - MessageBox.Show(eeeee.Message , "Application Error!!!" , MessageBoxButtons.OK , MessageBoxIcon.Stop ); - Application.Exit(); - } - } - - public void ConnectCallback( IAsyncResult ar ) - { - try - { - // Get The connection socket from the callback - Socket sock1 = (Socket)ar.AsyncState; - if ( sock1.Connected ) - { - // Define a new Callback to read the data - AsyncCallback recieveData = new AsyncCallback( OnRecievedData ); - // Begin reading data asyncronously - sock1.BeginReceive( m_byBuff, 0, m_byBuff.Length, SocketFlags.None, recieveData , sock1 ); - } - } - catch( Exception ex ) - { - MessageBox.Show(ex.Message, "Setup Recieve callbackProc failed!" ); - } - } - - - public void OnRecievedData( IAsyncResult ar ) - { - // Get The connection socket from the callback - Socket sock = (Socket)ar.AsyncState; - // Get The data , if any - int nBytesRec = sock.EndReceive( ar ); - if( nBytesRec > 0 ) - { - string sRecieved = Encoding.ASCII.GetString( m_byBuff, 0, nBytesRec ); - string m_strLine=""; - for ( int i=0; i < nBytesRec;i++) - { - Char ch = Convert.ToChar(m_byBuff[i]); - switch( ch ) - { - case '\r': - m_strLine += Convert.ToString("\r\n"); - break; - case '\n': - break; - default: - m_strLine += Convert.ToString(ch); - break; - } - } - try - { - int strLinelen = m_strLine.Length ; - if ( strLinelen == 0 ) - { - m_strLine = Convert.ToString("\r\n"); - } - - Byte[] mToProcess = new Byte[strLinelen]; - for ( int i=0; i < strLinelen ; i++) - mToProcess[i] = Convert.ToByte(m_strLine[i]); - // Process the incoming data - string mOutText = ProcessOptions(mToProcess); - if ( mOutText != "" ) - textBox1.AppendText(mOutText); - - // Respond to any incoming commands - RespondToOptions(); - } - catch( Exception ex ) - { - Object x = this ; - MessageBox.Show(ex.Message , "Information!" 
); - } - } - else - { - // If no data was recieved then the connection is probably dead - Console.WriteLine( "Disconnected", sock.RemoteEndPoint ); - sock.Shutdown( SocketShutdown.Both ); - sock.Close(); - Application.Exit(); - } - } - - private string ProcessOptions(byte[] m_strLineToProcess) - { - string m_DISPLAYTEXT =""; - string m_strTemp ="" ; - string m_strOption =""; - string m_strNormalText =""; - bool bScanDone =false; - int ndx =0; - int ldx =0; - char ch ; - try - { - for ( int i=0; i < m_strLineToProcess.Length ; i++) - { - Char ss = Convert.ToChar(m_strLineToProcess[i]); - m_strTemp = m_strTemp + Convert.ToString(ss); - } - - while(bScanDone != true ) - { - int lensmk = m_strTemp.Length; - ndx = m_strTemp.IndexOf(Convert.ToString(IAC)); - if ( ndx > lensmk ) - ndx = m_strTemp.Length; - - if(ndx != -1) - { - m_DISPLAYTEXT+= m_strTemp.Substring(0,ndx); - ch = m_strTemp[ndx + 1]; - if ( ch == DO || ch == DONT || ch == WILL || ch == WONT ) - { - m_strOption = m_strTemp.Substring(ndx, 3); - string txt = m_strTemp.Substring(ndx + 3); - m_DISPLAYTEXT+= m_strTemp.Substring(0,ndx); - m_ListOptions.Add(m_strOption); - m_strTemp = txt ; - } - else - if ( ch == IAC) - { - m_DISPLAYTEXT= m_strTemp.Substring(0,ndx); - m_strTemp = m_strTemp.Substring(ndx + 1); - } - else - if ( ch == SB ) - { - m_DISPLAYTEXT= m_strTemp.Substring(0,ndx); - ldx = m_strTemp.IndexOf(Convert.ToString(SE)); - m_strOption = m_strTemp.Substring(ndx, ldx); - m_ListOptions.Add(m_strOption); - m_strTemp = m_strTemp.Substring(ldx); - } - } - else - { - m_DISPLAYTEXT = m_DISPLAYTEXT + m_strTemp; - bScanDone = true ; - } - } - m_strNormalText = m_DISPLAYTEXT; - } - catch(Exception eP) - { - MessageBox.Show(eP.Message , "Application Error!!!" , MessageBoxButtons.OK , MessageBoxIcon.Stop ); - Application.Exit(); - } - return m_strNormalText ; - } - - void DispatchMessage(string strText) - { - try - { - Byte[] smk = new Byte[strText.Length]; - for ( int i=0; i < strText.Length ; i++) - { - Byte ss = Convert.ToByte(strText[i]); - smk[i] = ss ; - } - - IAsyncResult ar2 = s.BeginSend(smk , 0 , smk.Length , SocketFlags.None , callbackProc , s ); - s.EndSend(ar2); - } - catch(Exception ers) - { - MessageBox.Show("ERROR IN RESPOND OPTIONS"); - } - } - - void RespondToOptions() - { - try - { - string strOption; - for ( int i=0; i < m_ListOptions.Count; i++) - { - strOption = (string)m_ListOptions[i]; - ArrangeReply(strOption); - } - DispatchMessage(m_strResp); - m_strResp =""; - m_ListOptions.Clear(); - } - catch(Exception ers) - { - MessageBox.Show("ERROR IN RESPOND OPTIONS"); - } - } - void ArrangeReply(string strOption) - { - try - { - - Char Verb; - Char Option; - Char Modifier; - Char ch; - bool bDefined = false; - - if(strOption.Length < 3) return; - - Verb = strOption[1]; - Option = strOption[2]; - - if ( Option == 1 || Option == 3 ) - { - // case 1: // Echo - // case 3: // Suppress Go-Ahead - bDefined = true; - // break; - } - - m_strResp += IAC; - - if(bDefined == true ) - { - if ( Verb == DO ) - { - // case DO: - ch = WILL; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == DONT ) - { - ch = WONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == WILL ) - { - ch = DO; - m_strResp += ch; - m_strResp += Option; - //break; - } - if ( Verb == WONT) - { - ch = DONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == SB) - { - Modifier = strOption[3]; - if(Modifier == SEND) - { - ch = SB; - m_strResp += ch; - m_strResp += Option; - m_strResp += IS; - m_strResp 
+= IAC; - m_strResp += SE; - } - // break; - } - } - else - { - // switch(Verb) - // { - if ( Verb == DO ) - { - ch = WONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == DONT) - { - ch = WONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == WILL) - { - ch = DONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - if ( Verb == WONT) - { - ch = DONT; - m_strResp += ch; - m_strResp += Option; - // break; - } - } - } - catch(Exception eeeee ) - { - MessageBox.Show(eeeee.Message , "Application Error!!!" , MessageBoxButtons.OK , MessageBoxIcon.Stop ); - Application.Exit(); - } - - } - - private void textBox1_KeyPress_1(object sender, System.Windows.Forms.KeyPressEventArgs e) - { - if ( e.KeyChar == 13 ) - { - DispatchMessage("\r\n"); - } - else - if ( e.KeyChar == 8 ) - { - try - { -// string mtmp = textBox1.Text.Substring(0,textBox1.Text.Length-1); -// textBox1.Text = "" ; - } - catch(Exception ebs) - { - MessageBox.Show("ERROR IN BACKSPACE"); - } - } - else - { - string str = e.KeyChar.ToString(); - DispatchMessage(str); - } - } - - - } -} diff --git a/storage/ndb/src/cw/cpcd/APIService.cpp b/storage/ndb/src/cw/cpcd/APIService.cpp deleted file mode 100644 index b8f2b7d5f05..00000000000 --- a/storage/ndb/src/cw/cpcd/APIService.cpp +++ /dev/null @@ -1,401 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include -#include - -#include "APIService.hpp" -#include "CPCD.hpp" -#include -#include - -/** - const char * name; - const char * realName; - const Type type; - const ArgType argType; - const ArgRequired argRequired; - const ArgMinMax argMinMax; - const int minVal; - const int maxVal; - void (T::* function)(const class Properties & args); - const char * description; -*/ - -#define CPCD_CMD(name, fun, desc) \ - { name, \ - 0, \ - ParserRow::Cmd, \ - ParserRow::String, \ - ParserRow::Optional, \ - ParserRow::IgnoreMinMax, \ - 0, 0, \ - fun, \ - desc, 0 } - -#define CPCD_ARG(name, type, opt, desc) \ - { name, \ - 0, \ - ParserRow::Arg, \ - ParserRow::type, \ - ParserRow::opt, \ - ParserRow::IgnoreMinMax, \ - 0, 0, \ - 0, \ - desc, 0 } - -#define CPCD_ARG2(name, type, opt, min, max, desc) \ - { name, \ - 0, \ - ParserRow::Arg, \ - ParserRow::type, \ - ParserRow::opt, \ - ParserRow::IgnoreMinMax, \ - min, max, \ - 0, \ - desc, 0 } - -#define CPCD_END() \ - { 0, \ - 0, \ - ParserRow::Arg, \ - ParserRow::Int, \ - ParserRow::Optional, \ - ParserRow::IgnoreMinMax, \ - 0, 0, \ - 0, \ - 0, 0 } - -#define CPCD_CMD_ALIAS(name, realName, fun) \ - { name, \ - realName, \ - ParserRow::CmdAlias, \ - ParserRow::Int, \ - ParserRow::Optional, \ - ParserRow::IgnoreMinMax, \ - 0, 0, \ - 0, \ - 0, 0 } - -#define CPCD_ARG_ALIAS(name, realName, fun) \ - { name, \ - realName, \ - ParserRow::ArgAlias, \ - ParserRow::Int, \ - ParserRow::Optional, \ - ParserRow::IgnoreMinMax, \ - 0, 0, \ - 0, \ - 0, 0 } - -const -ParserRow commands[] = -{ - CPCD_CMD("define process" , &CPCDAPISession::defineProcess, ""), - CPCD_ARG("id", Int, Optional, "Id of process."), - CPCD_ARG("name", String, Mandatory, "Name of process"), - CPCD_ARG("group", String, Mandatory, "Group of process"), - CPCD_ARG("env", String, Optional, "Environment variables for process"), - CPCD_ARG("path", String, Mandatory, "Path to binary"), - CPCD_ARG("args", String, Optional, "Arguments to process"), - CPCD_ARG("type", String, Mandatory, "Type of process"), - CPCD_ARG("cwd", String, Mandatory, "Working directory of process"), - CPCD_ARG("owner", String, Mandatory, "Owner of process"), - CPCD_ARG("runas", String, Optional, "Run as user"), - CPCD_ARG("stdout", String, Optional, "Redirection of stdout"), - CPCD_ARG("stderr", String, Optional, "Redirection of stderr"), - CPCD_ARG("stdin", String, Optional, "Redirection of stderr"), - CPCD_ARG("ulimit", String, Optional, "ulimit"), - CPCD_ARG("shutdown", String, Optional, "shutdown options"), - - CPCD_CMD("undefine process", &CPCDAPISession::undefineProcess, ""), - CPCD_CMD_ALIAS("undef", "undefine process", 0), - CPCD_ARG("id", Int, Mandatory, "Id of process"), - CPCD_ARG_ALIAS("i", "id", 0), - - CPCD_CMD("start process", &CPCDAPISession::startProcess, ""), - CPCD_ARG("id", Int, Mandatory, "Id of process"), - - CPCD_CMD("stop process", &CPCDAPISession::stopProcess, ""), - CPCD_ARG("id", Int, Mandatory, "Id of process"), - - CPCD_CMD("list processes", &CPCDAPISession::listProcesses, ""), - - CPCD_CMD("show version", &CPCDAPISession::showVersion, ""), - - CPCD_END() -}; -CPCDAPISession::CPCDAPISession(NDB_SOCKET_TYPE sock, - CPCD & cpcd) - : SocketServer::Session(sock) - , m_cpcd(cpcd) -{ - m_input = new SocketInputStream(sock, 7*24*60*60000); - m_output = new 
SocketOutputStream(sock); - m_parser = new Parser(commands, *m_input, true, true, true); -} - -CPCDAPISession::CPCDAPISession(FILE * f, CPCD & cpcd) - : SocketServer::Session(1) - , m_cpcd(cpcd) -{ - m_input = new FileInputStream(f); - m_parser = new Parser(commands, *m_input, true, true, true); -} - -CPCDAPISession::~CPCDAPISession() { - delete m_input; - delete m_parser; -} - -void -CPCDAPISession::runSession(){ - Parser_t::Context ctx; - while(!m_stop){ - m_parser->run(ctx, * this); - if(ctx.m_currentToken == 0) - break; - - switch(ctx.m_status){ - case Parser_t::Ok: - for(size_t i = 0; i %s", - ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName); - break; - case Parser_t::NoLine: - case Parser_t::EmptyLine: - break; - default: - break; - } - } - NDB_CLOSE_SOCKET(m_socket); -} - -void -CPCDAPISession::stopSession(){ - CPCD::RequestStatus rs; - for(size_t i = 0; irun(ctx, * this); - if(ctx.m_currentToken == 0) - break; - - switch(ctx.m_status){ - case Parser_t::Ok: - for(size_t i = 0; i %s", - ctx.m_aliasUsed[i]->name, ctx.m_aliasUsed[i]->realName); - break; - case Parser_t::NoLine: - case Parser_t::EmptyLine: - break; - default: - break; - } - } -} - -static const int g_TimeOut = 1000; - -void -CPCDAPISession::defineProcess(Parser_t::Context & /* unused */, - const class Properties & args){ - - CPCD::Process * p = new CPCD::Process(args, &m_cpcd); - - CPCD::RequestStatus rs; - - bool ret = m_cpcd.defineProcess(&rs, p); - if(!m_cpcd.loadingProcessList) { - m_output->println("define process"); - m_output->println("status: %d", rs.getStatus()); - if(ret == true){ - m_output->println("id: %d", p->m_id); - if(p->m_processType == TEMPORARY){ - m_temporaryProcesses.push_back(p->m_id); - } - } else { - m_output->println("errormessage: %s", rs.getErrMsg()); - } - m_output->println(""); - } -} - -void -CPCDAPISession::undefineProcess(Parser_t::Context & /* unused */, - const class Properties & args){ - Uint32 id; - CPCD::RequestStatus rs; - - args.get("id", &id); - bool ret = m_cpcd.undefineProcess(&rs, id); - - m_output->println("undefine process"); - m_output->println("id: %d", id); - m_output->println("status: %d", rs.getStatus()); - if(!ret) - m_output->println("errormessage: %s", rs.getErrMsg()); - - m_output->println(""); -} - -void -CPCDAPISession::startProcess(Parser_t::Context & /* unused */, - const class Properties & args){ - Uint32 id; - CPCD::RequestStatus rs; - - args.get("id", &id); - const int ret = m_cpcd.startProcess(&rs, id); - - if(!m_cpcd.loadingProcessList) { - m_output->println("start process"); - m_output->println("id: %d", id); - m_output->println("status: %d", rs.getStatus()); - if(!ret) - m_output->println("errormessage: %s", rs.getErrMsg()); - m_output->println(""); - } -} - -void -CPCDAPISession::stopProcess(Parser_t::Context & /* unused */, - const class Properties & args){ - Uint32 id; - CPCD::RequestStatus rs; - - args.get("id", &id); - int ret = m_cpcd.stopProcess(&rs, id); - - m_output->println("stop process"); - m_output->println("id: %d", id); - m_output->println("status: %d", rs.getStatus()); - if(!ret) - m_output->println("errormessage: %s", rs.getErrMsg()); - - m_output->println(""); -} - -static const char * -propToString(Properties *prop, const char *key) { - static char buf[32]; - const char *retval = NULL; - PropertiesType pt; - - prop->getTypeOf(key, &pt); - switch(pt) { - case PropertiesType_Uint32: - Uint32 val; - prop->get(key, &val); - BaseString::snprintf(buf, sizeof buf, "%d", val); - retval = buf; - break; - case PropertiesType_char: - const 
char *str; - prop->get(key, &str); - retval = str; - break; - default: - BaseString::snprintf(buf, sizeof buf, "(unknown)"); - retval = buf; - } - return retval; -} - -void -CPCDAPISession::printProperty(Properties *prop, const char *key) { - m_output->println("%s: %s", key, propToString(prop, key)); -} - -void -CPCDAPISession::listProcesses(Parser_t::Context & /* unused */, - const class Properties & /* unused */){ - m_cpcd.m_processes.lock(); - MutexVector *proclist = m_cpcd.getProcessList(); - - m_output->println("start processes"); - m_output->println(""); - - - for(size_t i = 0; i < proclist->size(); i++) { - CPCD::Process *p = (*proclist)[i]; - - m_output->println("process"); - - m_output->println("id: %d", p->m_id); - m_output->println("name: %s", p->m_name.c_str()); - m_output->println("path: %s", p->m_path.c_str()); - m_output->println("args: %s", p->m_args.c_str()); - m_output->println("type: %s", p->m_type.c_str()); - m_output->println("cwd: %s", p->m_cwd.c_str()); - m_output->println("env: %s", p->m_env.c_str()); - m_output->println("owner: %s", p->m_owner.c_str()); - m_output->println("group: %s", p->m_group.c_str()); - m_output->println("runas: %s", p->m_runas.c_str()); - m_output->println("stdin: %s", p->m_stdin.c_str()); - m_output->println("stdout: %s", p->m_stdout.c_str()); - m_output->println("stderr: %s", p->m_stderr.c_str()); - m_output->println("ulimit: %s", p->m_ulimit.c_str()); - m_output->println("shutdown: %s", p->m_shutdown_options.c_str()); - switch(p->m_status){ - case STOPPED: - m_output->println("status: stopped"); - break; - case STARTING: - m_output->println("status: starting"); - break; - case RUNNING: - m_output->println("status: running"); - break; - case STOPPING: - m_output->println("status: stopping"); - break; - } - - m_output->println(""); - - } - - m_output->println("end processes"); - m_output->println(""); - - m_cpcd.m_processes.unlock(); -} - -void -CPCDAPISession::showVersion(Parser_t::Context & /* unused */, - const class Properties & args){ - CPCD::RequestStatus rs; - - m_output->println("show version"); - m_output->println("compile time: %s %s", __DATE__, __TIME__); - - m_output->println(""); -} - -template class Vector const*>; diff --git a/storage/ndb/src/cw/cpcd/APIService.hpp b/storage/ndb/src/cw/cpcd/APIService.hpp deleted file mode 100644 index d6a45dc26a4..00000000000 --- a/storage/ndb/src/cw/cpcd/APIService.hpp +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CPCD_API_HPP -#define CPCD_API_HPP - -#include -#include -#include - -class CPCD; - -class CPCDAPISession : public SocketServer::Session { - typedef Parser Parser_t; - - class CPCD & m_cpcd; - InputStream *m_input; - OutputStream *m_output; - Parser_t *m_parser; - - Vector m_temporaryProcesses; - - void printProperty(Properties *prop, const char *key); -public: - CPCDAPISession(NDB_SOCKET_TYPE, class CPCD &); - CPCDAPISession(FILE * f, CPCD & cpcd); - ~CPCDAPISession(); - - virtual void runSession(); - virtual void stopSession(); - void loadFile(); - - void defineProcess(Parser_t::Context & ctx, const class Properties & args); - void undefineProcess(Parser_t::Context & ctx, const class Properties & args); - void startProcess(Parser_t::Context & ctx, const class Properties & args); - void stopProcess(Parser_t::Context & ctx, const class Properties & args); - void showProcess(Parser_t::Context & ctx, const class Properties & args); - void listProcesses(Parser_t::Context & ctx, const class Properties & args); - void showVersion(Parser_t::Context & ctx, const class Properties & args); -}; - -class CPCDAPIService : public SocketServer::Service { - class CPCD & m_cpcd; -public: - CPCDAPIService(class CPCD & cpcd) : m_cpcd(cpcd) {} - - CPCDAPISession * newSession(NDB_SOCKET_TYPE theSock){ - return new CPCDAPISession(theSock, m_cpcd); - } -}; - -#endif diff --git a/storage/ndb/src/cw/cpcd/CPCD.cpp b/storage/ndb/src/cw/cpcd/CPCD.cpp deleted file mode 100644 index 48e946f58b8..00000000000 --- a/storage/ndb/src/cw/cpcd/CPCD.cpp +++ /dev/null @@ -1,435 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -#include "APIService.hpp" -#include "CPCD.hpp" -#include - -#include "common.hpp" - -extern const ParserRow commands[]; - - -CPCD::CPCD() { - loadingProcessList = false; - m_processes.clear(); - m_monitor = NULL; - m_monitor = new Monitor(this); - m_procfile = "ndb_cpcd.db"; -} - -CPCD::~CPCD() { - if(m_monitor != NULL) { - delete m_monitor; - m_monitor = NULL; - } -} - -int -CPCD::findUniqueId() { - int id; - bool ok = false; - m_processes.lock(); - - while(!ok) { - ok = true; - id = random() % 8192; /* Don't want so big numbers */ - - if(id == 0) - ok = false; - - for(size_t i = 0; im_id == id) - ok = false; - } - } - m_processes.unlock(); - return id; -} - -bool -CPCD::defineProcess(RequestStatus * rs, Process * arg){ - if(arg->m_id == -1) - arg->m_id = findUniqueId(); - - Guard tmp(m_processes); - - for(size_t i = 0; im_name.c_str(), proc->m_name.c_str()) == 0) && - (strcmp(arg->m_group.c_str(), proc->m_group.c_str()) == 0)) { - /* Identical names in the same group */ - rs->err(AlreadyExists, "Name already exists"); - return false; - } - - if(arg->m_id == proc->m_id) { - /* Identical ID numbers */ - rs->err(AlreadyExists, "Id already exists"); - return false; - } - } - - m_processes.push_back(arg, false); - - notifyChanges(); - report(arg->m_id, CPCEvent::ET_PROC_USER_DEFINE); - - return true; -} - -bool -CPCD::undefineProcess(CPCD::RequestStatus *rs, int id) { - - Guard tmp(m_processes); - - Process * proc = 0; - size_t i; - for(i = 0; i < m_processes.size(); i++) { - if(m_processes[i]->m_id == id) { - proc = m_processes[i]; - break; - } - } - - if(proc == 0){ - rs->err(NotExists, "No such process"); - return false; - } - - switch(proc->m_status){ - case RUNNING: - case STOPPED: - case STOPPING: - case STARTING: - proc->stop(); - m_processes.erase(i, false /* Already locked */); - } - - - notifyChanges(); - - report(id, CPCEvent::ET_PROC_USER_UNDEFINE); - - return true; -} - -bool -CPCD::startProcess(CPCD::RequestStatus *rs, int id) { - - Process * proc = 0; - { - - Guard tmp(m_processes); - - for(size_t i = 0; i < m_processes.size(); i++) { - if(m_processes[i]->m_id == id) { - proc = m_processes[i]; - break; - } - } - - if(proc == 0){ - rs->err(NotExists, "No such process"); - return false; - } - - switch(proc->m_status){ - case STOPPED: - proc->m_status = STARTING; - if(proc->start() != 0){ - rs->err(Error, "Failed to start"); - return false; - } - break; - case STARTING: - rs->err(Error, "Already starting"); - return false; - case RUNNING: - rs->err(Error, "Already started"); - return false; - case STOPPING: - rs->err(Error, "Currently stopping"); - return false; - } - - notifyChanges(); - } - report(id, CPCEvent::ET_PROC_USER_START); - - return true; -} - -bool -CPCD::stopProcess(CPCD::RequestStatus *rs, int id) { - - Guard tmp(m_processes); - - Process * proc = 0; - for(size_t i = 0; i < m_processes.size(); i++) { - if(m_processes[i]->m_id == id) { - proc = m_processes[i]; - break; - } - } - - if(proc == 0){ - rs->err(NotExists, "No such process"); - return false; - } - - switch(proc->m_status){ - case STARTING: - case RUNNING: - proc->stop(); - break; - case STOPPED: - rs->err(AlreadyStopped, "Already stopped"); - return false; - break; - case STOPPING: - rs->err(Error, "Already stopping"); - return false; - } - - notifyChanges(); - - 
report(id, CPCEvent::ET_PROC_USER_START); - - return true; -} - -bool -CPCD::notifyChanges() { - bool ret = true; - if(!loadingProcessList) - ret = saveProcessList(); - - m_monitor->signal(); - - return ret; -} - -/* Must be called with m_processlist locked */ -bool -CPCD::saveProcessList(){ - char newfile[PATH_MAX+4]; - char oldfile[PATH_MAX+4]; - char curfile[PATH_MAX]; - FILE *f; - - /* Create the filenames that we will use later */ - BaseString::snprintf(newfile, sizeof(newfile), "%s.new", m_procfile.c_str()); - BaseString::snprintf(oldfile, sizeof(oldfile), "%s.old", m_procfile.c_str()); - BaseString::snprintf(curfile, sizeof(curfile), "%s", m_procfile.c_str()); - - f = fopen(newfile, "w"); - - if(f == NULL) { - /* XXX What should be done here? */ - logger.critical("Cannot open `%s': %s\n", newfile, strerror(errno)); - return false; - } - - for(size_t i = 0; iprint(f); - fprintf(f, "\n"); - - if(m_processes[i]->m_processType == TEMPORARY){ - /** - * Interactive process should never be "restarted" on cpcd restart - */ - continue; - } - - if(m_processes[i]->m_status == RUNNING || - m_processes[i]->m_status == STARTING){ - fprintf(f, "start process\nid: %d\n\n", m_processes[i]->m_id); - } - } - - fclose(f); - f = NULL; - - /* This will probably only work on reasonably Unix-like systems. You have - * been warned... - * - * The motivation behind all this link()ing is that the daemon might - * crash right in the middle of updating the configuration file, and in - * that case we want to be sure that the old file is around until we are - * guaranteed that there is always at least one copy of either the old or - * the new configuration file left. - */ - - /* Remove an old config file if it exists */ - unlink(oldfile); - - if(link(curfile, oldfile) != 0) /* make a backup of the running config */ - logger.error("Cannot rename '%s' -> '%s'", curfile, oldfile); - else { - if(unlink(curfile) != 0) { /* remove the running config file */ - logger.critical("Cannot remove file '%s'", curfile); - return false; - } - } - - if(link(newfile, curfile) != 0) { /* put the new config file in place */ - printf("-->%d\n", __LINE__); - - logger.critical("Cannot rename '%s' -> '%s': %s", - curfile, newfile, strerror(errno)); - return false; - } - - /* XXX Ideally we would fsync() the directory here, but I'm not sure if - * that actually works. - */ - - unlink(newfile); /* remove the temporary file */ - unlink(oldfile); /* remove the old file */ - - logger.info("Process list saved as '%s'", curfile); - - return true; -} - -bool -CPCD::loadProcessList(){ - BaseString secondfile; - FILE *f; - - loadingProcessList = true; - - secondfile.assfmt("%s.new", m_procfile.c_str()); - - /* Try to open the config file */ - f = fopen(m_procfile.c_str(), "r"); - - /* If it did not exist, try to open the backup. See the saveProcessList() - * method for an explanation why it is done this way. - */ - if(f == NULL) { - f = fopen(secondfile.c_str(), "r"); - - if(f == NULL) { - /* XXX What to do here? */ - logger.info("Configuration file `%s' not found", - m_procfile.c_str()); - logger.info("Starting with empty configuration"); - loadingProcessList = false; - return false; - } else { - logger.info("Configuration file `%s' missing", - m_procfile.c_str()); - logger.info("Backup configuration file `%s' is used", - secondfile.c_str()); - /* XXX Maybe we should just rename the backup file to the official - * name, and be done with it? 
- */ - } - } - - CPCDAPISession sess(f, *this); - sess.loadFile(); - loadingProcessList = false; - - size_t i; - Vector temporary; - for(i = 0; ireadPid(); - if(proc->m_processType == TEMPORARY){ - temporary.push_back(proc->m_id); - } - } - - for(i = 0; isignal(); - return true; -} - -MutexVector * -CPCD::getProcessList() { - return &m_processes; -} - -void -CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) { - m_status = status; - BaseString::snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg); -} - -#if 0 -void -CPCD::sigchild(int pid){ - m_processes.lock(); - for(size_t i = 0; i; diff --git a/storage/ndb/src/cw/cpcd/CPCD.hpp b/storage/ndb/src/cw/cpcd/CPCD.hpp deleted file mode 100644 index 3c2934c0f49..00000000000 --- a/storage/ndb/src/cw/cpcd/CPCD.hpp +++ /dev/null @@ -1,390 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CPCD_HPP -#define CPCD_HPP - -#include -#include -#include -#include -#include -#include - -/* XXX Need to figure out how to do this for non-Unix systems */ -#define CPCD_DEFAULT_WORK_DIR "/var/run/ndb_cpcd" -#define CPCD_DEFAULT_PROC_FILE "ndb_cpcd.conf" -#define CPCD_DEFAULT_TCP_PORT 1234 -#define CPCD_DEFAULT_POLLING_INTERVAL 5 /* seconds */ -#define CPCD_DEFAULT_CONFIG_FILE "/etc/ndb_cpcd.conf" - -enum ProcessStatus { - STOPPED = 0, - STARTING = 1, - RUNNING = 2, - STOPPING = 3 -}; - -enum ProcessType { - PERMANENT = 0, - TEMPORARY = 1 -}; - -struct CPCEvent { - enum EventType { - ET_USER_CONNECT, - ET_USER_DISCONNECT, - - ET_PROC_USER_DEFINE, // Defined proc - ET_PROC_USER_UNDEFINE, // Undefined proc - ET_PROC_USER_START, // Proc ordered to start - ET_PROC_USER_STOP, // Proc ordered to stop - ET_PROC_STATE_RUNNING, // exec returned(?) ok - ET_PROC_STATE_STOPPED // detected that proc is ! running - }; - - int m_proc; - time_t m_time; - EventType m_type; -}; - -struct EventSubscriber { - virtual void report(const CPCEvent &) = 0; - EventSubscriber() {} - virtual ~EventSubscriber() {} -}; - -/** - * @brief Error codes for CPCD requests - */ -enum RequestStatusCode { - OK = 0, ///< Everything OK - Error = 1, ///< Generic error - AlreadyExists = 2, ///< Entry already exists in list - NotExists = 3, ///< Entry does not exist in list - AlreadyStopped = 4 -}; - -/** - * @class CPCD - * @brief Manages processes, letting them be controlled with a TCP connection. 
- * - * The class implementing the Cluster Process Control Daemon - */ -class CPCD { -public: - /** @brief Describes the status of a client request */ - class RequestStatus { - public: - /** @brief Constructs an empty RequestStatus */ - RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; }; - - /** @brief Sets an errorcode and a printable message */ - void err(enum RequestStatusCode, const char *); - - /** @brief Returns the error message */ - char *getErrMsg() { return m_errorstring; }; - - /** @brief Returns the error code */ - enum RequestStatusCode getStatus() { return m_status; }; - private: - enum RequestStatusCode m_status; - char m_errorstring[256]; - }; - /** - * @brief Manages a process - */ - class Process { - int m_pid; - public: - /** - * @brief Constructs and empty Process - */ - Process(const Properties & props, class CPCD *cpcd); - /** - * @brief Monitors the process - * - * The process is started or stopped as needed. - */ - void monitor(); - - /** - * @brief Checks if the process is running or not - * - * @return - * - true if the process is running, - * - false if the process is not running - */ - bool isRunning(); - - /** @brief Starts the process */ - int start(); - - /** @brief Stops the process */ - void stop(); - - /** - * @brief Reads the pid from stable storage - * - * @return The pid number - */ - int readPid(); - - /** - * @brief Writes the pid from stable storage - * - * @return - * - 0 if successful - - -1 and sets errno if an error occured - */ - int writePid(int pid); - - /** - * @brief Prints a textual description of the process on a file - */ - void print(FILE *); - - /** Id number of the Process. - * - * @note This is not the same as a pid. This number is used in the - * protocol, and will not be changed if a processes is restarted. - */ - int m_id; - - /** @brief The name shown to the user */ - BaseString m_name; - - /** @brief Used to group a number of processes */ - BaseString m_group; - - /** @brief Environment variables - * - * Environmentvariables to add for the process. - * - * @note - * - The environment cpcd started with is preserved - * - There is no way to delete variables - */ - BaseString m_env; - - /** @brief Path to the binary to run */ - BaseString m_path; - - /** @brief Arguments to the process. - * - * @note - * - This includes argv[0]. - * - If no argv[0] is given, argv[0] will be set to m_path. - */ - BaseString m_args; - - /** - * @brief Type of process - * - * Either set to "interactive" or "permanent". - */ - BaseString m_type; - ProcessType m_processType; - - /** - * @brief Working directory - * - * Working directory the process will start in. - */ - BaseString m_cwd; - - /** - * @brief Owner of the process. - * - * @note This will not affect the process' uid or gid; - * it is only used for managemental purposes. - * @see m_runas - */ - BaseString m_owner; - - /** - * @bried Run as - * @note This affects uid - * @see m_owner - */ - BaseString m_runas; - - /** - * @brief redirection for stdin - */ - BaseString m_stdin; - - /** - * @brief redirection for stdout - */ - BaseString m_stdout; - - /** - * @brief redirection for stderr - */ - BaseString m_stderr; - - /** @brief Status of the process */ - enum ProcessStatus m_status; - - /** - * @brief ulimits for process - * @desc Format c:unlimited d:0 ... 
- */ - BaseString m_ulimit; - - /** - * @brief shutdown options - */ - BaseString m_shutdown_options; - - private: - class CPCD *m_cpcd; - void do_exec(); - }; - - /** - * @brief Starts and stops processes as needed - * - * At a specified interval (default 5 seconds) calls the monitor function - * of all the processes in the CPCDs list, causing the to start or - * stop, depending on the configuration. - */ - class Monitor { - public: - /** Creates a new CPCD::Monitor object, connected to the specified - * CPCD. - * A new thread will be created, which will poll the processes of - * the CPCD at the specifed interval. - */ - Monitor(CPCD *cpcd, int poll = CPCD_DEFAULT_POLLING_INTERVAL); - - /** Stops the monitor, but does not stop the processes */ - ~Monitor(); - - /** Runs the monitor thread. */ - void run(); - - /** Signals configuration changes to the monitor thread, causing it to - * do the check without waiting for the timeout */ - void signal(); - private: - class CPCD *m_cpcd; - struct NdbThread *m_monitorThread; - bool m_monitorThreadQuitFlag; - struct NdbCondition *m_changeCondition; - NdbMutex *m_changeMutex; - int m_pollingInterval; /* seconds */ - }; - - /** @brief Constructs a CPCD object */ - CPCD(); - - /** - * @brief Destroys a CPCD object, - * but does not stop the processes it manages - */ - ~CPCD(); - - /** Adds a Process to the CPCDs list of managed Processes. - * - * @note The process will not be started until it is explicitly - * marked as running with CPCD::startProcess(). - * - * @return - * - true if the addition was successful, - * - false if not - * - RequestStatus will be filled in with a suitable error - * if an error occured. - */ - bool defineProcess(RequestStatus *rs, Process * arg); - - /** Removes a Process from the CPCD. - * - * @note A Process that is running cannot be removed. - * - * @return - * - true if the removal was successful, - * - false if not - * - The RequestStatus will be filled in with a suitable error - * if an error occured. - */ - bool undefineProcess(RequestStatus *rs, int id); - - /** Marks a Process for starting. - * - * @note The fact that a process has started does not mean it will actually - * start properly. This command only makes sure the CPCD will - * try to start it. - * - * @return - * - true if the marking was successful - * - false if not - * - RequestStatus will be filled in with a suitable error - * if an error occured. - */ - bool startProcess(RequestStatus *rs, int id); - - /** Marks a Process for stopping. - * - * @return - * - true if the marking was successful - * - false if not - * - The RequestStatus will be filled in with a suitable error - * if an error occured. - */ - bool stopProcess(RequestStatus *rs, int id); - - /** Generates a list of processes, and sends them to the CPCD client */ - bool listProcesses(RequestStatus *rs, MutexVector &); - - /** Set to true while the CPCD is reading the configuration file */ - bool loadingProcessList; - - /** Saves the list of Processes and their status to the configuration file. - * Called whenever the configuration is changed. - */ - bool saveProcessList(); - - /** Loads the list of Processes and their status from the configuration - * file. - * @note This function should only be called when the CPCD is starting, - * calling it at other times will cause unspecified behaviour. - */ - bool loadProcessList(); - - /** Returns the list of processes */ - MutexVector *getProcessList(); - - /** The list of processes. 
Should not be used directly */ - MutexVector m_processes; - - /** Register event subscriber */ - void do_register(EventSubscriber * sub); - EventSubscriber* do_unregister(EventSubscriber * sub); - -private: - friend class Process; - bool notifyChanges(); - int findUniqueId(); - BaseString m_procfile; - Monitor *m_monitor; - - void report(int id, CPCEvent::EventType); - MutexVector m_subscribers; -}; - -#endif diff --git a/storage/ndb/src/cw/cpcd/Makefile.am b/storage/ndb/src/cw/cpcd/Makefile.am deleted file mode 100644 index 58092dd6025..00000000000 --- a/storage/ndb/src/cw/cpcd/Makefile.am +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -ndbbin_PROGRAMS = ndb_cpcd - -ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp - -LDADD_LOC = \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_util.mk.am - -ndb_cpcd_LDFLAGS = -static @ndb_bin_am_ldflags@ - -windoze-dsp: diff --git a/storage/ndb/src/cw/cpcd/Monitor.cpp b/storage/ndb/src/cw/cpcd/Monitor.cpp deleted file mode 100644 index c096bb85029..00000000000 --- a/storage/ndb/src/cw/cpcd/Monitor.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include -#include - -#include "CPCD.hpp" -#include "common.hpp" - -static void * -monitor_thread_create_wrapper(void * arg) { - CPCD::Monitor *mon = (CPCD::Monitor *)arg; - mon->run(); - return NULL; -} - -CPCD::Monitor::Monitor(CPCD *cpcd, int poll) { - m_cpcd = cpcd; - m_pollingInterval = poll; - m_changeCondition = NdbCondition_Create(); - m_changeMutex = NdbMutex_Create(); - m_monitorThread = NdbThread_Create(monitor_thread_create_wrapper, - (NDB_THREAD_ARG*) this, - 32768, - "ndb_cpcd_monitor", - NDB_THREAD_PRIO_MEAN); - m_monitorThreadQuitFlag = false; -} - -CPCD::Monitor::~Monitor() { - NdbThread_Destroy(&m_monitorThread); - NdbCondition_Destroy(m_changeCondition); - NdbMutex_Destroy(m_changeMutex); -} - -void -CPCD::Monitor::run() { - while(1) { - NdbMutex_Lock(m_changeMutex); - NdbCondition_WaitTimeout(m_changeCondition, - m_changeMutex, - m_pollingInterval * 1000); - - MutexVector &proc = *m_cpcd->getProcessList(); - - proc.lock(); - - for(size_t i = 0; i < proc.size(); i++) { - proc[i]->monitor(); - } - - proc.unlock(); - - NdbMutex_Unlock(m_changeMutex); - } -} - -void -CPCD::Monitor::signal() { - NdbCondition_Signal(m_changeCondition); -} - -template class MutexVector; diff --git a/storage/ndb/src/cw/cpcd/Process.cpp b/storage/ndb/src/cw/cpcd/Process.cpp deleted file mode 100644 index c2e24cecd77..00000000000 --- a/storage/ndb/src/cw/cpcd/Process.cpp +++ /dev/null @@ -1,486 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include -#include - -#include "common.hpp" -#include "CPCD.hpp" - -#include -#ifdef HAVE_GETRLIMIT -#include -#endif - -void -CPCD::Process::print(FILE * f){ - fprintf(f, "define process\n"); - fprintf(f, "id: %d\n", m_id); - fprintf(f, "name: %s\n", m_name.c_str() ? m_name.c_str() : ""); - fprintf(f, "group: %s\n", m_group.c_str() ? m_group.c_str() : ""); - fprintf(f, "env: %s\n", m_env.c_str() ? m_env.c_str() : ""); - fprintf(f, "path: %s\n", m_path.c_str() ? m_path.c_str() : ""); - fprintf(f, "args: %s\n", m_args.c_str() ? m_args.c_str() : ""); - fprintf(f, "type: %s\n", m_type.c_str() ? m_type.c_str() : ""); - fprintf(f, "cwd: %s\n", m_cwd.c_str() ? m_cwd.c_str() : ""); - fprintf(f, "owner: %s\n", m_owner.c_str() ? m_owner.c_str() : ""); - fprintf(f, "runas: %s\n", m_runas.c_str() ? m_runas.c_str() : ""); - fprintf(f, "stdin: %s\n", m_stdin.c_str() ? m_stdin.c_str() : ""); - fprintf(f, "stdout: %s\n", m_stdout.c_str() ? m_stdout.c_str() : ""); - fprintf(f, "stderr: %s\n", m_stderr.c_str() ? m_stderr.c_str() : ""); - fprintf(f, "ulimit: %s\n", m_ulimit.c_str() ? 
m_ulimit.c_str() : ""); - fprintf(f, "shutdown: %s\n", m_shutdown_options.c_str() ? - m_shutdown_options.c_str() : ""); -} - -CPCD::Process::Process(const Properties & props, class CPCD *cpcd) { - m_id = -1; - m_pid = -1; - props.get("id", (Uint32 *) &m_id); - props.get("name", m_name); - props.get("group", m_group); - props.get("env", m_env); - props.get("path", m_path); - props.get("args", m_args); - props.get("cwd", m_cwd); - props.get("owner", m_owner); - props.get("type", m_type); - props.get("runas", m_runas); - - props.get("stdin", m_stdin); - props.get("stdout", m_stdout); - props.get("stderr", m_stderr); - props.get("ulimit", m_ulimit); - props.get("shutdown", m_shutdown_options); - m_status = STOPPED; - - if(strcasecmp(m_type.c_str(), "temporary") == 0){ - m_processType = TEMPORARY; - } else { - m_processType = PERMANENT; - } - - m_cpcd = cpcd; -} - -void -CPCD::Process::monitor() { - switch(m_status) { - case STARTING: - break; - case RUNNING: - if(!isRunning()){ - m_cpcd->report(m_id, CPCEvent::ET_PROC_STATE_STOPPED); - if(m_processType == TEMPORARY){ - m_status = STOPPED; - } else { - start(); - } - } - break; - case STOPPED: - assert(!isRunning()); - break; - case STOPPING: - break; - } -} - -bool -CPCD::Process::isRunning() { - - if(m_pid <= 1){ - //logger.critical("isRunning(%d) invalid pid: %d", m_id, m_pid); - return false; - } - /* Check if there actually exists a process with such a pid */ - errno = 0; - int s = kill((pid_t)-m_pid, 0); /* Sending "signal" 0 to a process only - * checkes if the process actually exists */ - if(s != 0) { - switch(errno) { - case EPERM: - logger.critical("Not enough privileges to control pid %d\n", m_pid); - break; - case ESRCH: - /* The pid in the file does not exist, which probably means that it - has died, or the file contains garbage for some other reason */ - break; - default: - logger.critical("Cannot not control pid %d: %s\n", m_pid, strerror(errno)); - break; - } - return false; - } - return true; -} - -int -CPCD::Process::readPid() { - if(m_pid != -1){ - logger.critical("Reading pid while != -1(%d)", m_pid); - return m_pid; - } - - char filename[PATH_MAX*2+1]; - char buf[1024]; - FILE *f; - - memset(buf, 0, sizeof(buf)); - - BaseString::snprintf(filename, sizeof(filename), "%d", m_id); - - f = fopen(filename, "r"); - - if(f == NULL){ - return -1; /* File didn't exist */ - } - - errno = 0; - size_t r = fread(buf, 1, sizeof(buf), f); - fclose(f); - if(r > 0) - m_pid = strtol(buf, (char **)NULL, 0); - - if(errno == 0){ - return m_pid; - } - - return -1; -} - -int -CPCD::Process::writePid(int pid) { - char tmpfilename[PATH_MAX+1+4+8]; - char filename[PATH_MAX*2+1]; - FILE *f; - - BaseString::snprintf(tmpfilename, sizeof(tmpfilename), "tmp.XXXXXX"); - BaseString::snprintf(filename, sizeof(filename), "%d", m_id); - - int fd = mkstemp(tmpfilename); - if(fd < 0) { - logger.error("Cannot open `%s': %s\n", tmpfilename, strerror(errno)); - return -1; /* Couldn't open file */ - } - - f = fdopen(fd, "w"); - - if(f == NULL) { - logger.error("Cannot open `%s': %s\n", tmpfilename, strerror(errno)); - return -1; /* Couldn't open file */ - } - - fprintf(f, "%d", pid); - fclose(f); - - if(rename(tmpfilename, filename) == -1){ - logger.error("Unable to rename from %s to %s", tmpfilename, filename); - return -1; - } - return 0; -} - -static void -setup_environment(const char *env) { - char **p; - p = BaseString::argify("", env); - for(int i = 0; p[i] != NULL; i++){ - /*int res = */ putenv(p[i]); - } -} - -static -int -set_ulimit(const BaseString & 
pair){ -#ifdef HAVE_GETRLIMIT - errno = 0; - Vector list; - pair.split(list, ":"); - if(list.size() != 2){ - logger.error("Unable to process ulimit: split >%s< list.size()=%d", - pair.c_str(), list.size()); - return -1; - } - - int res; - rlim_t value = RLIM_INFINITY; - if(!(list[1].trim() == "unlimited")){ - value = atoi(list[1].c_str()); - } - - struct rlimit rlp; -#define _RLIMIT_FIX(x) { res = getrlimit(x,&rlp); if(!res){ rlp.rlim_cur = value; res = setrlimit(x, &rlp); }} - - if(list[0].trim() == "c"){ - _RLIMIT_FIX(RLIMIT_CORE); - } else if(list[0] == "d"){ - _RLIMIT_FIX(RLIMIT_DATA); - } else if(list[0] == "f"){ - _RLIMIT_FIX(RLIMIT_FSIZE); - } else if(list[0] == "n"){ - _RLIMIT_FIX(RLIMIT_NOFILE); - } else if(list[0] == "s"){ - _RLIMIT_FIX(RLIMIT_STACK); - } else if(list[0] == "t"){ - _RLIMIT_FIX(RLIMIT_CPU); - } else { - res= -11; - errno = EINVAL; - } - if(res){ - logger.error("Unable to process ulimit: %s res=%d error=%d(%s)", - pair.c_str(), res, errno, strerror(errno)); - return -1; - } -#endif - return 0; -} - -void -CPCD::Process::do_exec() { - size_t i; - setup_environment(m_env.c_str()); - - char **argv = BaseString::argify(m_path.c_str(), m_args.c_str()); - - if(strlen(m_cwd.c_str()) > 0) { - int err = chdir(m_cwd.c_str()); - if(err == -1) { - BaseString err; - logger.error("%s: %s\n", m_cwd.c_str(), strerror(errno)); - _exit(1); - } - } - - Vector ulimit; - m_ulimit.split(ulimit); - for(i = 0; i 0 && set_ulimit(ulimit[i]) != 0){ - _exit(1); - } - } - - int fd = open("/dev/null", O_RDWR, 0); - if(fd == -1) { - logger.error("Cannot open `/dev/null': %s\n", strerror(errno)); - _exit(1); - } - - BaseString * redirects[] = { &m_stdin, &m_stdout, &m_stderr }; - int fds[3]; - for(i = 0; i<3; i++){ - if(redirects[i]->empty()){ -#ifndef DEBUG - dup2(fd, i); -#endif - continue; - } - - if((* redirects[i]) == "2>&1" && i == 2){ - dup2(fds[1], 2); - continue; - } - - /** - * Make file - */ - int flags = 0; - int mode = S_IRUSR | S_IWUSR ; - if(i == 0){ - flags |= O_RDONLY; - } else { - flags |= O_WRONLY | O_CREAT | O_APPEND; - } - int f = fds[i]= open(redirects[i]->c_str(), flags, mode); - if(f == -1){ - logger.error("Cannot redirect %d to/from '%s' : %s\n", i, - redirects[i]->c_str(), strerror(errno)); - _exit(1); - } - dup2(f, i); - } - - /* Close all filedescriptors */ - for(i = STDERR_FILENO+1; (int)i < getdtablesize(); i++) - close(i); - - execv(m_path.c_str(), argv); - /* XXX If we reach this point, an error has occurred, but it's kind of hard - * to report it, because we've closed all files... So we should probably - * create a new logger here */ - logger.error("Exec failed: %s\n", strerror(errno)); - /* NOTREACHED */ -} - -int -CPCD::Process::start() { - /* We need to fork() twice, so that the second child (grandchild?) can - * become a daemon. The original child then writes the pid file, - * so that the monitor knows the pid of the new process, and then - * exit()s. That way, the monitor process can pickup the pid, and - * the running process is a daemon. - * - * This is a bit tricky but has the following advantages: - * - the cpcd can die, and "reconnect" to the monitored clients - * without restarting them. - * - the cpcd does not have to wait() for the childs. init(1) will - * take care of that. 
- */ - logger.info("Starting %d: %s", m_id, m_name.c_str()); - m_status = STARTING; - - int pid = -1; - switch(m_processType){ - case TEMPORARY:{ - /** - * Simple fork - * don't ignore child - */ - switch(pid = fork()) { - case 0: /* Child */ - setsid(); - writePid(getpgrp()); - if(runas(m_runas.c_str()) == 0){ - signal(SIGCHLD, SIG_DFL); - do_exec(); - } - _exit(1); - break; - case -1: /* Error */ - logger.error("Cannot fork: %s\n", strerror(errno)); - m_status = STOPPED; - return -1; - break; - default: /* Parent */ - logger.debug("Started temporary %d : pid=%d", m_id, pid); - m_cpcd->report(m_id, CPCEvent::ET_PROC_STATE_RUNNING); - break; - } - break; - } - case PERMANENT:{ - /** - * PERMANENT - */ - switch(fork()) { - case 0: /* Child */ - signal(SIGCHLD, SIG_IGN); - switch(pid = fork()) { - case 0: /* Child */ - setsid(); - writePid(getpgrp()); - if(runas(m_runas.c_str()) != 0){ - _exit(1); - } - signal(SIGCHLD, SIG_DFL); - do_exec(); - _exit(1); - /* NOTREACHED */ - break; - case -1: /* Error */ - logger.error("Cannot fork: %s\n", strerror(errno)); - writePid(-1); - _exit(1); - break; - default: /* Parent */ - logger.debug("Started permanent %d : pid=%d", m_id, pid); - _exit(0); - break; - } - break; - case -1: /* Error */ - logger.error("Cannot fork: %s\n", strerror(errno)); - m_status = STOPPED; - return -1; - break; - default: /* Parent */ - m_cpcd->report(m_id, CPCEvent::ET_PROC_STATE_RUNNING); - break; - } - break; - } - default: - logger.critical("Unknown process type"); - return -1; - } - - while(readPid() < 0){ - sched_yield(); - } - - errno = 0; - pid_t pgid = getpgid(pid); - - if(pgid != -1 && pgid != m_pid){ - logger.error("pgid and m_pid don't match: %d %d (%d)", pgid, m_pid, pid); - } - - if(isRunning()){ - m_status = RUNNING; - return 0; - } - m_status = STOPPED; - return -1; -} - -void -CPCD::Process::stop() { - - char filename[PATH_MAX*2+1]; - BaseString::snprintf(filename, sizeof(filename), "%d", m_id); - unlink(filename); - - if(m_pid <= 1){ - logger.critical("Stopping process with bogus pid: %d id: %d", - m_pid, m_id); - return; - } - m_status = STOPPING; - - errno = 0; - int signo= SIGTERM; - if(m_shutdown_options == "SIGKILL") - signo= SIGKILL; - - int ret = kill(-m_pid, signo); - switch(ret) { - case 0: - logger.debug("Sent SIGTERM to pid %d", (int)-m_pid); - break; - default: - logger.debug("kill pid: %d : %s", (int)-m_pid, strerror(errno)); - break; - } - - if(isRunning()){ - errno = 0; - ret = kill(-m_pid, SIGKILL); - switch(ret) { - case 0: - logger.debug("Sent SIGKILL to pid %d", (int)-m_pid); - break; - default: - logger.debug("kill pid: %d : %s\n", (int)-m_pid, strerror(errno)); - break; - } - } - - m_pid = -1; - m_status = STOPPED; -} diff --git a/storage/ndb/src/cw/cpcd/common.cpp b/storage/ndb/src/cw/cpcd/common.cpp deleted file mode 100644 index 1a799bc2bd5..00000000000 --- a/storage/ndb/src/cw/cpcd/common.cpp +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "common.hpp" -#include -#include - -#include -#include - -int debug = 0; - -Logger logger; - -int -runas(const char * user){ - if(user == 0 || strlen(user) == 0){ - return 0; - } - struct passwd * pw = getpwnam(user); - if(pw == 0){ - logger.error("Can't find user to %s", user); - return -1; - } - uid_t uid = pw->pw_uid; - gid_t gid = pw->pw_gid; - int res = setgid(gid); - if(res != 0){ - logger.error("Can't change group to %s(%d)", user, gid); - return res; - } - - res = setuid(uid); - if(res != 0){ - logger.error("Can't change user to %s(%d)", user, uid); - } - return res; -} - -int -insert(const char * pair, Properties & p){ - BaseString tmp(pair); - - tmp.trim(" \t\n\r"); - - Vector split; - tmp.split(split, ":=", 2); - - if(split.size() != 2) - return -1; - - p.put(split[0].trim().c_str(), split[1].trim().c_str()); - - return 0; -} - -int -insert_file(FILE * f, class Properties& p, bool break_on_empty){ - if(f == 0) - return -1; - - while(!feof(f)){ - char buf[1024]; - fgets(buf, 1024, f); - BaseString tmp = buf; - - if(tmp.length() > 0 && tmp.c_str()[0] == '#') - continue; - - if(insert(tmp.c_str(), p) != 0 && break_on_empty) - break; - } - - return 0; -} - -int -insert_file(const char * filename, class Properties& p){ - FILE * f = fopen(filename, "r"); - int res = insert_file(f, p); - if(f) fclose(f); - return res; -} diff --git a/storage/ndb/src/cw/cpcd/common.hpp b/storage/ndb/src/cw/cpcd/common.hpp deleted file mode 100644 index eb984696f00..00000000000 --- a/storage/ndb/src/cw/cpcd/common.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __CPCD_COMMON_HPP_INCLUDED__ -#define __CPCD_COMMON_HPP_INCLUDED__ - -#include -#include -#if 0 -#include -#endif - -extern int debug; - -extern Logger logger; - -int runas(const char * user); -int insert(const char * pair, class Properties & p); - -int insert_file(const char * filename, class Properties&); -int insert_file(FILE *, class Properties&, bool break_on_empty = false); - -#endif /* ! __CPCD_COMMON_HPP_INCLUDED__ */ diff --git a/storage/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp deleted file mode 100644 index c90d0f43f1b..00000000000 --- a/storage/ndb/src/cw/cpcd/main.cpp +++ /dev/null @@ -1,183 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
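The deleted common.cpp above resolves the target account with getpwnam() and drops privileges with setgid() before setuid(), in that order, because the group can no longer be changed once the user id has been given up. A minimal sketch of that pattern follows; the initgroups() call is an extra hardening step that is not in the original runas(), and the function name is illustrative.

// Sketch of the setgid-before-setuid privilege drop used by runas()
// (initgroups() is an addition, not part of the deleted code).
#include <pwd.h>
#include <grp.h>
#include <unistd.h>
#include <stdio.h>

int drop_privileges(const char *user) {
  if (user == NULL || *user == '\0') return 0;   // nothing to do
  struct passwd *pw = getpwnam(user);
  if (pw == NULL) {
    fprintf(stderr, "unknown user: %s\n", user);
    return -1;
  }
  // Group first: once setuid() succeeds we are no longer allowed to change groups.
  if (setgid(pw->pw_gid) != 0) return -1;
  if (initgroups(user, pw->pw_gid) != 0) return -1;  // supplementary groups
  if (setuid(pw->pw_uid) != 0) return -1;
  return 0;
}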
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include /* Needed for mkdir(2) */ -#include -#include -#include -#include - -#include "CPCD.hpp" -#include "APIService.hpp" -#include -#include -#include -#include -#include -#include - -#include "common.hpp" - -static const char *work_dir = CPCD_DEFAULT_WORK_DIR; -static int unsigned port; -static int use_syslog; -static const char *logfile = NULL; -static const char *user = 0; - -static struct my_option my_long_options[] = -{ - { "work-dir", 'w', "Work directory", - &work_dir, &work_dir, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - { "port", 'p', "TCP port to listen on", - &port, &port, 0, - GET_INT, REQUIRED_ARG, CPCD_DEFAULT_TCP_PORT, 0, 0, 0, 0, 0 }, - { "syslog", 'S', "Log events to syslog", - &use_syslog, &use_syslog, 0, - GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "logfile", 'L', "File to log events to", - &logfile, &logfile, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - { "debug", 'D', "Enable debug mode", - &debug, &debug, 0, - GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, - { "user", 'u', "Run as user", - &user, &user, 0, - GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} -}; - -static my_bool -get_one_option(int optid, const struct my_option *opt __attribute__((unused)), - char *argument) -{ - return 0; -} - -static CPCD * g_cpcd = 0; -#if 0 -extern "C" static void sig_child(int signo, siginfo_t*, void*); -#endif - -const char *progname = "ndb_cpcd"; - -int main(int argc, char** argv){ - const char *load_default_groups[]= { "ndb_cpcd",0 }; - MY_INIT(argv[0]); - - load_defaults("ndb_cpcd",load_default_groups,&argc,&argv); - if (handle_options(&argc, &argv, my_long_options, get_one_option)) { - print_defaults(MYSQL_CONFIG_NAME,load_default_groups); - puts(""); - my_print_help(my_long_options); - my_print_variables(my_long_options); - exit(1); - } - - logger.setCategory(progname); - logger.enable(Logger::LL_ALL); - - if(debug) - logger.createConsoleHandler(); - - if(user && runas(user) != 0){ - logger.critical("Unable to change user: %s", user); - _exit(1); - } - - if(logfile != NULL){ - BaseString tmp; - if(logfile[0] != '/') - tmp.append(work_dir); - tmp.append(logfile); - logger.addHandler(new FileLogHandler(tmp.c_str())); - } - - if(use_syslog) - logger.addHandler(new SysLogHandler()); - - logger.info("Starting"); - - CPCD cpcd; - g_cpcd = &cpcd; - - /* XXX This will probably not work on !unix */ - int err = mkdir(work_dir, S_IRWXU | S_IRGRP | S_IROTH); - if(err != 0) { - switch(errno) { - case EEXIST: - break; - default: - fprintf(stderr, "Cannot mkdir %s: %s\n", work_dir, strerror(errno)); - exit(1); - } - } - - if(strlen(work_dir) > 0){ - logger.debug("Changing dir to '%s'", work_dir); - if((err = chdir(work_dir)) != 0){ - fprintf(stderr, "Cannot chdir %s: %s\n", work_dir, strerror(errno)); - exit(1); - } - } - - cpcd.loadProcessList(); - - SocketServer * ss = new SocketServer(); - CPCDAPIService * serv = new CPCDAPIService(cpcd); - unsigned short real_port= port; // correct type - if(!ss->setup(serv, &real_port)){ - logger.critical("Cannot setup server: %s", 
strerror(errno)); - sleep(1); - delete ss; - delete serv; - return 1; - } - - ss->startServer(); - - { - signal(SIGPIPE, SIG_IGN); - signal(SIGCHLD, SIG_IGN); -#if 0 - struct sigaction act; - act.sa_handler = 0; - act.sa_sigaction = sig_child; - sigemptyset(&act.sa_mask); - act.sa_flags = SA_SIGINFO; - sigaction(SIGCHLD, &act, 0); -#endif - } - - logger.debug("Start completed"); - while(true) NdbSleep_MilliSleep(1000); - - delete ss; - return 0; -} - -#if 0 -extern "C" -void -sig_child(int signo, siginfo_t* info, void*){ - printf("signo: %d si_signo: %d si_errno: %d si_code: %d si_pid: %d\n", - signo, - info->si_signo, - info->si_errno, - info->si_code, - info->si_pid); - -} -#endif diff --git a/storage/ndb/src/cw/test/socketclient/Makefile b/storage/ndb/src/cw/test/socketclient/Makefile deleted file mode 100644 index 04f11f031e5..00000000000 --- a/storage/ndb/src/cw/test/socketclient/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -include .defs.mk - -TYPE := - -BIN_TARGET := socketclient - - - -CCFLAGS_LOC += -I../../util/ -I../../cpcd/ - -LIBS_LOC += -L$(NDB_TOP)/lib/ -L$(EXTERNAL_LIB_DIR)/sci - -LIBS_SPEC += -lsocketclient - - -SOURCES = socketClientTest.cpp - - -include $(NDB_TOP)/Epilogue.mk - - - - - diff --git a/storage/ndb/src/cw/test/socketclient/socketClientTest.cpp b/storage/ndb/src/cw/test/socketclient/socketClientTest.cpp deleted file mode 100644 index 6b17d072f64..00000000000 --- a/storage/ndb/src/cw/test/socketclient/socketClientTest.cpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include -#include -#include -#include -#include -#include "SocketService.hpp" -#include "SocketRegistry.hpp" -#include "SocketClient.hpp" -#include "ClientInterface.hpp" - -#include - -#include - -NDB_MAIN(socketclient) { - - - if(argc<3) { - printf("wrong args: socketclient \n"); - return 0; - } - const char * remotehost = argv[1]; - const int port = atoi(argv[2]); - - - ClientInterface * ci = new ClientInterface(2); - ci->connectCPCDdaemon(remotehost,port); - - /*ci->listProcesses(remotehost); - - ci->startProcess(remotehost, "1247"); - - ci->stopProcess(remotehost, "1247");*/ - - ci->defineProcess(remotehost, "ndb", "ndb-cluster1", "envirnm", "/ndb/bin", - "-i", "permanent", "/ndb/ndb.2", "team"); - - ci->startProcess(remotehost, "1247"); - - ci->listProcesses(remotehost); - - //ci->undefineProcess(remotehost, "1247"); - - ci->disconnectCPCDdaemon(remotehost); -} diff --git a/storage/ndb/src/cw/util/ClientInterface.cpp b/storage/ndb/src/cw/util/ClientInterface.cpp deleted file mode 100644 index 725a9d47d43..00000000000 --- a/storage/ndb/src/cw/util/ClientInterface.cpp +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "ClientInterface.hpp" - - - -ClientInterface::ClientInterface(Uint32 maxNoOfCPCD) { - sr = new SocketRegistry(maxNoOfCPCD); - ss = new SocketService(); -} - - -ClientInterface::~ClientInterface() { - delete sr; - delete ss; - -} - - -void ClientInterface::connectCPCDdaemon(const char * remotehost, Uint16 port) -{ - sr->createSocketClient(remotehost, port); -} - -void ClientInterface::disconnectCPCDdaemon(const char * remotehost) -{ - sr->removeSocketClient(remotehost); -} - -void ClientInterface::removeCPCDdaemon(const char * remotehost) -{ - sr->removeSocketClient(remotehost); -} - -void ClientInterface::startProcess(const char * remotehost, char * id) { - char buf[255] = "start process "; - char str[80]; - char line[10]; - - strcpy(line, id); - strcpy(str, "id:"); - strcat(str, line); - strcat(str, "\n\n"); - strcat(buf, str); - printf("Request: %s\n", buf); - - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); -} - -void ClientInterface::stopProcess(const char * remotehost, char * id) { - char buf[255] = "stop process "; - char str[80]; - char line[10]; - - strcpy(line, id); - strcpy(str, "id:"); - strcat(str, line); - strcat(str, "\n\n"); - strcat(buf, str); - printf("Request: %s\n", buf); - - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); -} - -void ClientInterface::defineProcess(const char * remotehost, char * name, - char * group, char * env, char * path, - char * args, char * type, char * cwd, char * owner){ - char buf[255] = "define process "; - char str[80]; - char line[10]; - - strcpy(line, name); - strcpy(str, "name:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, group); - strcpy(str, "group:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, env); - strcpy(str, "env:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, path); - strcpy(str, "path:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, args); - strcpy(str, "args:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, type); - strcpy(str, "type:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, cwd); - strcpy(str, "cwd:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, " \n"); - - strcpy(line, owner); - strcpy(str, "owner:"); - strcat(str, line); - strcat(buf, str); - strcat(buf, "\n\n"); - - printf("Request: %s\n", buf); - - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); -} - -void ClientInterface::undefineProcess(const char * remotehost, char * id){ - char buf[255] = "undefine process "; - char str[80]; - char line[10]; - - strcpy(line, id); - strcpy(str, "id:"); - strcat(str, line); - strcat(str, "\n\n"); - strcat(buf, str); - printf("Request: %s\n", buf); - - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); -} - -void ClientInterface::listProcesses(const char * remotehost) { - char buf[255]="list processes\n\n"; - printf("Request: %s\n", buf); - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); 
-} - -void ClientInterface::showProcess(const char * remotehost, char * id) { - char buf[255] = "show process "; - char str[80]; - char line[10]; - - strcpy(line, id); - strcpy(str, "id:"); - strcat(str, line); - strcat(str, "\n\n"); - strcat(buf, str); - printf("Request: %s\n", buf); - - sr->performSend(buf,255,remotehost); - sr->syncPerformReceive(remotehost, *ss, 0); - ss->getPropertyObject(); -} diff --git a/storage/ndb/src/cw/util/ClientInterface.hpp b/storage/ndb/src/cw/util/ClientInterface.hpp deleted file mode 100644 index 798f9c80a3a..00000000000 --- a/storage/ndb/src/cw/util/ClientInterface.hpp +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef CLIENT_IF_HPP -#define CLIENT_IF_HPP -#include -#include -#include -#include -#include -#include -#include "SocketRegistry.hpp" -#include "SocketService.hpp" - -class ClientInterface { -private: - SocketService * ss; - SocketRegistry * sr; - -public: - ClientInterface(Uint32 maxNoOfCPC); - ~ClientInterface(); - void startProcess(const char * remotehost, char * id); - void stopProcess(const char * remotehost, char * id); - void defineProcess(const char * remotehost, char * name, char * group, - char * env, char * path, char * args, char * type, - char * cwd, char * owner); - void undefineProcess(const char * remotehost, char * id); - void listProcesses(const char * remotehost); - void showProcess(const char * remotehost, char * id); - void connectCPCDdaemon(const char * remotehost, Uint16 port); - void disconnectCPCDdaemon(const char * remotehost); - void removeCPCDdaemon(const char * remotehost); - -}; -#endif diff --git a/storage/ndb/src/cw/util/Makefile b/storage/ndb/src/cw/util/Makefile deleted file mode 100644 index f5ab16721be..00000000000 --- a/storage/ndb/src/cw/util/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -include .defs.mk -TYPE := ndbapi - -PIC_ARCHIVE := Y -ARCHIVE_TARGET := socketclient - -# Source files of non-templated classes (.cpp files) -SOURCES = ClientInterface.cpp SocketService.cpp SocketClient.cpp - -include $(NDB_TOP)/Epilogue.mk diff --git a/storage/ndb/src/cw/util/SocketRegistry.cpp b/storage/ndb/src/cw/util/SocketRegistry.cpp deleted file mode 100644 index 0bda227be6e..00000000000 --- a/storage/ndb/src/cw/util/SocketRegistry.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
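ClientInterface above assembles every request with strcpy()/strcat() into fixed 255-byte buffers. The wire format, as the deleted code builds it, is a command word followed by colon-separated key/value fields and a terminating blank line. A small sketch of building the same text with std::string, which avoids the fixed-size buffers, is shown below; the helper name is illustrative and not part of the deleted sources.

// Sketch: building the same request text without fixed strcat() buffers.
#include <string>
#include <cstdio>

std::string build_request(const std::string &command,
                          const std::string &key,
                          const std::string &value) {
  // Mirrors the deleted code's layout: "<command> <key>:<value>\n\n"
  std::string req = command;
  req += ' ';
  req += key;
  req += ':';
  req += value;
  req += "\n\n";
  return req;
}

int main() {
  std::string req = build_request("stop process", "id", "1247");
  std::printf("Request: %s\n", req.c_str());   // same trace output as the original
  return 0;
}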
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "SocketRegistry.hpp" -#include - -template -SocketRegistry::SocketRegistry(Uint32 maxSocketClients) { - -} - - -template -SocketRegistry::~SocketRegistry() { - delete [] m_socketClients; -} - -template -bool -SocketRegistry::createSocketClient(const char * host, Uint16 port) { - - if(port == 0) - return false; - if(host==NULL) - return false; - - SocketClient * socketClient = new SocketClient(host, port); - - if(socketClient->openSocket() < 0 || socketClient == NULL) { - ndbout << "could not connect" << endl; - delete socketClient; - return false; - } - else { - m_socketClients[m_nSocketClients] = socketClient; - m_nSocketClients++; - } - return true; -} - -template -int -SocketRegistry::pollSocketClients(Uint32 timeOutMillis) { - - - - // Return directly if there are no TCP transporters configured - if (m_nSocketClients == 0){ - tcpReadSelectReply = 0; - return 0; - } - struct timeval timeout; - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - - - NDB_SOCKET_TYPE maxSocketValue = 0; - - // Needed for TCP/IP connections - // The read- and writeset are used by select - - FD_ZERO(&tcpReadset); - - // Prepare for sending and receiving - for (Uint32 i = 0; i < m_nSocketClients; i++) { - SocketClient * t = m_socketClients[i]; - - // If the socketclient is connected - if (t->isConnected()) { - - const NDB_SOCKET_TYPE socket = t->getSocket(); - // Find the highest socket value. It will be used by select - if (socket > maxSocketValue) - maxSocketValue = socket; - - // Put the connected transporters in the socket read-set - FD_SET(socket, &tcpReadset); - } - } - - // The highest socket value plus one - maxSocketValue++; - - tcpReadSelectReply = select(maxSocketValue, &tcpReadset, 0, 0, &timeout); -#ifdef NDB_WIN32 - if(tcpReadSelectReply == SOCKET_ERROR) - { - NdbSleep_MilliSleep(timeOutMillis); - } -#endif - - return tcpReadSelectReply; - -} - -template -bool -SocketRegistry::performSend(const char * buf, - Uint32 len, - const char * remotehost) -{ - SocketClient * socketClient; - for(Uint32 i=0; i < m_nSocketClients; i++) { - socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), remotehost)==0) { - if(socketClient->isConnected()) { - if(socketClient->writeSocket(buf, len)>0) - return true; - else - return false; - } - } - } - return false; -} - -template -int -SocketRegistry::performReceive(T & t) { - char buf[255] ; //temp. just for testing. must fix better - - if(tcpReadSelectReply > 0){ - for (Uint32 i=0; igetSocket(); - if(sc->isConnected() && FD_ISSET(socket, &tcpReadset)) { - t->runSession(socket,t); - } - } - return 1; - } - return 0; - -} - - - -template -inline -int -SocketRegistry::syncPerformReceive(char * host, - T & t, - Uint32 timeOutMillis) { - char buf[255] ; //temp. just for testing. 
must fix better - struct timeval timeout; - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - int reply; - SocketClient * sc; - for(Uint32 i=0; i < m_nSocketClients; i++) { - sc = m_socketClients[i]; - if(strcmp(sc->gethostname(), remotehost)==0) { - if(sc->isConnected()) { - FD_ZERO(&tcpReadset); - reply = select(sc->getSocket(), &tcpReadset, 0, 0, &timeout); - if(reply > 0) { - return t->runSession(sc->getSocket(), t); - } - } - - } - } - return 0; -} - - - -template -bool -SocketRegistry::reconnect(const char * host){ - for(Uint32 i=0; i < m_nSocketClients; i++) { - SocketClient * socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), host)==0) { - if(!socketClient->isConnected()) { - if(socketClient->openSocket() > 0) - return true; - else return false; - } - } - } - return false; -} - -template -bool -SocketRegistry::removeSocketClient(const char * host){ - for(Uint32 i=0; i < m_nSocketClients; i++) { - SocketClient * socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), host)==0) { - if(!socketClient->isConnected()) { - if(socketClient->closeSocket() > 0) { - delete socketClient; - return true; - } - else return false; - } - } - } - return false; -} diff --git a/storage/ndb/src/cw/util/SocketRegistry.hpp b/storage/ndb/src/cw/util/SocketRegistry.hpp deleted file mode 100644 index 9e1b737f224..00000000000 --- a/storage/ndb/src/cw/util/SocketRegistry.hpp +++ /dev/null @@ -1,290 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SocketClientRegistry_H -#define SocketClientRegistry_H - -#include -#include - -#include "SocketClient.hpp" - -template -class SocketRegistry { - -public: - SocketRegistry(Uint32 maxSocketClients); - ~SocketRegistry(); - /** - * creates and adds a SocketClient to m_socketClients[] - * @param host - host name - * @param port - port to connect to - */ - bool createSocketClient(const char * host, const Uint16 port); - - /** - * performReceive reads from sockets should do more stuff - */ - int performReceive(T &); - - - /** - * performReceive reads from sockets should do more stuff - */ - int syncPerformReceive(const char* ,T &, Uint32); - - - /** - * performSend sends a command to a host - */ - bool performSend(const char * buf, Uint32 len, const char * remotehost); - - /** - * pollSocketClients performs a select (for a max. of timeoutmillis) or - * until there is data to be read from any SocketClient - * @param timeOutMillis - select timeout - */ - int pollSocketClients(Uint32 timeOutMillis); - - /** - * reconnect tries to reconnect to a cpcd given its hostname - * @param host - name of host to reconnect to. 
- */ - bool reconnect(const char * host); - - - /** - * removeSocketClient - * @param host - name of host for which to remove the SocketConnection - */ - bool removeSocketClient(const char * host); - -private: - SocketClient** m_socketClients; - Uint32 m_maxSocketClients; - Uint32 m_nSocketClients; - int tcpReadSelectReply; - fd_set tcpReadset; - - -}; - - -template -inline -SocketRegistry::SocketRegistry(Uint32 maxSocketClients) { - m_maxSocketClients = maxSocketClients; - m_socketClients = new SocketClient * [m_maxSocketClients]; - m_nSocketClients = 0; -} - - -template -inline -SocketRegistry::~SocketRegistry() { - delete [] m_socketClients; -} - -template -inline -bool -SocketRegistry::createSocketClient(const char * host, Uint16 port) { - - if(port == 0) - return false; - if(host==NULL) - return false; - - SocketClient * socketClient = new SocketClient(host, port); - - if(socketClient->openSocket() < 0 || socketClient == NULL) { - ndbout << "could not connect" << endl; - delete socketClient; - return false; - } - else { - m_socketClients[m_nSocketClients] = socketClient; - m_nSocketClients++; - } - return true; -} - -template -inline -int -SocketRegistry::pollSocketClients(Uint32 timeOutMillis) { - - - - // Return directly if there are no TCP transporters configured - if (m_nSocketClients == 0){ - tcpReadSelectReply = 0; - return 0; - } - struct timeval timeout; - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - - - NDB_SOCKET_TYPE maxSocketValue = 0; - - // Needed for TCP/IP connections - // The read- and writeset are used by select - - FD_ZERO(&tcpReadset); - - // Prepare for sending and receiving - for (Uint32 i = 0; i < m_nSocketClients; i++) { - SocketClient * t = m_socketClients[i]; - - // If the socketclient is connected - if (t->isConnected()) { - - const NDB_SOCKET_TYPE socket = t->getSocket(); - // Find the highest socket value. It will be used by select - if (socket > maxSocketValue) - maxSocketValue = socket; - - // Put the connected transporters in the socket read-set - FD_SET(socket, &tcpReadset); - } - } - - // The highest socket value plus one - maxSocketValue++; - - tcpReadSelectReply = select(maxSocketValue, &tcpReadset, 0, 0, &timeout); -#ifdef NDB_WIN32 - if(tcpReadSelectReply == SOCKET_ERROR) - { - NdbSleep_MilliSleep(timeOutMillis); - } -#endif - - return tcpReadSelectReply; - -} - -template -inline -bool -SocketRegistry::performSend(const char * buf, Uint32 len, const char * remotehost) -{ - SocketClient * socketClient; - for(Uint32 i=0; i < m_nSocketClients; i++) { - socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), remotehost)==0) { - if(socketClient->isConnected()) { - if(socketClient->writeSocket(buf, len)>0) - return true; - else - return false; - } - } - } - return false; -} - -template -inline -int -SocketRegistry::performReceive(T & t) { - char buf[255] ; //temp. just for testing. must fix better - - if(tcpReadSelectReply > 0){ - for (Uint32 i=0; igetSocket(); - if(sc->isConnected() && FD_ISSET(socket, &tcpReadset)) { - t->runSession(socket,t); - } - } - return 1; - } - return 0; - -} - - - -template -inline -int -SocketRegistry::syncPerformReceive(const char * remotehost, - T & t, - Uint32 timeOutMillis) { - char buf[255] ; //temp. just for testing. 
must fix better - struct timeval timeout; - timeout.tv_sec = timeOutMillis / 1000; - timeout.tv_usec = (timeOutMillis % 1000) * 1000; - int reply; - SocketClient * sc; - for(Uint32 i=0; i < m_nSocketClients; i++) { - sc = m_socketClients[i]; - if(strcmp(sc->gethostname(), remotehost)==0) { - if(sc->isConnected()) { - /*FD_ZERO(&tcpReadset); - reply = select(sc->getSocket()+1, 0, 0, 0, &timeout); - reply=1; - if(reply > 0) {*/ - t.runSession(sc->getSocket(), t); - //} - } - - } - } -} - - - -template -inline -bool -SocketRegistry::reconnect(const char * host){ - for(Uint32 i=0; i < m_nSocketClients; i++) { - SocketClient * socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), host)==0) { - if(!socketClient->isConnected()) { - if(socketClient->openSocket() > 0) - return true; - else return false; - } - } - } - return false; -} - -template -inline -bool -SocketRegistry::removeSocketClient(const char * host){ - for(Uint32 i=0; i < m_nSocketClients; i++) { - SocketClient * socketClient = m_socketClients[i]; - if(strcmp(socketClient->gethostname(), host)==0) { - if(!socketClient->isConnected()) { - if(socketClient->closeSocket() > 0) { - delete socketClient; - return true; - } - else return false; - } - } - } - return false; -} - - -#endif // Define of SocketRegistry diff --git a/storage/ndb/src/cw/util/SocketService.cpp b/storage/ndb/src/cw/util/SocketService.cpp deleted file mode 100644 index ee64f3bc4f1..00000000000 --- a/storage/ndb/src/cw/util/SocketService.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include -#include -#include "SocketService.hpp" - -SocketService::SocketService() { - -} - -SocketService::~SocketService() { - -} - -int -SocketService::runSession(NDB_SOCKET_TYPE socket, SocketService & ss){ - InputStream *m_input = new SocketInputStream(socket); - char buf[255]; - - m_input->gets(buf,255); - ndbout_c("SocketService:received: %s\n", buf); - ndbout_c("This should now be parsed\n"); - ndbout_c("and put in a property object.\n"); - ndbout_c("The propery is then accessible from the ClientInterface.\n"); - ndbout_c("by getPropertyObject.\n"); - ndbout_c("At least this is the idea."); - /*Parser_t *m_parser = - new Parser(commands, *m_input, true, true, true); - */ - /** to do - * add a proprty object to which the parser will put its result. - */ - - return 1 ; //succesful - //return 0; //unsuccesful - -} - -void -SocketService::getPropertyObject() { - ndbout << "get property object. 
return to front end or something" << endl; -} - - diff --git a/storage/ndb/src/cw/util/SocketService.hpp b/storage/ndb/src/cw/util/SocketService.hpp deleted file mode 100644 index 5a860f3aee1..00000000000 --- a/storage/ndb/src/cw/util/SocketService.hpp +++ /dev/null @@ -1,46 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SOCKET_SERVICE_HPP -#define SOCKET_SERVICE_HPP -#include -#include -#include -#include -#include -#include "SocketRegistry.hpp" - - - - -class SocketService { - friend class SocketRegistry; -private: - typedef Parser Parser_t; - int runSession(NDB_SOCKET_TYPE socket, SocketService &); -public: - void getPropertyObject(); - SocketService(); - ~SocketService(); - -}; - - - - - - -#endif diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT b/storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT deleted file mode 100644 index 97fe959bb2c..00000000000 --- a/storage/ndb/src/external/WIN32.x86/sci/lib/SISCI_LIBRARY_WIN32.TXT +++ /dev/null @@ -1,77 +0,0 @@ -SISCI_API LIBRARIES AND LINKING -=============================== - - -/MD, /ML, /MT (Use Run-Time Library) - - - -sisci_api.lib - Single threaded - -sisci_api_md.lib - Multithreaded DLL - -sisci_api_mt.lib - Multithreaded - - - - -With these libraries, you can select either single-threaded or multithreaded run-time routines, -indicate that a multithreaded module is a dynamic-link library (DLL), and select the retail -or debug version of the library. - -Note Having more than one copy of the run-time libraries in a process can cause problems, -because static data in one copy is not shared with the other copy. To ensure that your process -contains only one copy, avoid mixing static and dynamic versions of the run-time libraries. -The linker will prevent you from linking with both static and dynamic versions within one .EXE file, -but you can still end up with two (or more) copies of the run-time libraries. -For example, a dynamic-link library linked with the static (non-DLL) versions of the run-time -libraries can cause problems when used with an .EXE file that was linked with the dynamic (DLL) -version of the run-time libraries. (You should also avoid mixing the debug and non-debug versions -of the libraries in one process.) - - -MD Multithreaded ----------------- - -/MD Multithreaded DLL Defines _MT and _DLL so that both multithread- and DLL-specific versions -of the run-time routines are selected from the standard .H files. This option also causes the -compiler to place the library name MSVCRT.LIB into the .OBJ file. -Applications compiled with this option are statically linked to MSVCRT.LIB. This library provides -a layer of code that allows the linker to resolve external references. 
The actual working code is -contained in MSVCRT.DLL, which must be available at run time to applications linked with MSVCRT.LIB. - - -/MDd Debug Multithreaded DLL Defines _DEBUG, _MT, and _DLL so that debug multithread- and DLL-specific -versions of the run-time routines are selected from the standard .H files. It also causes the compiler -to place the library name MSVCRTD.LIB into the .OBJ file. - - -ML Single-Threaded ------------------- - - -/ML Single-Threaded Causes the compiler to place the library name LIBC.LIB into the .OBJ file so -that the linker will use LIBC.LIB to resolve external symbols. This is the compiler’s default action. -LIBC.LIB does not provide multithread support. - - -/MLd Debug Single-Threaded Defines _DEBUG and causes the compiler to place the library name LIBCD.LIB -into the .OBJ file so that the linker will use LIBCD.LIB to resolve external symbols. LIBCD.LIB does -not provide multithread support. - - -MT Multithreaded ----------------- - - -/MT Multithreaded Defines _MT so that multithread-specific versions of the run-time routines are -selected from the standard header (.H) files. This option also causes the compiler to place the library -name LIBCMT.LIB into the .OBJ file so that the linker will use LIBCMT.LIB to resolve external symbols. - -Either /MT or /MD (or their debug equivalents /MTd or /MDd) is required to create multithreaded programs. -/MTd Debug Multithreaded Defines _DEBUG and _MT. Defining _MT causes multithread-specific versions of -the run-time routines to be selected from the standard .H files. This option also causes the compiler -to place the library name LIBCMTD.LIB into the .OBJ file so that the linker will use LIBCMTD.LIB to -resolve external symbols. Either /MTd or /MDd (or their non-debug equivalents /MT or MD) is required to -create multithreaded programs. 
- diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib deleted file mode 100644 index 572169a2016..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib.lib and /dev/null differ diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib deleted file mode 100644 index f18cba61336..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_md.lib and /dev/null differ diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib deleted file mode 100644 index 3e9982468ea..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/scilib_mt.lib and /dev/null differ diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib deleted file mode 100644 index 3fbff6ec809..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api.lib and /dev/null differ diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib deleted file mode 100644 index 1d8d42d1d35..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_md.lib and /dev/null differ diff --git a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib b/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib deleted file mode 100644 index 017fad7ba31..00000000000 Binary files a/storage/ndb/src/external/WIN32.x86/sci/lib/sisci_api_mt.lib and /dev/null differ diff --git a/storage/ndb/src/kernel/Makefile.am b/storage/ndb/src/kernel/Makefile.am deleted file mode 100644 index 7652c5f3ade..00000000000 --- a/storage/ndb/src/kernel/Makefile.am +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = vm error blocks - -include $(top_srcdir)/storage/ndb/config/common.mk.am - -ndbbin_PROGRAMS = ndbd - -ndbd_SOURCES = main.cpp SimBlockList.cpp - -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am - -INCLUDES += \ - -I$(srcdir)/blocks/cmvmi \ - -I$(srcdir)/blocks/dbacc \ - -I$(srcdir)/blocks/dbdict \ - -I$(srcdir)/blocks/dbdih \ - -I$(srcdir)/blocks/dblqh \ - -I$(srcdir)/blocks/dbtc \ - -I$(srcdir)/blocks/dbtup \ - -I$(srcdir)/blocks/ndbfs \ - -I$(srcdir)/blocks/ndbcntr \ - -I$(srcdir)/blocks/qmgr \ - -I$(srcdir)/blocks/trix \ - -I$(srcdir)/blocks/backup \ - -I$(srcdir)/blocks/dbutil \ - -I$(srcdir)/blocks/suma \ - -I$(srcdir)/blocks/dbtux \ - -I$(srcdir)/blocks - -LDADD += \ - blocks/libblocks.a \ - vm/libkernel.a \ - error/liberror.a \ - $(top_builddir)/storage/ndb/src/common/transporter/libtransporter.la \ - $(top_builddir)/storage/ndb/src/common/debugger/libtrace.la \ - $(top_builddir)/storage/ndb/src/common/debugger/signaldata/libsignaldataprint.la \ - $(top_builddir)/storage/ndb/src/common/logger/liblogger.la \ - $(top_builddir)/storage/ndb/src/common/mgmcommon/libmgmsrvcommon.la \ - $(top_builddir)/storage/ndb/src/mgmapi/libmgmapi.la \ - $(top_builddir)/storage/ndb/src/common/portlib/libportlib.la \ - $(top_builddir)/storage/ndb/src/common/util/libgeneral.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ - -windoze-dsp: ndbd.dsp - -storage/ndbd.dsp: Makefile \ - $(top_srcdir)/storage/ndb/config/win-prg.am \ - $(top_srcdir)/storage/ndb/config/win-name \ - $(top_srcdir)/storage/ndb/config/win-includes \ - $(top_srcdir)/storage/ndb/config/win-sources \ - $(top_srcdir)/storage/ndb/config/win-libraries - cat $(top_srcdir)/storage/ndb/config/win-prg.am > $@ - @$(top_srcdir)/storage/ndb/config/win-name $@ $(ndbbin_PROGRAMS) - @$(top_srcdir)/storage/ndb/config/win-includes $@ $(INCLUDES) - @$(top_srcdir)/storage/ndb/config/win-sources $@ $(ndbd_SOURCES) - @$(top_srcdir)/storage/ndb/config/win-libraries $@ LINK $(LDADD) diff --git a/storage/ndb/src/kernel/SimBlockList.cpp b/storage/ndb/src/kernel/SimBlockList.cpp deleted file mode 100644 index 68c0b4a4642..00000000000 --- a/storage/ndb/src/kernel/SimBlockList.cpp +++ /dev/null @@ -1,136 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "SimBlockList.hpp" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef VM_TRACE -#define NEW_BLOCK(B) new B -#else -enum SIMBLOCKLIST_DUMMY { A_VALUE = 0 }; - -void * operator new (size_t sz, SIMBLOCKLIST_DUMMY dummy){ - char * tmp = (char *)malloc(sz); - -#ifndef NDB_PURIFY -#ifdef VM_TRACE - const int initValue = 0xf3; -#else - const int initValue = 0x0; -#endif - - const int p = (sz / 4096); - const int r = (sz % 4096); - - for(int i = 0; i 0) - memset(tmp+p*4096, initValue, r); - -#endif - - return tmp; -} -#define NEW_BLOCK(B) new(A_VALUE) B -#endif - -void -SimBlockList::load(EmulatorData& data){ - noOfBlocks = NO_OF_BLOCKS; - theList = new SimulatedBlock * [noOfBlocks]; - Dbdict* dbdict = 0; - Dbdih* dbdih = 0; - Pgman* pg = 0; - Lgman* lg = 0; - Tsman* ts = 0; - - Block_context ctx(*data.theConfiguration, *data.m_mem_manager); - - SimulatedBlock * fs = 0; - { - Uint32 dl; - const ndb_mgm_configuration_iterator * p = - ctx.m_config.getOwnConfigIterator(); - if(p && !ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl){ - fs = NEW_BLOCK(VoidFs)(ctx); - } else { - fs = NEW_BLOCK(Ndbfs)(ctx); - } - } - - theList[0] = pg = NEW_BLOCK(Pgman)(ctx); - theList[1] = lg = NEW_BLOCK(Lgman)(ctx); - theList[2] = ts = NEW_BLOCK(Tsman)(ctx, pg, lg); - theList[3] = NEW_BLOCK(Dbacc)(ctx); - theList[4] = NEW_BLOCK(Cmvmi)(ctx); - theList[5] = fs; - theList[6] = dbdict = NEW_BLOCK(Dbdict)(ctx); - theList[7] = dbdih = NEW_BLOCK(Dbdih)(ctx); - theList[8] = NEW_BLOCK(Dblqh)(ctx); - theList[9] = NEW_BLOCK(Dbtc)(ctx); - theList[10] = NEW_BLOCK(Dbtup)(ctx, pg); - theList[11] = NEW_BLOCK(Ndbcntr)(ctx); - theList[12] = NEW_BLOCK(Qmgr)(ctx); - theList[13] = NEW_BLOCK(Trix)(ctx); - theList[14] = NEW_BLOCK(Backup)(ctx); - theList[15] = NEW_BLOCK(DbUtil)(ctx); - theList[16] = NEW_BLOCK(Suma)(ctx); - theList[17] = NEW_BLOCK(Dbtux)(ctx); - theList[18] = NEW_BLOCK(Restore)(ctx); - assert(NO_OF_BLOCKS == 19); -} - -void -SimBlockList::unload(){ - if(theList != 0){ - for(int i = 0; i~SimulatedBlock(); - free(theList[i]); -#else - delete(theList[i]); -#endif - theList[i] = 0; - } - } - delete [] theList; - theList = 0; - noOfBlocks = 0; - } -} diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt deleted file mode 100644 index 150400b9deb..00000000000 --- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt +++ /dev/null @@ -1,565 +0,0 @@ -Next QMGR 1 -Next NDBCNTR 1002 -Next NDBFS 2000 -Next DBACC 3002 -Next DBTUP 4029 -Next DBLQH 5050 -Next DBDICT 6008 -Next DBDIH 7195 -Next DBTC 8058 -Next CMVMI 9000 -Next BACKUP 10038 -Next DBUTIL 11002 -Next DBTUX 12008 -Next SUMA 13034 -Next SUMA 13036 -Next LGMAN 15001 -Next TSMAN 16001 - -TESTING NODE FAILURE, ARBITRATION ---------------------------------- - -911 - 919: -Crash president when he starts to run in ArbitState 1-9. 
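The SimBlockList.cpp hunk above shows that, in VM_TRACE builds, block objects are allocated through a tagged operator new that fills the memory with the poison pattern 0xf3, so reads of fields that were never initialised stand out in a debugger. A minimal sketch of that idiom follows; it assumes nothing beyond standard C++, and the names are illustrative rather than taken from the deleted file.

// Sketch of a tagged operator new that poisons the allocation, in the
// spirit of SimBlockList's NEW_BLOCK macro (illustrative only).
#include <cstdlib>
#include <cstring>

enum PoisonTag { POISON };

void *operator new(size_t sz, PoisonTag) {
  void *p = std::malloc(sz);
  if (p) std::memset(p, 0xf3, sz);   // debug pattern: uninitialised reads are obvious
  return p;
}
// Matching placement delete, used only if the constructor throws.
void operator delete(void *p, PoisonTag) { std::free(p); }

struct Block { int filled_in_later; };

int main() {
  Block *b = new (POISON) Block;     // memory arrives pre-filled with 0xf3
  b->filled_in_later = 42;           // real fields are still set explicitly later
  b->~Block();
  std::free(b);                      // mirrors SimBlockList::unload(): dtor, then free
  return 0;
}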
- -910: Crash new president after node crash - -934 : Crash president in ALLOC_NODE_ID_REQ - -935 : Crash master on node failure (delayed) - and skip sending GSN_COMMIT_FAILREQ to specified node - -ERROR CODES FOR TESTING NODE FAILURE, GLOBAL CHECKPOINT HANDLING: ------------------------------------------------------------------ - -7000: -Insert system error in master when global checkpoint is idle. - -7001: -Insert system error in master after receiving GCP_PREPARE from -all nodes in the cluster. - -7002: -Insert system error in master after receiving GCP_NODEFINISH from -all nodes in the cluster. - -7003: -Insert system error in master after receiving GCP_SAVECONF from -all nodes in the cluster. - -7004: -Insert system error in master after completing global checkpoint with -all nodes in the cluster. - -7005: -Insert system error in GCP participant when receiving GCP_PREPARE. - -7006: -Insert system error in GCP participant when receiving GCP_COMMIT. - -7007: -Insert system error in GCP participant when receiving GCP_TCFINISHED. - -7008: -Insert system error in GCP participant when receiving COPY_GCICONF. - -5000: -Insert system error in GCP participant when receiving GCP_SAVEREQ. - -5007: -Delay GCP_SAVEREQ by 10 secs - -7165: Delay INCL_NODE_REQ in starting node yeilding error in GCP_PREPARE - -7030: Delay in GCP_PREPARE until node has completed a node failure -7031: Delay in GCP_PREPARE and die 3s later - -7177: Delay copying of sysfileData in execCOPY_GCIREQ - -7180: Crash master during master-take-over in execMASTER_LCPCONF - -7183: Crash when receiving COPY_GCIREQ - -7184: Crash before starting next GCP after a node failure - -7185: Dont reply to COPY_GCI_REQ where reason == GCP - -7193: Dont send LCP_FRAG_ORD to self, and crash when sending first - LCP_FRAG_ORD(last) - -7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF - -ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING: ------------------------------------------------------------------ - -7009: -Insert system error in master when local checkpoint is idle. - -7010: -Insert system error in master when local checkpoint is in the -state clcpStatus = CALCULATE_KEEP_GCI. - -7011: -Stop local checkpoint in the state CALCULATE_KEEP_GCI. - -7012: -Restart local checkpoint after stopping in CALCULATE_KEEP_GCI. - -Method: -1) Error 7011 in master, wait until report of stopped. -2) Error xxxx in participant to crash it. -3) Error 7012 in master to start again. - -7013: -Insert system error in master when local checkpoint is in the -state clcpStatus = COPY_GCI before sending COPY_GCIREQ. - -7014: -Insert system error in master when local checkpoint is in the -state clcpStatus = TC_CLOPSIZE before sending TC_CLOPSIZEREQ. - -7015: -Insert system error in master when local checkpoint is in the -state clcpStatus = START_LCP_ROUND before sending START_LCP_ROUND. - -7016: -Insert system error in master when local checkpoint is in the -state clcpStatus = START_LCP_ROUND after receiving LCP_REPORT. - -7017: -Insert system error in master when local checkpoint is in the -state clcpStatus = TAB_COMPLETED. - -7018: -Insert system error in master when local checkpoint is in the -state clcpStatus = TAB_SAVED before sending DIH_LCPCOMPLETE. - -7019: -Insert system error in master when local checkpoint is in the -state clcpStatus = IDLE before sending CONTINUEB(ZCHECK_TC_COUNTER). - -7020: -Insert system error in local checkpoint participant at reception of -COPY_GCIREQ. 
- -7075: Master -Don't send any LCP_FRAG_ORD(last=true) -And crash when all have "not" been sent - -8000: Crash particpant when receiving TCGETOPSIZEREQ -8001: Crash particpant when receiving TC_CLOPSIZEREQ -5010: Crash any when receiving LCP_FRAGORD - -7021: Crash in master when receiving START_LCP_REQ -7022: Crash in !master when receiving START_LCP_REQ - -7023: Crash in master when sending START_LCP_CONF -7024: Crash in !master when sending START_LCP_CONF - -7025: Crash in master when receiving LCP_FRAG_REP -7016: Crash in !master when receiving LCP_FRAG_REP - -7026: Crash in master when changing state to LCP_TAB_COMPLETED -7017: Crash in !master when changing state to LCP_TAB_COMPLETED - -7027: Crash in master when changing state to LCP_TAB_SAVED -7018: Crash in master when changing state to LCP_TAB_SAVED - -7191: Crash when receiving LCP_COMPLETE_REP -7192: Crash in setLcpActiveStatusStart - when dead node missed to LCP's - -ERROR CODES FOR TESTING NODE FAILURE, FAILURE IN COPY FRAGMENT PROCESS: ------------------------------------------------------------------------ - -5002: -Insert node failure in starting node when receiving a tuple copied from the copy node -as part of copy fragment process. -5003: -Insert node failure when receiving ABORT signal. - -5004: -Insert node failure handling when receiving COMMITREQ. - -5005: -Insert node failure handling when receiving COMPLETEREQ. - -5006: -Insert node failure handling when receiving ABORTREQ. - -5042: -As 5002, but with specified table (see DumpStateOrd) - -These error code can be combined with error codes for testing time-out -handling in DBTC to ensure that node failures are also well handled in -time-out handling. They can also be used to test multiple node failure -handling. - -5045: Crash in PREPARE_COPY_FRAG_REQ -5046: Crash if LQHKEYREQ (NrCopy) comes when frag-state is incorrect - -ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBLQH -------------------------------------------------- -5011: -Delay execution of COMMIT signal 2 seconds to generate time-out. - -5012 (use 5017): -First delay execution of COMMIT signal 2 seconds to generate COMMITREQ. -Delay execution of COMMITREQ signal 2 seconds to generate time-out. - -5013: -Delay execution of COMPLETE signal 2 seconds to generate time-out. - -5014 (use 5018): -First delay execution of COMPLETE signal 2 seconds to generate COMPLETEREQ. -Delay execution of COMPLETEREQ signal 2 seconds to generate time-out. - -5015: -Delay execution of ABORT signal 2 seconds to generate time-out. - -5016: (ABORTREQ only as part of take-over) -Delay execution of ABORTREQ signal 2 seconds to generate time-out. - -5031: lqhKeyRef, ZNO_TC_CONNECT_ERROR -5032: lqhKeyRef, ZTEMPORARY_REDO_LOG_FAILURE -5033: lqhKeyRef, ZTAIL_PROBLEM_IN_LOG_ERROR - -5034: Don't pop scan queue - -5035: Delay ACC_CONTOPCONT - -5038: Drop LQHKEYREQ + set 5039 -5039: Drop ABORT + set 5003 - -8048: Make TC not choose own node for simple/dirty read -5041: Crash is receiving simple read from other TC on different node - -8050: Send TCKEYREF is operation is non local - -5100,5101: Drop ABORT req in primary replica - Crash on "next" ABORT - -ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC -------------------------------------------------- -8040: -Delay execution of ABORTED signal 2 seconds to generate time-out. - -8041: -Delay execution of COMMITTED signal 2 seconds to generate time-out. -8042 (use 8046): -Delay execution of COMMITTED signal 2 seconds to generate COMMITCONF. 
-Delay execution of COMMITCONF signal 2 seconds to generate time-out. - -8043: -Delay execution of COMPLETED signal 2 seconds to generate time-out. - -8044 (use 8047): -Delay execution of COMPLETED signal 2 seconds to generate COMPLETECONF. -Delay execution of COMPLETECONF signal 2 seconds to generate time-out. - -8045: (ABORTCONF only as part of take-over) -Delay execution of ABORTCONF signal 2 seconds to generate time-out. - -8050: Send ZABORT_TIMEOUT_BREAK delayed - -8053: Crash in timeOutFoundLab, state CS_WAIT_COMMIT_CONF - -5048: Crash in execCOMMIT -5049: SET_ERROR_INSERT_VALUE(5048) - -ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC -------------------------------------------------- - -8003: Throw away a LQHKEYCONF in state STARTED -8004: Throw away a LQHKEYCONF in state RECEIVING -8005: Throw away a LQHKEYCONF in state REC_COMMITTING -8006: Throw away a LQHKEYCONF in state START_COMMITTING - -8007: Ignore send of LQHKEYREQ in state STARTED -8008: Ignore send of LQHKEYREQ in state START_COMMITTING - -8009: Ignore send of LQHKEYREQ+ATTRINFO in state STARTED -8010: Ignore send of LQHKEYREQ+ATTRINFO in state START_COMMITTING - -8011: Abort at send of CONTINUEB(ZSEND_ATTRINFO) in state STARTED -8012: Abort at send of CONTINUEB(ZSEND_ATTRINFO) in state START_COMMITTING - -8013: Ignore send of CONTINUEB(ZSEND_COMPLETE_LOOP) (should crash eventually) -8014: Ignore send of CONTINUEB(ZSEND_COMMIT_LOOP) (should crash eventually) - -8015: Ignore ATTRINFO signal in DBTC in state REC_COMMITTING -8016: Ignore ATTRINFO signal in DBTC in state RECEIVING - -8017: Return immediately from DIVERIFYCONF (should crash eventually) -8018: Throw away a COMMITTED signal -8019: Throw away a COMPLETED signal - -TESTING TAKE-OVER FUNCTIONALITY IN DBTC ---------------------------------------- - -8002: Crash when sending LQHKEYREQ -8029: Crash when receiving LQHKEYCONF -8030: Crash when receiving COMMITTED -8031: Crash when receiving COMPLETED -8020: Crash when all COMMITTED has arrived -8021: Crash when all COMPLETED has arrived -8022: Crash when all LQHKEYCONF has arrived - -COMBINATION OF TIME-OUT + CRASH -------------------------------- - -8023 (use 8024): Ignore LQHKEYCONF and crash when ABORTED signal arrives by setting 8024 -8025 (use 8026): Ignore COMMITTED and crash when COMMITCONF signal arrives by setting 8026 -8027 (use 8028): Ignore COMPLETED and crash when COMPLETECONF signal arrives by setting 8028 - -ABORT OF TCKEYREQ ------------------ - -8032: No free TC records any more - -8037 : Invalid schema version in TCINDXREQ - ------- - -8038 : Simulate API disconnect just after SCAN_TAB_REQ - -8057 : Send only 1 COMMIT per timeslice - -8052 : Simulate failure of TransactionBufferMemory allocation for OI lookup - -8051 : Simulate failure of allocation for saveINDXKEYINFO - - -CMVMI ------ -9000 Set RestartOnErrorInsert to restart -n -9998 Enter endless loop (trigger watchdog) -9999 Crash system immediatly - -Test Crashes in handling node restarts --------------------------------------- - -7121: Crash after receiving permission to start (START_PERMCONF) in starting - node. -7122: Crash master when receiving request for permission to start (START_PERMREQ). -7123: Crash any non-starting node when receiving information about a starting node - (START_INFOREQ) -7124: Respond negatively on an info request (START_INFOREQ) -7125: Stop an invalidate Node LCP process in the middle to test if START_INFOREQ - stopped by long-running processes are handled in a correct manner. 
-7126: Allow node restarts for all nodes (used in conjunction with 7025) -7127: Crash when receiving a INCL_NODEREQ message. -7128: Crash master after receiving all INCL_NODECONF from all nodes -7129: Crash master after receiving all INCL_NODECONF from all nodes and releasing - the lock on the dictionary -7130: Crash starting node after receiving START_MECONF -7131: Crash when receiving START_COPYREQ in master node -7132: Crash when receiving START_COPYCONF in starting node - -7170: Crash when receiving START_PERMREF (InitialStartRequired) - -8039: DBTC delay INCL_NODECONF and kill starting node - -7174: Crash starting node before sending DICT_LOCK_REQ -7175: Master sends one fake START_PERMREF (ZNODE_ALREADY_STARTING_ERROR) -7176: Slave NR pretends master does not support DICT lock (rolling upgrade) - -DICT: -6000 Crash during NR when receiving DICTSTARTREQ -6001 Crash during NR when receiving SCHEMA_INFO -6002 Crash during NR soon after sending GET_TABINFO_REQ - -LQH: -5026 Crash when receiving COPY_ACTIVEREQ -5027 Crash when receiving STAT_RECREQ - -5043 Crash starting node, when scan is finished on primary replica - -Test Crashes in handling take over ----------------------------------- - -7133: Crash when receiving START_TOREQ -7134: Crash master after receiving all START_TOCONF -7135: Crash master after copying table 0 to starting node -7136: Crash master after completing copy of tables -7137: Crash master after adding a fragment before copying it -7138: Crash when receiving CREATE_FRAGREQ in prepare phase -7139: Crash when receiving CREATE_FRAGREQ in commit phase -7140: Crash master when receiving all CREATE_FRAGCONF in prepare phase -7141: Crash master when receiving all CREATE_FRAGCONF in commit phase -7142: Crash master when receiving COPY_FRAGCONF -7143: Crash master when receiving COPY_ACTIVECONF -7144: Crash when receiving END_TOREQ -7145: Crash master after receiving first END_TOCONF -7146: Crash master after receiving all END_TOCONF -7147: Crash master after receiving first START_TOCONF -7148: Crash master after receiving first CREATE_FRAGCONF -7152: Crash master after receiving first UPDATE_TOCONF -7153: Crash master after receiving all UPDATE_TOCONF -7154: Crash when receiving UPDATE_TOREQ -7155: Crash master when completing writing start take over info -7156: Crash master when completing writing end take over info - -Test failures in various states in take over functionality ----------------------------------------------------------- -7157: Block take over at start take over -7158: Block take over at sending of START_TOREQ -7159: Block take over at selecting next fragment -7160: Block take over at creating new fragment -7161: Block take over at sending of CREATE_FRAGREQ in prepare phase -7162: Block take over at sending of CREATE_FRAGREQ in commit phase -7163: Block take over at sending of UPDATE_TOREQ at end of copy frag -7164: Block take over at sending of END_TOREQ -7169: Block take over at sending of UPDATE_TOREQ at end of copy - -5008: Crash at reception of EMPTY_LCPREQ (at master take over after NF) -5009: Crash at sending of EMPTY_LCPCONF (at master take over after NF) - -Test Crashes in Handling Graceful Shutdown ------------------------------------------- -7065: Crash when receiving STOP_PERMREQ in master -7066: Crash when receiving STOP_PERMREQ in slave -7067: Crash when receiving DIH_SWITCH_REPLICA_REQ -7068: Crash when receiving DIH_SWITCH_REPLICA_CONF - - -Backup Stuff: ------------------------------------------- -10001: Crash on NODE_FAILREP in Backup 
coordinator -10002: Crash on NODE_FAILREP when coordinatorTakeOver -10003: Crash on PREP_CREATE_TRIG_{CONF/REF} (only coordinator) -10004: Crash on START_BACKUP_{CONF/REF} (only coordinator) -10005: Crash on CREATE_TRIG_{CONF/REF} (only coordinator) -10006: Crash on WAIT_GCP_REF (only coordinator) -10007: Crash on WAIT_GCP_CONF (only coordinator) -10008: Crash on WAIT_GCP_CONF during start of backup (only coordinator) -10009: Crash on WAIT_GCP_CONF during stop of backup (only coordinator) -10010: Crash on BACKUP_FRAGMENT_CONF (only coordinator) -10011: Crash on BACKUP_FRAGMENT_REF (only coordinator) -10012: Crash on DROP_TRIG_{CONF/REF} (only coordinator) -10013: Crash on STOP_BACKUP_{CONF/REF} (only coordinator) -10014: Crash on DEFINE_BACKUP_REQ (participant) -10015: Crash on START_BACKUP_REQ (participant) -10016: Crash on BACKUP_FRAGMENT_REQ (participant) -10017: Crash on SCAN_FRAGCONF (participant) -10018: Crash on FSAPPENDCONF (participant) -10019: Crash on TRIG_ATTRINFO (participant) -10020: Crash on STOP_BACKUP_REQ (participant) -10021: Crash on NODE_FAILREP in participant not becoming coordinator - -10022: Fake no backup records at DEFINE_BACKUP_REQ (participant) -10023: Abort backup by error at reception of UTIL_SEQUENCE_CONF (code 300) -10024: Abort backup by error at reception of DEFINE_BACKUP_CONF (code 301) -10025: Abort backup by error at reception of CREATE_TRIG_CONF last (code 302) -10026: Abort backup by error at reception of START_BACKUP_CONF (code 303) -10027: Abort backup by error at reception of DEFINE_BACKUP_REQ at master (code 304) -10028: Abort backup by error at reception of BACKUP_FRAGMENT_CONF at master (code 305) -10029: Abort backup by error at reception of FSAPPENDCONF in slave (FileOrScanError = 5) -10030: Simulate buffer full from trigger execution => abort backup -10031: Error 331 for dictCommitTableMutex_locked -10032: backup checkscan -10033: backup checkscan -10034: define backup reply error -10035: Fail to allocate buffers - -10036: Halt backup for table >= 2 -10037: Resume backup (from 10036) - -11001: Send UTIL_SEQUENCE_REF (in master) - -5028: Crash when receiving LQHKEYREQ (in non-master) - -Failed Create Table: --------------------- -7173: Create table failed due to not sufficient number of fragment or - replica records. 
-3001: Fail create 1st fragment -4007 12001: Fail create 1st fragment -4008 12002: Fail create 2nd fragment -4009 12003: Fail create 1st attribute in 1st fragment -4010 12004: Fail create last attribute in 1st fragment -4011 12005: Fail create 1st attribute in 2nd fragment -4012 12006: Fail create last attribute in 2nd fragment - -Drop Table/Index: ------------------ -4001: Crash on REL_TABMEMREQ in TUP -4002: Crash on DROP_TABFILEREQ in TUP -4003: Fail next trigger create in TUP -4004: Fail next trigger drop in TUP -8033: Fail next trigger create in TC -8034: Fail next index create in TC -8035: Fail next trigger drop in TC -8036: Fail next index drop in TC -6006: Crash participant in create index - -4013: verify TUP tab descr before and after next DROP TABLE - -System Restart: ---------------- - -5020: Force system to read pages form file when executing prepare operation record -3000: Delay writing of datapages in ACC when LCP is started -4000: Delay writing of datapages in TUP when LCP is started -7070: Set TimeBetweenLcp to min value -7071: Set TimeBetweenLcp to max value -7072: Split START_FRAGREQ into several log nodes -7073: Don't include own node in START_FRAGREQ -7074: 7072 + 7073 - -Scan: ------- - -5021: Crash when receiving SCAN_NEXTREQ if sender is own node -5022: Crash when receiving SCAN_NEXTREQ if sender is NOT own node -5023: Drop SCAN_NEXTREQ if sender is own node -5024: Drop SCAN_NEXTREQ if sender is NOT own node -5025: Delay SCAN_NEXTREQ 1 second if sender is NOT own node -5030: Drop all SCAN_NEXTREQ until node is shutdown with SYSTEM_ERROR - because of scan fragment timeout - -Test routing of signals: ------------------------ -4006: Turn on routing of TRANSID_AI signals from TUP -5029: Turn on routing of KEYINFO20 signals from LQH - -Ordered index: --------------- -12007: Make next alloc node fail with no memory error - -Dbdict: -------- -6003 Crash in participant @ CreateTabReq::Prepare -6004 Crash in participant @ CreateTabReq::Commit -6005 Crash in participant @ CreateTabReq::CreateDrop -6007 Fail on readTableFile for READ_TAB_FILE1 (28770) - -Dbtup: -4014 - handleInsert - Out of undo buffer -4015 - handleInsert - Out of log space -4016 - handleInsert - AI Inconsistency -4017 - handleInsert - Out of memory -4018 - handleInsert - Null check error -4019 - handleInsert - Alloc rowid error -4020 - handleInsert - Size change error -4021 - handleInsert - Out of disk space - -4022 - addTuxEntries - fail before add of first entry -4023 - addTuxEntries - fail add of last entry (the entry for last index) - -4025: Fail all inserts with out of memory -4026: Fail one insert with oom -4027: Fail inserts randomly with oom -4028: Fail one random insert with oom - -NDBCNTR: - -1000: Crash insertion on SystemError::CopyFragRef -1001: Delay sending NODE_FAILREP (to own node), until error is cleared - -LGMAN: ------ -15000: Fail to create log file - -TSMAN: ------ -16000: Fail to create data file diff --git a/storage/ndb/src/kernel/blocks/Makefile.am b/storage/ndb/src/kernel/blocks/Makefile.am deleted file mode 100644 index 9b806638837..00000000000 --- a/storage/ndb/src/kernel/blocks/Makefile.am +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2004-2006 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. 
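The numeric error-insert codes listed above are exercised by setting a value in a running data node; the block then checks that value when the named signal arrives and crashes, delays, or drops it (the Backup block code further below does this with ERROR_INSERTED and CRASH_INSERTION). The following is a minimal, self-contained sketch of that check pattern only; errorInsertValue and maybeCrashOnErrorInsert are illustrative stand-ins, not the kernel's real macros.

// Sketch of the error-insert check pattern used by the codes listed above.
// "errorInsertValue" and "maybeCrashOnErrorInsert" are illustrative
// stand-ins, not the real NDB kernel ERROR_INSERTED / CRASH_INSERTION macros.
#include <cstdio>
#include <cstdlib>

static unsigned errorInsertValue = 0;          // set by a test, 0 = disabled

static bool errorInserted(unsigned code) {
  return errorInsertValue == code;
}

static void maybeCrashOnErrorInsert(unsigned code, const char* where) {
  if (errorInserted(code)) {
    std::fprintf(stderr, "error insert %u hit in %s - aborting node\n",
                 code, where);
    std::abort();                              // simulate the forced crash
  }
}

int main() {
  errorInsertValue = 10001;                    // e.g. "crash on NODE_FAILREP"
  maybeCrashOnErrorInsert(10003, "PREP_CREATE_TRIG_CONF");  // no match, no-op
  maybeCrashOnErrorInsert(10001, "NODE_FAILREP");           // matches, aborts
}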
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -SUBDIRS = \ - dbdict \ - dbdih \ - dblqh \ - dbtup \ - backup - -noinst_LIBRARIES = libblocks.a - -INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/kernel/blocks/dblqh -libblocks_a_SOURCES = tsman.cpp lgman.cpp pgman.cpp diskpage.cpp restore.cpp\ - cmvmi/Cmvmi.cpp \ - dbacc/DbaccInit.cpp dbacc/DbaccMain.cpp \ - dbdict/Dbdict.cpp \ - dbdih/DbdihInit.cpp dbdih/DbdihMain.cpp \ - dblqh/DblqhInit.cpp dblqh/DblqhMain.cpp \ - dbtc/DbtcInit.cpp dbtc/DbtcMain.cpp \ - dbtup/DbtupExecQuery.cpp dbtup/DbtupBuffer.cpp \ - dbtup/DbtupRoutines.cpp dbtup/DbtupCommit.cpp \ - dbtup/DbtupFixAlloc.cpp dbtup/DbtupTrigger.cpp \ - dbtup/DbtupAbort.cpp dbtup/DbtupPageMap.cpp \ - dbtup/DbtupPagMan.cpp dbtup/DbtupStoredProcDef.cpp \ - dbtup/DbtupMeta.cpp dbtup/DbtupTabDesMan.cpp \ - dbtup/DbtupGen.cpp dbtup/DbtupIndex.cpp \ - dbtup/DbtupDebug.cpp dbtup/DbtupScan.cpp \ - dbtup/DbtupDiskAlloc.cpp dbtup/DbtupVarAlloc.cpp \ - dbtup/tuppage.cpp dbtup/Undo_buffer.cpp \ - ndbfs/AsyncFile.cpp ndbfs/Ndbfs.cpp ndbfs/VoidFs.cpp \ - ndbfs/Filename.cpp ndbfs/CircularIndex.cpp \ - ndbcntr/NdbcntrInit.cpp ndbcntr/NdbcntrSysTable.cpp ndbcntr/NdbcntrMain.cpp \ - qmgr/QmgrInit.cpp qmgr/QmgrMain.cpp \ - trix/Trix.cpp \ - backup/Backup.cpp backup/BackupInit.cpp \ - dbutil/DbUtil.cpp \ - suma/Suma.cpp suma/SumaInit.cpp \ - dbtux/DbtuxGen.cpp dbtux/DbtuxMeta.cpp dbtux/DbtuxMaint.cpp \ - dbtux/DbtuxNode.cpp dbtux/DbtuxTree.cpp dbtux/DbtuxScan.cpp \ - dbtux/DbtuxSearch.cpp dbtux/DbtuxCmp.cpp dbtux/DbtuxStat.cpp \ - dbtux/DbtuxDebug.cpp - -EXTRA_PROGRAMS = ndb_print_file -ndb_print_file_SOURCES = print_file.cpp diskpage.cpp dbtup/tuppage.cpp -ndb_print_file_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am - -windoze-dsp: diff --git a/storage/ndb/src/kernel/blocks/NodeRestart.new.txt b/storage/ndb/src/kernel/blocks/NodeRestart.new.txt deleted file mode 100644 index 00ab8f0c208..00000000000 --- a/storage/ndb/src/kernel/blocks/NodeRestart.new.txt +++ /dev/null @@ -1,82 +0,0 @@ - -Master DIH Starting DIH Starting DICT ----------------------- ---------------------- --------------------- - - Check for sysfile - DIH_RESTARTCONF -> - -****************************************************************************** -* NDB_STTOR interal startphase = 1 -****************************************************************************** - - Read schema file - -****************************************************************************** -* NDB_STTOR interal startphase = 2 -****************************************************************************** - - <- START_PERMREQ - -XXX - -START_PERMCONF -> - -****************************************************************************** -* NDB_STTOR interal startphase = 3 -****************************************************************************** - - <- START_MEREQ - -START_RECREQ -> starting LQH - 
<- START_RECCONF - -For each table - COPY_TABREQ -> starting DIH - -DICTSTARTREQ -> starting DICT - GET_SCHEMA_INFOREQ - (to master DICT) - - ->SCHEMA_INFO - (schema file) - - 1) For each table - If TableStatus OK - ReadTableFile - else - GET_TABINFOREQ - 2) DIADDTABREQ->DIH - - For each local frag - ADD_FRAG_REQ -> local DICT - DI_ADD_TAB_CONF - <- DICTSTARTCONF - -INCL_NODEREQ -> all DIH - -START_MECONF -> starting DIH - (including sysfile) - -****************************************************************************** -* NDB_STTOR interal startphase = 5 -****************************************************************************** - - <- START_COPYREQ - -START_TOREQ -> all DIH - -For each fragment - CREATE_FRAGREQ -> all DIH - - COPY-DATA (LQHKEYREQ++) - - UPDATE_TOREQ -> all DIH - - COPY_ACTIVEREQ -> starting LQH - - CREATE_FRAGREQ -> all DIH - -START_COPYCONF -> - -LOCAL CHECKPOINT - diff --git a/storage/ndb/src/kernel/blocks/NodeRestart.txt b/storage/ndb/src/kernel/blocks/NodeRestart.txt deleted file mode 100644 index e9f277bb39e..00000000000 --- a/storage/ndb/src/kernel/blocks/NodeRestart.txt +++ /dev/null @@ -1,80 +0,0 @@ - -Master DIH Starting DIH Starting DICT ----------------------- ---------------------- --------------------- - - Check for sysfile - DIH_RESTARTCONF -> - -****************************************************************************** -* NDB_STTOR interal startphase = 1 -****************************************************************************** - - Read schema file - -****************************************************************************** -* NDB_STTOR interal startphase = 2 -****************************************************************************** - - <- START_PERMREQ - -XXX - -START_PERMCONF -> - -****************************************************************************** -* NDB_STTOR interal startphase = 3 -****************************************************************************** - - <- START_MEREQ - -START_RECREQ -> starting LQH - <- START_RECCONF - -DICTSTARTREQ -> starting DICT - GET_SCHEMA_INFOREQ - (to master DICT) - - ->SCHEMA_INFO - (schema file) - - 1) For each table - 1) If TableStatus match - ReadTableFile - else - GET_TABINFOREQ - - <- DICTSTARTCONF - -For each table - COPY_TABREQ -> starting DIH - -INCL_NODEREQ -> all DIH - -START_MECONF -> starting DIH - (including sysfile) - -****************************************************************************** -* NDB_STTOR interal startphase = 5 -****************************************************************************** - - <- START_COPYREQ - -START_TOREQ -> all DIH - -For each fragment - ADD_FRAG_REQ -> local DICT -> LQHFRAGREQ -> starting LQH - - CREATE_FRAGREQ -> all DIH - - COPY-DATA (LQHKEYREQ++) - - UPDATE_TOREQ -> all DIH - - COPY_ACTIVEREQ -> starting LQH - - CREATE_FRAGREQ -> all DIH - -START_COPYCONF -> - -LOCAL CHECKPOINT - diff --git a/storage/ndb/src/kernel/blocks/OptNR.txt b/storage/ndb/src/kernel/blocks/OptNR.txt deleted file mode 100644 index 17e9a62bb0e..00000000000 --- a/storage/ndb/src/kernel/blocks/OptNR.txt +++ /dev/null @@ -1,49 +0,0 @@ -*** Copy thread - - Scan rowids with GCP > starting nodes GCP - Cases for different ROWID combinations - -RI Primary Starting Result -1 A A Update A -2 B B* Delete B* + Insert B -3 C C* Delete C* + Delete C + Insert C - C -4 Deleted D Delete D -5 E Deleted Insert E -6 F Deleted Delete F + Insert F - F -7 Deleted Deleted Update GCP - -*** Ordinary operations -Op Starting Result -Insert A@1 A@1 Update A -Insert A@1 A@2 
Delete A@2, Insert A@1 -Insert A@1 1 busy, A@2 Delete 1, Delete A@2, Insert A@1 -Insert A@1 1 busy Delete 1, Insert A@1 - -Delete A@1 A@1 Delete A@1 -Delete A@1 else noop - -Update A@1 A@1 Update A -Update A@1 else noop - -*** - -Rationale: - -If copy has passed rowid, - then no ordinary operation should be a noop - -If copy has not passed, - then it's ok to do a noop as copy will get there sooner or later - -Copy may not end up in lock queue, as no lock is held on primary. - therefore ordinary ops must be noops when rowid missmatch - -When not scanning in rowid order (e.g. disk order) one must -1 make a second pass in rowid order - - finding deletes and inserts (as 2) -2 mark all inserts "earlier" than current scan pos - so they will be found during second pass - -Note: Dealloc is performed first on backup then on primary diff --git a/storage/ndb/src/kernel/blocks/Start.txt b/storage/ndb/src/kernel/blocks/Start.txt deleted file mode 100644 index 3e805ebab55..00000000000 --- a/storage/ndb/src/kernel/blocks/Start.txt +++ /dev/null @@ -1,97 +0,0 @@ - ---- Start phase 1 - Qmgr ------------------------------------------- - -1) Set timer 1 - TimeToWaitAlive - -2) Send CM_REGREQ to all connected(and connecting) nodes - -3) Wait until - -a) The precident answers CM_REGCONF -b) All nodes has answered and I'm the candidate -> election won -c) 30s has passed and I'm the candidate -> election won -d) TimeToWaitAlive has passed -> Failure to start - -When receiving CM_REGCONF -4) Send CM_NODEINFOREQ to all connected(and connecting) nodes - reported in CM_REGCONF - -5) Wait until - -a) All CM_NODEINFO_CONF has arrived -b) TimeToWaitAlive has passed -> Failure to start - -6) Send CM_ACKADD to president - -7) Wait until - -a) Receive CM_ADD(CommitNew) from president -> I'm in the qmgr cluster -b) TimeToWaitAlive has passed -> Failure to start - -NOTE: -30s is hardcoded in 3c. -TimeToWaitAlive should be atleast X sec greater than 30s. i.e. 30+X sec -to support "partial starts" - -NOTE: -In 3b, a more correct number (instead of all) would be -N-NG+1 where N is #nodes and NG is #node groups = (N/R where R is # replicas) -But Qmgr has no notion about node groups or replicas - ---- Start phase X - Qmgr ------------------------------------------- - -President - When accepting a CM_REGREQ -1) Send CM_REGCONF to starting node -2) Send CM_ADD(Prepare) to all started nodes + starting node -3) Send CM_ADD(AddCommit) to all started nodes -4) Send CM_ADD(CommitNew) to starting node - -Cluster participant - -1) Wait for both CM_NODEINFOREQ from starting and CM_ADD(Prepare) from pres. 
-2) Send CM_ACKADD(Prepare) -3) Wait for CM_ADD(AddCommit) from president -4) Send CM_ACKADD(AddCommit) - ---- Start phase 2 - NdbCntr ---------------------------------------- - -- Use same TimeToWaitAliveTimer - -1) Check sysfile (DIH_RESTART_REQ) -2) Read nodes (from Qmgr) P = qmgr president - -3) Send CNTR_MASTER_REQ to cntr(P) - including info in DIH_RESTART_REF/CONF - -4) Wait until - -b) Receiving CNTR_START_CONF -> continue -b) Receiving CNTR_START_REF -> P = node specified in REF, goto 3 -c) TimeToWaitAlive has passed -> Failure to start - -4) Run ndb-startphase 1 - --- -Initial start/System restart NdbCntr (on qmgr president node) - -1) Wait until - -a) Receiving CNTR_START_REQ with GCI > than own GCI - send CNTR_START_REF to all waiting nodes -b) Receiving all CNTR_START_REQ (for all defined nodes) -c) TimeToWait has passed and partition win -d) TimeToWait has passed and partitioning - and configuration "start with partition" = true - -2) Send CNTR_START_CONF to all nodes "with filesystem" - -3) Wait until - - Receiving CNTR_START_REP for all starting nodes - -4) Start waiting nodes (if any) - -NOTE: -1c) Partition win = 1 node in each node group and 1 full node group -1d) Pattitioning = at least 1 node in each node group --- -Running NdbCntr - -When receiving CNTR_MASTER_REQ -1) If I'm not master send CNTR_MASTER_REF (including master node id) -2) If I'm master - Coordinate parallell node restarts - send CNTR_MASTER_CONF (node restart) diff --git a/storage/ndb/src/kernel/blocks/SystemRestart.new.txt b/storage/ndb/src/kernel/blocks/SystemRestart.new.txt deleted file mode 100644 index 3738de28df8..00000000000 --- a/storage/ndb/src/kernel/blocks/SystemRestart.new.txt +++ /dev/null @@ -1,61 +0,0 @@ - -DIH DICT CNTR ----------------------- ---------------------- --------------------- - <- DIHRESTARTREQ -Check for sysfile -DIH_RESTARTCONF -> - -NDB_STTORY -> DICT (sp=1) - Read schema file - -****************************************************************************** -* Elect master -****************************************************************************** - --- Master DIH -- - -Read sysfile - -COPY_GCIREQ -> all DIHs - -DICTSTARTREQ -> local DICT (master) - - master - ====== - For each table (that should be started) - 1) ReadTableFile - 2) DI_ADD_TAB_REQ -> local DIH - -1) ReadTableFile (DIH) -2) COPY_TABREQ -> all DIH (but self) -3) For each local frag - ADD_FRAG_REQ -> local DICT -4) DI_ADD_TAB_CONF - - SCHEMA_INFO -> all DICTs - Info = schema file - - Participant - =========== - 1) For each table - 1) If TableStatus match - ReadTableFile - else - GET_TABINFOREQ - 2) WriteTableFile - 3) Parse Table Data - 4) DI_ADD_TAB_REQ -> local DIH - - <- SCHEMA_INFOCONF - - - <- DICTSTARTCONF - -For each fragment - IF Fragment is logged - START_FRAGREQ -> LQH x - - START_RECREQ -> all LQH - Note does not wait for START_FRAGCONF - -NDB_STARTCONF -> diff --git a/storage/ndb/src/kernel/blocks/SystemRestart.txt b/storage/ndb/src/kernel/blocks/SystemRestart.txt deleted file mode 100644 index 235dfb968fa..00000000000 --- a/storage/ndb/src/kernel/blocks/SystemRestart.txt +++ /dev/null @@ -1,61 +0,0 @@ - -NDBCNTR DIH DICT ----------------------- ---------------------- --------------- -DIH_RESTARTREQ -> DIH - Check for sysfile - <- DIH_RESTARTCONF - -NDB_STTORY -> DICT -sp = 1 - Read schema file - ----- Master - -NDB_STARTREQ -> DIH - Read sysfile - - COPY_GCIREQ -> all DIHs - - DICTSTARTREQ -> local DICT - local - ====== - SCHEMA_INFO -> all DICTs - Info = schema file - - Participant - 
=========== - 1) For each table - If TableStatus match - ReadTableFile - else - GET_TABINFOREQ - - <- SCHEMA_INFOCONF - - local - ====== - For each table - DIHSTARTTABREQ -> DIH - - <- DICTSTARTCONF - - For each table (STARTED) - Read table description - from disk - - For each fragment - IF Fragment dont have LCP - ADD_FRAGREQ -> local DICT - 1) LQHFRAGREQ -> LQH x - 2) For each attribute - LQHADDATTREQ - IF Fragment is logged - START_FRAGREQ -> LQH x - - START_RECREQ -> all LQH - Note does not wait for START_FRAGCONF - - For each table - COPY_TABREQ -> all DIH (but self) - - <- NDB_STARTCONF diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp deleted file mode 100644 index 3aa19644491..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ /dev/null @@ -1,5144 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "Backup.hpp" - -#include - -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -static NDB_TICKS startTime; - -static const Uint32 BACKUP_SEQUENCE = 0x1F000000; - -#ifdef VM_TRACE -#define DEBUG_OUT(x) ndbout << x << endl -#else -#define DEBUG_OUT(x) -#endif - -//#define DEBUG_ABORT -//#define dbg globalSignalLoggers.log - -static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE; - -#define SEND_BACKUP_STARTED_FLAG(A) (((A) & 0x3) > 0) -#define SEND_BACKUP_COMPLETED_FLAG(A) (((A) & 0x3) > 1) - -void -Backup::execSTTOR(Signal* signal) -{ - jamEntry(); - - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; - - if (startphase == 1) - { - m_curr_disk_write_speed = c_defaults.m_disk_write_speed_sr; - m_overflow_disk_write = 0; - m_reset_disk_speed_time = NdbTick_CurrentMillisecond(); - m_reset_delay_used = Backup::DISK_SPEED_CHECK_DELAY; - signal->theData[0] = BackupContinueB::RESET_DISK_SPEED_COUNTER; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, - Backup::DISK_SPEED_CHECK_DELAY, 1); - } - if (startphase == 3) { - jam(); - g_TypeOfStart = typeOfStart; - signal->theData[0] = reference(); - sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB); - return; - }//if - - if (startphase == 7) - { - m_curr_disk_write_speed = c_defaults.m_disk_write_speed; - } - - if(startphase == 7 && g_TypeOfStart == NodeState::ST_INITIAL_START && - c_masterNodeId == getOwnNodeId()){ - jam(); - createSequence(signal); - return; - }//if - - sendSTTORRY(signal); - return; -}//Dbdict::execSTTOR() - -void -Backup::execREAD_NODESCONF(Signal* signal) -{ - jamEntry(); - ReadNodesConf * conf = (ReadNodesConf *)signal->getDataPtr(); - - 
c_aliveNodes.clear(); - - Uint32 count = 0; - for (Uint32 i = 0; iallNodes, i)){ - jam(); - count++; - - NodePtr node; - ndbrequire(c_nodes.seize(node)); - - node.p->nodeId = i; - if(NodeBitmask::get(conf->inactiveNodes, i)) { - jam(); - node.p->alive = 0; - } else { - jam(); - node.p->alive = 1; - c_aliveNodes.set(i); - }//if - }//if - }//for - c_masterNodeId = conf->masterNodeId; - ndbrequire(count == conf->noOfNodes); - sendSTTORRY(signal); -} - -void -Backup::sendSTTORRY(Signal* signal) -{ - signal->theData[0] = 0; - signal->theData[3] = 1; - signal->theData[4] = 3; - signal->theData[5] = 7; - signal->theData[6] = 255; // No more start phases from missra - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB); -} - -void -Backup::createSequence(Signal* signal) -{ - UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend(); - - req->senderData = RNIL; - req->sequenceId = BACKUP_SEQUENCE; - req->requestType = UtilSequenceReq::Create; - - sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ, - signal, UtilSequenceReq::SignalLength, JBB); -} - -void -Backup::execCONTINUEB(Signal* signal) -{ - jamEntry(); - const Uint32 Tdata0 = signal->theData[0]; - const Uint32 Tdata1 = signal->theData[1]; - const Uint32 Tdata2 = signal->theData[2]; - - switch(Tdata0) { - case BackupContinueB::RESET_DISK_SPEED_COUNTER: - { - /* - Adjust for upto 10 millisecond delay of this signal. Longer - delays will not be handled, in this case the system is most - likely under too high load and it won't matter very much that - we decrease the speed of checkpoints. - - We use a technique where we allow an overflow write in one - period. This overflow will be removed from the next period - such that the load will at average be as specified. - */ - int delay_time = m_reset_delay_used; - NDB_TICKS curr_time = NdbTick_CurrentMillisecond(); - int sig_delay = curr_time - m_reset_disk_speed_time; - - m_words_written_this_period = m_overflow_disk_write; - m_overflow_disk_write = 0; - m_reset_disk_speed_time = curr_time; - - if (sig_delay > delay_time + 10) - delay_time = Backup::DISK_SPEED_CHECK_DELAY - 10; - else if (sig_delay < delay_time - 10) - delay_time = Backup::DISK_SPEED_CHECK_DELAY + 10; - else - delay_time = Backup::DISK_SPEED_CHECK_DELAY - (sig_delay - delay_time); - m_reset_delay_used= delay_time; - signal->theData[0] = BackupContinueB::RESET_DISK_SPEED_COUNTER; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, delay_time, 1); -#if 0 - ndbout << "Signal delay was = " << sig_delay; - ndbout << " Current time = " << curr_time << endl; - ndbout << " Delay time will be = " << delay_time << endl << endl; -#endif - break; - } - case BackupContinueB::BACKUP_FRAGMENT_INFO: - { - jam(); - const Uint32 ptr_I = Tdata1; - Uint32 tabPtr_I = Tdata2; - Uint32 fragPtr_I = signal->theData[3]; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptr_I); - TablePtr tabPtr; - ptr.p->tables.getPtr(tabPtr, tabPtr_I); - - if (fragPtr_I != tabPtr.p->fragments.getSize()) - { - jam(); - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I); - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - - const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2; - Uint32 * dst; - if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz)) - { - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4); - return; - } - - BackupFormat::CtlFile::FragmentInfo * fragInfo = - (BackupFormat::CtlFile::FragmentInfo*)dst; - fragInfo->SectionType = 
htonl(BackupFormat::FRAGMENT_INFO); - fragInfo->SectionLength = htonl(sz); - fragInfo->TableId = htonl(fragPtr.p->tableId); - fragInfo->FragmentNo = htonl(fragPtr_I); - fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF); - fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32); - fragInfo->FilePosLow = htonl(0); - fragInfo->FilePosHigh = htonl(0); - - filePtr.p->operation.dataBuffer.updateWritePtr(sz); - - fragPtr_I++; - } - - if (fragPtr_I == tabPtr.p->fragments.getSize()) - { - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); - - fragPtr_I = 0; - ptr.p->tables.next(tabPtr); - if ((tabPtr_I = tabPtr.i) == RNIL) - { - jam(); - closeFiles(signal, ptr); - return; - } - } - - signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; - signal->theData[1] = ptr_I; - signal->theData[2] = tabPtr_I; - signal->theData[3] = fragPtr_I; - sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); - return; - } - case BackupContinueB::START_FILE_THREAD: - case BackupContinueB::BUFFER_UNDERFLOW: - { - jam(); - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, Tdata1); - checkFile(signal, filePtr); - return; - } - break; - case BackupContinueB::BUFFER_FULL_SCAN: - { - jam(); - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, Tdata1); - checkScan(signal, filePtr); - return; - } - break; - case BackupContinueB::BUFFER_FULL_FRAG_COMPLETE: - { - jam(); - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, Tdata1); - fragmentCompleted(signal, filePtr); - return; - } - break; - case BackupContinueB::BUFFER_FULL_META: - { - jam(); - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, Tdata1); - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - FsBuffer & buf = filePtr.p->operation.dataBuffer; - - if(buf.getFreeSize() < buf.getMaxWrite()) { - jam(); - TablePtr tabPtr LINT_SET_PTR; - c_tablePool.getPtr(tabPtr, Tdata2); - - DEBUG_OUT("Backup - Buffer full - " - << buf.getFreeSize() - << " < " << buf.getMaxWrite() - << " (sz: " << buf.getUsableSize() - << " getMinRead: " << buf.getMinRead() - << ") - tableId = " << tabPtr.p->tableId); - - signal->theData[0] = BackupContinueB::BUFFER_FULL_META; - signal->theData[1] = Tdata1; - signal->theData[2] = Tdata2; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3); - return; - }//if - - TablePtr tabPtr LINT_SET_PTR; - c_tablePool.getPtr(tabPtr, Tdata2); - GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = ptr.i; - req->requestType = GetTabInfoReq::RequestById | - GetTabInfoReq::LongSignalConf; - req->tableId = tabPtr.p->tableId; - sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - return; - } - case BackupContinueB::ZDELAY_SCAN_NEXT: - if (ERROR_INSERTED(10036)) - { - jam(); - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 300, - signal->getLength()); - return; - } - else - { - jam(); - CLEAR_ERROR_INSERT_VALUE; - ndbout_c("Resuming backup"); - memmove(signal->theData, signal->theData + 1, - 4*ScanFragNextReq::SignalLength); - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - return ; - } - default: - ndbrequire(0); - }//switch -} - -void -Backup::execDUMP_STATE_ORD(Signal* signal) -{ - jamEntry(); - - if(signal->theData[0] == 20){ - if(signal->length() > 1){ - c_defaults.m_dataBufferSize 
= (signal->theData[1] * 1024 * 1024); - } - if(signal->length() > 2){ - c_defaults.m_logBufferSize = (signal->theData[2] * 1024 * 1024); - } - if(signal->length() > 3){ - c_defaults.m_minWriteSize = signal->theData[3] * 1024; - } - if(signal->length() > 4){ - c_defaults.m_maxWriteSize = signal->theData[4] * 1024; - } - - infoEvent("Backup: data: %d log: %d min: %d max: %d", - c_defaults.m_dataBufferSize, - c_defaults.m_logBufferSize, - c_defaults.m_minWriteSize, - c_defaults.m_maxWriteSize); - return; - } - if(signal->theData[0] == 21){ - BackupReq * req = (BackupReq*)signal->getDataPtrSend(); - req->senderData = 23; - req->backupDataLen = 0; - sendSignal(BACKUP_REF, GSN_BACKUP_REQ,signal,BackupReq::SignalLength, JBB); - startTime = NdbTick_CurrentMillisecond(); - return; - } - - if(signal->theData[0] == 22){ - const Uint32 seq = signal->theData[1]; - FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = 23; - req->directory = 1; - req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); - FsOpenReq::v2_setSequence(req->fileNumber, seq); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBA); - return; - } - - if(signal->theData[0] == 23){ - /** - * Print records - */ - BackupRecordPtr ptr LINT_SET_PTR; - for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){ - infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x", - ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef); - infoEvent(" State: %d", ptr.p->slaveState.getState()); - BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr); filePtr.i != RNIL; - ptr.p->files.next(filePtr)){ - jam(); - infoEvent(" file %d: type: %d flags: H'%x", - filePtr.i, filePtr.p->fileType, - filePtr.p->m_flags); - } - } - - ndbout_c("m_curr_disk_write_speed: %u m_words_written_this_period: %u m_overflow_disk_write: %u", - m_curr_disk_write_speed, m_words_written_this_period, m_overflow_disk_write); - ndbout_c("m_reset_delay_used: %u m_reset_disk_speed_time: %llu", - m_reset_delay_used, (Uint64)m_reset_disk_speed_time); - for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) - { - ndbout_c("BackupRecord %u: BackupId: %u MasterRef: %x ClientRef: %x", - ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef); - ndbout_c(" State: %u", ptr.p->slaveState.getState()); - ndbout_c(" noOfByte: %llu noOfRecords: %llu", - ptr.p->noOfBytes, ptr.p->noOfRecords); - ndbout_c(" noOfLogBytes: %llu noOfLogRecords: %llu", - ptr.p->noOfLogBytes, ptr.p->noOfLogRecords); - ndbout_c(" errorCode: %u", ptr.p->errorCode); - BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr); filePtr.i != RNIL; - ptr.p->files.next(filePtr)) - { - ndbout_c(" file %u: type: %u flags: H'%x tableId: %u fragmentId: %u", - filePtr.i, filePtr.p->fileType, filePtr.p->m_flags, - filePtr.p->tableId, filePtr.p->fragmentNo); - } - if (ptr.p->slaveState.getState() == SCANNING && ptr.p->dataFilePtr != RNIL) - { - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - OperationRecord & op = filePtr.p->operation; - Uint32 *tmp = NULL; - Uint32 sz = 0; - bool eof = FALSE; - bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof); - ndbout_c("ready: %s eof: %s", ready ? "TRUE" : "FALSE", eof ? "TRUE" : "FALSE"); - } - } - return; - } - if(signal->theData[0] == 24){ - /** - * Print size of records etc. 
- */ - infoEvent("Backup - dump pool sizes"); - infoEvent("BackupPool: %d BackupFilePool: %d TablePool: %d", - c_backupPool.getSize(), c_backupFilePool.getSize(), - c_tablePool.getSize()); - infoEvent("AttrPool: %d TriggerPool: %d FragmentPool: %d", - c_backupPool.getSize(), c_backupFilePool.getSize(), - c_tablePool.getSize()); - infoEvent("PagePool: %d", - c_pagePool.getSize()); - - - if(signal->getLength() == 2 && signal->theData[1] == 2424) - { - /** - * Handle LCP - */ - BackupRecordPtr lcp; - ndbrequire(c_backups.first(lcp)); - - ndbrequire(c_backupPool.getSize() == c_backupPool.getNoOfFree() + 1); - if(lcp.p->tables.isEmpty()) - { - ndbrequire(c_tablePool.getSize() == c_tablePool.getNoOfFree()); - ndbrequire(c_attributePool.getSize() == c_attributePool.getNoOfFree()); - ndbrequire(c_fragmentPool.getSize() == c_fragmentPool.getNoOfFree()); - ndbrequire(c_triggerPool.getSize() == c_triggerPool.getNoOfFree()); - } - ndbrequire(c_backupFilePool.getSize() == c_backupFilePool.getNoOfFree() + 1); - BackupFilePtr lcp_file; - c_backupFilePool.getPtr(lcp_file, lcp.p->dataFilePtr); - ndbrequire(c_pagePool.getSize() == - c_pagePool.getNoOfFree() + - lcp_file.p->pages.getSize()); - } - } -} - -bool -Backup::findTable(const BackupRecordPtr & ptr, - TablePtr & tabPtr, Uint32 tableId) const -{ - for(ptr.p->tables.first(tabPtr); - tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) { - jam(); - if(tabPtr.p->tableId == tableId){ - jam(); - return true; - }//if - }//for - tabPtr.i = RNIL; - tabPtr.p = 0; - return false; -} - -static Uint32 xps(Uint64 x, Uint64 ms) -{ - float fx = x; - float fs = ms; - - if(ms == 0 || x == 0) { - jam(); - return 0; - }//if - jam(); - return ((Uint32)(1000.0f * (fx + fs/2.1f))) / ((Uint32)fs); -} - -struct Number { - Number(Uint64 r) { val = r;} - Number & operator=(Uint64 r) { val = r; return * this; } - Uint64 val; -}; - -NdbOut & -operator<< (NdbOut & out, const Number & val){ - char p = 0; - Uint32 loop = 1; - while(val.val > loop){ - loop *= 1000; - p += 3; - } - if(loop != 1){ - p -= 3; - loop /= 1000; - } - - switch(p){ - case 0: - break; - case 3: - p = 'k'; - break; - case 6: - p = 'M'; - break; - case 9: - p = 'G'; - break; - default: - p = 0; - } - char str[2]; - str[0] = p; - str[1] = 0; - Uint32 tmp = (val.val + (loop >> 1)) / loop; -#if 1 - if(p > 0) - out << tmp << str; - else - out << tmp; -#else - out << val.val; -#endif - - return out; -} - -void -Backup::execBACKUP_CONF(Signal* signal) -{ - jamEntry(); - BackupConf * conf = (BackupConf*)signal->getDataPtr(); - - ndbout_c("Backup %d has started", conf->backupId); -} - -void -Backup::execBACKUP_REF(Signal* signal) -{ - jamEntry(); - BackupRef * ref = (BackupRef*)signal->getDataPtr(); - - ndbout_c("Backup (%d) has NOT started %d", ref->senderData, ref->errorCode); -} - -void -Backup::execBACKUP_COMPLETE_REP(Signal* signal) -{ - jamEntry(); - BackupCompleteRep* rep = (BackupCompleteRep*)signal->getDataPtr(); - - startTime = NdbTick_CurrentMillisecond() - startTime; - - ndbout_c("Backup %d has completed", rep->backupId); - const Uint64 bytes = - rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32); - const Uint64 records = - rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32); - - Number rps = xps(records, startTime); - Number bps = xps(bytes, startTime); - - ndbout << " Data [ " - << Number(records) << " rows " - << Number(bytes) << " bytes " << startTime << " ms ] " - << " => " - << rps << " row/s & " << bps << "b/s" << endl; - - bps = xps(rep->noOfLogBytes, startTime); - rps = 
xps(rep->noOfLogRecords, startTime); - - ndbout << " Log [ " - << Number(rep->noOfLogRecords) << " log records " - << Number(rep->noOfLogBytes) << " bytes " << startTime << " ms ] " - << " => " - << rps << " records/s & " << bps << "b/s" << endl; - -} - -void -Backup::execBACKUP_ABORT_REP(Signal* signal) -{ - jamEntry(); - BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtr(); - - ndbout_c("Backup %d has been aborted %d", rep->backupId, rep->reason); -} - -const TriggerEvent::Value triggerEventValues[] = { - TriggerEvent::TE_INSERT, - TriggerEvent::TE_UPDATE, - TriggerEvent::TE_DELETE -}; - -const Backup::State -Backup::validSlaveTransitions[] = { - INITIAL, DEFINING, - DEFINING, DEFINED, - DEFINED, STARTED, - STARTED, STARTED, // Several START_BACKUP_REQ is sent - STARTED, SCANNING, - SCANNING, STARTED, - STARTED, STOPPING, - STOPPING, CLEANING, - CLEANING, INITIAL, - - INITIAL, ABORTING, // Node fail - DEFINING, ABORTING, - DEFINED, ABORTING, - STARTED, ABORTING, - SCANNING, ABORTING, - STOPPING, ABORTING, - CLEANING, ABORTING, // Node fail w/ master takeover - ABORTING, ABORTING, // Slave who initiates ABORT should have this transition - - ABORTING, INITIAL, - INITIAL, INITIAL -}; - -const Uint32 -Backup::validSlaveTransitionsCount = -sizeof(Backup::validSlaveTransitions) / sizeof(Backup::State); - -void -Backup::CompoundState::setState(State newState){ - bool found = false; - const State currState = state; - for(unsigned i = 0; i & ah, - ArrayPool & fh) - : attributes(ah), fragments(fh) -{ - triggerIds[0] = ILLEGAL_TRIGGER_ID; - triggerIds[1] = ILLEGAL_TRIGGER_ID; - triggerIds[2] = ILLEGAL_TRIGGER_ID; - triggerAllocated[0] = false; - triggerAllocated[1] = false; - triggerAllocated[2] = false; -} - -/***************************************************************************** - * - * Node state handling - * - *****************************************************************************/ -void -Backup::execNODE_FAILREP(Signal* signal) -{ - jamEntry(); - - NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr(); - - bool doStuff = false; - /* - Start by saving important signal data which will be destroyed before the - process is completed. 
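validSlaveTransitions above is a flat array of (from, to) state pairs, and CompoundState::setState scans it to assert that every slave state change is one of the listed legal transitions. A reduced stand-alone sketch of that lookup follows; the State enum values and table contents here are illustrative, not the real backup states.

// Sketch of validating a state change against a flat (from, to) pair table,
// in the style of validSlaveTransitions / CompoundState::setState above.
// The enum and table contents are illustrative, not the real backup states.
#include <cassert>
#include <cstddef>

enum State { INITIAL, DEFINING, DEFINED, ABORTING };

static const State validTransitions[] = {
  INITIAL,  DEFINING,
  DEFINING, DEFINED,
  DEFINED,  ABORTING,
  ABORTING, INITIAL,
};
static const std::size_t validTransitionsCount =
  sizeof(validTransitions) / sizeof(validTransitions[0]);

static bool transitionAllowed(State from, State to) {
  for (std::size_t i = 0; i < validTransitionsCount; i += 2) {
    if (validTransitions[i] == from && validTransitions[i + 1] == to)
      return true;                     // found a matching (from, to) pair
  }
  return false;
}

int main() {
  assert(transitionAllowed(INITIAL, DEFINING));
  assert(!transitionAllowed(DEFINED, DEFINING));   // not in the table
}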
- */ - NodeId new_master_node_id = rep->masterNodeId; - Uint32 theFailedNodes[NodeBitmask::Size]; - for (Uint32 i = 0; i < NodeBitmask::Size; i++) - theFailedNodes[i] = rep->theNodes[i]; - - c_masterNodeId = new_master_node_id; - - NodePtr nodePtr; - for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)) { - jam(); - if(NodeBitmask::get(theFailedNodes, nodePtr.p->nodeId)){ - if(nodePtr.p->alive){ - jam(); - ndbrequire(c_aliveNodes.get(nodePtr.p->nodeId)); - doStuff = true; - } else { - jam(); - ndbrequire(!c_aliveNodes.get(nodePtr.p->nodeId)); - }//if - nodePtr.p->alive = 0; - c_aliveNodes.clear(nodePtr.p->nodeId); - }//if - }//for - - if(!doStuff){ - jam(); - return; - }//if - -#ifdef DEBUG_ABORT - ndbout_c("****************** Node fail rep ******************"); -#endif - - NodeId newCoordinator = c_masterNodeId; - BackupRecordPtr ptr; - for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) { - jam(); - checkNodeFail(signal, ptr, newCoordinator, theFailedNodes); - } -} - -bool -Backup::verifyNodesAlive(BackupRecordPtr ptr, - const NdbNodeBitmask& aNodeBitMask) -{ - Uint32 version = getNodeInfo(getOwnNodeId()).m_version; - for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { - jam(); - if(aNodeBitMask.get(i)) { - if(!c_aliveNodes.get(i)){ - jam(); - ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); - return false; - }//if - if(getNodeInfo(i).m_version != version) - { - jam(); - ptr.p->setErrorCode(AbortBackupOrd::IncompatibleVersions); - return false; - } - }//if - }//for - return true; -} - -void -Backup::checkNodeFail(Signal* signal, - BackupRecordPtr ptr, - NodeId newCoord, - Uint32 theFailedNodes[NodeBitmask::Size]) -{ - NdbNodeBitmask mask; - mask.assign(2, theFailedNodes); - - /* Update ptr.p->nodes to be up to date with current alive nodes - */ - NodePtr nodePtr; - bool found = false; - for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)) { - jam(); - if(NodeBitmask::get(theFailedNodes, nodePtr.p->nodeId)) { - jam(); - if (ptr.p->nodes.get(nodePtr.p->nodeId)) { - jam(); - ptr.p->nodes.clear(nodePtr.p->nodeId); - found = true; - } - }//if - }//for - - if(!found) { - jam(); - return; // failed node is not part of backup process, safe to continue - } - - if(mask.get(refToNode(ptr.p->masterRef))) - { - /** - * Master died...abort - */ - ptr.p->masterRef = reference(); - ptr.p->nodes.clear(); - ptr.p->nodes.set(getOwnNodeId()); - ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); - switch(ptr.p->m_gsn){ - case GSN_DEFINE_BACKUP_REQ: - case GSN_START_BACKUP_REQ: - case GSN_BACKUP_FRAGMENT_REQ: - case GSN_STOP_BACKUP_REQ: - // I'm currently processing...reply to self and abort... 
- ptr.p->masterData.gsn = ptr.p->m_gsn; - ptr.p->masterData.sendCounter = ptr.p->nodes; - return; - case GSN_DEFINE_BACKUP_REF: - case GSN_DEFINE_BACKUP_CONF: - case GSN_START_BACKUP_REF: - case GSN_START_BACKUP_CONF: - case GSN_BACKUP_FRAGMENT_REF: - case GSN_BACKUP_FRAGMENT_CONF: - case GSN_STOP_BACKUP_REF: - case GSN_STOP_BACKUP_CONF: - ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; - masterAbort(signal, ptr); - return; - case GSN_ABORT_BACKUP_ORD: - // Already aborting - return; - } - } - else if (newCoord == getOwnNodeId()) - { - /** - * I'm master for this backup - */ - jam(); - CRASH_INSERTION((10001)); -#ifdef DEBUG_ABORT - ndbout_c("**** Master: Node failed: Master id = %u", - refToNode(ptr.p->masterRef)); -#endif - - Uint32 gsn, len, pos; - LINT_INIT(gsn); - LINT_INIT(len); - LINT_INIT(pos); - ptr.p->nodes.bitANDC(mask); - switch(ptr.p->masterData.gsn){ - case GSN_DEFINE_BACKUP_REQ: - { - DefineBackupRef * ref = (DefineBackupRef*)signal->getDataPtr(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - gsn= GSN_DEFINE_BACKUP_REF; - len= DefineBackupRef::SignalLength; - pos= &ref->nodeId - signal->getDataPtr(); - break; - } - case GSN_START_BACKUP_REQ: - { - StartBackupRef * ref = (StartBackupRef*)signal->getDataPtr(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - gsn= GSN_START_BACKUP_REF; - len= StartBackupRef::SignalLength; - pos= &ref->nodeId - signal->getDataPtr(); - break; - } - case GSN_BACKUP_FRAGMENT_REQ: - { - BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - gsn= GSN_BACKUP_FRAGMENT_REF; - len= BackupFragmentRef::SignalLength; - pos= &ref->nodeId - signal->getDataPtr(); - break; - } - case GSN_STOP_BACKUP_REQ: - { - StopBackupRef * ref = (StopBackupRef*)signal->getDataPtr(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - ref->nodeId = getOwnNodeId(); - gsn= GSN_STOP_BACKUP_REF; - len= StopBackupRef::SignalLength; - pos= &ref->nodeId - signal->getDataPtr(); - break; - } - case GSN_WAIT_GCP_REQ: - case GSN_DROP_TRIG_REQ: - case GSN_CREATE_TRIG_REQ: - case GSN_ALTER_TRIG_REQ: - ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); - return; - case GSN_UTIL_SEQUENCE_REQ: - case GSN_UTIL_LOCK_REQ: - return; - default: - ndbrequire(false); - } - - for(Uint32 i = 0; (i = mask.find(i+1)) != NdbNodeBitmask::NotFound; ) - { - signal->theData[pos] = i; - sendSignal(reference(), gsn, signal, len, JBB); -#ifdef DEBUG_ABORT - ndbout_c("sending %d to self from %d", gsn, i); -#endif - } - return; - }//if - - /** - * I abort myself as slave if not master - */ - CRASH_INSERTION((10021)); -} - -void -Backup::execINCL_NODEREQ(Signal* signal) -{ - jamEntry(); - - const Uint32 senderRef = signal->theData[0]; - const Uint32 inclNode = signal->theData[1]; - - NodePtr node; - for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)) { - jam(); - const Uint32 nodeId = node.p->nodeId; - if(inclNode == nodeId){ - jam(); - - ndbrequire(node.p->alive == 0); - ndbrequire(!c_aliveNodes.get(nodeId)); - - node.p->alive = 1; - c_aliveNodes.set(nodeId); - - break; - }//if - }//for - signal->theData[0] = inclNode; - signal->theData[1] = reference(); - sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB); -} - 
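checkNodeFail above handles a participant failure by clearing the failed node from the backup's node bitmask and, on the (new) master, sending the corresponding *_REF signal to itself once per failed node, so the ordinary reply-counting path observes the failure and drives the abort. A reduced sketch of that idea, using std::set in place of NdbNodeBitmask and a callback in place of the signal send (both simplifications for illustration):

// Reduced sketch of the "reply to self on behalf of failed nodes" idea in
// checkNodeFail above. std::set stands in for NdbNodeBitmask, and the
// callback stands in for sending a *_REF signal back to this block.
#include <cstdio>
#include <functional>
#include <set>

using NodeId = unsigned;

static void handleNodeFailure(std::set<NodeId>& participants,
                              const std::set<NodeId>& failed,
                              const std::function<void(NodeId)>& sendRefToSelf) {
  for (NodeId n : failed) {
    if (participants.erase(n) != 0) {   // node was part of this backup
      sendRefToSelf(n);                 // drive the normal reply path
    }
  }
}

int main() {
  std::set<NodeId> participants = {1, 2, 3};
  handleNodeFailure(participants, {2, 5}, [](NodeId n) {
    std::printf("synthesised REF for failed node %u\n", n);
  });
  std::printf("%zu participants left\n", participants.size());
}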
-/***************************************************************************** - * - * Master functionallity - Define backup - * - *****************************************************************************/ - -void -Backup::execBACKUP_REQ(Signal* signal) -{ - jamEntry(); - BackupReq * req = (BackupReq*)signal->getDataPtr(); - - const Uint32 senderData = req->senderData; - const BlockReference senderRef = signal->senderBlockRef(); - const Uint32 dataLen32 = req->backupDataLen; // In 32 bit words - const Uint32 flags = signal->getLength() > 2 ? req->flags : 2; - - if(getOwnNodeId() != getMasterNodeId()) { - jam(); - sendBackupRef(senderRef, flags, signal, senderData, BackupRef::IAmNotMaster); - return; - }//if - - if (c_defaults.m_diskless) - { - sendBackupRef(senderRef, flags, signal, senderData, - BackupRef::CannotBackupDiskless); - return; - } - - if(dataLen32 != 0) { - jam(); - sendBackupRef(senderRef, flags, signal, senderData, - BackupRef::BackupDefinitionNotImplemented); - return; - }//if - -#ifdef DEBUG_ABORT - dumpUsedResources(); -#endif - /** - * Seize a backup record - */ - BackupRecordPtr ptr; - c_backups.seize(ptr); - if(ptr.i == RNIL) { - jam(); - sendBackupRef(senderRef, flags, signal, senderData, BackupRef::OutOfBackupRecord); - return; - }//if - - ndbrequire(ptr.p->tables.isEmpty()); - - ptr.p->m_gsn = 0; - ptr.p->errorCode = 0; - ptr.p->clientRef = senderRef; - ptr.p->clientData = senderData; - ptr.p->flags = flags; - ptr.p->masterRef = reference(); - ptr.p->nodes = c_aliveNodes; - ptr.p->backupId = 0; - ptr.p->backupKey[0] = 0; - ptr.p->backupKey[1] = 0; - ptr.p->backupDataLen = 0; - ptr.p->masterData.errorCode = 0; - - UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend(); - - ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ; - utilReq->senderData = ptr.i; - utilReq->sequenceId = BACKUP_SEQUENCE; - utilReq->requestType = UtilSequenceReq::NextVal; - sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ, - signal, UtilSequenceReq::SignalLength, JBB); -} - -void -Backup::execUTIL_SEQUENCE_REF(Signal* signal) -{ - BackupRecordPtr ptr LINT_SET_PTR; - jamEntry(); - UtilSequenceRef * utilRef = (UtilSequenceRef*)signal->getDataPtr(); - ptr.i = utilRef->senderData; - c_backupPool.getPtr(ptr); - ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - sendBackupRef(signal, ptr, BackupRef::SequenceFailure); -}//execUTIL_SEQUENCE_REF() - - -void -Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode) -{ - jam(); - sendBackupRef(ptr.p->clientRef, ptr.p->flags, signal, ptr.p->clientData, errorCode); - cleanup(signal, ptr); -} - -void -Backup::sendBackupRef(BlockReference senderRef, Uint32 flags, Signal *signal, - Uint32 senderData, Uint32 errorCode) -{ - jam(); - if (SEND_BACKUP_STARTED_FLAG(flags)) - { - BackupRef* ref = (BackupRef*)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->errorCode = errorCode; - ref->masterRef = numberToRef(BACKUP, getMasterNodeId()); - sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB); - } - - if(errorCode != BackupRef::IAmNotMaster){ - signal->theData[0] = NDB_LE_BackupFailedToStart; - signal->theData[1] = senderRef; - signal->theData[2] = errorCode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - } -} - -void -Backup::execUTIL_SEQUENCE_CONF(Signal* signal) -{ - jamEntry(); - - UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr(); - - if(conf->requestType == UtilSequenceReq::Create) - { - jam(); - sendSTTORRY(signal); // At startup in NDB - return; - } - - 
BackupRecordPtr ptr LINT_SET_PTR; - ptr.i = conf->senderData; - c_backupPool.getPtr(ptr); - - ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - - if (ptr.p->checkError()) - { - jam(); - sendBackupRef(signal, ptr, ptr.p->errorCode); - return; - }//if - - if (ERROR_INSERTED(10023)) - { - sendBackupRef(signal, ptr, 323); - return; - }//if - - - { - Uint64 backupId; - memcpy(&backupId,conf->sequenceValue,8); - ptr.p->backupId= (Uint32)backupId; - } - ptr.p->backupKey[0] = (getOwnNodeId() << 16) | (ptr.p->backupId & 0xFFFF); - ptr.p->backupKey[1] = NdbTick_CurrentMillisecond(); - - ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ; - Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); - Callback c = { safe_cast(&Backup::defineBackupMutex_locked), ptr.i }; - ndbrequire(mutex.lock(c)); - - return; -} - -void -Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ - jamEntry(); - ndbrequire(retVal == 0); - - BackupRecordPtr ptr LINT_SET_PTR; - ptr.i = ptrI; - c_backupPool.getPtr(ptr); - - ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - - ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ; - Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); - Callback c = { safe_cast(&Backup::dictCommitTableMutex_locked), ptr.i }; - ndbrequire(mutex.lock(c)); -} - -void -Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) -{ - jamEntry(); - ndbrequire(retVal == 0); - - /** - * We now have both the mutexes - */ - BackupRecordPtr ptr LINT_SET_PTR; - ptr.i = ptrI; - c_backupPool.getPtr(ptr); - - ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - - if (ERROR_INSERTED(10031)) { - ptr.p->setErrorCode(331); - }//if - - if (ptr.p->checkError()) - { - jam(); - - /** - * Unlock mutexes - */ - jam(); - Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); - jam(); - mutex1.unlock(); // ignore response - - jam(); - Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); - jam(); - mutex2.unlock(); // ignore response - - sendBackupRef(signal, ptr, ptr.p->errorCode); - return; - }//if - - sendDefineBackupReq(signal, ptr); -} - -/***************************************************************************** - * - * Master functionallity - Define backup cont'd (from now on all slaves are in) - * - *****************************************************************************/ - -bool -Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId) -{ - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == gsn); - ndbrequire(!ptr.p->masterData.sendCounter.done()); - ndbrequire(ptr.p->masterData.sendCounter.isWaitingFor(nodeId)); - - ptr.p->masterData.sendCounter.clearWaitingFor(nodeId); - return ptr.p->masterData.sendCounter.done(); -} - -void -Backup::sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr) -{ - /** - * Sending define backup to all participants - */ - DefineBackupReq * req = (DefineBackupReq*)signal->getDataPtrSend(); - req->backupId = ptr.p->backupId; - req->clientRef = ptr.p->clientRef; - req->clientData = ptr.p->clientData; - req->senderRef = reference(); - req->backupPtr = ptr.i; - req->backupKey[0] = ptr.p->backupKey[0]; - req->backupKey[1] = ptr.p->backupKey[1]; - req->nodes = ptr.p->nodes; - req->backupDataLen = ptr.p->backupDataLen; - req->flags = ptr.p->flags; - - ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; - ptr.p->masterData.sendCounter = ptr.p->nodes; - NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - sendSignal(rg, 
GSN_DEFINE_BACKUP_REQ, signal, - DefineBackupReq::SignalLength, JBB); - - /** - * Now send backup data - */ - const Uint32 len = ptr.p->backupDataLen; - if(len == 0){ - /** - * No data to send - */ - jam(); - return; - }//if - - /** - * Not implemented - */ - ndbrequire(0); -} - -void -Backup::execDEFINE_BACKUP_REF(Signal* signal) -{ - jamEntry(); - - DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtr(); - - const Uint32 ptrI = ref->backupPtr; - //const Uint32 backupId = ref->backupId; - const Uint32 nodeId = ref->nodeId; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->setErrorCode(ref->errorCode); - defineBackupReply(signal, ptr, nodeId); -} - -void -Backup::execDEFINE_BACKUP_CONF(Signal* signal) -{ - jamEntry(); - - DefineBackupConf* conf = (DefineBackupConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->backupPtr; - //const Uint32 backupId = conf->backupId; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - if (ERROR_INSERTED(10024)) - { - ptr.p->setErrorCode(324); - } - - defineBackupReply(signal, ptr, nodeId); -} - -void -Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) -{ - if (!haveAllSignals(ptr, GSN_DEFINE_BACKUP_REQ, nodeId)) { - jam(); - return; - } - - /** - * Unlock mutexes - */ - jam(); - Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); - jam(); - mutex1.unlock(); // ignore response - - jam(); - Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); - jam(); - mutex2.unlock(); // ignore response - - if(ptr.p->checkError()) - { - jam(); - masterAbort(signal, ptr); - return; - } - - /** - * Reply to client - */ - CRASH_INSERTION((10034)); - - if (SEND_BACKUP_STARTED_FLAG(ptr.p->flags)) - { - BackupConf * conf = (BackupConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->senderData = ptr.p->clientData; - conf->nodes = ptr.p->nodes; - sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal, - BackupConf::SignalLength, JBB); - } - - signal->theData[0] = NDB_LE_BackupStarted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); - - /** - * We've received GSN_DEFINE_BACKUP_CONF from all participants. - * - * Our next step is to send START_BACKUP_REQ to all participants, - * who will then send CREATE_TRIG_REQ for all tables to their local - * DBTUP. 
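sendDefineBackupReq above broadcasts DEFINE_BACKUP_REQ to every participant and copies the participant bitmask into masterData.sendCounter; haveAllSignals then clears each replying node from that counter and reports when the last reply has arrived, at which point defineBackupReply continues. The same bookkeeping, sketched with std::set in place of the kernel's SignalCounter (a simplification; node ids are arbitrary example values):

// Sketch of the broadcast-then-count-replies bookkeeping used by
// sendDefineBackupReq / haveAllSignals above. std::set replaces the
// kernel's SignalCounter; node ids are arbitrary example values.
#include <cassert>
#include <set>

using NodeId = unsigned;

struct ReplyCounter {
  std::set<NodeId> waitingFor;

  void start(const std::set<NodeId>& participants) { waitingFor = participants; }

  // Returns true when the reply from nodeId was the last outstanding one.
  bool replyReceived(NodeId nodeId) {
    assert(waitingFor.count(nodeId) == 1);   // must be a node we asked
    waitingFor.erase(nodeId);
    return waitingFor.empty();
  }
};

int main() {
  ReplyCounter counter;
  counter.start({1, 2, 3});
  assert(!counter.replyReceived(1));
  assert(!counter.replyReceived(3));
  assert(counter.replyReceived(2));          // last reply -> proceed
}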
- */ - TablePtr tabPtr; - ptr.p->tables.first(tabPtr); - - sendStartBackup(signal, ptr, tabPtr); -} - -/***************************************************************************** - * - * Master functionallity - Prepare triggers - * - *****************************************************************************/ -void -Backup::createAttributeMask(TablePtr tabPtr, - Bitmask & mask) -{ - mask.clear(); - Table & table = * tabPtr.p; - Ptr attrPtr; - table.attributes.first(attrPtr); - for(; !attrPtr.isNull(); table.attributes.next(attrPtr)) - { - jam(); - mask.set(attrPtr.p->data.attrId); - } -} - -void -Backup::sendCreateTrig(Signal* signal, - BackupRecordPtr ptr, TablePtr tabPtr) -{ - CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend(); - - /* - * First, setup the structures - */ - for(Uint32 j=0; j<3; j++) { - jam(); - - TriggerPtr trigPtr; - if(!ptr.p->triggers.seize(trigPtr)) { - jam(); - ptr.p->m_gsn = GSN_START_BACKUP_REF; - StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, - StartBackupRef::SignalLength, JBB); - return; - } // if - - const Uint32 triggerId= trigPtr.i; - tabPtr.p->triggerIds[j] = triggerId; - tabPtr.p->triggerAllocated[j] = true; - trigPtr.p->backupPtr = ptr.i; - trigPtr.p->tableId = tabPtr.p->tableId; - trigPtr.p->tab_ptr_i = tabPtr.i; - trigPtr.p->logEntry = 0; - trigPtr.p->event = j; - trigPtr.p->maxRecordSize = 4096; - trigPtr.p->operation = - &ptr.p->files.getPtr(ptr.p->logFilePtr)->operation; - trigPtr.p->operation->noOfBytes = 0; - trigPtr.p->operation->noOfRecords = 0; - trigPtr.p->errorCode = 0; - } // for - - /* - * now ask DBTUP to create - */ - ptr.p->slaveData.gsn = GSN_CREATE_TRIG_REQ; - ptr.p->slaveData.trigSendCounter = 3; - ptr.p->slaveData.createTrig.tableId = tabPtr.p->tableId; - - req->setUserRef(reference()); - req->setReceiverRef(reference()); - req->setConnectionPtr(ptr.i); - req->setRequestType(CreateTrigReq::RT_USER); - - Bitmask attrMask; - createAttributeMask(tabPtr, attrMask); - req->setAttributeMask(attrMask); - req->setTableId(tabPtr.p->tableId); - req->setIndexId(RNIL); // not used - req->setTriggerType(TriggerType::SUBSCRIPTION); - req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); - req->setMonitorReplicas(true); - req->setMonitorAllAttributes(false); - req->setOnline(true); - - for (int i=0; i < 3; i++) { - req->setTriggerId(tabPtr.p->triggerIds[i]); - req->setTriggerEvent(triggerEventValues[i]); - - sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB); - } -} - -void -Backup::execCREATE_TRIG_CONF(Signal* signal) -{ - jamEntry(); - CreateTrigConf * conf = (CreateTrigConf*)signal->getDataPtr(); - - const Uint32 ptrI = conf->getConnectionPtr(); - const Uint32 tableId = conf->getTableId(); - const TriggerEvent::Value type = conf->getTriggerEvent(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - /** - * Verify that I'm waiting for this conf - * - * ptr.p->masterRef != reference() - * as slaves and masters have triggers now. 
- */ - ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ); - ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); - ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId); - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, tableId)); - ndbrequire(type < 3); // if some decides to change the enums - - createTrigReply(signal, ptr); -} - -void -Backup::execCREATE_TRIG_REF(Signal* signal) -{ - jamEntry(); - CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtr(); - - const Uint32 ptrI = ref->getConnectionPtr(); - const Uint32 tableId = ref->getTableId(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - /** - * Verify that I'm waiting for this ref - * - * ptr.p->masterRef != reference() - * as slaves and masters have triggers now - */ - ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ); - ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); - ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId); - - ptr.p->setErrorCode(ref->getErrorCode()); - - createTrigReply(signal, ptr); -} - -void -Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) -{ - CRASH_INSERTION(10003); - - /** - * Check finished with table - */ - ptr.p->slaveData.trigSendCounter--; - if(ptr.p->slaveData.trigSendCounter.done() == false){ - jam(); - return; - }//if - - if (ERROR_INSERTED(10025)) - { - ptr.p->errorCode = 325; - } - - if(ptr.p->checkError()) { - jam(); - ptr.p->m_gsn = GSN_START_BACKUP_REF; - StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = ptr.p->errorCode; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, - StartBackupRef::SignalLength, JBB); - return; - }//if - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.createTrig.tableId)); - - /** - * Next table - */ - ptr.p->tables.next(tabPtr); - if(tabPtr.i != RNIL){ - jam(); - sendCreateTrig(signal, ptr, tabPtr); - return; - }//if - - /** - * We've finished creating triggers. - * - * send conf and wait - */ - ptr.p->m_gsn = GSN_START_BACKUP_CONF; - StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend(); - conf->backupPtr = ptr.i; - conf->backupId = ptr.p->backupId; - sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal, - StartBackupConf::SignalLength, JBB); -} - -/***************************************************************************** - * - * Master functionallity - Start backup - * - *****************************************************************************/ -void -Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) -{ - - ptr.p->masterData.startBackup.tablePtr = tabPtr.i; - - StartBackupReq* req = (StartBackupReq*)signal->getDataPtrSend(); - req->backupId = ptr.p->backupId; - req->backupPtr = ptr.i; - - /** - * We use trigger Ids that are unique to BACKUP. - * These don't interfere with other triggers (e.g. from DBDICT) - * as there is a special case in DBTUP. 
- * - * Consequently, backups during online upgrade won't work - */ - ptr.p->masterData.gsn = GSN_START_BACKUP_REQ; - ptr.p->masterData.sendCounter = ptr.p->nodes; - NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - sendSignal(rg, GSN_START_BACKUP_REQ, signal, - StartBackupReq::SignalLength, JBB); -} - -void -Backup::execSTART_BACKUP_REF(Signal* signal) -{ - jamEntry(); - - StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->backupPtr; - //const Uint32 backupId = ref->backupId; - const Uint32 nodeId = ref->nodeId; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->setErrorCode(ref->errorCode); - startBackupReply(signal, ptr, nodeId); -} - -void -Backup::execSTART_BACKUP_CONF(Signal* signal) -{ - jamEntry(); - - StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->backupPtr; - //const Uint32 backupId = conf->backupId; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - startBackupReply(signal, ptr, nodeId); -} - -void -Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) -{ - - CRASH_INSERTION((10004)); - - if (!haveAllSignals(ptr, GSN_START_BACKUP_REQ, nodeId)) { - jam(); - return; - } - - if (ERROR_INSERTED(10026)) - { - ptr.p->errorCode = 326; - } - - if(ptr.p->checkError()){ - jam(); - masterAbort(signal, ptr); - return; - } - - /** - * Wait for GCP - */ - ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ; - ptr.p->masterData.waitGCP.startBackup = true; - - WaitGCPReq * waitGCPReq = (WaitGCPReq*)signal->getDataPtrSend(); - waitGCPReq->senderRef = reference(); - waitGCPReq->senderData = ptr.i; - waitGCPReq->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength,JBB); -} - -void -Backup::execWAIT_GCP_REF(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10006)); - - WaitGCPRef * ref = (WaitGCPRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->senderData; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ); - - WaitGCPReq * req = (WaitGCPReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = ptr.i; - req->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength,JBB); -} - -void -Backup::execWAIT_GCP_CONF(Signal* signal){ - jamEntry(); - - CRASH_INSERTION((10007)); - - WaitGCPConf * conf = (WaitGCPConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->senderData; - const Uint32 gcp = conf->gcp; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ); - - if(ptr.p->checkError()) { - jam(); - masterAbort(signal, ptr); - return; - }//if - - if(ptr.p->masterData.waitGCP.startBackup) { - jam(); - CRASH_INSERTION((10008)); - ptr.p->startGCP = gcp; - ptr.p->masterData.sendCounter= 0; - ptr.p->masterData.gsn = GSN_BACKUP_FRAGMENT_REQ; - nextFragment(signal, ptr); - return; - } else { - jam(); - if(gcp >= ptr.p->startGCP + 3) - { - CRASH_INSERTION((10009)); - ptr.p->stopGCP = gcp; - /** - * Backup is complete - begin cleanup - * STOP_BACKUP_REQ is sent to participants. 
- * They then drop the local triggers - */ - sendStopBackup(signal, ptr); - return; - }//if - - /** - * Make sure that we got entire stopGCP - */ - WaitGCPReq * req = (WaitGCPReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = ptr.i; - req->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength,JBB); - return; - } -} - -/***************************************************************************** - * - * Master functionallity - Backup fragment - * - *****************************************************************************/ -void -Backup::nextFragment(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - BackupFragmentReq* req = (BackupFragmentReq*)signal->getDataPtrSend(); - req->backupPtr = ptr.i; - req->backupId = ptr.p->backupId; - - NodeBitmask nodes = ptr.p->nodes; - Uint32 idleNodes = nodes.count(); - Uint32 saveIdleNodes = idleNodes; - ndbrequire(idleNodes > 0); - - TablePtr tabPtr; - ptr.p->tables.first(tabPtr); - for(; tabPtr.i != RNIL && idleNodes > 0; ptr.p->tables.next(tabPtr)) { - jam(); - FragmentPtr fragPtr; - Array<Fragment> & frags = tabPtr.p->fragments; - const Uint32 fragCount = frags.getSize(); - - for(Uint32 i = 0; i<fragCount && idleNodes > 0; i++) { - jam(); - tabPtr.p->fragments.getPtr(fragPtr, i); - const Uint32 nodeId = fragPtr.p->node; - if(fragPtr.p->scanning != 0) { - jam(); - ndbrequire(nodes.get(nodeId)); - nodes.clear(nodeId); - idleNodes--; - } else if(fragPtr.p->scanned == 0 && nodes.get(nodeId)){ - jam(); - fragPtr.p->scanning = 1; - nodes.clear(nodeId); - idleNodes--; - - req->tableId = tabPtr.p->tableId; - req->fragmentNo = i; - req->count = 0; - - ptr.p->masterData.sendCounter++; - const BlockReference ref = numberToRef(BACKUP, nodeId); - sendSignal(ref, GSN_BACKUP_FRAGMENT_REQ, signal, - BackupFragmentReq::SignalLength, JBB); - }//if - }//for - }//for - - if(idleNodes != saveIdleNodes){ - jam(); - return; - }//if - - /** - * Finished with all tables - */ - { - ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ; - ptr.p->masterData.waitGCP.startBackup = false; - - WaitGCPReq * req = (WaitGCPReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = ptr.i; - req->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); - } -} - -void -Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10010)); - - BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->backupPtr; - //const Uint32 backupId = conf->backupId; - const Uint32 tableId = conf->tableId; - const Uint32 fragmentNo = conf->fragmentNo; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - const Uint64 noOfBytes = - conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32); - const Uint64 noOfRecords = - conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->noOfBytes += noOfBytes; - ptr.p->noOfRecords += noOfRecords; - ptr.p->masterData.sendCounter--; - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, tableId)); - - tabPtr.p->noOfRecords += noOfRecords; - - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, fragmentNo); - - fragPtr.p->noOfRecords = noOfRecords; - - ndbrequire(fragPtr.p->scanned == 0); - ndbrequire(fragPtr.p->scanning == 1); - ndbrequire(fragPtr.p->node == nodeId); - - fragPtr.p->scanned = 1; - fragPtr.p->scanning = 0; - - if 
(ERROR_INSERTED(10028)) - { - ptr.p->errorCode = 328; - } - - if(ptr.p->checkError()) - { - if(ptr.p->masterData.sendCounter.done()) - { - jam(); - masterAbort(signal, ptr); - return; - }//if - } - else - { - NodeBitmask nodes = ptr.p->nodes; - nodes.clear(getOwnNodeId()); - if (!nodes.isclear()) - { - BackupFragmentCompleteRep *rep = - (BackupFragmentCompleteRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->backupPtr = ptr.i; - rep->tableId = tableId; - rep->fragmentNo = fragmentNo; - rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF); - rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32); - rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF); - rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32); - NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal, - BackupFragmentCompleteRep::SignalLength, JBB); - } - nextFragment(signal, ptr); - } -} - -void -Backup::execBACKUP_FRAGMENT_REF(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10011)); - - BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->backupPtr; - //const Uint32 backupId = ref->backupId; - const Uint32 nodeId = ref->nodeId; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - TablePtr tabPtr; - ptr.p->tables.first(tabPtr); - for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { - jam(); - FragmentPtr fragPtr; - Array<Fragment> & frags = tabPtr.p->fragments; - const Uint32 fragCount = frags.getSize(); - - for(Uint32 i = 0; i<fragCount; i++) { - jam(); - tabPtr.p->fragments.getPtr(fragPtr, i); - if(fragPtr.p->scanning != 0 && nodeId == fragPtr.p->node) - { - jam(); - ndbrequire(fragPtr.p->scanned == 0); - fragPtr.p->scanned = 1; - fragPtr.p->scanning = 0; - goto done; - } - } - } - goto err; - -done: - ptr.p->masterData.sendCounter--; - ptr.p->setErrorCode(ref->errorCode); - - if(ptr.p->masterData.sendCounter.done()) - { - jam(); - masterAbort(signal, ptr); - return; - }//if - -err: - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = AbortBackupOrd::LogBufferFull; - ord->senderData= ptr.i; - execABORT_BACKUP_ORD(signal); -} - -void -Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal) -{ - jamEntry(); - BackupFragmentCompleteRep * rep = - (BackupFragmentCompleteRep*)signal->getDataPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, rep->backupPtr); - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, rep->tableId)); - - tabPtr.p->noOfRecords = - rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32); - - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo); - - fragPtr.p->noOfRecords = - rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32); -} - -/***************************************************************************** - * - * Slave functionallity - Drop triggers - * - *****************************************************************************/ - -void -Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) -{ - TablePtr tabPtr; - ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ; - - if (ptr.p->slaveData.dropTrig.tableId == RNIL) { - jam(); - if(ptr.p->tables.count()) - ptr.p->tables.first(tabPtr); - else - { - // Early abort, go to close files - jam(); - closeFiles(signal, ptr); - return; - } - } else { - jam(); - ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.dropTrig.tableId)); - ptr.p->tables.next(tabPtr); - }//if - 
if (tabPtr.i != RNIL) { - jam(); - sendDropTrig(signal, ptr, tabPtr); - } else { - /** - * Insert footers - */ - //if backup error, we needn't insert footers - if(ptr.p->checkError()) - { - jam(); - closeFiles(signal, ptr); - ptr.p->errorCode = 0; - return; - } - - { - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - Uint32 * dst; - LINT_INIT(dst); - ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1)); - * dst = 0; - filePtr.p->operation.dataBuffer.updateWritePtr(1); - } - - { - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - - const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2; - - Uint32 * dst; - LINT_INIT(dst); - ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz)); - - BackupFormat::CtlFile::GCPEntry * gcp = - (BackupFormat::CtlFile::GCPEntry*)dst; - - gcp->SectionType = htonl(BackupFormat::GCP_ENTRY); - gcp->SectionLength = htonl(gcpSz); - gcp->StartGCP = htonl(ptr.p->startGCP); - gcp->StopGCP = htonl(ptr.p->stopGCP - 1); - filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); - - { - TablePtr tabPtr; - if (ptr.p->tables.first(tabPtr)) - { - jam(); - signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; - signal->theData[1] = ptr.i; - signal->theData[2] = tabPtr.i; - signal->theData[3] = 0; - sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); - } - else - { - jam(); - closeFiles(signal, ptr); - } - } - } - } -} - -void -Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) -{ - jam(); - DropTrigReq * req = (DropTrigReq *)signal->getDataPtrSend(); - - ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ; - ptr.p->slaveData.trigSendCounter = 0; - req->setConnectionPtr(ptr.i); - req->setUserRef(reference()); // Sending to myself - req->setRequestType(DropTrigReq::RT_USER); - req->setIndexId(RNIL); - req->setTriggerInfo(0); // not used on DROP - req->setTriggerType(TriggerType::SUBSCRIPTION); - req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); - - ptr.p->slaveData.dropTrig.tableId = tabPtr.p->tableId; - req->setTableId(tabPtr.p->tableId); - - for (int i = 0; i < 3; i++) { - Uint32 id = tabPtr.p->triggerIds[i]; - req->setTriggerId(id); - req->setTriggerEvent(triggerEventValues[i]); - sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - ptr.p->slaveData.trigSendCounter ++; - } -} - -void -Backup::execDROP_TRIG_REF(Signal* signal) -{ - jamEntry(); - - DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->getConnectionPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - if(ref->getConf()->getTriggerId() != ~(Uint32) 0) - { - ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId(); - ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl; - } - - dropTrigReply(signal, ptr); -} - -void -Backup::execDROP_TRIG_CONF(Signal* signal) -{ - jamEntry(); - - DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->getConnectionPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - dropTrigReply(signal, ptr); -} - -void -Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr) -{ - CRASH_INSERTION((10012)); - - ndbrequire(ptr.p->slaveData.gsn == GSN_DROP_TRIG_REQ); - ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); - - // move from .masterData to .slaveData - ptr.p->slaveData.trigSendCounter--; - if(ptr.p->slaveData.trigSendCounter.done() == false){ - jam(); - return; 
- }//if - - sendDropTrig(signal, ptr); // recursive next -} - -/***************************************************************************** - * - * Master functionallity - Stop backup - * - *****************************************************************************/ -void -Backup::execSTOP_BACKUP_REF(Signal* signal) -{ - jamEntry(); - - StopBackupRef* ref = (StopBackupRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->backupPtr; - //const Uint32 backupId = ref->backupId; - const Uint32 nodeId = ref->nodeId; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->setErrorCode(ref->errorCode); - stopBackupReply(signal, ptr, nodeId); -} - -void -Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - StopBackupReq* stop = (StopBackupReq*)signal->getDataPtrSend(); - stop->backupPtr = ptr.i; - stop->backupId = ptr.p->backupId; - stop->startGCP = ptr.p->startGCP; - stop->stopGCP = ptr.p->stopGCP; - - ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; - ptr.p->masterData.sendCounter = ptr.p->nodes; - NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - sendSignal(rg, GSN_STOP_BACKUP_REQ, signal, - StopBackupReq::SignalLength, JBB); -} - -void -Backup::execSTOP_BACKUP_CONF(Signal* signal) -{ - jamEntry(); - - StopBackupConf* conf = (StopBackupConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->backupPtr; - //const Uint32 backupId = conf->backupId; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->noOfLogBytes += conf->noOfLogBytes; - ptr.p->noOfLogRecords += conf->noOfLogRecords; - - stopBackupReply(signal, ptr, nodeId); -} - -void -Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) -{ - CRASH_INSERTION((10013)); - - if (!haveAllSignals(ptr, GSN_STOP_BACKUP_REQ, nodeId)) { - jam(); - return; - } - - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete); - - if(!ptr.p->checkError()) - { - if (SEND_BACKUP_COMPLETED_FLAG(ptr.p->flags)) - { - BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->senderData = ptr.p->clientData; - rep->startGCP = ptr.p->startGCP; - rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); - rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); - rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32); - rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32); - rep->noOfLogBytes = ptr.p->noOfLogBytes; - rep->noOfLogRecords = ptr.p->noOfLogRecords; - rep->nodes = ptr.p->nodes; - sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, - BackupCompleteRep::SignalLength, JBB); - } - - signal->theData[0] = NDB_LE_BackupCompleted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->startGCP; - signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); - signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); - signal->theData[7] = ptr.p->noOfLogBytes; - signal->theData[8] = ptr.p->noOfLogRecords; - ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32); - signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB); - } - else - { - masterAbort(signal, ptr); - } -} - 
-/***************************************************************************** - * - * Master functionallity - Abort backup - * - *****************************************************************************/ -void -Backup::masterAbort(Signal* signal, BackupRecordPtr ptr) -{ - jam(); -#ifdef DEBUG_ABORT - ndbout_c("************ masterAbort"); -#endif - - ndbassert(ptr.p->masterRef == reference()); - - if(ptr.p->masterData.errorCode != 0) - { - jam(); - return; - } - - if (SEND_BACKUP_COMPLETED_FLAG(ptr.p->flags)) - { - BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->senderData = ptr.p->clientData; - rep->reason = ptr.p->errorCode; - sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, - BackupAbortRep::SignalLength, JBB); - } - signal->theData[0] = NDB_LE_BackupAborted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->errorCode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - - ndbrequire(ptr.p->errorCode); - ptr.p->masterData.errorCode = ptr.p->errorCode; - - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->senderData= ptr.i; - NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - - switch(ptr.p->masterData.gsn){ - case GSN_DEFINE_BACKUP_REQ: - ord->requestType = AbortBackupOrd::BackupFailure; - sendSignal(rg, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); - return; - case GSN_CREATE_TRIG_REQ: - case GSN_START_BACKUP_REQ: - case GSN_ALTER_TRIG_REQ: - case GSN_WAIT_GCP_REQ: - case GSN_BACKUP_FRAGMENT_REQ: - jam(); - ptr.p->stopGCP= ptr.p->startGCP + 1; - sendStopBackup(signal, ptr); // dropping due to error - return; - case GSN_UTIL_SEQUENCE_REQ: - case GSN_UTIL_LOCK_REQ: - ndbrequire(false); - return; - case GSN_DROP_TRIG_REQ: - case GSN_STOP_BACKUP_REQ: - return; - } -} - -void -Backup::abort_scan(Signal * signal, BackupRecordPtr ptr) -{ - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->senderData= ptr.i; - ord->requestType = AbortBackupOrd::AbortScan; - - TablePtr tabPtr; - ptr.p->tables.first(tabPtr); - for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { - jam(); - FragmentPtr fragPtr; - Array<Fragment> & frags = tabPtr.p->fragments; - const Uint32 fragCount = frags.getSize(); - - for(Uint32 i = 0; i<fragCount; i++) { - jam(); - tabPtr.p->fragments.getPtr(fragPtr, i); - const Uint32 nodeId = fragPtr.p->node; - if(fragPtr.p->scanning != 0 && ptr.p->nodes.get(nodeId)) { - jam(); - - const BlockReference ref = numberToRef(BACKUP, nodeId); - sendSignal(ref, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); - - } - } - } -} - -/***************************************************************************** - * - * Slave functionallity: Define Backup - * - *****************************************************************************/ -void -Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) -{ - jam(); - ptr.p->setErrorCode(errCode); - if(ptr.p->is_lcp()) - { - jam(); - if (ptr.p->ctlFilePtr == RNIL) { - ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; - ndbrequire(ptr.p->errorCode != 0); - DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); - ref->backupId = ptr.p->backupId; - ref->backupPtr = ptr.i; - ref->errorCode = ptr.p->errorCode; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, - DefineBackupRef::SignalLength, JBB); - return; - 
} - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - if (filePtr.p->m_flags & BackupFile::BF_LCP_META) - { - jam(); - ndbrequire(! (filePtr.p->m_flags & BackupFile::BF_FILE_THREAD)); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_LCP_META; - if (filePtr.p->m_flags & BackupFile::BF_OPEN) - { - closeFile(signal, ptr, filePtr); - return; - } - } - - ndbrequire(filePtr.p->m_flags == 0); - - TablePtr tabPtr; - FragmentPtr fragPtr; - - ndbrequire(ptr.p->tables.first(tabPtr)); - tabPtr.p->fragments.getPtr(fragPtr, 0); - - LcpPrepareRef* ref= (LcpPrepareRef*)signal->getDataPtrSend(); - ref->senderData = ptr.p->clientData; - ref->senderRef = reference(); - ref->tableId = tabPtr.p->tableId; - ref->fragmentId = fragPtr.p->fragmentId; - ref->errorCode = errCode; - sendSignal(ptr.p->masterRef, GSN_LCP_PREPARE_REF, - signal, LcpPrepareRef::SignalLength, JBB); - return; - } - - ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; - ndbrequire(ptr.p->errorCode != 0); - - DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); - ref->backupId = ptr.p->backupId; - ref->backupPtr = ptr.i; - ref->errorCode = ptr.p->errorCode; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, - DefineBackupRef::SignalLength, JBB); -} - -void -Backup::execDEFINE_BACKUP_REQ(Signal* signal) -{ - jamEntry(); - - DefineBackupReq* req = (DefineBackupReq*)signal->getDataPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; - const BlockReference senderRef = req->senderRef; - - if(senderRef == reference()){ - /** - * Signal sent from myself -> record already seized - */ - jam(); - c_backupPool.getPtr(ptr, ptrI); - } else { // from other node - jam(); -#ifdef DEBUG_ABORT - dumpUsedResources(); -#endif - if(!c_backups.seizeId(ptr, ptrI)) { - jam(); - ndbrequire(false); // If master has succeeded slave should succed - }//if - }//if - - CRASH_INSERTION((10014)); - - ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ; - ptr.p->slaveState.forceState(INITIAL); - ptr.p->slaveState.setState(DEFINING); - ptr.p->slaveData.dropTrig.tableId = RNIL; - ptr.p->errorCode = 0; - ptr.p->clientRef = req->clientRef; - ptr.p->clientData = req->clientData; - if(senderRef == reference()) - ptr.p->flags = req->flags; - else - ptr.p->flags = req->flags & ~((Uint32)0x3); /* remove waitCompleted flags - * as non master should never - * reply - */ - ptr.p->masterRef = senderRef; - ptr.p->nodes = req->nodes; - ptr.p->backupId = backupId; - ptr.p->backupKey[0] = req->backupKey[0]; - ptr.p->backupKey[1] = req->backupKey[1]; - ptr.p->backupDataLen = req->backupDataLen; - ptr.p->masterData.errorCode = 0; - ptr.p->noOfBytes = 0; - ptr.p->noOfRecords = 0; - ptr.p->noOfLogBytes = 0; - ptr.p->noOfLogRecords = 0; - ptr.p->currGCP = 0; - ptr.p->startGCP = 0; - ptr.p->stopGCP = 0; - - /** - * Allocate files - */ - BackupFilePtr files[3]; - Uint32 noOfPages[] = { - NO_OF_PAGES_META_FILE, - 2, // 32k - 0 // 3M - }; - const Uint32 maxInsert[] = { - MAX_WORDS_META_FILE, - 4096, // 16k - 16*3000, // Max 16 tuples - }; - Uint32 minWrite[] = { - 8192, - 8192, - 32768 - }; - Uint32 maxWrite[] = { - 8192, - 8192, - 32768 - }; - - minWrite[1] = c_defaults.m_minWriteSize; - maxWrite[1] = c_defaults.m_maxWriteSize; - noOfPages[1] = (c_defaults.m_logBufferSize + sizeof(Page32) - 1) / - sizeof(Page32); - minWrite[2] = c_defaults.m_minWriteSize; - maxWrite[2] = c_defaults.m_maxWriteSize; - noOfPages[2] = (c_defaults.m_dataBufferSize + sizeof(Page32) - 
1) / - sizeof(Page32); - - if (ptr.p->is_lcp()) - { - noOfPages[2] = (c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) / - sizeof(Page32); - } - - ptr.p->ctlFilePtr = ptr.p->logFilePtr = ptr.p->dataFilePtr = RNIL; - - for(Uint32 i = 0; i<3; i++) { - jam(); - if(ptr.p->is_lcp() && i != 2) - { - files[i].i = RNIL; - continue; - } - if(!ptr.p->files.seize(files[i])) { - jam(); - defineBackupRef(signal, ptr, - DefineBackupRef::FailedToAllocateFileRecord); - return; - }//if - - files[i].p->tableId = RNIL; - files[i].p->backupPtr = ptr.i; - files[i].p->filePointer = RNIL; - files[i].p->m_flags = 0; - files[i].p->errorCode = 0; - - if(ERROR_INSERTED(10035) || files[i].p->pages.seize(noOfPages[i]) == false) - { - jam(); - DEBUG_OUT("Failed to seize " << noOfPages[i] << " pages"); - defineBackupRef(signal, ptr, DefineBackupRef::FailedToAllocateBuffers); - return; - }//if - Page32Ptr pagePtr; - files[i].p->pages.getPtr(pagePtr, 0); - - const char * msg = files[i].p-> - operation.dataBuffer.setup((Uint32*)pagePtr.p, - noOfPages[i] * (sizeof(Page32) >> 2), - 128, - minWrite[i] >> 2, - maxWrite[i] >> 2, - maxInsert[i]); - if(msg != 0) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedToSetupFsBuffers); - return; - }//if - - switch(i){ - case 0: - files[i].p->fileType = BackupFormat::CTL_FILE; - ptr.p->ctlFilePtr = files[i].i; - break; - case 1: - files[i].p->fileType = BackupFormat::LOG_FILE; - ptr.p->logFilePtr = files[i].i; - break; - case 2: - files[i].p->fileType = BackupFormat::DATA_FILE; - ptr.p->dataFilePtr = files[i].i; - } - }//for - - if (!verifyNodesAlive(ptr, ptr.p->nodes)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::Undefined); - return; - }//if - if (ERROR_INSERTED(10027)) { - jam(); - defineBackupRef(signal, ptr, 327); - return; - }//if - - if(ptr.p->backupDataLen == 0) { - jam(); - backupAllData(signal, ptr); - return; - }//if - - if(ptr.p->is_lcp()) - { - jam(); - getFragmentInfoDone(signal, ptr); - return; - } - - /** - * Not implemented - */ - ndbrequire(0); -} - -void -Backup::backupAllData(Signal* signal, BackupRecordPtr ptr) -{ - /** - * Get all tables from dict - */ - ListTablesReq * req = (ListTablesReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = ptr.i; - req->requestData = 0; - sendSignal(DBDICT_REF, GSN_LIST_TABLES_REQ, signal, - ListTablesReq::SignalLength, JBB); -} - -void -Backup::execLIST_TABLES_CONF(Signal* signal) -{ - jamEntry(); - - ListTablesConf* conf = (ListTablesConf*)signal->getDataPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, conf->senderData); - - const Uint32 len = signal->length() - ListTablesConf::HeaderLength; - for(unsigned int i = 0; i<len; i++) { - Uint32 tableId = ListTablesConf::getTableId(conf->tableData[i]); - Uint32 tableType = ListTablesConf::getTableType(conf->tableData[i]); - Uint32 state= ListTablesConf::getTableState(conf->tableData[i]); - - if (! (DictTabInfo::isTable(tableType) || - DictTabInfo::isIndex(tableType) || - DictTabInfo::isFilegroup(tableType) || - DictTabInfo::isFile(tableType))) - { - jam(); - continue; - } - - if (state != DictTabInfo::StateOnline) - { - jam(); - continue; - } - - TablePtr tabPtr; - ptr.p->tables.seize(tabPtr); - if(tabPtr.i == RNIL) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedToAllocateTables); - return; - }//if - tabPtr.p->tableId = tableId; - tabPtr.p->tableType = tableType; - }//for - - if(len == ListTablesConf::DataLength) { - jam(); - /** - * Not finished... 
- */ - return; - }//if - - /** - * All tables fetched - */ - openFiles(signal, ptr); -} - -void -Backup::openFiles(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - BackupFilePtr filePtr LINT_SET_PTR; - - FsOpenReq * req = (FsOpenReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->fileFlags = - FsOpenReq::OM_WRITEONLY | - FsOpenReq::OM_TRUNCATE | - FsOpenReq::OM_CREATE | - FsOpenReq::OM_APPEND | - FsOpenReq::OM_AUTOSYNC; - FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF); - req->auto_sync_size = c_defaults.m_disk_synch_size; - /** - * Ctl file - */ - c_backupFilePool.getPtr(filePtr, ptr.p->ctlFilePtr); - filePtr.p->m_flags |= BackupFile::BF_OPENING; - - req->userPointer = filePtr.i; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); - - /** - * Log file - */ - c_backupFilePool.getPtr(filePtr, ptr.p->logFilePtr); - filePtr.p->m_flags |= BackupFile::BF_OPENING; - - req->userPointer = filePtr.i; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_LOG); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); - - /** - * Data file - */ - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - filePtr.p->m_flags |= BackupFile::BF_OPENING; - - if (c_defaults.m_o_direct) - req->fileFlags |= FsOpenReq::OM_DIRECT; - req->userPointer = filePtr.i; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - FsOpenReq::v2_setCount(req->fileNumber, 0); - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -} - -void -Backup::execFSOPENREF(Signal* signal) -{ - jamEntry(); - - FsRef * ref = (FsRef *)signal->getDataPtr(); - - const Uint32 userPtr = ref->userPointer; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, userPtr); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - ptr.p->setErrorCode(ref->errorCode); - openFilesReply(signal, ptr, filePtr); -} - -void -Backup::execFSOPENCONF(Signal* signal) -{ - jamEntry(); - - FsConf * conf = (FsConf *)signal->getDataPtr(); - - const Uint32 userPtr = conf->userPointer; - const Uint32 filePointer = conf->filePointer; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, userPtr); - filePtr.p->filePointer = filePointer; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - ndbrequire(! 
(filePtr.p->m_flags & BackupFile::BF_OPEN)); - filePtr.p->m_flags |= BackupFile::BF_OPEN; - openFilesReply(signal, ptr, filePtr); -} - -void -Backup::openFilesReply(Signal* signal, - BackupRecordPtr ptr, BackupFilePtr filePtr) -{ - jam(); - - /** - * Mark files as "opened" - */ - ndbrequire(filePtr.p->m_flags & BackupFile::BF_OPENING); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_OPENING; - filePtr.p->m_flags |= BackupFile::BF_OPEN; - /** - * Check if all files have recived open_reply - */ - for(ptr.p->files.first(filePtr); filePtr.i!=RNIL;ptr.p->files.next(filePtr)) - { - jam(); - if(filePtr.p->m_flags & BackupFile::BF_OPENING) { - jam(); - return; - }//if - }//for - - /** - * Did open succeed for all files - */ - if(ptr.p->checkError()) { - jam(); - defineBackupRef(signal, ptr); - return; - }//if - - if(!ptr.p->is_lcp()) - { - /** - * Insert file headers - */ - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - if(!insertFileHeader(BackupFormat::CTL_FILE, ptr.p, filePtr.p)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader); - return; - }//if - - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - if(!insertFileHeader(BackupFormat::LOG_FILE, ptr.p, filePtr.p)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader); - return; - }//if - - ptr.p->files.getPtr(filePtr, ptr.p->dataFilePtr); - if(!insertFileHeader(BackupFormat::DATA_FILE, ptr.p, filePtr.p)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader); - return; - }//if - } - else - { - ptr.p->files.getPtr(filePtr, ptr.p->dataFilePtr); - if(!insertFileHeader(BackupFormat::LCP_FILE, ptr.p, filePtr.p)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertFileHeader); - return; - }//if - - ptr.p->ctlFilePtr = ptr.p->dataFilePtr; - } - - /** - * Start CTL file thread - */ - if (!ptr.p->is_lcp()) - { - jam(); - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - filePtr.p->m_flags |= BackupFile::BF_FILE_THREAD; - - signal->theData[0] = BackupContinueB::START_FILE_THREAD; - signal->theData[1] = filePtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2); - } - else - { - jam(); - filePtr.p->m_flags |= BackupFile::BF_LCP_META; - } - - /** - * Insert table list in ctl file - */ - FsBuffer & buf = filePtr.p->operation.dataBuffer; - - const Uint32 sz = - (sizeof(BackupFormat::CtlFile::TableList) >> 2) + - ptr.p->tables.count() - 1; - - Uint32 * dst; - ndbrequire(sz < buf.getMaxWrite()); - if(!buf.getWritePtr(&dst, sz)) { - jam(); - defineBackupRef(signal, ptr, DefineBackupRef::FailedInsertTableList); - return; - }//if - - BackupFormat::CtlFile::TableList* tl = - (BackupFormat::CtlFile::TableList*)dst; - tl->SectionType = htonl(BackupFormat::TABLE_LIST); - tl->SectionLength = htonl(sz); - - TablePtr tabPtr; - Uint32 count = 0; - for(ptr.p->tables.first(tabPtr); - tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)){ - jam(); - tl->TableIds[count] = htonl(tabPtr.p->tableId); - count++; - }//for - - buf.updateWritePtr(sz); - - /** - * Start getting table definition data - */ - ndbrequire(ptr.p->tables.first(tabPtr)); - - signal->theData[0] = BackupContinueB::BUFFER_FULL_META; - signal->theData[1] = ptr.i; - signal->theData[2] = tabPtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3); - return; -} - -bool -Backup::insertFileHeader(BackupFormat::FileType ft, - BackupRecord * ptrP, - BackupFile * filePtrP){ - FsBuffer & buf = filePtrP->operation.dataBuffer; - - const Uint32 sz = sizeof(BackupFormat::FileHeader) >> 
2; - - Uint32 * dst; - ndbrequire(sz < buf.getMaxWrite()); - if(!buf.getWritePtr(&dst, sz)) { - jam(); - return false; - }//if - - BackupFormat::FileHeader* header = (BackupFormat::FileHeader*)dst; - ndbrequire(sizeof(header->Magic) == sizeof(BACKUP_MAGIC)); - memcpy(header->Magic, BACKUP_MAGIC, sizeof(BACKUP_MAGIC)); - header->NdbVersion = htonl(NDB_VERSION); - header->SectionType = htonl(BackupFormat::FILE_HEADER); - header->SectionLength = htonl(sz - 3); - header->FileType = htonl(ft); - header->BackupId = htonl(ptrP->backupId); - header->BackupKey_0 = htonl(ptrP->backupKey[0]); - header->BackupKey_1 = htonl(ptrP->backupKey[1]); - header->ByteOrder = 0x12345678; - - buf.updateWritePtr(sz); - return true; -} - -void -Backup::execGET_TABINFOREF(Signal* signal) -{ - GetTabInfoRef * ref = (GetTabInfoRef*)signal->getDataPtr(); - - const Uint32 senderData = ref->senderData; - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, senderData); - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD; - - defineBackupRef(signal, ptr, ref->errorCode); -} - -void -Backup::execGET_TABINFO_CONF(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)) { - jam(); - return; - }//if - - GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr(); - //const Uint32 senderRef = info->senderRef; - const Uint32 len = conf->totalLen; - const Uint32 senderData = conf->senderData; - const Uint32 tableType = conf->tableType; - const Uint32 tableId = conf->tableId; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, senderData); - - SegmentedSectionPtr dictTabInfoPtr; - signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO); - ndbrequire(dictTabInfoPtr.sz == len); - - TablePtr tabPtr ; - ndbrequire(findTable(ptr, tabPtr, tableId)); - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - FsBuffer & buf = filePtr.p->operation.dataBuffer; - Uint32* dst = 0; - { // Write into ctl file - Uint32 dstLen = len + 3; - if(!buf.getWritePtr(&dst, dstLen)) { - jam(); - ndbrequire(false); - ptr.p->setErrorCode(DefineBackupRef::FailedAllocateTableMem); - releaseSections(signal); - defineBackupRef(signal, ptr); - return; - }//if - if(dst != 0) { - jam(); - - BackupFormat::CtlFile::TableDescription * desc = - (BackupFormat::CtlFile::TableDescription*)dst; - desc->SectionType = htonl(BackupFormat::TABLE_DESCRIPTION); - desc->SectionLength = htonl(len + 3); - desc->TableType = htonl(tableType); - dst += 3; - - copy(dst, dictTabInfoPtr); - buf.updateWritePtr(dstLen); - }//if - } - - releaseSections(signal); - - if(ptr.p->checkError()) { - jam(); - defineBackupRef(signal, ptr); - return; - }//if - - if (!DictTabInfo::isTable(tabPtr.p->tableType)) - { - jam(); - - TablePtr tmp = tabPtr; - ptr.p->tables.next(tabPtr); - ptr.p->tables.release(tmp); - goto next; - } - - if (!parseTableDescription(signal, ptr, tabPtr, dst, len)) - { - jam(); - defineBackupRef(signal, ptr); - return; - } - - if(!ptr.p->is_lcp()) - { - jam(); - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 1; // lock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); - } - - ptr.p->tables.next(tabPtr); - -next: - if(tabPtr.i == RNIL) - { - /** - * Done with all tables... 
- */ - jam(); - - if(ptr.p->is_lcp()) - { - lcp_open_file_done(signal, ptr); - return; - } - - ndbrequire(ptr.p->tables.first(tabPtr)); - DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); - req->m_connectionData = RNIL; - req->m_tableRef = tabPtr.p->tableId; - req->m_senderData = ptr.i; - sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, - DihFragCountReq::SignalLength, JBB); - return; - }//if - - /** - * Fetch next table... - */ - signal->theData[0] = BackupContinueB::BUFFER_FULL_META; - signal->theData[1] = ptr.i; - signal->theData[2] = tabPtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3); - return; -} - -bool -Backup::parseTableDescription(Signal* signal, - BackupRecordPtr ptr, - TablePtr tabPtr, - const Uint32 * tabdescptr, - Uint32 len) -{ - SimplePropertiesLinearReader it(tabdescptr, len); - - it.first(); - - DictTabInfo::Table tmpTab; tmpTab.init(); - SimpleProperties::UnpackStatus stat; - stat = SimpleProperties::unpack(it, &tmpTab, - DictTabInfo::TableMapping, - DictTabInfo::TableMappingSize, - true, true); - ndbrequire(stat == SimpleProperties::Break); - - bool lcp = ptr.p->is_lcp(); - - ndbrequire(tabPtr.p->tableId == tmpTab.TableId); - ndbrequire(lcp || (tabPtr.p->tableType == tmpTab.TableType)); - - /** - * LCP should not save disk attributes but only mem attributes - */ - - /** - * Initialize table object - */ - tabPtr.p->noOfRecords = 0; - tabPtr.p->schemaVersion = tmpTab.TableVersion; - tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes; - tabPtr.p->noOfNull = 0; - tabPtr.p->noOfVariable = 0; // Computed while iterating over attribs - tabPtr.p->sz_FixedAttributes = 0; // Computed while iterating over attribs - tabPtr.p->triggerIds[0] = ILLEGAL_TRIGGER_ID; - tabPtr.p->triggerIds[1] = ILLEGAL_TRIGGER_ID; - tabPtr.p->triggerIds[2] = ILLEGAL_TRIGGER_ID; - tabPtr.p->triggerAllocated[0] = false; - tabPtr.p->triggerAllocated[1] = false; - tabPtr.p->triggerAllocated[2] = false; - - Uint32 disk = 0; - const Uint32 count = tabPtr.p->noOfAttributes; - for(Uint32 i = 0; i<count; i++) { - jam(); - DictTabInfo::Attribute tmp; tmp.init(); - stat = SimpleProperties::unpack(it, &tmp, - DictTabInfo::AttributeMapping, - DictTabInfo::AttributeMappingSize, - true, true); - - ndbrequire(stat == SimpleProperties::Break); - it.next(); // Move Past EndOfAttribute - - const Uint32 arr = tmp.AttributeArraySize; - const Uint32 sz = 1 << tmp.AttributeSize; - const Uint32 sz32 = (sz * arr + 31) >> 5; - - if(lcp && tmp.AttributeStorageType == NDB_STORAGETYPE_DISK) - { - disk++; - continue; - } - - AttributePtr attrPtr; - if(!tabPtr.p->attributes.seize(attrPtr)) - { - jam(); - ptr.p->setErrorCode(DefineBackupRef::FailedToAllocateAttributeRecord); - return false; - } - - attrPtr.p->data.m_flags = 0; - attrPtr.p->data.attrId = tmp.AttributeId; - - attrPtr.p->data.m_flags |= - (tmp.AttributeNullableFlag ? Attribute::COL_NULLABLE : 0); - attrPtr.p->data.m_flags |= (tmp.AttributeArrayType == NDB_ARRAYTYPE_FIXED)? 
- Attribute::COL_FIXED : 0; - attrPtr.p->data.sz32 = sz32; - - /** - * 1) Fixed non-nullable - * 2) Other - */ - if(attrPtr.p->data.m_flags & Attribute::COL_FIXED && - !(attrPtr.p->data.m_flags & Attribute::COL_NULLABLE)) { - jam(); - attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes; - tabPtr.p->sz_FixedAttributes += sz32; - } else { - attrPtr.p->data.offset = ~0; - tabPtr.p->noOfVariable++; - } - }//for - - - if(lcp) - { - if (disk) - { - /** - * Remove all disk attributes - */ - tabPtr.p->noOfAttributes -= disk; - - { - AttributePtr attrPtr; - ndbrequire(tabPtr.p->attributes.seize(attrPtr)); - - Uint32 sz32 = 2; - attrPtr.p->data.attrId = AttributeHeader::DISK_REF; - attrPtr.p->data.m_flags = Attribute::COL_FIXED; - attrPtr.p->data.sz32 = 2; - - attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes; - tabPtr.p->sz_FixedAttributes += sz32; - tabPtr.p->noOfAttributes ++; - } - } - - { - AttributePtr attrPtr; - ndbrequire(tabPtr.p->attributes.seize(attrPtr)); - - Uint32 sz32 = 2; - attrPtr.p->data.attrId = AttributeHeader::ROWID; - attrPtr.p->data.m_flags = Attribute::COL_FIXED; - attrPtr.p->data.sz32 = 2; - - attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes; - tabPtr.p->sz_FixedAttributes += sz32; - tabPtr.p->noOfAttributes ++; - } - - if (tmpTab.RowGCIFlag) - { - AttributePtr attrPtr; - ndbrequire(tabPtr.p->attributes.seize(attrPtr)); - - Uint32 sz32 = 2; - attrPtr.p->data.attrId = AttributeHeader::ROW_GCI; - attrPtr.p->data.m_flags = Attribute::COL_FIXED; - attrPtr.p->data.sz32 = 2; - - attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes; - tabPtr.p->sz_FixedAttributes += sz32; - tabPtr.p->noOfAttributes ++; - } - } - return true; -} - -void -Backup::execDI_FCOUNTCONF(Signal* signal) -{ - jamEntry(); - DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); - const Uint32 userPtr = conf->m_connectionData; - const Uint32 fragCount = conf->m_fragmentCount; - const Uint32 tableId = conf->m_tableRef; - const Uint32 senderData = conf->m_senderData; - - ndbrequire(userPtr == RNIL && signal->length() == 5); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, senderData); - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, tableId)); - - ndbrequire(tabPtr.p->fragments.seize(fragCount) != false); - for(Uint32 i = 0; i<fragCount; i++) { - jam(); - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, i); - fragPtr.p->scanned = 0; - fragPtr.p->scanning = 0; - fragPtr.p->tableId = tableId; - fragPtr.p->fragmentId = i; - fragPtr.p->node = 0; - }//for - - /** - * Next table - */ - if(ptr.p->tables.next(tabPtr)) { - jam(); - DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); - req->m_connectionData = RNIL; - req->m_tableRef = tabPtr.p->tableId; - req->m_senderData = ptr.i; - sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, - DihFragCountReq::SignalLength, JBB); - return; - }//if - - ptr.p->tables.first(tabPtr); - getFragmentInfo(signal, ptr, tabPtr, 0); -} - -void -Backup::getFragmentInfo(Signal* signal, - BackupRecordPtr ptr, TablePtr tabPtr, Uint32 fragNo) -{ - jam(); - - for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { - jam(); - const Uint32 fragCount = tabPtr.p->fragments.getSize(); - for(; fragNo < fragCount; fragNo ++) { - jam(); - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, fragNo); - - if(fragPtr.p->scanned == 0 && fragPtr.p->scanning == 0) { - jam(); - signal->theData[0] = RNIL; - signal->theData[1] = ptr.i; - signal->theData[2] = tabPtr.p->tableId; - signal->theData[3] = fragNo; - sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB); - return; - }//if - 
}//for - fragNo = 0; - }//for - - getFragmentInfoDone(signal, ptr); -} - -void -Backup::execDIGETPRIMCONF(Signal* signal) -{ - jamEntry(); - - const Uint32 userPtr = signal->theData[0]; - const Uint32 senderData = signal->theData[1]; - const Uint32 nodeCount = signal->theData[6]; - const Uint32 tableId = signal->theData[7]; - const Uint32 fragNo = signal->theData[8]; - - ndbrequire(userPtr == RNIL && signal->length() == 9); - ndbrequire(nodeCount > 0 && nodeCount <= MAX_REPLICAS); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, senderData); - - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, tableId)); - - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, fragNo); - - fragPtr.p->node = signal->theData[2]; - - getFragmentInfo(signal, ptr, tabPtr, fragNo + 1); -} - -void -Backup::getFragmentInfoDone(Signal* signal, BackupRecordPtr ptr) -{ - ptr.p->m_gsn = GSN_DEFINE_BACKUP_CONF; - ptr.p->slaveState.setState(DEFINED); - DefineBackupConf * conf = (DefineBackupConf*)signal->getDataPtr(); - conf->backupPtr = ptr.i; - conf->backupId = ptr.p->backupId; - sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_CONF, signal, - DefineBackupConf::SignalLength, JBB); -} - - -/***************************************************************************** - * - * Slave functionallity: Start backup - * - *****************************************************************************/ -void -Backup::execSTART_BACKUP_REQ(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10015)); - - StartBackupReq* req = (StartBackupReq*)signal->getDataPtr(); - const Uint32 ptrI = req->backupPtr; - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->slaveState.setState(STARTED); - ptr.p->m_gsn = GSN_START_BACKUP_REQ; - - /** - * Start file threads... - */ - BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr); filePtr.i!=RNIL;ptr.p->files.next(filePtr)) - { - jam(); - if(! 
(filePtr.p->m_flags & BackupFile::BF_FILE_THREAD)) - { - jam(); - filePtr.p->m_flags |= BackupFile::BF_FILE_THREAD; - signal->theData[0] = BackupContinueB::START_FILE_THREAD; - signal->theData[1] = filePtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2); - }//if - }//for - - /** - * Tell DBTUP to create triggers - */ - TablePtr tabPtr; - ndbrequire(ptr.p->tables.first(tabPtr)); - sendCreateTrig(signal, ptr, tabPtr); -} - -/***************************************************************************** - * - * Slave functionallity: Backup fragment - * - *****************************************************************************/ -void -Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) -{ - jamEntry(); - BackupFragmentReq* req = (BackupFragmentReq*)signal->getDataPtr(); - - CRASH_INSERTION((10016)); - - const Uint32 ptrI = req->backupPtr; - //const Uint32 backupId = req->backupId; - const Uint32 tableId = req->tableId; - const Uint32 fragNo = req->fragmentNo; - const Uint32 count = req->count; - - /** - * Get backup record - */ - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->slaveState.setState(SCANNING); - ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REQ; - - /** - * Get file - */ - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - - ndbrequire(filePtr.p->backupPtr == ptrI); - ndbrequire(filePtr.p->m_flags == - (BackupFile::BF_OPEN | BackupFile::BF_FILE_THREAD)); - - /** - * Get table - */ - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, tableId)); - - /** - * Get fragment - */ - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, fragNo); - - ndbrequire(fragPtr.p->scanned == 0); - ndbrequire(fragPtr.p->scanning == 0 || - refToNode(ptr.p->masterRef) == getOwnNodeId()); - - /** - * Init operation - */ - if(filePtr.p->tableId != tableId) { - jam(); - filePtr.p->operation.init(tabPtr); - filePtr.p->tableId = tableId; - }//if - - /** - * Check for space in buffer - */ - if(!filePtr.p->operation.newFragment(tableId, fragPtr.p->fragmentId)) { - jam(); - req->count = count + 1; - sendSignalWithDelay(BACKUP_REF, GSN_BACKUP_FRAGMENT_REQ, signal, 50, - signal->length()); - ptr.p->slaveState.setState(STARTED); - return; - }//if - - /** - * Mark things as "in use" - */ - fragPtr.p->scanning = 1; - filePtr.p->fragmentNo = fragPtr.p->fragmentId; - - /** - * Start scan - */ - { - filePtr.p->m_flags |= BackupFile::BF_SCAN_THREAD; - - Table & table = * tabPtr.p; - ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend(); - const Uint32 parallelism = 16; - const Uint32 attrLen = 5 + table.noOfAttributes; - - req->senderData = filePtr.i; - req->resultRef = reference(); - req->schemaVersion = table.schemaVersion; - req->fragmentNoKeyLen = fragPtr.p->fragmentId; - req->requestInfo = 0; - req->savePointId = 0; - req->tableId = table.tableId; - ScanFragReq::setReadCommittedFlag(req->requestInfo, 1); - ScanFragReq::setLockMode(req->requestInfo, 0); - ScanFragReq::setHoldLockFlag(req->requestInfo, 0); - ScanFragReq::setKeyinfoFlag(req->requestInfo, 0); - ScanFragReq::setAttrLen(req->requestInfo,attrLen); - ScanFragReq::setTupScanFlag(req->requestInfo, 1); - if (ptr.p->is_lcp()) - { - ScanFragReq::setScanPrio(req->requestInfo, 1); - ScanFragReq::setNoDiskFlag(req->requestInfo, 1); - ScanFragReq::setLcpScanFlag(req->requestInfo, 1); - } - req->transId1 = 0; - req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - req->clientOpPtr= filePtr.i; - req->batch_size_rows= parallelism; - req->batch_size_bytes= 0; - 
sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal, - ScanFragReq::SignalLength, JBB); - - signal->theData[0] = filePtr.i; - signal->theData[1] = 0; - signal->theData[2] = (BACKUP << 20) + (getOwnNodeId() << 8); - - // Return all - signal->theData[3] = table.noOfAttributes; - signal->theData[4] = 0; - signal->theData[5] = 0; - signal->theData[6] = 0; - signal->theData[7] = 0; - - Uint32 dataPos = 8; - Ptr<Attribute> attrPtr; - table.attributes.first(attrPtr); - for(; !attrPtr.isNull(); table.attributes.next(attrPtr)) - { - jam(); - - /** - * LCP should not save disk attributes - */ - ndbrequire(! (ptr.p->is_lcp() && - attrPtr.p->data.m_flags & Attribute::COL_DISK)); - - AttributeHeader::init(&signal->theData[dataPos], - attrPtr.p->data.attrId, 0); - dataPos++; - if(dataPos == 25) { - jam(); - sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, 25, JBB); - dataPos = 3; - }//if - }//for - if(dataPos != 3) { - jam(); - sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, dataPos, JBB); - }//if - } -} - -void -Backup::execSCAN_HBREP(Signal* signal) -{ - jamEntry(); -} - -void -Backup::execTRANSID_AI(Signal* signal) -{ - jamEntry(); - - const Uint32 filePtrI = signal->theData[0]; - //const Uint32 transId1 = signal->theData[1]; - //const Uint32 transId2 = signal->theData[2]; - const Uint32 dataLen = signal->length() - 3; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - OperationRecord & op = filePtr.p->operation; - - TablePtr tabPtr LINT_SET_PTR; - c_tablePool.getPtr(tabPtr, op.tablePtr); - - Table & table = * tabPtr.p; - - /** - * Unpack data - */ - op.attrSzTotal += dataLen; - - Uint32 srcSz = dataLen; - Uint32 usedSz = 0; - const Uint32 * src = &signal->theData[3]; - - Ptr<Attribute> attrPtr; - table.attributes.first(attrPtr); - Uint32 columnNo = 0; - - while (usedSz < srcSz) - { - jam(); - - /** - * Finished with one attribute now find next - */ - const AttributeHeader attrHead(* src); - const Uint32 attrId = attrHead.getAttributeId(); - const bool null = attrHead.isNULL(); - const Attribute::Data attr = attrPtr.p->data; - ndbrequire(attrId == attr.attrId); - - usedSz += attrHead.getHeaderSize(); - src += attrHead.getHeaderSize(); - - if (null) { - jam(); - ndbrequire(attr.m_flags & Attribute::COL_NULLABLE); - op.nullVariable(); - } else { - Uint32* dst; - Uint32 dstSz = attrHead.getDataSize(); - if (attr.m_flags & Attribute::COL_FIXED && - ! 
(attr.m_flags & Attribute::COL_NULLABLE)) { - jam(); - dst = op.newAttrib(attr.offset, dstSz); - ndbrequire(dstSz == attr.sz32); - } else { - dst = op.newVariable(columnNo, attrHead.getByteSize()); - ndbrequire(dstSz <= attr.sz32); - } - - memcpy(dst, src, (dstSz << 2)); - src += dstSz; - usedSz += dstSz; - } - table.attributes.next(attrPtr); - columnNo++; - } - - ndbrequire(usedSz == srcSz); - ndbrequire(op.finished()); - op.newRecord(op.dst); -} - -void -Backup::OperationRecord::init(const TablePtr & ptr) -{ - - tablePtr = ptr.i; - noOfAttributes = ptr.p->noOfAttributes; - - sz_Bitmask = (ptr.p->noOfNull + 31) >> 5; - sz_FixedAttribs = ptr.p->sz_FixedAttributes; - - if(ptr.p->noOfVariable == 0) { - jam(); - maxRecordSize = 1 + sz_Bitmask + sz_FixedAttribs; - } else { - jam(); - maxRecordSize = - 1 + sz_Bitmask + 2048 /* Max tuple size */ + 2 * ptr.p->noOfVariable; - }//if -} - -bool -Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo) -{ - Uint32 * tmp; - const Uint32 headSz = (sizeof(BackupFormat::DataFile::FragmentHeader) >> 2); - const Uint32 sz = headSz + 16 * maxRecordSize; - - ndbrequire(sz < dataBuffer.getMaxWrite()); - if(dataBuffer.getWritePtr(&tmp, sz)) { - jam(); - BackupFormat::DataFile::FragmentHeader * head = - (BackupFormat::DataFile::FragmentHeader*)tmp; - - head->SectionType = htonl(BackupFormat::FRAGMENT_HEADER); - head->SectionLength = htonl(headSz); - head->TableId = htonl(tableId); - head->FragmentNo = htonl(fragNo); - head->ChecksumType = htonl(0); - - opNoDone = opNoConf = opLen = 0; - newRecord(tmp + headSz); - scanStart = tmp; - scanStop = (tmp + headSz); - - noOfRecords = 0; - noOfBytes = 0; - return true; - }//if - return false; -} - -bool -Backup::OperationRecord::fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record) -{ - Uint32 * tmp; - const Uint32 footSz = sizeof(BackupFormat::DataFile::FragmentFooter) >> 2; - Uint32 sz = footSz + 1; - - if (fill_record) - { - Uint32 * new_tmp; - if (!dataBuffer.getWritePtr(&tmp, sz)) - return false; - new_tmp = tmp + sz; - - if ((UintPtr)new_tmp & (sizeof(Page32)-1)) - { - /* padding is needed to get full write */ - new_tmp += 2 /* to fit empty header minimum 2 words*/; - new_tmp = (Uint32 *)(((UintPtr)new_tmp + sizeof(Page32)-1) & - ~(UintPtr)(sizeof(Page32)-1)); - /* new write sz */ - sz = new_tmp - tmp; - } - } - - if(dataBuffer.getWritePtr(&tmp, sz)) { - jam(); - * tmp = 0; // Finish record stream - tmp++; - BackupFormat::DataFile::FragmentFooter * foot = - (BackupFormat::DataFile::FragmentFooter*)tmp; - foot->SectionType = htonl(BackupFormat::FRAGMENT_FOOTER); - foot->SectionLength = htonl(footSz); - foot->TableId = htonl(tableId); - foot->FragmentNo = htonl(fragNo); - foot->NoOfRecords = htonl(noOfRecords); - foot->Checksum = htonl(0); - - if (sz != footSz + 1) - { - tmp += footSz; - memset(tmp, 0, (sz - footSz - 1) * 4); - *tmp = htonl(BackupFormat::EMPTY_ENTRY); - tmp++; - *tmp = htonl(sz - footSz - 1); - } - - dataBuffer.updateWritePtr(sz); - return true; - }//if - return false; -} - -bool -Backup::OperationRecord::newScan() -{ - Uint32 * tmp; - ndbrequire(16 * maxRecordSize < dataBuffer.getMaxWrite()); - if(dataBuffer.getWritePtr(&tmp, 16 * maxRecordSize)) { - jam(); - opNoDone = opNoConf = opLen = 0; - newRecord(tmp); - scanStart = tmp; - scanStop = tmp; - return true; - }//if - return false; -} - -bool -Backup::OperationRecord::closeScan() -{ - opNoDone = opNoConf = opLen = 0; - return true; -} - -bool -Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 total_len) -{ - const Uint32 
done = opNoDone-opNoConf; - - ndbrequire(noOfOps == done); - ndbrequire(opLen == total_len); - opNoConf = opNoDone; - - const Uint32 len = (scanStop - scanStart); - ndbrequire(len < dataBuffer.getMaxWrite()); - dataBuffer.updateWritePtr(len); - noOfBytes += (len << 2); - return true; -} - -void -Backup::execSCAN_FRAGREF(Signal* signal) -{ - jamEntry(); - - ScanFragRef * ref = (ScanFragRef*)signal->getDataPtr(); - - const Uint32 filePtrI = ref->senderData; - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - filePtr.p->errorCode = ref->errorCode; - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - - backupFragmentRef(signal, filePtr); -} - -void -Backup::execSCAN_FRAGCONF(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10017)); - - ScanFragConf * conf = (ScanFragConf*)signal->getDataPtr(); - - const Uint32 filePtrI = conf->senderData; - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - OperationRecord & op = filePtr.p->operation; - - op.scanConf(conf->completedOps, conf->total_len); - const Uint32 completed = conf->fragmentCompleted; - if(completed != 2) { - jam(); - - checkScan(signal, filePtr); - return; - }//if - - fragmentCompleted(signal, filePtr); -} - -void -Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) -{ - jam(); - - if(filePtr.p->errorCode != 0) - { - jam(); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - backupFragmentRef(signal, filePtr); // Scan completed - return; - }//if - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - OperationRecord & op = filePtr.p->operation; - if(!op.fragComplete(filePtr.p->tableId, filePtr.p->fragmentNo, - c_defaults.m_o_direct)) - { - jam(); - signal->theData[0] = BackupContinueB::BUFFER_FULL_FRAG_COMPLETE; - signal->theData[1] = filePtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 50, 2); - return; - }//if - - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_SCAN_THREAD; - - if (ptr.p->is_lcp()) - { - ptr.p->slaveState.setState(STOPPING); - filePtr.p->operation.dataBuffer.eof(); - } - else - { - BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->backupPtr = ptr.i; - conf->tableId = filePtr.p->tableId; - conf->fragmentNo = filePtr.p->fragmentNo; - conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF); - conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32); - conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF); - conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32); - sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, - BackupFragmentConf::SignalLength, JBB); - - ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_CONF; - ptr.p->slaveState.setState(STARTED); - } - return; -} - -void -Backup::backupFragmentRef(Signal * signal, BackupFilePtr filePtr) -{ - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REF; - - BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtrSend(); - ref->backupId = ptr.p->backupId; - ref->backupPtr = ptr.i; - ref->nodeId = getOwnNodeId(); - ref->errorCode = filePtr.p->errorCode; - sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_REF, signal, - BackupFragmentRef::SignalLength, JBB); -} - -void -Backup::checkScan(Signal* signal, BackupFilePtr filePtr) -{ - OperationRecord & op = filePtr.p->operation; - - if(filePtr.p->errorCode != 0) - { - jam(); - - /** - * Close scan - */ - op.closeScan(); - 
ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); - req->senderData = filePtr.i; - req->closeFlag = 1; - req->transId1 = 0; - req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - return; - }//if - - if(op.newScan()) { - jam(); - - ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); - req->senderData = filePtr.i; - req->closeFlag = 0; - req->transId1 = 0; - req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - req->batch_size_rows= 16; - req->batch_size_bytes= 0; - - if (ERROR_INSERTED(10036) && - filePtr.p->tableId >= 2 && - filePtr.p->operation.noOfRecords > 0) - { - ndbout_c("halting backup for table %d fragment: %d after %llu records", - filePtr.p->tableId, - filePtr.p->fragmentNo, - filePtr.p->operation.noOfRecords); - memmove(signal->theData+1, signal->theData, - 4*ScanFragNextReq::SignalLength); - signal->theData[0] = BackupContinueB::ZDELAY_SCAN_NEXT; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, - 300, 1+ScanFragNextReq::SignalLength); - return; - } - if(ERROR_INSERTED(10032)) - sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - 100, ScanFragNextReq::SignalLength); - else if(ERROR_INSERTED(10033)) - { - SET_ERROR_INSERT_VALUE(10032); - sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - 10000, ScanFragNextReq::SignalLength); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = AbortBackupOrd::FileOrScanError; - ord->senderData= ptr.i; - sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); - } - else - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - return; - }//if - - signal->theData[0] = BackupContinueB::BUFFER_FULL_SCAN; - signal->theData[1] = filePtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 50, 2); -} - -void -Backup::execFSAPPENDREF(Signal* signal) -{ - jamEntry(); - - FsRef * ref = (FsRef *)signal->getDataPtr(); - - const Uint32 filePtrI = ref->userPointer; - const Uint32 errCode = ref->errorCode; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD; - filePtr.p->errorCode = errCode; - - checkFile(signal, filePtr); -} - -void -Backup::execFSAPPENDCONF(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION((10018)); - - //FsConf * conf = (FsConf*)signal->getDataPtr(); - const Uint32 filePtrI = signal->theData[0]; //conf->userPointer; - const Uint32 bytes = signal->theData[1]; //conf->bytes; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - OperationRecord & op = filePtr.p->operation; - - op.dataBuffer.updateReadPtr(bytes >> 2); - - checkFile(signal, filePtr); -} - -/* - This routine handles two problems with writing to disk during local - checkpoints and backups. The first problem is that we need to limit - the writing to ensure that we don't use too much CPU and disk resources - for backups and checkpoints. The perfect solution to this is to use - a dynamic algorithm that adapts to the environment. Until we have - implemented this we can satisfy ourselves with an algorithm that - uses a configurable limit. 
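The check the preceding paragraph describes reduces to a per-period word budget. A minimal standalone sketch of that idea follows; the struct, its member names, and the carry-over reset policy are illustrative assumptions, while the removed block's own version is the ready_to_write()/m_words_written_this_period code further down in this hunk. The 100 ms period matches the DISK_SPEED_CHECK_DELAY constant declared in the removed Backup.hpp below.

    #include <cstdint>

    // Illustrative sketch (not the removed implementation): a per-period
    // disk-write budget in 32-bit words, checked before each file append.
    struct WriteBudget {
      explicit WriteBudget(uint32_t limit) : wordsPerPeriod(limit) {}

      // Assumed reset policy: called on each 100 ms tick, carrying any
      // overshoot from the previous period forward.
      void newPeriod() { wordsThisPeriod = overflow; overflow = 0; }

      // True if a buffer of 'sz' words may be written now; otherwise the
      // caller postpones and retries on a later tick.
      bool readyToWrite(bool bufferReady, bool eof, uint32_t sz) {
        if (!(bufferReady || eof) || wordsThisPeriod > wordsPerPeriod)
          return false;
        wordsThisPeriod += sz;
        if (wordsThisPeriod > wordsPerPeriod)
          overflow = wordsThisPeriod - wordsPerPeriod;
        return true;
      }

      uint32_t wordsPerPeriod;       // configurable limit per period
      uint32_t wordsThisPeriod = 0;  // words written in the current period
      uint32_t overflow = 0;         // excess carried into the next period
    };
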
- - The second problem is that in Linux we can get severe problems if we - write very much to the disk without synching. In the worst case we - can have Gigabytes of data in the Linux page cache before we reach - the limit of how much we can write. If this happens the performance - will drop significantly when we reach this limit since the Linux flush - daemon will spend a few minutes on writing out the page cache to disk. - To avoid this we ensure that a file never have more than a certain - amount of data outstanding before synch. This variable is also - configurable. -*/ -bool -Backup::ready_to_write(bool ready, Uint32 sz, bool eof, BackupFile *fileP) -{ -#if 0 - ndbout << "ready_to_write: ready = " << ready << " eof = " << eof; - ndbout << " sz = " << sz << endl; - ndbout << "words this period = " << m_words_written_this_period; - ndbout << endl << "overflow disk write = " << m_overflow_disk_write; - ndbout << endl << "Current Millisecond is = "; - ndbout << NdbTick_CurrentMillisecond() << endl; -#endif - if ((ready || eof) && - m_words_written_this_period <= m_curr_disk_write_speed) - { - /* - We have a buffer ready to write or we have reached end of - file and thus we must write the last before closing the - file. - We have already check that we are allowed to write at this - moment. We only worry about history of last 100 milliseconds. - What happened before that is of no interest since a disk - write that was issued more than 100 milliseconds should be - completed by now. - */ - int overflow; - m_words_written_this_period += sz; - overflow = m_words_written_this_period - m_curr_disk_write_speed; - if (overflow > 0) - m_overflow_disk_write = overflow; -#if 0 - ndbout << "Will write with " << endl; - ndbout << endl; -#endif - return true; - } - else - { -#if 0 - ndbout << "Will not write now" << endl << endl; -#endif - return false; - } -} - -void -Backup::checkFile(Signal* signal, BackupFilePtr filePtr) -{ - -#ifdef DEBUG_ABORT - // ndbout_c("---- check file filePtr.i = %u", filePtr.i); -#endif - - OperationRecord & op = filePtr.p->operation; - Uint32 *tmp = NULL; - Uint32 sz = 0; - bool eof = FALSE; - bool ready = op.dataBuffer.getReadPtr(&tmp, &sz, &eof); -#if 0 - ndbout << "Ptr to data = " << hex << tmp << endl; -#endif - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - if (ERROR_INSERTED(10036)) - { - jam(); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_FILE_THREAD; - filePtr.p->errorCode = 2810; - ptr.p->setErrorCode(2810); - - if(ptr.p->m_gsn == GSN_STOP_BACKUP_REQ) - { - jam(); - closeFile(signal, ptr, filePtr); - } - return; - } - - if(filePtr.p->errorCode != 0) - { - jam(); - ptr.p->setErrorCode(filePtr.p->errorCode); - - if(ptr.p->m_gsn == GSN_STOP_BACKUP_REQ) - { - jam(); - closeFile(signal, ptr, filePtr); - } - return; - } - - if (!ready_to_write(ready, sz, eof, filePtr.p)) - { - jam(); - signal->theData[0] = BackupContinueB::BUFFER_UNDERFLOW; - signal->theData[1] = filePtr.i; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 20, 2); - return; - } - else if (sz > 0) - { - jam(); - FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); - req->filePointer = filePtr.p->filePointer; - req->userPointer = filePtr.i; - req->userReference = reference(); - req->varIndex = 0; - req->offset = tmp - c_startOfPages; - req->size = sz; - req->synch_flag = 0; - - sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal, - FsAppendReq::SignalLength, JBA); - return; - } - - Uint32 flags = filePtr.p->m_flags; - filePtr.p->m_flags &= 
~(Uint32)BackupFile::BF_FILE_THREAD; - - ndbrequire(flags & BackupFile::BF_OPEN); - ndbrequire(flags & BackupFile::BF_FILE_THREAD); - - closeFile(signal, ptr, filePtr); -} - - -/**************************************************************************** - * - * Slave functionallity: Perform logging - * - ****************************************************************************/ -void -Backup::execBACKUP_TRIG_REQ(Signal* signal) -{ - /* - TUP asks if this trigger is to be fired on this node. - */ - TriggerPtr trigPtr LINT_SET_PTR; - TablePtr tabPtr LINT_SET_PTR; - FragmentPtr fragPtr; - Uint32 trigger_id = signal->theData[0]; - Uint32 frag_id = signal->theData[1]; - Uint32 result; - - jamEntry(); - - c_triggerPool.getPtr(trigPtr, trigger_id); - - c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i); - tabPtr.p->fragments.getPtr(fragPtr, frag_id); - if (fragPtr.p->node != getOwnNodeId()) { - - jam(); - result = ZFALSE; - } else { - jam(); - result = ZTRUE; - }//if - signal->theData[0] = result; -} - -void -Backup::execTRIG_ATTRINFO(Signal* signal) { - jamEntry(); - - CRASH_INSERTION((10019)); - - TrigAttrInfo * trg = (TrigAttrInfo*)signal->getDataPtr(); - - TriggerPtr trigPtr LINT_SET_PTR; - c_triggerPool.getPtr(trigPtr, trg->getTriggerId()); - ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online... - - if(trigPtr.p->errorCode != 0) { - jam(); - return; - }//if - - if(trg->getAttrInfoType() == TrigAttrInfo::BEFORE_VALUES) { - jam(); - /** - * Backup is doing REDO logging and don't need before values - */ - return; - }//if - - BackupFormat::LogFile::LogEntry * logEntry = trigPtr.p->logEntry; - if(logEntry == 0) - { - jam(); - Uint32 * dst; - FsBuffer & buf = trigPtr.p->operation->dataBuffer; - ndbrequire(trigPtr.p->maxRecordSize <= buf.getMaxWrite()); - - if(ERROR_INSERTED(10030) || - !buf.getWritePtr(&dst, trigPtr.p->maxRecordSize)) - { - jam(); - Uint32 save[TrigAttrInfo::StaticLength]; - memcpy(save, signal->getDataPtr(), 4*TrigAttrInfo::StaticLength); - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = AbortBackupOrd::LogBufferFull; - ord->senderData= ptr.i; - sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); - - memcpy(signal->getDataPtrSend(), save, 4*TrigAttrInfo::StaticLength); - return; - }//if - - logEntry = (BackupFormat::LogFile::LogEntry *)dst; - trigPtr.p->logEntry = logEntry; - logEntry->Length = 0; - logEntry->TableId = htonl(trigPtr.p->tableId); - - - if(trigPtr.p->event==0) - logEntry->TriggerEvent= htonl(TriggerEvent::TE_INSERT); - else if(trigPtr.p->event==1) - logEntry->TriggerEvent= htonl(TriggerEvent::TE_UPDATE); - else if(trigPtr.p->event==2) - logEntry->TriggerEvent= htonl(TriggerEvent::TE_DELETE); - else { - ndbout << "Bad Event: " << trigPtr.p->event << endl; - ndbrequire(false); - } - } else { - ndbrequire(logEntry->TableId == htonl(trigPtr.p->tableId)); -// ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event)); - }//if - - const Uint32 pos = logEntry->Length; - const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength; - memcpy(&logEntry->Data[pos], trg->getData(), dataLen << 2); - - logEntry->Length = pos + dataLen; -} - -void -Backup::execFIRE_TRIG_ORD(Signal* signal) -{ - jamEntry(); - FireTrigOrd* trg = (FireTrigOrd*)signal->getDataPtr(); - - const 
Uint32 gci = trg->getGCI(); - const Uint32 trI = trg->getTriggerId(); - const Uint32 fragId = trg->fragId; - - TriggerPtr trigPtr LINT_SET_PTR; - c_triggerPool.getPtr(trigPtr, trI); - - ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); - - if(trigPtr.p->errorCode != 0) { - jam(); - return; - }//if - - ndbrequire(trigPtr.p->logEntry != 0); - Uint32 len = trigPtr.p->logEntry->Length; - trigPtr.p->logEntry->FragId = htonl(fragId); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(gci != ptr.p->currGCP) - { - jam(); - trigPtr.p->logEntry->TriggerEvent|= htonl(0x10000); - trigPtr.p->logEntry->Data[len] = htonl(gci); - len++; - ptr.p->currGCP = gci; - } - - len += (sizeof(BackupFormat::LogFile::LogEntry) >> 2) - 2; - trigPtr.p->logEntry->Length = htonl(len); - - ndbrequire(len + 1 <= trigPtr.p->operation->dataBuffer.getMaxWrite()); - trigPtr.p->operation->dataBuffer.updateWritePtr(len + 1); - trigPtr.p->logEntry = 0; - - trigPtr.p->operation->noOfBytes += (len + 1) << 2; - trigPtr.p->operation->noOfRecords += 1; -} - -void -Backup::sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, - Uint32 requestType) -{ - jam(); - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = requestType; - ord->senderData= ptr.i; - NodePtr node; - for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)) { - jam(); - const Uint32 nodeId = node.p->nodeId; - if(node.p->alive && ptr.p->nodes.get(nodeId)) { - jam(); - sendSignal(numberToRef(BACKUP, nodeId), GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); - }//if - }//for -} - -/***************************************************************************** - * - * Slave functionallity: Stop backup - * - *****************************************************************************/ -void -Backup::execSTOP_BACKUP_REQ(Signal* signal) -{ - jamEntry(); - StopBackupReq * req = (StopBackupReq*)signal->getDataPtr(); - - CRASH_INSERTION((10020)); - - const Uint32 ptrI = req->backupPtr; - //const Uint32 backupId = req->backupId; - const Uint32 startGCP = req->startGCP; - const Uint32 stopGCP = req->stopGCP; - - /** - * At least one GCP must have passed - */ - ndbrequire(stopGCP > startGCP); - - /** - * Get backup record - */ - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->slaveState.setState(STOPPING); - ptr.p->m_gsn = GSN_STOP_BACKUP_REQ; - ptr.p->startGCP= startGCP; - ptr.p->stopGCP= stopGCP; - - /** - * Destroy the triggers in local DBTUP we created - */ - sendDropTrig(signal, ptr); -} - -void -Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) -{ - /** - * Close all files - */ - BackupFilePtr filePtr; - int openCount = 0; - for(ptr.p->files.first(filePtr); filePtr.i!=RNIL; ptr.p->files.next(filePtr)) - { - if(! 
(filePtr.p->m_flags & BackupFile::BF_OPEN)) - { - jam(); - continue; - } - - jam(); - openCount++; - - if(filePtr.p->m_flags & BackupFile::BF_CLOSING) - { - jam(); - continue; - }//if - - filePtr.p->operation.dataBuffer.eof(); - if(filePtr.p->m_flags & BackupFile::BF_FILE_THREAD) - { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("Close files fileRunning == 1, filePtr.i=%u", filePtr.i); -#endif - } - else - { - jam(); - closeFile(sig, ptr, filePtr); - } - } - - if(openCount == 0){ - jam(); - closeFilesDone(sig, ptr); - }//if -} - -void -Backup::closeFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr) -{ - ndbrequire(filePtr.p->m_flags & BackupFile::BF_OPEN); - ndbrequire(! (filePtr.p->m_flags & BackupFile::BF_OPENING)); - ndbrequire(! (filePtr.p->m_flags & BackupFile::BF_CLOSING)); - filePtr.p->m_flags |= BackupFile::BF_CLOSING; - - FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend(); - req->filePointer = filePtr.p->filePointer; - req->userPointer = filePtr.i; - req->userReference = reference(); - req->fileFlag = 0; - - if (ptr.p->errorCode) - { - FsCloseReq::setRemoveFileFlag(req->fileFlag, 1); - } - -#ifdef DEBUG_ABORT - ndbout_c("***** a FSCLOSEREQ filePtr.i = %u flags: %x", - filePtr.i, filePtr.p->m_flags); -#endif - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA); - -} - -void -Backup::execFSCLOSEREF(Signal* signal) -{ - jamEntry(); - - FsRef * ref = (FsRef*)signal->getDataPtr(); - const Uint32 filePtrI = ref->userPointer; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - FsConf * conf = (FsConf*)signal->getDataPtr(); - conf->userPointer = filePtrI; - - execFSCLOSECONF(signal); -} - -void -Backup::execFSCLOSECONF(Signal* signal) -{ - jamEntry(); - - FsConf * conf = (FsConf*)signal->getDataPtr(); - const Uint32 filePtrI = conf->userPointer; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, filePtrI); - -#ifdef DEBUG_ABORT - ndbout_c("***** FSCLOSECONF filePtrI = %u", filePtrI); -#endif - - ndbrequire(filePtr.p->m_flags == (BackupFile::BF_OPEN | - BackupFile::BF_CLOSING)); - - - filePtr.p->m_flags &= ~(Uint32)(BackupFile::BF_OPEN |BackupFile::BF_CLOSING); - filePtr.p->operation.dataBuffer.reset(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - closeFiles(signal, ptr); -} - -void -Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - if(ptr.p->is_lcp()) - { - lcp_close_file_conf(signal, ptr); - return; - } - - jam(); - - //error when do insert footer or close file - if(ptr.p->checkError()) - { - StopBackupRef * ref = (StopBackupRef*)signal->getDataPtr(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->errorCode = ptr.p->errorCode; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_REF, signal, - StopBackupConf::SignalLength, JBB); - - ptr.p->m_gsn = GSN_STOP_BACKUP_REF; - ptr.p->slaveState.setState(CLEANING); - return; - } - - StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->backupPtr = ptr.i; - - BackupFilePtr filePtr LINT_SET_PTR; - if(ptr.p->logFilePtr != RNIL) - { - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - conf->noOfLogBytes= filePtr.p->operation.noOfBytes; - conf->noOfLogRecords= filePtr.p->operation.noOfRecords; - } - else - { - conf->noOfLogBytes= 0; - conf->noOfLogRecords= 0; - } - - 
sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal, - StopBackupConf::SignalLength, JBB); - - ptr.p->m_gsn = GSN_STOP_BACKUP_CONF; - ptr.p->slaveState.setState(CLEANING); -} - -/***************************************************************************** - * - * Slave functionallity: Abort backup - * - *****************************************************************************/ -/***************************************************************************** - * - * Slave functionallity: Abort backup - * - *****************************************************************************/ -void -Backup::execABORT_BACKUP_ORD(Signal* signal) -{ - jamEntry(); - AbortBackupOrd* ord = (AbortBackupOrd*)signal->getDataPtr(); - - const Uint32 backupId = ord->backupId; - const AbortBackupOrd::RequestType requestType = - (AbortBackupOrd::RequestType)ord->requestType; - const Uint32 senderData = ord->senderData; - -#ifdef DEBUG_ABORT - ndbout_c("******** ABORT_BACKUP_ORD ********* nodeId = %u", - refToNode(signal->getSendersBlockRef())); - ndbout_c("backupId = %u, requestType = %u, senderData = %u, ", - backupId, requestType, senderData); - dumpUsedResources(); -#endif - - BackupRecordPtr ptr LINT_SET_PTR; - if(requestType == AbortBackupOrd::ClientAbort) { - if (getOwnNodeId() != getMasterNodeId()) { - jam(); - // forward to master -#ifdef DEBUG_ABORT - ndbout_c("---- Forward to master nodeId = %u", getMasterNodeId()); -#endif - sendSignal(calcBackupBlockRef(getMasterNodeId()), GSN_ABORT_BACKUP_ORD, - signal, AbortBackupOrd::SignalLength, JBB); - return; - } - jam(); - for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) { - jam(); - if(ptr.p->backupId == backupId && ptr.p->clientData == senderData) { - jam(); - break; - }//if - }//for - if(ptr.i == RNIL) { - jam(); - return; - }//if - } else { - if (c_backupPool.findId(senderData)) { - jam(); - c_backupPool.getPtr(ptr, senderData); - } else { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("Backup: abort request type=%u on id=%u,%u not found", - requestType, backupId, senderData); -#endif - return; - } - }//if - - ptr.p->m_gsn = GSN_ABORT_BACKUP_ORD; - const bool isCoordinator = (ptr.p->masterRef == reference()); - - bool ok = false; - switch(requestType){ - - /** - * Requests sent to master - */ - case AbortBackupOrd::ClientAbort: - jam(); - // fall through - case AbortBackupOrd::LogBufferFull: - jam(); - // fall through - case AbortBackupOrd::FileOrScanError: - jam(); - ndbrequire(isCoordinator); - ptr.p->setErrorCode(requestType); - if(ptr.p->masterData.gsn == GSN_BACKUP_FRAGMENT_REQ) - { - /** - * Only scans are actively aborted - */ - abort_scan(signal, ptr); - } - return; - - /** - * Requests sent to slave - */ - case AbortBackupOrd::AbortScan: - jam(); - ptr.p->setErrorCode(requestType); - return; - - case AbortBackupOrd::BackupComplete: - jam(); - cleanup(signal, ptr); - return; - case AbortBackupOrd::BackupFailure: - case AbortBackupOrd::BackupFailureDueToNodeFail: - case AbortBackupOrd::OkToClean: - case AbortBackupOrd::IncompatibleVersions: -#ifndef VM_TRACE - default: -#endif - ptr.p->setErrorCode(requestType); - ok= true; - } - ndbrequire(ok); - - ptr.p->masterRef = reference(); - ptr.p->nodes.clear(); - ptr.p->nodes.set(getOwnNodeId()); - - - ptr.p->stopGCP= ptr.p->startGCP + 1; - sendStopBackup(signal, ptr); -} - - -void -Backup::dumpUsedResources() -{ - jam(); - BackupRecordPtr ptr; - - for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)) { - ndbout_c("Backup id=%u, slaveState.getState = %u, errorCode=%u", - 
ptr.p->backupId, - ptr.p->slaveState.getState(), - ptr.p->errorCode); - - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); - tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) { - jam(); - for(Uint32 j = 0; j<3; j++) { - jam(); - TriggerPtr trigPtr LINT_SET_PTR; - if(tabPtr.p->triggerAllocated[j]) { - jam(); - c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); - ndbout_c("Allocated[%u] Triggerid = %u, event = %u", - j, - tabPtr.p->triggerIds[j], - trigPtr.p->event); - }//if - }//for - }//for - - BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr); - filePtr.i != RNIL; - ptr.p->files.next(filePtr)) { - jam(); - ndbout_c("filePtr.i = %u, flags: H'%x ", - filePtr.i, filePtr.p->m_flags); - }//for - } -} - -void -Backup::cleanup(Signal* signal, BackupRecordPtr ptr) -{ - - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;ptr.p->tables.next(tabPtr)) - { - jam(); - tabPtr.p->attributes.release(); - tabPtr.p->fragments.release(); - for(Uint32 j = 0; j<3; j++) { - jam(); - TriggerPtr trigPtr LINT_SET_PTR; - if(tabPtr.p->triggerAllocated[j]) { - jam(); - c_triggerPool.getPtr(trigPtr, tabPtr.p->triggerIds[j]); - trigPtr.p->event = ILLEGAL_TRIGGER_ID; - tabPtr.p->triggerAllocated[j] = false; - }//if - tabPtr.p->triggerIds[j] = ILLEGAL_TRIGGER_ID; - }//for - { - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); - } - }//for - - BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr);filePtr.i != RNIL;ptr.p->files.next(filePtr)) - { - jam(); - ndbrequire(filePtr.p->m_flags == 0); - filePtr.p->pages.release(); - }//for - - ptr.p->files.release(); - ptr.p->tables.release(); - ptr.p->triggers.release(); - ptr.p->backupId = ~0; - - if(ptr.p->checkError()) - removeBackup(signal, ptr); - else - c_backups.release(ptr); -} - - -void -Backup::removeBackup(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = ptr.i; - req->directory = 1; - req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBA); -} - -void -Backup::execFSREMOVEREF(Signal* signal) -{ - jamEntry(); - FsRef * ref = (FsRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->userPointer; - - FsConf * conf = (FsConf*)signal->getDataPtr(); - conf->userPointer = ptrI; - execFSREMOVECONF(signal); -} - -void -Backup::execFSREMOVECONF(Signal* signal){ - jamEntry(); - - FsConf * conf = (FsConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->userPointer; - - /** - * Get backup record - */ - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, ptrI); - c_backups.release(ptr); -} - -/** - * LCP - */ -void -Backup::execLCP_PREPARE_REQ(Signal* signal) -{ - jamEntry(); - LcpPrepareReq req = *(LcpPrepareReq*)signal->getDataPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, req.backupPtr); - - ptr.p->m_gsn = GSN_LCP_PREPARE_REQ; - - TablePtr tabPtr; - FragmentPtr fragPtr; - if (!ptr.p->tables.isEmpty()) - { - jam(); - ndbrequire(ptr.p->errorCode); - ptr.p->tables.first(tabPtr); - if (tabPtr.p->tableId == req.tableId) - { - jam(); - ndbrequire(!tabPtr.p->fragments.empty()); - tabPtr.p->fragments.getPtr(fragPtr, 0); - 
fragPtr.p->fragmentId = req.fragmentId; - defineBackupRef(signal, ptr, ptr.p->errorCode); - return; - } - else - { - jam(); - tabPtr.p->attributes.release(); - tabPtr.p->fragments.release(); - ptr.p->tables.release(); - ptr.p->errorCode = 0; - // fall-through - } - } - - if(!ptr.p->tables.seize(tabPtr) || !tabPtr.p->fragments.seize(1)) - { - if(!tabPtr.isNull()) - ptr.p->tables.release(); - ndbrequire(false); // TODO - } - tabPtr.p->tableId = req.tableId; - tabPtr.p->fragments.getPtr(fragPtr, 0); - tabPtr.p->tableType = DictTabInfo::UserTable; - fragPtr.p->fragmentId = req.fragmentId; - fragPtr.p->lcp_no = req.lcpNo; - fragPtr.p->scanned = 0; - fragPtr.p->scanning = 0; - fragPtr.p->tableId = req.tableId; - - ptr.p->backupId= req.backupId; - lcp_open_file(signal, ptr); -} - -void -Backup::lcp_close_file_conf(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - TablePtr tabPtr; - ndbrequire(ptr.p->tables.first(tabPtr)); - Uint32 tableId = tabPtr.p->tableId; - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - ndbrequire(filePtr.p->m_flags == 0); - - if (ptr.p->m_gsn == GSN_LCP_PREPARE_REQ) - { - jam(); - defineBackupRef(signal, ptr, ptr.p->errorCode); - return; - } - - FragmentPtr fragPtr; - tabPtr.p->fragments.getPtr(fragPtr, 0); - Uint32 fragmentId = fragPtr.p->fragmentId; - - tabPtr.p->attributes.release(); - tabPtr.p->fragments.release(); - ptr.p->tables.release(); - ptr.p->errorCode = 0; - - BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->backupPtr = ptr.i; - conf->tableId = tableId; - conf->fragmentNo = fragmentId; - conf->noOfRecordsLow = 0; - conf->noOfRecordsHigh = 0; - conf->noOfBytesLow = 0; - conf->noOfBytesHigh = 0; - sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, - BackupFragmentConf::SignalLength, JBB); -} - -void -Backup::lcp_open_file(Signal* signal, BackupRecordPtr ptr) -{ - FsOpenReq * req = (FsOpenReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->fileFlags = - FsOpenReq::OM_WRITEONLY | - FsOpenReq::OM_TRUNCATE | - FsOpenReq::OM_CREATE | - FsOpenReq::OM_APPEND | - FsOpenReq::OM_AUTOSYNC; - if (c_defaults.m_o_direct) - req->fileFlags |= FsOpenReq::OM_DIRECT; - FsOpenReq::v2_setCount(req->fileNumber, 0xFFFFFFFF); - req->auto_sync_size = c_defaults.m_disk_synch_size; - - TablePtr tabPtr; - FragmentPtr fragPtr; - - ndbrequire(ptr.p->tables.first(tabPtr)); - tabPtr.p->fragments.getPtr(fragPtr, 0); - - /** - * Lcp file - */ - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - ndbrequire(filePtr.p->m_flags == 0); - filePtr.p->m_flags |= BackupFile::BF_OPENING; - filePtr.p->tableId = RNIL; // Will force init - req->userPointer = filePtr.i; - FsOpenReq::setVersion(req->fileNumber, 5); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); - FsOpenReq::v5_setLcpNo(req->fileNumber, fragPtr.p->lcp_no); - FsOpenReq::v5_setTableId(req->fileNumber, tabPtr.p->tableId); - FsOpenReq::v5_setFragmentId(req->fileNumber, fragPtr.p->fragmentId); - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -} - -void -Backup::lcp_open_file_done(Signal* signal, BackupRecordPtr ptr) -{ - TablePtr tabPtr; - FragmentPtr fragPtr; - - ndbrequire(ptr.p->tables.first(tabPtr)); - tabPtr.p->fragments.getPtr(fragPtr, 0); - - BackupFilePtr filePtr LINT_SET_PTR; - c_backupFilePool.getPtr(filePtr, ptr.p->dataFilePtr); - ndbrequire(filePtr.p->m_flags == - (BackupFile::BF_OPEN | 
BackupFile::BF_LCP_META)); - filePtr.p->m_flags &= ~(Uint32)BackupFile::BF_LCP_META; - - ptr.p->slaveState.setState(STARTED); - - LcpPrepareConf* conf= (LcpPrepareConf*)signal->getDataPtrSend(); - conf->senderData = ptr.p->clientData; - conf->senderRef = reference(); - conf->tableId = tabPtr.p->tableId; - conf->fragmentId = fragPtr.p->fragmentId; - sendSignal(ptr.p->masterRef, GSN_LCP_PREPARE_CONF, - signal, LcpPrepareConf::SignalLength, JBB); - - /** - * Start file thread - */ - filePtr.p->m_flags |= BackupFile::BF_FILE_THREAD; - - signal->theData[0] = BackupContinueB::START_FILE_THREAD; - signal->theData[1] = filePtr.i; - signal->theData[2] = __LINE__; - sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 3); -} - -void -Backup::execEND_LCPREQ(Signal* signal) -{ - EndLcpReq* req= (EndLcpReq*)signal->getDataPtr(); - - BackupRecordPtr ptr LINT_SET_PTR; - c_backupPool.getPtr(ptr, req->backupPtr); - ndbrequire(ptr.p->backupId == req->backupId); - - BackupFilePtr filePtr LINT_SET_PTR; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - ndbrequire(filePtr.p->m_flags == 0); - - if (!ptr.p->tables.isEmpty()) - { - jam(); - ndbrequire(ptr.p->errorCode); - TablePtr tabPtr; - ptr.p->tables.first(tabPtr); - tabPtr.p->attributes.release(); - tabPtr.p->fragments.release(); - ptr.p->tables.release(); - ptr.p->errorCode = 0; - } - - ptr.p->errorCode = 0; - ptr.p->slaveState.setState(CLEANING); - ptr.p->slaveState.setState(INITIAL); - ptr.p->slaveState.setState(DEFINING); - ptr.p->slaveState.setState(DEFINED); - - EndLcpConf* conf= (EndLcpConf*)signal->getDataPtr(); - conf->senderData = ptr.p->clientData; - conf->senderRef = reference(); - sendSignal(ptr.p->masterRef, GSN_END_LCPCONF, - signal, EndLcpConf::SignalLength, JBB); -} diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp deleted file mode 100644 index 7a3280f2ba6..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp +++ /dev/null @@ -1,752 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BACKUP_H -#define BACKUP_H - -#include -#include - -#include "FsBuffer.hpp" -#include "BackupFormat.hpp" - -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -/** - * Backup - This block manages database backup and restore - */ -class Backup : public SimulatedBlock -{ -public: - Backup(Block_context& ctx); - virtual ~Backup(); - BLOCK_DEFINES(Backup); - -protected: - - void execSTTOR(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - void execREAD_NODESCONF(Signal* signal); - void execNODE_FAILREP(Signal* signal); - void execINCL_NODEREQ(Signal* signal); - void execCONTINUEB(Signal* signal); - - /** - * Testing - */ - void execBACKUP_REF(Signal* signal); - void execBACKUP_CONF(Signal* signal); - void execBACKUP_ABORT_REP(Signal* signal); - void execBACKUP_COMPLETE_REP(Signal* signal); - - /** - * Signals sent from master - */ - void execDEFINE_BACKUP_REQ(Signal* signal); - void execBACKUP_DATA(Signal* signal); - void execSTART_BACKUP_REQ(Signal* signal); - void execBACKUP_FRAGMENT_REQ(Signal* signal); - void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal); - void execSTOP_BACKUP_REQ(Signal* signal); - void execBACKUP_STATUS_REQ(Signal* signal); - void execABORT_BACKUP_ORD(Signal* signal); - - /** - * The actual scan - */ - void execSCAN_HBREP(Signal* signal); - void execTRANSID_AI(Signal* signal); - void execSCAN_FRAGREF(Signal* signal); - void execSCAN_FRAGCONF(Signal* signal); - - /** - * Trigger logging - */ - void execBACKUP_TRIG_REQ(Signal* signal); - void execTRIG_ATTRINFO(Signal* signal); - void execFIRE_TRIG_ORD(Signal* signal); - - /** - * DICT signals - */ - void execLIST_TABLES_CONF(Signal* signal); - void execGET_TABINFOREF(Signal* signal); - void execGET_TABINFO_CONF(Signal* signal); - void execCREATE_TRIG_REF(Signal* signal); - void execCREATE_TRIG_CONF(Signal* signal); - void execDROP_TRIG_REF(Signal* signal); - void execDROP_TRIG_CONF(Signal* signal); - - /** - * DIH signals - */ - void execDI_FCOUNTCONF(Signal* signal); - void execDIGETPRIMCONF(Signal* signal); - - /** - * FS signals - */ - void execFSOPENREF(Signal* signal); - void execFSOPENCONF(Signal* signal); - - void execFSCLOSEREF(Signal* signal); - void execFSCLOSECONF(Signal* signal); - - void execFSAPPENDREF(Signal* signal); - void execFSAPPENDCONF(Signal* signal); - - void execFSREMOVEREF(Signal* signal); - void execFSREMOVECONF(Signal* signal); - - /** - * Master functinallity - */ - void execBACKUP_REQ(Signal* signal); - void execABORT_BACKUP_REQ(Signal* signal); - - void execDEFINE_BACKUP_REF(Signal* signal); - void execDEFINE_BACKUP_CONF(Signal* signal); - - void execSTART_BACKUP_REF(Signal* signal); - void execSTART_BACKUP_CONF(Signal* signal); - - void execBACKUP_FRAGMENT_REF(Signal* signal); - void execBACKUP_FRAGMENT_CONF(Signal* signal); - - void execSTOP_BACKUP_REF(Signal* signal); - void execSTOP_BACKUP_CONF(Signal* signal); - - void execBACKUP_STATUS_CONF(Signal* signal); - - void execUTIL_SEQUENCE_REF(Signal* signal); - void execUTIL_SEQUENCE_CONF(Signal* signal); - - void execWAIT_GCP_REF(Signal* signal); - void execWAIT_GCP_CONF(Signal* signal); - - void execLCP_PREPARE_REQ(Signal* signal); - void execLCP_FRAGMENT_REQ(Signal*); - void execEND_LCPREQ(Signal* signal); 
-private: - void defineBackupMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal); - void dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal); - -public: - struct Node { - Uint32 nodeId; - Uint32 alive; - Uint32 nextList; - union { Uint32 prevList; Uint32 nextPool; }; - }; - typedef Ptr NodePtr; - -#define BACKUP_WORDS_PER_PAGE 8191 - struct Page32 { - Uint32 data[BACKUP_WORDS_PER_PAGE]; - Uint32 nextPool; - }; - typedef Ptr Page32Ptr; - - struct Attribute { - enum Flags { - COL_NULLABLE = 0x1, - COL_FIXED = 0x2, - COL_DISK = 0x4 - }; - struct Data { - Uint16 m_flags; - Uint16 attrId; - Uint32 sz32; // No of 32 bit words - Uint32 offset; // Relative DataFixedAttributes/DataFixedKeys - Uint32 offsetNull; // In NullBitmask - } data; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr AttributePtr; - - struct Fragment { - Uint64 noOfRecords; - Uint32 tableId; - Uint16 node; - Uint16 fragmentId; - Uint8 scanned; // 0 = not scanned x = scanned by node x - Uint8 scanning; // 0 = not scanning x = scanning on node x - Uint8 lcp_no; - Uint32 nextPool; - }; - typedef Ptr FragmentPtr; - - struct Table { - Table(ArrayPool &, ArrayPool &); - - Uint64 noOfRecords; - - Uint32 tableId; - Uint32 schemaVersion; - Uint32 tableType; - Uint32 noOfNull; - Uint32 noOfAttributes; - Uint32 noOfVariable; - Uint32 sz_FixedAttributes; - Uint32 triggerIds[3]; - bool triggerAllocated[3]; - - DLFifoList attributes; - Array fragments; - - Uint32 nextList; - union { Uint32 nextPool; Uint32 prevList; }; - }; - typedef Ptr TablePtr; - - struct OperationRecord { - public: - OperationRecord(Backup & b) : backup(b) {} - - /** - * Once per table - */ - void init(const TablePtr & ptr); - - /** - * Once per fragment - */ - bool newFragment(Uint32 tableId, Uint32 fragNo); - bool fragComplete(Uint32 tableId, Uint32 fragNo, bool fill_record); - - /** - * Once per scan frag (next) req/conf - */ - bool newScan(); - bool scanConf(Uint32 noOfOps, Uint32 opLen); - bool closeScan(); - - /** - * Per record - */ - void newRecord(Uint32 * base); - bool finished(); - - /** - * Per attribute - */ - void nullVariable(); - void nullAttribute(Uint32 nullOffset); - Uint32 * newNullable(Uint32 attrId, Uint32 sz); - Uint32 * newAttrib(Uint32 offset, Uint32 sz); - Uint32 * newVariable(Uint32 id, Uint32 sz); - - private: - Uint32* base; - Uint32* dst_Length; - Uint32* dst_Bitmask; - Uint32* dst_FixedAttribs; - BackupFormat::DataFile::VariableData* dst_VariableData; - - Uint32 noOfAttributes; // No of Attributes - Uint32 attrLeft; // No of attributes left - - Uint32 opNoDone; - Uint32 opNoConf; - Uint32 opLen; - - public: - Uint32* dst; - Uint32 attrSzTotal; // No of AI words received - Uint32 tablePtr; // Ptr.i to current table - - FsBuffer dataBuffer; - Uint64 noOfRecords; - Uint64 noOfBytes; - Uint32 maxRecordSize; - - private: - Uint32* scanStart; - Uint32* scanStop; - - /** - * sizes of part - */ - Uint32 sz_Bitmask; - Uint32 sz_FixedAttribs; - - public: - union { Uint32 nextPool; Uint32 nextList; }; - Uint32 prevList; - private: - - Backup & backup; - BlockNumber number() const { return backup.number(); } - void progError(int line, int cause, const char * extra) { - backup.progError(line, cause, extra); - } - }; - friend struct OperationRecord; - - struct TriggerRecord { - TriggerRecord() { event = ~0;} - OperationRecord * operation; - BackupFormat::LogFile::LogEntry * logEntry; - Uint32 maxRecordSize; - Uint32 tableId; - Uint32 tab_ptr_i; - Uint32 event; - Uint32 backupPtr; - Uint32 
errorCode; - union { Uint32 nextPool; Uint32 nextList; }; - }; - typedef Ptr TriggerPtr; - - /** - * BackupFile - At least 3 per backup - */ - struct BackupFile { - BackupFile(Backup & backup, ArrayPool & pp) - : operation(backup), pages(pp) {} - - Uint32 backupPtr; // Pointer to backup record - Uint32 tableId; - Uint32 fragmentNo; - Uint32 filePointer; - Uint32 errorCode; - BackupFormat::FileType fileType; - OperationRecord operation; - - Array pages; - Uint32 nextList; - union { Uint32 prevList; Uint32 nextPool; }; - - enum { - BF_OPEN = 0x1 - ,BF_OPENING = 0x2 - ,BF_CLOSING = 0x4 - ,BF_FILE_THREAD = 0x8 - ,BF_SCAN_THREAD = 0x10 - ,BF_LCP_META = 0x20 - }; - Uint32 m_flags; - Uint32 m_pos; - }; - typedef Ptr BackupFilePtr; - - - /** - * State for BackupRecord - */ - enum State { - INITIAL = 0, - DEFINING = 1, // Defining backup content and parameters - DEFINED = 2, // DEFINE_BACKUP_CONF sent in slave, received all in master - STARTED = 3, // Creating triggers - SCANNING = 4, // Scanning fragments - STOPPING = 5, // Closing files - CLEANING = 6, // Cleaning resources - ABORTING = 7 // Aborting backup - }; - - static const Uint32 validSlaveTransitionsCount; - static const Uint32 validMasterTransitionsCount; - static const State validSlaveTransitions[]; - static const State validMasterTransitions[]; - - class CompoundState { - public: - CompoundState(Backup & b, - const State valid[], - Uint32 count, Uint32 _id) - : backup(b) - , validTransitions(valid), - noOfValidTransitions(count), id(_id) - { - state = INITIAL; - abortState = state; - } - - void setState(State s); - State getState() const { return state;} - State getAbortState() const { return abortState;} - - void forceState(State s); - - BlockNumber number() const { return backup.number(); } - void progError(int line, int cause, const char * extra) { - backup.progError(line, cause, extra); - } - private: - Backup & backup; - State state; - State abortState; /** - When state == ABORTING, this contains the state - when the abort started - */ - const State * validTransitions; - const Uint32 noOfValidTransitions; - const Uint32 id; - }; - friend class CompoundState; - - /** - * Backup record - * - * One record per backup - */ - struct BackupRecord { - BackupRecord(Backup& b, - ArrayPool
& tp, - ArrayPool & bp, - ArrayPool & trp) - : slaveState(b, validSlaveTransitions, validSlaveTransitionsCount,1) - , tables(tp), triggers(trp), files(bp) - , ctlFilePtr(RNIL), logFilePtr(RNIL), dataFilePtr(RNIL) - , masterData(b), backup(b) - - { - } - - Uint32 m_gsn; - CompoundState slaveState; - - Uint32 clientRef; - Uint32 clientData; - Uint32 flags; - Uint32 signalNo; - Uint32 backupId; - Uint32 backupKey[2]; - Uint32 masterRef; - Uint32 errorCode; - NdbNodeBitmask nodes; - - Uint64 noOfBytes; - Uint64 noOfRecords; - Uint64 noOfLogBytes; - Uint64 noOfLogRecords; - - Uint32 startGCP; - Uint32 currGCP; - Uint32 stopGCP; - DLCFifoList
tables; - SLList triggers; - - SLList files; - Uint32 ctlFilePtr; // Ptr.i to ctl-file - Uint32 logFilePtr; // Ptr.i to log-file - Uint32 dataFilePtr; // Ptr.i to first data-file - - Uint32 backupDataLen; // Used for (un)packing backup request - SimpleProperties props;// Used for (un)packing backup request - - struct SlaveData { - SignalCounter trigSendCounter; - Uint32 gsn; - struct { - Uint32 tableId; - } createTrig; - struct { - Uint32 tableId; - } dropTrig; - } slaveData; - - struct MasterData { - MasterData(Backup & b) - { - } - MutexHandle2 m_defineBackupMutex; - MutexHandle2 m_dictCommitTableMutex; - - Uint32 gsn; - SignalCounter sendCounter; - Uint32 errorCode; - union { - struct { - Uint32 startBackup; - } waitGCP; - struct { - Uint32 signalNo; - Uint32 noOfSignals; - Uint32 tablePtr; - } startBackup; - struct { - Uint32 dummy; - } stopBackup; - }; - } masterData; - - Uint32 nextList; - union { Uint32 prevList; Uint32 nextPool; }; - - void setErrorCode(Uint32 errCode){ - if(errorCode == 0) - errorCode = errCode; - } - - bool checkError() const { - return errorCode != 0; - } - - bool is_lcp() const { - return backupDataLen == ~(Uint32)0; - } - - Backup & backup; - BlockNumber number() const { return backup.number(); } - void progError(int line, int cause, const char * extra) { - backup.progError(line, cause, extra); - } - }; - friend struct BackupRecord; - typedef Ptr BackupRecordPtr; - - struct Config { - Uint32 m_dataBufferSize; - Uint32 m_logBufferSize; - Uint32 m_minWriteSize; - Uint32 m_maxWriteSize; - Uint32 m_lcp_buffer_size; - - Uint32 m_disk_write_speed_sr; - Uint32 m_disk_write_speed; - Uint32 m_disk_synch_size; - Uint32 m_diskless; - Uint32 m_o_direct; - }; - - /** - * Variables - */ - Uint32 * c_startOfPages; - NodeId c_masterNodeId; - SLList c_nodes; - NdbNodeBitmask c_aliveNodes; - DLList c_backups; - Config c_defaults; - - /* - Variables that control checkpoint to disk speed - */ - Uint32 m_curr_disk_write_speed; - Uint32 m_words_written_this_period; - Uint32 m_overflow_disk_write; - Uint32 m_reset_delay_used; - NDB_TICKS m_reset_disk_speed_time; - static const int DISK_SPEED_CHECK_DELAY = 100; - - STATIC_CONST(NO_OF_PAGES_META_FILE = - (2*MAX_WORDS_META_FILE + BACKUP_WORDS_PER_PAGE - 1) / - BACKUP_WORDS_PER_PAGE); - - /** - * Pools - */ - ArrayPool
c_tablePool; - ArrayPool c_attributePool; - ArrayPool c_backupPool; - ArrayPool c_backupFilePool; - ArrayPool c_pagePool; - ArrayPool c_fragmentPool; - ArrayPool c_nodePool; - ArrayPool c_triggerPool; - - void checkFile(Signal*, BackupFilePtr); - void checkScan(Signal*, BackupFilePtr); - void fragmentCompleted(Signal*, BackupFilePtr); - - void backupAllData(Signal* signal, BackupRecordPtr); - - void getFragmentInfo(Signal*, BackupRecordPtr, TablePtr, Uint32 fragNo); - void getFragmentInfoDone(Signal*, BackupRecordPtr); - - void openFiles(Signal* signal, BackupRecordPtr ptr); - void openFilesReply(Signal*, BackupRecordPtr ptr, BackupFilePtr); - void closeFiles(Signal*, BackupRecordPtr ptr); - void closeFile(Signal*, BackupRecordPtr, BackupFilePtr); - void closeFilesDone(Signal*, BackupRecordPtr ptr); - - void sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr); - - void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); - void createTrigReply(Signal* signal, BackupRecordPtr ptr); - void alterTrigReply(Signal* signal, BackupRecordPtr ptr); - void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32); - void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); - - void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0); - void backupFragmentRef(Signal * signal, BackupFilePtr filePtr); - - void nextFragment(Signal*, BackupRecordPtr); - - void sendCreateTrig(Signal*, BackupRecordPtr ptr, TablePtr tabPtr); - void createAttributeMask(TablePtr tab, Bitmask&); - void sendStartBackup(Signal*, BackupRecordPtr, TablePtr); - void sendAlterTrig(Signal*, BackupRecordPtr ptr); - - void sendDropTrig(Signal*, BackupRecordPtr ptr); - void sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr); - void dropTrigReply(Signal*, BackupRecordPtr ptr); - - void sendSignalAllWait(BackupRecordPtr ptr, Uint32 gsn, Signal *signal, - Uint32 signalLength, - bool executeDirect = false); - bool haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId); - - void sendStopBackup(Signal*, BackupRecordPtr ptr); - void sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 errCode); - void sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr, - Uint32 errCode); - void masterAbort(Signal*, BackupRecordPtr ptr); - void masterSendAbortBackup(Signal*, BackupRecordPtr ptr); - void slaveAbort(Signal*, BackupRecordPtr ptr); - - void abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr); - void abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanDone); - - bool verifyNodesAlive(BackupRecordPtr, const NdbNodeBitmask& aNodeBitMask); - bool checkAbort(BackupRecordPtr ptr); - void checkNodeFail(Signal* signal, - BackupRecordPtr ptr, - NodeId newCoord, - Uint32 theFailedNodes[NodeBitmask::Size]); - void masterTakeOver(Signal* signal, BackupRecordPtr ptr); - - - NodeId getMasterNodeId() const { return c_masterNodeId; } - bool findTable(const BackupRecordPtr &, TablePtr &, Uint32 tableId) const; - bool parseTableDescription(Signal*, BackupRecordPtr ptr, TablePtr, const Uint32*, Uint32); - - bool insertFileHeader(BackupFormat::FileType, BackupRecord*, BackupFile*); - void sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode); - void sendBackupRef(BlockReference ref, Uint32 flags, Signal *signal, - Uint32 senderData, Uint32 errorCode); - void dumpUsedResources(); - void cleanup(Signal*, BackupRecordPtr ptr); - void abort_scan(Signal*, BackupRecordPtr ptr); - void removeBackup(Signal*, BackupRecordPtr ptr); - - void 
sendSTTORRY(Signal*); - void createSequence(Signal* signal); - void createSequenceReply(Signal*, class UtilSequenceConf *); - - void lcp_open_file(Signal* signal, BackupRecordPtr ptr); - void lcp_open_file_done(Signal*, BackupRecordPtr); - void lcp_close_file_conf(Signal* signal, BackupRecordPtr); - - bool ready_to_write(bool ready, Uint32 sz, bool eof, BackupFile *fileP); -}; - -inline -void -Backup::OperationRecord::newRecord(Uint32 * p){ - base = p; - dst_Length = p; p += 1; - dst_Bitmask = p; p += sz_Bitmask; - dst_FixedAttribs = p; p += sz_FixedAttribs; - dst_VariableData = (BackupFormat::DataFile::VariableData*)p; - BitmaskImpl::clear(sz_Bitmask, dst_Bitmask); - attrLeft = noOfAttributes; - attrSzTotal = 0; -} - -inline -Uint32 * -Backup::OperationRecord::newAttrib(Uint32 offset, Uint32 sz){ - attrLeft--; - dst = dst_FixedAttribs + offset; - return dst; -} - -inline -void -Backup::OperationRecord::nullAttribute(Uint32 offsetNull){ - attrLeft --; - BitmaskImpl::set(sz_Bitmask, dst_Bitmask, offsetNull); -} - -inline -void -Backup::OperationRecord::nullVariable() -{ - attrLeft --; -} - -inline -Uint32 * -Backup::OperationRecord::newNullable(Uint32 id, Uint32 sz){ - Uint32 sz32 = (sz + 3) >> 2; - - attrLeft--; - - dst = &dst_VariableData->Data[0]; - dst_VariableData->Sz = htonl(sz); - dst_VariableData->Id = htonl(id); - - dst_VariableData = (BackupFormat::DataFile::VariableData *)(dst + sz32); - - // Clear all bits on newRecord -> dont need to clear this - // BitmaskImpl::clear(sz_Bitmask, dst_Bitmask, offsetNull); - return dst; -} - -inline -Uint32 * -Backup::OperationRecord::newVariable(Uint32 id, Uint32 sz){ - Uint32 sz32 = (sz + 3) >> 2; - - attrLeft--; - - dst = &dst_VariableData->Data[0]; - dst_VariableData->Sz = htonl(sz); - dst_VariableData->Id = htonl(id); - - dst_VariableData = (BackupFormat::DataFile::VariableData *)(dst + sz32); - return dst; -} - -inline -bool -Backup::OperationRecord::finished(){ - if(attrLeft != 0){ - return false; - } - - opLen += attrSzTotal; - opNoDone++; - - scanStop = dst = (Uint32 *)dst_VariableData; - - const Uint32 len = (dst - base - 1); - * dst_Length = htonl(len); - - noOfRecords++; - - return true; -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.txt b/storage/ndb/src/kernel/blocks/backup/Backup.txt deleted file mode 100644 index 38b93f2d3c4..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/Backup.txt +++ /dev/null @@ -1,427 +0,0 @@ --- BACKUP SIGNAL DIAGRAM COMPLEMENT TO BACKUP AMENDMENTS 2003-07-11 -- - -USER MASTER MASTER SLAVE SLAVE ---------------------------------------------------------------------- -BACKUP_REQ -----------------> - UTIL_SEQUENCE - ---------------> - <--------------- - DEFINE_BACKUP - ------------------------------> (Local signals) - LIST_TABLES - ---------------> - <--------------- - FSOPEN - ---------------> - GET_TABINFO - <--------------- - DI_FCOUNT - ---------------> - <--------------- - DI_GETPRIM - ---------------> - <--------------- - <------------------------------- -BACKUP_CONF -<---------------- - START_BACKUP - ------------------------------> - CREATE_TRIG - --------------> - <-------------- - <------------------------------ - WAIT_GCP - --------------> - <-------------- - BACKUP_FRAGMENT - ------------------------------> - SCAN_FRAG - ---------------> - <--------------- - <------------------------------ - WAIT_GCP - --------------> - <-------------- - STOP_BACKUP - ------------------------------> - DROP_TRIG - --------------> - <-------------- - <------------------------------ 
-BACKUP_COMPLETE_REP -<---------------- - ABORT_BACKUP - ------------------------------> - ----------------------------------------------------------------------------- - -USER BACKUP-MASTER - -1) BACKUP_REQ --> - -2) To all slaves DEFINE_BACKUP_REQ - This signals contains info so that all - slaves can take over as master - Tomas: Except triggerId info... - -3) Wait for conf - -4) <-- BACKUP_CONF - -5) For Each Table - PREP_CREATE_TRIG_REQ - Wait for Conf - -6) To all slaves START_BACKUP_REQ - Include trigger ids - Wait for conf - -7) For Each Table - CREATE_TRIG_REQ - Wait for conf - -8) Wait for GCP - -9) For each table - For each fragment - BACKUP_FRAGMENT_REQ --> - <-- BACKUP_FRAGMENT_CONF - -10) Wait for GCP - -11) To all slaves STOP_BACKUP_REQ - This signal turns off logging - -12) Wait for conf - -13) <-- BACKUP_COMPLETE_REP - ----- - -Slave: Master Died -Wait for master take-over, max 30 sec then abort everything - -Slave: Master TakeOver - -BACKUP_STATUS_REQ --> To all nodes -<-- BACKUP_STATUS_CONF - -BACKUP_STATUS_CONF - BACKUP_DEFINED - BACKUP_STARTED - BACKUP_FRAGMENT - -Master: Slave died - --- Define Backup Req -- - -1) Get backup definition - Which tables (all) - -2) Open files - Write table list to CTL - file - -3) Get definitions for all tables in backup - -4) Get Fragment info - -5) Define Backup Conf - --- Define Backup Req -- - --- Abort Backup Req -- - -1) Report to others - -2) Stop logging -3) Stop file(s) -4) Stop scan - -5) If failure/abort - Remove files - -6) If XXX - Report to user -7) Clean up records/stuff - --- Abort Backup -- - -Reasons for aborting: - -1a) client abort - -1b) slave failure - -1c) node failure - -Resources to be cleaned up: - -Slave responsability: - -2a) Close and remove files - -2b) Free allocated resources - -Master responsability: - -2c) Drop triggers - -USER MASTER MASTER SLAVE SLAVE ---------------------------------------------------------------------- - BACKUP_ABORT_ORD: - -------------------------(ALL)--> - Set Master State ABORTING Set Slave State ABORTING - Drop Triggers Close and Remove files - CleanupSlaveResources() - - BACKUP_ABORT_ORD:OkToClean - -------------------------(ALL)--> - - - CleanupMasterResources() - -BACKUP_ABORT_REP -<--------------- - - - -State descriptions: - -Master - INITIAL -BACKUP_REQ -> -Master - DEFINING -DEFINE_BACKUP_CONF -> -Master - DEFINED -CREATE_TRIG_CONF -> -Master - STARTED -<---> -Master - SCANNING -WAIT_GCP_CONF -> -Master - STOPPING -(Master - CLEANING) --------- -Master - ABORTING - - -Slave - INITIAL -DEFINE_BACKUP_REQ -> -Slave - DEFINING - - backupId - - tables -DIGETPRIMCONF -> -Slave - DEFINED -START_BACKUP_REQ -> -Slave - STARTED -Slave - SCANNING -STOP_BACKUP_REQ -> -Slave - STOPPING -FSCLOSECONF -> -Slave - CLEANING ------ -Slave - ABORTING - - - -Testcases: - -2. Master failure at first START_BACKUP_CONF - - error 10004 -start backup - -- Ok - -2. Master failure at first CREATE_TRIG_CONF - - error 10003 -start backup - -- Ok - -2. Master failure at first ALTER_TRIG_CONF - - error 10005 -start backup - -- Ok - -2. Master failure at WAIT_GCP_CONF - - error 10007 -start backup - -- Ok - -2. Master failure at WAIT_GCP_CONF, nextFragment - - error 10008 -start backup - -- Ok - -2. Master failure at WAIT_GCP_CONF, stopping - - error 10009 -start backup - -- Ok - -2. Master failure at BACKUP_FRAGMENT_CONF - - error 10010 -start backup - -- Ok - -2. Master failure at first DROP_TRIG_CONF - - error 10012 -start backup - -- Ok - -1. 
Master failure at first STOP_BACKUP_CONF - - error 10013 -start backup - -- Ok - -3. Multiple node failiure: - - error 10001 - error 10014 -start backup - -- Ok (note, mgmtsrvr does gets BACKUP_ABORT_REP but expects BACKUP_REF, hangs...) - -4. Multiple node failiure: - - error 10007 - error 10002 -start backup - -- Ok - - - - ndbrequire(!ERROR_INSERTED(10001)); - ndbrequire(!ERROR_INSERTED(10002)); - ndbrequire(!ERROR_INSERTED(10021)); - ndbrequire(!ERROR_INSERTED(10003)); - ndbrequire(!ERROR_INSERTED(10004)); - ndbrequire(!ERROR_INSERTED(10005)); - ndbrequire(!ERROR_INSERTED(10006)); - ndbrequire(!ERROR_INSERTED(10007)); - ndbrequire(!ERROR_INSERTED(10008)); - ndbrequire(!ERROR_INSERTED(10009)); - ndbrequire(!ERROR_INSERTED(10010)); - ndbrequire(!ERROR_INSERTED(10011)); - ndbrequire(!ERROR_INSERTED(10012)); - ndbrequire(!ERROR_INSERTED(10013)); - ndbrequire(!ERROR_INSERTED(10014)); - ndbrequire(!ERROR_INSERTED(10015)); - ndbrequire(!ERROR_INSERTED(10016)); - ndbrequire(!ERROR_INSERTED(10017)); - ndbrequire(!ERROR_INSERTED(10018)); - ndbrequire(!ERROR_INSERTED(10019)); - ndbrequire(!ERROR_INSERTED(10020)); - - if (ERROR_INSERTED(10023)) { - if (ERROR_INSERTED(10023)) { - if (ERROR_INSERTED(10024)) { - if (ERROR_INSERTED(10025)) { - if (ERROR_INSERTED(10026)) { - if (ERROR_INSERTED(10028)) { - if (ERROR_INSERTED(10027)) { - (ERROR_INSERTED(10022))) { - if (ERROR_INSERTED(10029)) { - if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) { - ------ XXX --- - -DEFINE_BACKUP_REF -> - ABORT_BACKUP_ORD(no reply) when all DEFINE_BACKUP replies has arrived - -START_BACKUP_REF - ABORT_BACKUP_ORD(no reply) when all START_BACKUP_ replies has arrived - -BACKUP_FRAGMENT_REF - ABORT_BACKUP_ORD(reply) directly to all nodes running BACKUP_FRAGMENT - - When all nodes has replied BACKUP_FRAGMENT - ABORT_BACKUP_ORD(no reply) - -STOP_BACKUP_REF - ABORT_BACKUP_ORD(no reply) when all STOP_BACKUP_ replies has arrived - -NF_COMPLETE_REP - slave dies - master sends OUTSTANDING_REF to self - slave does nothing - - master dies - slave elects self as master and sets only itself as participant - - -DATA FORMATS ------------- - -Note: api-restore must be able to read all old formats. - -Todo: header formats - -4.1.x ------ - -Todo - -5.0.x ------ - -Producers: backup, Consumers: api-restore - -In 5.0 - 1) attrs have fixed size - 2) external attr id (column_no) is same as internal attr id (attr_id). - 3) no disk attributes - -Format: - Part 0: null-bit mask for all nullable rounded to word - Part 1: fixed + non-nullable in column_no order - Part 2: fixed + nullable in column_no order - -Part 1: - plain value rounded to words [value] - -Part 2: - not-null => clear null bit, data words [len_in_words attr_id value] - null => set only null bit in null-bit mask - -Note: redundancy in null-bit mask vs 2 word header - -5.1.x ------ - -Producers: backup, Consumers: api-restore lcp-restore - -In 5.1 - 1) attrs can have var size, length encoded in value - 2) column_no need not equal attr_id - 3) disk attributes - -Null-bit mask (5.0) is dropped. -Length encoded in value is not used. -In "lcp backup" disk attributes are replaced by 64-bit DISK_REF. 
- -Format: - Part 1: fixed + non-nullable in column_no order - Part 2: other attributes - -Part 1: - plain value rounded to words [value] - -Part 2: - not-null => data words [len_in_bytes attr_id value] - null => not present diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp deleted file mode 100644 index f48d0ed09d3..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ /dev/null @@ -1,197 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef BACKUP_FORMAT_HPP -#define BACKUP_FORMAT_HPP - -#include - -static const char BACKUP_MAGIC[] = { 'N', 'D', 'B', 'B', 'C', 'K', 'U', 'P' }; - -struct BackupFormat { - - /** - * Section types in file - */ - enum SectionType { - FILE_HEADER = 1, - FRAGMENT_HEADER = 2, - FRAGMENT_FOOTER = 3, - TABLE_LIST = 4, - TABLE_DESCRIPTION = 5, - GCP_ENTRY = 6, - FRAGMENT_INFO = 7, - EMPTY_ENTRY = 8 - }; - - struct FileHeader { - char Magic[8]; - Uint32 NdbVersion; - - Uint32 SectionType; - Uint32 SectionLength; - Uint32 FileType; - Uint32 BackupId; - Uint32 BackupKey_0; - Uint32 BackupKey_1; - Uint32 ByteOrder; - }; - - /** - * File types - */ - enum FileType { - CTL_FILE = 1, - LOG_FILE = 2, - DATA_FILE = 3, - LCP_FILE = 4 - }; - - /** - * Data file formats - */ - struct DataFile { - - struct FragmentHeader { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 TableId; - Uint32 FragmentNo; - Uint32 ChecksumType; - }; - - struct VariableData { - Uint32 Sz; - Uint32 Id; - Uint32 Data[1]; - }; - - struct Record { - Uint32 Length; - Uint32 NullBitmask[1]; - Uint32 DataFixedKeys[1]; - Uint32 DataFixedAttributes[1]; - VariableData DataVariableAttributes[1]; - }; - - struct FragmentFooter { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 TableId; - Uint32 FragmentNo; - Uint32 NoOfRecords; - Uint32 Checksum; - }; - - /* optional padding for O_DIRECT */ - struct EmptyEntry { - Uint32 SectionType; - Uint32 SectionLength; - /* not used data */ - }; - }; - - /** - * CTL file formats - */ - struct CtlFile { - - /** - * Table list - */ - struct TableList { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 TableIds[1]; // Length = SectionLength - 2 - }; - - /** - * Table description(s) - */ - struct TableDescription { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 TableType; - Uint32 DictTabInfo[1]; // Length = SectionLength - 3 - }; - - /** - * GCP Entry - */ - struct GCPEntry { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 StartGCP; - Uint32 StopGCP; - }; - - /** - * Fragment Info - */ - struct FragmentInfo { - Uint32 SectionType; - Uint32 SectionLength; - Uint32 TableId; - Uint32 FragmentNo; - Uint32 NoOfRecordsLow; - Uint32 NoOfRecordsHigh; - Uint32 FilePosLow; - Uint32 FilePosHigh; - }; - }; - - /** - * LOG file format - */ - struct LogFile { - - /** - * Log 
Entry - */ - struct LogEntry { - Uint32 Length; - Uint32 TableId; - // If TriggerEvent & 0x10000 == true then GCI is right after data - Uint32 TriggerEvent; - Uint32 FragId; - Uint32 Data[1]; // Len = Length - 3 - }; - - /** - * Log Entry pre NDBD_FRAGID_VERSION - */ - struct LogEntry_no_fragid { - Uint32 Length; - Uint32 TableId; - // If TriggerEvent & 0x10000 == true then GCI is right after data - Uint32 TriggerEvent; - Uint32 Data[1]; // Len = Length - 2 - }; - }; - - /** - * LCP file format - */ - struct LcpFile { - CtlFile::TableList TableList; - CtlFile::TableDescription TableDescription; - DataFile::FragmentHeader FragmentHeader; - DataFile::Record Record; - DataFile::FragmentFooter FragmentFooter; - }; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp deleted file mode 100644 index 00a2a258085..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//**************************************************************************** -// -// NAME -// Backup - Database backup / restore -// -//=========================================================================== -#include "Backup.hpp" - -#include -#include - -//extern const unsigned Ndbcntr::g_sysTableCount; - -Backup::Backup(Block_context& ctx) : - SimulatedBlock(BACKUP, ctx), - c_nodes(c_nodePool), - c_backups(c_backupPool) -{ - BLOCK_CONSTRUCTOR(Backup); - - c_masterNodeId = getOwnNodeId(); - - // Add received signals - addRecSignal(GSN_READ_CONFIG_REQ, &Backup::execREAD_CONFIG_REQ); - addRecSignal(GSN_STTOR, &Backup::execSTTOR); - addRecSignal(GSN_DUMP_STATE_ORD, &Backup::execDUMP_STATE_ORD); - addRecSignal(GSN_READ_NODESCONF, &Backup::execREAD_NODESCONF); - addRecSignal(GSN_NODE_FAILREP, &Backup::execNODE_FAILREP); - addRecSignal(GSN_INCL_NODEREQ, &Backup::execINCL_NODEREQ); - addRecSignal(GSN_CONTINUEB, &Backup::execCONTINUEB); - addRecSignal(GSN_READ_CONFIG_REQ, &Backup::execREAD_CONFIG_REQ, true); - - addRecSignal(GSN_SCAN_HBREP, &Backup::execSCAN_HBREP); - addRecSignal(GSN_TRANSID_AI, &Backup::execTRANSID_AI); - addRecSignal(GSN_SCAN_FRAGREF, &Backup::execSCAN_FRAGREF); - addRecSignal(GSN_SCAN_FRAGCONF, &Backup::execSCAN_FRAGCONF); - - addRecSignal(GSN_BACKUP_TRIG_REQ, &Backup::execBACKUP_TRIG_REQ); - addRecSignal(GSN_TRIG_ATTRINFO, &Backup::execTRIG_ATTRINFO); - addRecSignal(GSN_FIRE_TRIG_ORD, &Backup::execFIRE_TRIG_ORD); - - addRecSignal(GSN_LIST_TABLES_CONF, &Backup::execLIST_TABLES_CONF); - addRecSignal(GSN_GET_TABINFOREF, &Backup::execGET_TABINFOREF); - addRecSignal(GSN_GET_TABINFO_CONF, &Backup::execGET_TABINFO_CONF); - - addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF); - addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF); - - 
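  /* The registrations that follow wire up the rest of the protocol sketched
     in the notes above: trigger teardown (DROP_TRIG_*), fragment and primary
     lookups (DI_FCOUNTCONF, DIGETPRIMCONF), file I/O against NDBFS
     (FSOPEN/FSCLOSE/FSAPPEND/FSREMOVE), and the user-visible sequence
     BACKUP_REQ -> DEFINE_BACKUP -> START_BACKUP -> BACKUP_FRAGMENT ->
     STOP_BACKUP, with WAIT_GCP marking the consistency points and
     ABORT_BACKUP_ORD driving cleanup. */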
addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF); - addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF); - - addRecSignal(GSN_DI_FCOUNTCONF, &Backup::execDI_FCOUNTCONF); - addRecSignal(GSN_DIGETPRIMCONF, &Backup::execDIGETPRIMCONF); - - addRecSignal(GSN_FSOPENREF, &Backup::execFSOPENREF, true); - addRecSignal(GSN_FSOPENCONF, &Backup::execFSOPENCONF); - - addRecSignal(GSN_FSCLOSEREF, &Backup::execFSCLOSEREF, true); - addRecSignal(GSN_FSCLOSECONF, &Backup::execFSCLOSECONF); - - addRecSignal(GSN_FSAPPENDREF, &Backup::execFSAPPENDREF, true); - addRecSignal(GSN_FSAPPENDCONF, &Backup::execFSAPPENDCONF); - - addRecSignal(GSN_FSREMOVEREF, &Backup::execFSREMOVEREF, true); - addRecSignal(GSN_FSREMOVECONF, &Backup::execFSREMOVECONF); - - /*****/ - addRecSignal(GSN_BACKUP_REQ, &Backup::execBACKUP_REQ); - addRecSignal(GSN_ABORT_BACKUP_ORD, &Backup::execABORT_BACKUP_ORD); - - addRecSignal(GSN_DEFINE_BACKUP_REQ, &Backup::execDEFINE_BACKUP_REQ); - addRecSignal(GSN_DEFINE_BACKUP_REF, &Backup::execDEFINE_BACKUP_REF); - addRecSignal(GSN_DEFINE_BACKUP_CONF, &Backup::execDEFINE_BACKUP_CONF); - - addRecSignal(GSN_START_BACKUP_REQ, &Backup::execSTART_BACKUP_REQ); - addRecSignal(GSN_START_BACKUP_REF, &Backup::execSTART_BACKUP_REF); - addRecSignal(GSN_START_BACKUP_CONF, &Backup::execSTART_BACKUP_CONF); - - addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); - addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); - addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); - - addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP, - &Backup::execBACKUP_FRAGMENT_COMPLETE_REP); - - addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); - addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF); - addRecSignal(GSN_STOP_BACKUP_CONF, &Backup::execSTOP_BACKUP_CONF); - - //addRecSignal(GSN_BACKUP_STATUS_REQ, &Backup::execBACKUP_STATUS_REQ); - //addRecSignal(GSN_BACKUP_STATUS_CONF, &Backup::execBACKUP_STATUS_CONF); - - addRecSignal(GSN_UTIL_SEQUENCE_REF, &Backup::execUTIL_SEQUENCE_REF); - addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Backup::execUTIL_SEQUENCE_CONF); - - addRecSignal(GSN_WAIT_GCP_REF, &Backup::execWAIT_GCP_REF); - addRecSignal(GSN_WAIT_GCP_CONF, &Backup::execWAIT_GCP_CONF); - - /** - * Testing - */ - addRecSignal(GSN_BACKUP_REF, &Backup::execBACKUP_REF); - addRecSignal(GSN_BACKUP_CONF, &Backup::execBACKUP_CONF); - addRecSignal(GSN_BACKUP_ABORT_REP, &Backup::execBACKUP_ABORT_REP); - addRecSignal(GSN_BACKUP_COMPLETE_REP, &Backup::execBACKUP_COMPLETE_REP); - - addRecSignal(GSN_LCP_PREPARE_REQ, &Backup::execLCP_PREPARE_REQ); - addRecSignal(GSN_END_LCPREQ, &Backup::execEND_LCPREQ); -} - -Backup::~Backup() -{ -} - -BLOCK_FUNCTIONS(Backup) - -template class ArrayPool; -template class ArrayPool; -template class ArrayPool; - -void -Backup::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - c_defaults.m_disk_write_speed = 10 * (1024 * 1024); - c_defaults.m_disk_write_speed_sr = 100 * (1024 * 1024); - c_defaults.m_disk_synch_size = 4 * (1024 * 1024); - c_defaults.m_o_direct = true; - - Uint32 noBackups = 0, noTables = 0, noAttribs = 0, noFrags = 0; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, - &c_defaults.m_diskless)); - ndb_mgm_get_int_parameter(p, 
CFG_DB_O_DIRECT, - &c_defaults.m_o_direct); - ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED_SR, - &c_defaults.m_disk_write_speed_sr); - ndb_mgm_get_int_parameter(p, CFG_DB_CHECKPOINT_SPEED, - &c_defaults.m_disk_write_speed); - ndb_mgm_get_int_parameter(p, CFG_DB_DISK_SYNCH_SIZE, - &c_defaults.m_disk_synch_size); - - /* - We adjust the disk speed parameters from bytes per second to rather be - words per 100 milliseconds. We convert disk synch size from bytes per - second to words per second. - */ - c_defaults.m_disk_write_speed /= (4 * 10); - c_defaults.m_disk_write_speed_sr /= (4 * 10); - - ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups); - // ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, &noFrags)); - - noAttribs++; //RT 527 bug fix - - c_nodePool.setSize(MAX_NDB_NODES); - c_backupPool.setSize(noBackups + 1); - c_backupFilePool.setSize(3 * noBackups + 1); - c_tablePool.setSize(noBackups * noTables + 1); - c_attributePool.setSize(noBackups * noAttribs + MAX_ATTRIBUTES_IN_TABLE); - c_triggerPool.setSize(noBackups * 3 * noTables); - c_fragmentPool.setSize(noBackups * noFrags + 1); - - Uint32 szDataBuf = (2 * 1024 * 1024); - Uint32 szLogBuf = (2 * 1024 * 1024); - Uint32 szWrite = 32768, maxWriteSize = (256 * 1024); - ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_DATA_BUFFER_MEM, &szDataBuf); - ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_LOG_BUFFER_MEM, &szLogBuf); - ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_WRITE_SIZE, &szWrite); - ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MAX_WRITE_SIZE, &maxWriteSize); - - c_defaults.m_logBufferSize = szLogBuf; - c_defaults.m_dataBufferSize = szDataBuf; - c_defaults.m_minWriteSize = szWrite; - c_defaults.m_maxWriteSize = maxWriteSize; - c_defaults.m_lcp_buffer_size = szDataBuf; - - - Uint32 szMem = 0; - ndb_mgm_get_int_parameter(p, CFG_DB_BACKUP_MEM, &szMem); - Uint32 noPages = (szMem + c_defaults.m_lcp_buffer_size + sizeof(Page32) - 1) - / sizeof(Page32); - // We need to allocate an additional of 2 pages. 1 page because of a bug in - // ArrayPool and another one for DICTTAINFO. - c_pagePool.setSize(noPages + NO_OF_PAGES_META_FILE + 2, true); - - { // Init all tables - SLList
tables(c_tablePool); - TablePtr ptr; - while(tables.seize(ptr)){ - new (ptr.p) Table(c_attributePool, c_fragmentPool); - } - tables.release(); - } - - { - SLList ops(c_backupFilePool); - BackupFilePtr ptr; - while(ops.seize(ptr)){ - new (ptr.p) BackupFile(* this, c_pagePool); - } - ops.release(); - } - - { - SLList recs(c_backupPool); - BackupRecordPtr ptr; - while(recs.seize(ptr)){ - new (ptr.p) BackupRecord(* this, c_tablePool, - c_backupFilePool, c_triggerPool); - } - recs.release(); - } - - // Initialize BAT for interface to file system - { - Page32Ptr p; - ndbrequire(c_pagePool.seizeId(p, 0)); - c_startOfPages = (Uint32 *)p.p; - c_pagePool.release(p); - - NewVARIABLE* bat = allocateBat(1); - bat[0].WA = c_startOfPages; - bat[0].nrr = c_pagePool.getSize()*sizeof(Page32)/sizeof(Uint32); - } - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - diff --git a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp b/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp deleted file mode 100644 index 1349ddf6282..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/FsBuffer.hpp +++ /dev/null @@ -1,360 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FS_BUFFER_HPP -#define FS_BUFFER_HPP - -#include - -#define DEBUG(x) - -/** - * A circular data buffer to be used together with the FS - * - * One writer - Typically your block - * getWritePtr() - * updateWritePtr() - * - * One reader - Typically "thread" in your block sending stuff to NDBFS - * getReadPtr() - * updateReadPtr() - */ -class FsBuffer { -public: - /** - * Default constructor - */ - FsBuffer(); - - /** - * setup FsBuffer - * - * @param Buffer - Ptr to continuous memory - * @param Size - Buffer size in 32-bit words - * @param BlockSize - Size of block in 32-bit words - * @param MinRead - Min read size in 32-bit words - * Get rounded(down) to nearest multiple of block size. - * @param MaxRead - Max read size in 32-bit words - * Get rounded(down) to nearest multiple of block size. 
- * @param MaxWrite - Maximum write (into buffer) in 32-bit words - * - * @return NULL if everything is OK - * else A string describing problem - */ - const char * setup(Uint32 * Buffer, - Uint32 Size, - Uint32 BlockSize = 128, // 512 bytes - Uint32 MinRead = 1024, // 4k - Uint32 MaxRead = 1024, // 4k - Uint32 MaxWrite = 1024); // 4k - /* - * @return NULL if everything is OK - * else A string describing problem - */ - const char * valid() const; - - Uint32 getBufferSize() const; - Uint32 getUsableSize() const; - Uint32 * getStart() const; - - /** - * getReadPtr - Get pointer and size of data to send to FS - * - * @param ptr - Where to fetch data - * @param sz - How much data in 32-bit words - * @param eof - Is this the last fetch (only if return false) - * - * @return true - If there is data of size >= minread - * false - If there is can be data be if it is is < minread - * - else eof = true - */ - bool getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * eof); - - /** - * @note: sz must be equal to sz returned by getReadPtr - */ - void updateReadPtr(Uint32 sz); - - /** - * - * @note Must be followed by a updateWritePtr(no of words used) - */ - bool getWritePtr(Uint32 ** ptr, Uint32 sz); - - void updateWritePtr(Uint32 sz); - - /** - * There will be no more writing to this buffer - */ - void eof(); - - /** - * Getters for varibles - */ - Uint32 getMaxWrite() const { return m_maxWrite;} - Uint32 getMinRead() const { return m_minRead;} - - Uint32 getFreeSize() const { return m_free; } - - /** - * reset - */ - void reset(); - -private: - - Uint32 m_free; - Uint32 m_readIndex; - Uint32 m_writeIndex; - Uint32 m_eof; - Uint32 * m_start; - Uint32 m_minRead; - Uint32 m_maxRead; - Uint32 m_maxWrite; - Uint32 m_size; - - Uint32 * m_buffer; - Uint32 m_bufSize; - Uint32 m_blockSize; - - void clear(); -}; - -inline -FsBuffer::FsBuffer() -{ - clear(); -} - -inline -void -FsBuffer::clear(){ - m_minRead = m_maxRead = m_maxWrite = m_size = m_bufSize = m_free = 0; - m_buffer = m_start = 0; -} - -static -Uint32 * -align(Uint32 * ptr, Uint32 alignment, bool downwards){ - - const UintPtr a = (UintPtr)ptr; - const UintPtr b = a % alignment; - - if(downwards){ - return (Uint32 *)(a - b); - } else { - return (Uint32 *)(a + (b == 0 ? 
0 : (alignment - b))); - } -} - -inline -const char * -FsBuffer::setup(Uint32 * Buffer, - Uint32 Size, - Uint32 Block, - Uint32 MinRead, - Uint32 MaxRead, - Uint32 MaxWrite) -{ - clear(); - m_buffer = Buffer; - m_bufSize = Size; - m_blockSize = Block; - if(Block == 0){ - return valid(); - } - - m_minRead = (MinRead / Block) * Block; - m_maxRead = (MaxRead / Block) * Block; - m_maxWrite = MaxWrite; - - m_start = align(Buffer, Block*4, false); - Uint32 * stop = align(Buffer + Size - MaxWrite, Block*4, true); - if(stop > m_start){ - m_size = stop - m_start; - } else { - m_size = 0; - } - - if(m_minRead == 0) - m_size = 0; - else - m_size = (m_size / m_minRead) * m_minRead; - -#if 0 - ndbout_c("Block = %d MinRead = %d -> %d", Block*4, MinRead*4, m_minRead*4); - ndbout_c("Block = %d MaxRead = %d -> %d", Block*4, MaxRead*4, m_maxRead*4); - - ndbout_c("Buffer = %d -> %d", Buffer, m_start); - ndbout_c("Buffer = %d Size = %d MaxWrite = %d -> %d", - Buffer, Size*4, MaxWrite*4, m_size*4); -#endif - - m_readIndex = m_writeIndex = m_eof = 0; - m_free = m_size; - return valid(); -} - -inline -void -FsBuffer::reset() -{ - m_readIndex = m_writeIndex = 0; - m_free = m_size; - m_eof = 0; -} - -inline -const char * -FsBuffer::valid() const { - if(m_buffer == 0) return "Null pointer buffer"; - if(m_bufSize == 0) return "Zero size buffer"; - if(m_blockSize == 0) return "Zero block size"; - if(m_minRead < m_blockSize) return "Min read less than block size"; - if(m_maxRead < m_blockSize) return "Max read less than block size"; - if(m_maxRead < m_minRead) return "Max read less than min read"; - if(m_size == 0) return "Zero usable space"; - return 0; -} - -inline -Uint32 -FsBuffer::getBufferSize() const { - return m_bufSize; -} - -inline -Uint32 -FsBuffer::getUsableSize() const { - return m_size; -} - -inline -Uint32 * -FsBuffer::getStart() const { - return m_start; -} - -inline -bool -FsBuffer::getReadPtr(Uint32 ** ptr, Uint32 * sz, bool * _eof){ - - Uint32 * Tp = m_start; - const Uint32 Tr = m_readIndex; - const Uint32 Tm = m_minRead; - const Uint32 Ts = m_size; - const Uint32 Tmw = m_maxRead; - - Uint32 sz1 = m_size - m_free; // Used - - if(sz1 >= Tm){ - if(Tr + sz1 > Ts) - sz1 = (Ts - Tr); - - if(sz1 > Tmw) - * sz = Tmw; - else - * sz = sz1 - (sz1 % Tm); - - * ptr = &Tp[Tr]; - - DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d", - Tr, Tmw, Ts, Tm, sz1, * sz)); - - return true; - } - - if(!m_eof){ - * _eof = false; - - DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> false", - Tr, Tmw, Ts, Tm, sz1)); - - return false; - } - - * sz = sz1; - * _eof = true; - * ptr = &Tp[Tr]; - - DEBUG(ndbout_c("getReadPtr() Tr: %d Tmw: %d Ts: %d Tm: %d sz1: %d -> %d eof", - Tr, Tmw, Ts, Tm, sz1, * sz)); - - return false; -} - -inline -void -FsBuffer::updateReadPtr(Uint32 sz){ - const Uint32 Tr = m_readIndex; - const Uint32 Ts = m_size; - - m_free += sz; - m_readIndex = (Tr + sz) % Ts; -} - -inline -bool -FsBuffer::getWritePtr(Uint32 ** ptr, Uint32 sz){ - assert(sz <= m_maxWrite); - Uint32 * Tp = m_start; - const Uint32 Tw = m_writeIndex; - const Uint32 sz1 = m_free; - - if(sz1 > sz){ // Note at least 1 word of slack - * ptr = &Tp[Tw]; - - DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> true", - sz, Tw, sz1)); - return true; - } - - DEBUG(ndbout_c("getWritePtr(%d) Tw: %d sz1: %d -> false", - sz, Tw, sz1)); - - return false; -} - -inline -void -FsBuffer::updateWritePtr(Uint32 sz){ - assert(sz <= m_maxWrite); - Uint32 * Tp = m_start; - const Uint32 Tw = m_writeIndex; - const Uint32 Ts = 
m_size; - - const Uint32 Tnew = (Tw + sz); - m_free -= sz; - if(Tnew < Ts){ - m_writeIndex = Tnew; - DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", - sz, m_writeIndex)); - return; - } - - memcpy(Tp, &Tp[Ts], (Tnew - Ts) << 2); - m_writeIndex = Tnew - Ts; - DEBUG(ndbout_c("updateWritePtr(%d) m_writeIndex: %d", - sz, m_writeIndex)); -} - -inline -void -FsBuffer::eof(){ - m_eof = 1; -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/backup/Makefile.am b/storage/ndb/src/kernel/blocks/backup/Makefile.am deleted file mode 100644 index d8a82014445..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2004 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -ndbtools_PROGRAMS = ndb_print_backup_file -ndb_print_backup_file_SOURCES = read.cpp -ndb_print_backup_file_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/strings/libmystrings.a - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am diff --git a/storage/ndb/src/kernel/blocks/backup/read.cpp b/storage/ndb/src/kernel/blocks/backup/read.cpp deleted file mode 100644 index 3d4780f5eb4..00000000000 --- a/storage/ndb/src/kernel/blocks/backup/read.cpp +++ /dev/null @@ -1,523 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include -#include "BackupFormat.hpp" -#include -#include -#include - -bool readHeader(FILE*, BackupFormat::FileHeader *); -bool readFragHeader(FILE*, BackupFormat::DataFile::FragmentHeader *); -bool readFragFooter(FILE*, BackupFormat::DataFile::FragmentFooter *); -Int32 readRecord(FILE*, Uint32 **); - -NdbOut & operator<<(NdbOut&, const BackupFormat::FileHeader &); -NdbOut & operator<<(NdbOut&, const BackupFormat::DataFile::FragmentHeader &); -NdbOut & operator<<(NdbOut&, const BackupFormat::DataFile::FragmentFooter &); - -bool readTableList(FILE*, BackupFormat::CtlFile::TableList **); -bool readTableDesc(FILE*, BackupFormat::CtlFile::TableDescription **); -bool readGCPEntry(FILE*, BackupFormat::CtlFile::GCPEntry **); - -NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::TableList &); -NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::TableDescription &); -NdbOut & operator<<(NdbOut&, const BackupFormat::CtlFile::GCPEntry &); - -Int32 readLogEntry(FILE*, Uint32**); - -static Uint32 recNo; -static Uint32 logEntryNo; - -int -main(int argc, const char * argv[]){ - - ndb_init(); - if(argc <= 1){ - printf("Usage: %s \n", argv[0]); - exit(1); - } - FILE * f = fopen(argv[1], "rb"); - if(!f){ - ndbout << "No such file!" << endl; - exit(1); - } - - BackupFormat::FileHeader fileHeader; - if(!readHeader(f, &fileHeader)){ - ndbout << "Invalid file!" << endl; - exit(1); - } - ndbout << fileHeader << endl; - - switch(fileHeader.FileType){ - case BackupFormat::DATA_FILE: - while(!feof(f)){ - BackupFormat::DataFile::FragmentHeader fragHeader; - if(!readFragHeader(f, &fragHeader)) - break; - ndbout << fragHeader << endl; - - Uint32 len, * data; - while((len = readRecord(f, &data)) > 0){ -#if 0 - ndbout << "-> " << hex; - for(Uint32 i = 0; iSectionLength - 2; - for(Uint32 i = 0; i 0){ - LogEntry * logEntry = (LogEntry *) data; - /** - * Log Entry - */ - Uint32 event = ntohl(logEntry->TriggerEvent); - bool gcp = (event & 0x10000) != 0; - event &= 0xFFFF; - if(gcp) - len --; - - ndbout << "LogEntry Table: " << (Uint32)ntohl(logEntry->TableId) - << " Event: " << event - << " Length: " << (len - 2); - - const Uint32 dataLen = len - 2; -#if 0 - Uint32 pos = 0; - while(pos < dataLen){ - AttributeHeader * ah = (AttributeHeader*)&logEntry->Data[pos]; - ndbout_c(" Attribut: %d Size: %d", - ah->getAttributeId(), - ah->getDataSize()); - pos += ah->getDataSize() + 1; - } -#endif - if(gcp) - ndbout << " GCP: " << (Uint32)ntohl(logEntry->Data[dataLen]); - ndbout << endl; - } - break; - } - case BackupFormat::LCP_FILE: - { - BackupFormat::CtlFile::TableList * tabList; - if(!readTableList(f, &tabList)){ - ndbout << "Invalid file! 
No table list" << endl; - break; - } - ndbout << (* tabList) << endl; - - const Uint32 noOfTables = tabList->SectionLength - 2; - for(Uint32 i = 0; i 0){ -#if 0 - ndbout << "-> " << hex; - for(Uint32 i = 0; iMagic, BACKUP_MAGIC, sizeof(BACKUP_MAGIC)) != 0) - RETURN_FALSE(); - - dst->NdbVersion = ntohl(dst->NdbVersion); - if(dst->NdbVersion != NDB_VERSION) - RETURN_FALSE(); - - if(fread(&dst->SectionType, 4, 2, f) != 2) - RETURN_FALSE(); - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - - if(dst->SectionType != BackupFormat::FILE_HEADER) - RETURN_FALSE(); - - if(dst->SectionLength != ((sizeof(BackupFormat::FileHeader) - 12) >> 2)) - RETURN_FALSE(); - - if(fread(&dst->FileType, 4, dst->SectionLength - 2, f) != - (dst->SectionLength - 2)) - RETURN_FALSE(); - - dst->FileType = ntohl(dst->FileType); - dst->BackupId = ntohl(dst->BackupId); - dst->BackupKey_0 = ntohl(dst->BackupKey_0); - dst->BackupKey_1 = ntohl(dst->BackupKey_1); - - if(dst->ByteOrder != 0x12345678) - endian = true; - - return true; -} - -bool -readFragHeader(FILE* f, BackupFormat::DataFile::FragmentHeader * dst){ - if(fread(dst, 1, sizeof(* dst), f) != sizeof(* dst)) - return false; - - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - dst->TableId = ntohl(dst->TableId); - dst->FragmentNo = ntohl(dst->FragmentNo); - dst->ChecksumType = ntohl(dst->ChecksumType); - - if(dst->SectionLength != (sizeof(* dst) >> 2)) - RETURN_FALSE(); - - if(dst->SectionType != BackupFormat::FRAGMENT_HEADER) - RETURN_FALSE(); - - recNo = 0; - - return true; -} - -bool -readFragFooter(FILE* f, BackupFormat::DataFile::FragmentFooter * dst){ - if(fread(dst, 1, sizeof(* dst), f) != sizeof(* dst)) - RETURN_FALSE(); - - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - dst->TableId = ntohl(dst->TableId); - dst->FragmentNo = ntohl(dst->FragmentNo); - dst->NoOfRecords = ntohl(dst->NoOfRecords); - dst->Checksum = ntohl(dst->Checksum); - - if(dst->SectionLength != (sizeof(* dst) >> 2)) - RETURN_FALSE(); - - if(dst->SectionType != BackupFormat::FRAGMENT_FOOTER) - RETURN_FALSE(); - return true; -} - -static Uint32 buf[8192]; - -Int32 -readRecord(FILE* f, Uint32 **dst){ - Uint32 len; - if(fread(&len, 1, 4, f) != 4) - RETURN_FALSE(); - - len = ntohl(len); - - if(fread(buf, 4, len, f) != len) - { - return -1; - } - - if(len > 0) - recNo++; - else - ndbout_c("Found %d records", recNo); - - * dst = &buf[0]; - - - return len; -} - -Int32 -readLogEntry(FILE* f, Uint32 **dst){ - Uint32 len; - if(fread(&len, 1, 4, f) != 4) - RETURN_FALSE(); - - len = ntohl(len); - - if(fread(&buf[1], 4, len, f) != len) - return -1; - - buf[0] = len; - - if(len > 0) - logEntryNo++; - - * dst = &buf[0]; - - return len; -} - - -NdbOut & -operator<<(NdbOut& ndbout, const BackupFormat::FileHeader & hf){ - - char buf[9]; - memcpy(buf, hf.Magic, sizeof(hf.Magic)); - buf[8] = 0; - - ndbout << "-- FileHeader:" << endl; - ndbout << "Magic: " << buf << endl; - ndbout << "NdbVersion: " << hf.NdbVersion << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - ndbout << "FileType: " << hf.FileType << endl; - ndbout << "BackupId: " << hf.BackupId << endl; - ndbout << "BackupKey: [ " << hex << hf.BackupKey_0 - << " "<< hf.BackupKey_1 << " ]" << endl; - ndbout << "ByteOrder: " << hex << hf.ByteOrder << endl; - return ndbout; -} - -NdbOut & operator<<(NdbOut& ndbout, - const 
BackupFormat::DataFile::FragmentHeader & hf){ - - ndbout << "-- Fragment header:" << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - ndbout << "TableId: " << hf.TableId << endl; - ndbout << "FragmentNo: " << hf.FragmentNo << endl; - ndbout << "ChecksumType: " << hf.ChecksumType << endl; - - return ndbout; -} -NdbOut & operator<<(NdbOut& ndbout, - const BackupFormat::DataFile::FragmentFooter & hf){ - - ndbout << "-- Fragment footer:" << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - ndbout << "TableId: " << hf.TableId << endl; - ndbout << "FragmentNo: " << hf.FragmentNo << endl; - ndbout << "NoOfRecords: " << hf.NoOfRecords << endl; - ndbout << "Checksum: " << hf.Checksum << endl; - - return ndbout; -} - -bool -readTableList(FILE* f, BackupFormat::CtlFile::TableList **ret){ - BackupFormat::CtlFile::TableList * dst = - (BackupFormat::CtlFile::TableList *)&buf[0]; - - if(fread(dst, 4, 2, f) != 2) - RETURN_FALSE(); - - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - - if(dst->SectionType != BackupFormat::TABLE_LIST) - RETURN_FALSE(); - - const Uint32 len = dst->SectionLength - 2; - if(fread(&dst->TableIds[0], 4, len, f) != len) - RETURN_FALSE(); - - for(Uint32 i = 0; iTableIds[i] = ntohl(dst->TableIds[i]); - } - - * ret = dst; - - return true; -} - -bool -readTableDesc(FILE* f, BackupFormat::CtlFile::TableDescription **ret){ - BackupFormat::CtlFile::TableDescription * dst = - (BackupFormat::CtlFile::TableDescription *)&buf[0]; - - if(fread(dst, 4, 2, f) != 2) - RETURN_FALSE(); - - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - - if(dst->SectionType != BackupFormat::TABLE_DESCRIPTION) - RETURN_FALSE(); - - const Uint32 len = dst->SectionLength - 2; - if(fread(&dst->DictTabInfo[0], 4, len, f) != len) - RETURN_FALSE(); - - * ret = dst; - - return true; -} - -bool -readGCPEntry(FILE* f, BackupFormat::CtlFile::GCPEntry **ret){ - BackupFormat::CtlFile::GCPEntry * dst = - (BackupFormat::CtlFile::GCPEntry *)&buf[0]; - - if(fread(dst, 4, 4, f) != 4) - RETURN_FALSE(); - - dst->SectionType = ntohl(dst->SectionType); - dst->SectionLength = ntohl(dst->SectionLength); - - if(dst->SectionType != BackupFormat::GCP_ENTRY) - RETURN_FALSE(); - - dst->StartGCP = ntohl(dst->StartGCP); - dst->StopGCP = ntohl(dst->StopGCP); - - * ret = dst; - - return true; -} - - -NdbOut & -operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::TableList & hf) { - ndbout << "-- Table List:" << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - for(Uint32 i = 0; i < hf.SectionLength - 2; i++){ - ndbout << hf.TableIds[i] << " "; - if((i + 1) % 16 == 0) - ndbout << endl; - } - return ndbout; -} - -NdbOut & -operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::TableDescription & hf){ - ndbout << "-- Table Description:" << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - - SimplePropertiesLinearReader it(&hf.DictTabInfo[0], hf.SectionLength - 2); - char buf[1024]; - for(it.first(); it.valid(); it.next()){ - switch(it.getValueType()){ - case SimpleProperties::Uint32Value: - ndbout << "Key: " << it.getKey() - << " value(" << it.getValueLen() << ") : " - << it.getUint32() << endl; - break; - case SimpleProperties::StringValue: - if(it.getValueLen() < 
sizeof(buf)){ - it.getString(buf); - ndbout << "Key: " << it.getKey() - << " value(" << it.getValueLen() << ") : " - << "\"" << buf << "\"" << endl; - } else { - ndbout << "Key: " << it.getKey() - << " value(" << it.getValueLen() << ") : " - << "\"" << "" << "\"" << endl; - - } - break; - default: - ndbout << "Unknown type for key: " << it.getKey() - << " type: " << it.getValueType() << endl; - } - } - - return ndbout; -} - -NdbOut & -operator<<(NdbOut& ndbout, const BackupFormat::CtlFile::GCPEntry & hf) { - ndbout << "-- GCP Entry:" << endl; - ndbout << "SectionType: " << hf.SectionType << endl; - ndbout << "SectionLength: " << hf.SectionLength << endl; - ndbout << "Start GCP: " << hf.StartGCP << endl; - ndbout << "Stop GCP: " << hf.StopGCP << endl; - - return ndbout; -} - diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp deleted file mode 100644 index a99ae597f00..00000000000 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ /dev/null @@ -1,1453 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "Cmvmi.hpp" - -#include -#include -#include -#include -#include - -#include -#include - -#define DEBUG(x) { ndbout << "CMVMI::" << x << endl; } - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -// Used here only to print event reports on stdout/console. 
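// execEVENT_REP below resolves each report via EventLoggerBase::event_lookup,
// forwards it to every subscriber registered through EVENT_SUBSCRIBE_REQ whose
// per-category log level reaches the event's threshold, and prints it through
// this logger when the block's own clogLevel does as well.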
-EventLogger g_eventLogger; -extern int simulate_error_during_shutdown; - -Cmvmi::Cmvmi(Block_context& ctx) : - SimulatedBlock(CMVMI, ctx) - ,subscribers(subscriberPool) -{ - BLOCK_CONSTRUCTOR(Cmvmi); - - Uint32 long_sig_buffer_size; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ndb_mgm_get_int_parameter(p, CFG_DB_LONG_SIGNAL_BUFFER, - &long_sig_buffer_size); - - long_sig_buffer_size= long_sig_buffer_size / 256; - g_sectionSegmentPool.setSize(long_sig_buffer_size, - false,true,true,CFG_DB_LONG_SIGNAL_BUFFER); - - // Add received signals - addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP); - addRecSignal(GSN_DISCONNECT_REP, &Cmvmi::execDISCONNECT_REP); - - addRecSignal(GSN_NDB_TAMPER, &Cmvmi::execNDB_TAMPER, true); - addRecSignal(GSN_SET_LOGLEVELORD, &Cmvmi::execSET_LOGLEVELORD); - addRecSignal(GSN_EVENT_REP, &Cmvmi::execEVENT_REP); - addRecSignal(GSN_STTOR, &Cmvmi::execSTTOR); - addRecSignal(GSN_READ_CONFIG_REQ, &Cmvmi::execREAD_CONFIG_REQ); - addRecSignal(GSN_CLOSE_COMREQ, &Cmvmi::execCLOSE_COMREQ); - addRecSignal(GSN_ENABLE_COMORD, &Cmvmi::execENABLE_COMORD); - addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ); - addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD); - - addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD); - addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD); - addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD); - addRecSignal(GSN_EVENT_SUBSCRIBE_REQ, - &Cmvmi::execEVENT_SUBSCRIBE_REQ); - - addRecSignal(GSN_DUMP_STATE_ORD, &Cmvmi::execDUMP_STATE_ORD); - - addRecSignal(GSN_TESTSIG, &Cmvmi::execTESTSIG); - addRecSignal(GSN_NODE_START_REP, &Cmvmi::execNODE_START_REP, true); - - subscriberPool.setSize(5); - - const ndb_mgm_configuration_iterator * db = m_ctx.m_config.getOwnConfigIterator(); - for(unsigned j = 0; jtheData[0]); - if(ERROR_INSERTED(9999)){ - CRASH_INSERTION(9999); - } - - if(ERROR_INSERTED(9998)){ - while(true) NdbSleep_SecSleep(1); - } - - if(ERROR_INSERTED(9997)){ - ndbrequire(false); - } - -#ifndef NDB_WIN32 - if(ERROR_INSERTED(9996)){ - simulate_error_during_shutdown= SIGSEGV; - ndbrequire(false); - } - - if(ERROR_INSERTED(9995)){ - simulate_error_during_shutdown= SIGSEGV; - kill(getpid(), SIGABRT); - } -#endif - -#ifdef ERROR_INSERT - if (signal->theData[0] == 9003) - { - if (MAX_RECEIVED_SIGNALS < 1024) - { - MAX_RECEIVED_SIGNALS = 1024; - } - else - { - MAX_RECEIVED_SIGNALS = 1 + (rand() % 128); - } - ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS); - CLEAR_ERROR_INSERT_VALUE; - } -#endif -}//execNDB_TAMPER() - -void Cmvmi::execSET_LOGLEVELORD(Signal* signal) -{ - SetLogLevelOrd * const llOrd = (SetLogLevelOrd *)&signal->theData[0]; - LogLevel::EventCategory category; - Uint32 level; - jamEntry(); - - for(unsigned int i = 0; inoOfEntries; i++){ - category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16); - level = llOrd->theData[i] & 0xFFFF; - - clogLevel.setLogLevel(category, level); - } -}//execSET_LOGLEVELORD() - -void Cmvmi::execEVENT_REP(Signal* signal) -{ - //----------------------------------------------------------------------- - // This message is sent to report any types of events in NDB. - // Based on the log level they will be either ignored or - // reported. Currently they are printed, but they will be - // transferred to the management server for further distribution - // to the graphical management interface. 
- //----------------------------------------------------------------------- - EventReport * const eventReport = (EventReport *)&signal->theData[0]; - Ndb_logevent_type eventType = eventReport->getEventType(); - Uint32 nodeId= eventReport->getNodeId(); - if (nodeId == 0) - { - nodeId= refToNode(signal->getSendersBlockRef()); - eventReport->setNodeId(nodeId); - } - - jamEntry(); - - /** - * If entry is not found - */ - Uint32 threshold; - LogLevel::EventCategory eventCategory; - Logger::LoggerLevel severity; - EventLoggerBase::EventTextFunction textF; - if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity,textF)) - return; - - SubscriberPtr ptr; - for(subscribers.first(ptr); ptr.i != RNIL; subscribers.next(ptr)){ - if(ptr.p->logLevel.getLogLevel(eventCategory) < threshold){ - continue; - } - - sendSignal(ptr.p->blockRef, GSN_EVENT_REP, signal, signal->length(), JBB); - } - - if(clogLevel.getLogLevel(eventCategory) < threshold){ - return; - } - - // Print the event info - g_eventLogger.log(eventReport->getEventType(), signal->theData); - - return; -}//execEVENT_REP() - -void -Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){ - EventSubscribeReq * subReq = (EventSubscribeReq *)&signal->theData[0]; - Uint32 senderRef = signal->getSendersBlockRef(); - SubscriberPtr ptr; - jamEntry(); - DBUG_ENTER("Cmvmi::execEVENT_SUBSCRIBE_REQ"); - - /** - * Search for subcription - */ - for(subscribers.first(ptr); ptr.i != RNIL; subscribers.next(ptr)){ - if(ptr.p->blockRef == subReq->blockRef) - break; - } - - if(ptr.i == RNIL){ - /** - * Create a new one - */ - if(subscribers.seize(ptr) == false){ - sendSignal(senderRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB); - return; - } - ptr.p->logLevel.clear(); - ptr.p->blockRef = subReq->blockRef; - } - - if(subReq->noOfEntries == 0){ - /** - * Cancel subscription - */ - subscribers.release(ptr.i); - } else { - /** - * Update subscription - */ - LogLevel::EventCategory category; - Uint32 level = 0; - for(Uint32 i = 0; inoOfEntries; i++){ - category = (LogLevel::EventCategory)(subReq->theData[i] >> 16); - level = subReq->theData[i] & 0xFFFF; - ptr.p->logLevel.setLogLevel(category, level); - DBUG_PRINT("info",("entry %d: level=%d, category= %d", i, level, category)); - } - } - - signal->theData[0] = ptr.i; - sendSignal(senderRef, GSN_EVENT_SUBSCRIBE_CONF, signal, 1, JBB); - DBUG_VOID_RETURN; -} - -void -Cmvmi::cancelSubscription(NodeId nodeId){ - - SubscriberPtr ptr; - subscribers.first(ptr); - - while(ptr.i != RNIL){ - Uint32 i = ptr.i; - BlockReference blockRef = ptr.p->blockRef; - - subscribers.next(ptr); - - if(refToNode(blockRef) == nodeId){ - subscribers.release(i); - } - } -} - -void Cmvmi::sendSTTORRY(Signal* signal) -{ - jam(); - signal->theData[3] = 1; - signal->theData[4] = 3; - signal->theData[5] = 8; - signal->theData[6] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB); -}//Cmvmi::sendSTTORRY - - -void -Cmvmi::execREAD_CONFIG_REQ(Signal* signal) -{ - jamEntry(); - - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - Uint64 page_buffer = 64*1024*1024; - ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY, &page_buffer); - - Uint32 pages = 0; - pages += page_buffer / GLOBAL_PAGE_SIZE; // in pages - pages += LCP_RESTORE_BUFFER; - m_global_page_pool.setSize(pages + 64, true); - - Uint64 shared_mem = 8*1024*1024; - 
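  /* The 8 MB default above is overridden by CFG_DB_SGA (given in bytes),
     converted into GLOBAL_PAGE_SIZE pages, and registered as resource id 0
     with no reserved minimum before the memory manager is initialised. */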
ndb_mgm_get_int64_parameter(p, CFG_DB_SGA, &shared_mem); - shared_mem /= GLOBAL_PAGE_SIZE; - if (shared_mem) - { - Resource_limit rl; - rl.m_min = 0; - rl.m_max = shared_mem; - rl.m_resource_id = 0; - m_ctx.m_mm.set_resource_limit(rl); - } - - ndbrequire(m_ctx.m_mm.init()); - { - void* ptr = m_ctx.m_mm.get_memroot(); - m_shared_page_pool.set((GlobalPage*)ptr, ~0); - } - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -void Cmvmi::execSTTOR(Signal* signal) -{ - Uint32 theStartPhase = signal->theData[1]; - - jamEntry(); - if (theStartPhase == 1){ - jam(); - - if(m_ctx.m_config.lockPagesInMainMemory() == 1) - { - int res = NdbMem_MemLockAll(0); - if(res != 0){ - g_eventLogger.warning("Failed to memlock pages"); - warningEvent("Failed to memlock pages"); - } - } - - sendSTTORRY(signal); - return; - } else if (theStartPhase == 3) { - jam(); - globalData.activateSendPacked = 1; - sendSTTORRY(signal); - } else if (theStartPhase == 8){ - /*---------------------------------------------------*/ - /* Open com to API + REP nodes */ - /*---------------------------------------------------*/ - signal->theData[0] = 0; // no answer - signal->theData[1] = 0; // no id - signal->theData[2] = NodeInfo::API; - execOPEN_COMREQ(signal); - globalData.theStartLevel = NodeState::SL_STARTED; - sendSTTORRY(signal); - } -} - -void Cmvmi::execCLOSE_COMREQ(Signal* signal) -{ - // Close communication with the node and halt input/output from - // other blocks than QMGR - - CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0]; - - const BlockReference userRef = closeCom->xxxBlockRef; - Uint32 failNo = closeCom->failNo; -// Uint32 noOfNodes = closeCom->noOfNodes; - - jamEntry(); - for (unsigned i = 0; i < MAX_NODES; i++) - { - if(NodeBitmask::get(closeCom->theNodes, i)) - { - jam(); - - //----------------------------------------------------- - // Report that the connection to the node is closed - //----------------------------------------------------- - signal->theData[0] = NDB_LE_CommunicationClosed; - signal->theData[1] = i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - globalTransporterRegistry.setIOState(i, HaltIO); - globalTransporterRegistry.do_disconnect(i); - } - } - - if (failNo != 0) - { - jam(); - signal->theData[0] = userRef; - signal->theData[1] = failNo; - sendSignal(QMGR_REF, GSN_CLOSE_COMCONF, signal, 19, JBA); - } -} - -void Cmvmi::execOPEN_COMREQ(Signal* signal) -{ - // Connect to the specifed NDB node, only QMGR allowed communication - // so far with the node - - const BlockReference userRef = signal->theData[0]; - Uint32 tStartingNode = signal->theData[1]; - Uint32 tData2 = signal->theData[2]; - jamEntry(); - - const Uint32 len = signal->getLength(); - if(len == 2) - { -#ifdef ERROR_INSERT - if (! 
((ERROR_INSERTED(9000) || ERROR_INSERTED(9002)) - && c_error_9000_nodes_mask.get(tStartingNode))) -#endif - { - if (globalData.theStartLevel != NodeState::SL_STARTED && - (getNodeInfo(tStartingNode).m_type != NodeInfo::DB && - getNodeInfo(tStartingNode).m_type != NodeInfo::MGM)) - { - jam(); - goto done; - } - - globalTransporterRegistry.do_connect(tStartingNode); - globalTransporterRegistry.setIOState(tStartingNode, HaltIO); - - //----------------------------------------------------- - // Report that the connection to the node is opened - //----------------------------------------------------- - signal->theData[0] = NDB_LE_CommunicationOpened; - signal->theData[1] = tStartingNode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - //----------------------------------------------------- - } - } else { - for(unsigned int i = 1; i < MAX_NODES; i++ ) - { - jam(); - if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2) - { - jam(); - -#ifdef ERROR_INSERT - if ((ERROR_INSERTED(9000) || ERROR_INSERTED(9002)) - && c_error_9000_nodes_mask.get(i)) - continue; -#endif - - globalTransporterRegistry.do_connect(i); - globalTransporterRegistry.setIOState(i, HaltIO); - - signal->theData[0] = NDB_LE_CommunicationOpened; - signal->theData[1] = i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - } - } - } - -done: - if (userRef != 0) { - jam(); - signal->theData[0] = tStartingNode; - signal->theData[1] = tData2; - sendSignal(userRef, GSN_OPEN_COMCONF, signal, len - 1,JBA); - } -} - -void Cmvmi::execENABLE_COMORD(Signal* signal) -{ - // Enable communication with all our NDB blocks to this node - - Uint32 tStartingNode = signal->theData[0]; - globalTransporterRegistry.setIOState(tStartingNode, NoHalt); - setNodeInfo(tStartingNode).m_connected = true; - //----------------------------------------------------- - // Report that the version of the node - //----------------------------------------------------- - signal->theData[0] = NDB_LE_ConnectedApiVersion; - signal->theData[1] = tStartingNode; - signal->theData[2] = getNodeInfo(tStartingNode).m_version; - - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - //----------------------------------------------------- - - jamEntry(); -} - -void Cmvmi::execDISCONNECT_REP(Signal *signal) -{ - const DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0]; - const Uint32 hostId = rep->nodeId; - const Uint32 errNo = rep->err; - - jamEntry(); - - setNodeInfo(hostId).m_connected = false; - setNodeInfo(hostId).m_connectCount++; - const NodeInfo::NodeType type = getNodeInfo(hostId).getType(); - ndbrequire(type != NodeInfo::INVALID); - - sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal, - DisconnectRep::SignalLength, JBA); - - cancelSubscription(hostId); - - signal->theData[0] = NDB_LE_Disconnected; - signal->theData[1] = hostId; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); -} - -void Cmvmi::execCONNECT_REP(Signal *signal){ - const Uint32 hostId = signal->theData[0]; - jamEntry(); - - const NodeInfo::NodeType type = (NodeInfo::NodeType)getNodeInfo(hostId).m_type; - ndbrequire(type != NodeInfo::INVALID); - globalData.m_nodeInfo[hostId].m_version = 0; - globalData.m_nodeInfo[hostId].m_signalVersion = 0; - - if(type == NodeInfo::DB || globalData.theStartLevel >= NodeState::SL_STARTED){ - jam(); - - /** - * Inform QMGR that client has connected - */ - - signal->theData[0] = hostId; - sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA); - } else if(globalData.theStartLevel == NodeState::SL_CMVMI || - globalData.theStartLevel == 
NodeState::SL_STARTING) { - jam(); - /** - * Someone connected before start was finished - */ - if(type == NodeInfo::MGM){ - jam(); - signal->theData[0] = hostId; - sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA); - } else { - /** - * Dont allow api nodes to connect - */ - ndbout_c("%d %d %d", hostId, type, globalData.theStartLevel); - abort(); - globalTransporterRegistry.do_disconnect(hostId); - } - } - - /* Automatically subscribe events for MGM nodes. - */ - if(type == NodeInfo::MGM){ - jam(); - globalTransporterRegistry.setIOState(hostId, NoHalt); - } - - //------------------------------------------ - // Also report this event to the Event handler - //------------------------------------------ - signal->theData[0] = NDB_LE_Connected; - signal->theData[1] = hostId; - signal->header.theLength = 2; - - execEVENT_REP(signal); -} - -#ifdef VM_TRACE -void -modifySignalLogger(bool allBlocks, BlockNumber bno, - TestOrd::Command cmd, - TestOrd::SignalLoggerSpecification spec){ - SignalLoggerManager::LogMode logMode; - - /** - * Mapping between SignalLoggerManager::LogMode and - * TestOrd::SignalLoggerSpecification - */ - switch(spec){ - case TestOrd::InputSignals: - logMode = SignalLoggerManager::LogIn; - break; - case TestOrd::OutputSignals: - logMode = SignalLoggerManager::LogOut; - break; - case TestOrd::InputOutputSignals: - logMode = SignalLoggerManager::LogInOut; - break; - default: - return; - break; - } - - switch(cmd){ - case TestOrd::On: - globalSignalLoggers.logOn(allBlocks, bno, logMode); - break; - case TestOrd::Off: - globalSignalLoggers.logOff(allBlocks, bno, logMode); - break; - case TestOrd::Toggle: - globalSignalLoggers.logToggle(allBlocks, bno, logMode); - break; - case TestOrd::KeepUnchanged: - // Do nothing - break; - } - globalSignalLoggers.flushSignalLog(); -} -#endif - -void -Cmvmi::execTEST_ORD(Signal * signal){ - jamEntry(); - -#ifdef VM_TRACE - TestOrd * const testOrd = (TestOrd *)&signal->theData[0]; - - TestOrd::Command cmd; - - { - /** - * Process Trace command - */ - TestOrd::TraceSpecification traceSpec; - - testOrd->getTraceCommand(cmd, traceSpec); - unsigned long traceVal = traceSpec; - unsigned long currentTraceVal = globalSignalLoggers.getTrace(); - switch(cmd){ - case TestOrd::On: - currentTraceVal |= traceVal; - break; - case TestOrd::Off: - currentTraceVal &= (~traceVal); - break; - case TestOrd::Toggle: - currentTraceVal ^= traceVal; - break; - case TestOrd::KeepUnchanged: - // Do nothing - break; - } - globalSignalLoggers.setTrace(currentTraceVal); - } - - { - /** - * Process Log command - */ - TestOrd::SignalLoggerSpecification logSpec; - BlockNumber bno; - unsigned int loggers = testOrd->getNoOfSignalLoggerCommands(); - - if(loggers == (unsigned)~0){ // Apply command to all blocks - testOrd->getSignalLoggerCommand(0, bno, cmd, logSpec); - modifySignalLogger(true, bno, cmd, logSpec); - } else { - for(unsigned int i = 0; igetSignalLoggerCommand(i, bno, cmd, logSpec); - modifySignalLogger(false, bno, cmd, logSpec); - } - } - } - - { - /** - * Process test command - */ - testOrd->getTestCommand(cmd); - switch(cmd){ - case TestOrd::On:{ - SET_GLOBAL_TEST_ON; - } - break; - case TestOrd::Off:{ - SET_GLOBAL_TEST_OFF; - } - break; - case TestOrd::Toggle:{ - TOGGLE_GLOBAL_TEST_FLAG; - } - break; - case TestOrd::KeepUnchanged: - // Do nothing - break; - } - globalSignalLoggers.flushSignalLog(); - } - -#endif -} - -void Cmvmi::execSTOP_ORD(Signal* signal) -{ - jamEntry(); - globalData.theRestartFlag = perform_stop; -}//execSTOP_ORD() - -void 
-Cmvmi::execSTART_ORD(Signal* signal) { - - StartOrd * const startOrd = (StartOrd *)&signal->theData[0]; - jamEntry(); - - Uint32 tmp = startOrd->restartInfo; - if(StopReq::getPerformRestart(tmp)){ - jam(); - /** - * - */ - NdbRestartType type = NRT_Default; - if(StopReq::getNoStart(tmp) && StopReq::getInitialStart(tmp)) - type = NRT_NoStart_InitialStart; - if(StopReq::getNoStart(tmp) && !StopReq::getInitialStart(tmp)) - type = NRT_NoStart_Restart; - if(!StopReq::getNoStart(tmp) && StopReq::getInitialStart(tmp)) - type = NRT_DoStart_InitialStart; - if(!StopReq::getNoStart(tmp)&&!StopReq::getInitialStart(tmp)) - type = NRT_DoStart_Restart; - NdbShutdown(NST_Restart, type); - } - - if(globalData.theRestartFlag == system_started){ - jam() - /** - * START_ORD received when already started(ignored) - */ - //ndbout << "START_ORD received when already started(ignored)" << endl; - return; - } - - if(globalData.theRestartFlag == perform_stop){ - jam() - /** - * START_ORD received when stopping(ignored) - */ - //ndbout << "START_ORD received when stopping(ignored)" << endl; - return; - } - - if(globalData.theStartLevel == NodeState::SL_NOTHING){ - jam(); - globalData.theStartLevel = NodeState::SL_CMVMI; - /** - * Open connections to management servers - */ - for(unsigned int i = 1; i < MAX_NODES; i++ ){ - if (getNodeInfo(i).m_type == NodeInfo::MGM){ - if(!globalTransporterRegistry.is_connected(i)){ - globalTransporterRegistry.do_connect(i); - globalTransporterRegistry.setIOState(i, NoHalt); - } - } - } - - EXECUTE_DIRECT(QMGR, GSN_START_ORD, signal, 1); - return ; - } - - if(globalData.theStartLevel == NodeState::SL_CMVMI){ - jam(); - - if(m_ctx.m_config.lockPagesInMainMemory() == 2) - { - int res = NdbMem_MemLockAll(1); - if(res != 0) - { - g_eventLogger.warning("Failed to memlock pages"); - warningEvent("Failed to memlock pages"); - } - else - { - g_eventLogger.info("Locked future allocations"); - } - } - - globalData.theStartLevel = NodeState::SL_STARTING; - globalData.theRestartFlag = system_started; - /** - * StartLevel 1 - * - * Do Restart - */ - - // Disconnect all nodes as part of the system restart. - // We need to ensure that we are starting up - // without any connected nodes. - for(unsigned int i = 1; i < MAX_NODES; i++ ){ - if (i != getOwnNodeId() && getNodeInfo(i).m_type != NodeInfo::MGM){ - globalTransporterRegistry.do_disconnect(i); - globalTransporterRegistry.setIOState(i, HaltIO); - } - } - - /** - * Start running startphases - */ - sendSignal(NDBCNTR_REF, GSN_START_ORD, signal, 1, JBA); - return; - } -}//execSTART_ORD() - -void Cmvmi::execTAMPER_ORD(Signal* signal) -{ - jamEntry(); - // TODO We should maybe introduce a CONF and REF signal - // to be able to indicate if we really introduced an error. 
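  // In ERROR_INSERT builds the requested error number is forwarded to DBDIH as
  // a DIHNDBTAMPER signal (see the sendSignal below); in other builds the
  // order is silently ignored, and no CONF or REF is returned either way.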
-#ifdef ERROR_INSERT - TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0]; - signal->theData[2] = 0; - signal->theData[1] = tamperOrd->errorNo; - signal->theData[0] = 5; - sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB); -#endif - -}//execTAMPER_ORD() - -#ifdef VM_TRACE -class RefSignalTest { -public: - enum ErrorCode { - OK = 0, - NF_FakeErrorREF = 7 - }; - Uint32 senderRef; - Uint32 senderData; - Uint32 errorCode; -}; -#endif - - -static int iii; - -static -int -recurse(char * buf, int loops, int arg){ - char * tmp = (char*)alloca(arg); - printf("tmp = %p\n", tmp); - for(iii = 0; iiilength(), JBB); - sendSignal(NDBCNTR_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBTC_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBDIH_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBDICT_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBLQH_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBTUP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBACC_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(NDBFS_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(BACKUP_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBUTIL_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(SUMA_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(TRIX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(DBTUX_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(LGMAN_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(TSMAN_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - sendSignal(PGMAN_REF, GSN_DUMP_STATE_ORD, signal, signal->length(), JBB); - - /** - * - * Here I can dump CMVMI state if needed - */ - if(signal->theData[0] == 13){ -#if 0 - int loop = 100; - int len = (10*1024*1024); - if(signal->getLength() > 1) - loop = signal->theData[1]; - if(signal->getLength() > 2) - len = signal->theData[2]; - - ndbout_c("recurse(%d loop, %dkb per recurse)", loop, len/1024); - int a = recurse(0, loop, len); - ndbout_c("after...%d", a); -#endif - } - - DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0]; - Uint32 arg = dumpState->args[0]; - if (arg == DumpStateOrd::CmvmiDumpConnections){ - for(unsigned int i = 1; i < MAX_NODES; i++ ){ - const char* nodeTypeStr = ""; - switch(getNodeInfo(i).m_type){ - case NodeInfo::DB: - nodeTypeStr = "DB"; - break; - case NodeInfo::API: - nodeTypeStr = "API"; - break; - case NodeInfo::MGM: - nodeTypeStr = "MGM"; - break; - case NodeInfo::INVALID: - nodeTypeStr = 0; - break; - default: - nodeTypeStr = ""; - } - - if(nodeTypeStr == 0) - continue; - - infoEvent("Connection to %d (%s) %s", - i, - nodeTypeStr, - globalTransporterRegistry.getPerformStateString(i)); - } - } - - if (arg == DumpStateOrd::CmvmiDumpSubscriptions) - { - SubscriberPtr ptr; - subscribers.first(ptr); - g_eventLogger.info("List subscriptions:"); - while(ptr.i != RNIL) - { - g_eventLogger.info("Subscription: %u, nodeId: %u, ref: 0x%x", - ptr.i, refToNode(ptr.p->blockRef), ptr.p->blockRef); - for(Uint32 i = 0; i < LogLevel::LOGLEVEL_CATEGORIES; i++) - { - Uint32 level = ptr.p->logLevel.getLogLevel((LogLevel::EventCategory)i); - g_eventLogger.info("Category %u Level %u", i, level); - } - subscribers.next(ptr); - } - } - - if (arg == DumpStateOrd::CmvmiDumpLongSignalMemory){ - infoEvent("Cmvmi: g_sectionSegmentPool 
size: %d free: %d", - g_sectionSegmentPool.getSize(), - g_sectionSegmentPool.getNoOfFree()); - } - - if (dumpState->args[0] == 1000) - { - Uint32 len = signal->getLength(); - if (signal->getLength() == 1) - { - signal->theData[1] = 0; - signal->theData[2] = ~0; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 3, JBB); - return; - } - Uint32 id = signal->theData[1]; - Resource_limit rl; - if (!m_ctx.m_mm.get_resource_limit(id, rl)) - len = 2; - else - { - if (rl.m_min || rl.m_curr || rl.m_max) - infoEvent("Resource %d min: %d max: %d curr: %d", - id, rl.m_min, rl.m_max, rl.m_curr); - } - - if (len == 3) - { - signal->theData[0] = 1000; - signal->theData[1] = id+1; - signal->theData[2] = ~0; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 3, JBB); - } - return; - } - - if (arg == DumpStateOrd::CmvmiSetRestartOnErrorInsert) - { - if(signal->getLength() == 1) - { - Uint32 val = (Uint32)NRT_NoStart_Restart; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - if(!ndb_mgm_get_int_parameter(p, CFG_DB_STOP_ON_ERROR_INSERT, &val)) - { - m_ctx.m_config.setRestartOnErrorInsert(val); - } - } - else - { - m_ctx.m_config.setRestartOnErrorInsert(signal->theData[1]); - } - } - - if (arg == DumpStateOrd::CmvmiTestLongSigWithDelay) { - unsigned i; - Uint32 loopCount = dumpState->args[1]; - const unsigned len0 = 11; - const unsigned len1 = 123; - Uint32 sec0[len0]; - Uint32 sec1[len1]; - for (i = 0; i < len0; i++) - sec0[i] = i; - for (i = 0; i < len1; i++) - sec1[i] = 16 * i; - Uint32* sig = signal->getDataPtrSend(); - sig[0] = reference(); - sig[1] = 20; // test type - sig[2] = 0; - sig[3] = 0; - sig[4] = loopCount; - sig[5] = len0; - sig[6] = len1; - sig[7] = 0; - LinearSectionPtr ptr[3]; - ptr[0].p = sec0; - ptr[0].sz = len0; - ptr[1].p = sec1; - ptr[1].sz = len1; - sendSignal(reference(), GSN_TESTSIG, signal, 8, JBB, ptr, 2); - } - -#ifdef ERROR_INSERT - if (arg == 9000 || arg == 9002) - { - SET_ERROR_INSERT_VALUE(arg); - for (Uint32 i = 1; igetLength(); i++) - c_error_9000_nodes_mask.set(signal->theData[i]); - } - - if (arg == 9001) - { - CLEAR_ERROR_INSERT_VALUE; - if (signal->getLength() == 1 || signal->theData[1]) - { - for (Uint32 i = 0; itheData[0] = 0; - signal->theData[1] = i; - EXECUTE_DIRECT(CMVMI, GSN_OPEN_COMREQ, signal, 2); - } - } - } - c_error_9000_nodes_mask.clear(); - } -#endif - -#ifdef VM_TRACE -#if 0 - { - SafeCounterManager mgr(* this); mgr.setSize(1); - SafeCounterHandle handle; - - { - SafeCounter tmp(mgr, handle); - tmp.init(CMVMI, GSN_TESTSIG, /* senderData */ 13); - tmp.setWaitingFor(3); - ndbrequire(!tmp.done()); - ndbout_c("Allocted"); - } - ndbrequire(!handle.done()); - { - SafeCounter tmp(mgr, handle); - tmp.clearWaitingFor(3); - ndbrequire(tmp.done()); - ndbout_c("Deallocted"); - } - ndbrequire(handle.done()); - } -#endif -#endif - - if (arg == 9999) - { - Uint32 delay = 1000; - switch(signal->getLength()){ - case 1: - break; - case 2: - delay = signal->theData[1]; - break; - default:{ - Uint32 dmin = signal->theData[1]; - Uint32 dmax = signal->theData[2]; - delay = dmin + (rand() % (dmax - dmin)); - break; - } - } - - signal->theData[0] = 9999; - if (delay == 0) - { - execNDB_TAMPER(signal); - } - else if (delay < 10) - { - sendSignal(reference(), GSN_NDB_TAMPER, signal, 1, JBB); - } - else - { - sendSignalWithDelay(reference(), GSN_NDB_TAMPER, signal, delay, 1); - } - } -}//Cmvmi::execDUMP_STATE_ORD() - -void -Cmvmi::execNODE_START_REP(Signal* signal) -{ -#ifdef ERROR_INSERT - if (ERROR_INSERTED(9002) 
&& signal->theData[0] == getOwnNodeId()) - { - signal->theData[0] = 9001; - execDUMP_STATE_ORD(signal); - } -#endif -} - -BLOCK_FUNCTIONS(Cmvmi) - -static Uint32 g_print; -static LinearSectionPtr g_test[3]; - -void -Cmvmi::execTESTSIG(Signal* signal){ - Uint32 i; - /** - * Test of SafeCounter - */ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - Uint32 ref = signal->theData[0]; - Uint32 testType = signal->theData[1]; - Uint32 fragmentLength = signal->theData[2]; - g_print = signal->theData[3]; -// Uint32 returnCount = signal->theData[4]; - Uint32 * secSizes = &signal->theData[5]; - - if(g_print){ - SignalLoggerManager::printSignalHeader(stdout, - signal->header, - 0, - getOwnNodeId(), - true); - ndbout_c("-- Fixed section --"); - for(i = 0; ilength(); i++){ - fprintf(stdout, "H'0x%.8x ", signal->theData[i]); - if(((i + 1) % 6) == 0) - fprintf(stdout, "\n"); - } - fprintf(stdout, "\n"); - - for(i = 0; iheader.m_noOfSections; i++){ - SegmentedSectionPtr ptr(0,0,0); - ndbout_c("-- Section %d --", i); - signal->getSection(ptr, i); - ndbrequire(ptr.p != 0); - print(ptr, stdout); - ndbrequire(ptr.sz == secSizes[i]); - } - } - - /** - * Validate length:s - */ - for(i = 0; iheader.m_noOfSections; i++){ - SegmentedSectionPtr ptr; - signal->getSection(ptr, i); - ndbrequire(ptr.p != 0); - ndbrequire(ptr.sz == secSizes[i]); - } - - /** - * Testing send with delay. - */ - if (testType == 20) { - if (signal->theData[4] == 0) { - releaseSections(signal); - return; - } - signal->theData[4]--; - sendSignalWithDelay(reference(), GSN_TESTSIG, signal, 100, 8); - return; - } - - NodeReceiverGroup rg(CMVMI, c_dbNodes); - - if(signal->getSendersBlockRef() == ref){ - /** - * Signal from API (not via NodeReceiverGroup) - */ - if((testType % 2) == 1){ - signal->theData[4] = 1; - } else { - signal->theData[1] --; - signal->theData[4] = rg.m_nodes.count(); - } - } - - switch(testType){ - case 1: - sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB); - break; - case 2: - sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB); - break; - case 3: - case 4:{ - LinearSectionPtr ptr[3]; - const Uint32 secs = signal->getNoOfSections(); - for(i = 0; igetSection(sptr, i); - ptr[i].sz = sptr.sz; - ptr[i].p = new Uint32[sptr.sz]; - copy(ptr[i].p, sptr); - } - - if(testType == 3){ - sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB, ptr, secs); - } else { - sendSignal(rg, GSN_TESTSIG, signal, signal->length(), JBB, ptr, secs); - } - for(Uint32 i = 0; ilength(), - JBB, - fragmentLength); - int count = 1; - while(fragSend.m_status != FragmentSendInfo::SendComplete){ - count++; - if(g_print) - ndbout_c("Sending fragment %d", count); - sendNextSegmentedFragment(signal, fragSend); - } - break; - } - case 7: - case 8:{ - LinearSectionPtr ptr[3]; - const Uint32 secs = signal->getNoOfSections(); - for(i = 0; igetSection(sptr, i); - ptr[i].sz = sptr.sz; - ptr[i].p = new Uint32[sptr.sz]; - copy(ptr[i].p, sptr); - } - - NodeReceiverGroup tmp; - if(testType == 7){ - tmp = ref; - } else { - tmp = rg; - } - - FragmentSendInfo fragSend; - sendFirstFragment(fragSend, - tmp, - GSN_TESTSIG, - signal, - signal->length(), - JBB, - ptr, - secs, - fragmentLength); - - int count = 1; - while(fragSend.m_status != FragmentSendInfo::SendComplete){ - count++; - if(g_print) - ndbout_c("Sending fragment %d", count); - sendNextLinearFragment(signal, fragSend); - } - - for(i = 0; ilength(), JBB, - m_callBack, - fragmentLength); - } else { - m_callBack.m_callbackData = 10; - sendFragmentedSignal(rg, - 
GSN_TESTSIG, signal, signal->length(), JBB, - m_callBack, - fragmentLength); - } - break; - } - case 11: - case 12:{ - - const Uint32 secs = signal->getNoOfSections(); - memset(g_test, 0, sizeof(g_test)); - for(i = 0; igetSection(sptr, i); - g_test[i].sz = sptr.sz; - g_test[i].p = new Uint32[sptr.sz]; - copy(g_test[i].p, sptr); - } - - - Callback m_callBack; - m_callBack.m_callbackFunction = - safe_cast(&Cmvmi::sendFragmentedComplete); - - if(testType == 11){ - m_callBack.m_callbackData = 11; - sendFragmentedSignal(ref, - GSN_TESTSIG, signal, signal->length(), JBB, - g_test, secs, - m_callBack, - fragmentLength); - } else { - m_callBack.m_callbackData = 12; - sendFragmentedSignal(rg, - GSN_TESTSIG, signal, signal->length(), JBB, - g_test, secs, - m_callBack, - fragmentLength); - } - break; - } - case 13:{ - ndbrequire(signal->getNoOfSections() == 0); - Uint32 loop = signal->theData[9]; - if(loop > 0){ - signal->theData[9] --; - sendSignal(CMVMI_REF, GSN_TESTSIG, signal, signal->length(), JBB); - return; - } - sendSignal(ref, GSN_TESTSIG, signal, signal->length(), JBB); - return; - } - case 14:{ - Uint32 count = signal->theData[8]; - signal->theData[10] = count * rg.m_nodes.count(); - for(i = 0; ilength(), JBB); - } - return; - } - - default: - ndbrequire(false); - } - return; -} - -void -Cmvmi::sendFragmentedComplete(Signal* signal, Uint32 data, Uint32 returnCode){ - if(g_print) - ndbout_c("sendFragmentedComplete: %d", data); - if(data == 11 || data == 12){ - for(Uint32 i = 0; i<3; i++){ - if(g_test[i].p != 0) - delete[] g_test[i].p; - } - } -} diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp deleted file mode 100644 index 8e4f140ea4d..00000000000 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp +++ /dev/null @@ -1,118 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef Cmvmi_H_ -#define Cmvmi_H_ - -#include -#include -#include - -#include - -/** - * Cmvmi class - */ -class Cmvmi : public SimulatedBlock { -public: - Cmvmi(Block_context&); - virtual ~Cmvmi(); - -private: - /** - * These methods used to be reportXXX - * - * But they in a nasty way intefere with the execution model - * they been turned in to exec-Method used via prio A signals - */ - void execDISCONNECT_REP(Signal*); - void execCONNECT_REP(Signal*); - -private: - BLOCK_DEFINES(Cmvmi); - - // The signal processing functions - void execNDB_TAMPER(Signal* signal); - void execSET_LOGLEVELORD(Signal* signal); - void execEVENT_REP(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execSTTOR(Signal* signal); - void execCLOSE_COMREQ(Signal* signal); - void execENABLE_COMORD(Signal* signal); - void execOPEN_COMREQ(Signal* signal); - void execSIZEALT_ACK(Signal* signal); - void execTEST_ORD(Signal* signal); - - void execSTOP_ORD(Signal* signal); - void execSTART_ORD(Signal* signal); - void execTAMPER_ORD(Signal* signal); - - void execDUMP_STATE_ORD(Signal* signal); - - void execEVENT_SUBSCRIBE_REQ(Signal *); - void cancelSubscription(NodeId nodeId); - - void execTESTSIG(Signal* signal); - void execNODE_START_REP(Signal* signal); - - char theErrorMessage[256]; - void sendSTTORRY(Signal* signal); - - LogLevel clogLevel; - NdbNodeBitmask c_dbNodes; - - /** - * This struct defines the data needed for a EVENT_REP subscriber - */ - struct EventRepSubscriber { - /** - * What log level is the subscriber using - */ - LogLevel logLevel; - - /** - * What block reference does he use - * (Where should the EVENT_REP's be forwarded) - */ - BlockReference blockRef; - - /** - * Next ptr (used in pool/list) - */ - union { Uint32 nextPool; Uint32 nextList; }; - Uint32 prevList; - }; - typedef Ptr SubscriberPtr; - - /** - * Pool of EventRepSubscriber record - */ - ArrayPool subscriberPool; - - /** - * List of current subscribers - */ - DLList subscribers; - -private: - // Declared but not defined - Cmvmi(const Cmvmi &obj); - void operator = (const Cmvmi &); - - void sendFragmentedComplete(Signal* signal, Uint32 data, Uint32 returnCode); -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp deleted file mode 100644 index 267fc2ec8ef..00000000000 --- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ /dev/null @@ -1,1106 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBACC_H -#define DBACC_H - -#ifdef VM_TRACE -#define ACC_SAFE_QUEUE -#endif - -#include -#include - -#ifdef DBACC_C -// Debug Macros -#define dbgWord32(ptr, ind, val) - -/* -#define dbgWord32(ptr, ind, val) \ -if(debug_jan){ \ -tmp_val = val; \ -switch(ind){ \ -case 1: strcpy(tmp_string, "ZPOS_PAGE_TYPE "); \ -break; \ -case 2: strcpy(tmp_string, "ZPOS_NO_ELEM_IN_PAGE"); \ -break; \ -case 3: strcpy(tmp_string, "ZPOS_CHECKSUM "); \ -break; \ -case 4: strcpy(tmp_string, "ZPOS_OVERFLOWREC "); \ -break; \ -case 5: strcpy(tmp_string, "ZPOS_FREE_AREA_IN_PAGE"); \ -break; \ -case 6: strcpy(tmp_string, "ZPOS_LAST_INDEX "); \ -break; \ -case 7: strcpy(tmp_string, "ZPOS_INSERT_INDEX "); \ -break; \ -case 8: strcpy(tmp_string, "ZPOS_ARRAY_POS "); \ -break; \ -case 9: strcpy(tmp_string, "ZPOS_NEXT_FREE_INDEX"); \ -break; \ -case 10: strcpy(tmp_string, "ZPOS_NEXT_PAGE "); \ -break; \ -case 11: strcpy(tmp_string, "ZPOS_PREV_PAGE "); \ -break; \ -default: sprintf(tmp_string, "%-20d", ind);\ -} \ -ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: " << tmp_val << " \tLINE: " << __LINE__ << endl; \ -}\ -*/ - -// Constants -/** ------------------------------------------------------------------------ - * THESE ARE CONSTANTS THAT ARE USED FOR DEFINING THE SIZE OF BUFFERS, THE - * SIZE OF PAGE HEADERS, THE NUMBER OF BUFFERS IN A PAGE AND A NUMBER OF - * OTHER CONSTANTS WHICH ARE CHANGED WHEN THE BUFFER SIZE IS CHANGED. - * ----------------------------------------------------------------------- */ -#define ZHEAD_SIZE 32 -#define ZCON_HEAD_SIZE 2 -#define ZBUF_SIZE 28 -#define ZEMPTYLIST 72 -#define ZUP_LIMIT 14 -#define ZDOWN_LIMIT 12 -#define ZSHIFT_PLUS 5 -#define ZSHIFT_MINUS 2 -#define ZFREE_LIMIT 65 -#define ZNO_CONTAINERS 64 -#define ZELEM_HEAD_SIZE 1 -/* ------------------------------------------------------------------------- */ -/* THESE CONSTANTS DEFINE THE USE OF THE PAGE HEADER IN THE INDEX PAGES. 
*/ -/* ------------------------------------------------------------------------- */ -#define ZPOS_PAGE_ID 0 -#define ZPOS_PAGE_TYPE 1 -#define ZPOS_PAGE_TYPE_BIT 14 -#define ZPOS_EMPTY_LIST 1 -#define ZPOS_ALLOC_CONTAINERS 2 -#define ZPOS_CHECKSUM 3 -#define ZPOS_OVERFLOWREC 4 -#define ZPOS_NO_ELEM_IN_PAGE 2 -#define ZPOS_FREE_AREA_IN_PAGE 5 -#define ZPOS_LAST_INDEX 6 -#define ZPOS_INSERT_INDEX 7 -#define ZPOS_ARRAY_POS 8 -#define ZPOS_NEXT_FREE_INDEX 9 -#define ZPOS_NEXT_PAGE 10 -#define ZPOS_PREV_PAGE 11 -#define ZNORMAL_PAGE_TYPE 0 -#define ZOVERFLOW_PAGE_TYPE 1 -#define ZDEFAULT_LIST 3 -#define ZWORDS_IN_PAGE 2048 -#define ZADDFRAG 0 -#define ZDIRARRAY 68 -#define ZDIRRANGESIZE 65 -//#define ZEMPTY_FRAGMENT 0 -#define ZFRAGMENTSIZE 64 -#define ZFIRSTTIME 1 -#define ZFS_CONNECTSIZE 300 -#define ZFS_OPSIZE 100 -#define ZKEYINKEYREQ 4 -#define ZLEFT 1 -#define ZLOCALLOGFILE 2 -#define ZLOCKED 0 -#define ZMAXSCANSIGNALLEN 20 -#define ZMAINKEYLEN 8 -#define ZNO_OF_DISK_VERSION 3 -#define ZNO_OF_OP_PER_SIGNAL 20 -//#define ZNOT_EMPTY_FRAGMENT 1 -#define ZOP_HEAD_INFO_LN 3 -#define ZOPRECSIZE 740 -#define ZOVERFLOWRECSIZE 5 -#define ZPAGE8_BASE_ADD 1 -#define ZPAGESIZE 128 -#define ZPARALLEL_QUEUE 1 -#define ZPDIRECTORY 1 -#define ZSCAN_MAX_LOCK 4 -#define ZSERIAL_QUEUE 2 -#define ZSPH1 1 -#define ZSPH2 2 -#define ZSPH3 3 -#define ZSPH6 6 -#define ZREADLOCK 0 -#define ZRIGHT 2 -#define ZROOTFRAGMENTSIZE 32 -#define ZSCAN_LOCK_ALL 3 -/** - * Check kernel_types for other operation types - */ -#define ZSCAN_OP 6 -#define ZSCAN_REC_SIZE 256 -#define ZSTAND_BY 2 -#define ZTABLESIZE 16 -#define ZTABMAXINDEX 3 -#define ZUNDEFINED_OP 6 -#define ZUNLOCKED 1 - -/* --------------------------------------------------------------------------------- */ -/* CONTINUEB CODES */ -/* --------------------------------------------------------------------------------- */ -#define ZINITIALISE_RECORDS 1 -#define ZSEND_SCAN_HBREP 4 -#define ZREL_ROOT_FRAG 5 -#define ZREL_FRAG 6 -#define ZREL_DIR 7 -#define ZREPORT_MEMORY_USAGE 8 - -/* ------------------------------------------------------------------------- */ -/* ERROR CODES */ -/* ------------------------------------------------------------------------- */ -#define ZLIMIT_OF_ERROR 600 // Limit check for error codes -#define ZCHECKROOT_ERROR 601 // Delete fragment error code -#define ZCONNECT_SIZE_ERROR 602 // ACC_SEIZEREF -#define ZDIR_RANGE_ERROR 603 // Add fragment error code -#define ZFULL_FRAGRECORD_ERROR 604 // Add fragment error code -#define ZFULL_ROOTFRAGRECORD_ERROR 605 // Add fragment error code -#define ZROOTFRAG_STATE_ERROR 606 // Add fragment -#define ZOVERTAB_REC_ERROR 607 // Add fragment - -#define ZSCAN_REFACC_CONNECT_ERROR 608 // ACC_SCANREF -#define ZFOUR_ACTIVE_SCAN_ERROR 609 // ACC_SCANREF -#define ZNULL_SCAN_REC_ERROR 610 // ACC_SCANREF - -#define ZDIRSIZE_ERROR 623 -#define ZOVER_REC_ERROR 624 // Insufficient Space -#define ZPAGESIZE_ERROR 625 -#define ZTUPLE_DELETED_ERROR 626 -#define ZREAD_ERROR 626 -#define ZWRITE_ERROR 630 -#define ZTO_OP_STATE_ERROR 631 -#define ZTOO_EARLY_ACCESS_ERROR 632 -#endif - -class ElementHeader { - /** - * - * l = Locked -- If true contains operation else scan bits + hash value - * s = Scan bits - * h = Hash value - * o = Operation ptr I - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * lssssssssssss hhhhhhhhhhhhhhhh - * ooooooooooooooooooooooooooooooo - */ -public: - STATIC_CONST( HASH_VALUE_PART_MASK = 0xFFFF ); - - static bool getLocked(Uint32 data); - static bool getUnlocked(Uint32 data); 
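 /*
  * Illustrative sketch only (the concrete values are made-up examples, not
  * part of the block interface): following the layout comment above, an
  * unlocked element built as setUnlocked(0x1234, 0x3) holds
  * (0x1234 << 16) | (0x3 << 1) | 1, so
  *
  *   getUnlocked(h)        // true   - bit 0 is set
  *   getScanBits(h)        // 0x3    - the low bits above bit 0
  *   getHashValuePart(h)   // 0x1234 - the upper 16 bits
  *
  * while a locked element built as setLocked(opPtrI) holds opPtrI << 1 with
  * bit 0 clear, so getOpPtrI(h) returns opPtrI again (assuming opPtrI fits
  * in 31 bits and the scan-bit field stays below bit 16, as drawn above).
  */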
- static Uint32 getScanBits(Uint32 data); - static Uint32 getHashValuePart(Uint32 data); - static Uint32 getOpPtrI(Uint32 data); - - static Uint32 setLocked(Uint32 opPtrI); - static Uint32 setUnlocked(Uint32 hashValuePart, Uint32 scanBits); - static Uint32 setScanBit(Uint32 header, Uint32 scanBit); - static Uint32 clearScanBit(Uint32 header, Uint32 scanBit); -}; - -inline -bool -ElementHeader::getLocked(Uint32 data){ - return (data & 1) == 0; -} - -inline -bool -ElementHeader::getUnlocked(Uint32 data){ - return (data & 1) == 1; -} - -inline -Uint32 -ElementHeader::getScanBits(Uint32 data){ - assert(getUnlocked(data)); - return (data >> 1) & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1); -} - -inline -Uint32 -ElementHeader::getHashValuePart(Uint32 data){ - assert(getUnlocked(data)); - return data >> 16; -} - -inline -Uint32 -ElementHeader::getOpPtrI(Uint32 data){ - assert(getLocked(data)); - return data >> 1; -} - -inline -Uint32 -ElementHeader::setLocked(Uint32 opPtrI){ - return (opPtrI << 1) + 0; -} -inline -Uint32 -ElementHeader::setUnlocked(Uint32 hashValue, Uint32 scanBits){ - return (hashValue << 16) + (scanBits << 1) + 1; -} - -inline -Uint32 -ElementHeader::setScanBit(Uint32 header, Uint32 scanBit){ - assert(getUnlocked(header)); - return header | (scanBit << 1); -} - -inline -Uint32 -ElementHeader::clearScanBit(Uint32 header, Uint32 scanBit){ - assert(getUnlocked(header)); - return header & (~(scanBit << 1)); -} - - -class Dbacc: public SimulatedBlock { -public: -// State values -enum State { - FREEFRAG = 0, - ACTIVEFRAG = 1, - //SEND_QUE_OP = 2, - WAIT_NOTHING = 10, - WAIT_ONE_CONF = 26, - FREE_OP = 30, - WAIT_EXE_OP = 32, - WAIT_IN_QUEUE = 34, - EXE_OP = 35, - SCAN_ACTIVE = 36, - SCAN_WAIT_IN_QUEUE = 37, - IDLE = 39, - ACTIVE = 40, - WAIT_COMMIT_ABORT = 41, - ABORT = 42, - ABORTADDFRAG = 43, - REFUSEADDFRAG = 44, - DELETEFRAG = 45, - DELETETABLE = 46, - UNDEFINEDROOT = 47, - ADDFIRSTFRAG = 48, - ADDSECONDFRAG = 49, - DELETEFIRSTFRAG = 50, - DELETESECONDFRAG = 51, - ACTIVEROOT = 52 -}; - -// Records - -/* --------------------------------------------------------------------------------- */ -/* DIRECTORY RANGE */ -/* --------------------------------------------------------------------------------- */ - struct DirRange { - Uint32 dirArray[256]; - }; /* p2c: size = 1024 bytes */ - - typedef Ptr DirRangePtr; - -/* --------------------------------------------------------------------------------- */ -/* DIRECTORYARRAY */ -/* --------------------------------------------------------------------------------- */ -struct Directoryarray { - Uint32 pagep[256]; -}; /* p2c: size = 1024 bytes */ - - typedef Ptr DirectoryarrayPtr; - -/* --------------------------------------------------------------------------------- */ -/* FRAGMENTREC. ALL INFORMATION ABOUT FRAMENT AND HASH TABLE IS SAVED IN FRAGMENT */ -/* REC A POINTER TO FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC FRAGMENT */ -/* --------------------------------------------------------------------------------- */ -struct Fragmentrec { - Uint32 scan[MAX_PARALLEL_SCANS_PER_FRAG]; - union { - Uint32 mytabptr; - Uint32 myTableId; - }; - union { - Uint32 fragmentid; - Uint32 myfid; - }; - Uint32 roothashcheck; - Uint32 noOfElements; - Uint32 m_commit_count; - State rootState; - -//----------------------------------------------------------------------------- -// These variables keep track of allocated pages, the number of them and the -// start file page of them. Used during local checkpoints. 
-//----------------------------------------------------------------------------- - Uint32 datapages[8]; - Uint32 activeDataPage; - -//----------------------------------------------------------------------------- -// Temporary variables used during shrink and expand process. -//----------------------------------------------------------------------------- - Uint32 expReceivePageptr; - Uint32 expReceiveIndex; - Uint32 expReceiveForward; - Uint32 expSenderDirIndex; - Uint32 expSenderDirptr; - Uint32 expSenderIndex; - Uint32 expSenderPageptr; - -//----------------------------------------------------------------------------- -// List of lock owners and list of lock waiters to support LCP handling -//----------------------------------------------------------------------------- - Uint32 lockOwnersList; - -//----------------------------------------------------------------------------- -// References to Directory Ranges (which in turn references directories, which -// in its turn references the pages) for the bucket pages and the overflow -// bucket pages. -//----------------------------------------------------------------------------- - Uint32 directory; - Uint32 dirsize; - Uint32 overflowdir; - Uint32 lastOverIndex; - -//----------------------------------------------------------------------------- -// We have a list of overflow pages with free areas. We have a special record, -// the overflow record representing these pages. The reason is that the -// same record is also used to represent pages in the directory array that have -// been released since they were empty (there were however higher indexes with -// data in them). These are put in the firstFreeDirIndexRec-list. -// An overflow record representing a page can only be in one of these lists. -//----------------------------------------------------------------------------- - Uint32 firstOverflowRec; - Uint32 lastOverflowRec; - Uint32 firstFreeDirindexRec; - -//----------------------------------------------------------------------------- -// Counter keeping track of how many times we have expanded. We need to ensure -// that we do not shrink so many times that this variable becomes negative. -//----------------------------------------------------------------------------- - Uint32 expandCounter; - -//----------------------------------------------------------------------------- -// These variables are important for the linear hashing algorithm. -// localkeylen is the size of the local key (1 and 2 is currently supported) -// maxloadfactor is the factor specifying when to expand -// minloadfactor is the factor specifying when to shrink (hysteresis model) -// maxp and p -// maxp and p is the variables most central to linear hashing. p + maxp + 1 is the -// current number of buckets. maxp is the largest value of the type 2**n - 1 -// which is smaller than the number of buckets. These values are used to find -// correct bucket with the aid of the hash value. -// -// slack is the variable keeping track of whether we have inserted more than -// the current size is suitable for or less. Slack together with the boundaries -// set by maxloadfactor and minloadfactor decides when to expand/shrink -// slackCheck When slack goes over this value it is time to expand. 
-// slackCheck = (maxp + p + 1)*(maxloadfactor - minloadfactor) or -// bucketSize * hysteresis -//----------------------------------------------------------------------------- - Uint32 localkeylen; - Uint32 maxp; - Uint32 maxloadfactor; - Uint32 minloadfactor; - Uint32 p; - Uint32 slack; - Uint32 slackCheck; - -//----------------------------------------------------------------------------- -// nextfreefrag is the next free fragment if linked into a free list -//----------------------------------------------------------------------------- - Uint32 nextfreefrag; - -//----------------------------------------------------------------------------- -// This variable is used during restore to keep track of page id of read pages. -// During read of bucket pages this is used to calculate the page id and also -// to verify that the page id of the read page is correct. During read of over- -// flow pages it is only used to keep track of the number of pages read. -//----------------------------------------------------------------------------- - Uint32 nextAllocPage; - -//----------------------------------------------------------------------------- -// Number of pages read from file during restore -//----------------------------------------------------------------------------- - Uint32 noOfExpectedPages; - -//----------------------------------------------------------------------------- -// Fragment State, mostly applicable during LCP and restore -//----------------------------------------------------------------------------- - State fragState; - -//----------------------------------------------------------------------------- -// elementLength: Length of element in bucket and overflow pages -// keyLength: Length of key -//----------------------------------------------------------------------------- - Uint8 elementLength; - Uint16 keyLength; - -//----------------------------------------------------------------------------- -// This flag is used to avoid sending a big number of expand or shrink signals -// when simultaneously committing many inserts or deletes. -//----------------------------------------------------------------------------- - Uint8 expandFlag; - -//----------------------------------------------------------------------------- -// hashcheckbit is the bit to check whether to send element to split bucket or not -// k (== 6) is the number of buckets per page -// lhfragbits is the number of bits used to calculate the fragment id -// lhdirbits is the number of bits used to calculate the page id -//----------------------------------------------------------------------------- - Uint8 hashcheckbit; - Uint8 k; - Uint8 lhfragbits; - Uint8 lhdirbits; - -//----------------------------------------------------------------------------- -// nodetype can only be STORED in this release. 
Is currently only set, never read -//----------------------------------------------------------------------------- - Uint8 nodetype; - -//----------------------------------------------------------------------------- -// flag to avoid accessing table record if no char attributes -//----------------------------------------------------------------------------- - Uint8 hasCharAttr; -}; - - typedef Ptr FragmentrecPtr; - -/* --------------------------------------------------------------------------------- */ -/* OPERATIONREC */ -/* --------------------------------------------------------------------------------- */ -struct Operationrec { - Uint32 m_op_bits; - Uint32 localdata[2]; - Uint32 elementIsforward; - Uint32 elementPage; - Uint32 elementPointer; - Uint32 fid; - Uint32 fragptr; - Uint32 hashvaluePart; - Uint32 hashValue; - Uint32 nextLockOwnerOp; - Uint32 nextOp; - Uint32 nextParallelQue; - union { - Uint32 nextSerialQue; - Uint32 m_lock_owner_ptr_i; // if nextParallelQue = RNIL, else undefined - }; - Uint32 prevOp; - Uint32 prevLockOwnerOp; - union { - Uint32 prevParallelQue; - Uint32 m_lo_last_parallel_op_ptr_i; - }; - union { - Uint32 prevSerialQue; - Uint32 m_lo_last_serial_op_ptr_i; - }; - Uint32 scanRecPtr; - Uint32 transId1; - Uint32 transId2; - Uint32 userptr; - Uint16 elementContainer; - Uint16 tupkeylen; - Uint32 xfrmtupkeylen; - Uint32 userblockref; - Uint32 scanBits; - - enum OpBits { - OP_MASK = 0x0000F // 4 bits for operation type - ,OP_LOCK_MODE = 0x00010 // 0 - shared lock, 1 = exclusive lock - ,OP_ACC_LOCK_MODE = 0x00020 // Or:de lock mode of all operation - // before me - ,OP_LOCK_OWNER = 0x00040 - ,OP_RUN_QUEUE = 0x00080 // In parallell queue of lock owner - ,OP_DIRTY_READ = 0x00100 - ,OP_LOCK_REQ = 0x00200 // isAccLockReq - ,OP_COMMIT_DELETE_CHECK = 0x00400 - ,OP_INSERT_IS_DONE = 0x00800 - ,OP_ELEMENT_DISAPPEARED = 0x01000 - - ,OP_STATE_MASK = 0xF0000 - ,OP_STATE_IDLE = 0xF0000 - ,OP_STATE_WAITING = 0x00000 - ,OP_STATE_RUNNING = 0x10000 - ,OP_STATE_EXECUTED = 0x30000 - - ,OP_EXECUTED_DIRTY_READ = 0x3050F - ,OP_INITIAL = ~(Uint32)0 - }; - - Operationrec() {} - bool is_same_trans(const Operationrec* op) const { - return - transId1 == op->transId1 && transId2 == op->transId2; - } - -}; /* p2c: size = 168 bytes */ - - typedef Ptr OperationrecPtr; - -/* --------------------------------------------------------------------------------- */ -/* OVERFLOW_RECORD */ -/* --------------------------------------------------------------------------------- */ -struct OverflowRecord { - Uint32 dirindex; - Uint32 nextOverRec; - Uint32 nextOverList; - Uint32 prevOverRec; - Uint32 prevOverList; - Uint32 overpage; - Uint32 nextfreeoverrec; -}; - - typedef Ptr OverflowRecordPtr; - -/* --------------------------------------------------------------------------------- */ -/* PAGE8 */ -/* --------------------------------------------------------------------------------- */ -struct Page8 { - Uint32 word32[2048]; -}; /* p2c: size = 8192 bytes */ - - typedef Ptr Page8Ptr; - -/* --------------------------------------------------------------------------------- */ -/* SCAN_REC */ -/* --------------------------------------------------------------------------------- */ -struct ScanRec { - enum ScanState { - WAIT_NEXT, - SCAN_DISCONNECT - }; - enum ScanBucketState { - FIRST_LAP, - SECOND_LAP, - SCAN_COMPLETED - }; - Uint32 activeLocalFrag; - Uint32 nextBucketIndex; - Uint32 scanNextfreerec; - Uint32 scanFirstActiveOp; - Uint32 scanFirstLockedOp; - Uint32 scanLastLockedOp; - Uint32 scanFirstQueuedOp; - 
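 /*
  * Sketch, not part of this block's code: the Fragmentrec fields described
  * above (maxp, p, slack, slackCheck) follow the classic linear-hashing
  * scheme, where p + maxp + 1 is the current number of buckets and maxp is
  * a 2^n - 1 mask.  Under that scheme, given a Fragmentrec f, the home
  * bucket of a hash value would be found roughly as
  *
  *   Uint32 bucket = hashValue & f.maxp;
  *   if (bucket < f.p)
  *     bucket = hashValue & ((f.maxp << 1) | 1);   // bucket already split
  *
  * and an expand is triggered once slack goes over slackCheck, i.e. once the
  * load factor passes maxloadfactor for the current bucket count.
  */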
Uint32 scanLastQueuedOp; - Uint32 scanUserptr; - Uint32 scanTrid1; - Uint32 scanTrid2; - Uint32 startNoOfBuckets; - Uint32 minBucketIndexToRescan; - Uint32 maxBucketIndexToRescan; - Uint32 scanOpsAllocated; - ScanBucketState scanBucketState; - ScanState scanState; - Uint16 scanLockHeld; - Uint32 scanUserblockref; - Uint32 scanMask; - Uint8 scanLockMode; - Uint8 scanTimer; - Uint8 scanContinuebCounter; - Uint8 scanReadCommittedFlag; -}; - - typedef Ptr ScanRecPtr; - - -/* --------------------------------------------------------------------------------- */ -/* TABREC */ -/* --------------------------------------------------------------------------------- */ -struct Tabrec { - Uint32 fragholder[MAX_FRAG_PER_NODE]; - Uint32 fragptrholder[MAX_FRAG_PER_NODE]; - Uint32 tabUserPtr; - BlockReference tabUserRef; -}; - typedef Ptr TabrecPtr; - -public: - Dbacc(Block_context&); - virtual ~Dbacc(); - - // pointer to TUP instance in this thread - class Dbtup* c_tup; - class Dblqh* c_lqh; - - void execACCMINUPDATE(Signal* signal); - -private: - BLOCK_DEFINES(Dbacc); - - // Transit signals - void execDEBUG_SIG(Signal* signal); - void execCONTINUEB(Signal* signal); - void execACC_CHECK_SCAN(Signal* signal); - void execEXPANDCHECK2(Signal* signal); - void execSHRINKCHECK2(Signal* signal); - void execACC_OVER_REC(Signal* signal); - void execNEXTOPERATION(Signal* signal); - void execREAD_PSEUDO_REQ(Signal* signal); - - // Received signals - void execSTTOR(Signal* signal); - void execACCKEYREQ(Signal* signal); - void execACCSEIZEREQ(Signal* signal); - void execACCFRAGREQ(Signal* signal); - void execNEXT_SCANREQ(Signal* signal); - void execACC_ABORTREQ(Signal* signal); - void execACC_SCANREQ(Signal* signal); - void execACC_COMMITREQ(Signal* signal); - void execACC_TO_REQ(Signal* signal); - void execACC_LOCKREQ(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - - // Statement blocks - void ACCKEY_error(Uint32 fromWhere); - - void commitDeleteCheck(); - void report_dealloc(Signal* signal, const Operationrec* opPtrP); - - typedef void * RootfragmentrecPtr; - void initRootFragPageZero(FragmentrecPtr, Page8Ptr); - void initFragAdd(Signal*, FragmentrecPtr); - void initFragPageZero(FragmentrecPtr, Page8Ptr); - void initFragGeneral(FragmentrecPtr); - void verifyFragCorrect(FragmentrecPtr regFragPtr); - void releaseFragResources(Signal* signal, Uint32 fragIndex); - void releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr); - void releaseRootFragResources(Signal* signal, Uint32 tableId); - void releaseDirResources(Signal* signal, - Uint32 fragIndex, - Uint32 dirIndex, - Uint32 startIndex); - void releaseDirectoryResources(Signal* signal, - Uint32 fragIndex, - Uint32 dirIndex, - Uint32 startIndex, - Uint32 directoryIndex); - void releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr); - void releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr); - void releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr); - void initScanFragmentPart(Signal* signal); - Uint32 checkScanExpand(Signal* signal); - Uint32 checkScanShrink(Signal* signal); - void initialiseDirRec(Signal* signal); - void initialiseDirRangeRec(Signal* signal); - void initialiseFragRec(Signal* signal); - void initialiseFsConnectionRec(Signal* signal); - void initialiseFsOpRec(Signal* signal); - void initialiseOperationRec(Signal* signal); - void initialiseOverflowRec(Signal* signal); - 
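 /*
  * Note (sketch): the initialise*Rec() methods declared here are driven from
  * initialiseRecordsLab(), which runs one record type per phase and, after
  * each phase, re-sends CONTINUEB to itself with the next phase number,
  * roughly
  *
  *   signal->theData[0] = ZINITIALISE_RECORDS;
  *   signal->theData[1] = tdata0 + 1;        // next record type to set up
  *   signal->theData[3] = ref;               // where READ_CONFIG_CONF goes
  *   signal->theData[4] = data;
  *   sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
  *
  * until the last phase answers the original READ_CONFIG_REQ with
  * READ_CONFIG_CONF.
  */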
void initialisePageRec(Signal* signal); - void initialiseRootfragRec(Signal* signal); - void initialiseScanRec(Signal* signal); - void initialiseTableRec(Signal* signal); - bool addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fragId); - void initOpRec(Signal* signal); - void sendAcckeyconf(Signal* signal); - Uint32 getNoParallelTransaction(const Operationrec*); - -#ifdef VM_TRACE - Uint32 getNoParallelTransactionFull(const Operationrec*); -#endif -#ifdef ACC_SAFE_QUEUE - bool validate_lock_queue(OperationrecPtr opPtr); - Uint32 get_parallel_head(OperationrecPtr opPtr); - void dump_lock_queue(OperationrecPtr loPtr); -#else - bool validate_lock_queue(OperationrecPtr) { return true;} -#endif - -public: - void execACCKEY_ORD(Signal* signal, Uint32 opPtrI); - void startNext(Signal* signal, OperationrecPtr lastOp); - -private: - Uint32 placeReadInLockQueue(OperationrecPtr lockOwnerPtr); - Uint32 placeWriteInLockQueue(OperationrecPtr lockOwnerPtr); - void placeSerialQueue(OperationrecPtr lockOwner, OperationrecPtr op); - void abortSerieQueueOperation(Signal* signal, OperationrecPtr op); - void abortParallelQueueOperation(Signal* signal, OperationrecPtr op); - - void expandcontainer(Signal* signal); - void shrinkcontainer(Signal* signal); - void nextcontainerinfoExp(Signal* signal); - void releaseAndCommitActiveOps(Signal* signal); - void releaseAndCommitQueuedOps(Signal* signal); - void releaseAndAbortLockedOps(Signal* signal); - void containerinfo(Signal* signal); - bool getScanElement(Signal* signal); - void initScanOpRec(Signal* signal); - void nextcontainerinfo(Signal* signal); - void putActiveScanOp(Signal* signal); - void putOpScanLockQue(); - void putReadyScanQueue(Signal* signal, Uint32 scanRecIndex); - void releaseScanBucket(Signal* signal); - void releaseScanContainer(Signal* signal); - void releaseScanRec(Signal* signal); - bool searchScanContainer(Signal* signal); - void sendNextScanConf(Signal* signal); - void setlock(Signal* signal); - void takeOutActiveScanOp(Signal* signal); - void takeOutScanLockQueue(Uint32 scanRecIndex); - void takeOutReadyScanQueue(Signal* signal); - void insertElement(Signal* signal); - void insertContainer(Signal* signal); - void addnewcontainer(Signal* signal); - void getfreelist(Signal* signal); - void increaselistcont(Signal* signal); - void seizeLeftlist(Signal* signal); - void seizeRightlist(Signal* signal); - Uint32 readTablePk(Uint32 localkey1, Uint32 eh, OperationrecPtr); - Uint32 getElement(Signal* signal, OperationrecPtr& lockOwner); - void getdirindex(Signal* signal); - void commitdelete(Signal* signal); - void deleteElement(Signal* signal); - void getLastAndRemove(Signal* signal); - void releaseLeftlist(Signal* signal); - void releaseRightlist(Signal* signal); - void checkoverfreelist(Signal* signal); - void abortOperation(Signal* signal); - void commitOperation(Signal* signal); - void copyOpInfo(OperationrecPtr dst, OperationrecPtr src); - Uint32 executeNextOperation(Signal* signal); - void releaselock(Signal* signal); - void release_lockowner(Signal* signal, OperationrecPtr, bool commit); - void startNew(Signal* signal, OperationrecPtr newOwner); - void abortWaitingOperation(Signal*, OperationrecPtr); - void abortExecutedOperation(Signal*, OperationrecPtr); - - void takeOutFragWaitQue(Signal* signal); - void check_lock_upgrade(Signal* signal, OperationrecPtr release_op, bool lo); - void check_lock_upgrade(Signal* signal, OperationrecPtr lock_owner, - OperationrecPtr release_op); - void allocOverflowPage(Signal* signal); - bool 
getfragmentrec(Signal* signal, FragmentrecPtr&, Uint32 fragId); - void insertLockOwnersList(Signal* signal, const OperationrecPtr&); - void takeOutLockOwnersList(Signal* signal, const OperationrecPtr&); - - void initFsOpRec(Signal* signal); - void initOverpage(Signal* signal); - void initPage(Signal* signal); - void initRootfragrec(Signal* signal); - void putOpInFragWaitQue(Signal* signal); - void putOverflowRecInFrag(Signal* signal); - void putRecInFreeOverdir(Signal* signal); - void releaseDirectory(Signal* signal); - void releaseDirrange(Signal* signal); - void releaseFsConnRec(Signal* signal); - void releaseFsOpRec(Signal* signal); - void releaseOpRec(Signal* signal); - void releaseOverflowRec(Signal* signal); - void releaseOverpage(Signal* signal); - void releasePage(Signal* signal); - void releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId); - void seizeDirectory(Signal* signal); - void seizeDirrange(Signal* signal); - void seizeFragrec(Signal* signal); - void seizeFsConnectRec(Signal* signal); - void seizeFsOpRec(Signal* signal); - void seizeOpRec(Signal* signal); - void seizeOverRec(Signal* signal); - void seizePage(Signal* signal); - void seizeRootfragrec(Signal* signal); - void seizeScanRec(Signal* signal); - void sendSystemerror(Signal* signal, int line); - void takeRecOutOfFreeOverdir(Signal* signal); - void takeRecOutOfFreeOverpage(Signal* signal); - void sendScanHbRep(Signal* signal, Uint32); - - void addFragRefuse(Signal* signal, Uint32 errorCode); - void ndbsttorryLab(Signal* signal); - void acckeyref1Lab(Signal* signal, Uint32 result_code); - void insertelementLab(Signal* signal); - void checkNextFragmentLab(Signal* signal); - void endofexpLab(Signal* signal); - void endofshrinkbucketLab(Signal* signal); - void senddatapagesLab(Signal* signal); - void sttorrysignalLab(Signal* signal); - void sendholdconfsignalLab(Signal* signal); - void accIsLockedLab(Signal* signal, OperationrecPtr lockOwnerPtr); - void insertExistElemLab(Signal* signal, OperationrecPtr lockOwnerPtr); - void refaccConnectLab(Signal* signal); - void releaseScanLab(Signal* signal); - void ndbrestart1Lab(Signal* signal); - void initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data); - void checkNextBucketLab(Signal* signal); - void storeDataPageInDirectoryLab(Signal* signal); - - void zpagesize_error(const char* where); - - void reportMemoryUsage(Signal* signal, int gth); - void reenable_expand_after_redo_log_exection_complete(Signal*); - - // charsets - void xfrmKeyData(Signal* signal); - - // Initialisation - void initData(); - void initRecords(); - - // Variables -/* --------------------------------------------------------------------------------- */ -/* DIRECTORY RANGE */ -/* --------------------------------------------------------------------------------- */ - DirRange *dirRange; - DirRangePtr expDirRangePtr; - DirRangePtr gnsDirRangePtr; - DirRangePtr newDirRangePtr; - DirRangePtr rdDirRangePtr; - DirRangePtr nciOverflowrangeptr; - Uint32 cdirrangesize; - Uint32 cfirstfreeDirrange; -/* --------------------------------------------------------------------------------- */ -/* DIRECTORYARRAY */ -/* --------------------------------------------------------------------------------- */ - Directoryarray *directoryarray; - DirectoryarrayPtr expDirptr; - DirectoryarrayPtr rdDirptr; - DirectoryarrayPtr sdDirptr; - DirectoryarrayPtr nciOverflowDirptr; - Uint32 cdirarraysize; - Uint32 cdirmemory; - Uint32 cfirstfreedir; -/* 
--------------------------------------------------------------------------------- */ -/* FRAGMENTREC. ALL INFORMATION ABOUT FRAMENT AND HASH TABLE IS SAVED IN FRAGMENT */ -/* REC A POINTER TO FRAGMENT RECORD IS SAVED IN ROOTFRAGMENTREC FRAGMENT */ -/* --------------------------------------------------------------------------------- */ - Fragmentrec *fragmentrec; - FragmentrecPtr fragrecptr; - Uint32 cfirstfreefrag; - Uint32 cfragmentsize; -/* --------------------------------------------------------------------------------- */ -/* FS_CONNECTREC */ -/* --------------------------------------------------------------------------------- */ -/* OPERATIONREC */ -/* --------------------------------------------------------------------------------- */ - Operationrec *operationrec; - OperationrecPtr operationRecPtr; - OperationrecPtr idrOperationRecPtr; - OperationrecPtr mlpqOperPtr; - OperationrecPtr queOperPtr; - OperationrecPtr readWriteOpPtr; - Uint32 cfreeopRec; - Uint32 coprecsize; -/* --------------------------------------------------------------------------------- */ -/* OVERFLOW_RECORD */ -/* --------------------------------------------------------------------------------- */ - OverflowRecord *overflowRecord; - OverflowRecordPtr iopOverflowRecPtr; - OverflowRecordPtr tfoOverflowRecPtr; - OverflowRecordPtr porOverflowRecPtr; - OverflowRecordPtr priOverflowRecPtr; - OverflowRecordPtr rorOverflowRecPtr; - OverflowRecordPtr sorOverflowRecPtr; - OverflowRecordPtr troOverflowRecPtr; - Uint32 cfirstfreeoverrec; - Uint32 coverflowrecsize; - -/* --------------------------------------------------------------------------------- */ -/* PAGE8 */ -/* --------------------------------------------------------------------------------- */ - Page8 *page8; - /* 8 KB PAGE */ - Page8Ptr ancPageptr; - Page8Ptr colPageptr; - Page8Ptr ccoPageptr; - Page8Ptr datapageptr; - Page8Ptr delPageptr; - Page8Ptr excPageptr; - Page8Ptr expPageptr; - Page8Ptr gdiPageptr; - Page8Ptr gePageptr; - Page8Ptr gflPageptr; - Page8Ptr idrPageptr; - Page8Ptr ilcPageptr; - Page8Ptr inpPageptr; - Page8Ptr iopPageptr; - Page8Ptr lastPageptr; - Page8Ptr lastPrevpageptr; - Page8Ptr lcnPageptr; - Page8Ptr lcnCopyPageptr; - Page8Ptr lupPageptr; - Page8Ptr ciPageidptr; - Page8Ptr gsePageidptr; - Page8Ptr isoPageptr; - Page8Ptr nciPageidptr; - Page8Ptr rsbPageidptr; - Page8Ptr rscPageidptr; - Page8Ptr slPageidptr; - Page8Ptr sscPageidptr; - Page8Ptr rlPageptr; - Page8Ptr rlpPageptr; - Page8Ptr ropPageptr; - Page8Ptr rpPageptr; - Page8Ptr slPageptr; - Page8Ptr spPageptr; - Uint32 cfirstfreepage; - Uint32 cfreepage; - Uint32 cpagesize; - Uint32 cnoOfAllocatedPages; -/* --------------------------------------------------------------------------------- */ -/* ROOTFRAGMENTREC */ -/* DURING EXPAND FRAGMENT PROCESS, EACH FRAGMEND WILL BE EXPAND INTO TWO */ -/* NEW FRAGMENTS.TO MAKE THIS PROCESS EASIER, DURING ADD FRAGMENT PROCESS */ -/* NEXT FRAGMENT IDENTIIES WILL BE CALCULATED, AND TWO FRAGMENTS WILL BE */ -/* ADDED IN (NDBACC). THEREBY EXPAND OF FRAGMENT CAN BE PERFORMED QUICK AND */ -/* EASY.THE NEW FRAGMENT ID SENDS TO TUP MANAGER FOR ALL OPERATION PROCESS. 
*/ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* SCAN_REC */ -/* --------------------------------------------------------------------------------- */ - ScanRec *scanRec; - ScanRecPtr scanPtr; - Uint32 cscanRecSize; - Uint32 cfirstFreeScanRec; -/* --------------------------------------------------------------------------------- */ -/* TABREC */ -/* --------------------------------------------------------------------------------- */ - Tabrec *tabrec; - TabrecPtr tabptr; - Uint32 ctablesize; - Uint32 tgseElementptr; - Uint32 tgseContainerptr; - Uint32 trlHead; - Uint32 trlRelCon; - Uint32 trlNextused; - Uint32 trlPrevused; - Uint32 tlcnChecksum; - Uint32 tlupElemIndex; - Uint32 tlupIndex; - Uint32 tlupForward; - Uint32 tancNext; - Uint32 tancBufType; - Uint32 tancContainerptr; - Uint32 tancPageindex; - Uint32 tancPageid; - Uint32 tidrResult; - Uint32 tidrElemhead; - Uint32 tidrForward; - Uint32 tidrPageindex; - Uint32 tidrContainerptr; - Uint32 tidrContainerhead; - Uint32 tlastForward; - Uint32 tlastPageindex; - Uint32 tlastContainerlen; - Uint32 tlastElementptr; - Uint32 tlastContainerptr; - Uint32 tlastContainerhead; - Uint32 trlPageindex; - Uint32 tdelContainerptr; - Uint32 tdelElementptr; - Uint32 tdelForward; - Uint32 tiopPageId; - Uint32 tipPageId; - Uint32 tgeContainerptr; - Uint32 tgeElementptr; - Uint32 tgeForward; - Uint32 texpReceivedBucket; - Uint32 texpDirInd; - Uint32 texpDirRangeIndex; - Uint32 texpDirPageIndex; - Uint32 tdata0; - Uint32 tcheckpointid; - Uint32 tciContainerptr; - Uint32 tnciContainerptr; - Uint32 tisoContainerptr; - Uint32 trscContainerptr; - Uint32 tsscContainerptr; - Uint32 tciContainerlen; - Uint32 trscContainerlen; - Uint32 tsscContainerlen; - Uint32 tciContainerhead; - Uint32 tnciContainerhead; - Uint32 tslElementptr; - Uint32 tisoElementptr; - Uint32 tsscElementptr; - Uint32 tfid; - Uint32 tscanFlag; - Uint32 tgflBufType; - Uint32 tgseIsforward; - Uint32 tsscIsforward; - Uint32 trscIsforward; - Uint32 tciIsforward; - Uint32 tnciIsforward; - Uint32 tisoIsforward; - Uint32 tgseIsLocked; - Uint32 tsscIsLocked; - Uint32 tkeylen; - Uint32 tmp; - Uint32 tmpP; - Uint32 tmpP2; - Uint32 tmp1; - Uint32 tmp2; - Uint32 tgflPageindex; - Uint32 tmpindex; - Uint32 tslNextfree; - Uint32 tslPageindex; - Uint32 tgsePageindex; - Uint32 tnciNextSamePage; - Uint32 tslPrevfree; - Uint32 tciPageindex; - Uint32 trsbPageindex; - Uint32 tnciPageindex; - Uint32 tlastPrevconptr; - Uint32 tresult; - Uint32 tslUpdateHeader; - Uint32 tuserptr; - BlockReference tuserblockref; - Uint32 tlqhPointer; - Uint32 tholdSentOp; - Uint32 tholdMore; - Uint32 tgdiPageindex; - Uint32 tiopIndex; - Uint32 tnciTmp; - Uint32 tullIndex; - Uint32 turlIndex; - Uint32 tlfrTmp1; - Uint32 tlfrTmp2; - Uint32 tscanTrid1; - Uint32 tscanTrid2; - - Uint32 ctest; - Uint32 clqhPtr; - BlockReference clqhBlockRef; - Uint32 cminusOne; - NodeId cmynodeid; - BlockReference cownBlockref; - BlockReference cndbcntrRef; - Uint16 csignalkey; - Uint32 czero; - Uint32 csystemRestart; - Uint32 cexcForward; - Uint32 cexcPageindex; - Uint32 cexcContainerptr; - Uint32 cexcContainerhead; - Uint32 cexcContainerlen; - Uint32 cexcElementptr; - Uint32 cexcPrevconptr; - Uint32 cexcMovedLen; - Uint32 cexcPrevpageptr; - Uint32 cexcPrevpageindex; - Uint32 cexcPrevforward; - Uint32 clocalkey[32]; - union { - Uint32 ckeys[2048 * MAX_XFRM_MULTIPLY]; - Uint64 ckeys_align; - }; - - Uint32 
c_errorInsert3000_TableId; - Uint32 c_memusage_report_frequency; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp deleted file mode 100644 index 29ea4801b7b..00000000000 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#define DBACC_C -#include "Dbacc.hpp" - -#define DEBUG(x) { ndbout << "ACC::" << x << endl; } - -void Dbacc::initData() -{ - cdirarraysize = ZDIRARRAY; - coprecsize = ZOPRECSIZE; - cpagesize = ZPAGESIZE; - ctablesize = ZTABLESIZE; - cfragmentsize = ZFRAGMENTSIZE; - cdirrangesize = ZDIRRANGESIZE; - coverflowrecsize = ZOVERFLOWRECSIZE; - cscanRecSize = ZSCAN_REC_SIZE; - - - dirRange = 0; - directoryarray = 0; - fragmentrec = 0; - operationrec = 0; - overflowRecord = 0; - page8 = 0; - scanRec = 0; - tabrec = 0; - - cnoOfAllocatedPages = cpagesize = 0; - // Records with constant sizes -}//Dbacc::initData() - -void Dbacc::initRecords() -{ - // Records with dynamic sizes - page8 = (Page8*)allocRecord("Page8", - sizeof(Page8), - cpagesize, - false, - CFG_DB_INDEX_MEM); - - operationrec = (Operationrec*)allocRecord("Operationrec", - sizeof(Operationrec), - coprecsize); - - dirRange = (DirRange*)allocRecord("DirRange", - sizeof(DirRange), - cdirrangesize); - - directoryarray = (Directoryarray*)allocRecord("Directoryarray", - sizeof(Directoryarray), - cdirarraysize); - - fragmentrec = (Fragmentrec*)allocRecord("Fragmentrec", - sizeof(Fragmentrec), - cfragmentsize); - - overflowRecord = (OverflowRecord*)allocRecord("OverflowRecord", - sizeof(OverflowRecord), - coverflowrecsize); - - scanRec = (ScanRec*)allocRecord("ScanRec", - sizeof(ScanRec), - cscanRecSize); - - tabrec = (Tabrec*)allocRecord("Tabrec", - sizeof(Tabrec), - ctablesize); -}//Dbacc::initRecords() - -Dbacc::Dbacc(Block_context& ctx): - SimulatedBlock(DBACC, ctx), - c_tup(0) -{ - BLOCK_CONSTRUCTOR(Dbacc); - - // Transit signals - addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD); - addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG); - addRecSignal(GSN_CONTINUEB, &Dbacc::execCONTINUEB); - addRecSignal(GSN_ACC_CHECK_SCAN, &Dbacc::execACC_CHECK_SCAN); - addRecSignal(GSN_EXPANDCHECK2, &Dbacc::execEXPANDCHECK2); - addRecSignal(GSN_SHRINKCHECK2, &Dbacc::execSHRINKCHECK2); - addRecSignal(GSN_READ_PSEUDO_REQ, &Dbacc::execREAD_PSEUDO_REQ); - - // Received signals - addRecSignal(GSN_STTOR, &Dbacc::execSTTOR); - addRecSignal(GSN_ACCKEYREQ, &Dbacc::execACCKEYREQ); - addRecSignal(GSN_ACCSEIZEREQ, &Dbacc::execACCSEIZEREQ); - addRecSignal(GSN_ACCFRAGREQ, &Dbacc::execACCFRAGREQ); - addRecSignal(GSN_NEXT_SCANREQ, &Dbacc::execNEXT_SCANREQ); - addRecSignal(GSN_ACC_ABORTREQ, &Dbacc::execACC_ABORTREQ); - addRecSignal(GSN_ACC_SCANREQ, &Dbacc::execACC_SCANREQ); - addRecSignal(GSN_ACCMINUPDATE, 
&Dbacc::execACCMINUPDATE); - addRecSignal(GSN_ACC_COMMITREQ, &Dbacc::execACC_COMMITREQ); - addRecSignal(GSN_ACC_TO_REQ, &Dbacc::execACC_TO_REQ); - addRecSignal(GSN_ACC_LOCKREQ, &Dbacc::execACC_LOCKREQ); - addRecSignal(GSN_NDB_STTOR, &Dbacc::execNDB_STTOR); - addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ); - addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true); - - initData(); - -#ifdef VM_TRACE - { - void* tmp[] = { &expDirRangePtr, - &gnsDirRangePtr, - &newDirRangePtr, - &rdDirRangePtr, - &nciOverflowrangeptr, - &expDirptr, - &rdDirptr, - &sdDirptr, - &nciOverflowDirptr, - &fragrecptr, - &operationRecPtr, - &idrOperationRecPtr, - &mlpqOperPtr, - &queOperPtr, - &readWriteOpPtr, - &iopOverflowRecPtr, - &tfoOverflowRecPtr, - &porOverflowRecPtr, - &priOverflowRecPtr, - &rorOverflowRecPtr, - &sorOverflowRecPtr, - &troOverflowRecPtr, - &ancPageptr, - &colPageptr, - &ccoPageptr, - &datapageptr, - &delPageptr, - &excPageptr, - &expPageptr, - &gdiPageptr, - &gePageptr, - &gflPageptr, - &idrPageptr, - &ilcPageptr, - &inpPageptr, - &iopPageptr, - &lastPageptr, - &lastPrevpageptr, - &lcnPageptr, - &lcnCopyPageptr, - &lupPageptr, - &ciPageidptr, - &gsePageidptr, - &isoPageptr, - &nciPageidptr, - &rsbPageidptr, - &rscPageidptr, - &slPageidptr, - &sscPageidptr, - &rlPageptr, - &rlpPageptr, - &ropPageptr, - &rpPageptr, - &slPageptr, - &spPageptr, - &scanPtr, - &tabptr - }; - init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0])); - } -#endif -}//Dbacc::Dbacc() - -Dbacc::~Dbacc() -{ - deallocRecord((void **)&dirRange, "DirRange", - sizeof(DirRange), - cdirrangesize); - - deallocRecord((void **)&directoryarray, "Directoryarray", - sizeof(Directoryarray), - cdirarraysize); - - deallocRecord((void **)&fragmentrec, "Fragmentrec", - sizeof(Fragmentrec), - cfragmentsize); - - deallocRecord((void **)&operationrec, "Operationrec", - sizeof(Operationrec), - coprecsize); - - deallocRecord((void **)&overflowRecord, "OverflowRecord", - sizeof(OverflowRecord), - coverflowrecsize); - - deallocRecord((void **)&page8, "Page8", - sizeof(Page8), - cpagesize); - - deallocRecord((void **)&scanRec, "ScanRec", - sizeof(ScanRec), - cscanRecSize); - - deallocRecord((void **)&tabrec, "Tabrec", - sizeof(Tabrec), - ctablesize); - }//Dbacc::~Dbacc() - -BLOCK_FUNCTIONS(Dbacc) diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp deleted file mode 100644 index da614b0276c..00000000000 --- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ /dev/null @@ -1,8549 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBACC_C -#include "Dbacc.hpp" -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// TO_DO_RONM is a label for comments on what needs to be improved in future versions -// when more time is given. - -#ifdef VM_TRACE -#define DEBUG(x) ndbout << "DBACC: "<< x << endl; -#else -#define DEBUG(x) -#endif - -#ifdef ACC_SAFE_QUEUE -#define vlqrequire(x) do { if (unlikely(!(x))) {\ - dump_lock_queue(loPtr); \ - ndbrequire(false); } } while(0) -#else -#define vlqrequire(x) ndbrequire(x) -#define dump_lock_queue(x) -#endif - - -// primary key is stored in TUP -#include "../dbtup/Dbtup.hpp" -#include "../dblqh/Dblqh.hpp" - - -// Signal entries and statement blocks -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* COMMON SIGNAL RECEPTION MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ - -/* --------------------------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* CONTINUEB CONTINUE SIGNAL */ -/* ******************------------------------------+ */ -/* SENDER: ACC, LEVEL B */ -void Dbacc::execCONTINUEB(Signal* signal) -{ - Uint32 tcase; - - jamEntry(); - tcase = signal->theData[0]; - tdata0 = signal->theData[1]; - tresult = 0; - switch (tcase) { - case ZINITIALISE_RECORDS: - jam(); - initialiseRecordsLab(signal, signal->theData[3], signal->theData[4]); - return; - break; - case ZSEND_SCAN_HBREP: - jam(); - sendScanHbRep(signal, tdata0); - break; - case ZREL_ROOT_FRAG: - { - jam(); - Uint32 tableId = signal->theData[1]; - releaseRootFragResources(signal, tableId); - break; - } - case ZREL_FRAG: - { - jam(); - Uint32 fragIndex = signal->theData[1]; - releaseFragResources(signal, fragIndex); - break; - } - case ZREL_DIR: - { - jam(); - Uint32 fragIndex = signal->theData[1]; - Uint32 dirIndex = signal->theData[2]; - Uint32 startIndex = signal->theData[3]; - releaseDirResources(signal, fragIndex, dirIndex, startIndex); - break; - } - case ZREPORT_MEMORY_USAGE:{ - jam(); - Uint32 cnt = signal->theData[1]; - static int c_currentMemUsed = 0; - int now = cpagesize ? (cnoOfAllocatedPages * 100)/cpagesize : 0; - const int thresholds[] = { 99, 90, 80, 0}; - - Uint32 i = 0; - const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]); - for(i = 0; i= thresholds[i]){ - now = thresholds[i]; - break; - } - } - - if(now != c_currentMemUsed || - (c_memusage_report_frequency && cnt + 1 == c_memusage_report_frequency)) - { - reportMemoryUsage(signal, - now > c_currentMemUsed ? 1 : - now < c_currentMemUsed ? 
-1 : 0); - cnt = 0; - c_currentMemUsed = now; - } - else - { - cnt ++; - } - signal->theData[0] = ZREPORT_MEMORY_USAGE; - signal->theData[1] = cnt; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); - return; - } - - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbacc::execCONTINUEB() - -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* END OF COMMON SIGNAL RECEPTION MODULE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* SYSTEM RESTART MODULE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -void Dbacc::execNDB_STTOR(Signal* signal) -{ - Uint32 tstartphase; - Uint32 tStartType; - - jamEntry(); - cndbcntrRef = signal->theData[0]; - cmynodeid = signal->theData[1]; - tstartphase = signal->theData[2]; - tStartType = signal->theData[3]; - switch (tstartphase) { - case ZSPH1: - jam(); - ndbsttorryLab(signal); - return; - break; - case ZSPH2: - ndbsttorryLab(signal); - return; - break; - case ZSPH3: - if ((tStartType == NodeState::ST_NODE_RESTART) || - (tStartType == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - //--------------------------------------------- - // csystemRestart is used to check what is needed - // during log execution. When starting a node it - // is not a log execution and rather a normal - // execution. Thus we reset the variable here to - // avoid unnecessary system crashes. 
- //--------------------------------------------- - csystemRestart = ZFALSE; - }//if - break; - case ZSPH6: - jam(); - csystemRestart = ZFALSE; - - signal->theData[0] = ZREPORT_MEMORY_USAGE; - signal->theData[1] = 0; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); - break; - default: - jam(); - /*empty*/; - break; - }//switch - ndbsttorryLab(signal); - return; -}//Dbacc::execNDB_STTOR() - -/* ******************--------------------------------------------------------------- */ -/* STTOR START / RESTART */ -/* ******************------------------------------+ */ -/* SENDER: ANY, LEVEL B */ -void Dbacc::execSTTOR(Signal* signal) -{ - jamEntry(); - Uint32 tstartphase = signal->theData[1]; - switch (tstartphase) { - case 1: - jam(); - ndbrequire((c_tup = (Dbtup*)globalData.getBlock(DBTUP)) != 0); - ndbrequire((c_lqh = (Dblqh*)globalData.getBlock(DBLQH)) != 0); - break; - } - tuserblockref = signal->theData[3]; - csignalkey = signal->theData[6]; - sttorrysignalLab(signal); - return; -}//Dbacc::execSTTOR() - -/* --------------------------------------------------------------------------------- */ -/* ZSPH1 */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::ndbrestart1Lab(Signal* signal) -{ - cmynodeid = globalData.ownId; - cownBlockref = numberToRef(DBACC, cmynodeid); - czero = 0; - cminusOne = czero - 1; - ctest = 0; - csystemRestart = ZTRUE; - return; -}//Dbacc::ndbrestart1Lab() - -void Dbacc::initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data) -{ - switch (tdata0) { - case 0: - jam(); - initialiseTableRec(signal); - break; - case 1: - case 2: - break; - case 3: - jam(); - break; - case 4: - jam(); - initialiseDirRec(signal); - break; - case 5: - jam(); - initialiseDirRangeRec(signal); - break; - case 6: - jam(); - initialiseFragRec(signal); - break; - case 7: - jam(); - initialiseOverflowRec(signal); - break; - case 8: - jam(); - initialiseOperationRec(signal); - break; - case 9: - jam(); - initialisePageRec(signal); - break; - case 10: - jam(); - break; - case 11: - jam(); - initialiseScanRec(signal); - break; - case 12: - jam(); - - { - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = data; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - } - return; - break; - default: - ndbrequire(false); - break; - }//switch - - signal->theData[0] = ZINITIALISE_RECORDS; - signal->theData[1] = tdata0 + 1; - signal->theData[2] = 0; - signal->theData[3] = ref; - signal->theData[4] = data; - sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); - return; -}//Dbacc::initialiseRecordsLab() - -/* *********************************<< */ -/* NDB_STTORRY */ -/* *********************************<< */ -void Dbacc::ndbsttorryLab(Signal* signal) -{ - signal->theData[0] = cownBlockref; - sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB); - return; -}//Dbacc::ndbsttorryLab() - -/* *********************************<< */ -/* SIZEALT_REP SIZE ALTERATION */ -/* *********************************<< */ -void Dbacc::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_RANGE, 
&cdirrangesize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_DIR_ARRAY, &cdirarraysize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_FRAGMENT, &cfragmentsize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OP_RECS, &coprecsize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OVERFLOW_RECS, - &coverflowrecsize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_PAGE8, &cpagesize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_TABLE, &ctablesize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_SCAN, &cscanRecSize)); - initRecords(); - ndbrestart1Lab(signal); - - c_memusage_report_frequency = 0; - ndb_mgm_get_int_parameter(p, CFG_DB_MEMREPORT_FREQUENCY, - &c_memusage_report_frequency); - - tdata0 = 0; - initialiseRecordsLab(signal, ref, senderData); - return; -}//Dbacc::execSIZEALT_REP() - -/* *********************************<< */ -/* STTORRY */ -/* *********************************<< */ -void Dbacc::sttorrysignalLab(Signal* signal) -{ - signal->theData[0] = csignalkey; - signal->theData[1] = 3; - /* BLOCK CATEGORY */ - signal->theData[2] = 2; - /* SIGNAL VERSION NUMBER */ - signal->theData[3] = ZSPH1; - signal->theData[4] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB); - /* END OF START PHASES */ - return; -}//Dbacc::sttorrysignalLab() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_DIR_REC */ -/* INITIALATES THE DIRECTORY RECORDS. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseDirRec(Signal* signal) -{ - DirectoryarrayPtr idrDirptr; - ndbrequire(cdirarraysize > 0); - for (idrDirptr.i = 0; idrDirptr.i < cdirarraysize; idrDirptr.i++) { - refresh_watch_dog(); - ptrAss(idrDirptr, directoryarray); - for (Uint32 i = 0; i <= 255; i++) { - idrDirptr.p->pagep[i] = RNIL; - }//for - }//for - cdirmemory = 0; - cfirstfreedir = RNIL; -}//Dbacc::initialiseDirRec() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_DIR_RANGE_REC */ -/* INITIALATES THE DIR_RANGE RECORDS. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseDirRangeRec(Signal* signal) -{ - DirRangePtr idrDirRangePtr; - - ndbrequire(cdirrangesize > 0); - for (idrDirRangePtr.i = 0; idrDirRangePtr.i < cdirrangesize; idrDirRangePtr.i++) { - refresh_watch_dog(); - ptrAss(idrDirRangePtr, dirRange); - idrDirRangePtr.p->dirArray[0] = idrDirRangePtr.i + 1; - for (Uint32 i = 1; i < 256; i++) { - idrDirRangePtr.p->dirArray[i] = RNIL; - }//for - }//for - idrDirRangePtr.i = cdirrangesize - 1; - ptrAss(idrDirRangePtr, dirRange); - idrDirRangePtr.p->dirArray[0] = RNIL; - cfirstfreeDirrange = 0; -}//Dbacc::initialiseDirRangeRec() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_FRAG_REC */ -/* INITIALATES THE FRAGMENT RECORDS. 
*/ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseFragRec(Signal* signal) -{ - FragmentrecPtr regFragPtr; - ndbrequire(cfragmentsize > 0); - for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) { - jam(); - refresh_watch_dog(); - ptrAss(regFragPtr, fragmentrec); - initFragGeneral(regFragPtr); - regFragPtr.p->nextfreefrag = regFragPtr.i + 1; - }//for - regFragPtr.i = cfragmentsize - 1; - ptrAss(regFragPtr, fragmentrec); - regFragPtr.p->nextfreefrag = RNIL; - cfirstfreefrag = 0; -}//Dbacc::initialiseFragRec() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_OPERATION_REC */ -/* INITIALATES THE OPERATION RECORDS. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseOperationRec(Signal* signal) -{ - ndbrequire(coprecsize > 0); - for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) { - refresh_watch_dog(); - ptrAss(operationRecPtr, operationrec); - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - operationRecPtr.p->nextOp = operationRecPtr.i + 1; - }//for - operationRecPtr.i = coprecsize - 1; - ptrAss(operationRecPtr, operationrec); - operationRecPtr.p->nextOp = RNIL; - cfreeopRec = 0; -}//Dbacc::initialiseOperationRec() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_OVERFLOW_REC */ -/* INITIALATES THE OVERFLOW RECORDS */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseOverflowRec(Signal* signal) -{ - OverflowRecordPtr iorOverflowRecPtr; - - ndbrequire(coverflowrecsize > 0); - for (iorOverflowRecPtr.i = 0; iorOverflowRecPtr.i < coverflowrecsize; iorOverflowRecPtr.i++) { - refresh_watch_dog(); - ptrAss(iorOverflowRecPtr, overflowRecord); - iorOverflowRecPtr.p->nextfreeoverrec = iorOverflowRecPtr.i + 1; - }//for - iorOverflowRecPtr.i = coverflowrecsize - 1; - ptrAss(iorOverflowRecPtr, overflowRecord); - iorOverflowRecPtr.p->nextfreeoverrec = RNIL; - cfirstfreeoverrec = 0; -}//Dbacc::initialiseOverflowRec() - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_PAGE_REC */ -/* INITIALATES THE PAGE RECORDS. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialisePageRec(Signal* signal) -{ - ndbrequire(cpagesize > 0); - cfreepage = 0; - cfirstfreepage = RNIL; - cnoOfAllocatedPages = 0; -}//Dbacc::initialisePageRec() - - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_ROOTFRAG_REC */ -/* INITIALATES THE ROOTFRAG RECORDS. */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_SCAN_REC */ -/* INITIALATES THE QUE_SCAN RECORDS. 
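The initialise*Rec routines above all build the same structure: records chained through a "next free" index, RNIL as the terminator, and a head index that seize/release pop from and push onto. A minimal sketch of that index-linked free-list pattern under simplified, hypothetical types (Pool, Rec); not the removed NDB source:

// Illustrative sketch of the free-list pattern used by the initialise*Rec,
// seize* and release* routines above. Hypothetical, simplified types.
#include <cassert>
#include <cstdint>
#include <vector>

static const uint32_t RNIL = 0xFFFFFFFF;

struct Rec { uint32_t nextFree; };

struct Pool {
  std::vector<Rec> recs;
  uint32_t firstFree;

  explicit Pool(uint32_t n) : recs(n), firstFree(0) {
    for (uint32_t i = 0; i < n; i++)
      recs[i].nextFree = i + 1;      // chain every record to its successor
    recs[n - 1].nextFree = RNIL;     // last record terminates the list
  }
  uint32_t seize() {                 // pop the head of the free list
    uint32_t i = firstFree;
    assert(i != RNIL);
    firstFree = recs[i].nextFree;
    return i;
  }
  void release(uint32_t i) {         // push the record back on the head
    recs[i].nextFree = firstFree;
    firstFree = i;
  }
};

int main()
{
  Pool pool(4);
  uint32_t a = pool.seize();
  uint32_t b = pool.seize();
  pool.release(a);
  assert(pool.seize() == a);         // LIFO reuse, as in releaseFragRecord()
  (void)b;
  return 0;
}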
*/ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseScanRec(Signal* signal) -{ - ndbrequire(cscanRecSize > 0); - for (scanPtr.i = 0; scanPtr.i < cscanRecSize; scanPtr.i++) { - ptrAss(scanPtr, scanRec); - scanPtr.p->scanNextfreerec = scanPtr.i + 1; - scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT; - scanPtr.p->scanTimer = 0; - scanPtr.p->scanContinuebCounter = 0; - }//for - scanPtr.i = cscanRecSize - 1; - ptrAss(scanPtr, scanRec); - scanPtr.p->scanNextfreerec = RNIL; - cfirstFreeScanRec = 0; -}//Dbacc::initialiseScanRec() - - -/* --------------------------------------------------------------------------------- */ -/* INITIALISE_TABLE_REC */ -/* INITIALATES THE TABLE RECORDS. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initialiseTableRec(Signal* signal) -{ - ndbrequire(ctablesize > 0); - for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) { - refresh_watch_dog(); - ptrAss(tabptr, tabrec); - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - tabptr.p->fragholder[i] = RNIL; - tabptr.p->fragptrholder[i] = RNIL; - }//for - }//for -}//Dbacc::initialiseTableRec() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF SYSTEM RESTART MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* ADD/DELETE FRAGMENT MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ - -// JONAS This methods "aer ett saall" -void Dbacc::execACCFRAGREQ(Signal* signal) -{ - const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; - jamEntry(); - if (ERROR_INSERTED(3001)) { - jam(); - addFragRefuse(signal, 1); - CLEAR_ERROR_INSERT_VALUE; - return; - } - tabptr.i = req->tableId; -#ifndef VM_TRACE - // config mismatch - do not crash if release compiled - if (tabptr.i >= ctablesize) { - jam(); - addFragRefuse(signal, 640); - return; - } -#endif - ptrCheckGuard(tabptr, ctablesize, tabrec); - ndbrequire((req->reqInfo & 0xF) == ZADDFRAG); - ndbrequire(!getfragmentrec(signal, fragrecptr, req->fragId)); - if (cfirstfreefrag == RNIL) { - jam(); - addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR); - return; - }//if - - seizeFragrec(signal); - initFragGeneral(fragrecptr); - initFragAdd(signal, fragrecptr); - - if (!addfragtotab(signal, fragrecptr.i, req->fragId)) { - jam(); - releaseFragRecord(signal, fragrecptr); - addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR); - return; - }//if - if (cfirstfreeDirrange == RNIL) { - jam(); - releaseFragRecord(signal, fragrecptr); - addFragRefuse(signal, ZDIR_RANGE_ERROR); - return; - } else { - jam(); - seizeDirrange(signal); - }//if - - fragrecptr.p->directory = newDirRangePtr.i; - seizeDirectory(signal); - if (tresult < ZLIMIT_OF_ERROR) { - jam(); - newDirRangePtr.p->dirArray[0] = sdDirptr.i; - } 
else { - jam(); - addFragRefuse(signal, tresult); - return; - }//if - - seizePage(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - addFragRefuse(signal, tresult); - return; - }//if - sdDirptr.p->pagep[0] = spPageptr.i; - tipPageId = 0; - inpPageptr = spPageptr; - initPage(signal); - if (cfirstfreeDirrange == RNIL) { - jam(); - addFragRefuse(signal, ZDIR_RANGE_ERROR); - return; - } else { - jam(); - seizeDirrange(signal); - }//if - fragrecptr.p->overflowdir = newDirRangePtr.i; - seizeDirectory(signal); - if (tresult < ZLIMIT_OF_ERROR) { - jam(); - newDirRangePtr.p->dirArray[0] = sdDirptr.i; - } else { - jam(); - addFragRefuse(signal, tresult); - return; - }//if - - Uint32 userPtr = req->userPtr; - BlockReference retRef = req->userRef; - fragrecptr.p->rootState = ACTIVEROOT; - - AccFragConf * const conf = (AccFragConf*)&signal->theData[0]; - conf->userPtr = userPtr; - conf->rootFragPtr = RNIL; - conf->fragId[0] = fragrecptr.p->fragmentid; - conf->fragId[1] = RNIL; - conf->fragPtr[0] = fragrecptr.i; - conf->fragPtr[1] = RNIL; - conf->rootHashCheck = fragrecptr.p->roothashcheck; - sendSignal(retRef, GSN_ACCFRAGCONF, signal, AccFragConf::SignalLength, JBB); -}//Dbacc::execACCFRAGREQ() - -void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode) -{ - const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; - AccFragRef * const ref = (AccFragRef*)&signal->theData[0]; - Uint32 userPtr = req->userPtr; - BlockReference retRef = req->userRef; - - ref->userPtr = userPtr; - ref->errorCode = errorCode; - sendSignal(retRef, GSN_ACCFRAGREF, signal, AccFragRef::SignalLength, JBB); - return; -}//Dbacc::addFragRefuseEarly() - -void -Dbacc::execDROP_TAB_REQ(Signal* signal){ - jamEntry(); - DropTabReq* req = (DropTabReq*)signal->getDataPtr(); - - TabrecPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctablesize, tabrec); - - tabPtr.p->tabUserRef = req->senderRef; - tabPtr.p->tabUserPtr = req->senderData; - - signal->theData[0] = ZREL_ROOT_FRAG; - signal->theData[1] = tabPtr.i; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); -} - -void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId) -{ - TabrecPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctablesize, tabrec); - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabPtr.p->fragholder[i] != RNIL) { - jam(); - tabPtr.p->fragholder[i] = RNIL; - releaseFragResources(signal, tabPtr.p->fragptrholder[i]); - return; - }//if - }//for - - /** - * Finished... 
- */ - - DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend(); - dropConf->senderRef = reference(); - dropConf->senderData = tabPtr.p->tabUserPtr; - dropConf->tableId = tabPtr.i; - sendSignal(tabPtr.p->tabUserRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); - - tabPtr.p->tabUserPtr = RNIL; - tabPtr.p->tabUserRef = 0; -}//Dbacc::releaseRootFragResources() - -void Dbacc::releaseFragResources(Signal* signal, Uint32 fragIndex) -{ - FragmentrecPtr regFragPtr; - regFragPtr.i = fragIndex; - ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec); - verifyFragCorrect(regFragPtr); - if (regFragPtr.p->directory != RNIL) { - jam(); - releaseDirResources(signal, regFragPtr.i, regFragPtr.p->directory, 0); - regFragPtr.p->directory = RNIL; - } else if (regFragPtr.p->overflowdir != RNIL) { - jam(); - releaseDirResources(signal, regFragPtr.i, regFragPtr.p->overflowdir, 0); - regFragPtr.p->overflowdir = RNIL; - } else if (regFragPtr.p->firstOverflowRec != RNIL) { - jam(); - releaseOverflowResources(signal, regFragPtr); - } else if (regFragPtr.p->firstFreeDirindexRec != RNIL) { - jam(); - releaseDirIndexResources(signal, regFragPtr); - } else { - jam(); - Uint32 tab = regFragPtr.p->mytabptr; - releaseFragRecord(signal, regFragPtr); - signal->theData[0] = ZREL_ROOT_FRAG; - signal->theData[1] = tab; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); - }//if -}//Dbacc::releaseFragResources() - -void Dbacc::verifyFragCorrect(FragmentrecPtr regFragPtr) -{ - ndbrequire(regFragPtr.p->lockOwnersList == RNIL); -}//Dbacc::verifyFragCorrect() - -void Dbacc::releaseDirResources(Signal* signal, - Uint32 fragIndex, - Uint32 dirIndex, - Uint32 startIndex) -{ - DirRangePtr regDirRangePtr; - regDirRangePtr.i = dirIndex; - ptrCheckGuard(regDirRangePtr, cdirrangesize, dirRange); - for (Uint32 i = startIndex; i < 256; i++) { - jam(); - if (regDirRangePtr.p->dirArray[i] != RNIL) { - jam(); - Uint32 directoryIndex = regDirRangePtr.p->dirArray[i]; - regDirRangePtr.p->dirArray[i] = RNIL; - releaseDirectoryResources(signal, fragIndex, dirIndex, (i + 1), directoryIndex); - return; - }//if - }//for - rdDirRangePtr = regDirRangePtr; - releaseDirrange(signal); - signal->theData[0] = ZREL_FRAG; - signal->theData[1] = fragIndex; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); -}//Dbacc::releaseDirResources() - -void Dbacc::releaseDirectoryResources(Signal* signal, - Uint32 fragIndex, - Uint32 dirIndex, - Uint32 startIndex, - Uint32 directoryIndex) -{ - DirectoryarrayPtr regDirPtr; - regDirPtr.i = directoryIndex; - ptrCheckGuard(regDirPtr, cdirarraysize, directoryarray); - for (Uint32 i = 0; i < 256; i++) { - jam(); - if (regDirPtr.p->pagep[i] != RNIL) { - jam(); - rpPageptr.i = regDirPtr.p->pagep[i]; - ptrCheckGuard(rpPageptr, cpagesize, page8); - releasePage(signal); - regDirPtr.p->pagep[i] = RNIL; - }//if - }//for - rdDirptr = regDirPtr; - releaseDirectory(signal); - signal->theData[0] = ZREL_DIR; - signal->theData[1] = fragIndex; - signal->theData[2] = dirIndex; - signal->theData[3] = startIndex; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 4, JBB); -}//Dbacc::releaseDirectoryResources() - -void Dbacc::releaseOverflowResources(Signal* signal, FragmentrecPtr regFragPtr) -{ - Uint32 loopCount = 0; - OverflowRecordPtr regOverflowRecPtr; - while ((regFragPtr.p->firstOverflowRec != RNIL) && - (loopCount < 1)) { - jam(); - regOverflowRecPtr.i = regFragPtr.p->firstOverflowRec; - ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord); - regFragPtr.p->firstOverflowRec = 
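The release*Resources chain above frees a bounded amount of work per invocation and then posts CONTINUEB back to itself instead of looping to completion, so the block never stalls the scheduler. A small sketch of that self-continuation pattern, with a std::queue standing in for the signal scheduler; all names are hypothetical and this is not the removed NDB source:

// Illustrative sketch: do a bounded step of teardown, then enqueue a
// "continue" message to finish later, in the spirit of ZREL_FRAG / CONTINUEB.
#include <cstdio>
#include <queue>

enum Step { REL_FRAG };

struct Msg { Step step; int remaining; };

int main()
{
  std::queue<Msg> signals;                     // stands in for the scheduler
  signals.push({REL_FRAG, 5});

  while (!signals.empty()) {
    Msg m = signals.front();
    signals.pop();
    if (m.step == REL_FRAG && m.remaining > 0) {
      std::printf("released one unit, %d left\n", m.remaining - 1);
      signals.push({REL_FRAG, m.remaining - 1});  // continue in a later step
    } else {
      std::printf("fragment released, reporting back\n");
    }
  }
  return 0;
}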
regOverflowRecPtr.p->nextOverRec; - rorOverflowRecPtr = regOverflowRecPtr; - releaseOverflowRec(signal); - loopCount++; - }//while - signal->theData[0] = ZREL_FRAG; - signal->theData[1] = regFragPtr.i; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); -}//Dbacc::releaseOverflowResources() - -void Dbacc::releaseDirIndexResources(Signal* signal, FragmentrecPtr regFragPtr) -{ - Uint32 loopCount = 0; - OverflowRecordPtr regOverflowRecPtr; - while ((regFragPtr.p->firstFreeDirindexRec != RNIL) && - (loopCount < 1)) { - jam(); - regOverflowRecPtr.i = regFragPtr.p->firstFreeDirindexRec; - ptrCheckGuard(regOverflowRecPtr, coverflowrecsize, overflowRecord); - regFragPtr.p->firstFreeDirindexRec = regOverflowRecPtr.p->nextOverList; - rorOverflowRecPtr = regOverflowRecPtr; - releaseOverflowRec(signal); - loopCount++; - }//while - signal->theData[0] = ZREL_FRAG; - signal->theData[1] = regFragPtr.i; - sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); -}//Dbacc::releaseDirIndexResources() - -void Dbacc::releaseFragRecord(Signal* signal, FragmentrecPtr regFragPtr) -{ - regFragPtr.p->nextfreefrag = cfirstfreefrag; - cfirstfreefrag = regFragPtr.i; - initFragGeneral(regFragPtr); -}//Dbacc::releaseFragRecord() - -/* -------------------------------------------------------------------------- */ -/* ADDFRAGTOTAB */ -/* DESCRIPTION: PUTS A FRAGMENT ID AND A POINTER TO ITS RECORD INTO */ -/* TABLE ARRRAY OF THE TABLE RECORD. */ -/* -------------------------------------------------------------------------- */ -bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragholder[i] == RNIL) { - jam(); - tabptr.p->fragholder[i] = fid; - tabptr.p->fragptrholder[i] = rootIndex; - return true; - }//if - }//for - return false; -}//Dbacc::addfragtotab() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF ADD/DELETE FRAGMENT MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* CONNECTION MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* ACCSEIZEREQ SEIZE REQ */ -/* SENDER: LQH, LEVEL B */ -/* ENTER ACCSEIZEREQ WITH */ -/* TUSERPTR , CONECTION PTR OF LQH */ -/* TUSERBLOCKREF BLOCK REFERENCE OF LQH */ -/* ******************--------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* ACCSEIZEREQ SEIZE REQ */ -/* ******************------------------------------+ */ -/* SENDER: LQH, LEVEL B */ -void Dbacc::execACCSEIZEREQ(Signal* signal) -{ - jamEntry(); - tuserptr = signal->theData[0]; - /* CONECTION PTR OF LQH */ - tuserblockref = 
signal->theData[1]; - /* BLOCK REFERENCE OF LQH */ - tresult = 0; - if (cfreeopRec == RNIL) { - jam(); - refaccConnectLab(signal); - return; - }//if - seizeOpRec(signal); - ptrGuard(operationRecPtr); - operationRecPtr.p->userptr = tuserptr; - operationRecPtr.p->userblockref = tuserblockref; - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - /* ******************************< */ - /* ACCSEIZECONF */ - /* ******************************< */ - signal->theData[0] = tuserptr; - signal->theData[1] = operationRecPtr.i; - sendSignal(tuserblockref, GSN_ACCSEIZECONF, signal, 2, JBB); - return; -}//Dbacc::execACCSEIZEREQ() - -void Dbacc::refaccConnectLab(Signal* signal) -{ - tresult = ZCONNECT_SIZE_ERROR; - /* ******************************< */ - /* ACCSEIZEREF */ - /* ******************************< */ - signal->theData[0] = tuserptr; - signal->theData[1] = tresult; - sendSignal(tuserblockref, GSN_ACCSEIZEREF, signal, 2, JBB); - return; -}//Dbacc::refaccConnectLab() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF CONNECTION MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* EXECUTE OPERATION MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* INIT_OP_REC */ -/* INFORMATION WHICH IS RECIEVED BY ACCKEYREQ WILL BE SAVED */ -/* IN THE OPERATION RECORD. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initOpRec(Signal* signal) -{ - register Uint32 Treqinfo; - - Treqinfo = signal->theData[2]; - - operationRecPtr.p->hashValue = signal->theData[3]; - operationRecPtr.p->tupkeylen = signal->theData[4]; - operationRecPtr.p->xfrmtupkeylen = signal->theData[4]; - operationRecPtr.p->transId1 = signal->theData[5]; - operationRecPtr.p->transId2 = signal->theData[6]; - - Uint32 readFlag = (((Treqinfo >> 4) & 0x3) == 0); // Only 1 if Read - Uint32 dirtyFlag = (((Treqinfo >> 6) & 0x1) == 1); // Only 1 if Dirty - Uint32 dirtyReadFlag = readFlag & dirtyFlag; - - Uint32 opbits = 0; - opbits |= Treqinfo & 0x7; - opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_LOCK_MODE : 0; - opbits |= ((Treqinfo >> 4) & 0x3) ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= (dirtyReadFlag) ? (Uint32) Operationrec::OP_DIRTY_READ : 0; - opbits |= ((Treqinfo >> 31) & 0x1) ? 
(Uint32) Operationrec::OP_LOCK_REQ : 0; - - //operationRecPtr.p->nodeType = (Treqinfo >> 7) & 0x3; - operationRecPtr.p->fid = fragrecptr.p->myfid; - operationRecPtr.p->fragptr = fragrecptr.i; - operationRecPtr.p->nextParallelQue = RNIL; - operationRecPtr.p->prevParallelQue = RNIL; - operationRecPtr.p->nextSerialQue = RNIL; - operationRecPtr.p->prevSerialQue = RNIL; - operationRecPtr.p->elementPage = RNIL; - operationRecPtr.p->scanRecPtr = RNIL; - operationRecPtr.p->m_op_bits = opbits; - - // bit to mark lock operation - // undo log is not run via ACCKEYREQ - -}//Dbacc::initOpRec() - -/* --------------------------------------------------------------------------------- */ -/* SEND_ACCKEYCONF */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::sendAcckeyconf(Signal* signal) -{ - signal->theData[0] = operationRecPtr.p->userptr; - signal->theData[1] = operationRecPtr.p->m_op_bits & Operationrec::OP_MASK; - signal->theData[2] = operationRecPtr.p->fid; - signal->theData[3] = operationRecPtr.p->localdata[0]; - signal->theData[4] = operationRecPtr.p->localdata[1]; - signal->theData[5] = fragrecptr.p->localkeylen; -}//Dbacc::sendAcckeyconf() - - -void -Dbacc::ACCKEY_error(Uint32 fromWhere) -{ - switch(fromWhere) { - case 0: - ndbrequire(false); - case 1: - ndbrequire(false); - case 2: - ndbrequire(false); - case 3: - ndbrequire(false); - case 4: - ndbrequire(false); - case 5: - ndbrequire(false); - case 6: - ndbrequire(false); - case 7: - ndbrequire(false); - case 8: - ndbrequire(false); - case 9: - ndbrequire(false); - default: - ndbrequire(false); - }//switch -}//Dbacc::ACCKEY_error() - -/* ******************--------------------------------------------------------------- */ -/* ACCKEYREQ REQUEST FOR INSERT, DELETE, */ -/* RERAD AND UPDATE, A TUPLE. */ -/* SENDER: LQH, LEVEL B */ -/* SIGNAL DATA: OPERATION_REC_PTR, CONNECTION PTR */ -/* TABPTR, TABLE ID = TABLE RECORD POINTER */ -/* TREQINFO, */ -/* THASHVALUE, HASH VALUE OF THE TUP */ -/* TKEYLEN, LENGTH OF THE PRIMARY KEYS */ -/* TKEY1, PRIMARY KEY 1 */ -/* TKEY2, PRIMARY KEY 2 */ -/* TKEY3, PRIMARY KEY 3 */ -/* TKEY4, PRIMARY KEY 4 */ -/* ******************--------------------------------------------------------------- */ -void Dbacc::execACCKEYREQ(Signal* signal) -{ - jamEntry(); - operationRecPtr.i = signal->theData[0]; /* CONNECTION PTR */ - fragrecptr.i = signal->theData[1]; /* FRAGMENT RECORD POINTER */ - if (!((operationRecPtr.i < coprecsize) || - (fragrecptr.i < cfragmentsize))) { - ACCKEY_error(0); - return; - }//if - ptrAss(operationRecPtr, operationrec); - ptrAss(fragrecptr, fragmentrec); - - ndbrequire(operationRecPtr.p->m_op_bits == Operationrec::OP_INITIAL); - - initOpRec(signal); - // normalize key if any char attr - if (operationRecPtr.p->tupkeylen && fragrecptr.p->hasCharAttr) - xfrmKeyData(signal); - - /*---------------------------------------------------------------*/ - /* */ - /* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */ - /* PAGE AND MEMORY PAGE INDEX TO START THE SEARCH WITHIN. */ - /* WE REMEMBER THESE ADDRESS IF WE LATER NEED TO INSERT */ - /* THE ITEM AFTER NOT FINDING THE ITEM. 
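initOpRec above unpacks the packed request-info word: operation type in the low bits, lock mode at bits 4-5, the dirty flag at bit 6, and a "dirty read" only when the lock-mode bits are zero. A compact sketch of that unpacking under assumed flag values; the bit positions follow the shifts used above, everything else is hypothetical:

// Illustrative sketch of unpacking a request-info word into operation flags.
#include <cstdint>
#include <cstdio>

enum : uint32_t {
  OP_LOCK_MODE  = 0x0010,   // exclusive lock requested
  OP_DIRTY_READ = 0x0080
};

static uint32_t unpack(uint32_t reqinfo)
{
  uint32_t op       = reqinfo & 0x7;          // operation type
  uint32_t lockMode = (reqinfo >> 4) & 0x3;   // non-zero means locking access
  uint32_t dirty    = (reqinfo >> 6) & 0x1;

  uint32_t opbits = op;
  if (lockMode)           opbits |= OP_LOCK_MODE;
  if (dirty && !lockMode) opbits |= OP_DIRTY_READ;  // dirty read = read + dirty
  return opbits;
}

int main()
{
  std::printf("opbits = 0x%04x\n", unpack(1u << 6));  // a dirty read request
  return 0;
}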
*/ - /*---------------------------------------------------------------*/ - OperationrecPtr lockOwnerPtr; - const Uint32 found = getElement(signal, lockOwnerPtr); - - Uint32 opbits = operationRecPtr.p->m_op_bits; - Uint32 op = opbits & Operationrec::OP_MASK; - if (found == ZTRUE) - { - switch (op) { - case ZREAD: - case ZUPDATE: - case ZDELETE: - case ZWRITE: - case ZSCAN_OP: - if (!lockOwnerPtr.p) - { - if(op == ZWRITE) - { - jam(); - opbits &= ~(Uint32)Operationrec::OP_MASK; - opbits |= (op = ZUPDATE); - operationRecPtr.p->m_op_bits = opbits; // store to get correct ACCKEYCONF - } - opbits |= Operationrec::OP_STATE_RUNNING; - opbits |= Operationrec::OP_RUN_QUEUE; - sendAcckeyconf(signal); - if (! (opbits & Operationrec::OP_DIRTY_READ)) { - /*---------------------------------------------------------------*/ - // It is not a dirty read. We proceed by locking and continue with - // the operation. - /*---------------------------------------------------------------*/ - Uint32 eh = gePageptr.p->word32[tgeElementptr]; - operationRecPtr.p->scanBits = ElementHeader::getScanBits(eh); - operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(eh); - operationRecPtr.p->elementPage = gePageptr.i; - operationRecPtr.p->elementContainer = tgeContainerptr; - operationRecPtr.p->elementPointer = tgeElementptr; - operationRecPtr.p->elementIsforward = tgeForward; - - eh = ElementHeader::setLocked(operationRecPtr.i); - dbgWord32(gePageptr, tgeElementptr, eh); - gePageptr.p->word32[tgeElementptr] = eh; - - opbits |= Operationrec::OP_LOCK_OWNER; - insertLockOwnersList(signal, operationRecPtr); - } else { - jam(); - /*---------------------------------------------------------------*/ - // It is a dirty read. We do not lock anything. Set state to - // IDLE since no COMMIT call will come. 
- /*---------------------------------------------------------------*/ - opbits = Operationrec::OP_EXECUTED_DIRTY_READ; - }//if - operationRecPtr.p->m_op_bits = opbits; - return; - } else { - jam(); - accIsLockedLab(signal, lockOwnerPtr); - return; - }//if - break; - case ZINSERT: - jam(); - insertExistElemLab(signal, lockOwnerPtr); - return; - break; - default: - ndbrequire(false); - break; - }//switch - } else if (found == ZFALSE) { - switch (op){ - case ZWRITE: - opbits &= ~(Uint32)Operationrec::OP_MASK; - opbits |= (op = ZINSERT); - case ZINSERT: - jam(); - opbits |= Operationrec::OP_INSERT_IS_DONE; - opbits |= Operationrec::OP_STATE_RUNNING; - opbits |= Operationrec::OP_RUN_QUEUE; - operationRecPtr.p->m_op_bits = opbits; - insertelementLab(signal); - return; - break; - case ZREAD: - case ZUPDATE: - case ZDELETE: - case ZSCAN_OP: - jam(); - acckeyref1Lab(signal, ZREAD_ERROR); - return; - break; - default: - ndbrequire(false); - break; - }//switch - } else { - jam(); - acckeyref1Lab(signal, found); - return; - }//if - return; -}//Dbacc::execACCKEYREQ() - -void -Dbacc::execACCKEY_ORD(Signal* signal, Uint32 opPtrI) -{ - jamEntry(); - OperationrecPtr lastOp; - lastOp.i = opPtrI; - ptrCheckGuard(lastOp, coprecsize, operationrec); - Uint32 opbits = lastOp.p->m_op_bits; - Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; - - if (likely(opbits == Operationrec::OP_EXECUTED_DIRTY_READ)) - { - jam(); - lastOp.p->m_op_bits = Operationrec::OP_INITIAL; - return; - } - else if (likely(opstate == Operationrec::OP_STATE_RUNNING)) - { - opbits |= Operationrec::OP_STATE_EXECUTED; - lastOp.p->m_op_bits = opbits; - startNext(signal, lastOp); - return; - } - else - { - } - - ndbout_c("bits: %.8x state: %.8x", opbits, opstate); - ndbrequire(false); -} - -void -Dbacc::startNext(Signal* signal, OperationrecPtr lastOp) -{ - jam(); - OperationrecPtr nextOp; - OperationrecPtr loPtr; - nextOp.i = lastOp.p->nextParallelQue; - loPtr.i = lastOp.p->m_lock_owner_ptr_i; - Uint32 opbits = lastOp.p->m_op_bits; - - if ((opbits & Operationrec::OP_STATE_MASK)!= Operationrec::OP_STATE_EXECUTED) - { - jam(); - return; - } - - Uint32 nextbits; - if (nextOp.i != RNIL) - { - jam(); - ptrCheckGuard(nextOp, coprecsize, operationrec); - nextbits = nextOp.p->m_op_bits; - goto checkop; - } - - if ((opbits & Operationrec::OP_LOCK_OWNER) == 0) - { - jam(); - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - else - { - jam(); - loPtr = lastOp; - } - - nextOp.i = loPtr.p->nextSerialQue; - ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - - if (nextOp.i == RNIL) - { - jam(); - return; - } - - /** - * There is an op in serie queue... - * Check if it can run - */ - ptrCheckGuard(nextOp, coprecsize, operationrec); - nextbits = nextOp.p->m_op_bits; - - { - const bool same = nextOp.p->is_same_trans(lastOp.p); - - if (!same && ((opbits & Operationrec::OP_ACC_LOCK_MODE) || - (nextbits & Operationrec::OP_LOCK_MODE))) - { - jam(); - /** - * Not same transaction - * and either last had exclusive lock - * or next had exclusive lock - */ - return; - } - - /** - * same trans and X-lock - */ - if (same && (opbits & Operationrec::OP_ACC_LOCK_MODE)) - { - jam(); - goto upgrade; - } - } - - /** - * all shared lock... 
- */ - if ((opbits & Operationrec::OP_ACC_LOCK_MODE) == 0 && - (nextbits & Operationrec::OP_LOCK_MODE) == 0) - { - jam(); - goto upgrade; - } - - /** - * There is a shared parallell queue & and exclusive op is first in queue - */ - ndbassert((opbits & Operationrec::OP_ACC_LOCK_MODE) == 0 && - (nextbits & Operationrec::OP_LOCK_MODE)); - - /** - * We must check if there are many transactions in parallel queue... - */ - OperationrecPtr tmp; - tmp= loPtr; - while (tmp.i != RNIL) - { - ptrCheckGuard(tmp, coprecsize, operationrec); - if (!nextOp.p->is_same_trans(tmp.p)) - { - jam(); - /** - * parallel queue contained another transaction, dont let it run - */ - return; - } - tmp.i = tmp.p->nextParallelQue; - } - -upgrade: - /** - * Move first op in serie queue to end of parallell queue - */ - - tmp.i = loPtr.p->nextSerialQue = nextOp.p->nextSerialQue; - loPtr.p->m_lo_last_parallel_op_ptr_i = nextOp.i; - nextOp.p->nextSerialQue = RNIL; - nextOp.p->prevSerialQue = RNIL; - nextOp.p->m_lock_owner_ptr_i = loPtr.i; - nextOp.p->prevParallelQue = lastOp.i; - lastOp.p->nextParallelQue = nextOp.i; - - if (tmp.i != RNIL) - { - jam(); - ptrCheckGuard(tmp, coprecsize, operationrec); - tmp.p->prevSerialQue = loPtr.i; - } - else - { - jam(); - loPtr.p->m_lo_last_serial_op_ptr_i = RNIL; - } - - nextbits |= Operationrec::OP_RUN_QUEUE; - - /** - * Currently no grouping of ops in serie queue - */ - ndbrequire(nextOp.p->nextParallelQue == RNIL); - -checkop: - Uint32 errCode = 0; - OperationrecPtr save = operationRecPtr; - operationRecPtr = nextOp; - - Uint32 lastop = opbits & Operationrec::OP_MASK; - Uint32 nextop = nextbits & Operationrec::OP_MASK; - - nextbits &= nextbits & ~(Uint32)Operationrec::OP_STATE_MASK; - nextbits |= Operationrec::OP_STATE_RUNNING; - - if (lastop == ZDELETE) - { - jam(); - if (nextop != ZINSERT && nextop != ZWRITE) - { - errCode = ZREAD_ERROR; - goto ref; - } - - nextbits &= ~(Uint32)Operationrec::OP_MASK; - nextbits &= ~(Uint32)Operationrec::OP_ELEMENT_DISAPPEARED; - nextbits |= (nextop = ZINSERT); - nextbits |= Operationrec::OP_INSERT_IS_DONE; - goto conf; - } - else if (nextop == ZINSERT) - { - jam(); - errCode = ZWRITE_ERROR; - goto ref; - } - else if (nextop == ZWRITE) - { - jam(); - nextbits &= ~(Uint32)Operationrec::OP_MASK; - nextbits |= (nextop = ZUPDATE); - goto conf; - } - else - { - jam(); - } - -conf: - nextOp.p->m_op_bits = nextbits; - nextOp.p->localdata[0] = lastOp.p->localdata[0]; - nextOp.p->localdata[1] = lastOp.p->localdata[1]; - - if (nextop == ZSCAN_OP && (nextbits & Operationrec::OP_LOCK_REQ) == 0) - { - jam(); - takeOutScanLockQueue(nextOp.p->scanRecPtr); - putReadyScanQueue(signal, nextOp.p->scanRecPtr); - } - else - { - jam(); - fragrecptr.i = nextOp.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - - sendAcckeyconf(signal); - sendSignal(nextOp.p->userblockref, GSN_ACCKEYCONF, - signal, 6, JBB); - } - - operationRecPtr = save; - return; - -ref: - nextOp.p->m_op_bits = nextbits; - - if (nextop == ZSCAN_OP && (nextbits & Operationrec::OP_LOCK_REQ) == 0) - { - jam(); - nextOp.p->m_op_bits |= Operationrec::OP_ELEMENT_DISAPPEARED; - takeOutScanLockQueue(nextOp.p->scanRecPtr); - putReadyScanQueue(signal, nextOp.p->scanRecPtr); - } - else - { - jam(); - signal->theData[0] = nextOp.p->userptr; - signal->theData[1] = errCode; - sendSignal(nextOp.p->userblockref, GSN_ACCKEYREF, signal, - 2, JBB); - } - - operationRecPtr = save; - return; -} - - -#if 0 -void -Dbacc::execACCKEY_REP_REF(Signal* signal, Uint32 opPtrI) -{ -} -#endif - -void 
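startNext above lets the first waiting request leave the serial queue only if it is compatible with what is already running: the same transaction may always join the parallel queue, while different transactions may share only if neither side holds or requests an exclusive lock. A minimal sketch of that compatibility rule with simplified, hypothetical types (Req, canRunInParallel); not the removed NDB source:

// Illustrative sketch of the parallel-vs-serial queue decision.
#include <cstdio>

struct Req { unsigned transId; bool exclusive; };

static bool canRunInParallel(const Req& holder, const Req& next)
{
  if (holder.transId == next.transId)
    return true;                                  // same transaction: parallel
  return !holder.exclusive && !next.exclusive;    // strangers: shared only
}

int main()
{
  Req holder{1, false}, sharedReader{2, false}, writer{2, true};
  std::printf("reader joins parallel queue: %d\n",
              canRunInParallel(holder, sharedReader));   // 1
  std::printf("writer joins parallel queue: %d\n",
              canRunInParallel(holder, writer));         // 0
  return 0;
}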
-Dbacc::xfrmKeyData(Signal* signal) -{ - Uint32 table = fragrecptr.p->myTableId; - Uint32 dst[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY]; - Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]; - Uint32* src = &signal->theData[7]; - Uint32 len = xfrm_key(table, src, dst, sizeof(dst) >> 2, keyPartLen); - ndbrequire(len); // 0 means error - memcpy(src, dst, len << 2); - operationRecPtr.p->xfrmtupkeylen = len; -} - -void -Dbacc::accIsLockedLab(Signal* signal, OperationrecPtr lockOwnerPtr) -{ - ndbrequire(csystemRestart == ZFALSE); - - Uint32 bits = operationRecPtr.p->m_op_bits; - validate_lock_queue(lockOwnerPtr); - - if ((bits & Operationrec::OP_DIRTY_READ) == 0){ - Uint32 return_result; - if ((bits & Operationrec::OP_LOCK_MODE) == ZREADLOCK) { - jam(); - return_result = placeReadInLockQueue(lockOwnerPtr); - } else { - jam(); - return_result = placeWriteInLockQueue(lockOwnerPtr); - }//if - if (return_result == ZPARALLEL_QUEUE) { - jam(); - sendAcckeyconf(signal); - return; - } else if (return_result == ZSERIAL_QUEUE) { - jam(); - signal->theData[0] = RNIL; - return; - } else { - jam(); - acckeyref1Lab(signal, return_result); - return; - }//if - ndbrequire(false); - } - else - { - if (!(lockOwnerPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED) && - lockOwnerPtr.p->localdata[0] != ~(Uint32)0) - { - jam(); - /* --------------------------------------------------------------- - * It is a dirty read. We do not lock anything. Set state to - *IDLE since no COMMIT call will arrive. - * ---------------------------------------------------------------*/ - sendAcckeyconf(signal); - operationRecPtr.p->m_op_bits = Operationrec::OP_EXECUTED_DIRTY_READ; - return; - } - else - { - jam(); - /*---------------------------------------------------------------*/ - // The tuple does not exist in the committed world currently. - // Report read error. 
- /*---------------------------------------------------------------*/ - acckeyref1Lab(signal, ZREAD_ERROR); - return; - }//if - }//if -}//Dbacc::accIsLockedLab() - -/* ------------------------------------------------------------------------ */ -/* I N S E R T E X I S T E L E M E N T */ -/* ------------------------------------------------------------------------ */ -void Dbacc::insertExistElemLab(Signal* signal, OperationrecPtr lockOwnerPtr) -{ - if (!lockOwnerPtr.p) - { - jam(); - acckeyref1Lab(signal, ZWRITE_ERROR);/* THE ELEMENT ALREADY EXIST */ - return; - }//if - accIsLockedLab(signal, lockOwnerPtr); -}//Dbacc::insertExistElemLab() - -/* --------------------------------------------------------------------------------- */ -/* INSERTELEMENT */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::insertelementLab(Signal* signal) -{ - if (fragrecptr.p->firstOverflowRec == RNIL) { - jam(); - allocOverflowPage(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - acckeyref1Lab(signal, tresult); - return; - }//if - }//if - ndbrequire(operationRecPtr.p->tupkeylen <= fragrecptr.p->keyLength); - ndbassert(!(operationRecPtr.p->m_op_bits & Operationrec::OP_LOCK_REQ)); - Uint32 localKey = ~(Uint32)0; - - insertLockOwnersList(signal, operationRecPtr); - - const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; - operationRecPtr.p->hashvaluePart = - (operationRecPtr.p->hashValue >> tmp) & 0xFFFF; - operationRecPtr.p->scanBits = 0; /* NOT ANY ACTIVE SCAN */ - tidrElemhead = ElementHeader::setLocked(operationRecPtr.i); - idrPageptr = gdiPageptr; - tidrPageindex = tgdiPageindex; - tidrForward = ZTRUE; - idrOperationRecPtr = operationRecPtr; - clocalkey[0] = localKey; - operationRecPtr.p->localdata[0] = localKey; - /* ----------------------------------------------------------------------- */ - /* WE SET THE LOCAL KEY TO MINUS ONE TO INDICATE IT IS NOT YET VALID. 
*/ - /* ----------------------------------------------------------------------- */ - insertElement(signal); - sendAcckeyconf(signal); - return; -}//Dbacc::insertelementLab() - - -/* ------------------------------------------------------------------------ */ -/* GET_NO_PARALLEL_TRANSACTION */ -/* ------------------------------------------------------------------------ */ -Uint32 -Dbacc::getNoParallelTransaction(const Operationrec * op) -{ - OperationrecPtr tmp; - - tmp.i= op->nextParallelQue; - Uint32 transId[2] = { op->transId1, op->transId2 }; - while (tmp.i != RNIL) - { - jam(); - ptrCheckGuard(tmp, coprecsize, operationrec); - if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1]) - tmp.i = tmp.p->nextParallelQue; - else - return 2; - } - return 1; -}//Dbacc::getNoParallelTransaction() - -#ifdef VM_TRACE -Uint32 -Dbacc::getNoParallelTransactionFull(const Operationrec * op) -{ - ConstPtr tmp; - - tmp.p = op; - while ((tmp.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) - { - tmp.i = tmp.p->prevParallelQue; - if (tmp.i != RNIL) - { - ptrCheckGuard(tmp, coprecsize, operationrec); - } - else - { - break; - } - } - - return getNoParallelTransaction(tmp.p); -} -#endif - -#ifdef ACC_SAFE_QUEUE - -Uint32 -Dbacc::get_parallel_head(OperationrecPtr opPtr) -{ - while ((opPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && - opPtr.p->prevParallelQue != RNIL) - { - opPtr.i = opPtr.p->prevParallelQue; - ptrCheckGuard(opPtr, coprecsize, operationrec); - } - - return opPtr.i; -} - -bool -Dbacc::validate_lock_queue(OperationrecPtr opPtr) -{ - OperationrecPtr loPtr; - loPtr.i = get_parallel_head(opPtr); - ptrCheckGuard(loPtr, coprecsize, operationrec); - - while((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && - loPtr.p->prevSerialQue != RNIL) - { - loPtr.i = loPtr.p->prevSerialQue; - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - - // Now we have lock owner... - vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_RUN_QUEUE); - - // 1 Validate page pointer - { - Page8Ptr pagePtr; - pagePtr.i = loPtr.p->elementPage; - ptrCheckGuard(pagePtr, cpagesize, page8); - arrGuard(loPtr.p->elementPointer, 2048); - Uint32 eh = pagePtr.p->word32[loPtr.p->elementPointer]; - vlqrequire(ElementHeader::getLocked(eh)); - vlqrequire(ElementHeader::getOpPtrI(eh) == loPtr.i); - } - - // 2 Lock owner should always have same LOCK_MODE and ACC_LOCK_MODE - if (loPtr.p->m_op_bits & Operationrec::OP_LOCK_MODE) - { - vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); - } - else - { - vlqrequire((loPtr.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE) == 0); - } - - // 3 Lock owner should never be waiting... - bool running = false; - { - Uint32 opstate = loPtr.p->m_op_bits & Operationrec::OP_STATE_MASK; - if (opstate == Operationrec::OP_STATE_RUNNING) - running = true; - else - { - vlqrequire(opstate == Operationrec::OP_STATE_EXECUTED); - } - } - - // Validate parallel queue - { - bool many = false; - bool orlockmode = loPtr.p->m_op_bits & Operationrec::OP_LOCK_MODE; - OperationrecPtr lastP = loPtr; - - while (lastP.p->nextParallelQue != RNIL) - { - Uint32 prev = lastP.i; - lastP.i = lastP.p->nextParallelQue; - ptrCheckGuard(lastP, coprecsize, operationrec); - - vlqrequire(lastP.p->prevParallelQue == prev); - - Uint32 opbits = lastP.p->m_op_bits; - many |= loPtr.p->is_same_trans(lastP.p) ? 
0 : 1; - orlockmode |= !!(opbits & Operationrec::OP_LOCK_MODE); - - vlqrequire(opbits & Operationrec::OP_RUN_QUEUE); - vlqrequire((opbits & Operationrec::OP_LOCK_OWNER) == 0); - - Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; - if (running) - { - // If I found a running operation, - // all following should be waiting - vlqrequire(opstate == Operationrec::OP_STATE_WAITING); - } - else - { - if (opstate == Operationrec::OP_STATE_RUNNING) - running = true; - else - vlqrequire(opstate == Operationrec::OP_STATE_EXECUTED); - } - - if (lastP.p->m_op_bits & Operationrec::OP_LOCK_MODE) - { - vlqrequire(lastP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); - } - else - { - vlqrequire((lastP.p->m_op_bits && orlockmode) == orlockmode); - vlqrequire((lastP.p->m_op_bits & Operationrec::OP_MASK) == ZREAD || - (lastP.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP); - } - - if (many) - { - vlqrequire(orlockmode == 0); - } - } - - if (lastP.i != loPtr.i) - { - vlqrequire(loPtr.p->m_lo_last_parallel_op_ptr_i == lastP.i); - vlqrequire(lastP.p->m_lock_owner_ptr_i == loPtr.i); - } - else - { - vlqrequire(loPtr.p->m_lo_last_parallel_op_ptr_i == RNIL); - } - } - - // Validate serie queue - if (loPtr.p->nextSerialQue != RNIL) - { - Uint32 prev = loPtr.i; - OperationrecPtr lastS; - lastS.i = loPtr.p->nextSerialQue; - while (true) - { - ptrCheckGuard(lastS, coprecsize, operationrec); - vlqrequire(lastS.p->prevSerialQue == prev); - vlqrequire(getNoParallelTransaction(lastS.p) == 1); - vlqrequire((lastS.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0); - vlqrequire((lastS.p->m_op_bits & Operationrec::OP_RUN_QUEUE) == 0); - vlqrequire((lastS.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_WAITING); - if (lastS.p->nextSerialQue == RNIL) - break; - prev = lastS.i; - lastS.i = lastS.p->nextSerialQue; - } - - vlqrequire(loPtr.p->m_lo_last_serial_op_ptr_i == lastS.i); - } - else - { - vlqrequire(loPtr.p->m_lo_last_serial_op_ptr_i == RNIL); - } - return true; -} - -NdbOut& -operator<<(NdbOut & out, Dbacc::OperationrecPtr ptr) -{ - Uint32 opbits = ptr.p->m_op_bits; - out << "[ " << dec << ptr.i - << " [ " << hex << ptr.p->transId1 - << " " << hex << ptr.p->transId2 << "] " - << " bits: H'" << hex << opbits << " "; - - bool read = false; - switch(opbits & Dbacc::Operationrec::OP_MASK){ - case ZREAD: out << "READ "; read = true; break; - case ZINSERT: out << "INSERT "; break; - case ZUPDATE: out << "UPDATE "; break; - case ZDELETE: out << "DELETE "; break; - case ZWRITE: out << "WRITE "; break; - case ZSCAN_OP: out << "SCAN "; read = true; break; - default: - out << " "; - } - - if (read) - { - if (opbits & Dbacc::Operationrec::OP_LOCK_MODE) - out << "(X)"; - else - out << "(S)"; - if (opbits & Dbacc::Operationrec::OP_ACC_LOCK_MODE) - out << "(X)"; - else - out << "(S)"; - } - - if (opbits) - { - out << "(RQ)"; - } - - switch(opbits & Dbacc::Operationrec::OP_STATE_MASK){ - case Dbacc::Operationrec::OP_STATE_WAITING: - out << " WAITING "; break; - case Dbacc::Operationrec::OP_STATE_RUNNING: - out << " RUNNING "; break; - case Dbacc::Operationrec::OP_STATE_EXECUTED: - out << " EXECUTED "; break; - case Dbacc::Operationrec::OP_STATE_IDLE: - out << " IDLE "; break; - default: - out << " "; - } - -/* - OP_MASK = 0x000F // 4 bits for operation type - ,OP_LOCK_MODE = 0x0010 // 0 - shared lock, 1 = exclusive lock - ,OP_ACC_LOCK_MODE = 0x0020 // Or:de lock mode of all operation - // before me - ,OP_LOCK_OWNER = 0x0040 - ,OP_DIRTY_READ = 0x0080 - ,OP_LOCK_REQ = 0x0100 // isAccLockReq - 
,OP_COMMIT_DELETE_CHECK = 0x0200 - ,OP_INSERT_IS_DONE = 0x0400 - ,OP_ELEMENT_DISAPPEARED = 0x0800 - - ,OP_STATE_MASK = 0xF000 - ,OP_STATE_IDLE = 0xF000 - ,OP_STATE_WAITING = 0x0000 - ,OP_STATE_RUNNING = 0x1000 - ,OP_STATE_EXECUTED = 0x3000 - }; -*/ - if (opbits & Dbacc::Operationrec::OP_LOCK_OWNER) - out << "LO "; - - if (opbits & Dbacc::Operationrec::OP_DIRTY_READ) - out << "DR "; - - if (opbits & Dbacc::Operationrec::OP_LOCK_REQ) - out << "LOCK_REQ "; - - if (opbits & Dbacc::Operationrec::OP_COMMIT_DELETE_CHECK) - out << "COMMIT_DELETE_CHECK "; - - if (opbits & Dbacc::Operationrec::OP_INSERT_IS_DONE) - out << "INSERT_IS_DONE "; - - if (opbits & Dbacc::Operationrec::OP_ELEMENT_DISAPPEARED) - out << "ELEMENT_DISAPPEARED "; - - if (opbits & Dbacc::Operationrec::OP_LOCK_OWNER) - { - out << "last_parallel: " << dec << ptr.p->m_lo_last_parallel_op_ptr_i << " "; - out << "last_serial: " << dec << ptr.p->m_lo_last_serial_op_ptr_i << " "; - } - - out << "]"; - return out; -} - -void -Dbacc::dump_lock_queue(OperationrecPtr loPtr) -{ - if ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) - { - while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && - loPtr.p->prevParallelQue != RNIL) - { - loPtr.i = loPtr.p->prevParallelQue; - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - - while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && - loPtr.p->prevSerialQue != RNIL) - { - loPtr.i = loPtr.p->prevSerialQue; - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - - ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - } - - ndbout << "-- HEAD --" << endl; - OperationrecPtr tmp = loPtr; - while (tmp.i != RNIL) - { - ptrCheckGuard(tmp, coprecsize, operationrec); - ndbout << tmp << " "; - tmp.i = tmp.p->nextParallelQue; - - if (tmp.i == loPtr.i) - { - ndbout << " "; - break; - } - } - ndbout << endl; - - tmp.i = loPtr.p->nextSerialQue; - while (tmp.i != RNIL) - { - ptrCheckGuard(tmp, coprecsize, operationrec); - OperationrecPtr tmp2 = tmp; - - if (tmp.i == loPtr.i) - { - ndbout << "" << endl; - break; - } - - while (tmp2.i != RNIL) - { - ptrCheckGuard(tmp2, coprecsize, operationrec); - ndbout << tmp2 << " "; - tmp2.i = tmp2.p->nextParallelQue; - - if (tmp2.i == tmp.i) - { - ndbout << ""; - break; - } - } - ndbout << endl; - tmp.i = tmp.p->nextSerialQue; - } -} -#endif - -/* ------------------------------------------------------------------------- - * PLACE_WRITE_IN_LOCK_QUEUE - * INPUT: OPERATION_REC_PTR OUR OPERATION POINTER - * QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER - * PWI_PAGEPTR PAGE POINTER OF ELEMENT - * TPWI_ELEMENTPTR ELEMENT POINTER OF ELEMENT - * OUTPUT TRESULT = - * ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE - * OPERATION CAN PROCEED NOW. 
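The operator<< dump above decodes m_op_bits for debugging using the layout listed in the comment (low nibble operation, 0x0010 lock mode, 0xF000 state field). A small sketch of the same kind of decoding; the mapping of numeric operation codes to names is a hypothetical placeholder, and this is not the removed NDB source:

// Illustrative sketch: turn a packed operation-state word into a readable string.
#include <cstdint>
#include <cstdio>
#include <string>

static std::string describe(uint32_t opbits)
{
  static const char* const ops[] =
    { "READ", "INSERT", "UPDATE", "DELETE", "WRITE" };   // placeholder mapping
  std::string s;
  uint32_t op = opbits & 0x000F;
  s += (op < 5) ? ops[op] : "OTHER";
  s += (opbits & 0x0010) ? " (X)" : " (S)";              // lock mode bit
  switch (opbits & 0xF000) {                              // state field
  case 0x0000: s += " WAITING";  break;
  case 0x1000: s += " RUNNING";  break;
  case 0x3000: s += " EXECUTED"; break;
  case 0xF000: s += " IDLE";     break;
  default:     s += " ?";        break;
  }
  return s;
}

int main()
{
  std::printf("%s\n", describe(0x1002).c_str());   // e.g. "UPDATE (S) RUNNING"
  return 0;
}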
- * ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE - * ERROR CODE OPERATION NEEDS ABORTING - * ------------------------------------------------------------------------- */ -Uint32 -Dbacc::placeWriteInLockQueue(OperationrecPtr lockOwnerPtr) -{ - OperationrecPtr lastOpPtr; - lastOpPtr.i = lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i; - Uint32 opbits = operationRecPtr.p->m_op_bits; - - if (lastOpPtr.i == RNIL) - { - lastOpPtr = lockOwnerPtr; - } - else - { - ptrCheckGuard(lastOpPtr, coprecsize, operationrec); - } - - ndbassert(get_parallel_head(lastOpPtr) == lockOwnerPtr.i); - - Uint32 lastbits = lastOpPtr.p->m_op_bits; - if (lastbits & Operationrec::OP_ACC_LOCK_MODE) - { - if(operationRecPtr.p->is_same_trans(lastOpPtr.p)) - { - goto checkop; - } - } - else - { - /** - * We dont have an exclusive lock on operation and - * - */ - jam(); - - /** - * Scan parallell queue to see if we are the only one - */ - OperationrecPtr loopPtr = lockOwnerPtr; - do - { - ptrCheckGuard(loopPtr, coprecsize, operationrec); - if (!loopPtr.p->is_same_trans(operationRecPtr.p)) - { - goto serial; - } - loopPtr.i = loopPtr.p->nextParallelQue; - } while (loopPtr.i != RNIL); - - goto checkop; - } - -serial: - jam(); - placeSerialQueue(lockOwnerPtr, operationRecPtr); - - validate_lock_queue(lockOwnerPtr); - - return ZSERIAL_QUEUE; - -checkop: - /* - WE ARE PERFORMING AN READ EXCLUSIVE, INSERT, UPDATE OR DELETE IN THE SAME - TRANSACTION WHERE WE PREVIOUSLY HAVE EXECUTED AN OPERATION. - Read-All, Update-All, Insert-All and Delete-Insert are allowed - combinations. - Delete-Read, Delete-Update and Delete-Delete are not an allowed - combination and will result in tuple not found error. - */ - Uint32 lstate = lastbits & Operationrec::OP_STATE_MASK; - - Uint32 retValue = ZSERIAL_QUEUE; // So that it gets blocked... - if (lstate == Operationrec::OP_STATE_EXECUTED) - { - jam(); - - /** - * Since last operation has executed...we can now check operation types - * if not, we have to wait until it has executed - */ - Uint32 op = opbits & Operationrec::OP_MASK; - Uint32 lop = lastbits & Operationrec::OP_MASK; - if (op == ZINSERT && lop != ZDELETE) - { - jam(); - return ZWRITE_ERROR; - }//if - - /** - * NOTE. No checking op operation types, as one can read different save - * points... - */ -#if 0 - if (lop == ZDELETE && (op != ZINSERT && op != ZWRITE)) - { - jam(); - return ZREAD_ERROR; - } -#else - if (lop == ZDELETE && (op == ZUPDATE && op == ZDELETE)) - { - jam(); - return ZREAD_ERROR; - } -#endif - - if(op == ZWRITE) - { - opbits &= ~(Uint32)Operationrec::OP_MASK; - opbits |= (lop == ZDELETE) ? 
ZINSERT : ZUPDATE; - } - - opbits |= Operationrec::OP_STATE_RUNNING; - operationRecPtr.p->localdata[0] = lastOpPtr.p->localdata[0]; - operationRecPtr.p->localdata[1] = lastOpPtr.p->localdata[1]; - retValue = ZPARALLEL_QUEUE; - } - - opbits |= Operationrec::OP_RUN_QUEUE; - operationRecPtr.p->m_op_bits = opbits; - operationRecPtr.p->prevParallelQue = lastOpPtr.i; - operationRecPtr.p->m_lock_owner_ptr_i = lockOwnerPtr.i; - lastOpPtr.p->nextParallelQue = operationRecPtr.i; - lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i = operationRecPtr.i; - - validate_lock_queue(lockOwnerPtr); - - return retValue; -}//Dbacc::placeWriteInLockQueue() - -Uint32 -Dbacc::placeReadInLockQueue(OperationrecPtr lockOwnerPtr) -{ - OperationrecPtr lastOpPtr; - OperationrecPtr loopPtr = lockOwnerPtr; - lastOpPtr.i = lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i; - Uint32 opbits = operationRecPtr.p->m_op_bits; - - if (lastOpPtr.i == RNIL) - { - lastOpPtr = lockOwnerPtr; - } - else - { - ptrCheckGuard(lastOpPtr, coprecsize, operationrec); - } - - ndbassert(get_parallel_head(lastOpPtr) == lockOwnerPtr.i); - - /** - * Last operation in parallell queue of lock owner is same trans - * and ACC_LOCK_MODE is exlusive, then we can proceed - */ - Uint32 lastbits = lastOpPtr.p->m_op_bits; - bool same = operationRecPtr.p->is_same_trans(lastOpPtr.p); - if (same && (lastbits & Operationrec::OP_ACC_LOCK_MODE)) - { - jam(); - opbits |= Operationrec::OP_LOCK_MODE; // Upgrade to X-lock - goto checkop; - } - - if ((lastbits & Operationrec::OP_ACC_LOCK_MODE) && !same) - { - jam(); - /** - * Last op in serial queue had X-lock and was not our transaction... - */ - goto serial; - } - - if (lockOwnerPtr.p->nextSerialQue == RNIL) - { - jam(); - goto checkop; - } - - /** - * Scan parallell queue to see if we are already there... - */ - do - { - ptrCheckGuard(loopPtr, coprecsize, operationrec); - if (loopPtr.p->is_same_trans(operationRecPtr.p)) - goto checkop; - loopPtr.i = loopPtr.p->nextParallelQue; - } while (loopPtr.i != RNIL); - -serial: - placeSerialQueue(lockOwnerPtr, operationRecPtr); - - validate_lock_queue(lockOwnerPtr); - - return ZSERIAL_QUEUE; - -checkop: - Uint32 lstate = lastbits & Operationrec::OP_STATE_MASK; - - Uint32 retValue = ZSERIAL_QUEUE; // So that it gets blocked... - if (lstate == Operationrec::OP_STATE_EXECUTED) - { - jam(); - - /** - * NOTE. No checking op operation types, as one can read different save - * points... - */ - -#if 0 - /** - * Since last operation has executed...we can now check operation types - * if not, we have to wait until it has executed - */ - if (lop == ZDELETE) - { - jam(); - return ZREAD_ERROR; - } -#endif - - opbits |= Operationrec::OP_STATE_RUNNING; - operationRecPtr.p->localdata[0] = lastOpPtr.p->localdata[0]; - operationRecPtr.p->localdata[1] = lastOpPtr.p->localdata[1]; - retValue = ZPARALLEL_QUEUE; - } - opbits |= (lastbits & Operationrec::OP_ACC_LOCK_MODE); - opbits |= Operationrec::OP_RUN_QUEUE; - operationRecPtr.p->m_op_bits = opbits; - - operationRecPtr.p->prevParallelQue = lastOpPtr.i; - operationRecPtr.p->m_lock_owner_ptr_i = lockOwnerPtr.i; - lastOpPtr.p->nextParallelQue = operationRecPtr.i; - lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i = operationRecPtr.i; - - validate_lock_queue(lockOwnerPtr); - - return retValue; -}//Dbacc::placeReadInLockQueue - -void Dbacc::placeSerialQueue(OperationrecPtr lockOwnerPtr, - OperationrecPtr opPtr) -{ - OperationrecPtr lastOpPtr; - lastOpPtr.i = lockOwnerPtr.p->m_lo_last_serial_op_ptr_i; - - if (lastOpPtr.i == RNIL) - { - // Lock owner is last... 
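placeWriteInLockQueue above enforces the same-transaction combination rules spelled out in its comment: an INSERT is only legal after a DELETE, anything other than INSERT/WRITE after a DELETE fails with "tuple not found", and a WRITE degenerates to INSERT or UPDATE depending on the previous operation. A minimal sketch of that rule with hypothetical enum and error names; not the removed NDB source:

// Illustrative sketch of the allowed operation combinations within one transaction.
#include <cstdio>

enum Op { READ, INSERT, UPDATE, DELETE_OP, WRITE };
enum Result { OK, ERR_ALREADY_EXISTS, ERR_NOT_FOUND };

static Result combine(Op previous, Op& next)
{
  if (next == WRITE)                          // WRITE adapts to what came before
    next = (previous == DELETE_OP) ? INSERT : UPDATE;
  if (next == INSERT && previous != DELETE_OP)
    return ERR_ALREADY_EXISTS;                // row still exists: insert refused
  if (previous == DELETE_OP && next != INSERT)
    return ERR_NOT_FOUND;                     // row already deleted in this trans
  return OK;
}

int main()
{
  Op n = WRITE;
  std::printf("DELETE then WRITE -> %s (becomes op %d)\n",
              combine(DELETE_OP, n) == OK ? "ok" : "error", (int)n);  // INSERT
  n = UPDATE;
  std::printf("DELETE then UPDATE -> %s\n",
              combine(DELETE_OP, n) == OK ? "ok" : "error");          // error
  return 0;
}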
- ndbrequire(lockOwnerPtr.p->nextSerialQue == RNIL); - lastOpPtr = lockOwnerPtr; - } - else - { - ptrCheckGuard(lastOpPtr, coprecsize, operationrec); - } - - operationRecPtr.p->prevSerialQue = lastOpPtr.i; - lastOpPtr.p->nextSerialQue = opPtr.i; - lockOwnerPtr.p->m_lo_last_serial_op_ptr_i = opPtr.i; -} - -/* ------------------------------------------------------------------------- */ -/* ACC KEYREQ END */ -/* ------------------------------------------------------------------------- */ -void Dbacc::acckeyref1Lab(Signal* signal, Uint32 result_code) -{ - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - /* ************************<< */ - /* ACCKEYREF */ - /* ************************<< */ - signal->theData[0] = cminusOne; - signal->theData[1] = result_code; - return; -}//Dbacc::acckeyref1Lab() - -/* ******************----------------------------------------------------- */ -/* ACCMINUPDATE UPDATE LOCAL KEY REQ */ -/* DESCRIPTION: UPDATES LOCAL KEY OF AN ELEMENTS IN THE HASH TABLE */ -/* THIS SIGNAL IS WAITED AFTER ANY INSERT REQ */ -/* ENTER ACCMINUPDATE WITH SENDER: LQH, LEVEL B */ -/* OPERATION_REC_PTR, OPERATION RECORD PTR */ -/* CLOCALKEY(0), LOCAL KEY 1 */ -/* CLOCALKEY(1) LOCAL KEY 2 */ -/* ******************----------------------------------------------------- */ -void Dbacc::execACCMINUPDATE(Signal* signal) -{ - Page8Ptr ulkPageidptr; - Uint32 tulkLocalPtr; - Uint32 tlocalkey1, tlocalkey2; - - jamEntry(); - operationRecPtr.i = signal->theData[0]; - tlocalkey1 = signal->theData[1]; - tlocalkey2 = signal->theData[2]; - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - Uint32 opbits = operationRecPtr.p->m_op_bits; - fragrecptr.i = operationRecPtr.p->fragptr; - ulkPageidptr.i = operationRecPtr.p->elementPage; - tulkLocalPtr = operationRecPtr.p->elementPointer + - operationRecPtr.p->elementIsforward; - - if ((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_RUNNING) - { - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - ptrCheckGuard(ulkPageidptr, cpagesize, page8); - dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey1); - arrGuard(tulkLocalPtr, 2048); - ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey1; - operationRecPtr.p->localdata[0] = tlocalkey1; - if (likely(fragrecptr.p->localkeylen == 1)) - { - return; - } - else if (fragrecptr.p->localkeylen == 2) - { - jam(); - tulkLocalPtr = tulkLocalPtr + operationRecPtr.p->elementIsforward; - operationRecPtr.p->localdata[1] = tlocalkey2; - dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey2); - arrGuard(tulkLocalPtr, 2048); - ulkPageidptr.p->word32[tulkLocalPtr] = tlocalkey2; - return; - } else { - jam(); - }//if - }//if - ndbrequire(false); -}//Dbacc::execACCMINUPDATE() - -/* ******************--------------------------------------------------------------- */ -/* ACC_COMMITREQ COMMIT TRANSACTION */ -/* SENDER: LQH, LEVEL B */ -/* INPUT: OPERATION_REC_PTR , */ -/* ******************--------------------------------------------------------------- */ -void Dbacc::execACC_COMMITREQ(Signal* signal) -{ - Uint8 Toperation; - jamEntry(); - Uint32 tmp = operationRecPtr.i = signal->theData[0]; - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - void* ptr = operationRecPtr.p; - Uint32 opbits = operationRecPtr.p->m_op_bits; - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - Toperation = opbits & Operationrec::OP_MASK; - commitOperation(signal); - ndbassert(operationRecPtr.i == tmp); - ndbassert(operationRecPtr.p == ptr); - operationRecPtr.p->m_op_bits = 
Operationrec::OP_INITIAL; - if(Toperation != ZREAD){ - fragrecptr.p->m_commit_count++; - if (Toperation != ZINSERT) { - if (Toperation != ZDELETE) { - return; - } else { - jam(); - fragrecptr.p->noOfElements--; - fragrecptr.p->slack += fragrecptr.p->elementLength; - if (fragrecptr.p->slack > fragrecptr.p->slackCheck) { - /* TIME FOR JOIN BUCKETS PROCESS */ - if (fragrecptr.p->expandCounter > 0) { - if (fragrecptr.p->expandFlag < 2) { - jam(); - signal->theData[0] = fragrecptr.i; - signal->theData[1] = fragrecptr.p->p; - signal->theData[2] = fragrecptr.p->maxp; - signal->theData[3] = fragrecptr.p->expandFlag; - fragrecptr.p->expandFlag = 2; - sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB); - }//if - }//if - }//if - }//if - } else { - jam(); /* EXPAND PROCESS HANDLING */ - fragrecptr.p->noOfElements++; - fragrecptr.p->slack -= fragrecptr.p->elementLength; - if (fragrecptr.p->slack >= (1u << 31)) { - /* IT MEANS THAT IF SLACK < ZERO */ - if (fragrecptr.p->expandFlag == 0) { - jam(); - fragrecptr.p->expandFlag = 2; - signal->theData[0] = fragrecptr.i; - signal->theData[1] = fragrecptr.p->p; - signal->theData[2] = fragrecptr.p->maxp; - sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB); - }//if - }//if - } - } - return; -}//Dbacc::execACC_COMMITREQ() - -/* ******************------------------------------------------------------- */ -/* ACC ABORT REQ ABORT ALL OPERATION OF THE TRANSACTION */ -/* ******************------------------------------+ */ -/* SENDER: LQH, LEVEL B */ -/* ******************------------------------------------------------------- */ -/* ACC ABORT REQ ABORT TRANSACTION */ -/* ******************------------------------------+ */ -/* SENDER: LQH, LEVEL B */ -void Dbacc::execACC_ABORTREQ(Signal* signal) -{ - jamEntry(); - operationRecPtr.i = signal->theData[0]; - Uint32 sendConf = signal->theData[1]; - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - fragrecptr.i = operationRecPtr.p->fragptr; - Uint32 opbits = operationRecPtr.p->m_op_bits; - Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; - tresult = 0; /* ZFALSE */ - - if (opbits == Operationrec::OP_EXECUTED_DIRTY_READ) - { - jam(); - } - else if (opstate == Operationrec::OP_STATE_EXECUTED || - opstate == Operationrec::OP_STATE_WAITING || - opstate == Operationrec::OP_STATE_RUNNING) - { - jam(); - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - abortOperation(signal); - } - - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - - signal->theData[0] = operationRecPtr.p->userptr; - signal->theData[1] = 0; - switch(sendConf){ - case 0: - return; - case 2: - if (opstate != Operationrec::OP_STATE_RUNNING) - { - return; - } - case 1: - sendSignal(operationRecPtr.p->userblockref, GSN_ACC_ABORTCONF, - signal, 1, JBB); - } - - signal->theData[1] = RNIL; -} - -/* - * Lock or unlock tuple. 
- */ -void Dbacc::execACC_LOCKREQ(Signal* signal) -{ - jamEntry(); - AccLockReq* sig = (AccLockReq*)signal->getDataPtrSend(); - AccLockReq reqCopy = *sig; - AccLockReq* const req = &reqCopy; - Uint32 lockOp = (req->requestInfo & 0xFF); - if (lockOp == AccLockReq::LockShared || - lockOp == AccLockReq::LockExclusive) { - jam(); - // find table - tabptr.i = req->tableId; - ptrCheckGuard(tabptr, ctablesize, tabrec); - // find fragment (TUX will know it) - if (req->fragPtrI == RNIL) { - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragholder[i] == req->fragId){ - jam(); - req->fragPtrI = tabptr.p->fragptrholder[i]; - break; - } - } - } - fragrecptr.i = req->fragPtrI; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - ndbrequire(req->fragId == fragrecptr.p->myfid); - // caller must be explicit here - ndbrequire(req->accOpPtr == RNIL); - // seize operation to hold the lock - if (cfreeopRec != RNIL) { - jam(); - seizeOpRec(signal); - // init as in ACCSEIZEREQ - operationRecPtr.p->userptr = req->userPtr; - operationRecPtr.p->userblockref = req->userRef; - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - operationRecPtr.p->scanRecPtr = RNIL; - // do read with lock via ACCKEYREQ - Uint32 lockMode = (lockOp == AccLockReq::LockShared) ? 0 : 1; - Uint32 opCode = ZSCAN_OP; - signal->theData[0] = operationRecPtr.i; - signal->theData[1] = fragrecptr.i; - signal->theData[2] = opCode | (lockMode << 4) | (1u << 31); - signal->theData[3] = req->hashValue; - signal->theData[4] = 0; // search local key - signal->theData[5] = req->transId1; - signal->theData[6] = req->transId2; - // enter local key in place of PK - signal->theData[7] = req->tupAddr; - EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, 8); - // translate the result - if (signal->theData[0] < RNIL) { - jam(); - req->returnCode = AccLockReq::Success; - req->accOpPtr = operationRecPtr.i; - } else if (signal->theData[0] == RNIL) { - jam(); - req->returnCode = AccLockReq::IsBlocked; - req->accOpPtr = operationRecPtr.i; - } else { - ndbrequire(signal->theData[0] == (UintR)-1); - releaseOpRec(signal); - req->returnCode = AccLockReq::Refused; - req->accOpPtr = RNIL; - } - } else { - jam(); - req->returnCode = AccLockReq::NoFreeOp; - } - *sig = *req; - return; - } - if (lockOp == AccLockReq::Unlock) { - jam(); - // do unlock via ACC_COMMITREQ (immediate) - signal->theData[0] = req->accOpPtr; - EXECUTE_DIRECT(DBACC, GSN_ACC_COMMITREQ, signal, 1); - releaseOpRec(signal); - req->returnCode = AccLockReq::Success; - *sig = *req; - return; - } - if (lockOp == AccLockReq::Abort) { - jam(); - // do abort via ACC_ABORTREQ (immediate) - signal->theData[0] = req->accOpPtr; - signal->theData[1] = 0; // Dont send abort - execACC_ABORTREQ(signal); - releaseOpRec(signal); - req->returnCode = AccLockReq::Success; - *sig = *req; - return; - } - if (lockOp == AccLockReq::AbortWithConf) { - jam(); - // do abort via ACC_ABORTREQ (with conf signal) - signal->theData[0] = req->accOpPtr; - signal->theData[1] = 1; // send abort - execACC_ABORTREQ(signal); - releaseOpRec(signal); - req->returnCode = AccLockReq::Success; - *sig = *req; - return; - } - ndbrequire(false); -} - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF EXECUTE OPERATION MODULE */ -/* */ -/* 
--------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* MODULE: INSERT */ -/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY INSERT_ELEMENT. THIS */ -/* ROUTINE IS THE SOLE INTERFACE TO INSERT ELEMENTS INTO THE INDEX. */ -/* CURRENT USERS ARE INSERT REQUESTS, EXPAND CONTAINER AND SHRINK */ -/* CONTAINER. */ -/* */ -/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */ -/* INSERT_ELEMENT */ -/* INSERT_CONTAINER */ -/* ADDNEWCONTAINER */ -/* GETFREELIST */ -/* INCREASELISTCONT */ -/* SEIZE_LEFTLIST */ -/* SEIZE_RIGHTLIST */ -/* */ -/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */ -/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */ -/* TAKE_REC_OUT_OF_FREE_OVERPAGE AND RELEASE_OVERFLOW_REC ARE */ -/* EXCEPTIONS TO THIS RULE. */ -/* */ -/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */ -/* THOSE DEFINED AS INPUT AND OUTPUT IN INSERT_ELEMENT */ -/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */ -/* AND POINTER VARIABLES. */ -/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */ -/* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */ -/* EXECUTION. */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* INSERT_ELEMENT */ -/* INPUT: */ -/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */ -/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */ -/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */ -/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */ -/* CIDR_KEYS(ARRAY OF TUPLE KEYS) */ -/* CLOCALKEY(ARRAY OF LOCAL KEYS). 
*/ -/* FRAGRECPTR */ -/* IDR_OPERATION_REC_PTR */ -/* TIDR_KEY_LEN */ -/* */ -/* OUTPUT: */ -/* TIDR_PAGEINDEX (PAGE INDEX OF INSERTED ELEMENT) */ -/* IDR_PAGEPTR (PAGE POINTER OF INSERTED ELEMENT) */ -/* TIDR_FORWARD (CONTAINER DIRECTION OF INSERTED ELEMENT) */ -/* NONE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::insertElement(Signal* signal) -{ - DirRangePtr inrOverflowrangeptr; - DirectoryarrayPtr inrOverflowDirptr; - OverflowRecordPtr inrOverflowRecPtr; - Page8Ptr inrNewPageptr; - Uint32 tinrNextSamePage; - Uint32 tinrTmp; - - do { - insertContainer(signal); - if (tidrResult != ZFALSE) { - jam(); - return; - /* INSERTION IS DONE, OR */ - /* AN ERROR IS DETECTED */ - }//if - if (((tidrContainerhead >> 7) & 0x3) != 0) { - tinrNextSamePage = (tidrContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */ - /* THE NEXT CONTAINER IS IN THE SAME PAGE */ - tidrPageindex = tidrContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */ - if (((tidrContainerhead >> 7) & 3) == ZLEFT) { - jam(); - tidrForward = ZTRUE; - } else if (((tidrContainerhead >> 7) & 3) == ZRIGHT) { - jam(); - tidrForward = cminusOne; - } else { - ndbrequire(false); - return; - }//if - if (tinrNextSamePage == ZFALSE) { - jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */ - tinrTmp = idrPageptr.p->word32[tidrContainerptr + 1]; - inrOverflowrangeptr.i = fragrecptr.p->overflowdir; - ptrCheckGuard(inrOverflowrangeptr, cdirrangesize, dirRange); - arrGuard((tinrTmp >> 8), 256); - inrOverflowDirptr.i = inrOverflowrangeptr.p->dirArray[tinrTmp >> 8]; - ptrCheckGuard(inrOverflowDirptr, cdirarraysize, directoryarray); - idrPageptr.i = inrOverflowDirptr.p->pagep[tinrTmp & 0xff]; - ptrCheckGuard(idrPageptr, cpagesize, page8); - }//if - ndbrequire(tidrPageindex < ZEMPTYLIST); - } else { - break; - }//if - } while (1); - gflPageptr.p = idrPageptr.p; - getfreelist(signal); - if (tgflPageindex == ZEMPTYLIST) { - jam(); - /* NO FREE BUFFER IS FOUND */ - if (fragrecptr.p->firstOverflowRec == RNIL) { - jam(); - allocOverflowPage(signal); - ndbrequire(tresult <= ZLIMIT_OF_ERROR); - }//if - inrOverflowRecPtr.i = fragrecptr.p->firstOverflowRec; - ptrCheckGuard(inrOverflowRecPtr, coverflowrecsize, overflowRecord); - inrNewPageptr.i = inrOverflowRecPtr.p->overpage; - ptrCheckGuard(inrNewPageptr, cpagesize, page8); - gflPageptr.p = inrNewPageptr.p; - getfreelist(signal); - ndbrequire(tgflPageindex != ZEMPTYLIST); - tancNext = 0; - } else { - jam(); - inrNewPageptr = idrPageptr; - tancNext = 1; - }//if - tslUpdateHeader = ZTRUE; - tslPageindex = tgflPageindex; - slPageptr.p = inrNewPageptr.p; - if (tgflBufType == ZLEFT) { - seizeLeftlist(signal); - tidrForward = ZTRUE; - } else { - seizeRightlist(signal); - tidrForward = cminusOne; - }//if - tancPageindex = tgflPageindex; - tancPageid = inrNewPageptr.p->word32[ZPOS_PAGE_ID]; - tancBufType = tgflBufType; - tancContainerptr = tidrContainerptr; - ancPageptr.p = idrPageptr.p; - addnewcontainer(signal); - - idrPageptr = inrNewPageptr; - tidrPageindex = tgflPageindex; - insertContainer(signal); - ndbrequire(tidrResult == ZTRUE); -}//Dbacc::insertElement() - -/* --------------------------------------------------------------------------------- */ -/* INSERT_CONTAINER */ -/* INPUT: */ -/* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */ -/* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */ -/* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */ -/* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */ -/* CKEYS(ARRAY OF TUPLE KEYS) */ -/* CLOCALKEY(ARRAY 0F 
LOCAL KEYS). */ -/* TIDR_KEY_LEN */ -/* FRAGRECPTR */ -/* IDR_OPERATION_REC_PTR */ -/* OUTPUT: */ -/* TIDR_RESULT (ZTRUE FOR SUCCESS AND ZFALSE OTHERWISE) */ -/* TIDR_CONTAINERHEAD (HEADER OF CONTAINER) */ -/* TIDR_CONTAINERPTR (POINTER TO CONTAINER HEADER) */ -/* */ -/* DESCRIPTION: */ -/* THE FREE AREA OF THE CONTAINER WILL BE CALCULATED. IF IT IS */ -/* LARGER THAN OR EQUAL THE ELEMENT LENGTH. THE ELEMENT WILL BE */ -/* INSERT IN THE CONTAINER AND CONTAINER HEAD WILL BE UPDATED. */ -/* THIS ROUTINE ALWAYS DEALS WITH ONLY ONE CONTAINER AND DO NEVER */ -/* START ANYTHING OUTSIDE OF THIS CONTAINER. */ -/* */ -/* SHORT FORM: IDR */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::insertContainer(Signal* signal) -{ - Uint32 tidrContainerlen; - Uint32 tidrConfreelen; - Uint32 tidrNextSide; - Uint32 tidrNextConLen; - Uint32 tidrIndex; - Uint32 tidrInputIndex; - Uint32 tidrContLen; - Uint32 guard26; - - tidrResult = ZFALSE; - tidrContainerptr = (tidrPageindex << ZSHIFT_PLUS) - (tidrPageindex << ZSHIFT_MINUS); - tidrContainerptr = tidrContainerptr + ZHEAD_SIZE; - /* --------------------------------------------------------------------------------- */ - /* CALCULATE THE POINTER TO THE ELEMENT TO BE INSERTED AND THE POINTER TO THE */ - /* CONTAINER HEADER OF THE OTHER SIDE OF THE BUFFER. */ - /* --------------------------------------------------------------------------------- */ - if (tidrForward == ZTRUE) { - jam(); - tidrNextSide = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE); - arrGuard(tidrNextSide + 1, 2048); - tidrContainerhead = idrPageptr.p->word32[tidrContainerptr]; - tidrContainerlen = tidrContainerhead >> 26; - tidrIndex = tidrContainerptr + tidrContainerlen; - } else { - jam(); - tidrNextSide = tidrContainerptr; - tidrContainerptr = tidrContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE); - arrGuard(tidrContainerptr + 1, 2048); - tidrContainerhead = idrPageptr.p->word32[tidrContainerptr]; - tidrContainerlen = tidrContainerhead >> 26; - tidrIndex = (tidrContainerptr - tidrContainerlen) + (ZCON_HEAD_SIZE - 1); - }//if - if (tidrContainerlen > (ZBUF_SIZE - 3)) { - return; - }//if - tidrConfreelen = ZBUF_SIZE - tidrContainerlen; - /* --------------------------------------------------------------------------------- */ - /* WE CALCULATE THE TOTAL LENGTH THE CONTAINER CAN EXPAND TO */ - /* THIS INCLUDES THE OTHER SIDE OF THE BUFFER IF POSSIBLE TO EXPAND THERE. */ - /* --------------------------------------------------------------------------------- */ - if (((tidrContainerhead >> 10) & 1) == 0) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* WE HAVE NOT EXPANDED TO THE ENTIRE BUFFER YET. WE CAN THUS READ THE OTHER */ - /* SIDE'S CONTAINER HEADER TO READ HIS LENGTH. */ - /* --------------------------------------------------------------------------------- */ - tidrNextConLen = idrPageptr.p->word32[tidrNextSide] >> 26; - tidrConfreelen = tidrConfreelen - tidrNextConLen; - if (tidrConfreelen > ZBUF_SIZE) { - ndbrequire(false); - /* --------------------------------------------------------------------------------- */ - /* THE BUFFERS ARE PLACED ON TOP OF EACH OTHER. THIS SHOULD NEVER OCCUR. 
*/ - /* --------------------------------------------------------------------------------- */ - return; - }//if - } else { - jam(); - tidrNextConLen = 1; /* INDICATE OTHER SIDE IS NOT PART OF FREE LIST */ - }//if - if (tidrConfreelen < fragrecptr.p->elementLength) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE CONTAINER COULD NOT BE EXPANDED TO FIT THE NEW ELEMENT. WE HAVE TO */ - /* RETURN AND FIND A NEW CONTAINER TO INSERT IT INTO. */ - /* --------------------------------------------------------------------------------- */ - return; - }//if - tidrContainerlen = tidrContainerlen + fragrecptr.p->elementLength; - if (tidrNextConLen == 0) { - /* EACH SIDE OF THE BUFFER WHICH BELONG TO A FREE */ - /* LIST, HAS ZERO AS LENGTH. */ - if (tidrContainerlen > ZUP_LIMIT) { - dbgWord32(idrPageptr, tidrContainerptr, idrPageptr.p->word32[tidrContainerptr] | (1 << 10)); - idrPageptr.p->word32[tidrContainerptr] = idrPageptr.p->word32[tidrContainerptr] | (1 << 10); - tslUpdateHeader = ZFALSE; - tslPageindex = tidrPageindex; - slPageptr.p = idrPageptr.p; - if (tidrForward == ZTRUE) { - jam(); - seizeRightlist(signal); /* REMOVE THE RIGHT SIDE OF THE BUFFER FROM THE LIST */ - } else { - jam(); - /* OF THE FREE CONTAINERS */ - seizeLeftlist(signal); /* REMOVE THE LEFT SIDE OF THE BUFFER FROM THE LIST */ - }//if - }//if - }//if - /* OF THE FREE CONTAINERS */ - /* --------------------------------------------------------------------------------- */ - /* WE HAVE NOW FOUND A FREE SPOT IN THE CURRENT CONTAINER. WE INSERT THE */ - /* ELEMENT HERE. THE ELEMENT CONTAINS A HEADER, A LOCAL KEY AND A TUPLE KEY. */ - /* BEFORE INSERTING THE ELEMENT WE WILL UPDATE THE OPERATION RECORD WITH THE */ - /* DATA CONCERNING WHERE WE INSERTED THE ELEMENT. THIS MAKES IT EASY TO FIND */ - /* THIS INFORMATION WHEN WE RETURN TO UPDATE THE LOCAL KEY OR RETURN TO COMMIT */ - /* OR ABORT THE INSERT. IF NO OPERATION RECORD EXIST IT MEANS THAT WE ARE */ - /* PERFORMING THIS AS A PART OF THE EXPAND OR SHRINK PROCESS. */ - /* --------------------------------------------------------------------------------- */ - if (idrOperationRecPtr.i != RNIL) { - jam(); - idrOperationRecPtr.p->elementIsforward = tidrForward; - idrOperationRecPtr.p->elementPage = idrPageptr.i; - idrOperationRecPtr.p->elementContainer = tidrContainerptr; - idrOperationRecPtr.p->elementPointer = tidrIndex; - }//if - /* --------------------------------------------------------------------------------- */ - /* WE CHOOSE TO UNDO LOG INSERTS BY WRITING THE BEFORE VALUE TO THE UNDO LOG. */ - /* WE COULD ALSO HAVE DONE THIS BY WRITING THIS BEFORE VALUE WHEN DELETING */ - /* ELEMENTS. WE CHOOSE TO PUT IT HERE SINCE WE THEREBY ENSURE THAT WE ALWAYS */ - /* UNDO LOG ALL WRITES TO PAGE MEMORY. IT SHOULD BE EASIER TO MAINTAIN SUCH A */ - /* STRUCTURE. IT IS RATHER DIFFICULT TO MAINTAIN A LOGICAL STRUCTURE WHERE */ - /* DELETES ARE INSERTS AND INSERTS ARE PURELY DELETES. 
*/ - /* --------------------------------------------------------------------------------- */ - dbgWord32(idrPageptr, tidrIndex, tidrElemhead); - idrPageptr.p->word32[tidrIndex] = tidrElemhead; /* INSERTS THE HEAD OF THE ELEMENT */ - tidrIndex += tidrForward; - guard26 = fragrecptr.p->localkeylen - 1; - arrGuard(guard26, 2); - for (tidrInputIndex = 0; tidrInputIndex <= guard26; tidrInputIndex++) { - dbgWord32(idrPageptr, tidrIndex, clocalkey[tidrInputIndex]); - arrGuard(tidrIndex, 2048); - idrPageptr.p->word32[tidrIndex] = clocalkey[tidrInputIndex]; /* INSERTS LOCALKEY */ - tidrIndex += tidrForward; - }//for - tidrContLen = idrPageptr.p->word32[tidrContainerptr] << 6; - tidrContLen = tidrContLen >> 6; - dbgWord32(idrPageptr, tidrContainerptr, (tidrContainerlen << 26) | tidrContLen); - idrPageptr.p->word32[tidrContainerptr] = (tidrContainerlen << 26) | tidrContLen; - tidrResult = ZTRUE; -}//Dbacc::insertContainer() - -/* --------------------------------------------------------------------------------- */ -/* ADDNEWCONTAINER */ -/* INPUT: */ -/* TANC_CONTAINERPTR */ -/* ANC_PAGEPTR */ -/* TANC_NEXT */ -/* TANC_PAGEINDEX */ -/* TANC_BUF_TYPE */ -/* TANC_PAGEID */ -/* OUTPUT: */ -/* NONE */ -/* */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::addnewcontainer(Signal* signal) -{ - Uint32 tancTmp1; - - /* THE OLD DATA IS STORED ON AN UNDO PAGE */ - /* --------------------------------------------------------------------------------- */ - /* KEEP LENGTH INFORMATION IN BIT 26-31. */ - /* SET BIT 9 INDICATING IF NEXT BUFFER IN THE SAME PAGE USING TANC_NEXT. */ - /* SET TYPE OF NEXT CONTAINER IN BIT 7-8. */ - /* SET PAGE INDEX OF NEXT CONTAINER IN BIT 0-6. */ - /* KEEP INDICATOR OF OWNING OTHER SIDE OF BUFFER IN BIT 10. */ - /* --------------------------------------------------------------------------------- */ - tancTmp1 = ancPageptr.p->word32[tancContainerptr] >> 10; - tancTmp1 = tancTmp1 << 1; - tancTmp1 = tancTmp1 | tancNext; - tancTmp1 = tancTmp1 << 2; - tancTmp1 = tancTmp1 | tancBufType; /* TYPE OF THE NEXT CONTAINER */ - tancTmp1 = tancTmp1 << 7; - tancTmp1 = tancTmp1 | tancPageindex; - dbgWord32(ancPageptr, tancContainerptr, tancTmp1); - ancPageptr.p->word32[tancContainerptr] = tancTmp1; /* HEAD OF THE CONTAINER IS UPDATED */ - dbgWord32(ancPageptr, tancContainerptr + 1, tancPageid); - ancPageptr.p->word32[tancContainerptr + 1] = tancPageid; -}//Dbacc::addnewcontainer() - -/* --------------------------------------------------------------------------------- */ -/* GETFREELIST */ -/* INPUT: */ -/* GFL_PAGEPTR (POINTER TO A PAGE RECORD). */ -/* OUTPUT: */ -/* TGFL_PAGEINDEX(POINTER TO A FREE BUFFER IN THE FREEPAGE), AND */ -/* TGFL_BUF_TYPE( TYPE OF THE FREE BUFFER). */ -/* DESCRIPTION: SEARCHS IN THE FREE LIST OF THE FREE BUFFER IN THE PAGE HEAD */ -/* (WORD32(1)),AND RETURN ADDRESS OF A FREE BUFFER OR NIL. */ -/* THE FREE BUFFER CAN BE A RIGHT CONTAINER OR A LEFT ONE */ -/* THE KIND OF THE CONTAINER IS NOTED BY TGFL_BUF_TYPE. 
*/ -/* --------------------------------------------------------------------------------- */ -void Dbacc::getfreelist(Signal* signal) -{ - Uint32 tgflTmp; - - tgflTmp = gflPageptr.p->word32[ZPOS_EMPTY_LIST]; - tgflPageindex = (tgflTmp >> 7) & 0x7f; /* LEFT FREE LIST */ - tgflBufType = ZLEFT; - if (tgflPageindex == ZEMPTYLIST) { - jam(); - tgflPageindex = tgflTmp & 0x7f; /* RIGHT FREE LIST */ - tgflBufType = ZRIGHT; - }//if - ndbrequire(tgflPageindex <= ZEMPTYLIST); -}//Dbacc::getfreelist() - -/* --------------------------------------------------------------------------------- */ -/* INCREASELISTCONT */ -/* INPUT: */ -/* ILC_PAGEPTR PAGE POINTER TO INCREASE NUMBER OF CONTAINERS IN */ -/* A CONTAINER OF AN OVERFLOW PAGE (FREEPAGEPTR) IS ALLOCATED, NR OF */ -/* ALLOCATED CONTAINER HAVE TO BE INCRESE BY ONE . */ -/* IF THE NUMBER OF ALLOCATED CONTAINERS IS ABOVE THE FREE LIMIT WE WILL */ -/* REMOVE THE PAGE FROM THE FREE LIST. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::increaselistcont(Signal* signal) -{ - OverflowRecordPtr ilcOverflowRecPtr; - - dbgWord32(ilcPageptr, ZPOS_ALLOC_CONTAINERS, ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1); - ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1; - if (ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) { - if (ilcPageptr.p->word32[ZPOS_OVERFLOWREC] != RNIL) { - jam(); - ilcOverflowRecPtr.i = ilcPageptr.p->word32[ZPOS_OVERFLOWREC]; - dbgWord32(ilcPageptr, ZPOS_OVERFLOWREC, RNIL); - ilcPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL; - ptrCheckGuard(ilcOverflowRecPtr, coverflowrecsize, overflowRecord); - tfoOverflowRecPtr = ilcOverflowRecPtr; - takeRecOutOfFreeOverpage(signal); - rorOverflowRecPtr = ilcOverflowRecPtr; - releaseOverflowRec(signal); - }//if - }//if -}//Dbacc::increaselistcont() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_LEFTLIST */ -/* INPUT: */ -/* TSL_PAGEINDEX PAGE INDEX OF CONTAINER TO SEIZE */ -/* SL_PAGEPTR PAGE POINTER OF CONTAINER TO SEIZE */ -/* TSL_UPDATE_HEADER SHOULD WE UPDATE THE CONTAINER HEADER */ -/* */ -/* OUTPUT: */ -/* NONE */ -/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */ -/* LIST OF LEFT FREE CONTAINER, IN THE HEADER OF THE PAGE */ -/* (FREEPAGEPTR). PREVIOUS AND NEXT BUFFER OF REMOVED BUFFER */ -/* WILL BE UPDATED. 
*/ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeLeftlist(Signal* signal) -{ - Uint32 tsllTmp1; - Uint32 tsllNewHead; - Uint32 tsllHeadIndex; - Uint32 tsllTmp; - - tsllHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ZHEAD_SIZE; - arrGuard(tsllHeadIndex + 1, 2048); - tslNextfree = slPageptr.p->word32[tsllHeadIndex]; - tslPrevfree = slPageptr.p->word32[tsllHeadIndex + 1]; - if (tslPrevfree == ZEMPTYLIST) { - jam(); - /* UPDATE FREE LIST OF LEFT CONTAINER IN PAGE HEAD */ - tsllTmp1 = slPageptr.p->word32[ZPOS_EMPTY_LIST]; - tsllTmp = tsllTmp1 & 0x7f; - tsllTmp1 = (tsllTmp1 >> 14) << 14; - tsllTmp1 = (tsllTmp1 | (tslNextfree << 7)) | tsllTmp; - dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp1); - slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp1; - } else { - ndbrequire(tslPrevfree < ZEMPTYLIST); - jam(); - tsllTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ZHEAD_SIZE; - dbgWord32(slPageptr, tsllTmp, tslNextfree); - slPageptr.p->word32[tsllTmp] = tslNextfree; - }//if - if (tslNextfree < ZEMPTYLIST) { - jam(); - tsllTmp = (((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE) + 1; - dbgWord32(slPageptr, tsllTmp, tslPrevfree); - slPageptr.p->word32[tsllTmp] = tslPrevfree; - } else { - ndbrequire(tslNextfree == ZEMPTYLIST); - jam(); - }//if - /* --------------------------------------------------------------------------------- */ - /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */ - /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */ - /* LISTS IN THE PAGE. */ - /* */ - /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */ - /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */ - /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */ - /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. */ - /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */ - /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */ - /* WE ALSO SET BIT 25 TO INDICATE THAT IT IS A CONTAINER HEADER. 
*/ - /* --------------------------------------------------------------------------------- */ - if (tslUpdateHeader == ZTRUE) { - jam(); - tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f; - tsllNewHead = ZCON_HEAD_SIZE; - tsllNewHead = ((tsllNewHead << 8) + ZEMPTYLIST) + (1 << 7); - tsllNewHead = (tsllNewHead << 7) + tslNextfree; - tsllNewHead = tsllNewHead << 11; - dbgWord32(slPageptr, tsllHeadIndex, tsllNewHead); - slPageptr.p->word32[tsllHeadIndex] = tsllNewHead; - tsllTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff; - tsllTmp = tsllTmp | (tslPageindex << 23); - dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp); - slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp; - if (tslNextfree < ZEMPTYLIST) { - jam(); - tsllTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ZHEAD_SIZE; - tsllTmp1 = slPageptr.p->word32[tsllTmp] & 0xfe03ffff; - tsllTmp1 = tsllTmp1 | (tslPageindex << 18); - dbgWord32(slPageptr, tsllTmp, tsllTmp1); - slPageptr.p->word32[tsllTmp] = tsllTmp1; - } else { - ndbrequire(tslNextfree == ZEMPTYLIST); - jam(); - }//if - }//if - ilcPageptr.p = slPageptr.p; - increaselistcont(signal); -}//Dbacc::seizeLeftlist() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_RIGHTLIST */ -/* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */ -/* LIST OF RIGHT FREE CONTAINER, IN THE HEADER OF THE PAGE */ -/* (SL_PAGEPTR). PREVIOUS AND NEXT BUFFER OF REMOVED BUFFER */ -/* WILL BE UPDATED. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeRightlist(Signal* signal) -{ - Uint32 tsrlTmp1; - Uint32 tsrlNewHead; - Uint32 tsrlHeadIndex; - Uint32 tsrlTmp; - - tsrlHeadIndex = ((tslPageindex << ZSHIFT_PLUS) - (tslPageindex << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - arrGuard(tsrlHeadIndex + 1, 2048); - tslNextfree = slPageptr.p->word32[tsrlHeadIndex]; - tslPrevfree = slPageptr.p->word32[tsrlHeadIndex + 1]; - if (tslPrevfree == ZEMPTYLIST) { - jam(); - tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST]; - dbgWord32(slPageptr, ZPOS_EMPTY_LIST, ((tsrlTmp >> 7) << 7) | tslNextfree); - slPageptr.p->word32[ZPOS_EMPTY_LIST] = ((tsrlTmp >> 7) << 7) | tslNextfree; - } else { - ndbrequire(tslPrevfree < ZEMPTYLIST); - jam(); - tsrlTmp = ((tslPrevfree << ZSHIFT_PLUS) - (tslPrevfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - dbgWord32(slPageptr, tsrlTmp, tslNextfree); - slPageptr.p->word32[tsrlTmp] = tslNextfree; - }//if - if (tslNextfree < ZEMPTYLIST) { - jam(); - tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1)); - dbgWord32(slPageptr, tsrlTmp, tslPrevfree); - slPageptr.p->word32[tsrlTmp] = tslPrevfree; - } else { - ndbrequire(tslNextfree == ZEMPTYLIST); - jam(); - }//if - /* --------------------------------------------------------------------------------- */ - /* IF WE ARE UPDATING THE HEADER WE ARE CREATING A NEW CONTAINER IN THE PAGE. */ - /* TO BE ABLE TO FIND ALL LOCKED ELEMENTS WE KEEP ALL CONTAINERS IN LINKED */ - /* LISTS IN THE PAGE. */ - /* */ - /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 16-22 THAT REFERS TO THE */ - /* FIRST CONTAINER IN A LIST OF USED RIGHT CONTAINERS IN THE PAGE. */ - /* ZPOS_EMPTY_LIST CONTAINS A NEXT POINTER IN BIT 23-29 THAT REFERS TO THE */ - /* FIRST CONTAINER IN A LIST OF USED LEFT CONTAINERS IN THE PAGE. 
*/ - /* EACH CONTAINER IN THE LIST CONTAINS A NEXT POINTER IN BIT 11-17 AND IT */ - /* CONTAINS A PREVIOUS POINTER IN BIT 18-24. */ - /* --------------------------------------------------------------------------------- */ - if (tslUpdateHeader == ZTRUE) { - jam(); - tslNextfree = (slPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f; - tsrlNewHead = ZCON_HEAD_SIZE; - tsrlNewHead = ((tsrlNewHead << 8) + ZEMPTYLIST) + (1 << 7); - tsrlNewHead = (tsrlNewHead << 7) + tslNextfree; - tsrlNewHead = tsrlNewHead << 11; - dbgWord32(slPageptr, tsrlHeadIndex, tsrlNewHead); - slPageptr.p->word32[tsrlHeadIndex] = tsrlNewHead; - tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff; - dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsrlTmp | (tslPageindex << 16)); - slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsrlTmp | (tslPageindex << 16); - if (tslNextfree < ZEMPTYLIST) { - jam(); - tsrlTmp = ((tslNextfree << ZSHIFT_PLUS) - (tslNextfree << ZSHIFT_MINUS)) + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - tsrlTmp1 = slPageptr.p->word32[tsrlTmp] & 0xfe03ffff; - dbgWord32(slPageptr, tsrlTmp, tsrlTmp1 | (tslPageindex << 18)); - slPageptr.p->word32[tsrlTmp] = tsrlTmp1 | (tslPageindex << 18); - } else { - ndbrequire(tslNextfree == ZEMPTYLIST); - jam(); - }//if - }//if - ilcPageptr.p = slPageptr.p; - increaselistcont(signal); -}//Dbacc::seizeRightlist() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF INSERT_ELEMENT MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* MODULE: GET_ELEMENT */ -/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */ -/* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */ -/* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND EXECUTE UNDO LOG */ -/* */ -/* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */ -/* GET_ELEMENT */ -/* GET_DIRINDEX */ -/* SEARCH_LONG_KEY */ -/* */ -/* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */ -/* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */ -/* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */ -/* THOSE DEFINED AS INPUT AND OUTPUT IN GET_ELEMENT AND GETDIRINDEX */ -/* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */ -/* AND POINTER VARIABLES. */ -/* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */ -/* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */ -/* EXECUTION. */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* GETDIRINDEX */ -/* SUPPORT ROUTINE FOR INSERT ELEMENT, GET ELEMENT AND COMMITDELETE */ -/* INPUT:FRAGRECPTR ( POINTER TO THE ACTIVE FRAGMENT REC) */ -/* OPERATION_REC_PTR (POINTER TO THE OPERATION REC). 
*/ -/* */ -/* OUTPUT:GDI_PAGEPTR ( POINTER TO THE PAGE OF THE ELEMENT) */ -/* TGDI_PAGEINDEX ( INDEX OF THE ELEMENT IN THE PAGE). */ -/* */ -/* DESCRIPTION: CHECK THE HASH VALUE OF THE OPERATION REC AND CALCULATE THE */ -/* THE ADDRESS OF THE ELEMENT IN THE HASH TABLE,(GDI_PAGEPTR, */ -/* TGDI_PAGEINDEX) ACCORDING TO LH3. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::getdirindex(Signal* signal) -{ - DirRangePtr gdiDirRangePtr; - DirectoryarrayPtr gdiDirptr; - Uint32 tgdiTmp; - Uint32 tgdiAddress; - - tgdiTmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; /* OBS K = 6 */ - tgdiPageindex = operationRecPtr.p->hashValue & ((1 << fragrecptr.p->k) - 1); - tgdiTmp = operationRecPtr.p->hashValue >> tgdiTmp; - tgdiTmp = (tgdiTmp << fragrecptr.p->k) | tgdiPageindex; - tgdiAddress = tgdiTmp & fragrecptr.p->maxp; - gdiDirRangePtr.i = fragrecptr.p->directory; - ptrCheckGuard(gdiDirRangePtr, cdirrangesize, dirRange); - if (tgdiAddress < fragrecptr.p->p) { - jam(); - tgdiAddress = tgdiTmp & ((fragrecptr.p->maxp << 1) | 1); - }//if - tgdiTmp = tgdiAddress >> fragrecptr.p->k; - arrGuard((tgdiTmp >> 8), 256); - gdiDirptr.i = gdiDirRangePtr.p->dirArray[tgdiTmp >> 8]; - ptrCheckGuard(gdiDirptr, cdirarraysize, directoryarray); - gdiPageptr.i = gdiDirptr.p->pagep[tgdiTmp & 0xff]; /* DIRECTORY INDEX OF SEND BUCKET PAGE */ - ptrCheckGuard(gdiPageptr, cpagesize, page8); -}//Dbacc::getdirindex() - -Uint32 -Dbacc::readTablePk(Uint32 localkey1, Uint32 eh, Ptr opPtr) -{ - int ret; - Uint32 tableId = fragrecptr.p->myTableId; - Uint32 fragId = fragrecptr.p->myfid; - bool xfrm = fragrecptr.p->hasCharAttr; - -#ifdef VM_TRACE - memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2); -#endif - - if (likely(localkey1 != ~(Uint32)0)) - { - Uint32 fragPageId = localkey1 >> MAX_TUPLES_BITS; - Uint32 pageIndex = localkey1 & ((1 << MAX_TUPLES_BITS ) - 1); - ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, - ckeys, true); - } - else - { - ndbrequire(ElementHeader::getLocked(eh)); - if (unlikely((opPtr.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP)) - { - dump_lock_queue(opPtr); - ndbrequire(opPtr.p->nextParallelQue == RNIL); - ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED); - ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_COMMIT_DELETE_CHECK); - ndbrequire((opPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_RUNNING); - return 0; - } - ret = c_lqh->readPrimaryKeys(opPtr.p->userptr, ckeys, xfrm); - } - jamEntry(); - ndbrequire(ret >= 0); - return ret; -} - -/* --------------------------------------------------------------------------------- */ -/* GET_ELEMENT */ -/* INPUT: */ -/* OPERATION_REC_PTR */ -/* FRAGRECPTR */ -/* OUTPUT: */ -/* TGE_RESULT RESULT SUCCESS = ZTRUE OTHERWISE ZFALSE */ -/* TGE_LOCKED LOCK INFORMATION IF SUCCESSFUL RESULT */ -/* GE_PAGEPTR PAGE POINTER OF FOUND ELEMENT */ -/* TGE_CONTAINERPTR CONTAINER INDEX OF FOUND ELEMENT */ -/* TGE_ELEMENTPTR ELEMENT INDEX OF FOUND ELEMENT */ -/* TGE_FORWARD DIRECTION OF CONTAINER WHERE ELEMENT FOUND */ -/* */ -/* DESCRIPTION: THE SUBROUTIN GOES THROUGH ALL CONTAINERS OF THE ACTIVE */ -/* BUCKET, AND SERCH FOR ELEMENT.THE PRIMARY KEYS WHICH IS SAVED */ -/* IN THE OPERATION REC ARE THE CHECK ITEMS IN THE SEARCHING. 
*/ -/* --------------------------------------------------------------------------------- */ - -#if __ia64 == 1 -#if __INTEL_COMPILER == 810 -int ndb_acc_ia64_icc810_dummy_var = 0; -void ndb_acc_ia64_icc810_dummy_func() -{ - ndb_acc_ia64_icc810_dummy_var++; -} -#endif -#endif - -Uint32 -Dbacc::getElement(Signal* signal, OperationrecPtr& lockOwnerPtr) -{ - Uint32 errcode; - DirRangePtr geOverflowrangeptr; - DirectoryarrayPtr geOverflowDirptr; - Uint32 tgeElementHeader; - Uint32 tgeElemStep; - Uint32 tgeContainerhead; - Uint32 tgePageindex; - Uint32 tgeActivePageDir; - Uint32 tgeNextptrtype; - register Uint32 tgeKeyptr; - register Uint32 tgeRemLen; - register Uint32 TelemLen = fragrecptr.p->elementLength; - register Uint32* Tkeydata = (Uint32*)&signal->theData[7]; - - getdirindex(signal); - tgePageindex = tgdiPageindex; - gePageptr = gdiPageptr; - /* - * The value seached is - * - table key for ACCKEYREQ, stored in TUP - * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC - */ - const bool searchLocalKey = operationRecPtr.p->tupkeylen == 0; - - ndbrequire(TelemLen == ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen); - tgeNextptrtype = ZLEFT; - - const Uint32 tmp = fragrecptr.p->k + fragrecptr.p->lhfragbits; - const Uint32 opHashValuePart = (operationRecPtr.p->hashValue >> tmp) &0xFFFF; - do { - tgeContainerptr = (tgePageindex << ZSHIFT_PLUS) - (tgePageindex << ZSHIFT_MINUS); - if (tgeNextptrtype == ZLEFT) { - jam(); - tgeContainerptr = tgeContainerptr + ZHEAD_SIZE; - tgeElementptr = tgeContainerptr + ZCON_HEAD_SIZE; - tgeKeyptr = (tgeElementptr + ZELEM_HEAD_SIZE) + fragrecptr.p->localkeylen; - tgeElemStep = TelemLen; - tgeForward = 1; - if (unlikely(tgeContainerptr >= 2048)) - { - errcode = 4; - goto error; - } - tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26; - if (unlikely(((tgeContainerptr + tgeRemLen - 1) >= 2048))) - { - errcode = 5; - goto error; - } - } else if (tgeNextptrtype == ZRIGHT) { - jam(); - tgeContainerptr = tgeContainerptr + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - tgeElementptr = tgeContainerptr - 1; - tgeKeyptr = (tgeElementptr - ZELEM_HEAD_SIZE) - fragrecptr.p->localkeylen; - tgeElemStep = 0 - TelemLen; - tgeForward = (Uint32)-1; - if (unlikely(tgeContainerptr >= 2048)) - { - errcode = 4; - goto error; - } - tgeRemLen = gePageptr.p->word32[tgeContainerptr] >> 26; - if (unlikely((tgeContainerptr - tgeRemLen) >= 2048)) - { - errcode = 5; - goto error; - } - } else { - errcode = 6; - goto error; - }//if - if (tgeRemLen >= ZCON_HEAD_SIZE + TelemLen) { - if (unlikely(tgeRemLen > ZBUF_SIZE)) - { - errcode = 7; - goto error; - }//if - /* ------------------------------------------------------------------- */ - // There is at least one element in this container. - // Check if it is the element searched for. 
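getElement walks every container of the addressed bucket; each element begins with a one-word header that is either "unlocked, carrying the reduced hash value" or "locked, carrying the i-value of the owning operation record", which is why the loop below fetches hashValuePart and localdata from the operation record when the element is locked. A rough sketch of such a header encoding, with bit positions chosen purely for illustration rather than taken from the real ElementHeader class:

// Illustrative element-header encoding: one 32-bit word that is either
// "unlocked + hash value part" or "locked + operation record index".
// Bit positions are invented for the sketch and do not claim to match
// the actual ElementHeader layout in the NDB source.
#include <cassert>
#include <cstdint>

namespace sketch {
constexpr uint32_t LOCKED_BIT = 1u << 31;

inline uint32_t setUnlocked(uint32_t hashValuePart) {
  assert(hashValuePart <= 0xFFFF);
  return hashValuePart;                       // bit 31 clear => unlocked
}
inline uint32_t setLocked(uint32_t opRecIndex) {
  assert(opRecIndex < LOCKED_BIT);
  return LOCKED_BIT | opRecIndex;             // bit 31 set => locked
}
inline bool     getLocked(uint32_t h)        { return (h & LOCKED_BIT) != 0; }
inline uint32_t getHashValuePart(uint32_t h) { return h & 0xFFFF; }
inline uint32_t getOpPtrI(uint32_t h)        { return h & ~LOCKED_BIT; }
}  // namespace sketch

int main() {
  uint32_t h1 = sketch::setUnlocked(0xBEEF);
  uint32_t h2 = sketch::setLocked(12345);
  assert(!sketch::getLocked(h1) && sketch::getHashValuePart(h1) == 0xBEEF);
  assert( sketch::getLocked(h2) && sketch::getOpPtrI(h2) == 12345);
  return 0;
}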
- /* ------------------------------------------------------------------- */ - do { - tgeElementHeader = gePageptr.p->word32[tgeElementptr]; - tgeRemLen = tgeRemLen - TelemLen; - Uint32 hashValuePart; - Uint32 localkey1, localkey2; - lockOwnerPtr.i = RNIL; - lockOwnerPtr.p = NULL; - if (ElementHeader::getLocked(tgeElementHeader)) { - jam(); - lockOwnerPtr.i = ElementHeader::getOpPtrI(tgeElementHeader); - ptrCheckGuard(lockOwnerPtr, coprecsize, operationrec); - hashValuePart = lockOwnerPtr.p->hashvaluePart; - localkey1 = lockOwnerPtr.p->localdata[0]; - localkey2 = lockOwnerPtr.p->localdata[1]; - } else { - jam(); - hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader); - localkey1 = gePageptr.p->word32[tgeElementptr + tgeForward]; - localkey2 = 0; - } - if (hashValuePart == opHashValuePart) { - jam(); - bool found; - if (! searchLocalKey) - { - Uint32 len = readTablePk(localkey1, tgeElementHeader, - lockOwnerPtr); - found = (len == operationRecPtr.p->xfrmtupkeylen) && - (memcmp(Tkeydata, ckeys, len << 2) == 0); - } else { - jam(); - found = (localkey1 == Tkeydata[0]); - } - if (found) - { - jam(); - operationRecPtr.p->localdata[0] = localkey1; - operationRecPtr.p->localdata[1] = localkey2; - return ZTRUE; - } - } - if (tgeRemLen <= ZCON_HEAD_SIZE) { - break; - } - tgeElementptr = tgeElementptr + tgeElemStep; - } while (true); - }//if - if (unlikely(tgeRemLen != ZCON_HEAD_SIZE)) - { - errcode = 8; - goto error; - }//if - tgeContainerhead = gePageptr.p->word32[tgeContainerptr]; - tgeNextptrtype = (tgeContainerhead >> 7) & 0x3; - if (tgeNextptrtype == 0) { - jam(); - return ZFALSE; /* NO MORE CONTAINER */ - }//if - tgePageindex = tgeContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */ - if (unlikely(tgePageindex > ZEMPTYLIST)) - { - errcode = 9; - goto error; - }//if - if (((tgeContainerhead >> 9) & 1) == ZFALSE) { - jam(); - tgeActivePageDir = gePageptr.p->word32[tgeContainerptr + 1]; /* NEXT PAGE ID */ - geOverflowrangeptr.i = fragrecptr.p->overflowdir; - ptrCheckGuard(geOverflowrangeptr, cdirrangesize, dirRange); - arrGuard((tgeActivePageDir >> 8), 256); - geOverflowDirptr.i = geOverflowrangeptr.p->dirArray[tgeActivePageDir >> 8]; - ptrCheckGuard(geOverflowDirptr, cdirarraysize, directoryarray); - gePageptr.i = geOverflowDirptr.p->pagep[tgeActivePageDir & 0xff]; - ptrCheckGuard(gePageptr, cpagesize, page8); - }//if - } while (1); - - return ZFALSE; - -error: - ACCKEY_error(errcode); - return ~0; -}//Dbacc::getElement() - -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* END OF GET_ELEMENT MODULE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* MODULE: DELETE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* COMMITDELETE */ -/* INPUT: OPERATION_REC_PTR, PTR TO AN OPERATION RECORD. 
*/ -/* FRAGRECPTR, PTR TO A FRAGMENT RECORD */ -/* */ -/* OUTPUT: */ -/* NONE */ -/* DESCRIPTION: DELETE OPERATIONS WILL BE COMPLETED AT THE - * COMMIT OF TRANSACTION. THIS SUBROUTINE SEARCHS FOR ELEMENT AND - * DELETES IT. IT DOES SO BY REPLACING IT WITH THE LAST - * ELEMENT IN THE BUCKET. IF THE DELETED ELEMENT IS ALSO THE LAST - * ELEMENT THEN IT IS ONLY NECESSARY TO REMOVE THE ELEMENT - * ------------------------------------------------------------------------- */ -void -Dbacc::report_dealloc(Signal* signal, const Operationrec* opPtrP) -{ - Uint32 localKey = opPtrP->localdata[0]; - Uint32 opbits = opPtrP->m_op_bits; - Uint32 userptr= opPtrP->userptr; - Uint32 scanInd = - ((opbits & Operationrec::OP_MASK) == ZSCAN_OP) || - (opbits & Operationrec::OP_LOCK_REQ); - - if (localKey != ~(Uint32)0) - { - signal->theData[0] = fragrecptr.p->myfid; - signal->theData[1] = fragrecptr.p->myTableId; - Uint32 pageId = localKey >> MAX_TUPLES_BITS; - Uint32 pageIndex = localKey & ((1 << MAX_TUPLES_BITS) - 1); - signal->theData[2] = pageId; - signal->theData[3] = pageIndex; - signal->theData[4] = userptr; - signal->theData[5] = scanInd; - EXECUTE_DIRECT(DBLQH, GSN_TUP_DEALLOCREQ, signal, 6); - jamEntry(); - } -} - -void Dbacc::commitdelete(Signal* signal) -{ - jam(); - report_dealloc(signal, operationRecPtr.p); - - getdirindex(signal); - tlastPageindex = tgdiPageindex; - lastPageptr.i = gdiPageptr.i; - lastPageptr.p = gdiPageptr.p; - tlastForward = ZTRUE; - tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS); - tlastContainerptr = tlastContainerptr + ZHEAD_SIZE; - arrGuard(tlastContainerptr, 2048); - tlastContainerhead = lastPageptr.p->word32[tlastContainerptr]; - tlastContainerlen = tlastContainerhead >> 26; - lastPrevpageptr.i = RNIL; - ptrNull(lastPrevpageptr); - tlastPrevconptr = 0; - getLastAndRemove(signal); - - delPageptr.i = operationRecPtr.p->elementPage; - ptrCheckGuard(delPageptr, cpagesize, page8); - tdelElementptr = operationRecPtr.p->elementPointer; - /* --------------------------------------------------------------------------------- */ - // Here we have to take extreme care since we do not want locks to end up after the - // log execution. Thus it is necessary to put back the element in unlocked shape. - // We thus update the element header to ensure we log an unlocked element. We do not - // need to restore it later since it is deleted immediately anyway. - /* --------------------------------------------------------------------------------- */ - const Uint32 hv = operationRecPtr.p->hashvaluePart; - const Uint32 eh = ElementHeader::setUnlocked(hv, 0); - delPageptr.p->word32[tdelElementptr] = eh; - if (operationRecPtr.p->elementPage == lastPageptr.i) { - if (operationRecPtr.p->elementPointer == tlastElementptr) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE LAST ELEMENT WAS THE ELEMENT TO BE DELETED. WE NEED NOT COPY IT. */ - /* --------------------------------------------------------------------------------- */ - return; - }//if - }//if - /* --------------------------------------------------------------------------------- */ - /* THE DELETED ELEMENT IS NOT THE LAST. WE READ THE LAST ELEMENT AND OVERWRITE THE */ - /* DELETED ELEMENT. 
*/ - /* --------------------------------------------------------------------------------- */ - tdelContainerptr = operationRecPtr.p->elementContainer; - tdelForward = operationRecPtr.p->elementIsforward; - deleteElement(signal); -}//Dbacc::commitdelete() - -/* --------------------------------------------------------------------------------- */ -/* DELETE_ELEMENT */ -/* INPUT: FRAGRECPTR, POINTER TO A FRAGMENT RECORD */ -/* LAST_PAGEPTR, POINTER TO THE PAGE OF THE LAST ELEMENT */ -/* DEL_PAGEPTR, POINTER TO THE PAGE OF THE DELETED ELEMENT */ -/* TLAST_ELEMENTPTR, ELEMENT POINTER OF THE LAST ELEMENT */ -/* TDEL_ELEMENTPTR, ELEMENT POINTER OF THE DELETED ELEMENT */ -/* TLAST_FORWARD, DIRECTION OF LAST ELEMENT */ -/* TDEL_FORWARD, DIRECTION OF DELETED ELEMENT */ -/* TDEL_CONTAINERPTR, CONTAINER POINTER OF DELETED ELEMENT */ -/* DESCRIPTION: COPY LAST ELEMENT TO DELETED ELEMENT AND UPDATE UNDO LOG AND */ -/* UPDATE ANY ACTIVE OPERATION ON THE MOVED ELEMENT. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::deleteElement(Signal* signal) -{ - OperationrecPtr deOperationRecPtr; - Uint32 tdeIndex; - Uint32 tlastMoveElemptr; - Uint32 tdelMoveElemptr; - Uint32 guard31; - - if (tlastElementptr >= 2048) - goto deleteElement_index_error1; - { - const Uint32 tdeElemhead = lastPageptr.p->word32[tlastElementptr]; - tlastMoveElemptr = tlastElementptr; - tdelMoveElemptr = tdelElementptr; - guard31 = fragrecptr.p->elementLength - 1; - for (tdeIndex = 0; tdeIndex <= guard31; tdeIndex++) { - dbgWord32(delPageptr, tdelMoveElemptr, lastPageptr.p->word32[tlastMoveElemptr]); - if ((tlastMoveElemptr >= 2048) || - (tdelMoveElemptr >= 2048)) - goto deleteElement_index_error2; - delPageptr.p->word32[tdelMoveElemptr] = lastPageptr.p->word32[tlastMoveElemptr]; - tdelMoveElemptr = tdelMoveElemptr + tdelForward; - tlastMoveElemptr = tlastMoveElemptr + tlastForward; - }//for - if (ElementHeader::getLocked(tdeElemhead)) { - /* --------------------------------------------------------------------------------- */ - /* THE LAST ELEMENT IS LOCKED AND IS THUS REFERENCED BY AN OPERATION RECORD. WE NEED */ - /* TO UPDATE THE OPERATION RECORD WITH THE NEW REFERENCE TO THE ELEMENT. */ - /* --------------------------------------------------------------------------------- */ - deOperationRecPtr.i = ElementHeader::getOpPtrI(tdeElemhead); - ptrCheckGuard(deOperationRecPtr, coprecsize, operationrec); - deOperationRecPtr.p->elementPage = delPageptr.i; - deOperationRecPtr.p->elementContainer = tdelContainerptr; - deOperationRecPtr.p->elementPointer = tdelElementptr; - deOperationRecPtr.p->elementIsforward = tdelForward; - /* --------------------------------------------------------------------------------- */ - // We need to take extreme care to not install locked records after system restart. - // An undo of the delete will reinstall the moved record. We have to ensure that the - // lock is removed to ensure that no such thing happen. 
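The delete path never leaves a hole in the bucket: getLastAndRemove unlinks the last element, deleteElement copies it over the deleted slot, and if the moved element is locked its operation record is re-pointed at the new position, as the code below does. The same "overwrite with the last element, then shrink" idea in miniature, using a plain std::vector in place of the page/container layout (none of these names are NDB API):

// "Replace with last element" deletion, as in commitdelete/deleteElement:
// copy the bucket's last element over the deleted slot, fix up the owning
// operation record if that element was locked, then shrink by one.
#include <cstdint>
#include <iostream>
#include <vector>

struct Element {
  uint32_t localKey;   // analogue of localdata[]
  int      lockOwner;  // index of owning operation record, -1 if unlocked
};

struct OpRec {
  size_t elementIndex; // analogue of elementPage/elementPointer
};

void eraseByMoveLast(std::vector<Element>& bucket,
                     std::vector<OpRec>& ops,
                     size_t victim) {
  size_t last = bucket.size() - 1;
  if (victim != last) {
    bucket[victim] = bucket[last];                           // overwrite deleted slot
    if (bucket[victim].lockOwner >= 0)                       // moved element was locked:
      ops[bucket[victim].lockOwner].elementIndex = victim;   // re-point its operation
  }
  bucket.pop_back();                                         // drop the duplicated tail
}

int main() {
  std::vector<OpRec> ops = {{2}};                // op 0 locks the element at index 2
  std::vector<Element> bucket = {{10, -1}, {20, -1}, {30, 0}};
  eraseByMoveLast(bucket, ops, 0);               // delete element 0
  std::cout << bucket[0].localKey << " " << ops[0].elementIndex << "\n"; // 30 0
}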
- /* --------------------------------------------------------------------------------- */ - Uint32 eh = ElementHeader::setUnlocked(deOperationRecPtr.p->hashvaluePart, - 0); - lastPageptr.p->word32[tlastElementptr] = eh; - }//if - return; - } - - deleteElement_index_error1: - arrGuard(tlastElementptr, 2048); - return; - - deleteElement_index_error2: - arrGuard(tdelMoveElemptr + guard31, 2048); - arrGuard(tlastMoveElemptr, 2048); - return; - -}//Dbacc::deleteElement() - -/* --------------------------------------------------------------------------------- */ -/* GET_LAST_AND_REMOVE */ -/* INPUT: */ -/* LAST_PAGEPTR PAGE POINTER OF FIRST CONTAINER IN SEARCH OF LAST*/ -/* TLAST_CONTAINERPTR CONTAINER INDEX OF THE SAME */ -/* TLAST_CONTAINERHEAD CONTAINER HEADER OF THE SAME */ -/* TLAST_PAGEINDEX PAGE INDEX OF THE SAME */ -/* TLAST_FORWARD CONTAINER DIRECTION OF THE SAME */ -/* TLAST_CONTAINERLEN CONTAINER LENGTH OF THE SAME */ -/* LAST_PREVPAGEPTR PAGE POINTER OF PREVIOUS CONTAINER OF THE SAME */ -/* TLAST_PREVCONPTR CONTAINER INDEX OF PREVIOUS CONTAINER OF THE SAME*/ -/* */ -/* OUTPUT: */ -/* ALL VARIABLES FROM INPUT BUT NOW CONTAINING INFO ABOUT LAST */ -/* CONTAINER. */ -/* TLAST_ELEMENTPTR LAST ELEMENT POINTER IN LAST CONTAINER */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::getLastAndRemove(Signal* signal) -{ - DirRangePtr glrOverflowrangeptr; - DirectoryarrayPtr glrOverflowDirptr; - Uint32 tglrHead; - Uint32 tglrTmp; - - GLR_LOOP_10: - if (((tlastContainerhead >> 7) & 0x3) != 0) { - jam(); - lastPrevpageptr.i = lastPageptr.i; - lastPrevpageptr.p = lastPageptr.p; - tlastPrevconptr = tlastContainerptr; - tlastPageindex = tlastContainerhead & 0x7f; - if (((tlastContainerhead >> 9) & 0x1) == ZFALSE) { - jam(); - arrGuard(tlastContainerptr + 1, 2048); - tglrTmp = lastPageptr.p->word32[tlastContainerptr + 1]; - glrOverflowrangeptr.i = fragrecptr.p->overflowdir; - ptrCheckGuard(glrOverflowrangeptr, cdirrangesize, dirRange); - arrGuard((tglrTmp >> 8), 256); - glrOverflowDirptr.i = glrOverflowrangeptr.p->dirArray[tglrTmp >> 8]; - ptrCheckGuard(glrOverflowDirptr, cdirarraysize, directoryarray); - lastPageptr.i = glrOverflowDirptr.p->pagep[tglrTmp & 0xff]; - ptrCheckGuard(lastPageptr, cpagesize, page8); - }//if - tlastContainerptr = (tlastPageindex << ZSHIFT_PLUS) - (tlastPageindex << ZSHIFT_MINUS); - if (((tlastContainerhead >> 7) & 3) == ZLEFT) { - jam(); - tlastForward = ZTRUE; - tlastContainerptr = tlastContainerptr + ZHEAD_SIZE; - } else if (((tlastContainerhead >> 7) & 3) == ZRIGHT) { - jam(); - tlastForward = cminusOne; - tlastContainerptr = ((tlastContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE; - } else { - ndbrequire(false); - return; - }//if - arrGuard(tlastContainerptr, 2048); - tlastContainerhead = lastPageptr.p->word32[tlastContainerptr]; - tlastContainerlen = tlastContainerhead >> 26; - ndbrequire(tlastContainerlen >= ((Uint32)ZCON_HEAD_SIZE + fragrecptr.p->elementLength)); - goto GLR_LOOP_10; - }//if - tlastContainerlen = tlastContainerlen - fragrecptr.p->elementLength; - if (tlastForward == ZTRUE) { - jam(); - tlastElementptr = tlastContainerptr + tlastContainerlen; - } else { - jam(); - tlastElementptr = (tlastContainerptr + (ZCON_HEAD_SIZE - 1)) - tlastContainerlen; - }//if - rlPageptr.i = lastPageptr.i; - rlPageptr.p = lastPageptr.p; - trlPageindex = tlastPageindex; - if (((tlastContainerhead >> 10) & 1) == 1) { - /* --------------------------------------------------------------------------------- */ - /* WE HAVE 
OWNERSHIP OF BOTH PARTS OF THE CONTAINER ENDS. */ - /* --------------------------------------------------------------------------------- */ - if (tlastContainerlen < ZDOWN_LIMIT) { - /* --------------------------------------------------------------------------------- */ - /* WE HAVE DECREASED THE SIZE BELOW THE DOWN LIMIT, WE MUST GIVE UP THE OTHER */ - /* SIDE OF THE BUFFER. */ - /* --------------------------------------------------------------------------------- */ - tlastContainerhead = tlastContainerhead ^ (1 << 10); - trlRelCon = ZFALSE; - if (tlastForward == ZTRUE) { - jam(); - turlIndex = tlastContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE); - releaseRightlist(signal); - } else { - jam(); - tullIndex = tlastContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE); - releaseLeftlist(signal); - }//if - }//if - }//if - if (tlastContainerlen <= 2) { - ndbrequire(tlastContainerlen == 2); - if (lastPrevpageptr.i != RNIL) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE LAST CONTAINER IS EMPTY AND IS NOT THE FIRST CONTAINER WHICH IS NOT REMOVED. */ - /* DELETE THE LAST CONTAINER AND UPDATE THE PREVIOUS CONTAINER. ALSO PUT THIS */ - /* CONTAINER IN FREE CONTAINER LIST OF THE PAGE. */ - /* --------------------------------------------------------------------------------- */ - ndbrequire(tlastPrevconptr < 2048); - tglrTmp = lastPrevpageptr.p->word32[tlastPrevconptr] >> 9; - dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp << 9); - lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp << 9; - trlRelCon = ZTRUE; - if (tlastForward == ZTRUE) { - jam(); - tullIndex = tlastContainerptr; - releaseLeftlist(signal); - } else { - jam(); - turlIndex = tlastContainerptr; - releaseRightlist(signal); - }//if - return; - }//if - }//if - tglrHead = tlastContainerhead << 6; - tglrHead = tglrHead >> 6; - tglrHead = tglrHead | (tlastContainerlen << 26); - dbgWord32(lastPageptr, tlastContainerptr, tglrHead); - arrGuard(tlastContainerptr, 2048); - lastPageptr.p->word32[tlastContainerptr] = tglrHead; -}//Dbacc::getLastAndRemove() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_LEFTLIST */ -/* INPUT: */ -/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */ -/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */ -/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */ -/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */ -/* A PART IS RELEASED. */ -/* */ -/* OUTPUT: */ -/* NONE */ -/* */ -/* THE FREE LIST OF LEFT FREE BUFFER IN THE PAGE WILL BE UPDATE */ -/* TULL_INDEX IS INDEX TO THE FIRST WORD IN THE LEFT SIDE OF THE BUFFER */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseLeftlist(Signal* signal) -{ - Uint32 tullTmp; - Uint32 tullTmp1; - - /* --------------------------------------------------------------------------------- */ - /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */ - /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */ - /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. 
*/ - /* --------------------------------------------------------------------------------- */ - if (trlRelCon == ZTRUE) { - arrGuard(tullIndex, 2048); - trlHead = rlPageptr.p->word32[tullIndex]; - trlNextused = (trlHead >> 11) & 0x7f; - trlPrevused = (trlHead >> 18) & 0x7f; - if (trlNextused < ZEMPTYLIST) { - jam(); - tullTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS); - tullTmp1 = tullTmp1 + ZHEAD_SIZE; - tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfe03ffff; - dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlPrevused << 18)); - rlPageptr.p->word32[tullTmp1] = tullTmp | (trlPrevused << 18); - } else { - ndbrequire(trlNextused == ZEMPTYLIST); - jam(); - }//if - if (trlPrevused < ZEMPTYLIST) { - jam(); - tullTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS); - tullTmp1 = tullTmp1 + ZHEAD_SIZE; - tullTmp = rlPageptr.p->word32[tullTmp1] & 0xfffc07ff; - dbgWord32(rlPageptr, tullTmp1, tullTmp | (trlNextused << 11)); - rlPageptr.p->word32[tullTmp1] = tullTmp | (trlNextused << 11); - } else { - ndbrequire(trlPrevused == ZEMPTYLIST); - jam(); - /* --------------------------------------------------------------------------------- */ - /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER. */ - /* --------------------------------------------------------------------------------- */ - tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xc07fffff; - dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp | (trlNextused << 23)); - rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp | (trlNextused << 23); - }//if - }//if - dbgWord32(rlPageptr, tullIndex + 1, ZEMPTYLIST); - arrGuard(tullIndex + 1, 2048); - rlPageptr.p->word32[tullIndex + 1] = ZEMPTYLIST; - tullTmp1 = (rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> 7) & 0x7f; - dbgWord32(rlPageptr, tullIndex, tullTmp1); - arrGuard(tullIndex, 2048); - rlPageptr.p->word32[tullIndex] = tullTmp1; - if (tullTmp1 < ZEMPTYLIST) { - jam(); - tullTmp1 = (tullTmp1 << ZSHIFT_PLUS) - (tullTmp1 << ZSHIFT_MINUS); - tullTmp1 = (tullTmp1 + ZHEAD_SIZE) + 1; - dbgWord32(rlPageptr, tullTmp1, trlPageindex); - rlPageptr.p->word32[tullTmp1] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */ - } else { - ndbrequire(tullTmp1 == ZEMPTYLIST); - }//if - tullTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST]; - tullTmp = (((tullTmp >> 14) << 14) | (trlPageindex << 7)) | (tullTmp & 0x7f); - dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, tullTmp); - rlPageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp; - dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1); - rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1; - ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL); - if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) { - jam(); - colPageptr.i = rlPageptr.i; - colPageptr.p = rlPageptr.p; - ptrCheck(colPageptr, cpagesize, page8); - checkoverfreelist(signal); - }//if -}//Dbacc::releaseLeftlist() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_RIGHTLIST */ -/* INPUT: */ -/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */ -/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */ -/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */ -/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */ -/* A PART IS RELEASED. */ -/* */ -/* OUTPUT: */ -/* NONE */ -/* */ -/* THE FREE LIST OF RIGHT FREE BUFFER IN THE PAGE WILL BE UPDATE. 
*/ -/* TURL_INDEX IS INDEX TO THE FIRST WORD IN THE RIGHT SIDE OF */ -/* THE BUFFER, WHICH IS THE LAST WORD IN THE BUFFER. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseRightlist(Signal* signal) -{ - Uint32 turlTmp1; - Uint32 turlTmp; - - /* --------------------------------------------------------------------------------- */ - /* IF A CONTAINER IS RELEASED AND NOT ONLY A PART THEN WE HAVE TO REMOVE IT */ - /* FROM THE LIST OF USED CONTAINERS IN THE PAGE. THIS IN ORDER TO ENSURE THAT */ - /* WE CAN FIND ALL LOCKED ELEMENTS DURING LOCAL CHECKPOINT. */ - /* --------------------------------------------------------------------------------- */ - if (trlRelCon == ZTRUE) { - jam(); - arrGuard(turlIndex, 2048); - trlHead = rlPageptr.p->word32[turlIndex]; - trlNextused = (trlHead >> 11) & 0x7f; - trlPrevused = (trlHead >> 18) & 0x7f; - if (trlNextused < ZEMPTYLIST) { - jam(); - turlTmp1 = (trlNextused << ZSHIFT_PLUS) - (trlNextused << ZSHIFT_MINUS); - turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfe03ffff; - dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlPrevused << 18)); - rlPageptr.p->word32[turlTmp1] = turlTmp | (trlPrevused << 18); - } else { - ndbrequire(trlNextused == ZEMPTYLIST); - jam(); - }//if - if (trlPrevused < ZEMPTYLIST) { - jam(); - turlTmp1 = (trlPrevused << ZSHIFT_PLUS) - (trlPrevused << ZSHIFT_MINUS); - turlTmp1 = turlTmp1 + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE); - turlTmp = rlPageptr.p->word32[turlTmp1] & 0xfffc07ff; - dbgWord32(rlPageptr, turlTmp1, turlTmp | (trlNextused << 11)); - rlPageptr.p->word32[turlTmp1] = turlTmp | (trlNextused << 11); - } else { - ndbrequire(trlPrevused == ZEMPTYLIST); - jam(); - /* --------------------------------------------------------------------------------- */ - /* WE ARE FIRST IN THE LIST AND THUS WE NEED TO UPDATE THE FIRST POINTER */ - /* OF THE RIGHT CONTAINER LIST. 
*/ - /* --------------------------------------------------------------------------------- */ - turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0xff80ffff; - dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, turlTmp | (trlNextused << 16)); - rlPageptr.p->word32[ZPOS_EMPTY_LIST] = turlTmp | (trlNextused << 16); - }//if - }//if - dbgWord32(rlPageptr, turlIndex + 1, ZEMPTYLIST); - arrGuard(turlIndex + 1, 2048); - rlPageptr.p->word32[turlIndex + 1] = ZEMPTYLIST; - turlTmp1 = rlPageptr.p->word32[ZPOS_EMPTY_LIST] & 0x7f; - dbgWord32(rlPageptr, turlIndex, turlTmp1); - arrGuard(turlIndex, 2048); - rlPageptr.p->word32[turlIndex] = turlTmp1; - if (turlTmp1 < ZEMPTYLIST) { - jam(); - turlTmp = (turlTmp1 << ZSHIFT_PLUS) - (turlTmp1 << ZSHIFT_MINUS); - turlTmp = turlTmp + ((ZHEAD_SIZE + ZBUF_SIZE) - (ZCON_HEAD_SIZE - 1)); - dbgWord32(rlPageptr, turlTmp, trlPageindex); - rlPageptr.p->word32[turlTmp] = trlPageindex; /* UPDATES PREV POINTER IN THE NEXT FREE */ - } else { - ndbrequire(turlTmp1 == ZEMPTYLIST); - }//if - turlTmp = rlPageptr.p->word32[ZPOS_EMPTY_LIST]; - dbgWord32(rlPageptr, ZPOS_EMPTY_LIST, ((turlTmp >> 7) << 7) | trlPageindex); - rlPageptr.p->word32[ZPOS_EMPTY_LIST] = ((turlTmp >> 7) << 7) | trlPageindex; - dbgWord32(rlPageptr, ZPOS_ALLOC_CONTAINERS, rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1); - rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1; - ndbrequire(rlPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL); - if (((rlPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) { - jam(); - colPageptr.i = rlPageptr.i; - colPageptr.p = rlPageptr.p; - checkoverfreelist(signal); - }//if -}//Dbacc::releaseRightlist() - -/* --------------------------------------------------------------------------------- */ -/* CHECKOVERFREELIST */ -/* INPUT: COL_PAGEPTR, POINTER OF AN OVERFLOW PAGE RECORD. */ -/* DESCRIPTION: CHECKS IF THE PAGE HAVE TO PUT IN FREE LIST OF OVER FLOW */ -/* PAGES. WHEN IT HAVE TO, AN OVERFLOW REC PTR WILL BE ALLOCATED */ -/* TO KEEP NFORMATION ABOUT THE PAGE. 
*/ -/* --------------------------------------------------------------------------------- */ -void Dbacc::checkoverfreelist(Signal* signal) -{ - Uint32 tcolTmp; - - tcolTmp = colPageptr.p->word32[ZPOS_ALLOC_CONTAINERS]; - if (tcolTmp <= ZFREE_LIMIT) { - if (tcolTmp == 0) { - jam(); - ropPageptr = colPageptr; - releaseOverpage(signal); - } else { - jam(); - if (colPageptr.p->word32[ZPOS_OVERFLOWREC] == RNIL) { - ndbrequire(cfirstfreeoverrec != RNIL); - jam(); - seizeOverRec(signal); - sorOverflowRecPtr.p->dirindex = colPageptr.p->word32[ZPOS_PAGE_ID]; - sorOverflowRecPtr.p->overpage = colPageptr.i; - dbgWord32(colPageptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i); - colPageptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i; - porOverflowRecPtr = sorOverflowRecPtr; - putOverflowRecInFrag(signal); - }//if - }//if - }//if -}//Dbacc::checkoverfreelist() - -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* END OF DELETE MODULE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* */ -/* COMMIT AND ABORT MODULE */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ABORT_OPERATION */ -/*DESCRIPTION: AN OPERATION RECORD CAN BE IN A LOCK QUEUE OF AN ELEMENT OR */ -/*OWNS THE LOCK. BY THIS SUBROUTINE THE LOCK STATE OF THE OPERATION WILL */ -/*BE CHECKED. THE OPERATION RECORD WILL BE REMOVED FROM THE QUEUE IF IT */ -/*BELONGED TO ANY ONE, OTHERWISE THE ELEMENT HEAD WILL BE UPDATED. */ -/* ------------------------------------------------------------------------- */ - -/** - * - * P0 - P1 - P2 - P3 - * S0 - * S1 - * S2 - */ -void -Dbacc::abortParallelQueueOperation(Signal* signal, OperationrecPtr opPtr) -{ - jam(); - OperationrecPtr nextP; - OperationrecPtr prevP; - OperationrecPtr loPtr; - - Uint32 opbits = opPtr.p->m_op_bits; - Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; - nextP.i = opPtr.p->nextParallelQue; - prevP.i = opPtr.p->prevParallelQue; - loPtr.i = opPtr.p->m_lock_owner_ptr_i; - - ndbassert(! 
(opbits & Operationrec::OP_LOCK_OWNER)); - ndbassert(opbits & Operationrec::OP_RUN_QUEUE); - - ptrCheckGuard(prevP, coprecsize, operationrec); - ndbassert(prevP.p->nextParallelQue == opPtr.i); - prevP.p->nextParallelQue = nextP.i; - - if (nextP.i != RNIL) - { - ptrCheckGuard(nextP, coprecsize, operationrec); - ndbassert(nextP.p->prevParallelQue == opPtr.i); - nextP.p->prevParallelQue = prevP.i; - } - else if (prevP.i != loPtr.i) - { - jam(); - ptrCheckGuard(loPtr, coprecsize, operationrec); - ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - ndbassert(loPtr.p->m_lo_last_parallel_op_ptr_i == opPtr.i); - loPtr.p->m_lo_last_parallel_op_ptr_i = prevP.i; - prevP.p->m_lock_owner_ptr_i = loPtr.i; - - /** - * Abort P3...check start next - */ - startNext(signal, prevP); - validate_lock_queue(prevP); - return; - } - else - { - jam(); - /** - * P0 - P1 - * - * Abort P1, check start next - */ - ndbassert(prevP.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - prevP.p->m_lo_last_parallel_op_ptr_i = RNIL; - startNext(signal, prevP); - validate_lock_queue(prevP); - return; - } - - /** - * Abort P1/P2 - */ - if (opbits & Operationrec::OP_LOCK_MODE) - { - Uint32 nextbits = nextP.p->m_op_bits; - while ((nextbits & Operationrec::OP_LOCK_MODE) == 0) - { - ndbassert(nextbits & Operationrec::OP_ACC_LOCK_MODE); - nextbits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; - nextP.p->m_op_bits = nextbits; - - if (nextP.p->nextParallelQue != RNIL) - { - nextP.i = nextP.p->nextParallelQue; - ptrCheckGuard(nextP, coprecsize, operationrec); - nextbits = nextP.p->m_op_bits; - } - else - { - break; - } - } - } - - /** - * Abort P1, P2 - */ - if (opstate == Operationrec::OP_STATE_RUNNING) - { - jam(); - startNext(signal, prevP); - validate_lock_queue(prevP); - return; - } - - ndbassert(opstate == Operationrec::OP_STATE_EXECUTED || - opstate == Operationrec::OP_STATE_WAITING); - - /** - * Scan to last of run queue - */ - while (nextP.p->nextParallelQue != RNIL) - { - jam(); - nextP.i = nextP.p->nextParallelQue; - ptrCheckGuard(nextP, coprecsize, operationrec); - } - -#ifdef VM_TRACE - loPtr.i = nextP.p->m_lock_owner_ptr_i; - ptrCheckGuard(loPtr, coprecsize, operationrec); - ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - ndbassert(loPtr.p->m_lo_last_parallel_op_ptr_i == nextP.i); -#endif - startNext(signal, nextP); - validate_lock_queue(nextP); - - return; -} - -void -Dbacc::abortSerieQueueOperation(Signal* signal, OperationrecPtr opPtr) -{ - jam(); - OperationrecPtr prevS, nextS; - OperationrecPtr prevP, nextP; - OperationrecPtr loPtr; - - Uint32 opbits = opPtr.p->m_op_bits; - - prevS.i = opPtr.p->prevSerialQue; - nextS.i = opPtr.p->nextSerialQue; - - prevP.i = opPtr.p->prevParallelQue; - nextP.i = opPtr.p->nextParallelQue; - - ndbassert((opbits & Operationrec::OP_LOCK_OWNER) == 0); - ndbassert((opbits & Operationrec::OP_RUN_QUEUE) == 0); - - if (prevP.i != RNIL) - { - /** - * We're not list head... 
- */ - ptrCheckGuard(prevP, coprecsize, operationrec); - ndbassert(prevP.p->nextParallelQue == opPtr.i); - prevP.p->nextParallelQue = nextP.i; - - if (nextP.i != RNIL) - { - ptrCheckGuard(nextP, coprecsize, operationrec); - ndbassert(nextP.p->prevParallelQue == opPtr.i); - ndbassert((nextP.p->m_op_bits & Operationrec::OP_STATE_MASK) == - Operationrec::OP_STATE_WAITING); - nextP.p->prevParallelQue = prevP.i; - - if ((prevP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE) == 0 && - opbits & Operationrec::OP_LOCK_MODE) - { - /** - * Scan right in parallel queue to fix OP_ACC_LOCK_MODE - */ - while ((nextP.p->m_op_bits & Operationrec::OP_LOCK_MODE) == 0) - { - ndbassert(nextP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); - nextP.p->m_op_bits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; - nextP.i = nextP.p->nextParallelQue; - if (nextP.i == RNIL) - break; - ptrCheckGuard(nextP, coprecsize, operationrec); - } - } - } - validate_lock_queue(prevP); - return; - } - else - { - /** - * We're a list head - */ - ptrCheckGuard(prevS, coprecsize, operationrec); - ndbassert(prevS.p->nextSerialQue == opPtr.i); - - if (nextP.i != RNIL) - { - /** - * Promote nextP to list head - */ - ptrCheckGuard(nextP, coprecsize, operationrec); - ndbassert(nextP.p->prevParallelQue == opPtr.i); - prevS.p->nextSerialQue = nextP.i; - nextP.p->prevParallelQue = RNIL; - nextP.p->nextSerialQue = nextS.i; - if (nextS.i != RNIL) - { - jam(); - ptrCheckGuard(nextS, coprecsize, operationrec); - ndbassert(nextS.p->prevSerialQue == opPtr.i); - nextS.p->prevSerialQue = nextP.i; - validate_lock_queue(prevS); - return; - } - else - { - // nextS is RNIL, i.e we're last in serie queue... - // we must update lockOwner.m_lo_last_serial_op_ptr_i - loPtr = prevS; - while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) - { - loPtr.i = loPtr.p->prevSerialQue; - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - ndbassert(loPtr.p->m_lo_last_serial_op_ptr_i == opPtr.i); - loPtr.p->m_lo_last_serial_op_ptr_i = nextP.i; - validate_lock_queue(loPtr); - return; - } - } - - if (nextS.i == RNIL) - { - /** - * Abort S2 - */ - - // nextS is RNIL, i.e we're last in serie queue... 
- // and we have no parallel queue, - // we must update lockOwner.m_lo_last_serial_op_ptr_i - prevS.p->nextSerialQue = RNIL; - - loPtr = prevS; - while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) - { - loPtr.i = loPtr.p->prevSerialQue; - ptrCheckGuard(loPtr, coprecsize, operationrec); - } - ndbassert(loPtr.p->m_lo_last_serial_op_ptr_i == opPtr.i); - if (prevS.i != loPtr.i) - { - jam(); - loPtr.p->m_lo_last_serial_op_ptr_i = prevS.i; - } - else - { - loPtr.p->m_lo_last_serial_op_ptr_i = RNIL; - } - validate_lock_queue(loPtr); - } - else if (nextP.i == RNIL) - { - ptrCheckGuard(nextS, coprecsize, operationrec); - ndbassert(nextS.p->prevSerialQue == opPtr.i); - prevS.p->nextSerialQue = nextS.i; - nextS.p->prevSerialQue = prevS.i; - - if (prevS.p->m_op_bits & Operationrec::OP_LOCK_OWNER) - { - /** - * Abort S0 - */ - OperationrecPtr lastOp; - lastOp.i = prevS.p->m_lo_last_parallel_op_ptr_i; - if (lastOp.i != RNIL) - { - jam(); - ptrCheckGuard(lastOp, coprecsize, operationrec); - ndbassert(lastOp.p->m_lock_owner_ptr_i == prevS.i); - } - else - { - jam(); - lastOp = prevS; - } - startNext(signal, lastOp); - validate_lock_queue(lastOp); - } - else - { - validate_lock_queue(prevS); - } - } - } -} - - -void Dbacc::abortOperation(Signal* signal) -{ - Uint32 opbits = operationRecPtr.p->m_op_bits; - - validate_lock_queue(operationRecPtr); - - if (opbits & Operationrec::OP_LOCK_OWNER) - { - takeOutLockOwnersList(signal, operationRecPtr); - opbits &= ~(Uint32)Operationrec::OP_LOCK_OWNER; - if (opbits & Operationrec::OP_INSERT_IS_DONE) - { - jam(); - opbits |= Operationrec::OP_ELEMENT_DISAPPEARED; - }//if - operationRecPtr.p->m_op_bits = opbits; - const bool queue = (operationRecPtr.p->nextParallelQue != RNIL || - operationRecPtr.p->nextSerialQue != RNIL); - - if (queue) - { - jam(); - release_lockowner(signal, operationRecPtr, false); - } - else - { - /* ------------------------------------------------------------------- - * WE ARE OWNER OF THE LOCK AND NO OTHER OPERATIONS ARE QUEUED. - * IF INSERT OR STANDBY WE DELETE THE ELEMENT OTHERWISE WE REMOVE - * THE LOCK FROM THE ELEMENT. 
- * ------------------------------------------------------------------ */ - if ((opbits & Operationrec::OP_ELEMENT_DISAPPEARED) == 0) - { - jam(); - Page8Ptr aboPageidptr; - Uint32 taboElementptr; - Uint32 tmp2Olq; - - taboElementptr = operationRecPtr.p->elementPointer; - aboPageidptr.i = operationRecPtr.p->elementPage; - tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart, - operationRecPtr.p->scanBits); - ptrCheckGuard(aboPageidptr, cpagesize, page8); - dbgWord32(aboPageidptr, taboElementptr, tmp2Olq); - arrGuard(taboElementptr, 2048); - aboPageidptr.p->word32[taboElementptr] = tmp2Olq; - return; - } - else - { - jam(); - commitdelete(signal); - }//if - }//if - } - else if (opbits & Operationrec::OP_RUN_QUEUE) - { - abortParallelQueueOperation(signal, operationRecPtr); - } - else - { - abortSerieQueueOperation(signal, operationRecPtr); - } -} - -void -Dbacc::commitDeleteCheck() -{ - OperationrecPtr opPtr; - OperationrecPtr lastOpPtr; - OperationrecPtr deleteOpPtr; - Uint32 elementDeleted = 0; - bool deleteCheckOngoing = true; - Uint32 hashValue = 0; - lastOpPtr = operationRecPtr; - opPtr.i = operationRecPtr.p->nextParallelQue; - while (opPtr.i != RNIL) { - jam(); - ptrCheckGuard(opPtr, coprecsize, operationrec); - lastOpPtr = opPtr; - opPtr.i = opPtr.p->nextParallelQue; - }//while - deleteOpPtr = lastOpPtr; - do { - Uint32 opbits = deleteOpPtr.p->m_op_bits; - Uint32 op = opbits & Operationrec::OP_MASK; - if (op == ZDELETE) { - jam(); - /* ------------------------------------------------------------------- - * IF THE CURRENT OPERATION TO BE COMMITTED IS A DELETE OPERATION DUE TO - * A SCAN-TAKEOVER THE ACTUAL DELETE WILL BE PERFORMED BY THE PREVIOUS - * OPERATION (SCAN) IN THE PARALLEL QUEUE WHICH OWNS THE LOCK. - * THE PROBLEM IS THAT THE SCAN OPERATION DOES NOT HAVE A HASH VALUE - * ASSIGNED TO IT SO WE COPY IT FROM THIS OPERATION. - * - * WE ASSUME THAT THIS SOLUTION WILL WORK BECAUSE THE ONLY WAY A - * SCAN CAN PERFORM A DELETE IS BY BEING FOLLOWED BY A NORMAL - * DELETE-OPERATION THAT HAS A HASH VALUE. - * ----------------------------------------------------------------- */ - hashValue = deleteOpPtr.p->hashValue; - elementDeleted = Operationrec::OP_ELEMENT_DISAPPEARED; - deleteCheckOngoing = false; - } else if (op == ZREAD || op == ZSCAN_OP) { - /* ------------------------------------------------------------------- - * We are trying to find out whether the commit will in the end delete - * the tuple. Normally the delete will be the last operation in the - * list of operations on this. It is however possible to issue reads - * and scans in the same savepoint as the delete operation was issued - * and these can end up after the delete in the list of operations - * in the parallel queue. Thus if we discover a read or a scan - * we have to continue scanning the list looking for a delete operation. - */ - deleteOpPtr.i = deleteOpPtr.p->prevParallelQue; - if (opbits & Operationrec::OP_LOCK_OWNER) { - jam(); - deleteCheckOngoing = false; - } else { - jam(); - ptrCheckGuard(deleteOpPtr, coprecsize, operationrec); - }//if - } else { - jam(); - /* ------------------------------------------------------------------ */ - /* Finding an UPDATE or INSERT before finding a DELETE - * means we cannot be deleting as the end result of this transaction. 
- */ - deleteCheckOngoing = false; - }//if - } while (deleteCheckOngoing); - opPtr = lastOpPtr; - do { - jam(); - opPtr.p->m_op_bits |= Operationrec::OP_COMMIT_DELETE_CHECK; - if (elementDeleted) { - jam(); - opPtr.p->m_op_bits |= elementDeleted; - opPtr.p->hashValue = hashValue; - }//if - opPtr.i = opPtr.p->prevParallelQue; - if (opPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) { - jam(); - break; - }//if - ptrCheckGuard(opPtr, coprecsize, operationrec); - } while (true); -}//Dbacc::commitDeleteCheck() - -/* ------------------------------------------------------------------------- */ -/* COMMIT_OPERATION */ -/* INPUT: OPERATION_REC_PTR, POINTER TO AN OPERATION RECORD */ -/* DESCRIPTION: THE OPERATION RECORD WILL BE TAKE OUT OF ANY LOCK QUEUE. */ -/* IF IT OWNS THE ELEMENT LOCK. HEAD OF THE ELEMENT WILL BE UPDATED. */ -/* ------------------------------------------------------------------------- */ -void Dbacc::commitOperation(Signal* signal) -{ - validate_lock_queue(operationRecPtr); - - Uint32 opbits = operationRecPtr.p->m_op_bits; - Uint32 op = opbits & Operationrec::OP_MASK; - ndbrequire((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_EXECUTED); - if ((opbits & Operationrec::OP_COMMIT_DELETE_CHECK) == 0 && - (op != ZREAD && op != ZSCAN_OP)) - { - jam(); - /* This method is used to check whether the end result of the transaction - will be to delete the tuple. In this case all operation will be marked - with elementIsDisappeared = true to ensure that the last operation - committed will remove the tuple. We only run this once per transaction - (commitDeleteCheckFlag = true if performed earlier) and we don't - execute this code when committing a scan operation since committing - a scan operation only means that the scan is continuing and the scan - lock is released. - */ - commitDeleteCheck(); - opbits = operationRecPtr.p->m_op_bits; - }//if - - ndbassert(opbits & Operationrec::OP_RUN_QUEUE); - - if (opbits & Operationrec::OP_LOCK_OWNER) - { - takeOutLockOwnersList(signal, operationRecPtr); - opbits &= ~(Uint32)Operationrec::OP_LOCK_OWNER; - operationRecPtr.p->m_op_bits = opbits; - - const bool queue = (operationRecPtr.p->nextParallelQue != RNIL || - operationRecPtr.p->nextSerialQue != RNIL); - - if (!queue && (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) == 0) - { - /* - * This is the normal path through the commit for operations owning the - * lock without any queues and not a delete operation. - */ - Page8Ptr coPageidptr; - Uint32 tcoElementptr; - Uint32 tmp2Olq; - - coPageidptr.i = operationRecPtr.p->elementPage; - tcoElementptr = operationRecPtr.p->elementPointer; - tmp2Olq = ElementHeader::setUnlocked(operationRecPtr.p->hashvaluePart, - operationRecPtr.p->scanBits); - ptrCheckGuard(coPageidptr, cpagesize, page8); - dbgWord32(coPageidptr, tcoElementptr, tmp2Olq); - arrGuard(tcoElementptr, 2048); - coPageidptr.p->word32[tcoElementptr] = tmp2Olq; - return; - } - else if (queue) - { - jam(); - /* - * The case when there is a queue lined up. - * Release the lock and pass it to the next operation lined up. - */ - release_lockowner(signal, operationRecPtr, true); - return; - } - else - { - jam(); - /* - * No queue and elementIsDisappeared is true. - * We perform the actual delete operation. - */ - commitdelete(signal); - return; - }//if - } - else - { - /** - * THE OPERATION DOES NOT OWN THE LOCK. IT MUST BE IN A LOCK QUEUE OF THE - * ELEMENT. 
- */ - jam(); - OperationrecPtr prev, next, lockOwner; - prev.i = operationRecPtr.p->prevParallelQue; - next.i = operationRecPtr.p->nextParallelQue; - lockOwner.i = operationRecPtr.p->m_lock_owner_ptr_i; - ptrCheckGuard(prev, coprecsize, operationrec); - - prev.p->nextParallelQue = next.i; - if (next.i != RNIL) - { - jam(); - ptrCheckGuard(next, coprecsize, operationrec); - next.p->prevParallelQue = prev.i; - } - else if (prev.p->m_op_bits & Operationrec::OP_LOCK_OWNER) - { - jam(); - ndbassert(lockOwner.i == prev.i); - prev.p->m_lo_last_parallel_op_ptr_i = RNIL; - next = prev; - } - else - { - jam(); - /** - * Last operation in parallell queue - */ - ndbassert(prev.i != lockOwner.i); - ptrCheckGuard(lockOwner, coprecsize, operationrec); - ndbassert(lockOwner.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - lockOwner.p->m_lo_last_parallel_op_ptr_i = prev.i; - prev.p->m_lock_owner_ptr_i = lockOwner.i; - next = prev; - } - - /** - * Check possible lock upgrade - */ - if(opbits & Operationrec::OP_ACC_LOCK_MODE) - { - jam(); - - /** - * Not lock owner...committing a exclusive operation... - * - * e.g - * T1(R) T1(X) - * T2(R/X) - * - * If T1(X) commits T2(R/X) is not supposed to run - * as T1(R) should also commit - * - * e.g - * T1(R) T1(X) T1*(R) - * T2(R/X) - * - * If T1*(R) commits T2(R/X) is not supposed to run - * as T1(R),T2(x) should also commit - */ - validate_lock_queue(prev); - return; - } - - /** - * We committed a shared lock - * Check if we can start next... - */ - while(next.p->nextParallelQue != RNIL) - { - jam(); - next.i = next.p->nextParallelQue; - ptrCheckGuard(next, coprecsize, operationrec); - - if ((next.p->m_op_bits & Operationrec::OP_STATE_MASK) != - Operationrec::OP_STATE_EXECUTED) - { - jam(); - return; - } - } - - startNext(signal, next); - - validate_lock_queue(prev); - } -}//Dbacc::commitOperation() - -void -Dbacc::release_lockowner(Signal* signal, OperationrecPtr opPtr, bool commit) -{ - OperationrecPtr nextP; - OperationrecPtr nextS; - OperationrecPtr newOwner; - OperationrecPtr lastP; - - Uint32 opbits = opPtr.p->m_op_bits; - nextP.i = opPtr.p->nextParallelQue; - nextS.i = opPtr.p->nextSerialQue; - lastP.i = opPtr.p->m_lo_last_parallel_op_ptr_i; - Uint32 lastS = opPtr.p->m_lo_last_serial_op_ptr_i; - - ndbassert(lastP.i != RNIL || lastS != RNIL); - ndbassert(nextP.i != RNIL || nextS.i != RNIL); - - enum { - NOTHING, - CHECK_LOCK_UPGRADE, - START_NEW - } action = NOTHING; - - if (nextP.i != RNIL) - { - jam(); - ptrCheckGuard(nextP, coprecsize, operationrec); - newOwner = nextP; - - if (lastP.i == newOwner.i) - { - newOwner.p->m_lo_last_parallel_op_ptr_i = RNIL; - lastP = nextP; - } - else - { - ptrCheckGuard(lastP, coprecsize, operationrec); - newOwner.p->m_lo_last_parallel_op_ptr_i = lastP.i; - lastP.p->m_lock_owner_ptr_i = newOwner.i; - } - - newOwner.p->m_lo_last_serial_op_ptr_i = lastS; - newOwner.p->nextSerialQue = nextS.i; - - if (nextS.i != RNIL) - { - jam(); - ptrCheckGuard(nextS, coprecsize, operationrec); - ndbassert(nextS.p->prevSerialQue == opPtr.i); - nextS.p->prevSerialQue = newOwner.i; - } - - if (commit) - { - if ((opbits & Operationrec::OP_ACC_LOCK_MODE) == ZREADLOCK) - { - jam(); - /** - * Lock owner...committing a shared operation... 
- * this can be a lock upgrade - * - * e.g - * T1(R) T2(R) - * T2(X) - * - * If T1(R) commits T2(X) is supposed to run - * - * e.g - * T1(X) T1(R) - * T2(R) - * - * If T1(X) commits, then T1(R) _should_ commit before T2(R) is - * allowed to proceed - */ - action = CHECK_LOCK_UPGRADE; - } - else - { - jam(); - newOwner.p->m_op_bits |= Operationrec::OP_LOCK_MODE; - } - } - else - { - /** - * Aborting an operation can *always* lead to lock upgrade - */ - action = CHECK_LOCK_UPGRADE; - Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; - if (opstate != Operationrec::OP_STATE_EXECUTED) - { - ndbassert(opstate == Operationrec::OP_STATE_RUNNING); - if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) - { - jam(); - report_dealloc(signal, opPtr.p); - newOwner.p->localdata[0] = ~(Uint32)0; - } - else - { - jam(); - newOwner.p->localdata[0] = opPtr.p->localdata[0]; - newOwner.p->localdata[1] = opPtr.p->localdata[1]; - } - action = START_NEW; - } - - /** - * Update ACC_LOCK_MODE - */ - if (opbits & Operationrec::OP_LOCK_MODE) - { - Uint32 nextbits = nextP.p->m_op_bits; - while ((nextbits & Operationrec::OP_LOCK_MODE) == 0) - { - ndbassert(nextbits & Operationrec::OP_ACC_LOCK_MODE); - nextbits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; - nextP.p->m_op_bits = nextbits; - - if (nextP.p->nextParallelQue != RNIL) - { - nextP.i = nextP.p->nextParallelQue; - ptrCheckGuard(nextP, coprecsize, operationrec); - nextbits = nextP.p->m_op_bits; - } - else - { - break; - } - } - } - } - } - else - { - jam(); - ptrCheckGuard(nextS, coprecsize, operationrec); - newOwner = nextS; - - newOwner.p->m_op_bits |= Operationrec::OP_RUN_QUEUE; - - if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) - { - report_dealloc(signal, opPtr.p); - newOwner.p->localdata[0] = ~(Uint32)0; - } - else - { - jam(); - newOwner.p->localdata[0] = opPtr.p->localdata[0]; - newOwner.p->localdata[1] = opPtr.p->localdata[1]; - } - - lastP = newOwner; - while (lastP.p->nextParallelQue != RNIL) - { - lastP.i = lastP.p->nextParallelQue; - ptrCheckGuard(lastP, coprecsize, operationrec); - lastP.p->m_op_bits |= Operationrec::OP_RUN_QUEUE; - } - - if (newOwner.i != lastP.i) - { - jam(); - newOwner.p->m_lo_last_parallel_op_ptr_i = lastP.i; - } - else - { - jam(); - newOwner.p->m_lo_last_parallel_op_ptr_i = RNIL; - } - - if (newOwner.i != lastS) - { - jam(); - newOwner.p->m_lo_last_serial_op_ptr_i = lastS; - } - else - { - jam(); - newOwner.p->m_lo_last_serial_op_ptr_i = RNIL; - } - - action = START_NEW; - } - - insertLockOwnersList(signal, newOwner); - - /** - * Copy op info, and store op in element - * - */ - { - newOwner.p->elementPage = opPtr.p->elementPage; - newOwner.p->elementIsforward = opPtr.p->elementIsforward; - newOwner.p->elementPointer = opPtr.p->elementPointer; - newOwner.p->elementContainer = opPtr.p->elementContainer; - newOwner.p->scanBits = opPtr.p->scanBits; - newOwner.p->hashvaluePart = opPtr.p->hashvaluePart; - newOwner.p->m_op_bits |= (opbits & Operationrec::OP_ELEMENT_DISAPPEARED); - if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) - { - /* ------------------------------------------------------------------- */ - // If the elementIsDisappeared is set then we know that the - // hashValue is also set since it always originates from a - // committing abort or a aborting insert. - // Scans do not initialise the hashValue and must have this - // value initialised if they are - // to successfully commit the delete. 
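A few lines below this comment the new lock owner's identity is written back into the element by ElementHeader::setLocked(newOwner.i), the counterpart of the ElementHeader::setUnlocked() stores seen in commitOperation() and abortOperation(). The toy header below is only meant to illustrate that locked/unlocked tagging idea; its bit layout is invented and the accessor names merely mimic the real ElementHeader interface.

    #include <cassert>
    #include <cstdint>

    // Toy 32-bit element header: bit 0 tags the state, the remaining bits hold
    // either the owning operation record index (locked) or scan bits plus a
    // hash-value fragment (unlocked). Purely illustrative, not the real layout.
    namespace toy_element_header {
      inline uint32_t setLocked(uint32_t opRecIndex)   { return (opRecIndex << 1) | 1u; }
      inline uint32_t setUnlocked(uint32_t hashPart, uint32_t scanBits)
      { return (hashPart << 16) | (scanBits << 1); }
      inline bool     getUnlocked(uint32_t h)          { return (h & 1u) == 0; }
      inline uint32_t getOpPtrI(uint32_t h)            { assert(!getUnlocked(h)); return h >> 1; }
      inline uint32_t getHashValuePart(uint32_t h)     { assert(getUnlocked(h)); return h >> 16; }
    }

    int main() {
      uint32_t h = toy_element_header::setLocked(42);
      assert(!toy_element_header::getUnlocked(h) && toy_element_header::getOpPtrI(h) == 42);
      h = toy_element_header::setUnlocked(0x1234, 3);
      assert(toy_element_header::getUnlocked(h) && toy_element_header::getHashValuePart(h) == 0x1234);
      return 0;
    }

The real encoding is defined elsewhere in Dbacc; the sketch only shows why a single word per element can serve both roles, naming the owning operation while locked and carrying hash/scan state while unlocked.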
- /* ------------------------------------------------------------------- */ - jam(); - newOwner.p->hashValue = opPtr.p->hashValue; - }//if - - Page8Ptr pagePtr; - pagePtr.i = newOwner.p->elementPage; - ptrCheckGuard(pagePtr, cpagesize, page8); - const Uint32 tmp = ElementHeader::setLocked(newOwner.i); - arrGuard(newOwner.p->elementPointer, 2048); - pagePtr.p->word32[newOwner.p->elementPointer] = tmp; - } - - switch(action){ - case NOTHING: - validate_lock_queue(newOwner); - return; - case START_NEW: - startNew(signal, newOwner); - validate_lock_queue(newOwner); - return; - case CHECK_LOCK_UPGRADE: - startNext(signal, lastP); - validate_lock_queue(lastP); - break; - } - -} - -void -Dbacc::startNew(Signal* signal, OperationrecPtr newOwner) -{ - OperationrecPtr save = operationRecPtr; - operationRecPtr = newOwner; - - Uint32 opbits = newOwner.p->m_op_bits; - Uint32 op = opbits & Operationrec::OP_MASK; - Uint32 opstate = (opbits & Operationrec::OP_STATE_MASK); - ndbassert(opstate == Operationrec::OP_STATE_WAITING); - ndbassert(opbits & Operationrec::OP_LOCK_OWNER); - const bool deleted = opbits & Operationrec::OP_ELEMENT_DISAPPEARED; - Uint32 errCode = 0; - - opbits &= opbits & ~(Uint32)Operationrec::OP_STATE_MASK; - opbits |= Operationrec::OP_STATE_RUNNING; - - if (op == ZSCAN_OP && (opbits & Operationrec::OP_LOCK_REQ) == 0) - goto scan; - - if (deleted) - { - jam(); - if (op != ZINSERT && op != ZWRITE) - { - errCode = ZREAD_ERROR; - goto ref; - } - - opbits &= ~(Uint32)Operationrec::OP_MASK; - opbits &= ~(Uint32)Operationrec::OP_ELEMENT_DISAPPEARED; - opbits |= (op = ZINSERT); - opbits |= Operationrec::OP_INSERT_IS_DONE; - goto conf; - } - else if (op == ZINSERT) - { - jam(); - errCode = ZWRITE_ERROR; - goto ref; - } - else if (op == ZWRITE) - { - jam(); - opbits &= ~(Uint32)Operationrec::OP_MASK; - opbits |= (op = ZUPDATE); - goto conf; - } - -conf: - newOwner.p->m_op_bits = opbits; - - sendAcckeyconf(signal); - sendSignal(newOwner.p->userblockref, GSN_ACCKEYCONF, - signal, 6, JBB); - - operationRecPtr = save; - return; - -scan: - jam(); - newOwner.p->m_op_bits = opbits; - - takeOutScanLockQueue(newOwner.p->scanRecPtr); - putReadyScanQueue(signal, newOwner.p->scanRecPtr); - - operationRecPtr = save; - return; - -ref: - newOwner.p->m_op_bits = opbits; - - signal->theData[0] = newOwner.p->userptr; - signal->theData[1] = errCode; - sendSignal(newOwner.p->userblockref, GSN_ACCKEYREF, signal, - 2, JBB); - - operationRecPtr = save; - return; -} - -/** - * takeOutLockOwnersList - * - * Description: Take out an operation from the doubly linked - * lock owners list on the fragment. - * - */ -void Dbacc::takeOutLockOwnersList(Signal* signal, - const OperationrecPtr& outOperPtr) -{ - const Uint32 Tprev = outOperPtr.p->prevLockOwnerOp; - const Uint32 Tnext = outOperPtr.p->nextLockOwnerOp; -#ifdef VM_TRACE - // Check that operation is already in the list - OperationrecPtr tmpOperPtr; - bool inList = false; - tmpOperPtr.i = fragrecptr.p->lockOwnersList; - while (tmpOperPtr.i != RNIL){ - ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); - if (tmpOperPtr.i == outOperPtr.i) - inList = true; - tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp; - } - ndbrequire(inList == true); -#endif - - ndbassert(outOperPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); - - // Fast path through the code for the common case. 
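The removal routine continuing below (takeOutLockOwnersList) unlinks an operation record from the fragment's doubly linked lock-owner list, where links are record indexes with RNIL meaning "none" and the list head sits in the fragment record. The following reduced sketch shows the same three cases (sole element, unlink behind a predecessor, unlink of the head) using hypothetical plain-pointer types instead of the index/ptrCheckGuard scheme.

    #include <cassert>

    // Hypothetical simplified records: the real code stores indexes (RNIL = "none")
    // and guards every dereference with ptrCheckGuard/arrGuard; plain pointers are
    // used here only to keep the unlink cases easy to see.
    struct PlainOp {
      PlainOp* prevLockOwner = nullptr;
      PlainOp* nextLockOwner = nullptr;
    };
    struct PlainFrag {
      PlainOp* lockOwnersList = nullptr;   // head of the intrusive list
    };

    static void takeOutLockOwners(PlainFrag& frag, PlainOp* op)
    {
      PlainOp* const prev = op->prevLockOwner;
      PlainOp* const next = op->nextLockOwner;
      if (prev == nullptr && next == nullptr) {        // common case: only element
        assert(frag.lockOwnersList == op);
        frag.lockOwnersList = nullptr;
        return;
      }
      if (prev != nullptr) prev->nextLockOwner = next;
      else                 frag.lockOwnersList = next; // op was the head
      if (next != nullptr) next->prevLockOwner = prev;
    }

    int main() {
      PlainFrag frag;
      PlainOp a, b;
      frag.lockOwnersList = &b;                        // list: b
      a.nextLockOwner = &b; b.prevLockOwner = &a;
      frag.lockOwnersList = &a;                        // list: a, b
      takeOutLockOwners(frag, &a);
      assert(frag.lockOwnersList == &b && b.prevLockOwner == nullptr);
      return 0;
    }

insertLockOwnersList() further down is the mirror image: it pushes the new owner at the head of the list and fixes the old head's back pointer.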
- if ((Tprev == RNIL) && (Tnext == RNIL)) { - ndbrequire(fragrecptr.p->lockOwnersList == outOperPtr.i); - fragrecptr.p->lockOwnersList = RNIL; - return; - } - - // Check previous operation - if (Tprev != RNIL) { - jam(); - arrGuard(Tprev, coprecsize); - operationrec[Tprev].nextLockOwnerOp = Tnext; - } else { - fragrecptr.p->lockOwnersList = Tnext; - }//if - - // Check next operation - if (Tnext == RNIL) { - return; - } else { - jam(); - arrGuard(Tnext, coprecsize); - operationrec[Tnext].prevLockOwnerOp = Tprev; - }//if - - return; -}//Dbacc::takeOutLockOwnersList() - -/** - * insertLockOwnersList - * - * Description: Insert an operation first in the dubly linked lock owners - * list on the fragment. - * - */ -void Dbacc::insertLockOwnersList(Signal* signal, - const OperationrecPtr& insOperPtr) -{ - OperationrecPtr tmpOperPtr; -#ifdef VM_TRACE - // Check that operation is not already in list - tmpOperPtr.i = fragrecptr.p->lockOwnersList; - while(tmpOperPtr.i != RNIL){ - ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); - ndbrequire(tmpOperPtr.i != insOperPtr.i); - tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp; - } -#endif - tmpOperPtr.i = fragrecptr.p->lockOwnersList; - - ndbrequire(! (insOperPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER)); - - insOperPtr.p->m_op_bits |= Operationrec::OP_LOCK_OWNER; - insOperPtr.p->prevLockOwnerOp = RNIL; - insOperPtr.p->nextLockOwnerOp = tmpOperPtr.i; - - fragrecptr.p->lockOwnersList = insOperPtr.i; - if (tmpOperPtr.i == RNIL) { - return; - } else { - jam(); - ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); - tmpOperPtr.p->prevLockOwnerOp = insOperPtr.i; - }//if -}//Dbacc::insertLockOwnersList() - - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF COMMIT AND ABORT MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* ALLOC_OVERFLOW_PAGE */ -/* DESCRIPTION: */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::allocOverflowPage(Signal* signal) -{ - DirRangePtr aopDirRangePtr; - DirectoryarrayPtr aopOverflowDirptr; - OverflowRecordPtr aopOverflowRecPtr; - Uint32 taopTmp1; - Uint32 taopTmp2; - Uint32 taopTmp3; - - tresult = 0; - if ((cfirstfreepage == RNIL) && - (cfreepage >= cpagesize)) { - jam(); - zpagesize_error("Dbacc::allocOverflowPage"); - tresult = ZPAGESIZE_ERROR; - return; - }//if - if (fragrecptr.p->firstFreeDirindexRec != RNIL) { - jam(); - /* FRAGRECPTR:FIRST_FREE_DIRINDEX_REC POINTS */ - /* TO THE FIRST ELEMENT IN A FREE LIST OF THE */ - /* DIRECTORY INDEX WICH HAVE NULL AS PAGE */ - aopOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec; - ptrCheckGuard(aopOverflowRecPtr, coverflowrecsize, overflowRecord); - troOverflowRecPtr.p = aopOverflowRecPtr.p; - takeRecOutOfFreeOverdir(signal); - } else if (cfirstfreeoverrec == RNIL) { - jam(); - tresult = ZOVER_REC_ERROR; - return; - } else if ((cfirstfreedir == RNIL) && - (cdirarraysize <= cdirmemory)) { - 
jam(); - tresult = ZDIRSIZE_ERROR; - return; - } else { - jam(); - seizeOverRec(signal); - aopOverflowRecPtr = sorOverflowRecPtr; - aopOverflowRecPtr.p->dirindex = fragrecptr.p->lastOverIndex; - }//if - aopOverflowRecPtr.p->nextOverRec = RNIL; - aopOverflowRecPtr.p->prevOverRec = RNIL; - fragrecptr.p->firstOverflowRec = aopOverflowRecPtr.i; - fragrecptr.p->lastOverflowRec = aopOverflowRecPtr.i; - taopTmp1 = aopOverflowRecPtr.p->dirindex; - aopDirRangePtr.i = fragrecptr.p->overflowdir; - taopTmp2 = taopTmp1 >> 8; - taopTmp3 = taopTmp1 & 0xff; - ptrCheckGuard(aopDirRangePtr, cdirrangesize, dirRange); - arrGuard(taopTmp2, 256); - if (aopDirRangePtr.p->dirArray[taopTmp2] == RNIL) { - jam(); - seizeDirectory(signal); - ndbrequire(tresult <= ZLIMIT_OF_ERROR); - aopDirRangePtr.p->dirArray[taopTmp2] = sdDirptr.i; - }//if - aopOverflowDirptr.i = aopDirRangePtr.p->dirArray[taopTmp2]; - seizePage(signal); - ndbrequire(tresult <= ZLIMIT_OF_ERROR); - ptrCheckGuard(aopOverflowDirptr, cdirarraysize, directoryarray); - aopOverflowDirptr.p->pagep[taopTmp3] = spPageptr.i; - tiopPageId = aopOverflowRecPtr.p->dirindex; - iopOverflowRecPtr = aopOverflowRecPtr; - iopPageptr = spPageptr; - initOverpage(signal); - aopOverflowRecPtr.p->overpage = spPageptr.i; - if (fragrecptr.p->lastOverIndex <= aopOverflowRecPtr.p->dirindex) { - jam(); - ndbrequire(fragrecptr.p->lastOverIndex == aopOverflowRecPtr.p->dirindex); - fragrecptr.p->lastOverIndex++; - }//if -}//Dbacc::allocOverflowPage() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* EXPAND/SHRINK MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/*EXPANDCHECK EXPAND BUCKET ORD */ -/* SENDER: ACC, LEVEL B */ -/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */ -/* DESCRIPTION: A BUCKET OF A FRAGMENT PAGE WILL BE EXPAND INTO TWO BUCKETS */ -/* ACCORDING TO LH3. */ -/* ******************--------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* EXPANDCHECK EXPAND BUCKET ORD */ -/* ******************------------------------------+ */ -/* SENDER: ACC, LEVEL B */ -/* A BUCKET OF THE FRAGMENT WILL */ -/* BE EXPANDED ACORDING TO LH3, */ -/* AND COMMIT TRANSACTION PROCESS */ -/* WILL BE CONTINUED */ -Uint32 Dbacc::checkScanExpand(Signal* signal) -{ - Uint32 Ti; - Uint32 TreturnCode = 0; - Uint32 TPageIndex; - Uint32 TDirInd; - Uint32 TSplit; - Uint32 TreleaseInd = 0; - Uint32 TreleaseScanBucket; - Uint32 TreleaseScanIndicator[4]; - DirectoryarrayPtr TDirptr; - DirRangePtr TDirRangePtr; - Page8Ptr TPageptr; - ScanRecPtr TscanPtr; - - TSplit = fragrecptr.p->p; - for (Ti = 0; Ti < 4; Ti++) { - TreleaseScanIndicator[Ti] = 0; - if (fragrecptr.p->scan[Ti] != RNIL) { - //------------------------------------------------------------- - // A scan is ongoing on this particular local fragment. We have - // to check its current state. 
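checkScanExpand(), to which this branch belongs, inspects the up to four scans registered on the fragment before a bucket split is allowed: a scan standing on the bucket to be split blocks it, a scan in its second lap (reprocessing merged buckets) blocks any structural change, and a completed scan does not block at all. Below is a compressed, illustrative form of that per-scan verdict, with invented enum and function names and without the scan-bit reset the real code performs via releaseScanBucket().

    #include <cstdint>

    enum class ScanLap { FirstLap, SecondLap, Completed };

    // One scan's verdict on splitting 'splitBucket': mirrors the state checks
    // the removed checkScanExpand() makes, nothing more.
    constexpr bool splitAllowed(ScanLap lap, uint32_t nextBucketIndex, uint32_t splitBucket)
    {
      return lap == ScanLap::Completed ||
             (lap == ScanLap::FirstLap && nextBucketIndex != splitBucket);
    }

    static_assert(!splitAllowed(ScanLap::FirstLap, 5, 5), "scan stands on the split bucket");
    static_assert( splitAllowed(ScanLap::FirstLap, 7, 5), "scan is elsewhere in lap one");
    static_assert(!splitAllowed(ScanLap::SecondLap, 7, 5), "no splits during the second lap");
    static_assert( splitAllowed(ScanLap::Completed, 5, 5), "finished scans do not block");

    int main() { return 0; }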
- //------------------------------------------------------------- - TscanPtr.i = fragrecptr.p->scan[Ti]; - ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); - if (TscanPtr.p->activeLocalFrag == fragrecptr.i) { - if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { - if (TSplit == TscanPtr.p->nextBucketIndex) { - jam(); - //------------------------------------------------------------- - // We are currently scanning this bucket. We cannot split it - // simultaneously with the scan. We have to pass this offer for - // splitting the bucket. - //------------------------------------------------------------- - TreturnCode = 1; - return TreturnCode; - } else if (TSplit > TscanPtr.p->nextBucketIndex) { - jam(); - //------------------------------------------------------------- - // This bucket has not yet been scanned. We must reset the scanned - // bit indicator for this scan on this bucket. - //------------------------------------------------------------- - TreleaseScanIndicator[Ti] = 1; - TreleaseInd = 1; - } else { - jam(); - }//if - } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { - jam(); - //------------------------------------------------------------- - // We are performing a second lap to handle buckets that was - // merged during the first lap of scanning. During this second - // lap we do not allow any splits or merges. - //------------------------------------------------------------- - TreturnCode = 1; - return TreturnCode; - } else { - ndbrequire(TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED); - jam(); - //------------------------------------------------------------- - // The scan is completed and we can thus go ahead and perform - // the split. - //------------------------------------------------------------- - }//if - }//if - }//if - }//for - if (TreleaseInd == 1) { - TreleaseScanBucket = TSplit; - TDirRangePtr.i = fragrecptr.p->directory; - TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */ - TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */ - ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange); - arrGuard((TDirInd >> 8), 256); - TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8]; - ptrCheckGuard(TDirptr, cdirarraysize, directoryarray); - TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff]; - ptrCheckGuard(TPageptr, cpagesize, page8); - for (Ti = 0; Ti < 4; Ti++) { - if (TreleaseScanIndicator[Ti] == 1) { - jam(); - scanPtr.i = fragrecptr.p->scan[Ti]; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - rsbPageidptr = TPageptr; - trsbPageindex = TPageIndex; - releaseScanBucket(signal); - }//if - }//for - }//if - return TreturnCode; -}//Dbacc::checkScanExpand() - -void Dbacc::execEXPANDCHECK2(Signal* signal) -{ - jamEntry(); - - if(refToBlock(signal->getSendersBlockRef()) == DBLQH) - { - jam(); - return; - } - - DirectoryarrayPtr newDirptr; - - fragrecptr.i = signal->theData[0]; - tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */ - Uint32 tmp = 1; - tmp = tmp << 31; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - fragrecptr.p->expandFlag = 0; - if (fragrecptr.p->slack < tmp) { - jam(); - /* IT MEANS THAT IF SLACK > ZERO */ - /*--------------------------------------------------------------*/ - /* THE SLACK HAS IMPROVED AND IS NOW ACCEPTABLE AND WE */ - /* CAN FORGET ABOUT THE EXPAND PROCESS. 
*/ - /*--------------------------------------------------------------*/ - return; - }//if - if (fragrecptr.p->firstOverflowRec == RNIL) { - jam(); - allocOverflowPage(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - /*--------------------------------------------------------------*/ - /* WE COULD NOT ALLOCATE ANY OVERFLOW PAGE. THUS WE HAVE TO STOP*/ - /* THE EXPAND SINCE WE CANNOT GUARANTEE ITS COMPLETION. */ - /*--------------------------------------------------------------*/ - return; - }//if - }//if - if (cfirstfreepage == RNIL) { - if (cfreepage >= cpagesize) { - jam(); - /*--------------------------------------------------------------*/ - /* WE HAVE TO STOP THE EXPAND PROCESS SINCE THERE ARE NO FREE */ - /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */ - /* CANNOT COMPLETE THE EXPAND. TO AVOID THE CRASH WE EXIT HERE. */ - /*--------------------------------------------------------------*/ - return; - }//if - }//if - if (checkScanExpand(signal) == 1) { - jam(); - /*--------------------------------------------------------------*/ - // A scan state was inconsistent with performing an expand - // operation. - /*--------------------------------------------------------------*/ - return; - }//if - - /*--------------------------------------------------------------------------*/ - /* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/ - /* OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENT WHICH HAVE A 1 IN*/ - /* THE NEXT HASH BIT. THIS BIT IS USED IN THE SPLIT MECHANISM TO */ - /* DECIDE WHICH ELEMENT GOES WHERE. */ - /*--------------------------------------------------------------------------*/ - expDirRangePtr.i = fragrecptr.p->directory; - texpReceivedBucket = (fragrecptr.p->maxp + fragrecptr.p->p) + 1; /* RECEIVED BUCKET */ - texpDirInd = texpReceivedBucket >> fragrecptr.p->k; - newDirptr.i = RNIL; - ptrNull(newDirptr); - texpDirRangeIndex = texpDirInd >> 8; - ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange); - arrGuard(texpDirRangeIndex, 256); - expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex]; - if (expDirptr.i == RNIL) { - jam(); - seizeDirectory(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - return; - } else { - jam(); - newDirptr = sdDirptr; - expDirptr = sdDirptr; - expDirRangePtr.p->dirArray[texpDirRangeIndex] = sdDirptr.i; - }//if - } else { - ptrCheckGuard(expDirptr, cdirarraysize, directoryarray); - }//if - texpDirPageIndex = texpDirInd & 0xff; - expPageptr.i = expDirptr.p->pagep[texpDirPageIndex]; - if (expPageptr.i == RNIL) { - jam(); - seizePage(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - if (newDirptr.i != RNIL) { - jam(); - rdDirptr.i = newDirptr.i; - releaseDirectory(signal); - }//if - return; - }//if - expDirptr.p->pagep[texpDirPageIndex] = spPageptr.i; - tipPageId = texpDirInd; - inpPageptr = spPageptr; - initPage(signal); - fragrecptr.p->dirsize++; - expPageptr = spPageptr; - } else { - ptrCheckGuard(expPageptr, cpagesize, page8); - }//if - - fragrecptr.p->expReceivePageptr = expPageptr.i; - fragrecptr.p->expReceiveIndex = texpReceivedBucket & ((1 << fragrecptr.p->k) - 1); - /*--------------------------------------------------------------------------*/ - /* THE NEXT ACTION IS TO FIND THE PAGE, THE PAGE INDEX AND THE PAGE */ - /* DIRECTORY OF THE BUCKET TO BE SPLIT. 
*/ - /*--------------------------------------------------------------------------*/ - expDirRangePtr.i = fragrecptr.p->directory; - cexcPageindex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */ - texpDirInd = fragrecptr.p->p >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */ - ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange); - arrGuard((texpDirInd >> 8), 256); - expDirptr.i = expDirRangePtr.p->dirArray[texpDirInd >> 8]; - ptrCheckGuard(expDirptr, cdirarraysize, directoryarray); - excPageptr.i = expDirptr.p->pagep[texpDirInd & 0xff]; - fragrecptr.p->expSenderIndex = cexcPageindex; - fragrecptr.p->expSenderPageptr = excPageptr.i; - if (excPageptr.i == RNIL) { - jam(); - endofexpLab(signal); /* EMPTY BUCKET */ - return; - }//if - fragrecptr.p->expReceiveForward = ZTRUE; - ptrCheckGuard(excPageptr, cpagesize, page8); - expandcontainer(signal); - endofexpLab(signal); - return; -}//Dbacc::execEXPANDCHECK2() - -void Dbacc::endofexpLab(Signal* signal) -{ - fragrecptr.p->p++; - fragrecptr.p->slack += fragrecptr.p->maxloadfactor; - fragrecptr.p->expandCounter++; - if (fragrecptr.p->p > fragrecptr.p->maxp) { - jam(); - fragrecptr.p->maxp = (fragrecptr.p->maxp << 1) | 1; - fragrecptr.p->lhdirbits++; - fragrecptr.p->hashcheckbit++; - fragrecptr.p->p = 0; - }//if - Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p; - Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor; - fragrecptr.p->slackCheck = noOfBuckets * Thysteres; - if (fragrecptr.p->slack > (1u << 31)) { - jam(); - /* IT MEANS THAT IF SLACK < ZERO */ - /* --------------------------------------------------------------------------------- */ - /* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */ - /* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. */ - /* --------------------------------------------------------------------------------- */ - fragrecptr.p->expandFlag = 2; - signal->theData[0] = fragrecptr.i; - signal->theData[1] = fragrecptr.p->p; - signal->theData[2] = fragrecptr.p->maxp; - sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB); - }//if - return; -}//Dbacc::endofexpLab() - -void Dbacc::reenable_expand_after_redo_log_exection_complete(Signal* signal){ - - tabptr.i = signal->theData[0]; - Uint32 fragId = signal->theData[1]; - - ptrCheckGuard(tabptr, ctablesize, tabrec); - ndbrequire(getfragmentrec(signal, fragrecptr, fragId)); -#if 0 - ndbout_c("reenable expand check for table %d fragment: %d", - tabptr.i, fragId); -#endif - - switch(fragrecptr.p->expandFlag){ - case 0: - /** - * Hmm... this means that it's alreay has been reenabled... - */ - fragrecptr.p->expandFlag = 1; - break; - case 1: - /** - * Nothing is going on start expand check - */ - case 2: - /** - * A shrink is running, do expand check anyway - * (to reset expandFlag) - */ - fragrecptr.p->expandFlag = 2; - signal->theData[0] = fragrecptr.i; - signal->theData[1] = fragrecptr.p->p; - signal->theData[2] = fragrecptr.p->maxp; - sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB); - break; - } -} - -void Dbacc::execDEBUG_SIG(Signal* signal) -{ - jamEntry(); - expPageptr.i = signal->theData[0]; - - progError(__LINE__, NDBD_EXIT_SR_UNDOLOG); - return; -}//Dbacc::execDEBUG_SIG() - -/* --------------------------------------------------------------------------------- */ -/* EXPANDCONTAINER */ -/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */ -/* CEXC_PAGEINDEX (INDEX OF THE BUCKET). 
*/ -/* */ -/* DESCRIPTION: THE HASH VALUE OF ALL ELEMENTS IN THE CONTAINER WILL BE */ -/* CHECKED. SOME OF THIS ELEMENTS HAVE TO MOVE TO THE NEW CONTAINER */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::expandcontainer(Signal* signal) -{ - Uint32 texcHashvalue; - Uint32 texcTmp; - Uint32 texcIndex; - Uint32 guard20; - - cexcPrevpageptr = RNIL; - cexcPrevconptr = 0; - cexcForward = ZTRUE; - EXP_CONTAINER_LOOP: - cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS); - if (cexcForward == ZTRUE) { - jam(); - cexcContainerptr = cexcContainerptr + ZHEAD_SIZE; - cexcElementptr = cexcContainerptr + ZCON_HEAD_SIZE; - } else { - jam(); - cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE; - cexcElementptr = cexcContainerptr - 1; - }//if - arrGuard(cexcContainerptr, 2048); - cexcContainerhead = excPageptr.p->word32[cexcContainerptr]; - cexcContainerlen = cexcContainerhead >> 26; - cexcMovedLen = ZCON_HEAD_SIZE; - if (cexcContainerlen <= ZCON_HEAD_SIZE) { - ndbrequire(cexcContainerlen >= ZCON_HEAD_SIZE); - jam(); - goto NEXT_ELEMENT; - }//if - NEXT_ELEMENT_LOOP: - idrOperationRecPtr.i = RNIL; - ptrNull(idrOperationRecPtr); - /* --------------------------------------------------------------------------------- */ - /* CEXC_PAGEINDEX PAGE INDEX OF CURRENT CONTAINER BEING EXAMINED. */ - /* CEXC_CONTAINERPTR INDEX OF CURRENT CONTAINER BEING EXAMINED. */ - /* CEXC_ELEMENTPTR INDEX OF CURRENT ELEMENT BEING EXAMINED. */ - /* EXC_PAGEPTR PAGE WHERE CURRENT ELEMENT RESIDES. */ - /* CEXC_PREVPAGEPTR PAGE OF PREVIOUS CONTAINER. */ - /* CEXC_PREVCONPTR INDEX OF PREVIOUS CONTAINER */ - /* CEXC_FORWARD DIRECTION OF CURRENT CONTAINER */ - /* --------------------------------------------------------------------------------- */ - arrGuard(cexcElementptr, 2048); - tidrElemhead = excPageptr.p->word32[cexcElementptr]; - if (ElementHeader::getUnlocked(tidrElemhead)){ - jam(); - texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead); - } else { - jam(); - idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead); - ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec); - texcHashvalue = idrOperationRecPtr.p->hashvaluePart; - }//if - if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THIS ELEMENT IS NOT TO BE MOVED. WE CALCULATE THE WHEREABOUTS OF THE NEXT */ - /* ELEMENT AND PROCEED WITH THAT OR END THE SEARCH IF THERE ARE NO MORE */ - /* ELEMENTS IN THIS CONTAINER. */ - /* --------------------------------------------------------------------------------- */ - goto NEXT_ELEMENT; - }//if - /* --------------------------------------------------------------------------------- */ - /* THE HASH BIT WAS SET AND WE SHALL MOVE THIS ELEMENT TO THE NEW BUCKET. */ - /* WE START BY READING THE ELEMENT TO BE ABLE TO INSERT IT INTO THE NEW BUCKET.*/ - /* THEN WE INSERT THE ELEMENT INTO THE NEW BUCKET. THE NEXT STEP IS TO DELETE */ - /* THE ELEMENT FROM THIS BUCKET. THIS IS PERFORMED BY REPLACING IT WITH THE */ - /* LAST ELEMENT IN THE BUCKET. IF THIS ELEMENT IS TO BE MOVED WE MOVE IT AND */ - /* GET THE LAST ELEMENT AGAIN UNTIL WE EITHER FIND ONE THAT STAYS OR THIS */ - /* ELEMENT IS THE LAST ELEMENT. 
*/ - /* --------------------------------------------------------------------------------- */ - texcTmp = cexcElementptr + cexcForward; - guard20 = fragrecptr.p->localkeylen - 1; - for (texcIndex = 0; texcIndex <= guard20; texcIndex++) { - arrGuard(texcIndex, 2); - arrGuard(texcTmp, 2048); - clocalkey[texcIndex] = excPageptr.p->word32[texcTmp]; - texcTmp = texcTmp + cexcForward; - }//for - tidrPageindex = fragrecptr.p->expReceiveIndex; - idrPageptr.i = fragrecptr.p->expReceivePageptr; - ptrCheckGuard(idrPageptr, cpagesize, page8); - tidrForward = fragrecptr.p->expReceiveForward; - insertElement(signal); - fragrecptr.p->expReceiveIndex = tidrPageindex; - fragrecptr.p->expReceivePageptr = idrPageptr.i; - fragrecptr.p->expReceiveForward = tidrForward; - REMOVE_LAST_LOOP: - jam(); - lastPageptr.i = excPageptr.i; - lastPageptr.p = excPageptr.p; - tlastContainerptr = cexcContainerptr; - lastPrevpageptr.i = cexcPrevpageptr; - ptrCheck(lastPrevpageptr, cpagesize, page8); - tlastPrevconptr = cexcPrevconptr; - arrGuard(tlastContainerptr, 2048); - tlastContainerhead = lastPageptr.p->word32[tlastContainerptr]; - tlastContainerlen = tlastContainerhead >> 26; - tlastForward = cexcForward; - tlastPageindex = cexcPageindex; - getLastAndRemove(signal); - if (excPageptr.i == lastPageptr.i) { - if (cexcElementptr == tlastElementptr) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE CURRENT ELEMENT WAS ALSO THE LAST ELEMENT. */ - /* --------------------------------------------------------------------------------- */ - return; - }//if - }//if - /* --------------------------------------------------------------------------------- */ - /* THE CURRENT ELEMENT WAS NOT THE LAST ELEMENT. IF THE LAST ELEMENT SHOULD */ - /* STAY WE COPY IT TO THE POSITION OF THE CURRENT ELEMENT, OTHERWISE WE INSERT */ - /* INTO THE NEW BUCKET, REMOVE IT AND TRY WITH THE NEW LAST ELEMENT. */ - /* --------------------------------------------------------------------------------- */ - idrOperationRecPtr.i = RNIL; - ptrNull(idrOperationRecPtr); - arrGuard(tlastElementptr, 2048); - tidrElemhead = lastPageptr.p->word32[tlastElementptr]; - if (ElementHeader::getUnlocked(tidrElemhead)) { - jam(); - texcHashvalue = ElementHeader::getHashValuePart(tidrElemhead); - } else { - jam(); - idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead); - ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec); - texcHashvalue = idrOperationRecPtr.p->hashvaluePart; - }//if - if (((texcHashvalue >> fragrecptr.p->hashcheckbit) & 1) == 0) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE LAST ELEMENT IS NOT TO BE MOVED. WE COPY IT TO THE CURRENT ELEMENT. */ - /* --------------------------------------------------------------------------------- */ - delPageptr = excPageptr; - tdelContainerptr = cexcContainerptr; - tdelForward = cexcForward; - tdelElementptr = cexcElementptr; - deleteElement(signal); - } else { - jam(); - /* --------------------------------------------------------------------------------- */ - /* THE LAST ELEMENT IS ALSO TO BE MOVED. 
*/ - /* --------------------------------------------------------------------------------- */ - texcTmp = tlastElementptr + tlastForward; - for (texcIndex = 0; texcIndex < fragrecptr.p->localkeylen; texcIndex++) { - arrGuard(texcIndex, 2); - arrGuard(texcTmp, 2048); - clocalkey[texcIndex] = lastPageptr.p->word32[texcTmp]; - texcTmp = texcTmp + tlastForward; - }//for - tidrPageindex = fragrecptr.p->expReceiveIndex; - idrPageptr.i = fragrecptr.p->expReceivePageptr; - ptrCheckGuard(idrPageptr, cpagesize, page8); - tidrForward = fragrecptr.p->expReceiveForward; - insertElement(signal); - fragrecptr.p->expReceiveIndex = tidrPageindex; - fragrecptr.p->expReceivePageptr = idrPageptr.i; - fragrecptr.p->expReceiveForward = tidrForward; - goto REMOVE_LAST_LOOP; - }//if - NEXT_ELEMENT: - arrGuard(cexcContainerptr, 2048); - cexcContainerhead = excPageptr.p->word32[cexcContainerptr]; - cexcMovedLen = cexcMovedLen + fragrecptr.p->elementLength; - if ((cexcContainerhead >> 26) > cexcMovedLen) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* WE HAVE NOT YET MOVED THE COMPLETE CONTAINER. WE PROCEED WITH THE NEXT */ - /* ELEMENT IN THE CONTAINER. IT IS IMPORTANT TO READ THE CONTAINER LENGTH */ - /* FROM THE CONTAINER HEADER SINCE IT MIGHT CHANGE BY REMOVING THE LAST */ - /* ELEMENT IN THE BUCKET. */ - /* --------------------------------------------------------------------------------- */ - cexcElementptr = cexcElementptr + (cexcForward * fragrecptr.p->elementLength); - goto NEXT_ELEMENT_LOOP; - }//if - if (((cexcContainerhead >> 7) & 3) != 0) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* WE PROCEED TO THE NEXT CONTAINER IN THE BUCKET. */ - /* --------------------------------------------------------------------------------- */ - cexcPrevpageptr = excPageptr.i; - cexcPrevconptr = cexcContainerptr; - nextcontainerinfoExp(signal); - goto EXP_CONTAINER_LOOP; - }//if -}//Dbacc::expandcontainer() - -/* ******************--------------------------------------------------------------- */ -/* SHRINKCHECK JOIN BUCKET ORD */ -/* SENDER: ACC, LEVEL B */ -/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */ -/* DESCRIPTION: TWO BUCKET OF A FRAGMENT PAGE WILL BE JOINED TOGETHER */ -/* ACCORDING TO LH3. 
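
The element-migration decision that expandcontainer() applies above comes down to one bit of the element's hash value, read either from the element header (unlocked element) or from the owning operation record (locked element). The following standalone sketch restates that test only; it is not taken from the removed file, and it uses plain stdint types in place of the block's Uint32 and ElementHeader helpers.

    #include <cstdint>

    // Sketch of the split test in Dbacc::expandcontainer(): an element whose
    // hash value has the 'hashcheckbit' bit set moves to the bucket created by
    // the expand; otherwise it stays where it is.
    static bool elementMovesOnExpand(uint32_t hashValuePart, uint32_t hashcheckbit)
    {
      return ((hashValuePart >> hashcheckbit) & 1u) != 0;
    }

    // In the removed code the hash value itself is obtained like this:
    //   if (ElementHeader::getUnlocked(elemHead))
    //     hash = ElementHeader::getHashValuePart(elemHead);
    //   else
    //     hash = operationRec(ElementHeader::getOpPtrI(elemHead))->hashvaluePart;
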
*/ -/* ******************--------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* SHRINKCHECK JOIN BUCKET ORD */ -/* ******************------------------------------+ */ -/* SENDER: ACC, LEVEL B */ -/* TWO BUCKETS OF THE FRAGMENT */ -/* WILL BE JOINED ACORDING TO LH3 */ -/* AND COMMIT TRANSACTION PROCESS */ -/* WILL BE CONTINUED */ -Uint32 Dbacc::checkScanShrink(Signal* signal) -{ - Uint32 Ti; - Uint32 TreturnCode = 0; - Uint32 TPageIndex; - Uint32 TDirInd; - Uint32 TmergeDest; - Uint32 TmergeSource; - Uint32 TreleaseScanBucket; - Uint32 TreleaseInd = 0; - Uint32 TreleaseScanIndicator[4]; - DirectoryarrayPtr TDirptr; - DirRangePtr TDirRangePtr; - Page8Ptr TPageptr; - ScanRecPtr TscanPtr; - - if (fragrecptr.p->p == 0) { - jam(); - TmergeDest = fragrecptr.p->maxp >> 1; - } else { - jam(); - TmergeDest = fragrecptr.p->p - 1; - }//if - TmergeSource = fragrecptr.p->maxp + fragrecptr.p->p; - for (Ti = 0; Ti < 4; Ti++) { - TreleaseScanIndicator[Ti] = 0; - if (fragrecptr.p->scan[Ti] != RNIL) { - TscanPtr.i = fragrecptr.p->scan[Ti]; - ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); - if (TscanPtr.p->activeLocalFrag == fragrecptr.i) { - //------------------------------------------------------------- - // A scan is ongoing on this particular local fragment. We have - // to check its current state. - //------------------------------------------------------------- - if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { - jam(); - if ((TmergeDest == TscanPtr.p->nextBucketIndex) || - (TmergeSource == TscanPtr.p->nextBucketIndex)) { - jam(); - //------------------------------------------------------------- - // We are currently scanning one of the buckets involved in the - // merge. We cannot merge while simultaneously performing a scan. - // We have to pass this offer for merging the buckets. - //------------------------------------------------------------- - TreturnCode = 1; - return TreturnCode; - } else if (TmergeDest < TscanPtr.p->nextBucketIndex) { - jam(); - TreleaseScanIndicator[Ti] = 1; - TreleaseInd = 1; - }//if - } else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { - jam(); - //------------------------------------------------------------- - // We are performing a second lap to handle buckets that was - // merged during the first lap of scanning. During this second - // lap we do not allow any splits or merges. - //------------------------------------------------------------- - TreturnCode = 1; - return TreturnCode; - } else if (TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) { - jam(); - //------------------------------------------------------------- - // The scan is completed and we can thus go ahead and perform - // the split. 
- //------------------------------------------------------------- - } else { - jam(); - sendSystemerror(signal, __LINE__); - return TreturnCode; - }//if - }//if - }//if - }//for - if (TreleaseInd == 1) { - jam(); - TreleaseScanBucket = TmergeSource; - TDirRangePtr.i = fragrecptr.p->directory; - TPageIndex = TreleaseScanBucket & ((1 << fragrecptr.p->k) - 1); /* PAGE INDEX OBS K = 6 */ - TDirInd = TreleaseScanBucket >> fragrecptr.p->k; /* DIRECTORY INDEX OBS K = 6 */ - ptrCheckGuard(TDirRangePtr, cdirrangesize, dirRange); - arrGuard((TDirInd >> 8), 256); - TDirptr.i = TDirRangePtr.p->dirArray[TDirInd >> 8]; - ptrCheckGuard(TDirptr, cdirarraysize, directoryarray); - TPageptr.i = TDirptr.p->pagep[TDirInd & 0xff]; - ptrCheckGuard(TPageptr, cpagesize, page8); - for (Ti = 0; Ti < 4; Ti++) { - if (TreleaseScanIndicator[Ti] == 1) { - jam(); - scanPtr.i = fragrecptr.p->scan[Ti]; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - rsbPageidptr.i = TPageptr.i; - rsbPageidptr.p = TPageptr.p; - trsbPageindex = TPageIndex; - releaseScanBucket(signal); - if (TmergeDest < scanPtr.p->minBucketIndexToRescan) { - jam(); - //------------------------------------------------------------- - // We have to keep track of the starting bucket to Rescan in the - // second lap. - //------------------------------------------------------------- - scanPtr.p->minBucketIndexToRescan = TmergeDest; - }//if - if (TmergeDest > scanPtr.p->maxBucketIndexToRescan) { - jam(); - //------------------------------------------------------------- - // We have to keep track of the ending bucket to Rescan in the - // second lap. - //------------------------------------------------------------- - scanPtr.p->maxBucketIndexToRescan = TmergeDest; - }//if - }//if - }//for - }//if - return TreturnCode; -}//Dbacc::checkScanShrink() - -void Dbacc::execSHRINKCHECK2(Signal* signal) -{ - Uint32 tshrTmp1; - - jamEntry(); - fragrecptr.i = signal->theData[0]; - Uint32 oldFlag = signal->theData[3]; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - fragrecptr.p->expandFlag = oldFlag; - tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */ - if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) { - jam(); - /* TIME FOR JOIN BUCKETS PROCESS */ - /*--------------------------------------------------------------*/ - /* NO LONGER NECESSARY TO SHRINK THE FRAGMENT. */ - /*--------------------------------------------------------------*/ - return; - }//if - if (fragrecptr.p->slack > (1u << 31)) { - jam(); - /*--------------------------------------------------------------*/ - /* THE SLACK IS NEGATIVE, IN THIS CASE WE WILL NOT NEED ANY */ - /* SHRINK. */ - /*--------------------------------------------------------------*/ - return; - }//if - texpDirInd = (fragrecptr.p->maxp + fragrecptr.p->p) >> fragrecptr.p->k; - if (fragrecptr.p->firstOverflowRec == RNIL) { - jam(); - allocOverflowPage(signal); - if (tresult > ZLIMIT_OF_ERROR) { - jam(); - return; - }//if - }//if - if (cfirstfreepage == RNIL) { - if (cfreepage >= cpagesize) { - jam(); - /*--------------------------------------------------------------*/ - /* WE HAVE TO STOP THE SHRINK PROCESS SINCE THERE ARE NO FREE */ - /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */ - /* CANNOT COMPLETE THE SHRINK. TO AVOID THE CRASH WE EXIT HERE. 
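
checkScanShrink() above turns a logical bucket number into a page by splitting off k low bits of page index and routing the rest through a two-level directory (a range array of 256 slots, each pointing to a directory of 256 pages). A minimal sketch of that address arithmetic, not part of the removed file, with illustrative names:

    #include <cstdint>

    // Bucket-to-page addressing as used in checkScanShrink() (the code asserts
    // k == 6 elsewhere in this block).
    struct BucketAddress
    {
      uint32_t pageIndex;     // bucket & ((1 << k) - 1)
      uint32_t dirRangeSlot;  // (bucket >> k) >> 8   -- slot in dirArray[]
      uint32_t dirSlot;       // (bucket >> k) & 0xff -- slot in pagep[]
    };

    static BucketAddress bucketAddress(uint32_t bucket, uint32_t k)
    {
      BucketAddress a;
      const uint32_t dirInd = bucket >> k;
      a.pageIndex    = bucket & ((1u << k) - 1u);
      a.dirRangeSlot = dirInd >> 8;
      a.dirSlot      = dirInd & 0xff;
      return a;
    }
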
*/ - /*--------------------------------------------------------------*/ - return; - }//if - }//if - if (checkScanShrink(signal) == 1) { - jam(); - /*--------------------------------------------------------------*/ - // A scan state was inconsistent with performing a shrink - // operation. - /*--------------------------------------------------------------*/ - return; - }//if - if (fragrecptr.p->p == 0) { - jam(); - fragrecptr.p->maxp = fragrecptr.p->maxp >> 1; - fragrecptr.p->p = fragrecptr.p->maxp; - fragrecptr.p->lhdirbits--; - fragrecptr.p->hashcheckbit--; - } else { - jam(); - fragrecptr.p->p--; - }//if - - /*--------------------------------------------------------------------------*/ - /* WE START BY FINDING THE NECESSARY INFORMATION OF THE BUCKET TO BE */ - /* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. */ - /*--------------------------------------------------------------------------*/ - expDirRangePtr.i = fragrecptr.p->directory; - cexcPageindex = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) & ((1 << fragrecptr.p->k) - 1); - texpDirInd = ((fragrecptr.p->maxp + fragrecptr.p->p) + 1) >> fragrecptr.p->k; - texpDirRangeIndex = texpDirInd >> 8; - texpDirPageIndex = texpDirInd & 0xff; - ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange); - arrGuard(texpDirRangeIndex, 256); - expDirptr.i = expDirRangePtr.p->dirArray[texpDirRangeIndex]; - ptrCheckGuard(expDirptr, cdirarraysize, directoryarray); - excPageptr.i = expDirptr.p->pagep[texpDirPageIndex]; - fragrecptr.p->expSenderDirptr = expDirptr.i; - fragrecptr.p->expSenderIndex = cexcPageindex; - fragrecptr.p->expSenderPageptr = excPageptr.i; - fragrecptr.p->expSenderDirIndex = texpDirInd; - /*--------------------------------------------------------------------------*/ - /* WE NOW PROCEED BY FINDING THE NECESSARY INFORMATION ABOUT THE */ - /* RECEIVING BUCKET. */ - /*--------------------------------------------------------------------------*/ - expDirRangePtr.i = fragrecptr.p->directory; - texpReceivedBucket = fragrecptr.p->p >> fragrecptr.p->k; - ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange); - arrGuard((texpReceivedBucket >> 8), 256); - expDirptr.i = expDirRangePtr.p->dirArray[texpReceivedBucket >> 8]; - ptrCheckGuard(expDirptr, cdirarraysize, directoryarray); - fragrecptr.p->expReceivePageptr = expDirptr.p->pagep[texpReceivedBucket & 0xff]; - fragrecptr.p->expReceiveIndex = fragrecptr.p->p & ((1 << fragrecptr.p->k) - 1); - fragrecptr.p->expReceiveForward = ZTRUE; - if (excPageptr.i == RNIL) { - jam(); - endofshrinkbucketLab(signal); /* EMPTY BUCKET */ - return; - }//if - /*--------------------------------------------------------------------------*/ - /* INITIALISE THE VARIABLES FOR THE SHRINK PROCESS. */ - /*--------------------------------------------------------------------------*/ - ptrCheckGuard(excPageptr, cpagesize, page8); - cexcForward = ZTRUE; - cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS); - cexcContainerptr = cexcContainerptr + ZHEAD_SIZE; - arrGuard(cexcContainerptr, 2048); - cexcContainerhead = excPageptr.p->word32[cexcContainerptr]; - cexcContainerlen = cexcContainerhead >> 26; - if (cexcContainerlen <= ZCON_HEAD_SIZE) { - ndbrequire(cexcContainerlen == ZCON_HEAD_SIZE); - } else { - jam(); - shrinkcontainer(signal); - }//if - /*--------------------------------------------------------------------------*/ - /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. 
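
The LH3 bookkeeping performed when execSHRINKCHECK2() joins two buckets is compact but easy to misread: the emptied ("source") bucket is always maxp + p, the receiving ("dest") bucket is p - 1, or maxp >> 1 when p has wrapped to 0, in which case maxp is halved and one hash/directory bit is handed back. A standalone sketch of just that arithmetic (illustrative names, not part of the removed file):

    #include <cstdint>

    struct Lh3State
    {
      uint32_t p;             // split/merge pointer
      uint32_t maxp;          // 2^n - 1 for the current doubling round
      uint32_t hashcheckbit;  // hash bit examined on expand
      uint32_t lhdirbits;     // directory bits currently in use
    };

    static uint32_t mergeSourceBucket(const Lh3State& s) { return s.maxp + s.p; }

    static uint32_t mergeDestBucket(const Lh3State& s)
    {
      return (s.p == 0) ? (s.maxp >> 1) : (s.p - 1);
    }

    // The state update applied once the shrink goes ahead.
    static void applyShrinkStep(Lh3State& s)
    {
      if (s.p == 0) {
        s.maxp >>= 1;        // end of a doubling round: halve the table
        s.p = s.maxp;
        s.lhdirbits--;
        s.hashcheckbit--;
      } else {
        s.p--;
      }
    }
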
*/ - /*--------------------------------------------------------------------------*/ - if (((cexcContainerhead >> 10) & 1) == 1) { - jam(); - rlPageptr = excPageptr; - trlPageindex = cexcPageindex; - trlRelCon = ZFALSE; - turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE); - releaseRightlist(signal); - }//if - tshrTmp1 = ZCON_HEAD_SIZE; - tshrTmp1 = tshrTmp1 << 26; - dbgWord32(excPageptr, cexcContainerptr, tshrTmp1); - arrGuard(cexcContainerptr, 2048); - excPageptr.p->word32[cexcContainerptr] = tshrTmp1; - if (((cexcContainerhead >> 7) & 0x3) == 0) { - jam(); - endofshrinkbucketLab(signal); - return; - }//if - nextcontainerinfoExp(signal); - do { - cexcContainerptr = (cexcPageindex << ZSHIFT_PLUS) - (cexcPageindex << ZSHIFT_MINUS); - if (cexcForward == ZTRUE) { - jam(); - cexcContainerptr = cexcContainerptr + ZHEAD_SIZE; - } else { - jam(); - cexcContainerptr = ((cexcContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE; - }//if - arrGuard(cexcContainerptr, 2048); - cexcContainerhead = excPageptr.p->word32[cexcContainerptr]; - cexcContainerlen = cexcContainerhead >> 26; - ndbrequire(cexcContainerlen > ZCON_HEAD_SIZE); - /*--------------------------------------------------------------------------*/ - /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */ - /*--------------------------------------------------------------------------*/ - shrinkcontainer(signal); - cexcPrevpageptr = excPageptr.i; - cexcPrevpageindex = cexcPageindex; - cexcPrevforward = cexcForward; - if (((cexcContainerhead >> 7) & 0x3) != 0) { - jam(); - /*--------------------------------------------------------------------------*/ - /* WE MUST CALL THE NEXT CONTAINER INFO ROUTINE BEFORE WE RELEASE THE */ - /* CONTAINER SINCE THE RELEASE WILL OVERWRITE THE NEXT POINTER. 
*/ - /*--------------------------------------------------------------------------*/ - nextcontainerinfoExp(signal); - }//if - rlPageptr.i = cexcPrevpageptr; - ptrCheckGuard(rlPageptr, cpagesize, page8); - trlPageindex = cexcPrevpageindex; - if (cexcPrevforward == ZTRUE) { - jam(); - if (((cexcContainerhead >> 10) & 1) == 1) { - jam(); - trlRelCon = ZFALSE; - turlIndex = cexcContainerptr + (ZBUF_SIZE - ZCON_HEAD_SIZE); - releaseRightlist(signal); - }//if - trlRelCon = ZTRUE; - tullIndex = cexcContainerptr; - releaseLeftlist(signal); - } else { - jam(); - if (((cexcContainerhead >> 10) & 1) == 1) { - jam(); - trlRelCon = ZFALSE; - tullIndex = cexcContainerptr - (ZBUF_SIZE - ZCON_HEAD_SIZE); - releaseLeftlist(signal); - }//if - trlRelCon = ZTRUE; - turlIndex = cexcContainerptr; - releaseRightlist(signal); - }//if - } while (((cexcContainerhead >> 7) & 0x3) != 0); - endofshrinkbucketLab(signal); - return; -}//Dbacc::execSHRINKCHECK2() - -void Dbacc::endofshrinkbucketLab(Signal* signal) -{ - fragrecptr.p->expandCounter--; - fragrecptr.p->slack -= fragrecptr.p->maxloadfactor; - if (fragrecptr.p->expSenderIndex == 0) { - jam(); - fragrecptr.p->dirsize--; - if (fragrecptr.p->expSenderPageptr != RNIL) { - jam(); - rpPageptr.i = fragrecptr.p->expSenderPageptr; - ptrCheckGuard(rpPageptr, cpagesize, page8); - releasePage(signal); - expDirptr.i = fragrecptr.p->expSenderDirptr; - ptrCheckGuard(expDirptr, cdirarraysize, directoryarray); - expDirptr.p->pagep[fragrecptr.p->expSenderDirIndex & 0xff] = RNIL; - }//if - if (((((fragrecptr.p->p + fragrecptr.p->maxp) + 1) >> fragrecptr.p->k) & 0xff) == 0) { - jam(); - rdDirptr.i = fragrecptr.p->expSenderDirptr; - releaseDirectory(signal); - expDirRangePtr.i = fragrecptr.p->directory; - ptrCheckGuard(expDirRangePtr, cdirrangesize, dirRange); - arrGuard((fragrecptr.p->expSenderDirIndex >> 8), 256); - expDirRangePtr.p->dirArray[fragrecptr.p->expSenderDirIndex >> 8] = RNIL; - }//if - }//if - if (fragrecptr.p->slack < (1u << 31)) { - jam(); - /*--------------------------------------------------------------*/ - /* THE SLACK IS POSITIVE, IN THIS CASE WE WILL CHECK WHETHER */ - /* WE WILL CONTINUE PERFORM ANOTHER SHRINK. */ - /*--------------------------------------------------------------*/ - Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p; - Uint32 Thysteresis = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor; - fragrecptr.p->slackCheck = noOfBuckets * Thysteresis; - if (fragrecptr.p->slack > Thysteresis) { - /*--------------------------------------------------------------*/ - /* IT IS STILL NECESSARY TO SHRINK THE FRAGMENT MORE. THIS*/ - /* CAN HAPPEN WHEN A NUMBER OF SHRINKS GET REJECTED */ - /* DURING A LOCAL CHECKPOINT. WE START A NEW SHRINK */ - /* IMMEDIATELY FROM HERE WITHOUT WAITING FOR A COMMIT TO */ - /* START IT. */ - /*--------------------------------------------------------------*/ - if (fragrecptr.p->expandCounter > 0) { - jam(); - /*--------------------------------------------------------------*/ - /* IT IS VERY IMPORTANT TO NOT TRY TO SHRINK MORE THAN */ - /* WAS EXPANDED. IF MAXP IS SET TO A VALUE BELOW 63 THEN */ - /* WE WILL LOSE RECORDS SINCE GETDIRINDEX CANNOT HANDLE */ - /* SHRINKING BELOW 2^K - 1 (NOW 63). THIS WAS A BUG THAT */ - /* WAS REMOVED 2000-05-12. 
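
Both the shrink loop above and nextcontainerinfoExp() repeatedly unpack the same 32-bit container head word. A sketch of the fields that this code actually reads (only those; the remaining bits are not restated here), not part of the removed file:

    #include <cstdint>

    struct ContainerHead
    {
      uint32_t length;         // head >> 26       -- words used, incl. header
      uint32_t nextType;       // (head >> 7) & 3  -- 0: no next, else ZLEFT/ZRIGHT
      uint32_t nextSamePage;   // (head >> 9) & 1  -- next container on same page
      uint32_t buddyFlag;      // (head >> 10) & 1 -- tested before releasing the
                               //    container at the opposite end of the buffer
      uint32_t nextPageIndex;  // head & 0x7f      -- 7-bit page index of next container
    };

    static ContainerHead decodeContainerHead(uint32_t head)
    {
      ContainerHead h;
      h.length        = head >> 26;
      h.nextType      = (head >> 7) & 0x3;
      h.nextSamePage  = (head >> 9) & 0x1;
      h.buddyFlag     = (head >> 10) & 0x1;
      h.nextPageIndex = head & 0x7f;
      return h;
    }
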
*/ - /*--------------------------------------------------------------*/ - signal->theData[0] = fragrecptr.i; - signal->theData[1] = fragrecptr.p->p; - signal->theData[2] = fragrecptr.p->maxp; - signal->theData[3] = fragrecptr.p->expandFlag; - ndbrequire(fragrecptr.p->expandFlag < 2); - fragrecptr.p->expandFlag = 2; - sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB); - }//if - }//if - }//if - ndbrequire(fragrecptr.p->maxp >= (Uint32)((1 << fragrecptr.p->k) - 1)); - return; -}//Dbacc::endofshrinkbucketLab() - -/* --------------------------------------------------------------------------------- */ -/* SHRINKCONTAINER */ -/* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */ -/* CEXC_CONTAINERLEN (LENGTH OF THE CONTAINER). */ -/* CEXC_CONTAINERPTR (ARRAY INDEX OF THE CONTAINER). */ -/* CEXC_FORWARD (CONTAINER FORWARD (+1) OR BACKWARD (-1)) */ -/* */ -/* DESCRIPTION: ALL ELEMENTS OF THE ACTIVE CONTAINER HAVE TO MOVE TO THE NEW */ -/* CONTAINER. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::shrinkcontainer(Signal* signal) -{ - Uint32 tshrElementptr; - Uint32 tshrRemLen; - Uint32 tshrInc; - Uint32 tshrTmp; - Uint32 tshrIndex; - Uint32 guard21; - - tshrRemLen = cexcContainerlen - ZCON_HEAD_SIZE; - tshrInc = fragrecptr.p->elementLength; - if (cexcForward == ZTRUE) { - jam(); - tshrElementptr = cexcContainerptr + ZCON_HEAD_SIZE; - } else { - jam(); - tshrElementptr = cexcContainerptr - 1; - }//if - SHR_LOOP: - idrOperationRecPtr.i = RNIL; - ptrNull(idrOperationRecPtr); - /* --------------------------------------------------------------------------------- */ - /* THE CODE BELOW IS ALL USED TO PREPARE FOR THE CALL TO INSERT_ELEMENT AND */ - /* HANDLE THE RESULT FROM INSERT_ELEMENT. INSERT_ELEMENT INSERTS THE ELEMENT */ - /* INTO ANOTHER BUCKET. */ - /* --------------------------------------------------------------------------------- */ - arrGuard(tshrElementptr, 2048); - tidrElemhead = excPageptr.p->word32[tshrElementptr]; - if (ElementHeader::getLocked(tidrElemhead)) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* IF THE ELEMENT IS LOCKED WE MUST UPDATE THE ELEMENT INFO IN THE OPERATION */ - /* RECORD OWNING THE LOCK. WE DO THIS BY READING THE OPERATION RECORD POINTER */ - /* FROM THE ELEMENT HEADER. */ - /* --------------------------------------------------------------------------------- */ - idrOperationRecPtr.i = ElementHeader::getOpPtrI(tidrElemhead); - ptrCheckGuard(idrOperationRecPtr, coprecsize, operationrec); - }//if - tshrTmp = tshrElementptr + cexcForward; - guard21 = fragrecptr.p->localkeylen - 1; - for (tshrIndex = 0; tshrIndex <= guard21; tshrIndex++) { - arrGuard(tshrIndex, 2); - arrGuard(tshrTmp, 2048); - clocalkey[tshrIndex] = excPageptr.p->word32[tshrTmp]; - tshrTmp = tshrTmp + cexcForward; - }//for - tidrPageindex = fragrecptr.p->expReceiveIndex; - idrPageptr.i = fragrecptr.p->expReceivePageptr; - ptrCheckGuard(idrPageptr, cpagesize, page8); - tidrForward = fragrecptr.p->expReceiveForward; - insertElement(signal); - /* --------------------------------------------------------------------------------- */ - /* TAKE CARE OF RESULT FROM INSERT_ELEMENT. 
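
endofshrinkbucketLab() above keeps the fragment's slack in an unsigned word, so "negative" is detected by the top bit, and it may chain another shrink immediately when the remaining slack still exceeds the load-factor hysteresis and earlier expands remain to be undone. A small sketch of that decision (illustrative names, not part of the removed file):

    #include <cstdint>

    static bool slackIsNegative(uint32_t slack)
    {
      return slack > (1u << 31);   // same top-bit test as the code above
    }

    // Whether another shrink is started right away instead of waiting for the
    // next commit to trigger one.
    static bool shouldShrinkAgain(uint32_t slack,
                                  uint32_t maxLoadFactor,
                                  uint32_t minLoadFactor,
                                  uint32_t expandCounter)
    {
      const uint32_t hysteresis = maxLoadFactor - minLoadFactor;
      return !slackIsNegative(slack) && slack > hysteresis && expandCounter > 0;
    }
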
*/ - /* --------------------------------------------------------------------------------- */ - fragrecptr.p->expReceiveIndex = tidrPageindex; - fragrecptr.p->expReceivePageptr = idrPageptr.i; - fragrecptr.p->expReceiveForward = tidrForward; - if (tshrRemLen < tshrInc) { - jam(); - sendSystemerror(signal, __LINE__); - }//if - tshrRemLen = tshrRemLen - tshrInc; - if (tshrRemLen != 0) { - jam(); - tshrElementptr = tshrTmp; - goto SHR_LOOP; - }//if -}//Dbacc::shrinkcontainer() - -/* --------------------------------------------------------------------------------- */ -/* NEXTCONTAINERINFO_EXP */ -/* DESCRIPTION:THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */ -/* ABOUT NEXT CONTAINER IN THE BUCKET. */ -/* INPUT: CEXC_CONTAINERHEAD */ -/* CEXC_CONTAINERPTR */ -/* EXC_PAGEPTR */ -/* OUTPUT: */ -/* CEXC_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED. */ -/* EXC_PAGEPTR (PAGE REFERENCE OF NEXT CONTAINER) */ -/* CEXC_FORWARD */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::nextcontainerinfoExp(Signal* signal) -{ - tnciNextSamePage = (cexcContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */ - /* THE NEXT CONTAINER IS IN THE SAME PAGE */ - cexcPageindex = cexcContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */ - if (((cexcContainerhead >> 7) & 3) == ZLEFT) { - jam(); - cexcForward = ZTRUE; - } else if (((cexcContainerhead >> 7) & 3) == ZRIGHT) { - jam(); - cexcForward = cminusOne; - } else { - jam(); - sendSystemerror(signal, __LINE__); - cexcForward = 0; /* DUMMY FOR COMPILER */ - }//if - if (tnciNextSamePage == ZFALSE) { - jam(); - /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */ - arrGuard(cexcContainerptr + 1, 2048); - tnciTmp = excPageptr.p->word32[cexcContainerptr + 1]; - nciOverflowrangeptr.i = fragrecptr.p->overflowdir; - ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange); - arrGuard((tnciTmp >> 8), 256); - nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8]; - ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray); - excPageptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff]; - ptrCheckGuard(excPageptr, cpagesize, page8); - }//if -}//Dbacc::nextcontainerinfoExp() - -void Dbacc::initFragAdd(Signal* signal, - FragmentrecPtr regFragPtr) -{ - const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; - Uint32 lhFragBits = req->lhFragBits + 1; - Uint32 minLoadFactor = (req->minLoadFactor * ZBUF_SIZE) / 100; - Uint32 maxLoadFactor = (req->maxLoadFactor * ZBUF_SIZE) / 100; - if (minLoadFactor >= maxLoadFactor) { - jam(); - minLoadFactor = maxLoadFactor - 1; - }//if - regFragPtr.p->fragState = ACTIVEFRAG; - // NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ - regFragPtr.p->myfid = req->fragId; - regFragPtr.p->myTableId = req->tableId; - ndbrequire(req->kValue == 6); - regFragPtr.p->k = req->kValue; /* TK_SIZE = 6 IN THIS VERSION */ - regFragPtr.p->expandCounter = 0; - - /** - * Only allow shrink during SR - * - to make sure we don't run out of pages during REDO log execution - * - * Is later restored to 0 by LQH at end of REDO log execution - */ - regFragPtr.p->expandFlag = 0; - regFragPtr.p->p = 0; - regFragPtr.p->maxp = (1 << req->kValue) - 1; - regFragPtr.p->minloadfactor = minLoadFactor; - regFragPtr.p->maxloadfactor = maxLoadFactor; - regFragPtr.p->slack = (regFragPtr.p->maxp + 1) * maxLoadFactor; - regFragPtr.p->lhfragbits = lhFragBits; - regFragPtr.p->lhdirbits = 0; - regFragPtr.p->hashcheckbit = 0; //lhFragBits; - regFragPtr.p->localkeylen = 
req->localKeyLen; - regFragPtr.p->nodetype = (req->reqInfo >> 4) & 0x3; - regFragPtr.p->lastOverIndex = 0; - regFragPtr.p->dirsize = 1; - regFragPtr.p->keyLength = req->keyLength; - ndbrequire(req->keyLength != 0); - regFragPtr.p->elementLength = ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen; - Uint32 Tmp1 = (regFragPtr.p->maxp + 1) + regFragPtr.p->p; - Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor; - Tmp2 = Tmp1 * Tmp2; - regFragPtr.p->slackCheck = Tmp2; - regFragPtr.p->mytabptr = req->tableId; - regFragPtr.p->roothashcheck = req->kValue + req->lhFragBits; - regFragPtr.p->noOfElements = 0; - for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) { - regFragPtr.p->scan[i] = RNIL; - }//for - - Uint32 hasCharAttr = g_key_descriptor_pool.getPtr(req->tableId)->hasCharAttr; - regFragPtr.p->hasCharAttr = hasCharAttr; -}//Dbacc::initFragAdd() - -void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr) -{ - regFragPtr.p->directory = RNIL; - regFragPtr.p->overflowdir = RNIL; - regFragPtr.p->firstOverflowRec = RNIL; - regFragPtr.p->lastOverflowRec = RNIL; - regFragPtr.p->lockOwnersList = RNIL; - regFragPtr.p->firstFreeDirindexRec = RNIL; - - regFragPtr.p->activeDataPage = 0; - regFragPtr.p->hasCharAttr = ZFALSE; - regFragPtr.p->nextAllocPage = 0; - regFragPtr.p->fragState = FREEFRAG; -}//Dbacc::initFragGeneral() - - -void -Dbacc::releaseLogicalPage(Fragmentrec * fragP, Uint32 logicalPageId){ - Ptr dirRangePtr; - dirRangePtr.i = fragP->directory; - ptrCheckGuard(dirRangePtr, cdirrangesize, dirRange); - - const Uint32 lp1 = logicalPageId >> 8; - const Uint32 lp2 = logicalPageId & 0xFF; - ndbrequire(lp1 < 256); - - Ptr dirArrPtr; - dirArrPtr.i = dirRangePtr.p->dirArray[lp1]; - ptrCheckGuard(dirArrPtr, cdirarraysize, directoryarray); - - const Uint32 physicalPageId = dirArrPtr.p->pagep[lp2]; - - rpPageptr.i = physicalPageId; - ptrCheckGuard(rpPageptr, cpagesize, page8); - releasePage(0); - - dirArrPtr.p->pagep[lp2] = RNIL; -} - -void Dbacc::execACC_SCANREQ(Signal* signal) -{ - jamEntry(); - AccScanReq * req = (AccScanReq*)&signal->theData[0]; - tuserptr = req->senderData; - tuserblockref = req->senderRef; - tabptr.i = req->tableId; - tfid = req->fragmentNo; - tscanFlag = req->requestInfo; - tscanTrid1 = req->transId1; - tscanTrid2 = req->transId2; - - tresult = 0; - ptrCheckGuard(tabptr, ctablesize, tabrec); - ndbrequire(getfragmentrec(signal, fragrecptr, tfid)); - - Uint32 i; - for (i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) { - jam(); - if (fragrecptr.p->scan[i] == RNIL) { - jam(); - break; - } - } - ndbrequire(i != MAX_PARALLEL_SCANS_PER_FRAG); - ndbrequire(cfirstFreeScanRec != RNIL); - seizeScanRec(signal); - - fragrecptr.p->scan[i] = scanPtr.i; - scanPtr.p->scanBucketState = ScanRec::FIRST_LAP; - scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag); - scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag); - - /* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */ - /* CHECK BITS. 
THE MASK NOTES WHICH BIT IS */ - /* ALLOCATED FOR THE ACTIVE SCAN */ - scanPtr.p->scanMask = 1 << i; - scanPtr.p->scanUserptr = tuserptr; - scanPtr.p->scanUserblockref = tuserblockref; - scanPtr.p->scanTrid1 = tscanTrid1; - scanPtr.p->scanTrid2 = tscanTrid2; - scanPtr.p->scanLockHeld = 0; - scanPtr.p->scanOpsAllocated = 0; - scanPtr.p->scanFirstActiveOp = RNIL; - scanPtr.p->scanFirstQueuedOp = RNIL; - scanPtr.p->scanLastQueuedOp = RNIL; - scanPtr.p->scanFirstLockedOp = RNIL; - scanPtr.p->scanLastLockedOp = RNIL; - scanPtr.p->scanState = ScanRec::WAIT_NEXT; - initScanFragmentPart(signal); - - /*------------------------------------------------------*/ - /* We start the timeout loop for the scan process here. */ - /*------------------------------------------------------*/ - ndbrequire(scanPtr.p->scanTimer == 0); - if (scanPtr.p->scanContinuebCounter == 0) { - jam(); - scanPtr.p->scanContinuebCounter = 1; - signal->theData[0] = ZSEND_SCAN_HBREP; - signal->theData[1] = scanPtr.i; - sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2); - }//if - scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter; - /* ************************ */ - /* ACC_SCANCONF */ - /* ************************ */ - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = scanPtr.i; - signal->theData[2] = 1; /* NR OF LOCAL FRAGMENT */ - signal->theData[3] = fragrecptr.p->fragmentid; - signal->theData[4] = RNIL; - signal->theData[7] = AccScanConf::ZNOT_EMPTY_FRAGMENT; - sendSignal(scanPtr.p->scanUserblockref, GSN_ACC_SCANCONF, signal, 8, JBB); - /* NOT EMPTY FRAGMENT */ - return; -}//Dbacc::execACC_SCANREQ() - -/* ******************--------------------------------------------------------------- */ -/* NEXT_SCANREQ REQUEST FOR NEXT ELEMENT OF */ -/* ******************------------------------------+ A FRAGMENT. */ -/* SENDER: LQH, LEVEL B */ -void Dbacc::execNEXT_SCANREQ(Signal* signal) -{ - Uint32 tscanNextFlag; - jamEntry(); - scanPtr.i = signal->theData[0]; - operationRecPtr.i = signal->theData[1]; - tscanNextFlag = signal->theData[2]; - /* ------------------------------------------ */ - /* 1 = ZCOPY_NEXT GET NEXT ELEMENT */ - /* 2 = ZCOPY_NEXT_COMMIT COMMIT THE */ - /* ACTIVE ELEMENT AND GET THE NEXT ONE */ - /* 3 = ZCOPY_COMMIT COMMIT THE ACTIVE ELEMENT */ - /* 4 = ZCOPY_REPEAT GET THE ACTIVE ELEMENT */ - /* 5 = ZCOPY_ABORT RELOCK THE ACTIVE ELEMENT */ - /* 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY */ - /* ------------------------------------------ */ - tresult = 0; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - ndbrequire(scanPtr.p->scanState == ScanRec::WAIT_NEXT); - - scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter; - switch (tscanNextFlag) { - case NextScanReq::ZSCAN_NEXT: - jam(); - /*empty*/; - break; - case NextScanReq::ZSCAN_NEXT_COMMIT: - case NextScanReq::ZSCAN_COMMIT: - jam(); - /* --------------------------------------------------------------------- */ - /* COMMIT ACTIVE OPERATION. - * SEND NEXT SCAN ELEMENT IF IT IS ZCOPY_NEXT_COMMIT. 
- * --------------------------------------------------------------------- */ - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - if (!scanPtr.p->scanReadCommittedFlag) { - commitOperation(signal); - }//if - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - takeOutActiveScanOp(signal); - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - if (tscanNextFlag == NextScanReq::ZSCAN_COMMIT) { - jam(); - signal->theData[0] = scanPtr.p->scanUserptr; - Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref); - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 1); - return; - }//if - break; - case NextScanReq::ZSCAN_CLOSE: - jam(); - fragrecptr.i = scanPtr.p->activeLocalFrag; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - /* --------------------------------------------------------------------- - * THE SCAN PROCESS IS FINISHED. RELOCK ALL LOCKED EL. - * RELESE ALL INVOLVED REC. - * ------------------------------------------------------------------- */ - releaseScanLab(signal); - return; - break; - default: - ndbrequire(false); - break; - }//switch - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP; - execACC_CHECK_SCAN(signal); - return; -}//Dbacc::execNEXT_SCANREQ() - -void Dbacc::checkNextBucketLab(Signal* signal) -{ - DirRangePtr cscDirRangePtr; - DirectoryarrayPtr cscDirptr; - DirectoryarrayPtr tnsDirptr; - Page8Ptr nsPageptr; - Page8Ptr cscPageidptr; - Page8Ptr gnsPageidptr; - Page8Ptr tnsPageidptr; - Uint32 tnsElementptr; - Uint32 tnsContainerptr; - Uint32 tnsIsLocked; - Uint32 tnsTmp1; - Uint32 tnsTmp2; - Uint32 tnsCopyIndex1; - Uint32 tnsCopyIndex2; - Uint32 tnsCopyDir; - - tnsCopyDir = scanPtr.p->nextBucketIndex >> fragrecptr.p->k; - tnsCopyIndex1 = tnsCopyDir >> 8; - tnsCopyIndex2 = tnsCopyDir & 0xff; - arrGuard(tnsCopyIndex1, 256); - tnsDirptr.i = gnsDirRangePtr.p->dirArray[tnsCopyIndex1]; - ptrCheckGuard(tnsDirptr, cdirarraysize, directoryarray); - tnsPageidptr.i = tnsDirptr.p->pagep[tnsCopyIndex2]; - ptrCheckGuard(tnsPageidptr, cpagesize, page8); - gnsPageidptr.i = tnsPageidptr.i; - gnsPageidptr.p = tnsPageidptr.p; - tnsTmp1 = (1 << fragrecptr.p->k) - 1; - tgsePageindex = scanPtr.p->nextBucketIndex & tnsTmp1; - gsePageidptr.i = gnsPageidptr.i; - gsePageidptr.p = gnsPageidptr.p; - if (!getScanElement(signal)) { - scanPtr.p->nextBucketIndex++; - if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { - if (scanPtr.p->nextBucketIndex > scanPtr.p->maxBucketIndexToRescan) { - /* ---------------------------------------------------------------- */ - // We have finished the rescan phase. - // We are ready to proceed with the next fragment part. - /* ---------------------------------------------------------------- */ - jam(); - checkNextFragmentLab(signal); - return; - }//if - } else if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { - if ((fragrecptr.p->p + fragrecptr.p->maxp) < scanPtr.p->nextBucketIndex) { - /* ---------------------------------------------------------------- */ - // All buckets have been scanned a first time. - /* ---------------------------------------------------------------- */ - if (scanPtr.p->minBucketIndexToRescan == 0xFFFFFFFF) { - jam(); - /* -------------------------------------------------------------- */ - // We have not had any merges behind the scan. - // Thus it is not necessary to perform any rescan any buckets - // and we can proceed immediately with the next fragment part. 
- /* --------------------------------------------------------------- */ - checkNextFragmentLab(signal); - return; - } else { - jam(); - /* --------------------------------------------------------------------------------- */ - // Some buckets are in the need of rescanning due to merges that have moved records - // from in front of the scan to behind the scan. During the merges we kept track of - // which buckets that need a rescan. We start with the minimum and end with maximum. - /* --------------------------------------------------------------------------------- */ - scanPtr.p->nextBucketIndex = scanPtr.p->minBucketIndexToRescan; - scanPtr.p->scanBucketState = ScanRec::SECOND_LAP; - if (scanPtr.p->maxBucketIndexToRescan > (fragrecptr.p->p + fragrecptr.p->maxp)) { - jam(); - /* --------------------------------------------------------------------------------- */ - // If we have had so many merges that the maximum is bigger than the number of buckets - // then we will simply satisfy ourselves with scanning to the end. This can only happen - // after bringing down the total of buckets to less than half and the minimum should - // be 0 otherwise there is some problem. - /* --------------------------------------------------------------------------------- */ - if (scanPtr.p->minBucketIndexToRescan != 0) { - jam(); - sendSystemerror(signal, __LINE__); - return; - }//if - scanPtr.p->maxBucketIndexToRescan = fragrecptr.p->p + fragrecptr.p->maxp; - }//if - }//if - }//if - }//if - if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) && - (scanPtr.p->nextBucketIndex <= scanPtr.p->startNoOfBuckets)) { - /* --------------------------------------------------------------------------------- */ - // We will only reset the scan indicator on the buckets that existed at the start of the - // scan. The others will be handled by the split and merge code. - /* --------------------------------------------------------------------------------- */ - tnsTmp2 = (1 << fragrecptr.p->k) - 1; - trsbPageindex = scanPtr.p->nextBucketIndex & tnsTmp2; - if (trsbPageindex != 0) { - jam(); - rsbPageidptr.i = gnsPageidptr.i; - rsbPageidptr.p = gnsPageidptr.p; - } else { - jam(); - cscDirRangePtr.i = fragrecptr.p->directory; - tmpP = scanPtr.p->nextBucketIndex >> fragrecptr.p->k; - tmpP2 = tmpP >> 8; - tmpP = tmpP & 0xff; - ptrCheckGuard(cscDirRangePtr, cdirrangesize, dirRange); - arrGuard(tmpP2, 256); - cscDirptr.i = cscDirRangePtr.p->dirArray[tmpP2]; - ptrCheckGuard(cscDirptr, cdirarraysize, directoryarray); - cscPageidptr.i = cscDirptr.p->pagep[tmpP]; - ptrCheckGuard(cscPageidptr, cpagesize, page8); - tmp1 = (1 << fragrecptr.p->k) - 1; - trsbPageindex = scanPtr.p->nextBucketIndex & tmp1; - rsbPageidptr.i = cscPageidptr.i; - rsbPageidptr.p = cscPageidptr.p; - }//if - releaseScanBucket(signal); - }//if - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - }//if - /* ----------------------------------------------------------------------- */ - /* AN ELEMENT WHICH HAVE NOT BEEN SCANNED WAS FOUND. WE WILL PREPARE IT */ - /* TO BE SENT TO THE LQH BLOCK FOR FURTHER PROCESSING. */ - /* WE ASSUME THERE ARE OPERATION RECORDS AVAILABLE SINCE LQH SHOULD HAVE*/ - /* GUARANTEED THAT THROUGH EARLY BOOKING. 
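
The bucket-advance logic in checkNextBucketLab() above implements a two-lap traversal: the first lap visits every bucket that existed when the scan started, and buckets that merges moved behind the scan are recorded in [minBucketIndexToRescan, maxBucketIndexToRescan] and revisited in a second lap before the fragment is reported finished. A compact sketch of that state machine (illustrative names and return values, not part of the removed file):

    #include <cstdint>

    enum class ScanLap { FIRST_LAP, SECOND_LAP };
    enum class Advance { NEXT_BUCKET, START_SECOND_LAP, FRAGMENT_DONE };

    struct ScanProgress
    {
      ScanLap  lap;
      uint32_t nextBucketIndex;
      uint32_t minRescan;   // 0xFFFFFFFF when nothing was merged behind the scan
      uint32_t maxRescan;
    };

    static Advance advanceBucket(ScanProgress& s, uint32_t topBucket /* p + maxp */)
    {
      s.nextBucketIndex++;
      if (s.lap == ScanLap::SECOND_LAP) {
        return (s.nextBucketIndex > s.maxRescan) ? Advance::FRAGMENT_DONE
                                                 : Advance::NEXT_BUCKET;
      }
      if (s.nextBucketIndex <= topBucket)
        return Advance::NEXT_BUCKET;            // still in the first lap
      if (s.minRescan == 0xFFFFFFFFu)
        return Advance::FRAGMENT_DONE;          // no merges happened behind us
      s.lap = ScanLap::SECOND_LAP;              // rescan the merged range
      s.nextBucketIndex = s.minRescan;
      if (s.maxRescan > topBucket)
        s.maxRescan = topBucket;                // the code above also requires minRescan == 0 here
      return Advance::START_SECOND_LAP;
    }
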
*/ - /* ----------------------------------------------------------------------- */ - tnsIsLocked = tgseIsLocked; - tnsElementptr = tgseElementptr; - tnsContainerptr = tgseContainerptr; - nsPageptr.i = gsePageidptr.i; - nsPageptr.p = gsePageidptr.p; - seizeOpRec(signal); - tisoIsforward = tgseIsforward; - tisoContainerptr = tnsContainerptr; - tisoElementptr = tnsElementptr; - isoPageptr.i = nsPageptr.i; - isoPageptr.p = nsPageptr.p; - initScanOpRec(signal); - - if (!tnsIsLocked){ - if (!scanPtr.p->scanReadCommittedFlag) { - jam(); - slPageidptr = nsPageptr; - tslElementptr = tnsElementptr; - setlock(signal); - insertLockOwnersList(signal, operationRecPtr); - operationRecPtr.p->m_op_bits |= - Operationrec::OP_STATE_RUNNING | Operationrec::OP_RUN_QUEUE; - }//if - } else { - arrGuard(tnsElementptr, 2048); - queOperPtr.i = - ElementHeader::getOpPtrI(nsPageptr.p->word32[tnsElementptr]); - ptrCheckGuard(queOperPtr, coprecsize, operationrec); - if (queOperPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED || - queOperPtr.p->localdata[0] == ~(Uint32)0) - { - jam(); - /* ------------------------------------------------------------------ */ - // If the lock owner indicates the element is disappeared then - // we will not report this tuple. We will continue with the next tuple. - /* ------------------------------------------------------------------ */ - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - }//if - if (!scanPtr.p->scanReadCommittedFlag) { - Uint32 return_result; - if (scanPtr.p->scanLockMode == ZREADLOCK) { - jam(); - return_result = placeReadInLockQueue(queOperPtr); - } else { - jam(); - return_result = placeWriteInLockQueue(queOperPtr); - }//if - if (return_result == ZSERIAL_QUEUE) { - /* ----------------------------------------------------------------- - * WE PLACED THE OPERATION INTO A SERIAL QUEUE AND THUS WE HAVE TO - * WAIT FOR THE LOCK TO BE RELEASED. WE CONTINUE WITH THE NEXT ELEMENT - * ----------------------------------------------------------------- */ - putOpScanLockQue(); /* PUT THE OP IN A QUE IN THE SCAN REC */ - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - } else if (return_result != ZPARALLEL_QUEUE) { - jam(); - /* ----------------------------------------------------------------- */ - // The tuple is either not committed yet or a delete in - // the same transaction (not possible here since we are a scan). - // Thus we simply continue with the next tuple. - /* ----------------------------------------------------------------- */ - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - }//if - ndbassert(return_result == ZPARALLEL_QUEUE); - }//if - }//if - /* ----------------------------------------------------------------------- */ - // Committed read proceed without caring for locks immediately - // down here except when the tuple was deleted permanently - // and no new operation has inserted it again. 
- /* ----------------------------------------------------------------------- */ - putActiveScanOp(signal); - sendNextScanConf(signal); - return; -}//Dbacc::checkNextBucketLab() - - -void Dbacc::checkNextFragmentLab(Signal* signal) -{ - scanPtr.p->scanBucketState = ScanRec::SCAN_COMPLETED; - // The scan is completed. ACC_CHECK_SCAN will perform all the necessary - // checks to see - // what the next step is. - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - execACC_CHECK_SCAN(signal); - return; -}//Dbacc::checkNextFragmentLab() - -void Dbacc::initScanFragmentPart(Signal* signal) -{ - DirRangePtr cnfDirRangePtr; - DirectoryarrayPtr cnfDirptr; - Page8Ptr cnfPageidptr; - /* ----------------------------------------------------------------------- */ - // Set the active fragment part. - // Set the current bucket scanned to the first. - // Start with the first lap. - // Remember the number of buckets at start of the scan. - // Set the minimum and maximum to values that will always be smaller and - // larger than. - // Reset the scan indicator on the first bucket. - /* ----------------------------------------------------------------------- */ - scanPtr.p->activeLocalFrag = fragrecptr.i; - scanPtr.p->nextBucketIndex = 0; /* INDEX OF SCAN BUCKET */ - scanPtr.p->scanBucketState = ScanRec::FIRST_LAP; - scanPtr.p->startNoOfBuckets = fragrecptr.p->p + fragrecptr.p->maxp; - scanPtr.p->minBucketIndexToRescan = 0xFFFFFFFF; - scanPtr.p->maxBucketIndexToRescan = 0; - cnfDirRangePtr.i = fragrecptr.p->directory; - ptrCheckGuard(cnfDirRangePtr, cdirrangesize, dirRange); - cnfDirptr.i = cnfDirRangePtr.p->dirArray[0]; - ptrCheckGuard(cnfDirptr, cdirarraysize, directoryarray); - cnfPageidptr.i = cnfDirptr.p->pagep[0]; - ptrCheckGuard(cnfPageidptr, cpagesize, page8); - trsbPageindex = scanPtr.p->nextBucketIndex & ((1 << fragrecptr.p->k) - 1); - rsbPageidptr.i = cnfPageidptr.i; - rsbPageidptr.p = cnfPageidptr.p; - releaseScanBucket(signal); -}//Dbacc::initScanFragmentPart() - -/* ------------------------------------------------------------------------- - * FLAG = 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY OR ABORTED. - * ALL OPERATION IN THE ACTIVE OR WAIT QUEUE ARE RELEASED, - * SCAN FLAG OF ROOT FRAG IS RESET AND THE SCAN RECORD IS RELEASED. - * ------------------------------------------------------------------------ */ -void Dbacc::releaseScanLab(Signal* signal) -{ - releaseAndCommitActiveOps(signal); - releaseAndCommitQueuedOps(signal); - releaseAndAbortLockedOps(signal); - - fragrecptr.i = scanPtr.p->activeLocalFrag; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - for (tmp = 0; tmp < MAX_PARALLEL_SCANS_PER_FRAG; tmp++) { - jam(); - if (fragrecptr.p->scan[tmp] == scanPtr.i) { - jam(); - fragrecptr.p->scan[tmp] = RNIL; - }//if - }//for - // Stops the heartbeat. 
- scanPtr.p->scanTimer = 0; - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = RNIL; - signal->theData[2] = RNIL; - sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB); - releaseScanRec(signal); - return; -}//Dbacc::releaseScanLab() - - -void Dbacc::releaseAndCommitActiveOps(Signal* signal) -{ - OperationrecPtr trsoOperPtr; - operationRecPtr.i = scanPtr.p->scanFirstActiveOp; - while (operationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - trsoOperPtr.i = operationRecPtr.p->nextOp; - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - if (!scanPtr.p->scanReadCommittedFlag) { - jam(); - if ((operationRecPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == - Operationrec::OP_STATE_EXECUTED) - { - commitOperation(signal); - } - else - { - abortOperation(signal); - } - }//if - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - takeOutActiveScanOp(signal); - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - operationRecPtr.i = trsoOperPtr.i; - }//if -}//Dbacc::releaseAndCommitActiveOps() - - -void Dbacc::releaseAndCommitQueuedOps(Signal* signal) -{ - OperationrecPtr trsoOperPtr; - operationRecPtr.i = scanPtr.p->scanFirstQueuedOp; - while (operationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - trsoOperPtr.i = operationRecPtr.p->nextOp; - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - if (!scanPtr.p->scanReadCommittedFlag) { - jam(); - if ((operationRecPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == - Operationrec::OP_STATE_EXECUTED) - { - commitOperation(signal); - } - else - { - abortOperation(signal); - } - }//if - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - takeOutReadyScanQueue(signal); - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - operationRecPtr.i = trsoOperPtr.i; - }//if -}//Dbacc::releaseAndCommitQueuedOps() - -void Dbacc::releaseAndAbortLockedOps(Signal* signal) { - - OperationrecPtr trsoOperPtr; - operationRecPtr.i = scanPtr.p->scanFirstLockedOp; - while (operationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - trsoOperPtr.i = operationRecPtr.p->nextOp; - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - if (!scanPtr.p->scanReadCommittedFlag) { - jam(); - abortOperation(signal); - }//if - takeOutScanLockQueue(scanPtr.i); - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - operationRecPtr.i = trsoOperPtr.i; - }//if -}//Dbacc::releaseAndAbortLockedOps() - -/* 3.18.3 ACC_CHECK_SCAN */ -/* ******************--------------------------------------------------------------- */ -/* ACC_CHECK_SCAN */ -/* ENTER ACC_CHECK_SCAN WITH */ -/* SCAN_PTR */ -/* ******************--------------------------------------------------------------- */ -/* ******************--------------------------------------------------------------- */ -/* ACC_CHECK_SCAN */ -/* ******************------------------------------+ */ -void Dbacc::execACC_CHECK_SCAN(Signal* signal) -{ - Uint32 TcheckLcpStop; - jamEntry(); - scanPtr.i = signal->theData[0]; - TcheckLcpStop = signal->theData[1]; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - while (scanPtr.p->scanFirstQueuedOp != RNIL) { - jam(); - //--------------------------------------------------------------------- - // An operation has been 
released from the lock queue. - // We are in the parallel queue of this tuple. We are - // ready to report the tuple now. - //------------------------------------------------------------------------ - operationRecPtr.i = scanPtr.p->scanFirstQueuedOp; - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - takeOutReadyScanQueue(signal); - fragrecptr.i = operationRecPtr.p->fragptr; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - if (operationRecPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED) - { - jam(); - abortOperation(signal); - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; - releaseOpRec(signal); - scanPtr.p->scanOpsAllocated--; - continue; - }//if - putActiveScanOp(signal); - sendNextScanConf(signal); - return; - }//while - - - if ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) && - (scanPtr.p->scanLockHeld == 0)) { - jam(); - //---------------------------------------------------------------------------- - // The scan is now completed and there are no more locks outstanding. Thus we - // we will report the scan as completed to LQH. - //---------------------------------------------------------------------------- - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = RNIL; - signal->theData[2] = RNIL; - sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB); - return; - }//if - if (TcheckLcpStop == AccCheckScan::ZCHECK_LCP_STOP) { - //--------------------------------------------------------------------------- - // To ensure that the block of the fragment occurring at the start of a local - // checkpoint is not held for too long we insert a release and reacquiring of - // that lock here. This is performed in LQH. If we are blocked or if we have - // requested a sleep then we will receive RNIL in the returning signal word. 
- //--------------------------------------------------------------------------- - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = - ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) || - (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED)); - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - if (signal->theData[0] == RNIL) { - jam(); - return; - }//if - }//if - /** - * If we have more than max locks held OR - * scan is completed AND at least one lock held - * - Inform LQH about this condition - */ - if ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) || - (cfreeopRec == RNIL) || - ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) && - (scanPtr.p->scanLockHeld > 0))) { - jam(); - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = RNIL; // No operation is returned - signal->theData[2] = 512; // MASV - sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB); - return; - } - if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) { - jam(); - signal->theData[0] = scanPtr.i; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - execACC_CHECK_SCAN(signal); - return; - }//if - - scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter; - - fragrecptr.i = scanPtr.p->activeLocalFrag; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - gnsDirRangePtr.i = fragrecptr.p->directory; - ptrCheckGuard(gnsDirRangePtr, cdirrangesize, dirRange); - checkNextBucketLab(signal); - return; -}//Dbacc::execACC_CHECK_SCAN() - -/* ******************---------------------------------------------------- */ -/* ACC_TO_REQ PERFORM A TAKE OVER */ -/* ******************-------------------+ */ -/* SENDER: LQH, LEVEL B */ -void Dbacc::execACC_TO_REQ(Signal* signal) -{ - OperationrecPtr tatrOpPtr; - - jamEntry(); - tatrOpPtr.i = signal->theData[1]; /* OPER PTR OF ACC */ - ptrCheckGuard(tatrOpPtr, coprecsize, operationrec); - if ((tatrOpPtr.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP) - { - tatrOpPtr.p->transId1 = signal->theData[2]; - tatrOpPtr.p->transId2 = signal->theData[3]; - validate_lock_queue(tatrOpPtr); - } else { - jam(); - signal->theData[0] = cminusOne; - signal->theData[1] = ZTO_OP_STATE_ERROR; - }//if - return; -}//Dbacc::execACC_TO_REQ() - -/* --------------------------------------------------------------------------------- */ -/* CONTAINERINFO */ -/* INPUT: */ -/* CI_PAGEIDPTR (PAGE POINTER WHERE CONTAINER RESIDES) */ -/* TCI_PAGEINDEX (INDEX OF CONTAINER, USED TO CALCULATE PAGE INDEX) */ -/* TCI_ISFORWARD (DIRECTION OF CONTAINER FORWARD OR BACKWARD) */ -/* */ -/* OUTPUT: */ -/* TCI_CONTAINERPTR (A POINTER TO THE HEAD OF THE CONTAINER) */ -/* TCI_CONTAINERLEN (LENGTH OF THE CONTAINER */ -/* TCI_CONTAINERHEAD (THE HEADER OF THE CONTAINER) */ -/* */ -/* DESCRIPTION: THE ADDRESS OF THE CONTAINER WILL BE CALCULATED AND */ -/* ALL INFORMATION ABOUT THE CONTAINER WILL BE READ */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::containerinfo(Signal* signal) -{ - tciContainerptr = (tciPageindex << ZSHIFT_PLUS) - (tciPageindex << ZSHIFT_MINUS); - if (tciIsforward == ZTRUE) { - jam(); - tciContainerptr = tciContainerptr + ZHEAD_SIZE; - } else { - jam(); - tciContainerptr = ((tciContainerptr + ZHEAD_SIZE) + ZBUF_SIZE) - ZCON_HEAD_SIZE; - }//if - arrGuard(tciContainerptr, 2048); - tciContainerhead = ciPageidptr.p->word32[tciContainerptr]; - tciContainerlen = tciContainerhead >> 26; -}//Dbacc::containerinfo() - -/* 
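
The back-pressure gate in execACC_CHECK_SCAN() above pauses the scan and reports back to LQH (a NEXT_SCANCONF carrying no operation) in three situations. A one-function sketch of that predicate, not part of the removed file; ZSCAN_MAX_LOCK is the limit used above and its numeric value is not restated here:

    #include <cstdint>

    static bool mustReportBackToLqh(uint32_t locksHeld,
                                    uint32_t maxLocks,        // ZSCAN_MAX_LOCK
                                    bool     noFreeOpRecords, // cfreeopRec == RNIL
                                    bool     scanCompleted)
    {
      return locksHeld >= maxLocks
          || noFreeOpRecords
          || (scanCompleted && locksHeld > 0);
    }
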
--------------------------------------------------------------------------------- */ -/* GET_SCAN_ELEMENT */ -/* INPUT: GSE_PAGEIDPTR */ -/* TGSE_PAGEINDEX */ -/* OUTPUT: TGSE_IS_LOCKED (IF TRESULT /= ZFALSE) */ -/* GSE_PAGEIDPTR */ -/* TGSE_PAGEINDEX */ -/* --------------------------------------------------------------------------------- */ -bool Dbacc::getScanElement(Signal* signal) -{ - tgseIsforward = ZTRUE; - NEXTSEARCH_SCAN_LOOP: - ciPageidptr.i = gsePageidptr.i; - ciPageidptr.p = gsePageidptr.p; - tciPageindex = tgsePageindex; - tciIsforward = tgseIsforward; - containerinfo(signal); - sscPageidptr.i = gsePageidptr.i; - sscPageidptr.p = gsePageidptr.p; - tsscContainerlen = tciContainerlen; - tsscContainerptr = tciContainerptr; - tsscIsforward = tciIsforward; - if (searchScanContainer(signal)) { - jam(); - tgseIsLocked = tsscIsLocked; - tgseElementptr = tsscElementptr; - tgseContainerptr = tsscContainerptr; - return true; - }//if - if (((tciContainerhead >> 7) & 0x3) != 0) { - jam(); - nciPageidptr.i = gsePageidptr.i; - nciPageidptr.p = gsePageidptr.p; - tnciContainerhead = tciContainerhead; - tnciContainerptr = tciContainerptr; - nextcontainerinfo(signal); - tgsePageindex = tnciPageindex; - gsePageidptr.i = nciPageidptr.i; - gsePageidptr.p = nciPageidptr.p; - tgseIsforward = tnciIsforward; - goto NEXTSEARCH_SCAN_LOOP; - }//if - return false; -}//Dbacc::getScanElement() - -/* --------------------------------------------------------------------------------- */ -/* INIT_SCAN_OP_REC */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initScanOpRec(Signal* signal) -{ - Uint32 tisoTmp; - Uint32 tisoLocalPtr; - Uint32 guard24; - - scanPtr.p->scanOpsAllocated++; - - Uint32 opbits = 0; - opbits |= ZSCAN_OP; - opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0; - opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; - opbits |= (scanPtr.p->scanReadCommittedFlag ? 
- (Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0); - opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; - operationRecPtr.p->userptr = RNIL; - operationRecPtr.p->scanRecPtr = scanPtr.i; - operationRecPtr.p->fid = fragrecptr.p->myfid; - operationRecPtr.p->fragptr = fragrecptr.i; - operationRecPtr.p->nextParallelQue = RNIL; - operationRecPtr.p->prevParallelQue = RNIL; - operationRecPtr.p->nextSerialQue = RNIL; - operationRecPtr.p->prevSerialQue = RNIL; - operationRecPtr.p->transId1 = scanPtr.p->scanTrid1; - operationRecPtr.p->transId2 = scanPtr.p->scanTrid2; - operationRecPtr.p->elementIsforward = tisoIsforward; - operationRecPtr.p->elementContainer = tisoContainerptr; - operationRecPtr.p->elementPointer = tisoElementptr; - operationRecPtr.p->elementPage = isoPageptr.i; - operationRecPtr.p->m_op_bits = opbits; - tisoLocalPtr = tisoElementptr + tisoIsforward; - guard24 = fragrecptr.p->localkeylen - 1; - for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) { - arrGuard(tisoTmp, 2); - arrGuard(tisoLocalPtr, 2048); - operationRecPtr.p->localdata[tisoTmp] = isoPageptr.p->word32[tisoLocalPtr]; - tisoLocalPtr = tisoLocalPtr + tisoIsforward; - }//for - arrGuard(tisoLocalPtr, 2048); - operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength; - operationRecPtr.p->xfrmtupkeylen = 0; // not used -}//Dbacc::initScanOpRec() - -/* --------------------------------------------------------------------------------- */ -/* NEXTCONTAINERINFO */ -/* DESCRIPTION:THE CONTAINER HEAD WILL BE CHECKED TO CALCULATE INFORMATION */ -/* ABOUT NEXT CONTAINER IN THE BUCKET. */ -/* INPUT: TNCI_CONTAINERHEAD */ -/* NCI_PAGEIDPTR */ -/* TNCI_CONTAINERPTR */ -/* OUTPUT: */ -/* TNCI_PAGEINDEX (INDEX FROM WHICH PAGE INDEX CAN BE CALCULATED). */ -/* TNCI_ISFORWARD (IS THE NEXT CONTAINER FORWARD (+1) OR BACKWARD (-1) */ -/* NCI_PAGEIDPTR (PAGE REFERENCE OF NEXT CONTAINER) */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::nextcontainerinfo(Signal* signal) -{ - tnciNextSamePage = (tnciContainerhead >> 9) & 0x1; /* CHECK BIT FOR CHECKING WHERE */ - /* THE NEXT CONTAINER IS IN THE SAME PAGE */ - tnciPageindex = tnciContainerhead & 0x7f; /* NEXT CONTAINER PAGE INDEX 7 BITS */ - if (((tnciContainerhead >> 7) & 3) == ZLEFT) { - jam(); - tnciIsforward = ZTRUE; - } else { - jam(); - tnciIsforward = cminusOne; - }//if - if (tnciNextSamePage == ZFALSE) { - jam(); - /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */ - arrGuard(tnciContainerptr + 1, 2048); - tnciTmp = nciPageidptr.p->word32[tnciContainerptr + 1]; - nciOverflowrangeptr.i = fragrecptr.p->overflowdir; - ptrCheckGuard(nciOverflowrangeptr, cdirrangesize, dirRange); - arrGuard((tnciTmp >> 8), 256); - nciOverflowDirptr.i = nciOverflowrangeptr.p->dirArray[tnciTmp >> 8]; - ptrCheckGuard(nciOverflowDirptr, cdirarraysize, directoryarray); - nciPageidptr.i = nciOverflowDirptr.p->pagep[tnciTmp & 0xff]; - ptrCheckGuard(nciPageidptr, cpagesize, page8); - }//if -}//Dbacc::nextcontainerinfo() - -/* --------------------------------------------------------------------------------- */ -/* PUT_ACTIVE_SCAN_OP */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::putActiveScanOp(Signal* signal) -{ - OperationrecPtr pasOperationRecPtr; - pasOperationRecPtr.i = scanPtr.p->scanFirstActiveOp; - if (pasOperationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(pasOperationRecPtr, coprecsize, operationrec); - pasOperationRecPtr.p->prevOp = operationRecPtr.i; - }//if - operationRecPtr.p->nextOp = pasOperationRecPtr.i; - 
operationRecPtr.p->prevOp = RNIL; - scanPtr.p->scanFirstActiveOp = operationRecPtr.i; -}//Dbacc::putActiveScanOp() - -/** - * putOpScanLockQueue - * - * Description: Put an operation in the doubly linked - * lock list on a scan record. The list is used to - * keep track of which operations belonging - * to the scan are put in serial lock list of another - * operation - * - * @note Use takeOutScanLockQueue to remove an operation - * from the list - * - */ -void Dbacc::putOpScanLockQue() -{ - -#ifdef VM_TRACE - // DEBUG CODE - // Check that there are as many operations in the lockqueue as - // scanLockHeld indicates - OperationrecPtr tmpOp; - int numLockedOpsBefore = 0; - tmpOp.i = scanPtr.p->scanFirstLockedOp; - while(tmpOp.i != RNIL){ - numLockedOpsBefore++; - ptrCheckGuard(tmpOp, coprecsize, operationrec); - if (tmpOp.p->nextOp == RNIL) - ndbrequire(tmpOp.i == scanPtr.p->scanLastLockedOp); - tmpOp.i = tmpOp.p->nextOp; - } - ndbrequire(numLockedOpsBefore==scanPtr.p->scanLockHeld); -#endif - - OperationrecPtr pslOperationRecPtr; - ScanRec theScanRec; - theScanRec = *scanPtr.p; - - pslOperationRecPtr.i = scanPtr.p->scanLastLockedOp; - operationRecPtr.p->prevOp = pslOperationRecPtr.i; - operationRecPtr.p->nextOp = RNIL; - if (pslOperationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(pslOperationRecPtr, coprecsize, operationrec); - pslOperationRecPtr.p->nextOp = operationRecPtr.i; - } else { - jam(); - scanPtr.p->scanFirstLockedOp = operationRecPtr.i; - }//if - scanPtr.p->scanLastLockedOp = operationRecPtr.i; - scanPtr.p->scanLockHeld++; - -}//Dbacc::putOpScanLockQue() - -/* --------------------------------------------------------------------------------- */ -/* PUT_READY_SCAN_QUEUE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::putReadyScanQueue(Signal* signal, Uint32 scanRecIndex) -{ - OperationrecPtr prsOperationRecPtr; - ScanRecPtr TscanPtr; - - TscanPtr.i = scanRecIndex; - ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); - - prsOperationRecPtr.i = TscanPtr.p->scanLastQueuedOp; - operationRecPtr.p->prevOp = prsOperationRecPtr.i; - operationRecPtr.p->nextOp = RNIL; - TscanPtr.p->scanLastQueuedOp = operationRecPtr.i; - if (prsOperationRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(prsOperationRecPtr, coprecsize, operationrec); - prsOperationRecPtr.p->nextOp = operationRecPtr.i; - } else { - jam(); - TscanPtr.p->scanFirstQueuedOp = operationRecPtr.i; - }//if -}//Dbacc::putReadyScanQueue() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_SCAN_BUCKET */ -// Input: -// rsbPageidptr.i Index to page where buckets starts -// rsbPageidptr.p Pointer to page where bucket starts -// trsbPageindex Page index of starting container in bucket -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseScanBucket(Signal* signal) -{ - Uint32 trsbIsforward; - - trsbIsforward = ZTRUE; - NEXTRELEASESCANLOOP: - ciPageidptr.i = rsbPageidptr.i; - ciPageidptr.p = rsbPageidptr.p; - tciPageindex = trsbPageindex; - tciIsforward = trsbIsforward; - containerinfo(signal); - rscPageidptr.i = rsbPageidptr.i; - rscPageidptr.p = rsbPageidptr.p; - trscContainerlen = tciContainerlen; - trscContainerptr = tciContainerptr; - trscIsforward = trsbIsforward; - releaseScanContainer(signal); - if (((tciContainerhead >> 7) & 0x3) != 0) { - jam(); - nciPageidptr.i = rsbPageidptr.i; - nciPageidptr.p = rsbPageidptr.p; - tnciContainerhead = tciContainerhead; - tnciContainerptr = 
tciContainerptr; - nextcontainerinfo(signal); - rsbPageidptr.i = nciPageidptr.i; - rsbPageidptr.p = nciPageidptr.p; - trsbPageindex = tnciPageindex; - trsbIsforward = tnciIsforward; - goto NEXTRELEASESCANLOOP; - }//if -}//Dbacc::releaseScanBucket() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_SCAN_CONTAINER */ -/* INPUT: TRSC_CONTAINERLEN */ -/* RSC_PAGEIDPTR */ -/* TRSC_CONTAINERPTR */ -/* TRSC_ISFORWARD */ -/* SCAN_PTR */ -/* */ -/* DESCRIPTION: SEARCHS IN A CONTAINER, AND THE SCAN BIT OF THE ELEMENTS */ -/* OF THE CONTAINER IS RESET */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseScanContainer(Signal* signal) -{ - OperationrecPtr rscOperPtr; - Uint32 trscElemStep; - Uint32 trscElementptr; - Uint32 trscElemlens; - Uint32 trscElemlen; - - if (trscContainerlen < 4) { - if (trscContainerlen != ZCON_HEAD_SIZE) { - jam(); - sendSystemerror(signal, __LINE__); - }//if - return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */ - }//if - trscElemlens = trscContainerlen - ZCON_HEAD_SIZE; - trscElemlen = fragrecptr.p->elementLength; - if (trscIsforward == 1) { - jam(); - trscElementptr = trscContainerptr + ZCON_HEAD_SIZE; - trscElemStep = trscElemlen; - } else { - jam(); - trscElementptr = trscContainerptr - 1; - trscElemStep = 0 - trscElemlen; - }//if - do { - arrGuard(trscElementptr, 2048); - const Uint32 eh = rscPageidptr.p->word32[trscElementptr]; - const Uint32 scanMask = scanPtr.p->scanMask; - if (ElementHeader::getUnlocked(eh)) { - jam(); - const Uint32 tmp = ElementHeader::clearScanBit(eh, scanMask); - dbgWord32(rscPageidptr, trscElementptr, tmp); - rscPageidptr.p->word32[trscElementptr] = tmp; - } else { - jam(); - rscOperPtr.i = ElementHeader::getOpPtrI(eh); - ptrCheckGuard(rscOperPtr, coprecsize, operationrec); - rscOperPtr.p->scanBits &= ~scanMask; - }//if - trscElemlens = trscElemlens - trscElemlen; - trscElementptr = trscElementptr + trscElemStep; - } while (trscElemlens > 1); - if (trscElemlens != 0) { - jam(); - sendSystemerror(signal, __LINE__); - }//if -}//Dbacc::releaseScanContainer() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_SCAN_REC */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseScanRec(Signal* signal) -{ - // Check that all ops this scan has allocated have been - // released - ndbrequire(scanPtr.p->scanOpsAllocated==0); - - // Check that all locks this scan might have aquired - // have been properly released - ndbrequire(scanPtr.p->scanLockHeld == 0); - ndbrequire(scanPtr.p->scanFirstLockedOp == RNIL); - ndbrequire(scanPtr.p->scanLastLockedOp == RNIL); - - // Check that all active operations have been - // properly released - ndbrequire(scanPtr.p->scanFirstActiveOp == RNIL); - - // Check that all queued operations have been - // properly released - ndbrequire(scanPtr.p->scanFirstQueuedOp == RNIL); - ndbrequire(scanPtr.p->scanLastQueuedOp == RNIL); - - // Put scan record in free list - scanPtr.p->scanNextfreerec = cfirstFreeScanRec; - scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT; - cfirstFreeScanRec = scanPtr.i; - -}//Dbacc::releaseScanRec() - -/* --------------------------------------------------------------------------------- */ -/* SEARCH_SCAN_CONTAINER */ -/* INPUT: TSSC_CONTAINERLEN */ -/* TSSC_CONTAINERPTR */ -/* TSSC_ISFORWARD */ -/* SSC_PAGEIDPTR */ -/* SCAN_PTR */ -/* OUTPUT: TSSC_IS_LOCKED */ -/* */ -/* DESCRIPTION: SEARCH IN A 
CONTAINER TO FIND THE NEXT SCAN ELEMENT. */ -/* TO DO THIS THE SCAN BIT OF THE ELEMENT HEADER IS CHECKED. IF */ -/* THIS BIT IS ZERO, IT IS SET TO ONE AND THE ELEMENT IS RETURNED.*/ -/* --------------------------------------------------------------------------------- */ -bool Dbacc::searchScanContainer(Signal* signal) -{ - OperationrecPtr sscOperPtr; - Uint32 tsscScanBits; - Uint32 tsscElemlens; - Uint32 tsscElemlen; - Uint32 tsscElemStep; - - if (tsscContainerlen < 4) { - jam(); - return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */ - }//if - tsscElemlens = tsscContainerlen - ZCON_HEAD_SIZE; - tsscElemlen = fragrecptr.p->elementLength; - /* LENGTH OF THE ELEMENT */ - if (tsscIsforward == 1) { - jam(); - tsscElementptr = tsscContainerptr + ZCON_HEAD_SIZE; - tsscElemStep = tsscElemlen; - } else { - jam(); - tsscElementptr = tsscContainerptr - 1; - tsscElemStep = 0 - tsscElemlen; - }//if - SCANELEMENTLOOP001: - arrGuard(tsscElementptr, 2048); - const Uint32 eh = sscPageidptr.p->word32[tsscElementptr]; - tsscIsLocked = ElementHeader::getLocked(eh); - if (!tsscIsLocked){ - jam(); - tsscScanBits = ElementHeader::getScanBits(eh); - if ((scanPtr.p->scanMask & tsscScanBits) == 0) { - jam(); - const Uint32 tmp = ElementHeader::setScanBit(eh, scanPtr.p->scanMask); - dbgWord32(sscPageidptr, tsscElementptr, tmp); - sscPageidptr.p->word32[tsscElementptr] = tmp; - return true; - }//if - } else { - jam(); - sscOperPtr.i = ElementHeader::getOpPtrI(eh); - ptrCheckGuard(sscOperPtr, coprecsize, operationrec); - if ((sscOperPtr.p->scanBits & scanPtr.p->scanMask) == 0) { - jam(); - sscOperPtr.p->scanBits |= scanPtr.p->scanMask; - return true; - }//if - }//if - /* THE ELEMENT IS ALREADY SENT. */ - /* SEARCH FOR NEXT ONE */ - tsscElemlens = tsscElemlens - tsscElemlen; - if (tsscElemlens > 1) { - jam(); - tsscElementptr = tsscElementptr + tsscElemStep; - goto SCANELEMENTLOOP001; - }//if - return false; -}//Dbacc::searchScanContainer() - -/* --------------------------------------------------------------------------------- */ -/* SEND THE RESPONSE NEXT_SCANCONF AND POSSIBLE KEYINFO SIGNALS AS WELL. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::sendNextScanConf(Signal* signal) -{ - scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter; - Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref); - jam(); - /** --------------------------------------------------------------------- - * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND - * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED. - * ---------------------------------------------------------------------- */ - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = operationRecPtr.i; - signal->theData[2] = operationRecPtr.p->fid; - signal->theData[3] = operationRecPtr.p->localdata[0]; - signal->theData[4] = operationRecPtr.p->localdata[1]; - signal->theData[5] = fragrecptr.p->localkeylen; - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6); - return; -}//Dbacc::sendNextScanConf() - -/*--------------------------------------------------------------------------- - * sendScanHbRep - * Description: Using Dispatcher::execute() to send a heartbeat to DBTC - * from DBLQH telling the scan is alive. We use the sendScanHbRep() - * in DBLQH, this needs to be done here in DBACC since it can take - * a while before LQH receives an answer the normal way from ACC. 
- *--------------------------------------------------------------------------*/ -void Dbacc::sendScanHbRep(Signal* signal, Uint32 scanPtrIndex) -{ - scanPtr.i = scanPtrIndex; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - - // If the timer status is on we continue with a new heartbeat in one second, - // else the loop stops and we will not send a new CONTINUEB - if (scanPtr.p->scanTimer != 0){ - if (scanPtr.p->scanTimer == scanPtr.p->scanContinuebCounter){ - jam(); - ndbrequire(scanPtr.p->scanState != ScanRec::SCAN_DISCONNECT); - - signal->theData[0] = scanPtr.p->scanUserptr; - signal->theData[1] = scanPtr.p->scanTrid1; - signal->theData[2] = scanPtr.p->scanTrid2; - EXECUTE_DIRECT(DBLQH, GSN_SCAN_HBREP, signal, 3); - jamEntry(); - }//if - scanPtr.p->scanContinuebCounter++; - signal->theData[0] = ZSEND_SCAN_HBREP; - signal->theData[1] = scanPtr.i; - sendSignalWithDelay(cownBlockref, GSN_CONTINUEB, signal, 100, 2); - } else { - jam(); - scanPtr.p->scanContinuebCounter = 0; - }//if -}//Dbacc::sendScanHbRep() - -/* --------------------------------------------------------------------------------- */ -/* SETLOCK */ -/* DESCRIPTION:SETS LOCK ON AN ELEMENT. INFORMATION ABOUT THE ELEMENT IS */ -/* SAVED IN THE ELEMENT HEAD.A COPY OF THIS INFORMATION WILL */ -/* BE PUT IN THE OPERATION RECORD. A FIELD IN THE HEADER OF */ -/* THE ELEMENT POINTS TO THE OPERATION RECORD. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::setlock(Signal* signal) -{ - Uint32 tselTmp1; - - arrGuard(tslElementptr, 2048); - tselTmp1 = slPageidptr.p->word32[tslElementptr]; - operationRecPtr.p->scanBits = ElementHeader::getScanBits(tselTmp1); - operationRecPtr.p->hashvaluePart = ElementHeader::getHashValuePart(tselTmp1); - - tselTmp1 = ElementHeader::setLocked(operationRecPtr.i); - dbgWord32(slPageidptr, tslElementptr, tselTmp1); - slPageidptr.p->word32[tslElementptr] = tselTmp1; -}//Dbacc::setlock() - -/* --------------------------------------------------------------------------------- */ -/* TAKE_OUT_ACTIVE_SCAN_OP */ -/* DESCRIPTION: AN ACTIVE SCAN OPERATION IS BELOGED TO AN ACTIVE LIST OF THE */ -/* SCAN RECORD. BY THIS SUBRUTIN THE LIST IS UPDATED. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::takeOutActiveScanOp(Signal* signal) -{ - OperationrecPtr tasOperationRecPtr; - - if (operationRecPtr.p->prevOp != RNIL) { - jam(); - tasOperationRecPtr.i = operationRecPtr.p->prevOp; - ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec); - tasOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; - } else { - jam(); - scanPtr.p->scanFirstActiveOp = operationRecPtr.p->nextOp; - }//if - if (operationRecPtr.p->nextOp != RNIL) { - jam(); - tasOperationRecPtr.i = operationRecPtr.p->nextOp; - ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec); - tasOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; - }//if -}//Dbacc::takeOutActiveScanOp() - -/** - * takeOutScanLockQueue - * - * Description: Take out an operation from the doubly linked - * lock list on a scan record. 
- * - * @note Use putOpScanLockQue to insert a operation in - * the list - * - */ -void Dbacc::takeOutScanLockQueue(Uint32 scanRecIndex) -{ - OperationrecPtr tslOperationRecPtr; - ScanRecPtr TscanPtr; - - TscanPtr.i = scanRecIndex; - ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); - - if (operationRecPtr.p->prevOp != RNIL) { - jam(); - tslOperationRecPtr.i = operationRecPtr.p->prevOp; - ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec); - tslOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; - } else { - jam(); - // Check that first are pointing at operation to take out - ndbrequire(TscanPtr.p->scanFirstLockedOp==operationRecPtr.i); - TscanPtr.p->scanFirstLockedOp = operationRecPtr.p->nextOp; - }//if - if (operationRecPtr.p->nextOp != RNIL) { - jam(); - tslOperationRecPtr.i = operationRecPtr.p->nextOp; - ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec); - tslOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; - } else { - jam(); - // Check that last are pointing at operation to take out - ndbrequire(TscanPtr.p->scanLastLockedOp==operationRecPtr.i); - TscanPtr.p->scanLastLockedOp = operationRecPtr.p->prevOp; - }//if - TscanPtr.p->scanLockHeld--; - -#ifdef VM_TRACE - // DEBUG CODE - // Check that there are as many operations in the lockqueue as - // scanLockHeld indicates - OperationrecPtr tmpOp; - int numLockedOps = 0; - tmpOp.i = TscanPtr.p->scanFirstLockedOp; - while(tmpOp.i != RNIL){ - numLockedOps++; - ptrCheckGuard(tmpOp, coprecsize, operationrec); - if (tmpOp.p->nextOp == RNIL) - ndbrequire(tmpOp.i == TscanPtr.p->scanLastLockedOp); - tmpOp.i = tmpOp.p->nextOp; - } - ndbrequire(numLockedOps==TscanPtr.p->scanLockHeld); -#endif -}//Dbacc::takeOutScanLockQueue() - -/* --------------------------------------------------------------------------------- */ -/* TAKE_OUT_READY_SCAN_QUEUE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::takeOutReadyScanQueue(Signal* signal) -{ - OperationrecPtr trsOperationRecPtr; - - if (operationRecPtr.p->prevOp != RNIL) { - jam(); - trsOperationRecPtr.i = operationRecPtr.p->prevOp; - ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec); - trsOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; - } else { - jam(); - scanPtr.p->scanFirstQueuedOp = operationRecPtr.p->nextOp; - }//if - if (operationRecPtr.p->nextOp != RNIL) { - jam(); - trsOperationRecPtr.i = operationRecPtr.p->nextOp; - ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec); - trsOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; - } else { - jam(); - scanPtr.p->scanLastQueuedOp = operationRecPtr.p->nextOp; - }//if -}//Dbacc::takeOutReadyScanQueue() - -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* */ -/* END OF SCAN MODULE */ -/* */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ - -bool Dbacc::getfragmentrec(Signal* signal, FragmentrecPtr& rootPtr, Uint32 fid) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragholder[i] == fid) { - jam(); - fragrecptr.i = tabptr.p->fragptrholder[i]; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - return true; - }//if - }//for - return false; -}//Dbacc::getrootfragmentrec() 
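
The scan module above keeps its operation records on intrusive doubly linked lists (active, locked and queued operations), addressed by record index with RNIL as the null value; the VM_TRACE blocks re-walk the locked list to verify that scanLockHeld matches its length. A standalone sketch of that insert/unlink pattern, using hypothetical pool and index types rather than the real Dbacc records, could look like this:

#include <cassert>
#include <cstdint>
#include <vector>

using Uint32 = std::uint32_t;
static const Uint32 RNIL = 0xffffffff;          // "no record" index (illustrative value only)

struct Op { Uint32 prevOp = RNIL; Uint32 nextOp = RNIL; };

struct LockQueue {
  Uint32 first = RNIL, last = RNIL;
  Uint32 held = 0;                              // mirrors scanLockHeld

  // Append op 'i' at the tail, as putOpScanLockQue does.
  void put(std::vector<Op>& pool, Uint32 i) {
    pool[i].prevOp = last;
    pool[i].nextOp = RNIL;
    if (last != RNIL) pool[last].nextOp = i; else first = i;
    last = i;
    held++;
  }

  // Unlink op 'i' from anywhere in the list, as takeOutScanLockQueue does.
  void takeOut(std::vector<Op>& pool, Uint32 i) {
    if (pool[i].prevOp != RNIL) pool[pool[i].prevOp].nextOp = pool[i].nextOp;
    else { assert(first == i); first = pool[i].nextOp; }
    if (pool[i].nextOp != RNIL) pool[pool[i].nextOp].prevOp = pool[i].prevOp;
    else { assert(last == i); last = pool[i].prevOp; }
    held--;
  }
};

int main() {
  std::vector<Op> pool(4);
  LockQueue q;
  q.put(pool, 0); q.put(pool, 2); q.put(pool, 3);
  q.takeOut(pool, 2);                           // unlink from the middle
  assert(q.first == 0 && q.last == 3 && q.held == 2);
  assert(pool[0].nextOp == 3 && pool[3].prevOp == 0);
  return 0;
}

Keeping both head and tail indices plus a counter lets the put routine append and the takeOut routine unlink in constant time, while the debug-only list walk remains an independent cross-check of the counter.
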
- -/* --------------------------------------------------------------------------------- */ -/* INIT_OVERPAGE */ -/* INPUT. IOP_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */ -/* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE, GET INITIALE VALUE */ -/* ACCORDING TO LH3 AND PAGE STRUCTOR DESCRIPTION OF NDBACC BLOCK */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initOverpage(Signal* signal) -{ - Uint32 tiopTmp; - Uint32 tiopPrevFree; - Uint32 tiopNextFree; - - for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) { - iopPageptr.p->word32[tiopIndex] = 0; - }//for - iopPageptr.p->word32[ZPOS_OVERFLOWREC] = iopOverflowRecPtr.i; - iopPageptr.p->word32[ZPOS_CHECKSUM] = 0; - iopPageptr.p->word32[ZPOS_PAGE_ID] = tiopPageId; - iopPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0; - tiopTmp = ZEMPTYLIST; - tiopTmp = (tiopTmp << 16) + (tiopTmp << 23); - iopPageptr.p->word32[ZPOS_EMPTY_LIST] = tiopTmp + (1 << ZPOS_PAGE_TYPE_BIT); - /* --------------------------------------------------------------------------------- */ - /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tiopIndex = ZHEAD_SIZE + 1; - iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; - for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) { - tiopIndex = tiopIndex + ZBUF_SIZE; - iopPageptr.p->word32[tiopIndex] = tiopPrevFree; - }//for - /* --------------------------------------------------------------------------------- */ - /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tiopIndex = ZHEAD_SIZE; - for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) { - iopPageptr.p->word32[tiopIndex] = tiopNextFree; - tiopIndex = tiopIndex + ZBUF_SIZE; - }//for - iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* LEFT_LIST IS UPDATED */ - /* --------------------------------------------------------------------------------- */ - /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 1; - iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; - for (tiopPrevFree = 0; tiopPrevFree <= ZEMPTYLIST - 2; tiopPrevFree++) { - tiopIndex = tiopIndex + ZBUF_SIZE; - iopPageptr.p->word32[tiopIndex] = tiopPrevFree; - }//for - /* --------------------------------------------------------------------------------- */ - /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 2; - for (tiopNextFree = 1; tiopNextFree <= ZEMPTYLIST - 1; tiopNextFree++) { - iopPageptr.p->word32[tiopIndex] = tiopNextFree; - tiopIndex = tiopIndex + ZBUF_SIZE; - }//for - iopPageptr.p->word32[tiopIndex] = ZEMPTYLIST; /* RIGHT_LIST IS UPDATED */ -}//Dbacc::initOverpage() - -/* --------------------------------------------------------------------------------- */ -/* INIT_PAGE */ -/* INPUT. 
INP_PAGEPTR, POINTER TO A PAGE RECORD */ -/* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE, GET INITIALE VALUE */ -/* ACCORDING TO LH3 AND PAGE STRUCTOR DISACRIPTION OF NDBACC BLOCK */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::initPage(Signal* signal) -{ - Uint32 tinpTmp1; - Uint32 tinpIndex; - Uint32 tinpTmp; - Uint32 tinpPrevFree; - Uint32 tinpNextFree; - - for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) { - inpPageptr.p->word32[tiopIndex] = 0; - }//for - /* --------------------------------------------------------------------------------- */ - /* SET PAGE ID FOR USE OF CHECKPOINTER. */ - /* PREPARE CONTAINER HEADERS INDICATING EMPTY CONTAINERS WITHOUT NEXT. */ - /* --------------------------------------------------------------------------------- */ - inpPageptr.p->word32[ZPOS_PAGE_ID] = tipPageId; - tinpTmp1 = ZCON_HEAD_SIZE; - tinpTmp1 = tinpTmp1 << 26; - /* --------------------------------------------------------------------------------- */ - /* INITIALISE ZNO_CONTAINERS PREDEFINED HEADERS ON LEFT SIZE. */ - /* --------------------------------------------------------------------------------- */ - tinpIndex = ZHEAD_SIZE; - for (tinpTmp = 0; tinpTmp <= ZNO_CONTAINERS - 1; tinpTmp++) { - inpPageptr.p->word32[tinpIndex] = tinpTmp1; - tinpIndex = tinpIndex + ZBUF_SIZE; - }//for - /* WORD32(ZPOS_EMPTY_LIST) DATA STRUCTURE:*/ - /*--------------------------------------- */ - /*| PAGE TYPE|LEFT FREE|RIGHT FREE */ - /*| 1 | LIST | LIST */ - /*| BIT | 7 BITS | 7 BITS */ - /*--------------------------------------- */ - /* --------------------------------------------------------------------------------- */ - /* INITIALISE FIRST POINTER TO DOUBLY LINKED LIST OF FREE CONTAINERS. */ - /* INITIALISE EMPTY LISTS OF USED CONTAINERS. */ - /* INITIALISE LEFT FREE LIST TO 64 AND RIGHT FREE LIST TO ZERO. */ - /* ALSO INITIALISE PAGE TYPE TO NOT OVERFLOW PAGE. */ - /* --------------------------------------------------------------------------------- */ - tinpTmp = ZEMPTYLIST; - tinpTmp = (tinpTmp << 16) + (tinpTmp << 23); - tinpTmp = tinpTmp + (ZNO_CONTAINERS << 7); - inpPageptr.p->word32[ZPOS_EMPTY_LIST] = tinpTmp; - /* --------------------------------------------------------------------------------- */ - /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 1; - inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST; - for (tinpPrevFree = 0; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) { - tinpIndex = tinpIndex + ZBUF_SIZE; - inpPageptr.p->word32[tinpIndex] = tinpPrevFree; - }//for - /* --------------------------------------------------------------------------------- */ - /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ - /* --------------------------------------------------------------------------------- */ - tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 2; - for (tinpNextFree = 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) { - inpPageptr.p->word32[tinpIndex] = tinpNextFree; - tinpIndex = tinpIndex + ZBUF_SIZE; - }//for - inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST; - /* --------------------------------------------------------------------------------- */ - /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ - /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */ - /* PREDEFINED AS OCCUPIED. 
*/ - /* --------------------------------------------------------------------------------- */ - tinpIndex = (ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE; - for (tinpNextFree = ZNO_CONTAINERS + 1; tinpNextFree <= ZEMPTYLIST - 1; tinpNextFree++) { - inpPageptr.p->word32[tinpIndex] = tinpNextFree; - tinpIndex = tinpIndex + ZBUF_SIZE; - }//for - inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST; - /* --------------------------------------------------------------------------------- */ - /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ - /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */ - /* PREDEFINED AS OCCUPIED. */ - /* --------------------------------------------------------------------------------- */ - tinpIndex = ((ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE) + 1; - inpPageptr.p->word32[tinpIndex] = ZEMPTYLIST; - for (tinpPrevFree = ZNO_CONTAINERS; tinpPrevFree <= ZEMPTYLIST - 2; tinpPrevFree++) { - tinpIndex = tinpIndex + ZBUF_SIZE; - inpPageptr.p->word32[tinpIndex] = tinpPrevFree; - }//for - /* --------------------------------------------------------------------------------- */ - /* INITIALISE HEADER POSITIONS NOT CURRENTLY USED AND ENSURE USE OF OVERFLOW */ - /* RECORD POINTER ON THIS PAGE LEADS TO ERROR. */ - /* --------------------------------------------------------------------------------- */ - inpPageptr.p->word32[ZPOS_CHECKSUM] = 0; - inpPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0; - inpPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL; -}//Dbacc::initPage() - -/* --------------------------------------------------------------------------------- */ -/* PUT_OVERFLOW_REC_IN_FRAG */ -/* DESCRIPTION: AN OVERFLOW RECORD WITCH IS USED TO KEEP INFORMATION ABOUT */ -/* OVERFLOW PAGE WILL BE PUT IN A LIST OF OVERFLOW RECORDS IN */ -/* THE FRAGMENT RECORD. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::putOverflowRecInFrag(Signal* signal) -{ - OverflowRecordPtr tpifNextOverrecPtr; - OverflowRecordPtr tpifPrevOverrecPtr; - - tpifNextOverrecPtr.i = fragrecptr.p->firstOverflowRec; - LINT_INIT(tpifPrevOverrecPtr.p); - tpifPrevOverrecPtr.i = RNIL; - while (tpifNextOverrecPtr.i != RNIL) { - ptrCheckGuard(tpifNextOverrecPtr, coverflowrecsize, overflowRecord); - if (tpifNextOverrecPtr.p->dirindex < porOverflowRecPtr.p->dirindex) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* PROCEED IN LIST TO THE NEXT IN THE LIST SINCE THE ENTRY HAD A LOWER PAGE ID.*/ - /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */ - /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */ - /* PERFORMED. */ - /* --------------------------------------------------------------------------------- */ - tpifPrevOverrecPtr = tpifNextOverrecPtr; - tpifNextOverrecPtr.i = tpifNextOverrecPtr.p->nextOverRec; - } else { - jam(); - ndbrequire(tpifNextOverrecPtr.p->dirindex != porOverflowRecPtr.p->dirindex); - /* --------------------------------------------------------------------------------- */ - /* TRYING TO INSERT THE SAME PAGE TWICE. SYSTEM ERROR. 
*/ - /* --------------------------------------------------------------------------------- */ - break; - }//if - }//while - if (tpifNextOverrecPtr.i == RNIL) { - jam(); - fragrecptr.p->lastOverflowRec = porOverflowRecPtr.i; - } else { - jam(); - tpifNextOverrecPtr.p->prevOverRec = porOverflowRecPtr.i; - }//if - if (tpifPrevOverrecPtr.i == RNIL) { - jam(); - fragrecptr.p->firstOverflowRec = porOverflowRecPtr.i; - } else { - jam(); - tpifPrevOverrecPtr.p->nextOverRec = porOverflowRecPtr.i; - }//if - porOverflowRecPtr.p->prevOverRec = tpifPrevOverrecPtr.i; - porOverflowRecPtr.p->nextOverRec = tpifNextOverrecPtr.i; -}//Dbacc::putOverflowRecInFrag() - -/* --------------------------------------------------------------------------------- */ -/* PUT_REC_IN_FREE_OVERDIR */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::putRecInFreeOverdir(Signal* signal) -{ - OverflowRecordPtr tpfoNextOverrecPtr; - OverflowRecordPtr tpfoPrevOverrecPtr; - - tpfoNextOverrecPtr.i = fragrecptr.p->firstFreeDirindexRec; - LINT_INIT(tpfoPrevOverrecPtr.p); - tpfoPrevOverrecPtr.i = RNIL; - while (tpfoNextOverrecPtr.i != RNIL) { - ptrCheckGuard(tpfoNextOverrecPtr, coverflowrecsize, overflowRecord); - if (tpfoNextOverrecPtr.p->dirindex < priOverflowRecPtr.p->dirindex) { - jam(); - /* --------------------------------------------------------------------------------- */ - /* PROCEED IN LIST TO THE NEXT IN THE LIST SINCE THE ENTRY HAD A LOWER PAGE ID.*/ - /* WE WANT TO ENSURE THAT LOWER PAGE ID'S ARE KEPT FULL RATHER THAN THE */ - /* OPPOSITE TO ENSURE THAT HIGH PAGE ID'S CAN BE REMOVED WHEN SHRINKS ARE */ - /* PERFORMED. */ - /* --------------------------------------------------------------------------------- */ - tpfoPrevOverrecPtr = tpfoNextOverrecPtr; - tpfoNextOverrecPtr.i = tpfoNextOverrecPtr.p->nextOverList; - } else { - jam(); - ndbrequire(tpfoNextOverrecPtr.p->dirindex != priOverflowRecPtr.p->dirindex); - /* --------------------------------------------------------------------------------- */ - /* ENSURE WE ARE NOT TRYING TO INSERT THE SAME PAGE TWICE. 
*/ - /* --------------------------------------------------------------------------------- */ - break; - }//if - }//while - if (tpfoNextOverrecPtr.i != RNIL) { - jam(); - tpfoNextOverrecPtr.p->prevOverList = priOverflowRecPtr.i; - }//if - if (tpfoPrevOverrecPtr.i == RNIL) { - jam(); - fragrecptr.p->firstFreeDirindexRec = priOverflowRecPtr.i; - } else { - jam(); - tpfoPrevOverrecPtr.p->nextOverList = priOverflowRecPtr.i; - }//if - priOverflowRecPtr.p->prevOverList = tpfoPrevOverrecPtr.i; - priOverflowRecPtr.p->nextOverList = tpfoNextOverrecPtr.i; -}//Dbacc::putRecInFreeOverdir() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_DIRECTORY */ -/* --------------------------------------- ----------------------------------------- */ -void Dbacc::releaseDirectory(Signal* signal) -{ - ptrCheckGuard(rdDirptr, cdirarraysize, directoryarray); - rdDirptr.p->pagep[0] = cfirstfreedir; - cfirstfreedir = rdDirptr.i; -}//Dbacc::releaseDirectory() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_DIRRANGE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseDirrange(Signal* signal) -{ - ptrCheckGuard(rdDirRangePtr, cdirrangesize, dirRange); - rdDirRangePtr.p->dirArray[0] = cfirstfreeDirrange; - cfirstfreeDirrange = rdDirRangePtr.i; -}//Dbacc::releaseDirrange() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE OP RECORD */ -/* PUT A FREE OPERATION IN A FREE LIST OF THE OPERATIONS */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseOpRec(Signal* signal) -{ -#if 0 - // DEBUG CODE - // Check that the operation to be released isn't - // already in the list of free operations - // Since this code loops through the entire list of free operations - // it's only enabled in VM_TRACE mode - OperationrecPtr opRecPtr; - bool opInList = false; - opRecPtr.i = cfreeopRec; - while (opRecPtr.i != RNIL){ - if (opRecPtr.i == operationRecPtr.i){ - opInList = true; - break; - } - ptrCheckGuard(opRecPtr, coprecsize, operationrec); - opRecPtr.i = opRecPtr.p->nextOp; - } - ndbrequire(opInList == false); -#endif - ndbrequire(operationRecPtr.p->m_op_bits == Operationrec::OP_INITIAL); - - operationRecPtr.p->nextOp = cfreeopRec; - cfreeopRec = operationRecPtr.i; /* UPDATE FREE LIST OF OP RECORDS */ - operationRecPtr.p->prevOp = RNIL; - operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; -}//Dbacc::releaseOpRec() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_OVERFLOW_REC */ -/* PUT A FREE OVERFLOW REC IN A FREE LIST OF THE OVERFLOW RECORDS */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseOverflowRec(Signal* signal) -{ - rorOverflowRecPtr.p->nextfreeoverrec = cfirstfreeoverrec; - cfirstfreeoverrec = rorOverflowRecPtr.i; -}//Dbacc::releaseOverflowRec() - -/* --------------------------------------------------------------------------------- */ -/* RELEASE_OVERPAGE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::releaseOverpage(Signal* signal) -{ - DirRangePtr ropOverflowrangeptr; - DirectoryarrayPtr ropOverflowDirptr; - OverflowRecordPtr ropOverflowRecPtr; - OverflowRecordPtr tuodOverflowRecPtr; - Uint32 tropTmp; - Uint32 tropTmp1; - Uint32 tropTmp2; - - ropOverflowRecPtr.i = 
ropPageptr.p->word32[ZPOS_OVERFLOWREC]; - ndbrequire(ropOverflowRecPtr.i != RNIL); - /* THE OVERFLOW REC WILL BE TAKEN OUT OF THE */ - /* FREELIST OF OVERFLOW PAGE WITH FREE */ - /* CONTAINER AND WILL BE PUT IN THE FREE LIST */ - /* OF THE FREE DIRECTORY INDEXES. */ - if ((fragrecptr.p->lastOverflowRec == ropOverflowRecPtr.i) && - (fragrecptr.p->firstOverflowRec == ropOverflowRecPtr.i)) { - jam(); - return; /* THERE IS ONLY ONE OVERFLOW PAGE */ - }//if -#if kalle - logicalPage = 0; - - i = fragrecptr.p->directory; - p = dirRange.getPtr(i); - - i1 = logicalPage >> 8; - i2 = logicalPage & 0xFF; - - ndbrequire(i1 < 256); - - i = p->dirArray[i1]; - p = directoryarray.getPtr(i); - - physicPageId = p->pagep[i2]; - physicPageP = page8.getPtr(physicPageId); - - p->pagep[i2] = RNIL; - rpPageptr = { physicPageId, physicPageP }; - releasePage(signal); - -#endif - - /* ----------------------------------------------------------------------- */ - /* IT WAS OK TO RELEASE THE PAGE. */ - /* ----------------------------------------------------------------------- */ - ptrCheckGuard(ropOverflowRecPtr, coverflowrecsize, overflowRecord); - tfoOverflowRecPtr = ropOverflowRecPtr; - takeRecOutOfFreeOverpage(signal); - ropOverflowRecPtr.p->overpage = RNIL; - priOverflowRecPtr = ropOverflowRecPtr; - putRecInFreeOverdir(signal); - tropTmp = ropPageptr.p->word32[ZPOS_PAGE_ID]; - ropOverflowrangeptr.i = fragrecptr.p->overflowdir; - tropTmp1 = tropTmp >> 8; - tropTmp2 = tropTmp & 0xff; - ptrCheckGuard(ropOverflowrangeptr, cdirrangesize, dirRange); - arrGuard(tropTmp1, 256); - ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1]; - ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray); - ropOverflowDirptr.p->pagep[tropTmp2] = RNIL; - rpPageptr = ropPageptr; - releasePage(signal); - if (ropOverflowRecPtr.p->dirindex != (fragrecptr.p->lastOverIndex - 1)) { - jam(); - return; - }//if - /* ----------------------------------------------------------------------- */ - /* THE LAST PAGE IN THE DIRECTORY WAS RELEASED IT IS NOW NECESSARY - * TO REMOVE ALL RELEASED OVERFLOW DIRECTORIES AT THE END OF THE LIST. - * ---------------------------------------------------------------------- */ - do { - fragrecptr.p->lastOverIndex--; - if (tropTmp2 == 0) { - jam(); - ndbrequire(tropTmp1 != 0); - ropOverflowrangeptr.p->dirArray[tropTmp1] = RNIL; - rdDirptr.i = ropOverflowDirptr.i; - releaseDirectory(signal); - tropTmp1--; - tropTmp2 = 255; - } else { - jam(); - tropTmp2--; - }//if - ropOverflowDirptr.i = ropOverflowrangeptr.p->dirArray[tropTmp1]; - ptrCheckGuard(ropOverflowDirptr, cdirarraysize, directoryarray); - } while (ropOverflowDirptr.p->pagep[tropTmp2] == RNIL); - /* ----------------------------------------------------------------------- */ - /* RELEASE ANY OVERFLOW RECORDS THAT ARE PART OF THE FREE INDEX LIST WHICH */ - /* DIRECTORY INDEX NOW HAS BEEN RELEASED. 
*/ - /* ----------------------------------------------------------------------- */ - tuodOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec; - jam(); - while (tuodOverflowRecPtr.i != RNIL) { - jam(); - ptrCheckGuard(tuodOverflowRecPtr, coverflowrecsize, overflowRecord); - if (tuodOverflowRecPtr.p->dirindex >= fragrecptr.p->lastOverIndex) { - jam(); - rorOverflowRecPtr = tuodOverflowRecPtr; - troOverflowRecPtr.p = tuodOverflowRecPtr.p; - tuodOverflowRecPtr.i = troOverflowRecPtr.p->nextOverList; - takeRecOutOfFreeOverdir(signal); - releaseOverflowRec(signal); - } else { - jam(); - tuodOverflowRecPtr.i = tuodOverflowRecPtr.p->nextOverList; - }//if - }//while -}//Dbacc::releaseOverpage() - -/* ------------------------------------------------------------------------- */ -/* RELEASE_PAGE */ -/* ------------------------------------------------------------------------- */ -void Dbacc::releasePage(Signal* signal) -{ -#ifdef VM_TRACE - bool inList = false; - Uint32 numInList = 0; - Page8Ptr tmpPagePtr; - tmpPagePtr.i = cfirstfreepage; - while (tmpPagePtr.i != RNIL){ - ptrCheckGuard(tmpPagePtr, cpagesize, page8); - if (tmpPagePtr.i == rpPageptr.i){ - jam(); inList = true; - break; - } - numInList++; - tmpPagePtr.i = tmpPagePtr.p->word32[0]; - } - ndbrequire(inList == false); - // ndbrequire(numInList == cnoOfAllocatedPages); -#endif - rpPageptr.p->word32[0] = cfirstfreepage; - cfirstfreepage = rpPageptr.i; - cnoOfAllocatedPages--; -}//Dbacc::releasePage() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_DIRECTORY */ -/* DESCRIPTION: A DIRECTORY BLOCK (ZDIRBLOCKSIZE NUMBERS OF DIRECTORY */ -/* RECORDS WILL BE ALLOCATED AND RETURNED. */ -/* SIZE OF DIRECTORY ERROR_CODE, WILL BE RETURNED IF THERE IS NO ANY */ -/* FREE BLOCK */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeDirectory(Signal* signal) -{ - Uint32 tsdyIndex; - - if (cfirstfreedir == RNIL) { - jam(); - if (cdirarraysize <= cdirmemory) { - jam(); - tresult = ZDIRSIZE_ERROR; - return; - } else { - jam(); - sdDirptr.i = cdirmemory; - ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray); - cdirmemory = cdirmemory + 1; - }//if - } else { - jam(); - sdDirptr.i = cfirstfreedir; - ptrCheckGuard(sdDirptr, cdirarraysize, directoryarray); - cfirstfreedir = sdDirptr.p->pagep[0]; - sdDirptr.p->pagep[0] = RNIL; - }//if - for (tsdyIndex = 0; tsdyIndex <= 255; tsdyIndex++) { - sdDirptr.p->pagep[tsdyIndex] = RNIL; - }//for -}//Dbacc::seizeDirectory() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_DIRRANGE */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeDirrange(Signal* signal) -{ - Uint32 tsdeIndex; - - newDirRangePtr.i = cfirstfreeDirrange; - ptrCheckGuard(newDirRangePtr, cdirrangesize, dirRange); - cfirstfreeDirrange = newDirRangePtr.p->dirArray[0]; - for (tsdeIndex = 0; tsdeIndex <= 255; tsdeIndex++) { - newDirRangePtr.p->dirArray[tsdeIndex] = RNIL; - }//for -}//Dbacc::seizeDirrange() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE FRAGREC */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeFragrec(Signal* signal) -{ - fragrecptr.i = cfirstfreefrag; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - cfirstfreefrag = fragrecptr.p->nextfreefrag; - fragrecptr.p->nextfreefrag = RNIL; -}//Dbacc::seizeFragrec() - 
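
Nearly all of the seize/release routines in this part of the block follow the same preallocated-pool pattern: free records are threaded onto a LIFO list through a dedicated field (nextfreefrag, nextOp, nextfreeoverrec, or word32[0] for pages) with a cfirstfree* head variable. A self-contained sketch of that pattern, with hypothetical names rather than the real Dbacc members, is:

#include <cassert>
#include <cstdint>
#include <vector>

using Uint32 = std::uint32_t;
static const Uint32 RNIL = 0xffffffff;   // "no record" (illustrative value only)

struct Rec { Uint32 nextfree = RNIL; };

struct Pool {
  std::vector<Rec> recs;
  Uint32 firstFree = RNIL;

  explicit Pool(Uint32 n) : recs(n) {
    // Thread all records onto the free list at init time.
    for (Uint32 i = 0; i < n; i++) recs[i].nextfree = (i + 1 < n) ? i + 1 : RNIL;
    firstFree = (n > 0) ? 0 : RNIL;
  }

  // seize: pop the head of the free list (the caller must handle exhaustion).
  Uint32 seize() {
    assert(firstFree != RNIL);
    Uint32 i = firstFree;
    firstFree = recs[i].nextfree;
    recs[i].nextfree = RNIL;
    return i;
  }

  // release: push the record back as the new head.
  void release(Uint32 i) {
    recs[i].nextfree = firstFree;
    firstFree = i;
  }
};

int main() {
  Pool pool(3);
  Uint32 a = pool.seize();      // index 0
  Uint32 b = pool.seize();      // index 1
  pool.release(a);              // index 0 becomes the head again
  assert(pool.seize() == a);    // LIFO: the most recently released record comes back first
  (void)b;
  return 0;
}

The real routines differ mainly in how they report exhaustion rather than asserting, for example seizeDirectory setting tresult = ZDIRSIZE_ERROR and seizePage setting ZPAGESIZE_ERROR when no free record is available.
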
-/* --------------------------------------------------------------------------------- */ -/* SEIZE_OP_REC */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeOpRec(Signal* signal) -{ - operationRecPtr.i = cfreeopRec; - ptrCheckGuard(operationRecPtr, coprecsize, operationrec); - cfreeopRec = operationRecPtr.p->nextOp; /* UPDATE FREE LIST OF OP RECORDS */ - /* PUTS OPERTION RECORD PTR IN THE LIST */ - /* OF OPERATION IN CONNECTION RECORD */ - operationRecPtr.p->nextOp = RNIL; -}//Dbacc::seizeOpRec() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE OVERFLOW RECORD */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeOverRec(Signal* signal) { - sorOverflowRecPtr.i = cfirstfreeoverrec; - ptrCheckGuard(sorOverflowRecPtr, coverflowrecsize, overflowRecord); - cfirstfreeoverrec = sorOverflowRecPtr.p->nextfreeoverrec; - sorOverflowRecPtr.p->nextfreeoverrec = RNIL; - sorOverflowRecPtr.p->prevOverRec = RNIL; - sorOverflowRecPtr.p->nextOverRec = RNIL; -}//Dbacc::seizeOverRec() - - -/** - * A ZPAGESIZE_ERROR has occured, out of index pages - * Print some debug info if debug compiled - */ -void Dbacc::zpagesize_error(const char* where){ - DEBUG(where << endl - << " ZPAGESIZE_ERROR" << endl - << " cfirstfreepage=" << cfirstfreepage << endl - << " cfreepage=" <word32[0]; - cnoOfAllocatedPages++; - }//if -}//Dbacc::seizePage() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_ROOTFRAGREC */ -/* --------------------------------------------------------------------------------- */ - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_SCAN_REC */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::seizeScanRec(Signal* signal) -{ - scanPtr.i = cfirstFreeScanRec; - ptrCheckGuard(scanPtr, cscanRecSize, scanRec); - ndbrequire(scanPtr.p->scanState == ScanRec::SCAN_DISCONNECT); - cfirstFreeScanRec = scanPtr.p->scanNextfreerec; -}//Dbacc::seizeScanRec() - -/* --------------------------------------------------------------------------------- */ -/* SEIZE_SR_VERSION_REC */ -/* --------------------------------------------------------------------------------- */ -/* --------------------------------------------------------------------------------- */ -/* SEND_SYSTEMERROR */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::sendSystemerror(Signal* signal, int line) -{ - progError(line, NDBD_EXIT_PRGERR); -}//Dbacc::sendSystemerror() - -/* --------------------------------------------------------------------------------- */ -/* TAKE_REC_OUT_OF_FREE_OVERDIR */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::takeRecOutOfFreeOverdir(Signal* signal) -{ - OverflowRecordPtr tofoOverrecPtr; - if (troOverflowRecPtr.p->nextOverList != RNIL) { - jam(); - tofoOverrecPtr.i = troOverflowRecPtr.p->nextOverList; - ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord); - tofoOverrecPtr.p->prevOverList = troOverflowRecPtr.p->prevOverList; - }//if - if (troOverflowRecPtr.p->prevOverList != RNIL) { - jam(); - tofoOverrecPtr.i = troOverflowRecPtr.p->prevOverList; - ptrCheckGuard(tofoOverrecPtr, coverflowrecsize, overflowRecord); - tofoOverrecPtr.p->nextOverList = troOverflowRecPtr.p->nextOverList; - } else { - jam(); - 
fragrecptr.p->firstFreeDirindexRec = troOverflowRecPtr.p->nextOverList; - }//if -}//Dbacc::takeRecOutOfFreeOverdir() - -/* --------------------------------------------------------------------------------- */ -/* TAKE_REC_OUT_OF_FREE_OVERPAGE */ -/* DESCRIPTION: AN OVERFLOW PAGE WHICH IS EMPTY HAVE TO BE TAKE OUT OF THE */ -/* FREE LIST OF OVERFLOW PAGE. BY THIS SUBROUTINE THIS LIST */ -/* WILL BE UPDATED. */ -/* --------------------------------------------------------------------------------- */ -void Dbacc::takeRecOutOfFreeOverpage(Signal* signal) -{ - OverflowRecordPtr tfoNextOverflowRecPtr; - OverflowRecordPtr tfoPrevOverflowRecPtr; - - if (tfoOverflowRecPtr.p->nextOverRec != RNIL) { - jam(); - tfoNextOverflowRecPtr.i = tfoOverflowRecPtr.p->nextOverRec; - ptrCheckGuard(tfoNextOverflowRecPtr, coverflowrecsize, overflowRecord); - tfoNextOverflowRecPtr.p->prevOverRec = tfoOverflowRecPtr.p->prevOverRec; - } else { - ndbrequire(fragrecptr.p->lastOverflowRec == tfoOverflowRecPtr.i); - jam(); - fragrecptr.p->lastOverflowRec = tfoOverflowRecPtr.p->prevOverRec; - }//if - if (tfoOverflowRecPtr.p->prevOverRec != RNIL) { - jam(); - tfoPrevOverflowRecPtr.i = tfoOverflowRecPtr.p->prevOverRec; - ptrCheckGuard(tfoPrevOverflowRecPtr, coverflowrecsize, overflowRecord); - tfoPrevOverflowRecPtr.p->nextOverRec = tfoOverflowRecPtr.p->nextOverRec; - } else { - ndbrequire(fragrecptr.p->firstOverflowRec == tfoOverflowRecPtr.i); - jam(); - fragrecptr.p->firstOverflowRec = tfoOverflowRecPtr.p->nextOverRec; - }//if -}//Dbacc::takeRecOutOfFreeOverpage() - -void -Dbacc::reportMemoryUsage(Signal* signal, int gth){ - signal->theData[0] = NDB_LE_MemoryUsage; - signal->theData[1] = gth; - signal->theData[2] = sizeof(* rpPageptr.p); - signal->theData[3] = cnoOfAllocatedPages; - signal->theData[4] = cpagesize; - signal->theData[5] = DBACC; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB); -} - -void -Dbacc::execDUMP_STATE_ORD(Signal* signal) -{ - DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0]; - if (dumpState->args[0] == DumpStateOrd::AccDumpOneScanRec){ - Uint32 recordNo = RNIL; - if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= cscanRecSize) - return; - - scanPtr.i = recordNo; - ptrAss(scanPtr, scanRec); - infoEvent("Dbacc::ScanRec[%d]: state=%d, transid(0x%x, 0x%x)", - scanPtr.i, scanPtr.p->scanState,scanPtr.p->scanTrid1, - scanPtr.p->scanTrid2); - infoEvent(" timer=%d, continueBCount=%d, " - "activeLocalFrag=%d, nextBucketIndex=%d", - scanPtr.p->scanTimer, - scanPtr.p->scanContinuebCounter, - scanPtr.p->activeLocalFrag, - scanPtr.p->nextBucketIndex); - infoEvent(" scanNextfreerec=%d firstActOp=%d firstLockedOp=%d, " - "scanLastLockedOp=%d firstQOp=%d lastQOp=%d", - scanPtr.p->scanNextfreerec, - scanPtr.p->scanFirstActiveOp, - scanPtr.p->scanFirstLockedOp, - scanPtr.p->scanLastLockedOp, - scanPtr.p->scanFirstQueuedOp, - scanPtr.p->scanLastQueuedOp); - infoEvent(" scanUserP=%d, startNoBuck=%d, minBucketIndexToRescan=%d, " - "maxBucketIndexToRescan=%d", - scanPtr.p->scanUserptr, - scanPtr.p->startNoOfBuckets, - scanPtr.p->minBucketIndexToRescan, - scanPtr.p->maxBucketIndexToRescan); - infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, " - "scanMask=%d scanLockMode=%d", - scanPtr.p->scanBucketState, - scanPtr.p->scanLockHeld, - scanPtr.p->scanUserblockref, - scanPtr.p->scanMask, - scanPtr.p->scanLockMode); - return; - } - - // Dump all ScanRec(ords) - if (dumpState->args[0] == DumpStateOrd::AccDumpAllScanRec){ - Uint32 recordNo = 0; 
- if (signal->length() == 1) - infoEvent("ACC: Dump all ScanRec - size: %d", - cscanRecSize); - else if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - - if (recordNo < cscanRecSize-1){ - dumpState->args[0] = DumpStateOrd::AccDumpAllScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - return; - } - - // Dump all active ScanRec(ords) - if (dumpState->args[0] == DumpStateOrd::AccDumpAllActiveScanRec){ - Uint32 recordNo = 0; - if (signal->length() == 1) - infoEvent("ACC: Dump active ScanRec - size: %d", - cscanRecSize); - else if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - ScanRecPtr sp; - sp.i = recordNo; - ptrAss(sp, scanRec); - if (sp.p->scanState != ScanRec::SCAN_DISCONNECT){ - dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - } - - if (recordNo < cscanRecSize-1){ - dumpState->args[0] = DumpStateOrd::AccDumpAllActiveScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - return; - } - - if(dumpState->args[0] == DumpStateOrd::DumpPageMemory && - signal->getLength() == 1){ - reportMemoryUsage(signal, 0); - return; - } - - if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){ - ndbout << "Dbacc:: delay write of datapages for table = " - << dumpState->args[1]<< endl; - c_errorInsert3000_TableId = dumpState->args[1]; - SET_ERROR_INSERT_VALUE(3000); - return; - } - - if(dumpState->args[0] == DumpStateOrd::AccDumpOneOperationRec){ - Uint32 recordNo = RNIL; - if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= coprecsize) - return; - - OperationrecPtr tmpOpPtr; - tmpOpPtr.i = recordNo; - ptrAss(tmpOpPtr, operationrec); - infoEvent("Dbacc::operationrec[%d]: transid(0x%x, 0x%x)", - tmpOpPtr.i, tmpOpPtr.p->transId1, - tmpOpPtr.p->transId2); - infoEvent("elementIsforward=%d, elementPage=%d, elementPointer=%d ", - tmpOpPtr.p->elementIsforward, tmpOpPtr.p->elementPage, - tmpOpPtr.p->elementPointer); - infoEvent("fid=%d, fragptr=%d, hashvaluePart=%d ", - tmpOpPtr.p->fid, tmpOpPtr.p->fragptr, - tmpOpPtr.p->hashvaluePart); - infoEvent("hashValue=%d", tmpOpPtr.p->hashValue); - infoEvent("nextLockOwnerOp=%d, nextOp=%d, nextParallelQue=%d ", - tmpOpPtr.p->nextLockOwnerOp, tmpOpPtr.p->nextOp, - tmpOpPtr.p->nextParallelQue); - infoEvent("nextSerialQue=%d, prevOp=%d ", - tmpOpPtr.p->nextSerialQue, - tmpOpPtr.p->prevOp); - infoEvent("prevLockOwnerOp=%d, prevParallelQue=%d", - tmpOpPtr.p->prevLockOwnerOp, tmpOpPtr.p->nextParallelQue); - infoEvent("prevSerialQue=%d, scanRecPtr=%d", - tmpOpPtr.p->prevSerialQue, tmpOpPtr.p->scanRecPtr); - infoEvent("m_op_bits=0x%x, scanBits=%d ", - tmpOpPtr.p->m_op_bits, tmpOpPtr.p->scanBits); - return; - } - - if(dumpState->args[0] == DumpStateOrd::AccDumpNumOpRecs){ - - Uint32 freeOpRecs = 0; - OperationrecPtr opRecPtr; - opRecPtr.i = cfreeopRec; - while (opRecPtr.i != RNIL){ - freeOpRecs++; - ptrCheckGuard(opRecPtr, coprecsize, operationrec); - opRecPtr.i = opRecPtr.p->nextOp; - } - - infoEvent("Dbacc::OperationRecords: num=%d, free=%d", - coprecsize, freeOpRecs); - - return; - } - if(dumpState->args[0] == DumpStateOrd::AccDumpFreeOpRecs){ - - OperationrecPtr opRecPtr; - opRecPtr.i = cfreeopRec; - while (opRecPtr.i != RNIL){ - - dumpState->args[0] = 
DumpStateOrd::AccDumpOneOperationRec; - dumpState->args[1] = opRecPtr.i; - execDUMP_STATE_ORD(signal); - - ptrCheckGuard(opRecPtr, coprecsize, operationrec); - opRecPtr.i = opRecPtr.p->nextOp; - } - - - return; - } - - if(dumpState->args[0] == DumpStateOrd::AccDumpNotFreeOpRecs){ - Uint32 recordStart = RNIL; - if (signal->length() == 2) - recordStart = dumpState->args[1]; - else - return; - - if (recordStart >= coprecsize) - return; - - for (Uint32 i = recordStart; i < coprecsize; i++){ - - bool inFreeList = false; - OperationrecPtr opRecPtr; - opRecPtr.i = cfreeopRec; - while (opRecPtr.i != RNIL){ - if (opRecPtr.i == i){ - inFreeList = true; - break; - } - ptrCheckGuard(opRecPtr, coprecsize, operationrec); - opRecPtr.i = opRecPtr.p->nextOp; - } - if (inFreeList == false){ - dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec; - dumpState->args[1] = i; - execDUMP_STATE_ORD(signal); - } - } - return; - } - -#if 0 - if (type == 100) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 2; - req->secondaryTableId = RNIL; - req->userPtr = 2; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 101) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 4; - req->secondaryTableId = 5; - req->userPtr = 4; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 102) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 6; - req->secondaryTableId = 8; - req->userPtr = 6; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 103) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 2; - req->secondaryTableId = RNIL; - req->userPtr = 2; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if - if (type == 104) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 4; - req->secondaryTableId = 5; - req->userPtr = 4; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if - if (type == 105) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 6; - req->secondaryTableId = 8; - req->userPtr = 6; - req->userRef = DBDICT_REF; - sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if -#endif -}//Dbacc::execDUMP_STATE_ORD() - -void -Dbacc::execREAD_PSEUDO_REQ(Signal* signal){ - jamEntry(); - fragrecptr.i = signal->theData[0]; - Uint32 attrId = signal->theData[1]; - ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); - Uint64 tmp; - switch(attrId){ - case AttributeHeader::ROW_COUNT: - tmp = fragrecptr.p->noOfElements; - break; - case AttributeHeader::COMMIT_COUNT: - tmp = fragrecptr.p->m_commit_count; - break; - default: - tmp = 0; - } - memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on - * ithanium gcc (GCC) 3.4.1 smp linux 2.4 - * otherwise - */ - // Uint32 * src = (Uint32*)&tmp; - // signal->theData[0] = src[0]; - // signal->theData[1] = src[1]; -} diff --git 
a/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt deleted file mode 100644 index 3d11e501c07..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/CreateIndex.txt +++ /dev/null @@ -1,152 +0,0 @@ -Unique Hash Index -================= - -unique hash index X on T(A1,...,An) becomes: -table X with primary key A1,...,An and extra attribute NDB$PK - -NDB$PK is primary key of T concatenated at 4-byte boundaries - -Protocols: - -U - user, initiator of protocol -C - coordinator -P - participants, including coordinator node - -RT_ - request type, current state - -P always replies to C with current RT_ (initially RT_DICT_PREPARE) -C replies to U at the end - -CREATE INDEX ------------- - -U: RT_USER - -C: forward request to P's -P: check and reply - -C: invoke CREATE TABLE for index table - -C: invoke ALTER INDEX online - -C: send RT_DICT_COMMIT to P's -P: reply - -C: reply to U - -DROP INDEX ----------- - -[ todo ] - -ALTER INDEX online ------------------- - -U: RT_USER, RT_CREATE_INDEX, RT_NODERESTART, RT_SYSTEMRESTART - -C: forward request to P's -P: check and reply - -C: send RT_DICT_TC to P's -P: create index in local TC, and reply - -C: invoke CREATE TRIGGER for insert/update/delete triggers - -C: invoke BUILD INDEX - -C: send RT_DICT_COMMIT to P's -P: reply - -C: reply to U - -ALTER INDEX offline -------------------- - -[ todo ] - -BUILD INDEX ------------ - -U: RT_USER, RT_ALTER_INDEX - -C: forward request to P's -P: check and reply - -C: invoke CREATE TRIGGER for read-only constraint on NDB$PK - -C: send RT_DICT_TRIX to P's -P: build index via local TRIX, and reply - -C: invoke DROP TRIGGER for read-only constraint on NDB$PK - -C: send RT_DICT_TC to P's -P: online index in local TC, and reply - -CREATE TRIGGER --------------- - -U: RT_USER, RT_ALTER_INDEX, RT_BUILD_INDEX - -C: forward request to P's -P: check and reply - -C: seize trigger id and send RT_DICT_CREATE to P's -P: create trigger in DICT (also connect to index record), and reply - -C: invoke ALTER TRIGGER online [ not if subscription trigger ] - -C: send RT_DICT_COMMIT to P's -P: reply - -C: reply to U - -DROP TRIGGER ------------- - -[ todo ] - -ALTER TRIGGER online --------------------- - -U: RT_USER, RT_CREATE_TRIGGER - -C: forward request to P's -P: check and reply - -C: send RT_DICT_TC to P's -P: create trigger in local TC, and reply - -C: send RT_DICT_LQH to P's -P: create trigger in local LQH (which just forwards to TUP), and reply - -C: send RT_DICT_COMMIT to P's -P: reply - -C: reply to U - -ALTER TRIGGER offline ---------------------- - -[ todo ] - -Ordered Index << under work >> -============= - -created as DICT table, as before, to reuse the code - -keep NDB$PK as last attribute (not used but logically correct) - -create fragments and attributes must be modified - -global metadata? 
implemented but will use signals anyway - -create (after-) insert/update/delete triggers as DICT objects, as before - -skip following: -- create index in TC -- create triggers in TC -- read-only constraint on NDB$PK - -create (before-) commit trigger in TUP - -alter online (in TUX, instead of TC) is needed diff --git a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt deleted file mode 100644 index d37732dcda1..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.new.txt +++ /dev/null @@ -1,29 +0,0 @@ - -1) Receive from client (sequence of DICTTABINFO) - -2) CREATE_FRAGMENTATION_REQ -> local DIH - Returns all fragments for table + some other stuff - NOTE without side effects in DIH - -3) Pack table description - -4) CREATE_TAB -> all DICTs (including table data) - 1) Write schema file (ADD_STARTED) - 2) Write table descriptor to file - 3) CREATE_TAB (DIADDTABREQ) -> local DIH (including fragment info) - 4) DIH - 1) write table descriptor - 2) For each local fragment - ADD_FRAG -> local DICT - LQHFRAGREQ -> local LQH - LQHADDATTREQ -> local LQH - 5) TAB_COMMITREQ -> local LQH - -5) WAIT_GCP - -6) ALTER_TAB (activate) -> all DICTs - 1) Write schema file (CREATED) - 2) TAB_COMMITREQ -> local DIH - 3) TC_SCHVERREQ -> local TC - - diff --git a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt b/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt deleted file mode 100644 index 0b37e5d767f..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/CreateTable.txt +++ /dev/null @@ -1,35 +0,0 @@ - -1) Receive from client (sequence of DICTTABINFO) - -2) DICT_SCHEMAREQ -> all DICTs - Write ADD_STARTED in schema file - -3) Pack table description - -4) DICTTABINFO -> all DICTs (but self) (containing packed table info) - self -> Write 2 file - 1) Write 2 file - -5) DICT_SCHEMAREQ -> all DICTs - Write UPDATE_PAGE_COUNT in schema file - -6) DIADDTABREQ -> local DIH - 1) Create fragments - 2) For each fragment - DIHADDFRAGREQ -> all DIH - 3) For each fragment - DICTFRAGSREQ -> local DICT - 1) LQHFRAGREQ -> concerned LQH - 2) For each attribute - LQHADDATTREQ -> concerned LQH - -7) WAIT_GCP -> local DIH - -8) DICT_SCHEMAREQ -> all DICTs - Write TABLE_ADD_COMMITTED in schema file - -9) TAB_COMMITREQ -> all LQH & DIH - -10) TC_SCHVERREQ -> all TC - -11) UNBLO_DICTREQ -> all DICT diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp deleted file mode 100644 index e0e0a496e0a..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ /dev/null @@ -1,16702 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include - -#define DBDICT_C -#include "Dbdict.hpp" -#include "diskpage.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -extern EventLogger g_eventLogger; - -#define ZNOT_FOUND 626 -#define ZALREADYEXIST 630 - -//#define EVENT_PH2_DEBUG -//#define EVENT_PH3_DEBUG -//#define EVENT_DEBUG - -static const char EVENT_SYSTEM_TABLE_NAME[] = "sys/def/NDB$EVENTS_0"; - -#define EVENT_TRACE \ -// ndbout_c("Event debug trace: File: %s Line: %u", __FILE__, __LINE__) - -#define DIV(x,y) (((x)+(y)-1)/(y)) -#define WORDS2PAGES(x) DIV(x, (ZSIZE_OF_PAGES_IN_WORDS - ZPAGE_HEADER_SIZE)) -#include - -static -struct { - Uint32 m_gsn_user_req; - Uint32 m_gsn_req; - Uint32 m_gsn_ref; - Uint32 m_gsn_conf; - void (Dbdict::* m_trans_commit_start)(Signal*, Dbdict::SchemaTransaction*); - void (Dbdict::* m_trans_commit_complete)(Signal*,Dbdict::SchemaTransaction*); - void (Dbdict::* m_trans_abort_start)(Signal*, Dbdict::SchemaTransaction*); - void (Dbdict::* m_trans_abort_complete)(Signal*, Dbdict::SchemaTransaction*); - - void (Dbdict::* m_prepare_start)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_prepare_complete)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_commit)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_commit_start)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_commit_complete)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_abort)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_abort_start)(Signal*, Dbdict::SchemaOp*); - void (Dbdict::* m_abort_complete)(Signal*, Dbdict::SchemaOp*); - -} f_dict_op[] = { - /** - * Create filegroup - */ - { - GSN_CREATE_FILEGROUP_REQ, - GSN_CREATE_OBJ_REQ, GSN_CREATE_OBJ_REF, GSN_CREATE_OBJ_CONF, - 0, 0, 0, 0, - &Dbdict::create_fg_prepare_start, &Dbdict::create_fg_prepare_complete, - &Dbdict::createObj_commit, - 0, 0, - &Dbdict::createObj_abort, - &Dbdict::create_fg_abort_start, &Dbdict::create_fg_abort_complete, - } - - /** - * Create file - */ - ,{ - GSN_CREATE_FILE_REQ, - GSN_CREATE_OBJ_REQ, GSN_CREATE_OBJ_REF, GSN_CREATE_OBJ_CONF, - 0, 0, 0, 0, - &Dbdict::create_file_prepare_start, &Dbdict::create_file_prepare_complete, - &Dbdict::createObj_commit, - &Dbdict::create_file_commit_start, 0, - &Dbdict::createObj_abort, - &Dbdict::create_file_abort_start, &Dbdict::create_file_abort_complete, - } - - /** - * Drop file - */ - ,{ - GSN_DROP_FILE_REQ, - GSN_DROP_OBJ_REQ, GSN_DROP_OBJ_REF, GSN_DROP_OBJ_CONF, - 0, 0, 0, 0, - &Dbdict::drop_file_prepare_start, 0, - &Dbdict::dropObj_commit, - &Dbdict::drop_file_commit_start, &Dbdict::drop_file_commit_complete, - &Dbdict::dropObj_abort, - &Dbdict::drop_file_abort_start, 0 - } - - /** - * Drop filegroup - */ - ,{ - GSN_DROP_FILEGROUP_REQ, - GSN_DROP_OBJ_REQ, GSN_DROP_OBJ_REF, GSN_DROP_OBJ_CONF, - 0, 0, 0, 0, - &Dbdict::drop_fg_prepare_start, 0, - 
&Dbdict::dropObj_commit, - &Dbdict::drop_fg_commit_start, &Dbdict::drop_fg_commit_complete, - &Dbdict::dropObj_abort, - &Dbdict::drop_fg_abort_start, 0 - } - - /** - * Drop undofile - */ - ,{ - GSN_DROP_FILE_REQ, - GSN_DROP_OBJ_REQ, GSN_DROP_OBJ_REF, GSN_DROP_OBJ_CONF, - 0, 0, 0, 0, - &Dbdict::drop_undofile_prepare_start, 0, - 0, - 0, &Dbdict::drop_undofile_commit_complete, - 0, 0, 0 - } -}; - -Uint32 -alter_obj_inc_schema_version(Uint32 old) -{ - return (old & 0x00FFFFFF) + ((old + 0x1000000) & 0xFF000000); -} - -static -Uint32 -alter_obj_dec_schema_version(Uint32 old) -{ - return (old & 0x00FFFFFF) + ((old - 0x1000000) & 0xFF000000); -} - -static -Uint32 -create_obj_inc_schema_version(Uint32 old) -{ - return (old + 0x00000001) & 0x00FFFFFF; -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: GENERAL MODULE -------------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains general stuff. Mostly debug signals and */ -/* general signals that go into a specific module after checking a */ -/* state variable. Also general subroutines used by many. */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// This signal is used to dump states of various variables in the -// block by command. -/* ---------------------------------------------------------------- */ -void -Dbdict::execDUMP_STATE_ORD(Signal* signal) -{ - jamEntry(); - -#ifdef VM_TRACE - if(signal->theData[0] == 1222){ - const Uint32 tab = signal->theData[1]; - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - req->senderRef = reference(); - req->senderData = 1222; - req->tableId = tab; - sendSignal(DBLQH_REF, GSN_PREP_DROP_TAB_REQ, signal, - PrepDropTabReq::SignalLength, JBB); - } - - if(signal->theData[0] == 1223){ - const Uint32 tab = signal->theData[1]; - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - req->senderRef = reference(); - req->senderData = 1222; - req->tableId = tab; - sendSignal(DBTC_REF, GSN_PREP_DROP_TAB_REQ, signal, - PrepDropTabReq::SignalLength, JBB); - } - - if(signal->theData[0] == 1224){ - const Uint32 tab = signal->theData[1]; - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - req->senderRef = reference(); - req->senderData = 1222; - req->tableId = tab; - sendSignal(DBDIH_REF, GSN_PREP_DROP_TAB_REQ, signal, - PrepDropTabReq::SignalLength, JBB); - } - - if(signal->theData[0] == 1225){ - const Uint32 tab = signal->theData[1]; - const Uint32 ver = signal->theData[2]; - TableRecordPtr tabRecPtr; - c_tableRecordPool.getPtr(tabRecPtr, tab); - DropTableReq * req = (DropTableReq*)signal->getDataPtr(); - req->senderData = 1225; - req->senderRef = numberToRef(1,1); - req->tableId = tab; - req->tableVersion = tabRecPtr.p->tableVersion + ver; - sendSignal(DBDICT_REF, GSN_DROP_TABLE_REQ, signal, - DropTableReq::SignalLength, JBB); - } -#endif -#define MEMINFO(x, y) infoEvent(x ": %d %d", y.getSize(), y.getNoOfFree()) - if(signal->theData[0] == 1226){ - MEMINFO("c_obj_pool", c_obj_pool); - MEMINFO("c_opRecordPool", c_opRecordPool); - MEMINFO("c_rope_pool", c_rope_pool); - } - - if (signal->theData[0] == 1227) - { - DLHashTable::Iterator iter; - bool ok = c_obj_hash.first(iter); - for(; ok; ok = c_obj_hash.next(iter)) - { - Rope 
name(c_rope_pool, iter.curr.p->m_name); - char buf[1024]; - name.copy(buf); - ndbout_c("%s m_ref_count: %d", buf, iter.curr.p->m_ref_count); - } - } - - return; -}//Dbdict::execDUMP_STATE_ORD() - -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -// CONTINUEB is used when a real-time break is needed for long -// processes. -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -void Dbdict::execCONTINUEB(Signal* signal) -{ - jamEntry(); - switch (signal->theData[0]) { - case ZPACK_TABLE_INTO_PAGES : - jam(); - packTableIntoPages(signal); - break; - - case ZSEND_GET_TAB_RESPONSE : - jam(); - sendGetTabResponse(signal); - break; - - case ZDICT_LOCK_POLL: - jam(); - checkDictLockQueue(signal, true); - break; - - default : - ndbrequire(false); - break; - }//switch - return; -}//execCONTINUEB() - -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -// Routine to handle pack table into pages. -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ - -void Dbdict::packTableIntoPages(Signal* signal) -{ - const Uint32 tableId= signal->theData[1]; - const Uint32 type= signal->theData[2]; - const Uint32 pageId= signal->theData[3]; - - PageRecordPtr pagePtr; - c_pageRecordArray.getPtr(pagePtr, pageId); - - memset(&pagePtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE); - LinearWriter w(&pagePtr.p->word[ZPAGE_HEADER_SIZE], - ZMAX_PAGES_OF_TABLE_DEFINITION * ZSIZE_OF_PAGES_IN_WORDS); - w.first(); - switch((DictTabInfo::TableType)type) { - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - case DictTabInfo::UniqueHashIndex: - case DictTabInfo::HashIndex: - case DictTabInfo::UniqueOrderedIndex: - case DictTabInfo::OrderedIndex:{ - jam(); - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - packTableIntoPages(w, tablePtr, signal); - break; - } - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup:{ - FilegroupPtr fg_ptr; - ndbrequire(c_filegroup_hash.find(fg_ptr, tableId)); - const Uint32 free_hi= signal->theData[4]; - const Uint32 free_lo= signal->theData[5]; - packFilegroupIntoPages(w, fg_ptr, free_hi, free_lo); - break; - } - case DictTabInfo::Datafile:{ - FilePtr fg_ptr; - ndbrequire(c_file_hash.find(fg_ptr, tableId)); - const Uint32 free_extents= signal->theData[4]; - packFileIntoPages(w, fg_ptr, free_extents); - break; - } - case DictTabInfo::Undofile:{ - FilePtr fg_ptr; - ndbrequire(c_file_hash.find(fg_ptr, tableId)); - packFileIntoPages(w, fg_ptr, 0); - break; - } - case DictTabInfo::UndefTableType: - case DictTabInfo::HashIndexTrigger: - case DictTabInfo::SubscriptionTrigger: - case DictTabInfo::ReadOnlyConstraint: - case DictTabInfo::IndexTrigger: - ndbrequire(false); - } - - Uint32 wordsOfTable = w.getWordsUsed(); - Uint32 pagesUsed = WORDS2PAGES(wordsOfTable); - pagePtr.p->word[ZPOS_CHECKSUM] = - computeChecksum(&pagePtr.p->word[0], pagesUsed * ZSIZE_OF_PAGES_IN_WORDS); - - switch (c_packTable.m_state) { - case PackTable::PTS_IDLE: - ndbrequire(false); - break; - case PackTable::PTS_GET_TAB: - jam(); - c_retrieveRecord.retrievedNoOfPages = pagesUsed; - c_retrieveRecord.retrievedNoOfWords = wordsOfTable; - sendGetTabResponse(signal); - return; - break; - }//switch - ndbrequire(false); - return; 
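For reference, the schema-version helpers earlier in this hunk (create_obj_inc_schema_version, alter_obj_inc_schema_version, alter_obj_dec_schema_version) treat a 32-bit object version as two packed counters: the low 24 bits count create cycles of an object id, the high 8 bits count online alters, and bumping the create counter deliberately clears the alter bits. A minimal standalone sketch of that arithmetic; the names below are illustrative and not part of the original file:

#include <cstdint>
#include <cstdio>

// Low 24 bits: create counter, bumped when an object id is (re)used.
// High 8 bits: alter counter, bumped on each online ALTER.
static uint32_t create_inc(uint32_t v) { return (v + 1) & 0x00FFFFFF; }  // also clears alter bits
static uint32_t alter_inc(uint32_t v)  { return (v & 0x00FFFFFF) + ((v + 0x1000000) & 0xFF000000); }
static uint32_t alter_dec(uint32_t v)  { return (v & 0x00FFFFFF) + ((v - 0x1000000) & 0xFF000000); }

int main() {
  uint32_t v = create_inc(0);       // first create: version 1, no alters
  v = alter_inc(alter_inc(v));      // two online alters
  v = alter_dec(v);                 // one alter rolled back
  std::printf("create=%u alter=%u\n", v & 0x00FFFFFF, v >> 24);  // prints: create=1 alter=1
  return 0;
}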
-}//packTableIntoPages() -void -Dbdict::packTableIntoPages(SimpleProperties::Writer & w, - TableRecordPtr tablePtr, - Signal* signal){ - - union { - char tableName[MAX_TAB_NAME_SIZE]; - char frmData[MAX_FRM_DATA_SIZE]; - char rangeData[16*MAX_NDB_PARTITIONS]; - char ngData[2*MAX_NDB_PARTITIONS]; - char tsData[2*2*MAX_NDB_PARTITIONS]; - char defaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE]; - char attributeName[MAX_ATTR_NAME_SIZE]; - }; - ConstRope r(c_rope_pool, tablePtr.p->tableName); - r.copy(tableName); - w.add(DictTabInfo::TableName, tableName); - w.add(DictTabInfo::TableId, tablePtr.i); - w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion); - w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey); - w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes); - w.add(DictTabInfo::NoOfNullable, tablePtr.p->noOfNullAttr); - w.add(DictTabInfo::NoOfVariable, (Uint32)0); - w.add(DictTabInfo::KeyLength, tablePtr.p->tupKeyLength); - - w.add(DictTabInfo::TableLoggedFlag, - !!(tablePtr.p->m_bits & TableRecord::TR_Logged)); - w.add(DictTabInfo::RowGCIFlag, - !!(tablePtr.p->m_bits & TableRecord::TR_RowGCI)); - w.add(DictTabInfo::RowChecksumFlag, - !!(tablePtr.p->m_bits & TableRecord::TR_RowChecksum)); - w.add(DictTabInfo::TableTemporaryFlag, - !!(tablePtr.p->m_bits & TableRecord::TR_Temporary)); - w.add(DictTabInfo::ForceVarPartFlag, - !!(tablePtr.p->m_bits & TableRecord::TR_ForceVarPart)); - - w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor); - w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor); - w.add(DictTabInfo::TableKValue, tablePtr.p->kValue); - w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType); - w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType); - w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow); - w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh); - w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag); - w.add(DictTabInfo::LinearHashFlag, tablePtr.p->linearHashFlag); - w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); - w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); - w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); - w.add(DictTabInfo::SingleUserMode, tablePtr.p->singleUserMode); - - if(signal) - { - /* This branch is executed for GET_TABINFOREQ */ - - Uint32 * theData = signal->getDataPtrSend(); - CreateFragmentationReq * const req = (CreateFragmentationReq*)theData; - req->senderRef = 0; - req->senderData = RNIL; - req->fragmentationType = tablePtr.p->fragmentType; - req->noOfFragments = 0; - req->primaryTableId = tablePtr.i; - EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal, - CreateFragmentationReq::SignalLength); - ndbrequire(signal->theData[0] == 0); - Uint16 *data = (Uint16*)&signal->theData[25]; - Uint32 count = 2 + (1 + data[0]) * data[1]; - w.add(DictTabInfo::ReplicaDataLen, 2*count); - for (Uint32 i = 0; i < count; i++) - data[i] = htons(data[i]); - w.add(DictTabInfo::ReplicaData, data, 2*count); - } - else - { - /* This part is executed for CREATE_TABLEREQ, ALTER_TABLEREQ */ - ; - } - - if (tablePtr.p->primaryTableId != RNIL){ - TableRecordPtr primTab; - c_tableRecordPool.getPtr(primTab, tablePtr.p->primaryTableId); - ConstRope r2(c_rope_pool, primTab.p->tableName); - r2.copy(tableName); - w.add(DictTabInfo::PrimaryTable, tableName); - w.add(DictTabInfo::PrimaryTableId, tablePtr.p->primaryTableId); - w.add(DictTabInfo::IndexState, tablePtr.p->indexState); - w.add(DictTabInfo::InsertTriggerId, tablePtr.p->insertTriggerId); - w.add(DictTabInfo::UpdateTriggerId,
tablePtr.p->updateTriggerId); - w.add(DictTabInfo::DeleteTriggerId, tablePtr.p->deleteTriggerId); - w.add(DictTabInfo::CustomTriggerId, tablePtr.p->customTriggerId); - } - - ConstRope frm(c_rope_pool, tablePtr.p->frmData); - frm.copy(frmData); - w.add(DictTabInfo::FrmLen, frm.size()); - w.add(DictTabInfo::FrmData, frmData, frm.size()); - - { - jam(); - ConstRope ts(c_rope_pool, tablePtr.p->tsData); - ts.copy(tsData); - w.add(DictTabInfo::TablespaceDataLen, ts.size()); - w.add(DictTabInfo::TablespaceData, tsData, ts.size()); - - ConstRope ng(c_rope_pool, tablePtr.p->ngData); - ng.copy(ngData); - w.add(DictTabInfo::FragmentDataLen, ng.size()); - w.add(DictTabInfo::FragmentData, ngData, ng.size()); - - ConstRope range(c_rope_pool, tablePtr.p->rangeData); - range.copy(rangeData); - w.add(DictTabInfo::RangeListDataLen, range.size()); - w.add(DictTabInfo::RangeListData, rangeData, range.size()); - } - - if(tablePtr.p->m_tablespace_id != RNIL) - { - w.add(DictTabInfo::TablespaceId, tablePtr.p->m_tablespace_id); - FilegroupPtr tsPtr; - ndbrequire(c_filegroup_hash.find(tsPtr, tablePtr.p->m_tablespace_id)); - w.add(DictTabInfo::TablespaceVersion, tsPtr.p->m_version); - } - - AttributeRecordPtr attrPtr; - LocalDLFifoList list(c_attributeRecordPool, - tablePtr.p->m_attributes); - for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){ - jam(); - - ConstRope name(c_rope_pool, attrPtr.p->attributeName); - name.copy(attributeName); - - w.add(DictTabInfo::AttributeName, attributeName); - w.add(DictTabInfo::AttributeId, attrPtr.p->attributeId); - w.add(DictTabInfo::AttributeKeyFlag, attrPtr.p->tupleKey > 0); - - const Uint32 desc = attrPtr.p->attributeDescriptor; - const Uint32 attrType = AttributeDescriptor::getType(desc); - const Uint32 attrSize = AttributeDescriptor::getSize(desc); - const Uint32 arraySize = AttributeDescriptor::getArraySize(desc); - const Uint32 arrayType = AttributeDescriptor::getArrayType(desc); - const Uint32 nullable = AttributeDescriptor::getNullable(desc); - const Uint32 DKey = AttributeDescriptor::getDKey(desc); - const Uint32 disk= AttributeDescriptor::getDiskBased(desc); - - - // AttributeType deprecated - w.add(DictTabInfo::AttributeSize, attrSize); - w.add(DictTabInfo::AttributeArraySize, arraySize); - w.add(DictTabInfo::AttributeArrayType, arrayType); - w.add(DictTabInfo::AttributeNullableFlag, nullable); - w.add(DictTabInfo::AttributeDKey, DKey); - w.add(DictTabInfo::AttributeExtType, attrType); - w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision); - w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale); - w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength); - w.add(DictTabInfo::AttributeAutoIncrement, - (Uint32)attrPtr.p->autoIncrement); - - if(disk) - w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_DISK); - else - w.add(DictTabInfo::AttributeStorageType, (Uint32)NDB_STORAGETYPE_MEMORY); - - ConstRope def(c_rope_pool, attrPtr.p->defaultValue); - def.copy(defaultValue); - w.add(DictTabInfo::AttributeDefaultValue, defaultValue); - - w.add(DictTabInfo::AttributeEnd, 1); - } - - w.add(DictTabInfo::TableEnd, 1); -} - -void -Dbdict::packFilegroupIntoPages(SimpleProperties::Writer & w, - FilegroupPtr fg_ptr, - const Uint32 undo_free_hi, - const Uint32 undo_free_lo){ - - DictFilegroupInfo::Filegroup fg; fg.init(); - ConstRope r(c_rope_pool, fg_ptr.p->m_name); - r.copy(fg.FilegroupName); - - fg.FilegroupId = fg_ptr.p->key; - fg.FilegroupType = fg_ptr.p->m_type; - fg.FilegroupVersion = fg_ptr.p->m_version; - - 
switch(fg.FilegroupType){ - case DictTabInfo::Tablespace: - //fg.TS_DataGrow = group.m_grow_spec; - fg.TS_ExtentSize = fg_ptr.p->m_tablespace.m_extent_size; - fg.TS_LogfileGroupId = fg_ptr.p->m_tablespace.m_default_logfile_group_id; - FilegroupPtr lfg_ptr; - ndbrequire(c_filegroup_hash.find(lfg_ptr, fg.TS_LogfileGroupId)); - fg.TS_LogfileGroupVersion = lfg_ptr.p->m_version; - break; - case DictTabInfo::LogfileGroup: - fg.LF_UndoBufferSize = fg_ptr.p->m_logfilegroup.m_undo_buffer_size; - fg.LF_UndoFreeWordsHi= undo_free_hi; - fg.LF_UndoFreeWordsLo= undo_free_lo; - //fg.LF_UndoGrow = ; - break; - default: - ndbrequire(false); - } - - SimpleProperties::UnpackStatus s; - s = SimpleProperties::pack(w, - &fg, - DictFilegroupInfo::Mapping, - DictFilegroupInfo::MappingSize, true); - - ndbrequire(s == SimpleProperties::Eof); -} - -void -Dbdict::packFileIntoPages(SimpleProperties::Writer & w, - FilePtr f_ptr, const Uint32 free_extents){ - - DictFilegroupInfo::File f; f.init(); - ConstRope r(c_rope_pool, f_ptr.p->m_path); - r.copy(f.FileName); - - f.FileType = f_ptr.p->m_type; - f.FilegroupId = f_ptr.p->m_filegroup_id;; //group.m_id; - f.FileSizeHi = (f_ptr.p->m_file_size >> 32); - f.FileSizeLo = (f_ptr.p->m_file_size & 0xFFFFFFFF); - f.FileFreeExtents= free_extents; - f.FileId = f_ptr.p->key; - f.FileVersion = f_ptr.p->m_version; - - FilegroupPtr lfg_ptr; - ndbrequire(c_filegroup_hash.find(lfg_ptr, f.FilegroupId)); - f.FilegroupVersion = lfg_ptr.p->m_version; - - SimpleProperties::UnpackStatus s; - s = SimpleProperties::pack(w, - &f, - DictFilegroupInfo::FileMapping, - DictFilegroupInfo::FileMappingSize, true); - - ndbrequire(s == SimpleProperties::Eof); -} - -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -// The routines to handle responses from file system. -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ - -/* ---------------------------------------------------------------- */ -// A file was successfully closed. -/* ---------------------------------------------------------------- */ -void Dbdict::execFSCLOSECONF(Signal* signal) -{ - FsConnectRecordPtr fsPtr; - FsConf * const fsConf = (FsConf *)&signal->theData[0]; - jamEntry(); - c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer); - switch (fsPtr.p->fsState) { - case FsConnectRecord::CLOSE_WRITE_SCHEMA: - jam(); - closeWriteSchemaConf(signal, fsPtr); - break; - case FsConnectRecord::CLOSE_READ_SCHEMA: - jam(); - closeReadSchemaConf(signal, fsPtr); - break; - case FsConnectRecord::CLOSE_READ_TAB_FILE: - jam(); - closeReadTableConf(signal, fsPtr); - break; - case FsConnectRecord::CLOSE_WRITE_TAB_FILE: - jam(); - closeWriteTableConf(signal, fsPtr); - break; - case FsConnectRecord::OPEN_READ_SCHEMA2: - openSchemaFile(signal, 1, fsPtr.i, false, false); - break; - case FsConnectRecord::OPEN_READ_TAB_FILE2: - openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); - break; - default: - jamLine((fsPtr.p->fsState & 0xFFF)); - ndbrequire(false); - break; - }//switch -}//execFSCLOSECONF() - - -/* ---------------------------------------------------------------- */ -// A file was successfully opened. 
-/* ---------------------------------------------------------------- */ -void Dbdict::execFSOPENCONF(Signal* signal) -{ - FsConnectRecordPtr fsPtr; - jamEntry(); - FsConf * const fsConf = (FsConf *)&signal->theData[0]; - c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer); - - Uint32 filePointer = fsConf->filePointer; - fsPtr.p->filePtr = filePointer; - switch (fsPtr.p->fsState) { - case FsConnectRecord::OPEN_WRITE_SCHEMA: - jam(); - fsPtr.p->fsState = FsConnectRecord::WRITE_SCHEMA; - writeSchemaFile(signal, filePointer, fsPtr.i); - break; - case FsConnectRecord::OPEN_READ_SCHEMA1: - jam(); - fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA1; - readSchemaFile(signal, filePointer, fsPtr.i); - break; - case FsConnectRecord::OPEN_READ_SCHEMA2: - jam(); - fsPtr.p->fsState = FsConnectRecord::READ_SCHEMA2; - readSchemaFile(signal, filePointer, fsPtr.i); - break; - case FsConnectRecord::OPEN_READ_TAB_FILE1: - jam(); - fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE1; - readTableFile(signal, filePointer, fsPtr.i); - break; - case FsConnectRecord::OPEN_READ_TAB_FILE2: - jam(); - fsPtr.p->fsState = FsConnectRecord::READ_TAB_FILE2; - readTableFile(signal, filePointer, fsPtr.i); - break; - case FsConnectRecord::OPEN_WRITE_TAB_FILE: - jam(); - fsPtr.p->fsState = FsConnectRecord::WRITE_TAB_FILE; - writeTableFile(signal, filePointer, fsPtr.i); - break; - default: - jamLine((fsPtr.p->fsState & 0xFFF)); - ndbrequire(false); - break; - }//switch -}//execFSOPENCONF() - -/* ---------------------------------------------------------------- */ -// An open file was refused. -/* ---------------------------------------------------------------- */ -void Dbdict::execFSOPENREF(Signal* signal) -{ - jamEntry(); - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer); - switch (fsPtr.p->fsState) { - case FsConnectRecord::OPEN_READ_SCHEMA1: - jam(); - openReadSchemaRef(signal, fsPtr); - return; - case FsConnectRecord::OPEN_READ_TAB_FILE1: - jam(); - openReadTableRef(signal, fsPtr); - return; - default: - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system open failed during FsConnectRecord state %d", (Uint32)fsPtr.p->fsState); - fsRefError(signal,__LINE__,msg); - } -}//execFSOPENREF() - -/* ---------------------------------------------------------------- */ -// A file was successfully read. -/* ---------------------------------------------------------------- */ -void Dbdict::execFSREADCONF(Signal* signal) -{ - jamEntry(); - FsConf * const fsConf = (FsConf *)&signal->theData[0]; - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer); - switch (fsPtr.p->fsState) { - case FsConnectRecord::READ_SCHEMA1: - case FsConnectRecord::READ_SCHEMA2: - readSchemaConf(signal ,fsPtr); - break; - case FsConnectRecord::READ_TAB_FILE1: - if(ERROR_INSERTED(6007)){ - jam(); - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = fsConf->userPointer; - fsRef->setErrorCode(fsRef->errorCode, NDBD_EXIT_AFS_UNKNOWN); - fsRef->osErrorCode = ~0; // Indicate local error - execFSREADREF(signal); - return; - }//Testing how DICT behave if read of file 1 fails (Bug#28770) - case FsConnectRecord::READ_TAB_FILE2: - jam(); - readTableConf(signal ,fsPtr); - break; - default: - jamLine((fsPtr.p->fsState & 0xFFF)); - ndbrequire(false); - break; - }//switch -}//execFSREADCONF() - -/* ---------------------------------------------------------------- */ -// A read file was refused. 
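The table-file and schema-file routines in this block stamp each page with a checksum word (ZPOS_CHECKSUM) before writing, and after a read they accept a page only if computeChecksum over the whole page, stored checksum included, comes back as zero; on a mismatch the primary copy is closed and the backup copy is tried instead (readTableRef, readSchemaRef). The computeChecksum implementation is not part of this hunk; a word-wise XOR is one checksum with exactly that self-cancelling property, sketched here with hypothetical names:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the block's computeChecksum(): XOR of all words.
static uint32_t xor_checksum(const uint32_t* words, std::size_t n) {
  uint32_t sum = 0;
  for (std::size_t i = 0; i < n; i++) sum ^= words[i];
  return sum;
}

int main() {
  std::vector<uint32_t> page(8192 / 4, 0);  // one zero-initialised page (size arbitrary here)
  page[5] = 0xdeadbeef;                     // some payload word
  // Write side: the header, including the checksum word (word 0 in this sketch),
  // starts zeroed, then the checksum over the whole page is stored into it.
  page[0] = xor_checksum(page.data(), page.size());
  // Read side: recomputing over the page, checksum word included, must yield 0.
  assert(xor_checksum(page.data(), page.size()) == 0);
  return 0;
}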
-/* ---------------------------------------------------------------- */ -void Dbdict::execFSREADREF(Signal* signal) -{ - jamEntry(); - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, fsRef->userPointer); - switch (fsPtr.p->fsState) { - case FsConnectRecord::READ_SCHEMA1: - jam(); - readSchemaRef(signal, fsPtr); - return; - case FsConnectRecord::READ_TAB_FILE1: - jam(); - readTableRef(signal, fsPtr); - return; - default: - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system read failed during FsConnectRecord state %d", (Uint32)fsPtr.p->fsState); - fsRefError(signal,__LINE__,msg); - } -}//execFSREADREF() - -/* ---------------------------------------------------------------- */ -// A file was successfully written. -/* ---------------------------------------------------------------- */ -void Dbdict::execFSWRITECONF(Signal* signal) -{ - FsConf * const fsConf = (FsConf *)&signal->theData[0]; - FsConnectRecordPtr fsPtr; - jamEntry(); - c_fsConnectRecordPool.getPtr(fsPtr, fsConf->userPointer); - switch (fsPtr.p->fsState) { - case FsConnectRecord::WRITE_TAB_FILE: - writeTableConf(signal, fsPtr); - break; - case FsConnectRecord::WRITE_SCHEMA: - jam(); - writeSchemaConf(signal, fsPtr); - break; - default: - jamLine((fsPtr.p->fsState & 0xFFF)); - ndbrequire(false); - break; - }//switch -}//execFSWRITECONF() - - -/* ---------------------------------------------------------------- */ -// Routines to handle Read/Write of Table Files -/* ---------------------------------------------------------------- */ -void -Dbdict::writeTableFile(Signal* signal, Uint32 tableId, - SegmentedSectionPtr tabInfoPtr, Callback* callback){ - - ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE); - - Uint32 pages = WORDS2PAGES(tabInfoPtr.sz); - c_writeTableRecord.no_of_words = tabInfoPtr.sz; - c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK; - c_writeTableRecord.m_callback = * callback; - - c_writeTableRecord.pageId = 0; - ndbrequire(pages == 1); - - PageRecordPtr pageRecPtr; - c_pageRecordArray.getPtr(pageRecPtr, c_writeTableRecord.pageId); - copy(&pageRecPtr.p->word[ZPAGE_HEADER_SIZE], tabInfoPtr); - - memset(&pageRecPtr.p->word[0], 0, 4 * ZPAGE_HEADER_SIZE); - pageRecPtr.p->word[ZPOS_CHECKSUM] = - computeChecksum(&pageRecPtr.p->word[0], - pages * ZSIZE_OF_PAGES_IN_WORDS); - - startWriteTableFile(signal, tableId); -} - -void Dbdict::startWriteTableFile(Signal* signal, Uint32 tableId) -{ - FsConnectRecordPtr fsPtr; - c_writeTableRecord.tableId = tableId; - c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord()); - fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE; - openTableFile(signal, 0, fsPtr.i, tableId, true); - c_writeTableRecord.noOfTableFilesHandled = 0; -}//Dbdict::startWriteTableFile() - -void Dbdict::openTableFile(Signal* signal, - Uint32 fileNo, - Uint32 fsConPtr, - Uint32 tableId, - bool writeFlag) -{ - FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0]; - - fsOpenReq->userReference = reference(); - fsOpenReq->userPointer = fsConPtr; - if (writeFlag) { - jam(); - fsOpenReq->fileFlags = - FsOpenReq::OM_WRITEONLY | - FsOpenReq::OM_TRUNCATE | - FsOpenReq::OM_CREATE | - FsOpenReq::OM_SYNC; - } else { - jam(); - fsOpenReq->fileFlags = FsOpenReq::OM_READONLY; - }//if - fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes - FsOpenReq::setVersion(fsOpenReq->fileNumber, 1); - FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_TABLELIST); - 
FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1)); - FsOpenReq::v1_setTable(fsOpenReq->fileNumber, tableId); - FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1); - FsOpenReq::v1_setS(fsOpenReq->fileNumber, 0); - FsOpenReq::v1_setP(fsOpenReq->fileNumber, 255); -/* ---------------------------------------------------------------- */ -// File name : D1/DBDICT/T0/S1.TableList -// D1 means Disk 1 (set by fileNo + 1) -// T0 means table id = 0 -// S1 means tableVersion 1 -// TableList indicates that this is a file for a table description. -/* ---------------------------------------------------------------- */ - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -}//openTableFile() - -void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) -{ - FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0]; - - fsRWReq->filePointer = filePtr; - fsRWReq->userReference = reference(); - fsRWReq->userPointer = fsConPtr; - fsRWReq->operationFlag = 0; // Initialise before bit changes - FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1); - FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, - FsReadWriteReq::fsFormatArrayOfPages); - fsRWReq->varIndex = ZBAT_TABLE_FILE; - fsRWReq->numberOfPages = WORDS2PAGES(c_writeTableRecord.no_of_words); - fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId; - fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0 - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA); -}//writeTableFile() - -void Dbdict::writeTableConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_TAB_FILE; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -}//Dbdict::writeTableConf() - -void Dbdict::closeWriteTableConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - c_writeTableRecord.noOfTableFilesHandled++; - if (c_writeTableRecord.noOfTableFilesHandled < 2) { - jam(); - fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_TAB_FILE; - openTableFile(signal, 1, fsPtr.i, c_writeTableRecord.tableId, true); - return; - } - ndbrequire(c_writeTableRecord.noOfTableFilesHandled == 2); - c_fsConnectRecordPool.release(fsPtr); - WriteTableRecord::TableWriteState state = c_writeTableRecord.tableWriteState; - c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE; - switch (state) { - case WriteTableRecord::IDLE: - case WriteTableRecord::WRITE_ADD_TABLE_MASTER : - case WriteTableRecord::WRITE_ADD_TABLE_SLAVE : - case WriteTableRecord::WRITE_RESTART_FROM_MASTER : - case WriteTableRecord::WRITE_RESTART_FROM_OWN : - ndbrequire(false); - break; - case WriteTableRecord::TWR_CALLBACK: - jam(); - execute(signal, c_writeTableRecord.m_callback, 0); - return; - } - ndbrequire(false); -}//Dbdict::closeWriteTableConf() - -void Dbdict::startReadTableFile(Signal* signal, Uint32 tableId) -{ - //globalSignalLoggers.log(number(), "startReadTableFile"); - ndbrequire(!c_readTableRecord.inUse); - - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord()); - c_readTableRecord.inUse = true; - c_readTableRecord.tableId = tableId; - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE1; - openTableFile(signal, 0, fsPtr.i, tableId, false); -}//Dbdict::startReadTableFile() - -void Dbdict::openReadTableRef(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2; - openTableFile(signal, 1, fsPtr.i, c_readTableRecord.tableId, false); - return; -}//Dbdict::openReadTableConf() - -void 
Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) -{ - FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0]; - - fsRWReq->filePointer = filePtr; - fsRWReq->userReference = reference(); - fsRWReq->userPointer = fsConPtr; - fsRWReq->operationFlag = 0; // Initialise before bit changes - FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0); - FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, - FsReadWriteReq::fsFormatArrayOfPages); - fsRWReq->varIndex = ZBAT_TABLE_FILE; - fsRWReq->numberOfPages = WORDS2PAGES(c_readTableRecord.no_of_words); - fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId; - fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0 - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA); -}//readTableFile() - -void Dbdict::readTableConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - /* ---------------------------------------------------------------- */ - // Verify the data read from disk - /* ---------------------------------------------------------------- */ - bool crashInd; - if (fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1) { - jam(); - crashInd = false; - } else { - jam(); - crashInd = true; - }//if - - PageRecordPtr tmpPagePtr; - c_pageRecordArray.getPtr(tmpPagePtr, c_readTableRecord.pageId); - Uint32 sz = - WORDS2PAGES(c_readTableRecord.no_of_words)*ZSIZE_OF_PAGES_IN_WORDS; - Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz); - - ndbrequire((chk == 0) || !crashInd); - if(chk != 0){ - jam(); - ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_TAB_FILE1); - readTableRef(signal, fsPtr); - return; - }//if - - fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_TAB_FILE; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -}//Dbdict::readTableConf() - -void Dbdict::readTableRef(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - /** - * First close corrupt file - */ - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_TAB_FILE2; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -}//Dbdict::readTableRef() - -void Dbdict::closeReadTableConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - c_fsConnectRecordPool.release(fsPtr); - c_readTableRecord.inUse = false; - - execute(signal, c_readTableRecord.m_callback, 0); - return; -}//Dbdict::closeReadTableConf() - -/* ---------------------------------------------------------------- */ -// Routines to handle Read/Write of Schema Files -/* ---------------------------------------------------------------- */ -void -Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, - SchemaFile::TableEntry* te, Callback* callback, - bool savetodisk){ - jam(); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); - - SchemaFile::TableState newState = - (SchemaFile::TableState)te->m_tableState; - SchemaFile::TableState oldState = - (SchemaFile::TableState)tableEntry->m_tableState; - - Uint32 newVersion = te->m_tableVersion; - Uint32 oldVersion = tableEntry->m_tableVersion; - - bool ok = false; - switch(newState){ - case SchemaFile::ADD_STARTED: - jam(); - ok = true; - ndbrequire(create_obj_inc_schema_version(oldVersion) == newVersion); - ndbrequire(oldState == SchemaFile::INIT || - oldState == SchemaFile::DROP_TABLE_COMMITTED); - break; - case SchemaFile::TABLE_ADD_COMMITTED: - jam(); - ok = true; - ndbrequire(newVersion == oldVersion); - ndbrequire(oldState == SchemaFile::ADD_STARTED || - oldState == SchemaFile::DROP_TABLE_STARTED); - break; - case 
SchemaFile::ALTER_TABLE_COMMITTED: - jam(); - ok = true; - ndbrequire(alter_obj_inc_schema_version(oldVersion) == newVersion); - ndbrequire(oldState == SchemaFile::TABLE_ADD_COMMITTED || - oldState == SchemaFile::ALTER_TABLE_COMMITTED); - break; - case SchemaFile::DROP_TABLE_STARTED: - jam(); - case SchemaFile::DROP_TABLE_COMMITTED: - jam(); - ok = true; - break; - case SchemaFile::TEMPORARY_TABLE_COMMITTED: - jam(); - ndbrequire(oldState == SchemaFile::ADD_STARTED || - oldState == SchemaFile::TEMPORARY_TABLE_COMMITTED); - ok = true; - break; - case SchemaFile::INIT: - jam(); - ok = true; - ndbrequire((oldState == SchemaFile::ADD_STARTED)); - }//if - ndbrequire(ok); - - * tableEntry = * te; - computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES); - - if (savetodisk) - { - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = false; - c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback = * callback; - - startWriteSchemaFile(signal); - } - else - { - execute(signal, *callback, 0); - } -} - -void Dbdict::startWriteSchemaFile(Signal* signal) -{ - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord()); - fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA; - openSchemaFile(signal, 0, fsPtr.i, true, c_writeSchemaRecord.newFile); - c_writeSchemaRecord.noOfSchemaFilesHandled = 0; -}//Dbdict::startWriteSchemaFile() - -void Dbdict::openSchemaFile(Signal* signal, - Uint32 fileNo, - Uint32 fsConPtr, - bool writeFlag, - bool newFile) -{ - FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0]; - fsOpenReq->userReference = reference(); - fsOpenReq->userPointer = fsConPtr; - if (writeFlag) { - jam(); - fsOpenReq->fileFlags = - FsOpenReq::OM_WRITEONLY | - FsOpenReq::OM_SYNC; - if (newFile) - fsOpenReq->fileFlags |= - FsOpenReq::OM_TRUNCATE | - FsOpenReq::OM_CREATE; - } else { - jam(); - fsOpenReq->fileFlags = FsOpenReq::OM_READONLY; - }//if - fsOpenReq->fileNumber[3] = 0; // Initialise before byte changes - FsOpenReq::setVersion(fsOpenReq->fileNumber, 1); - FsOpenReq::setSuffix(fsOpenReq->fileNumber, FsOpenReq::S_SCHEMALOG); - FsOpenReq::v1_setDisk(fsOpenReq->fileNumber, (fileNo + 1)); - FsOpenReq::v1_setTable(fsOpenReq->fileNumber, (Uint32)-1); - FsOpenReq::v1_setFragment(fsOpenReq->fileNumber, (Uint32)-1); - FsOpenReq::v1_setS(fsOpenReq->fileNumber, (Uint32)-1); - FsOpenReq::v1_setP(fsOpenReq->fileNumber, 0); -/* ---------------------------------------------------------------- */ -// File name : D1/DBDICT/P0.SchemaLog -// D1 means Disk 1 (set by fileNo + 1). Writes to both D1 and D2 -// SchemaLog indicates that this is a file giving a list of current tables. 
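As the comment above notes, the schema log (like the table files) is kept as two copies, D1 and D2: the write path loops until noOfSchemaFilesHandled / noOfTableFilesHandled reaches 2, and the read path opens copy 1 first and falls back to copy 2 only when the first copy cannot be opened or fails verification. A minimal sketch of that write-both / read-with-fallback pattern, using ordinary files instead of the NDBFS signal interface; file names and helpers below are invented for illustration:

#include <fstream>
#include <iterator>
#include <optional>
#include <string>

// Write the same payload to both copies; the real block does this one copy
// at a time via FSOPENREQ/FSWRITEREQ/FSCLOSEREQ round trips.
static bool write_both(const std::string& base, const std::string& data) {
  for (int copy = 1; copy <= 2; copy++) {
    std::ofstream out(base + ".D" + std::to_string(copy), std::ios::trunc);
    if (!(out << data)) return false;
  }
  return true;
}

// Read copy 1; if it is unreadable (or would fail validation), try copy 2.
static std::optional<std::string> read_with_fallback(const std::string& base) {
  for (int copy = 1; copy <= 2; copy++) {
    std::ifstream in(base + ".D" + std::to_string(copy));
    std::string data((std::istreambuf_iterator<char>(in)),
                     std::istreambuf_iterator<char>());
    if (in && !data.empty()) return data;  // emptiness check stands in for the checksum
  }
  return std::nullopt;
}

int main() {
  write_both("schemalog", "table list, version 1");
  return read_with_fallback("schemalog") ? 0 : 1;
}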
-/* ---------------------------------------------------------------- */ - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -}//openSchemaFile() - -void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) -{ - FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0]; - - // check write record - WriteSchemaRecord & wr = c_writeSchemaRecord; - ndbrequire(wr.pageId == (wr.pageId != 0) * NDB_SF_MAX_PAGES); - ndbrequire(wr.noOfPages != 0); - ndbrequire(wr.firstPage + wr.noOfPages <= NDB_SF_MAX_PAGES); - - fsRWReq->filePointer = filePtr; - fsRWReq->userReference = reference(); - fsRWReq->userPointer = fsConPtr; - fsRWReq->operationFlag = 0; // Initialise before bit changes - FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1); - FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, - FsReadWriteReq::fsFormatArrayOfPages); - fsRWReq->varIndex = ZBAT_SCHEMA_FILE; - fsRWReq->numberOfPages = wr.noOfPages; - // Write from memory page - fsRWReq->data.arrayOfPages.varIndex = wr.pageId + wr.firstPage; - fsRWReq->data.arrayOfPages.fileOffset = wr.firstPage; - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA); -}//writeSchemaFile() - -void Dbdict::writeSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - fsPtr.p->fsState = FsConnectRecord::CLOSE_WRITE_SCHEMA; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -}//Dbdict::writeSchemaConf() - -void Dbdict::closeFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) -{ - FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0]; - fsCloseReq->filePointer = filePtr; - fsCloseReq->userReference = reference(); - fsCloseReq->userPointer = fsConPtr; - FsCloseReq::setRemoveFileFlag(fsCloseReq->fileFlag, false); - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA); - return; -}//closeFile() - -void Dbdict::closeWriteSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - c_writeSchemaRecord.noOfSchemaFilesHandled++; - if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) { - jam(); - fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA; - openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile); - return; - } - ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2); - - c_fsConnectRecordPool.release(fsPtr); - - c_writeSchemaRecord.inUse = false; - execute(signal, c_writeSchemaRecord.m_callback, 0); - return; -}//Dbdict::closeWriteSchemaConf() - -void Dbdict::startReadSchemaFile(Signal* signal) -{ - //globalSignalLoggers.log(number(), "startReadSchemaFile"); - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord()); - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1; - openSchemaFile(signal, 0, fsPtr.i, false, false); -}//Dbdict::startReadSchemaFile() - -void Dbdict::openReadSchemaRef(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2; - openSchemaFile(signal, 1, fsPtr.i, false, false); -}//Dbdict::openReadSchemaRef() - -void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr) -{ - FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0]; - - // check read record - ReadSchemaRecord & rr = c_readSchemaRecord; - ndbrequire(rr.pageId == (rr.pageId != 0) * NDB_SF_MAX_PAGES); - ndbrequire(rr.noOfPages != 0); - ndbrequire(rr.firstPage + rr.noOfPages <= NDB_SF_MAX_PAGES); - - fsRWReq->filePointer = filePtr; - fsRWReq->userReference = reference(); - fsRWReq->userPointer = fsConPtr; - fsRWReq->operationFlag = 0; // Initialise 
before bit changes - FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0); - FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag, - FsReadWriteReq::fsFormatArrayOfPages); - fsRWReq->varIndex = ZBAT_SCHEMA_FILE; - fsRWReq->numberOfPages = rr.noOfPages; - fsRWReq->data.arrayOfPages.varIndex = rr.pageId + rr.firstPage; - fsRWReq->data.arrayOfPages.fileOffset = rr.firstPage; - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA); -}//readSchemaFile() - -void Dbdict::readSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ -/* ---------------------------------------------------------------- */ -// Verify the data read from disk -/* ---------------------------------------------------------------- */ - bool crashInd; - if (fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1) { - jam(); - crashInd = false; - } else { - jam(); - crashInd = true; - }//if - - ReadSchemaRecord & rr = c_readSchemaRecord; - XSchemaFile * xsf = &c_schemaFile[rr.pageId != 0]; - - if (rr.schemaReadState == ReadSchemaRecord::INITIAL_READ_HEAD) { - jam(); - ndbrequire(rr.firstPage == 0); - SchemaFile * sf = &xsf->schemaPage[0]; - Uint32 noOfPages; - if (sf->NdbVersion < NDB_SF_VERSION_5_0_6) { - jam(); - const Uint32 pageSize_old = 32 * 1024; - noOfPages = pageSize_old / NDB_SF_PAGE_SIZE - 1; - } else { - noOfPages = sf->FileSize / NDB_SF_PAGE_SIZE - 1; - } - rr.schemaReadState = ReadSchemaRecord::INITIAL_READ; - if (noOfPages != 0) { - rr.firstPage = 1; - rr.noOfPages = noOfPages; - readSchemaFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; - } - } - - SchemaFile * sf0 = &xsf->schemaPage[0]; - xsf->noOfPages = sf0->FileSize / NDB_SF_PAGE_SIZE; - - if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6 && - ! convertSchemaFileTo_5_0_6(xsf)) { - jam(); - ndbrequire(! crashInd); - ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1); - readSchemaRef(signal, fsPtr); - return; - } - - for (Uint32 n = 0; n < xsf->noOfPages; n++) { - SchemaFile * sf = &xsf->schemaPage[n]; - bool ok = false; - const char *reason; - if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0) - { jam(); reason = "magic code"; } - else if (sf->FileSize == 0) - { jam(); reason = "file size == 0"; } - else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0) - { jam(); reason = "invalid size multiple"; } - else if (sf->FileSize != sf0->FileSize) - { jam(); reason = "invalid size"; } - else if (sf->PageNumber != n) - { jam(); reason = "invalid page number"; } - else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0) - { jam(); reason = "invalid checksum"; } - else - ok = true; - - if (!ok) - { - char reason_msg[128]; - snprintf(reason_msg, sizeof(reason_msg), - "schema file corrupt, page %u (%s, " - "sz=%u sz0=%u pn=%u)", - n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber); - if (crashInd) - progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg); - ndbrequireErr(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1, - NDBD_EXIT_SR_SCHEMAFILE); - jam(); - infoEvent("primary %s, trying backup", reason_msg); - readSchemaRef(signal, fsPtr); - return; - } - } - - fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -}//Dbdict::readSchemaConf() - -void Dbdict::readSchemaRef(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - /** - * First close corrupt file - */ - fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2; - closeFile(signal, fsPtr.p->filePtr, fsPtr.i); - return; -} - -void Dbdict::closeReadSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr) -{ - 
c_fsConnectRecordPool.release(fsPtr); - ReadSchemaRecord::SchemaReadState state = c_readSchemaRecord.schemaReadState; - c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE; - - switch(state) { - case ReadSchemaRecord::INITIAL_READ : - jam(); - { - // write back both copies - - ndbrequire(c_writeSchemaRecord.inUse == false); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ]; - Uint32 noOfPages = - (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) / - NDB_SF_PAGE_ENTRIES; - resizeSchemaFile(xsf, noOfPages); - - c_writeSchemaRecord.inUse = true; - c_writeSchemaRecord.pageId = c_schemaRecord.oldSchemaPage; - c_writeSchemaRecord.newFile = true; - c_writeSchemaRecord.firstPage = 0; - c_writeSchemaRecord.noOfPages = xsf->noOfPages; - - c_writeSchemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::initSchemaFile_conf); - - startWriteSchemaFile(signal); - } - break; - - default : - ndbrequire(false); - break; - - }//switch -}//Dbdict::closeReadSchemaConf() - -bool -Dbdict::convertSchemaFileTo_5_0_6(XSchemaFile * xsf) -{ - const Uint32 pageSize_old = 32 * 1024; - Uint32 page_old[pageSize_old >> 2]; - SchemaFile * sf_old = (SchemaFile *)page_old; - - if (xsf->noOfPages * NDB_SF_PAGE_SIZE != pageSize_old) - return false; - SchemaFile * sf0 = &xsf->schemaPage[0]; - memcpy(sf_old, sf0, pageSize_old); - - // init max number new pages needed - xsf->noOfPages = (sf_old->NoOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) / - NDB_SF_PAGE_ENTRIES; - initSchemaFile(xsf, 0, xsf->noOfPages, true); - - Uint32 noOfPages = 1; - Uint32 n, i, j; - for (n = 0; n < xsf->noOfPages; n++) { - jam(); - for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) { - j = n * NDB_SF_PAGE_ENTRIES + i; - if (j >= sf_old->NoOfTableEntries) - continue; - const SchemaFile::TableEntry_old & te_old = sf_old->TableEntries_old[j]; - if (te_old.m_tableState == SchemaFile::INIT || - te_old.m_tableState == SchemaFile::DROP_TABLE_COMMITTED || - te_old.m_noOfPages == 0) - continue; - SchemaFile * sf = &xsf->schemaPage[n]; - SchemaFile::TableEntry & te = sf->TableEntries[i]; - te.m_tableState = te_old.m_tableState; - te.m_tableVersion = te_old.m_tableVersion; - te.m_tableType = te_old.m_tableType; - te.m_info_words = te_old.m_noOfPages * ZSIZE_OF_PAGES_IN_WORDS - - ZPAGE_HEADER_SIZE; - te.m_gcp = te_old.m_gcp; - if (noOfPages < n) - noOfPages = n; - } - } - xsf->noOfPages = noOfPages; - initSchemaFile(xsf, 0, xsf->noOfPages, false); - - return true; -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: INITIALISATION MODULE ------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains initialisation of data at start/restart. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -Dbdict::Dbdict(Block_context& ctx): - SimulatedBlock(DBDICT, ctx), - c_attributeRecordHash(c_attributeRecordPool), - c_file_hash(c_file_pool), - c_filegroup_hash(c_filegroup_pool), - c_obj_hash(c_obj_pool), - c_opCreateTable(c_opRecordPool), - c_opDropTable(c_opRecordPool), - c_opCreateIndex(c_opRecordPool), - c_opDropIndex(c_opRecordPool), - c_opAlterIndex(c_opRecordPool), - c_opBuildIndex(c_opRecordPool), - c_opCreateEvent(c_opRecordPool), - c_opSubEvent(c_opRecordPool), - c_opDropEvent(c_opRecordPool), - c_opSignalUtil(c_opRecordPool), - c_opCreateTrigger(c_opRecordPool), - c_opDropTrigger(c_opRecordPool), - c_opAlterTrigger(c_opRecordPool), - c_schemaOp(c_opRecordPool), - c_Trans(c_opRecordPool), - c_opCreateObj(c_schemaOp), - c_opDropObj(c_schemaOp), - c_opRecordSequence(0), - c_dictLockQueue(c_dictLockPool), - c_dictLockPoll(false) -{ - BLOCK_CONSTRUCTOR(Dbdict); - - // Transit signals - addRecSignal(GSN_DUMP_STATE_ORD, &Dbdict::execDUMP_STATE_ORD); - addRecSignal(GSN_GET_TABINFOREQ, &Dbdict::execGET_TABINFOREQ); - addRecSignal(GSN_GET_TABLEID_REQ, &Dbdict::execGET_TABLEDID_REQ); - addRecSignal(GSN_GET_TABINFO_CONF, &Dbdict::execGET_TABINFO_CONF); - addRecSignal(GSN_CONTINUEB, &Dbdict::execCONTINUEB); - - addRecSignal(GSN_CREATE_TABLE_REQ, &Dbdict::execCREATE_TABLE_REQ); - addRecSignal(GSN_CREATE_TAB_REQ, &Dbdict::execCREATE_TAB_REQ); - addRecSignal(GSN_CREATE_TAB_REF, &Dbdict::execCREATE_TAB_REF); - addRecSignal(GSN_CREATE_TAB_CONF, &Dbdict::execCREATE_TAB_CONF); - addRecSignal(GSN_CREATE_FRAGMENTATION_REF, &Dbdict::execCREATE_FRAGMENTATION_REF); - addRecSignal(GSN_CREATE_FRAGMENTATION_CONF, &Dbdict::execCREATE_FRAGMENTATION_CONF); - addRecSignal(GSN_DIADDTABCONF, &Dbdict::execDIADDTABCONF); - addRecSignal(GSN_DIADDTABREF, &Dbdict::execDIADDTABREF); - addRecSignal(GSN_ADD_FRAGREQ, &Dbdict::execADD_FRAGREQ); - addRecSignal(GSN_TAB_COMMITCONF, &Dbdict::execTAB_COMMITCONF); - addRecSignal(GSN_TAB_COMMITREF, &Dbdict::execTAB_COMMITREF); - addRecSignal(GSN_ALTER_TABLE_REQ, &Dbdict::execALTER_TABLE_REQ); - addRecSignal(GSN_ALTER_TAB_REQ, &Dbdict::execALTER_TAB_REQ); - addRecSignal(GSN_ALTER_TAB_REF, &Dbdict::execALTER_TAB_REF); - addRecSignal(GSN_ALTER_TAB_CONF, &Dbdict::execALTER_TAB_CONF); - - // Index signals - addRecSignal(GSN_CREATE_INDX_REQ, &Dbdict::execCREATE_INDX_REQ); - addRecSignal(GSN_CREATE_INDX_CONF, &Dbdict::execCREATE_INDX_CONF); - addRecSignal(GSN_CREATE_INDX_REF, &Dbdict::execCREATE_INDX_REF); - - addRecSignal(GSN_ALTER_INDX_REQ, &Dbdict::execALTER_INDX_REQ); - addRecSignal(GSN_ALTER_INDX_CONF, &Dbdict::execALTER_INDX_CONF); - addRecSignal(GSN_ALTER_INDX_REF, &Dbdict::execALTER_INDX_REF); - - addRecSignal(GSN_CREATE_TABLE_CONF, &Dbdict::execCREATE_TABLE_CONF); - addRecSignal(GSN_CREATE_TABLE_REF, &Dbdict::execCREATE_TABLE_REF); - - addRecSignal(GSN_DROP_INDX_REQ, &Dbdict::execDROP_INDX_REQ); - addRecSignal(GSN_DROP_INDX_CONF, &Dbdict::execDROP_INDX_CONF); - addRecSignal(GSN_DROP_INDX_REF, &Dbdict::execDROP_INDX_REF); - - addRecSignal(GSN_DROP_TABLE_CONF, &Dbdict::execDROP_TABLE_CONF); - addRecSignal(GSN_DROP_TABLE_REF, &Dbdict::execDROP_TABLE_REF); - - addRecSignal(GSN_BUILDINDXREQ, &Dbdict::execBUILDINDXREQ); - addRecSignal(GSN_BUILDINDXCONF, &Dbdict::execBUILDINDXCONF); - addRecSignal(GSN_BUILDINDXREF, &Dbdict::execBUILDINDXREF); - - // Util signals - addRecSignal(GSN_UTIL_PREPARE_CONF, &Dbdict::execUTIL_PREPARE_CONF); - 
addRecSignal(GSN_UTIL_PREPARE_REF, &Dbdict::execUTIL_PREPARE_REF); - - addRecSignal(GSN_UTIL_EXECUTE_CONF, &Dbdict::execUTIL_EXECUTE_CONF); - addRecSignal(GSN_UTIL_EXECUTE_REF, &Dbdict::execUTIL_EXECUTE_REF); - - addRecSignal(GSN_UTIL_RELEASE_CONF, &Dbdict::execUTIL_RELEASE_CONF); - addRecSignal(GSN_UTIL_RELEASE_REF, &Dbdict::execUTIL_RELEASE_REF); - - // Event signals - addRecSignal(GSN_CREATE_EVNT_REQ, &Dbdict::execCREATE_EVNT_REQ); - addRecSignal(GSN_CREATE_EVNT_CONF, &Dbdict::execCREATE_EVNT_CONF); - addRecSignal(GSN_CREATE_EVNT_REF, &Dbdict::execCREATE_EVNT_REF); - - addRecSignal(GSN_CREATE_SUBID_CONF, &Dbdict::execCREATE_SUBID_CONF); - addRecSignal(GSN_CREATE_SUBID_REF, &Dbdict::execCREATE_SUBID_REF); - - addRecSignal(GSN_SUB_CREATE_CONF, &Dbdict::execSUB_CREATE_CONF); - addRecSignal(GSN_SUB_CREATE_REF, &Dbdict::execSUB_CREATE_REF); - - addRecSignal(GSN_SUB_START_REQ, &Dbdict::execSUB_START_REQ); - addRecSignal(GSN_SUB_START_CONF, &Dbdict::execSUB_START_CONF); - addRecSignal(GSN_SUB_START_REF, &Dbdict::execSUB_START_REF); - - addRecSignal(GSN_SUB_STOP_REQ, &Dbdict::execSUB_STOP_REQ); - addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF); - addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF); - - addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ); - - addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ); - addRecSignal(GSN_SUB_REMOVE_CONF, &Dbdict::execSUB_REMOVE_CONF); - addRecSignal(GSN_SUB_REMOVE_REF, &Dbdict::execSUB_REMOVE_REF); - - // Trigger signals - addRecSignal(GSN_CREATE_TRIG_REQ, &Dbdict::execCREATE_TRIG_REQ); - addRecSignal(GSN_CREATE_TRIG_CONF, &Dbdict::execCREATE_TRIG_CONF); - addRecSignal(GSN_CREATE_TRIG_REF, &Dbdict::execCREATE_TRIG_REF); - addRecSignal(GSN_ALTER_TRIG_REQ, &Dbdict::execALTER_TRIG_REQ); - addRecSignal(GSN_ALTER_TRIG_CONF, &Dbdict::execALTER_TRIG_CONF); - addRecSignal(GSN_ALTER_TRIG_REF, &Dbdict::execALTER_TRIG_REF); - addRecSignal(GSN_DROP_TRIG_REQ, &Dbdict::execDROP_TRIG_REQ); - addRecSignal(GSN_DROP_TRIG_CONF, &Dbdict::execDROP_TRIG_CONF); - addRecSignal(GSN_DROP_TRIG_REF, &Dbdict::execDROP_TRIG_REF); - - // Received signals - addRecSignal(GSN_HOT_SPAREREP, &Dbdict::execHOT_SPAREREP); - addRecSignal(GSN_GET_SCHEMA_INFOREQ, &Dbdict::execGET_SCHEMA_INFOREQ); - addRecSignal(GSN_SCHEMA_INFO, &Dbdict::execSCHEMA_INFO); - addRecSignal(GSN_SCHEMA_INFOCONF, &Dbdict::execSCHEMA_INFOCONF); - addRecSignal(GSN_DICTSTARTREQ, &Dbdict::execDICTSTARTREQ); - addRecSignal(GSN_READ_NODESCONF, &Dbdict::execREAD_NODESCONF); - addRecSignal(GSN_FSOPENCONF, &Dbdict::execFSOPENCONF); - addRecSignal(GSN_FSOPENREF, &Dbdict::execFSOPENREF, true); - addRecSignal(GSN_FSCLOSECONF, &Dbdict::execFSCLOSECONF); - addRecSignal(GSN_FSWRITECONF, &Dbdict::execFSWRITECONF); - addRecSignal(GSN_FSREADCONF, &Dbdict::execFSREADCONF); - addRecSignal(GSN_FSREADREF, &Dbdict::execFSREADREF, true); - addRecSignal(GSN_LQHFRAGCONF, &Dbdict::execLQHFRAGCONF); - addRecSignal(GSN_LQHADDATTCONF, &Dbdict::execLQHADDATTCONF); - addRecSignal(GSN_LQHADDATTREF, &Dbdict::execLQHADDATTREF); - addRecSignal(GSN_LQHFRAGREF, &Dbdict::execLQHFRAGREF); - addRecSignal(GSN_NDB_STTOR, &Dbdict::execNDB_STTOR); - addRecSignal(GSN_READ_CONFIG_REQ, &Dbdict::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_STTOR, &Dbdict::execSTTOR); - addRecSignal(GSN_TC_SCHVERCONF, &Dbdict::execTC_SCHVERCONF); - addRecSignal(GSN_NODE_FAILREP, &Dbdict::execNODE_FAILREP); - addRecSignal(GSN_INCL_NODEREQ, &Dbdict::execINCL_NODEREQ); - addRecSignal(GSN_API_FAILREQ, &Dbdict::execAPI_FAILREQ); - - 
addRecSignal(GSN_WAIT_GCP_REF, &Dbdict::execWAIT_GCP_REF); - addRecSignal(GSN_WAIT_GCP_CONF, &Dbdict::execWAIT_GCP_CONF); - - addRecSignal(GSN_LIST_TABLES_REQ, &Dbdict::execLIST_TABLES_REQ); - - addRecSignal(GSN_DROP_TABLE_REQ, &Dbdict::execDROP_TABLE_REQ); - - addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdict::execPREP_DROP_TAB_REQ); - addRecSignal(GSN_PREP_DROP_TAB_REF, &Dbdict::execPREP_DROP_TAB_REF); - addRecSignal(GSN_PREP_DROP_TAB_CONF, &Dbdict::execPREP_DROP_TAB_CONF); - - addRecSignal(GSN_DROP_TAB_REQ, &Dbdict::execDROP_TAB_REQ); - addRecSignal(GSN_DROP_TAB_REF, &Dbdict::execDROP_TAB_REF); - addRecSignal(GSN_DROP_TAB_CONF, &Dbdict::execDROP_TAB_CONF); - - addRecSignal(GSN_CREATE_FILE_REQ, &Dbdict::execCREATE_FILE_REQ); - addRecSignal(GSN_CREATE_FILEGROUP_REQ, &Dbdict::execCREATE_FILEGROUP_REQ); - - addRecSignal(GSN_DROP_FILE_REQ, &Dbdict::execDROP_FILE_REQ); - addRecSignal(GSN_DROP_FILE_REF, &Dbdict::execDROP_FILE_REF); - addRecSignal(GSN_DROP_FILE_CONF, &Dbdict::execDROP_FILE_CONF); - - addRecSignal(GSN_DROP_FILEGROUP_REQ, &Dbdict::execDROP_FILEGROUP_REQ); - addRecSignal(GSN_DROP_FILEGROUP_REF, &Dbdict::execDROP_FILEGROUP_REF); - addRecSignal(GSN_DROP_FILEGROUP_CONF, &Dbdict::execDROP_FILEGROUP_CONF); - - addRecSignal(GSN_CREATE_OBJ_REQ, &Dbdict::execCREATE_OBJ_REQ); - addRecSignal(GSN_CREATE_OBJ_REF, &Dbdict::execCREATE_OBJ_REF); - addRecSignal(GSN_CREATE_OBJ_CONF, &Dbdict::execCREATE_OBJ_CONF); - addRecSignal(GSN_DROP_OBJ_REQ, &Dbdict::execDROP_OBJ_REQ); - addRecSignal(GSN_DROP_OBJ_REF, &Dbdict::execDROP_OBJ_REF); - addRecSignal(GSN_DROP_OBJ_CONF, &Dbdict::execDROP_OBJ_CONF); - - addRecSignal(GSN_CREATE_FILE_REF, &Dbdict::execCREATE_FILE_REF); - addRecSignal(GSN_CREATE_FILE_CONF, &Dbdict::execCREATE_FILE_CONF); - addRecSignal(GSN_CREATE_FILEGROUP_REF, &Dbdict::execCREATE_FILEGROUP_REF); - addRecSignal(GSN_CREATE_FILEGROUP_CONF, &Dbdict::execCREATE_FILEGROUP_CONF); - - addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Dbdict::execBACKUP_FRAGMENT_REQ); - - addRecSignal(GSN_DICT_COMMIT_REQ, &Dbdict::execDICT_COMMIT_REQ); - addRecSignal(GSN_DICT_COMMIT_REF, &Dbdict::execDICT_COMMIT_REF); - addRecSignal(GSN_DICT_COMMIT_CONF, &Dbdict::execDICT_COMMIT_CONF); - - addRecSignal(GSN_DICT_ABORT_REQ, &Dbdict::execDICT_ABORT_REQ); - addRecSignal(GSN_DICT_ABORT_REF, &Dbdict::execDICT_ABORT_REF); - addRecSignal(GSN_DICT_ABORT_CONF, &Dbdict::execDICT_ABORT_CONF); - - addRecSignal(GSN_DICT_LOCK_REQ, &Dbdict::execDICT_LOCK_REQ); - addRecSignal(GSN_DICT_UNLOCK_ORD, &Dbdict::execDICT_UNLOCK_ORD); -}//Dbdict::Dbdict() - -Dbdict::~Dbdict() -{ -}//Dbdict::~Dbdict() - -BLOCK_FUNCTIONS(Dbdict) - -void Dbdict::initCommonData() -{ -/* ---------------------------------------------------------------- */ -// Initialise all common variables. 
-/* ---------------------------------------------------------------- */ - initRetrieveRecord(0, 0, 0); - initSchemaRecord(); - initRestartRecord(); - initSendSchemaRecord(); - initReadTableRecord(); - initWriteTableRecord(); - initReadSchemaRecord(); - initWriteSchemaRecord(); - - c_masterNodeId = ZNIL; - c_numberNode = 0; - c_noNodesFailed = 0; - c_failureNr = 0; - c_blockState = BS_IDLE; - c_packTable.m_state = PackTable::PTS_IDLE; - c_startPhase = 0; - c_restartType = 255; //Ensure not used restartType - c_tabinfoReceived = 0; - c_initialStart = false; - c_systemRestart = false; - c_initialNodeRestart = false; - c_nodeRestart = false; -}//Dbdict::initCommonData() - -void Dbdict::initRecords() -{ - initNodeRecords(); - initPageRecords(); - initTableRecords(); - initTriggerRecords(); -}//Dbdict::initRecords() - -void Dbdict::initSendSchemaRecord() -{ - c_sendSchemaRecord.noOfWords = (Uint32)-1; - c_sendSchemaRecord.pageId = RNIL; - c_sendSchemaRecord.noOfWordsCurrentlySent = 0; - c_sendSchemaRecord.noOfSignalsSentSinceDelay = 0; - c_sendSchemaRecord.inUse = false; - //c_sendSchemaRecord.sendSchemaState = SendSchemaRecord::IDLE; -}//initSendSchemaRecord() - -void Dbdict::initReadTableRecord() -{ - c_readTableRecord.no_of_words= 0; - c_readTableRecord.pageId = RNIL; - c_readTableRecord.tableId = ZNIL; - c_readTableRecord.inUse = false; -}//initReadTableRecord() - -void Dbdict::initWriteTableRecord() -{ - c_writeTableRecord.no_of_words= 0; - c_writeTableRecord.pageId = RNIL; - c_writeTableRecord.noOfTableFilesHandled = 3; - c_writeTableRecord.tableId = ZNIL; - c_writeTableRecord.tableWriteState = WriteTableRecord::IDLE; -}//initWriteTableRecord() - -void Dbdict::initReadSchemaRecord() -{ - c_readSchemaRecord.pageId = RNIL; - c_readSchemaRecord.schemaReadState = ReadSchemaRecord::IDLE; -}//initReadSchemaRecord() - -void Dbdict::initWriteSchemaRecord() -{ - c_writeSchemaRecord.inUse = false; - c_writeSchemaRecord.pageId = RNIL; - c_writeSchemaRecord.noOfSchemaFilesHandled = 3; -}//initWriteSchemaRecord() - -void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode) -{ - c_retrieveRecord.busyState = false; - c_retrieveRecord.blockRef = 0; - c_retrieveRecord.m_senderData = RNIL; - c_retrieveRecord.tableId = RNIL; - c_retrieveRecord.currentSent = 0; - c_retrieveRecord.retrievedNoOfPages = 0; - c_retrieveRecord.retrievedNoOfWords = 0; - c_retrieveRecord.m_useLongSig = false; -}//initRetrieveRecord() - -void Dbdict::initSchemaRecord() -{ - c_schemaRecord.schemaPage = RNIL; - c_schemaRecord.oldSchemaPage = RNIL; -}//Dbdict::initSchemaRecord() - -void Dbdict::initRestartRecord() -{ - c_restartRecord.gciToRestart = 0; - c_restartRecord.activeTable = ZNIL; - c_restartRecord.m_pass = 0; -}//Dbdict::initRestartRecord() - -void Dbdict::initNodeRecords() -{ - jam(); - for (unsigned i = 1; i < MAX_NODES; i++) { - NodeRecordPtr nodePtr; - c_nodes.getPtr(nodePtr, i); - nodePtr.p->hotSpare = false; - nodePtr.p->nodeState = NodeRecord::API_NODE; - }//for -}//Dbdict::initNodeRecords() - -void Dbdict::initPageRecords() -{ - c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION; - ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1)); - c_schemaRecord.schemaPage = 0; - c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES; -}//Dbdict::initPageRecords() - -void Dbdict::initTableRecords() -{ - TableRecordPtr tablePtr; - while (1) { - jam(); - refresh_watch_dog(); - c_tableRecordPool.seize(tablePtr); - if (tablePtr.i == RNIL) { - jam(); - break; - }//if - 
initialiseTableRecord(tablePtr); - }//while -}//Dbdict::initTableRecords() - -void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) -{ - new (tablePtr.p) TableRecord(); - tablePtr.p->activePage = RNIL; - tablePtr.p->filePtr[0] = RNIL; - tablePtr.p->filePtr[1] = RNIL; - tablePtr.p->firstPage = RNIL; - tablePtr.p->tableId = tablePtr.i; - tablePtr.p->tableVersion = (Uint32)-1; - tablePtr.p->tabState = TableRecord::NOT_DEFINED; - tablePtr.p->tabReturnState = TableRecord::TRS_IDLE; - tablePtr.p->fragmentType = DictTabInfo::AllNodesSmallTable; - tablePtr.p->gciTableCreated = 0; - tablePtr.p->noOfAttributes = ZNIL; - tablePtr.p->noOfNullAttr = 0; - tablePtr.p->fragmentCount = 0; - /* - tablePtr.p->lh3PageIndexBits = 0; - tablePtr.p->lh3DistrBits = 0; - tablePtr.p->lh3PageBits = 6; - */ - tablePtr.p->kValue = 6; - tablePtr.p->localKeyLen = 1; - tablePtr.p->maxLoadFactor = 80; - tablePtr.p->minLoadFactor = 70; - tablePtr.p->noOfPrimkey = 1; - tablePtr.p->tupKeyLength = 1; - tablePtr.p->maxRowsLow = 0; - tablePtr.p->maxRowsHigh = 0; - tablePtr.p->defaultNoPartFlag = true; - tablePtr.p->linearHashFlag = true; - tablePtr.p->m_bits = 0; - tablePtr.p->minRowsLow = 0; - tablePtr.p->minRowsHigh = 0; - tablePtr.p->singleUserMode = 0; - tablePtr.p->tableType = DictTabInfo::UserTable; - tablePtr.p->primaryTableId = RNIL; - // volatile elements - tablePtr.p->indexState = TableRecord::IS_UNDEFINED; - tablePtr.p->insertTriggerId = RNIL; - tablePtr.p->updateTriggerId = RNIL; - tablePtr.p->deleteTriggerId = RNIL; - tablePtr.p->customTriggerId = RNIL; - tablePtr.p->buildTriggerId = RNIL; - tablePtr.p->indexLocal = 0; -}//Dbdict::initialiseTableRecord() - -void Dbdict::initTriggerRecords() -{ - TriggerRecordPtr triggerPtr; - while (1) { - jam(); - refresh_watch_dog(); - c_triggerRecordPool.seize(triggerPtr); - if (triggerPtr.i == RNIL) { - jam(); - break; - }//if - initialiseTriggerRecord(triggerPtr); - }//while -} - -void Dbdict::initialiseTriggerRecord(TriggerRecordPtr triggerPtr) -{ - new (triggerPtr.p) TriggerRecord(); - triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED; - triggerPtr.p->triggerLocal = 0; - triggerPtr.p->triggerId = RNIL; - triggerPtr.p->tableId = RNIL; - triggerPtr.p->triggerType = (TriggerType::Value)~0; - triggerPtr.p->triggerActionTime = (TriggerActionTime::Value)~0; - triggerPtr.p->triggerEvent = (TriggerEvent::Value)~0; - triggerPtr.p->monitorReplicas = false; - triggerPtr.p->monitorAllAttributes = false; - triggerPtr.p->attributeMask.clear(); - triggerPtr.p->indexId = RNIL; -} - -Uint32 Dbdict::getFsConnRecord() -{ - FsConnectRecordPtr fsPtr; - c_fsConnectRecordPool.seize(fsPtr); - ndbrequire(fsPtr.i != RNIL); - fsPtr.p->filePtr = (Uint32)-1; - fsPtr.p->ownerPtr = RNIL; - fsPtr.p->fsState = FsConnectRecord::IDLE; - return fsPtr.i; -}//Dbdict::getFsConnRecord() - -/* - * Search schemafile for free entry. Its index is used as 'logical id' - * of new disk-stored object. 
- */ -Uint32 Dbdict::getFreeObjId(Uint32 minId) -{ - const XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - Uint32 noOfPages = xsf->noOfPages; - Uint32 n, i; - for (n = 0; n < noOfPages; n++) { - jam(); - const SchemaFile * sf = &xsf->schemaPage[n]; - for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) { - const SchemaFile::TableEntry& te = sf->TableEntries[i]; - if (te.m_tableState == (Uint32)SchemaFile::INIT || - te.m_tableState == (Uint32)SchemaFile::DROP_TABLE_COMMITTED) { - // minId is obsolete anyway - if (minId <= n * NDB_SF_PAGE_ENTRIES + i) - return n * NDB_SF_PAGE_ENTRIES + i; - } - } - } - return RNIL; -} - -Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId) -{ - Uint32 minId = (primaryTableId == RNIL ? 0 : primaryTableId + 1); - Uint32 i = getFreeObjId(minId); - if (i == RNIL) { - jam(); - return RNIL; - } - if (i >= c_tableRecordPool.getSize()) { - jam(); - return RNIL; - } - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, i); - ndbrequire(tablePtr.p->tabState == TableRecord::NOT_DEFINED); - initialiseTableRecord(tablePtr); - tablePtr.p->tabState = TableRecord::DEFINING; - return i; -} - -Uint32 Dbdict::getFreeTriggerRecord() -{ - const Uint32 size = c_triggerRecordPool.getSize(); - TriggerRecordPtr triggerPtr; - for (triggerPtr.i = 0; triggerPtr.i < size; triggerPtr.i++) { - jam(); - c_triggerRecordPool.getPtr(triggerPtr); - if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) { - jam(); - initialiseTriggerRecord(triggerPtr); - return triggerPtr.i; - } - } - return RNIL; -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: START/RESTART HANDLING ------------------------ */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code that is common for all */ -/* start/restart types. */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// This is sent as the first signal during start/restart. -/* ---------------------------------------------------------------- */ -void Dbdict::execSTTOR(Signal* signal) -{ - jamEntry(); - c_startPhase = signal->theData[1]; - switch (c_startPhase) { - case 1: - break; - case 3: - c_restartType = signal->theData[7]; /* valid if 3 */ - ndbrequire(c_restartType == NodeState::ST_INITIAL_START || - c_restartType == NodeState::ST_SYSTEM_RESTART || - c_restartType == NodeState::ST_INITIAL_NODE_RESTART || - c_restartType == NodeState::ST_NODE_RESTART); - break; - } - sendSTTORRY(signal); -}//execSTTOR() - -void Dbdict::sendSTTORRY(Signal* signal) -{ - signal->theData[0] = 0; /* garbage SIGNAL KEY */ - signal->theData[1] = 0; /* garbage SIGNAL VERSION NUMBER */ - signal->theData[2] = 0; /* garbage */ - signal->theData[3] = 1; /* first wanted start phase */ - signal->theData[4] = 3; /* get type of start */ - signal->theData[5] = ZNOMOREPHASES; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB); -} - -/* ---------------------------------------------------------------- */ -// We receive information about sizes of records. 
-/* ---------------------------------------------------------------- */ -void Dbdict::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - Uint32 attributesize, tablerecSize; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, - &c_maxNoOfTriggers)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,&attributesize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &tablerecSize)); - - c_attributeRecordPool.setSize(attributesize); - c_attributeRecordHash.setSize(64); - c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE); - c_nodes.setSize(MAX_NODES); - c_pageRecordArray.setSize(ZNUMBER_OF_PAGES); - c_schemaPageRecordArray.setSize(2 * NDB_SF_MAX_PAGES); - c_tableRecordPool.setSize(tablerecSize); - g_key_descriptor_pool.setSize(tablerecSize); - c_triggerRecordPool.setSize(c_maxNoOfTriggers); - - c_obj_pool.setSize(tablerecSize+c_maxNoOfTriggers); - c_obj_hash.setSize((tablerecSize+c_maxNoOfTriggers+1)/2); - - Pool_context pc; - pc.m_block = this; - - c_file_hash.setSize(16); - c_filegroup_hash.setSize(16); - - c_file_pool.init(RT_DBDICT_FILE, pc); - c_filegroup_pool.init(RT_DBDICT_FILEGROUP, pc); - - c_opRecordPool.setSize(256); // XXX need config params - c_opCreateTable.setSize(8); - c_opDropTable.setSize(8); - c_opCreateIndex.setSize(8); - c_opCreateEvent.setSize(2); - c_opSubEvent.setSize(2); - c_opDropEvent.setSize(2); - c_opSignalUtil.setSize(8); - c_opDropIndex.setSize(8); - c_opAlterIndex.setSize(8); - c_opBuildIndex.setSize(8); - c_opCreateTrigger.setSize(8); - c_opDropTrigger.setSize(8); - c_opAlterTrigger.setSize(8); - - c_dictLockPool.setSize(32); - - // Initialize schema file copies - c_schemaFile[0].schemaPage = - (SchemaFile*)c_schemaPageRecordArray.getPtr(0 * NDB_SF_MAX_PAGES); - c_schemaFile[0].noOfPages = 0; - c_schemaFile[1].schemaPage = - (SchemaFile*)c_schemaPageRecordArray.getPtr(1 * NDB_SF_MAX_PAGES); - c_schemaFile[1].noOfPages = 0; - - c_schemaOp.setSize(8); - //c_opDropObj.setSize(8); - c_Trans.setSize(8); - - Uint32 rps = 0; - rps += tablerecSize * (MAX_TAB_NAME_SIZE + MAX_FRM_DATA_SIZE); - rps += attributesize * (MAX_ATTR_NAME_SIZE + MAX_ATTR_DEFAULT_VALUE_SIZE); - rps += c_maxNoOfTriggers * MAX_TAB_NAME_SIZE; - rps += (10 + 10) * MAX_TAB_NAME_SIZE; - - Uint32 sm = 5; - ndb_mgm_get_int_parameter(p, CFG_DB_STRING_MEMORY, &sm); - if (sm == 0) - sm = 5; - - Uint32 sb = 0; - if (sm < 100) - { - sb = (rps * sm) / 100; - } - else - { - sb = sm; - } - - c_rope_pool.setSize(sb/28 + 100); - - // Initialize BAT for interface to file system - NewVARIABLE* bat = allocateBat(2); - bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0]; - bat[0].nrr = 2 * NDB_SF_MAX_PAGES; - bat[0].ClusterSize = NDB_SF_PAGE_SIZE; - bat[0].bits.q = NDB_SF_PAGE_SIZE_IN_WORDS_LOG2; - bat[0].bits.v = 5; // 32 bits per element - bat[1].WA = &c_pageRecordArray.getPtr(0)->word[0]; - bat[1].nrr = ZNUMBER_OF_PAGES; - bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4; - bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements - bat[1].bits.v = 5; // 32 bits per element - - initCommonData(); - initRecords(); - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, 
GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - - { - Ptr<DictObject> ptr; - SLList<DictObject> objs(c_obj_pool); - while(objs.seize(ptr)) - new (ptr.p) DictObject(); - objs.release(); - } -}//execSIZEALT_REP() - -/* ---------------------------------------------------------------- */ -// Start phase signals sent by CNTR. We reply with NDB_STTORRY when -// we completed this phase. -/* ---------------------------------------------------------------- */ -void Dbdict::execNDB_STTOR(Signal* signal) -{ - jamEntry(); - c_startPhase = signal->theData[2]; - const Uint32 restartType = signal->theData[3]; - if (restartType == NodeState::ST_INITIAL_START) { - jam(); - c_initialStart = true; - } else if (restartType == NodeState::ST_SYSTEM_RESTART) { - jam(); - c_systemRestart = true; - } else if (restartType == NodeState::ST_INITIAL_NODE_RESTART) { - jam(); - c_initialNodeRestart = true; - } else if (restartType == NodeState::ST_NODE_RESTART) { - jam(); - c_nodeRestart = true; - } else { - ndbrequire(false); - }//if - switch (c_startPhase) { - case 1: - jam(); - initSchemaFile(signal); - break; - case 3: - jam(); - signal->theData[0] = reference(); - sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB); - break; - case 6: - jam(); - c_initialStart = false; - c_systemRestart = false; - c_initialNodeRestart = false; - c_nodeRestart = false; - sendNDB_STTORRY(signal); - break; - case 7: - // uses c_restartType - if(restartType == NodeState::ST_SYSTEM_RESTART && - c_masterNodeId == getOwnNodeId()){ - rebuildIndexes(signal, 0); - return; - } - sendNDB_STTORRY(signal); - break; - default: - jam(); - sendNDB_STTORRY(signal); - break; - }//switch -}//execNDB_STTOR() - -void Dbdict::sendNDB_STTORRY(Signal* signal) -{ - signal->theData[0] = reference(); - sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB); - return; -}//sendNDB_STTORRY() - -/* ---------------------------------------------------------------- */ -// We receive the information about which nodes that are up and down. -/* ---------------------------------------------------------------- */ -void Dbdict::execREAD_NODESCONF(Signal* signal) -{ - jamEntry(); - - ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; - c_numberNode = readNodes->noOfNodes; - c_masterNodeId = readNodes->masterNodeId; - - c_noNodesFailed = 0; - c_aliveNodes.clear(); - for (unsigned i = 1; i < MAX_NDB_NODES; i++) { - jam(); - NodeRecordPtr nodePtr; - c_nodes.getPtr(nodePtr, i); - - if (NodeBitmask::get(readNodes->allNodes, i)) { - jam(); - nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE; - if (NodeBitmask::get(readNodes->inactiveNodes, i)) { - jam(); - /**------------------------------------------------------------------- - * - * THIS NODE IS DEFINED IN THE CLUSTER BUT IS NOT ALIVE CURRENTLY. - * WE ADD THE NODE TO THE SET OF FAILED NODES AND ALSO SET THE - * BLOCKSTATE TO BUSY TO AVOID ADDING TABLES WHILE NOT ALL NODES ARE - * ALIVE. - *------------------------------------------------------------------*/ - nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD; - c_noNodesFailed++; - } else { - c_aliveNodes.set(i); - } - }//if - }//for - sendNDB_STTORRY(signal); -}//execREAD_NODESCONF() - -/* ---------------------------------------------------------------- */ -// HOT_SPAREREP informs DBDICT about which nodes that have become -// hot spare nodes.
-/* ---------------------------------------------------------------- */ -void Dbdict::execHOT_SPAREREP(Signal* signal) -{ - Uint32 hotSpareNodes = 0; - jamEntry(); - HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0]; - for (unsigned i = 1; i < MAX_NDB_NODES; i++) { - if (NodeBitmask::get(hotSpare->theHotSpareNodes, i)) { - NodeRecordPtr nodePtr; - c_nodes.getPtr(nodePtr, i); - nodePtr.p->hotSpare = true; - hotSpareNodes++; - }//if - }//for - ndbrequire(hotSpareNodes == hotSpare->noHotSpareNodes); - c_noHotSpareNodes = hotSpareNodes; - return; -}//execHOT_SPAREREP() - -void Dbdict::initSchemaFile(Signal* signal) -{ - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) - / NDB_SF_PAGE_ENTRIES; - initSchemaFile(xsf, 0, xsf->noOfPages, true); - // init alt copy too for INR - XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0]; - oldxsf->noOfPages = xsf->noOfPages; - memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize); - - if (c_initialStart || c_initialNodeRestart) { - jam(); - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = true; - c_writeSchemaRecord.firstPage = 0; - c_writeSchemaRecord.noOfPages = xsf->noOfPages; - - c_writeSchemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::initSchemaFile_conf); - - startWriteSchemaFile(signal); - } else if (c_systemRestart || c_nodeRestart) { - jam(); - ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE); - c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage; - c_readSchemaRecord.firstPage = 0; - c_readSchemaRecord.noOfPages = 1; - c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ_HEAD; - startReadSchemaFile(signal); - } else { - ndbrequire(false); - }//if -}//Dbdict::initSchemaFile() - -void -Dbdict::initSchemaFile_conf(Signal* signal, Uint32 callbackData, Uint32 rv){ - jam(); - sendNDB_STTORRY(signal); -} - -void -Dbdict::activateIndexes(Signal* signal, Uint32 i) -{ - AlterIndxReq* req = (AlterIndxReq*)signal->getDataPtrSend(); - TableRecordPtr tablePtr; - for (; i < c_tableRecordPool.getSize(); i++) { - tablePtr.i = i; - c_tableRecordPool.getPtr(tablePtr); - if (tablePtr.p->tabState != TableRecord::DEFINED) - continue; - if (! 
tablePtr.p->isIndex()) - continue; - jam(); - req->setUserRef(reference()); - req->setConnectionPtr(i); - req->setTableId(tablePtr.p->primaryTableId); - req->setIndexId(tablePtr.i); - req->setIndexVersion(tablePtr.p->tableVersion); - req->setOnline(true); - if (c_restartType == NodeState::ST_SYSTEM_RESTART) { - if (c_masterNodeId != getOwnNodeId()) - continue; - // from file index state is not defined currently - req->setRequestType(AlterIndxReq::RT_SYSTEMRESTART); - req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD); - } - else if ( - c_restartType == NodeState::ST_NODE_RESTART || - c_restartType == NodeState::ST_INITIAL_NODE_RESTART) { - // from master index must be online - if (tablePtr.p->indexState != TableRecord::IS_ONLINE) - continue; - req->setRequestType(AlterIndxReq::RT_NODERESTART); - // activate locally, rebuild not needed - req->addRequestFlag((Uint32)RequestFlag::RF_LOCAL); - req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD); - } else { - ndbrequire(false); - } - sendSignal(reference(), GSN_ALTER_INDX_REQ, - signal, AlterIndxReq::SignalLength, JBB); - return; - } - signal->theData[0] = reference(); - sendSignal(c_restartRecord.returnBlockRef, GSN_DICTSTARTCONF, - signal, 1, JBB); -} - -void -Dbdict::rebuildIndexes(Signal* signal, Uint32 i){ - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - - TableRecordPtr indexPtr; - for (; i < c_tableRecordPool.getSize(); i++) { - indexPtr.i = i; - c_tableRecordPool.getPtr(indexPtr); - if (indexPtr.p->tabState != TableRecord::DEFINED) - continue; - if (! indexPtr.p->isIndex()) - continue; - - jam(); - - req->setUserRef(reference()); - req->setConnectionPtr(i); - req->setRequestType(BuildIndxReq::RT_SYSTEMRESTART); - req->setBuildId(0); // not used - req->setBuildKey(0); // not used - req->setIndexType(indexPtr.p->tableType); - req->setIndexId(indexPtr.i); - req->setTableId(indexPtr.p->primaryTableId); - req->setParallelism(16); - - // from file index state is not defined currently - if (indexPtr.p->m_bits & TableRecord::TR_Logged) { - // rebuild not needed - req->addRequestFlag((Uint32)RequestFlag::RF_NOBUILD); - } - - // send - sendSignal(reference(), GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); - return; - } - sendNDB_STTORRY(signal); -} - - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: SYSTEM RESTART MODULE ------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains code specific for system restart */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// DIH asks DICT to read in table data from disk during system -// restart. DIH also asks DICT to send information about which -// tables that should be started as part of this system restart. -// DICT will also activate the tables in TC as part of this process. 
-/* ---------------------------------------------------------------- */ -void Dbdict::execDICTSTARTREQ(Signal* signal) -{ - jamEntry(); - c_restartRecord.gciToRestart = signal->theData[0]; - c_restartRecord.returnBlockRef = signal->theData[1]; - if (c_nodeRestart || c_initialNodeRestart) { - jam(); - - CRASH_INSERTION(6000); - - BlockReference dictRef = calcDictBlockRef(c_masterNodeId); - signal->theData[0] = getOwnNodeId(); - sendSignal(dictRef, GSN_GET_SCHEMA_INFOREQ, signal, 1, JBB); - return; - } - ndbrequire(c_systemRestart); - ndbrequire(c_masterNodeId == getOwnNodeId()); - - c_schemaRecord.m_callback.m_callbackData = 0; - c_schemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete); - - c_restartRecord.m_pass = 0; - c_restartRecord.activeTable = 0; - c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage; // ugly - checkSchemaStatus(signal); -}//execDICTSTARTREQ() - -void -Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - - c_schemaRecord.schemaPage = 0; // ugly - XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0]; - ndbrequire(oldxsf->noOfPages != 0); - - LinearSectionPtr ptr[3]; - ptr[0].p = (Uint32*)&oldxsf->schemaPage[0]; - ptr[0].sz = oldxsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS; - - c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - - rg.m_nodes.clear(getOwnNodeId()); - Callback c = { 0, 0 }; - sendFragmentedSignal(rg, - GSN_SCHEMA_INFO, - signal, - 1, //SchemaInfo::SignalLength, - JBB, - ptr, - 1, - c); - - XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - newxsf->noOfPages = oldxsf->noOfPages; - memcpy(&newxsf->schemaPage[0], &oldxsf->schemaPage[0], - oldxsf->noOfPages * NDB_SF_PAGE_SIZE); - - signal->theData[0] = getOwnNodeId(); - sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB); -} - -void -Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){ - - const Uint32 ref = signal->getSendersBlockRef(); - //const Uint32 senderData = signal->theData[0]; - - ndbrequire(c_sendSchemaRecord.inUse == false); - c_sendSchemaRecord.inUse = true; - - LinearSectionPtr ptr[3]; - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - ndbrequire(xsf->noOfPages != 0); - - ptr[0].p = (Uint32*)&xsf->schemaPage[0]; - ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS; - - Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 }; - sendFragmentedSignal(ref, - GSN_SCHEMA_INFO, - signal, - 1, //GetSchemaInfoConf::SignalLength, - JBB, - ptr, - 1, - c); -}//Dbdict::execGET_SCHEMA_INFOREQ() - -void -Dbdict::sendSchemaComplete(Signal * signal, - Uint32 callbackData, - Uint32 returnCode){ - ndbrequire(c_sendSchemaRecord.inUse == true); - c_sendSchemaRecord.inUse = false; - -} - - -/* ---------------------------------------------------------------- */ -// We receive the schema info from master as part of all restarts -// except the initial start where no tables exists. 
-/* ---------------------------------------------------------------- */ -void Dbdict::execSCHEMA_INFO(Signal* signal) -{ - jamEntry(); - if(!assembleFragments(signal)){ - jam(); - return; - } - - if(getNodeState().getNodeRestartInProgress()){ - CRASH_INSERTION(6001); - } - - SegmentedSectionPtr schemaDataPtr; - signal->getSection(schemaDataPtr, 0); - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - ndbrequire(schemaDataPtr.sz % NDB_SF_PAGE_SIZE_IN_WORDS == 0); - xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS; - copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr); - releaseSections(signal); - - SchemaFile * sf0 = &xsf->schemaPage[0]; - if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) { - bool ok = convertSchemaFileTo_5_0_6(xsf); - ndbrequire(ok); - } - - validateChecksum(xsf); - - XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0]; - resizeSchemaFile(xsf, oldxsf->noOfPages); - - ndbrequire(signal->getSendersBlockRef() != reference()); - - /* ---------------------------------------------------------------- */ - // Synchronise our view on data with other nodes in the cluster. - // This is an important part of restart handling where we will handle - // cases where the table have been added but only partially, where - // tables have been deleted but not completed the deletion yet and - // other scenarios needing synchronisation. - /* ---------------------------------------------------------------- */ - c_schemaRecord.m_callback.m_callbackData = 0; - c_schemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restart_checkSchemaStatusComplete); - - c_restartRecord.m_pass= 0; - c_restartRecord.activeTable = 0; - checkSchemaStatus(signal); -}//execSCHEMA_INFO() - -void -Dbdict::restart_checkSchemaStatusComplete(Signal * signal, - Uint32 callbackData, - Uint32 returnCode){ - - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = true; - c_writeSchemaRecord.firstPage = 0; - c_writeSchemaRecord.noOfPages = xsf->noOfPages; - c_writeSchemaRecord.m_callback.m_callbackData = 0; - c_writeSchemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restart_writeSchemaConf); - - for(Uint32 i = 0; i < xsf->noOfPages; i++) - computeChecksum(xsf, i); - - startWriteSchemaFile(signal); -} - -void -Dbdict::restart_writeSchemaConf(Signal * signal, - Uint32 callbackData, - Uint32 returnCode){ - - if(c_systemRestart){ - jam(); - signal->theData[0] = getOwnNodeId(); - sendSignal(calcDictBlockRef(c_masterNodeId), GSN_SCHEMA_INFOCONF, - signal, 1, JBB); - return; - } - - ndbrequire(c_nodeRestart || c_initialNodeRestart); - c_blockState = BS_IDLE; - activateIndexes(signal, 0); - return; -} - -void Dbdict::execSCHEMA_INFOCONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - -/* ---------------------------------------------------------------- */ -// This signal is received in the master as part of system restart -// from all nodes (including the master) after they have synchronised -// their data with the master node's schema information.
-/* ---------------------------------------------------------------- */ - const Uint32 nodeId = signal->theData[0]; - c_sendSchemaRecord.m_SCHEMAINFO_Counter.clearWaitingFor(nodeId); - - if (!c_sendSchemaRecord.m_SCHEMAINFO_Counter.done()){ - jam(); - return; - }//if - activateIndexes(signal, 0); -}//execSCHEMA_INFOCONF() - -static bool -checkSchemaStatus(Uint32 tableType, Uint32 pass) -{ - switch(tableType){ - case DictTabInfo::UndefTableType: - return true; - case DictTabInfo::HashIndexTrigger: - case DictTabInfo::SubscriptionTrigger: - case DictTabInfo::ReadOnlyConstraint: - case DictTabInfo::IndexTrigger: - return false; - case DictTabInfo::LogfileGroup: - return pass == 0 || pass == 9 || pass == 10; - case DictTabInfo::Tablespace: - return pass == 1 || pass == 8 || pass == 11; - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - return pass == 2 || pass == 7 || pass == 12; - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - return /* pass == 3 || pass == 6 || */ pass == 13; - case DictTabInfo::UniqueHashIndex: - case DictTabInfo::HashIndex: - case DictTabInfo::UniqueOrderedIndex: - case DictTabInfo::OrderedIndex: - return /* pass == 4 || pass == 5 || */ pass == 14; - } - - return false; -} - -static const Uint32 CREATE_OLD_PASS = 4; -static const Uint32 DROP_OLD_PASS = 9; -static const Uint32 CREATE_NEW_PASS = 14; -static const Uint32 LAST_PASS = 14; - -NdbOut& -operator<<(NdbOut& out, const SchemaFile::TableEntry entry) -{ - out << "["; - out << " state: " << entry.m_tableState; - out << " version: " << hex << entry.m_tableVersion << dec; - out << " type: " << entry.m_tableType; - out << " words: " << entry.m_info_words; - out << " gcp: " << entry.m_gcp; - out << " ]"; - return out; -} - -/** - * Pass 0 Create old LogfileGroup - * Pass 1 Create old Tablespace - * Pass 2 Create old Datafile/Undofile - * Pass 3 Create old Table // NOT DONE DUE TO DIH - * Pass 4 Create old Index // NOT DONE DUE TO DIH - - * Pass 5 Drop old Index // NOT DONE DUE TO DIH - * Pass 6 Drop old Table // NOT DONE DUE TO DIH - * Pass 7 Drop old Datafile/Undofile - * Pass 8 Drop old Tablespace - * Pass 9 Drop old Logfilegroup - - * Pass 10 Create new LogfileGroup - * Pass 11 Create new Tablespace - * Pass 12 Create new Datafile/Undofile - * Pass 13 Create new Table - * Pass 14 Create new Index - */ - -void Dbdict::checkSchemaStatus(Signal* signal) -{ - XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0]; - ndbrequire(newxsf->noOfPages == oldxsf->noOfPages); - const Uint32 noOfEntries = newxsf->noOfPages * NDB_SF_PAGE_ENTRIES; - - for (; c_restartRecord.activeTable < noOfEntries; - c_restartRecord.activeTable++) { - jam(); - - Uint32 tableId = c_restartRecord.activeTable; - SchemaFile::TableEntry *newEntry = getTableEntry(newxsf, tableId); - SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId); - SchemaFile::TableState newSchemaState = - (SchemaFile::TableState)newEntry->m_tableState; - SchemaFile::TableState oldSchemaState = - (SchemaFile::TableState)oldEntry->m_tableState; - - if (c_restartRecord.activeTable >= c_tableRecordPool.getSize()) { - jam(); - ndbrequire(newSchemaState == SchemaFile::INIT); - ndbrequire(oldSchemaState == SchemaFile::INIT); - continue; - }//if - -//#define PRINT_SCHEMA_RESTART -#ifdef PRINT_SCHEMA_RESTART - char buf[100]; - snprintf(buf, sizeof(buf), "checkSchemaStatus: pass: %d table: %d", - c_restartRecord.m_pass, tableId); -#endif - - if 
(c_restartRecord.m_pass <= CREATE_OLD_PASS) - { - if (!::checkSchemaStatus(oldEntry->m_tableType, c_restartRecord.m_pass)) - continue; - - switch(oldSchemaState){ - case SchemaFile::INIT: jam(); - case SchemaFile::DROP_TABLE_COMMITTED: jam(); - case SchemaFile::ADD_STARTED: jam(); - case SchemaFile::DROP_TABLE_STARTED: jam(); - case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); - continue; - case SchemaFile::TABLE_ADD_COMMITTED: jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: jam(); - jam(); -#ifdef PRINT_SCHEMA_RESTART - ndbout_c("%s -> restartCreateTab", buf); - ndbout << *newEntry << " " << *oldEntry << endl; -#endif - restartCreateTab(signal, tableId, oldEntry, oldEntry, true); - return; - } - } - - if (c_restartRecord.m_pass <= DROP_OLD_PASS) - { - if (!::checkSchemaStatus(oldEntry->m_tableType, c_restartRecord.m_pass)) - continue; - - switch(oldSchemaState){ - case SchemaFile::INIT: jam(); - case SchemaFile::DROP_TABLE_COMMITTED: jam(); - case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); - continue; - case SchemaFile::ADD_STARTED: jam(); - case SchemaFile::DROP_TABLE_STARTED: jam(); -#ifdef PRINT_SCHEMA_RESTART - ndbout_c("%s -> restartDropTab", buf); - ndbout << *newEntry << " " << *oldEntry << endl; -#endif - restartDropTab(signal, tableId, oldEntry, newEntry); - return; - case SchemaFile::TABLE_ADD_COMMITTED: jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: jam(); - if (! (* oldEntry == * newEntry)) - { -#ifdef PRINT_SCHEMA_RESTART - ndbout_c("%s -> restartDropTab", buf); - ndbout << *newEntry << " " << *oldEntry << endl; -#endif - restartDropTab(signal, tableId, oldEntry, newEntry); - return; - } - continue; - } - } - - if (c_restartRecord.m_pass <= CREATE_NEW_PASS) - { - if (!::checkSchemaStatus(newEntry->m_tableType, c_restartRecord.m_pass)) - continue; - - switch(newSchemaState){ - case SchemaFile::INIT: jam(); - case SchemaFile::DROP_TABLE_COMMITTED: jam(); - case SchemaFile::TEMPORARY_TABLE_COMMITTED: jam(); - * oldEntry = * newEntry; - continue; - case SchemaFile::ADD_STARTED: jam(); - case SchemaFile::DROP_TABLE_STARTED: jam(); - ndbrequire(DictTabInfo::isTable(newEntry->m_tableType) || - DictTabInfo::isIndex(newEntry->m_tableType)); - newEntry->m_tableState = SchemaFile::INIT; - continue; - case SchemaFile::TABLE_ADD_COMMITTED: jam(); - case SchemaFile::ALTER_TABLE_COMMITTED: jam(); - if (DictTabInfo::isIndex(newEntry->m_tableType) || - DictTabInfo::isTable(newEntry->m_tableType)) - { - bool file = * oldEntry == *newEntry && - (!DictTabInfo::isIndex(newEntry->m_tableType) || c_systemRestart); - -#ifdef PRINT_SCHEMA_RESTART - ndbout_c("%s -> restartCreateTab (file: %d)", buf, file); - ndbout << *newEntry << " " << *oldEntry << endl; -#endif - restartCreateTab(signal, tableId, newEntry, newEntry, file); - * oldEntry = * newEntry; - return; - } - else if (! 
(* oldEntry == *newEntry)) - { -#ifdef PRINT_SCHEMA_RESTART - ndbout_c("%s -> restartCreateTab", buf); - ndbout << *newEntry << " " << *oldEntry << endl; -#endif - restartCreateTab(signal, tableId, oldEntry, newEntry, false); - * oldEntry = * newEntry; - return; - } - * oldEntry = * newEntry; - continue; - } - } - } - - c_restartRecord.m_pass++; - c_restartRecord.activeTable= 0; - if(c_restartRecord.m_pass <= LAST_PASS) - { - checkSchemaStatus(signal); - } - else - { - execute(signal, c_schemaRecord.m_callback, 0); - } -}//checkSchemaStatus() - -void -Dbdict::restartCreateTab(Signal* signal, Uint32 tableId, - const SchemaFile::TableEntry * old_entry, - const SchemaFile::TableEntry * new_entry, - bool file){ - jam(); - - switch(new_entry->m_tableType){ - case DictTabInfo::UndefTableType: - case DictTabInfo::HashIndexTrigger: - case DictTabInfo::SubscriptionTrigger: - case DictTabInfo::ReadOnlyConstraint: - case DictTabInfo::IndexTrigger: - ndbrequire(false); - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - case DictTabInfo::UniqueHashIndex: - case DictTabInfo::HashIndex: - case DictTabInfo::UniqueOrderedIndex: - case DictTabInfo::OrderedIndex: - break; - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - restartCreateObj(signal, tableId, old_entry, new_entry, file); - return; - } - - CreateTableRecordPtr createTabPtr; - c_opCreateTable.seize(createTabPtr); - ndbrequire(!createTabPtr.isNull()); - - createTabPtr.p->key = ++c_opRecordSequence; - c_opCreateTable.add(createTabPtr); - - createTabPtr.p->m_errorCode = 0; - createTabPtr.p->m_tablePtrI = tableId; - createTabPtr.p->m_coordinatorRef = reference(); - createTabPtr.p->m_senderRef = 0; - createTabPtr.p->m_senderData = RNIL; - createTabPtr.p->m_tabInfoPtrI = RNIL; - createTabPtr.p->m_dihAddFragPtr = RNIL; - - if(file && !ERROR_INSERTED(6002)){ - jam(); - - c_readTableRecord.no_of_words = old_entry->m_info_words; - c_readTableRecord.pageId = 0; - c_readTableRecord.m_callback.m_callbackData = createTabPtr.p->key; - c_readTableRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateTab_readTableConf); - - startReadTableFile(signal, tableId); - return; - } else { - - ndbrequire(c_masterNodeId != getOwnNodeId()); - - /** - * Get from master - */ - GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = createTabPtr.p->key; - req->requestType = GetTabInfoReq::RequestById | - GetTabInfoReq::LongSignalConf; - req->tableId = tableId; - sendSignal(calcDictBlockRef(c_masterNodeId), GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - - if(ERROR_INSERTED(6002)){ - NdbSleep_MilliSleep(10); - CRASH_INSERTION(6002); - } - } -} - -void -Dbdict::restartCreateTab_readTableConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - PageRecordPtr pageRecPtr; - c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId); - - ParseDictTabInfoRecord parseRecord; - parseRecord.requestType = DictTabInfo::GetTabInfoConf; - parseRecord.errorCode = 0; - - Uint32 sz = c_readTableRecord.no_of_words; - SimplePropertiesLinearReader r(pageRecPtr.p->word+ZPAGE_HEADER_SIZE, sz); - handleTabInfoInit(r, &parseRecord); - if (parseRecord.errorCode != 0) - { - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Unable to restart, fail while creating table %d" - " error: %d. 
Most likely change of configuration", - c_readTableRecord.tableId, - parseRecord.errorCode); - progError(__LINE__, - NDBD_EXIT_INVALID_CONFIG, - buf); - ndbrequire(parseRecord.errorCode == 0); - } - - /* ---------------------------------------------------------------- */ - // We have read the table description from disk as part of system restart. - // We will also write it back again to ensure that both copies are ok. - /* ---------------------------------------------------------------- */ - ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE); - c_writeTableRecord.no_of_words = c_readTableRecord.no_of_words; - c_writeTableRecord.pageId = c_readTableRecord.pageId; - c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK; - c_writeTableRecord.m_callback.m_callbackData = callbackData; - c_writeTableRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateTab_writeTableConf); - startWriteTableFile(signal, c_readTableRecord.tableId); -} - -void -Dbdict::execGET_TABINFO_CONF(Signal* signal){ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr(); - - switch(conf->tableType){ - case DictTabInfo::UndefTableType: - case DictTabInfo::HashIndexTrigger: - case DictTabInfo::SubscriptionTrigger: - case DictTabInfo::ReadOnlyConstraint: - case DictTabInfo::IndexTrigger: - ndbrequire(false); - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - case DictTabInfo::UniqueHashIndex: - case DictTabInfo::HashIndex: - case DictTabInfo::UniqueOrderedIndex: - case DictTabInfo::OrderedIndex: - break; - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - if(refToBlock(conf->senderRef) == TSMAN - && (refToNode(conf->senderRef) == 0 - || refToNode(conf->senderRef) == getOwnNodeId())) - { - jam(); - FilePtr fg_ptr; - ndbrequire(c_file_hash.find(fg_ptr, conf->tableId)); - const Uint32 free_extents= conf->freeExtents; - const Uint32 id= conf->tableId; - const Uint32 type= conf->tableType; - const Uint32 data= conf->senderData; - signal->theData[0]= ZPACK_TABLE_INTO_PAGES; - signal->theData[1]= id; - signal->theData[2]= type; - signal->theData[3]= data; - signal->theData[4]= free_extents; - sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); - } - else if(refToBlock(conf->senderRef) == LGMAN - && (refToNode(conf->senderRef) == 0 - || refToNode(conf->senderRef) == getOwnNodeId())) - { - jam(); - FilegroupPtr fg_ptr; - ndbrequire(c_filegroup_hash.find(fg_ptr, conf->tableId)); - const Uint32 free_hi= conf->freeWordsHi; - const Uint32 free_lo= conf->freeWordsLo; - const Uint32 id= conf->tableId; - const Uint32 type= conf->tableType; - const Uint32 data= conf->senderData; - signal->theData[0]= ZPACK_TABLE_INTO_PAGES; - signal->theData[1]= id; - signal->theData[2]= type; - signal->theData[3]= data; - signal->theData[4]= free_hi; - signal->theData[5]= free_lo; - sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB); - } - else - { - jam(); - restartCreateObj_getTabInfoConf(signal); - } - return; - } - - const Uint32 tableId = conf->tableId; - const Uint32 senderData = conf->senderData; - - SegmentedSectionPtr tabInfoPtr; - signal->getSection(tabInfoPtr, GetTabInfoConf::DICT_TAB_INFO); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, senderData)); - ndbrequire(!createTabPtr.isNull()); - ndbrequire(createTabPtr.p->m_tablePtrI == tableId); - - /** - * Put data into 
table record - */ - ParseDictTabInfoRecord parseRecord; - parseRecord.requestType = DictTabInfo::GetTabInfoConf; - parseRecord.errorCode = 0; - - SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool()); - handleTabInfoInit(r, &parseRecord); - ndbrequire(parseRecord.errorCode == 0); - - // save to disk - - ndbrequire(tableId < c_tableRecordPool.getSize()); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); - tableEntry->m_info_words= tabInfoPtr.sz; - - Callback callback; - callback.m_callbackData = createTabPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateTab_writeTableConf); - - signal->header.m_noOfSections = 0; - writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback); - signal->setSection(tabInfoPtr, 0); - releaseSections(signal); -} - -void -Dbdict::restartCreateTab_writeTableConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - Callback callback; - callback.m_callbackData = callbackData; - callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateTab_dihComplete); - - SegmentedSectionPtr fragDataPtr; - fragDataPtr.sz = 0; - fragDataPtr.setNull(); - createTab_dih(signal, createTabPtr, fragDataPtr, &callback); -} - -void -Dbdict::restartCreateTab_dihComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - if(createTabPtr.p->m_errorCode) - { - char buf[100]; - BaseString::snprintf(buf, sizeof(buf), "Failed to create table during" - " restart, Error: %u", - createTabPtr.p->m_errorCode); - progError(__LINE__, NDBD_EXIT_RESOURCE_ALLOC_ERROR, buf); - } - - Callback callback; - callback.m_callbackData = callbackData; - callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateTab_activateComplete); - - alterTab_activate(signal, createTabPtr, &callback); -} - -void -Dbdict::restartCreateTab_activateComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - tabPtr.p->tabState = TableRecord::DEFINED; - - releaseCreateTableOp(signal,createTabPtr); - - c_restartRecord.activeTable++; - checkSchemaStatus(signal); -} - -void -Dbdict::releaseCreateTableOp(Signal* signal, CreateTableRecordPtr createTabPtr) -{ - if (createTabPtr.p->m_tabInfoPtrI != RNIL) - { - jam(); - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI); - signal->setSection(tabInfoPtr, 0); - releaseSections(signal); - } - c_opCreateTable.release(createTabPtr); -} - -void -Dbdict::restartDropTab(Signal* signal, Uint32 tableId, - const SchemaFile::TableEntry * old_entry, - const SchemaFile::TableEntry * new_entry) -{ - switch(old_entry->m_tableType){ - case DictTabInfo::UndefTableType: - case DictTabInfo::HashIndexTrigger: - case DictTabInfo::SubscriptionTrigger: - case DictTabInfo::ReadOnlyConstraint: - case DictTabInfo::IndexTrigger: - ndbrequire(false); - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - case DictTabInfo::UniqueHashIndex: - case DictTabInfo::HashIndex: - case DictTabInfo::UniqueOrderedIndex: - case DictTabInfo::OrderedIndex: - break; - case 
DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - restartDropObj(signal, tableId, old_entry); - return; - } - - const Uint32 key = ++c_opRecordSequence; - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.seize(dropTabPtr)); - - dropTabPtr.p->key = key; - c_opDropTable.add(dropTabPtr); - - dropTabPtr.p->m_errorCode = 0; - dropTabPtr.p->m_request.tableId = tableId; - dropTabPtr.p->m_coordinatorRef = 0; - dropTabPtr.p->m_requestType = DropTabReq::RestartDropTab; - dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ; - - dropTabPtr.p->m_participantData.m_block = 0; - dropTabPtr.p->m_participantData.m_callback.m_callbackData = key; - dropTabPtr.p->m_participantData.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartDropTab_complete); - dropTab_nextStep(signal, dropTabPtr); -} - -void -Dbdict::restartDropTab_complete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, callbackData)); - - //@todo check error - - releaseTableObject(c_restartRecord.activeTable); - c_opDropTable.release(dropTabPtr); - - c_restartRecord.activeTable++; - checkSchemaStatus(signal); -} - -/** - * Create Obj during NR/SR - */ -void -Dbdict::restartCreateObj(Signal* signal, - Uint32 tableId, - const SchemaFile::TableEntry * old_entry, - const SchemaFile::TableEntry * new_entry, - bool file){ - jam(); - - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.seize(createObjPtr)); - - const Uint32 key = ++c_opRecordSequence; - createObjPtr.p->key = key; - c_opCreateObj.add(createObjPtr); - createObjPtr.p->m_errorCode = 0; - createObjPtr.p->m_senderRef = reference(); - createObjPtr.p->m_senderData = tableId; - createObjPtr.p->m_clientRef = reference(); - createObjPtr.p->m_clientData = tableId; - - createObjPtr.p->m_obj_id = tableId; - createObjPtr.p->m_obj_type = new_entry->m_tableType; - createObjPtr.p->m_obj_version = new_entry->m_tableVersion; - - createObjPtr.p->m_callback.m_callbackData = key; - createObjPtr.p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::restartCreateObj_prepare_start_done); - - createObjPtr.p->m_restart= file ? 
1 : 2; - switch(new_entry->m_tableType){ - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - createObjPtr.p->m_vt_index = 0; - break; - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - createObjPtr.p->m_vt_index = 1; - break; - default: - ndbrequire(false); - } - - createObjPtr.p->m_obj_info_ptr_i = RNIL; - if(file) - { - c_readTableRecord.no_of_words = old_entry->m_info_words; - c_readTableRecord.pageId = 0; - c_readTableRecord.m_callback.m_callbackData = key; - c_readTableRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateObj_readConf); - - startReadTableFile(signal, tableId); - } - else - { - /** - * Get from master - */ - GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = key; - req->requestType = GetTabInfoReq::RequestById | - GetTabInfoReq::LongSignalConf; - req->tableId = tableId; - sendSignal(calcDictBlockRef(c_masterNodeId), GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - } -} - -void -Dbdict::restartCreateObj_getTabInfoConf(Signal* signal) -{ - jam(); - - GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr(); - - const Uint32 objId = conf->tableId; - const Uint32 senderData = conf->senderData; - - SegmentedSectionPtr objInfoPtr; - signal->getSection(objInfoPtr, GetTabInfoConf::DICT_TAB_INFO); - - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, senderData)); - ndbrequire(createObjPtr.p->m_obj_id == objId); - - createObjPtr.p->m_obj_info_ptr_i= objInfoPtr.i; - signal->header.m_noOfSections = 0; - - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_start) - (signal, createObjPtr.p); -} - -void -Dbdict::restartCreateObj_readConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - PageRecordPtr pageRecPtr; - c_pageRecordArray.getPtr(pageRecPtr, c_readTableRecord.pageId); - - Uint32 sz = c_readTableRecord.no_of_words; - - Ptr<SectionSegment> ptr; - ndbrequire(import(ptr, pageRecPtr.p->word+ZPAGE_HEADER_SIZE, sz)); - createObjPtr.p->m_obj_info_ptr_i= ptr.i; - - if (f_dict_op[createObjPtr.p->m_vt_index].m_prepare_start) - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_start) - (signal, createObjPtr.p); - else - execute(signal, createObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartCreateObj_prepare_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - Callback callback; - callback.m_callbackData = callbackData; - callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateObj_write_complete); - - SegmentedSectionPtr objInfoPtr; - getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - - writeTableFile(signal, createObjPtr.p->m_obj_id, objInfoPtr, &callback); -} - -void -Dbdict::restartCreateObj_write_complete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - SegmentedSectionPtr objInfoPtr; - getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - signal->setSection(objInfoPtr, 0); -
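The restart path above and below advances each object through prepare and commit phases by re-arming a stored Callback and either invoking a per-type handler from the f_dict_op table or executing the callback directly. A small, self-contained sketch of that continuation-passing idea — hypothetical names only, not the deleted API — might look like:

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Hypothetical sketch of chaining asynchronous phases through callbacks,
    // in the spirit of prepare_start -> write -> prepare_complete -> commit.
    using Callback = std::function<void(uint32_t retCode)>;

    // Each phase does its (pretend) work and then executes the continuation.
    static void runPhase(const char* name, const Callback& done) {
      std::cout << name << " finished\n";
      done(0);                               // 0 = success return code
    }

    int main() {
      runPhase("prepare_start", [](uint32_t) {
        runPhase("write_schema_file", [](uint32_t) {
          runPhase("commit_complete", [](uint32_t rc) {
            std::cout << "object restored, rc=" << rc << "\n";
          });
        });
      });
    }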
releaseSections(signal); - createObjPtr.p->m_obj_info_ptr_i = RNIL; - - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateObj_prepare_complete_done); - - if (f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete) - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete) - (signal, createObjPtr.p); - else - execute(signal, createObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartCreateObj_prepare_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateObj_commit_start_done); - - if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_start) - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_start) - (signal, createObjPtr.p); - else - execute(signal, createObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartCreateObj_commit_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartCreateObj_commit_complete_done); - - if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) - (signal, createObjPtr.p); - else - execute(signal, createObjPtr.p->m_callback, 0); -} - - -void -Dbdict::restartCreateObj_commit_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - ndbrequire(createObjPtr.p->m_errorCode == 0); - - c_opCreateObj.release(createObjPtr); - - c_restartRecord.activeTable++; - checkSchemaStatus(signal); -} - -/** - * Drop object during NR/SR - */ -void -Dbdict::restartDropObj(Signal* signal, - Uint32 tableId, - const SchemaFile::TableEntry * entry) -{ - jam(); - - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.seize(dropObjPtr)); - - const Uint32 key = ++c_opRecordSequence; - dropObjPtr.p->key = key; - c_opDropObj.add(dropObjPtr); - dropObjPtr.p->m_errorCode = 0; - dropObjPtr.p->m_senderRef = reference(); - dropObjPtr.p->m_senderData = tableId; - dropObjPtr.p->m_clientRef = reference(); - dropObjPtr.p->m_clientData = tableId; - - dropObjPtr.p->m_obj_id = tableId; - dropObjPtr.p->m_obj_type = entry->m_tableType; - dropObjPtr.p->m_obj_version = entry->m_tableVersion; - - dropObjPtr.p->m_callback.m_callbackData = key; - dropObjPtr.p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::restartDropObj_prepare_start_done); - - ndbout_c("Dropping %d %d", tableId, entry->m_tableType); - switch(entry->m_tableType){ - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup:{ - jam(); - Ptr<Filegroup> fg_ptr; - ndbrequire(c_filegroup_hash.find(fg_ptr, tableId)); - dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; - dropObjPtr.p->m_vt_index = 3; - break; - } - case DictTabInfo::Datafile:{ - jam(); - Ptr<File> file_ptr; - dropObjPtr.p->m_vt_index = 2; - ndbrequire(c_file_hash.find(file_ptr, tableId)); - dropObjPtr.p->m_obj_ptr_i = file_ptr.i; - break; - } - case DictTabInfo::Undofile:{ - jam(); - Ptr<File> file_ptr; - dropObjPtr.p->m_vt_index = 4; - ndbrequire(c_file_hash.find(file_ptr,
tableId)); - dropObjPtr.p->m_obj_ptr_i = file_ptr.i; - - /** - * Undofiles are only removed from logfile groups file list - * as drop undofile is currently not supported... - * file will be dropped by lgman when dropping filegroup - */ - dropObjPtr.p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::restartDropObj_commit_complete_done); - - if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - (signal, dropObjPtr.p); - else - execute(signal, dropObjPtr.p->m_callback, 0); - return; - } - default: - jamLine(entry->m_tableType); - ndbrequire(false); - } - - if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) - (signal, dropObjPtr.p); - else - execute(signal, dropObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartDropObj_prepare_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - ndbrequire(dropObjPtr.p->m_errorCode == 0); - - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartDropObj_prepare_complete_done); - - if (f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) - (signal, dropObjPtr.p); - else - execute(signal, dropObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartDropObj_prepare_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - ndbrequire(dropObjPtr.p->m_errorCode == 0); - - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartDropObj_commit_start_done); - - if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_start) - (signal, dropObjPtr.p); - else - execute(signal, dropObjPtr.p->m_callback, 0); -} - -void -Dbdict::restartDropObj_commit_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - ndbrequire(dropObjPtr.p->m_errorCode == 0); - - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::restartDropObj_commit_complete_done); - - if (f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - (signal, dropObjPtr.p); - else - execute(signal, dropObjPtr.p->m_callback, 0); -} - - -void -Dbdict::restartDropObj_commit_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - ndbrequire(returnCode == 0); - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - ndbrequire(dropObjPtr.p->m_errorCode == 0); - - c_opDropObj.release(dropObjPtr); - - c_restartRecord.activeTable++; - checkSchemaStatus(signal); -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: NODE FAILURE HANDLING ------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code that is used when nodes */ -/* (kernel/api) fails. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// We receive a report of an API that failed. -/* ---------------------------------------------------------------- */ -void Dbdict::execAPI_FAILREQ(Signal* signal) -{ - jamEntry(); - Uint32 failedApiNode = signal->theData[0]; - BlockReference retRef = signal->theData[1]; - -#if 0 - Uint32 userNode = refToNode(c_connRecord.userBlockRef); - if (userNode == failedApiNode) { - jam(); - c_connRecord.userBlockRef = (Uint32)-1; - }//if -#endif - - signal->theData[0] = failedApiNode; - signal->theData[1] = reference(); - sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB); -}//execAPI_FAILREQ() - -/* ---------------------------------------------------------------- */ -// We receive a report of one or more node failures of kernel nodes. -/* ---------------------------------------------------------------- */ -void Dbdict::execNODE_FAILREP(Signal* signal) -{ - jamEntry(); - NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; - - c_failureNr = nodeFail->failNo; - const Uint32 numberOfFailedNodes = nodeFail->noOfNodes; - const bool masterFailed = (c_masterNodeId != nodeFail->masterNodeId); - c_masterNodeId = nodeFail->masterNodeId; - - c_noNodesFailed += numberOfFailedNodes; - Uint32 theFailedNodes[NodeBitmask::Size]; - memcpy(theFailedNodes, nodeFail->theNodes, sizeof(theFailedNodes)); - - c_counterMgr.execNODE_FAILREP(signal); - - bool ok = false; - switch(c_blockState){ - case BS_IDLE: - jam(); - ok = true; - if(c_opRecordPool.getSize() != - (c_opRecordPool.getNoOfFree() + - c_opSubEvent.get_count() + c_opCreateEvent.get_count() + - c_opDropEvent.get_count() + c_opSignalUtil.get_count())) - { - jam(); - c_blockState = BS_NODE_FAILURE; - } - break; - case BS_CREATE_TAB: - jam(); - ok = true; - if(!masterFailed) - break; - // fall through - case BS_BUSY: - case BS_NODE_FAILURE: - jam(); - c_blockState = BS_NODE_FAILURE; - ok = true; - break; - case BS_NODE_RESTART: - jam(); - ok = true; - break; - } - ndbrequire(ok); - - for(unsigned i = 1; i < MAX_NDB_NODES; i++) { - jam(); - if(NodeBitmask::get(theFailedNodes, i)) { - jam(); - NodeRecordPtr nodePtr; - c_nodes.getPtr(nodePtr, i); - - nodePtr.p->nodeState = NodeRecord::NDB_NODE_DEAD; - NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0]; - nfCompRep->blockNo = DBDICT; - nfCompRep->nodeId = getOwnNodeId(); - nfCompRep->failedNodeId = nodePtr.i; - sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - - c_aliveNodes.clear(i); - }//if - }//for - - /* - * NODE_FAILREP guarantees that no "in flight" signal from - * a dead node is accepted, and also that the job buffer contains - * no such (un-executed) signals. Therefore no DICT_UNLOCK_ORD - * from a dead node (leading to master crash) is possible after - * this clean-up removes the lock record. - */ - removeStaleDictLocks(signal, theFailedNodes); - -}//execNODE_FAILREP() - - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: NODE START HANDLING --------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code that is used when kernel nodes */ -/* starts. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// Include a starting node in list of nodes to be part of adding -// and dropping tables. -/* ---------------------------------------------------------------- */ -void Dbdict::execINCL_NODEREQ(Signal* signal) -{ - jamEntry(); - NodeRecordPtr nodePtr; - BlockReference retRef = signal->theData[0]; - nodePtr.i = signal->theData[1]; - - ndbrequire(c_noNodesFailed > 0); - c_noNodesFailed--; - - c_nodes.getPtr(nodePtr); - ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD); - nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE; - signal->theData[0] = nodePtr.i; - signal->theData[1] = reference(); - sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB); - - c_aliveNodes.set(nodePtr.i); -}//execINCL_NODEREQ() - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: ADD TABLE HANDLING ---------------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code that is used when adding a table. */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -/* ---------------------------------------------------------------- */ -// This signal receives information about a table from either: -// API, Ndbcntr or from other DICT. -/* ---------------------------------------------------------------- */ -void -Dbdict::execCREATE_TABLE_REQ(Signal* signal){ - jamEntry(); - if(!assembleFragments(signal)){ - return; - } - - CreateTableReq* const req = (CreateTableReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - - ParseDictTabInfoRecord parseRecord; - do { - if(getOwnNodeId() != c_masterNodeId){ - jam(); - parseRecord.errorCode = CreateTableRef::NotMaster; - break; - } - - if (c_blockState == BS_NODE_RESTART){ - jam(); - parseRecord.errorCode = CreateTableRef::BusyWithNR; - break; - } - - if (c_blockState != BS_IDLE){ - jam(); - parseRecord.errorCode = CreateTableRef::Busy; - break; - } - - if (checkSingleUserMode(signal->getSendersBlockRef())) - { - jam(); - parseRecord.errorCode = CreateTableRef::SingleUser; - break; - } - - CreateTableRecordPtr createTabPtr; - c_opCreateTable.seize(createTabPtr); - - if(createTabPtr.isNull()){ - jam(); - parseRecord.errorCode = CreateTableRef::Busy; - break; - } - - parseRecord.requestType = DictTabInfo::CreateTableFromAPI; - parseRecord.errorCode = 0; - - SegmentedSectionPtr ptr; - signal->getSection(ptr, CreateTableReq::DICT_TAB_INFO); - SimplePropertiesSectionReader r(ptr, getSectionSegmentPool()); - - handleTabInfoInit(r, &parseRecord); - releaseSections(signal); - - if(parseRecord.errorCode != 0){ - jam(); - c_opCreateTable.release(createTabPtr); - break; - } - - createTabPtr.p->m_errorCode = 0; - createTabPtr.p->m_senderRef = senderRef; - createTabPtr.p->m_senderData = senderData; - createTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i; - createTabPtr.p->m_coordinatorRef = reference(); - createTabPtr.p->m_fragmentsPtrI = RNIL; - createTabPtr.p->m_dihAddFragPtr = RNIL; - - Uint32 key = c_opRecordSequence + 1; - Uint32 *theData = signal->getDataPtrSend(); - Uint16 *frag_data= (Uint16*)&signal->theData[25]; - 
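
The removed execCREATE_TABLE_REQ above rejects a request at the first failing precondition, in a fixed order: not the master, a node restart in progress, the block otherwise busy, single-user mode, or no free operation record. A minimal standalone sketch of that first-error pattern (the types and helper below are illustrative only, not the NDB API):

    // Hypothetical stand-ins for DICT state; not the real NDB types.
    enum class BlockState { Idle, Busy, CreateTab, NodeRestart };
    enum class DdlError   { None, NotMaster, BusyWithNR, Busy, SingleUser };

    struct DictState {
      bool       isMaster;
      BlockState blockState;
      bool       singleUserMode;
    };

    // Return the first precondition that fails, in the same order as the
    // removed code: master check, node-restart check, busy check, single-user.
    DdlError checkCreateTablePreconditions(const DictState& s)
    {
      if (!s.isMaster)                             return DdlError::NotMaster;
      if (s.blockState == BlockState::NodeRestart) return DdlError::BusyWithNR;
      if (s.blockState != BlockState::Idle)        return DdlError::Busy;
      if (s.singleUserMode)                        return DdlError::SingleUser;
      return DdlError::None;
    }

In the original, a failure simply falls out of the do/while(0) and is answered with CREATE_TABLE_REF carrying the error code and the master node id.
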
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData; - req->senderRef = reference(); - req->senderData = key; - req->primaryTableId = parseRecord.tablePtr.p->primaryTableId; - req->noOfFragments = parseRecord.tablePtr.p->fragmentCount; - req->fragmentationType = parseRecord.tablePtr.p->fragmentType; - MEMCOPY_NO_WORDS(frag_data, c_fragData, c_fragDataLen); - - if (parseRecord.tablePtr.p->isOrderedIndex()) { - jam(); - // ordered index has same fragmentation as the table - req->primaryTableId = parseRecord.tablePtr.p->primaryTableId; - req->fragmentationType = DictTabInfo::DistrKeyOrderedIndex; - } - else if (parseRecord.tablePtr.p->isHashIndex()) - { - jam(); - /* - Unique hash indexes has same amount of fragments as primary table - and distributed in the same manner but has always a normal hash - fragmentation. - */ - req->primaryTableId = parseRecord.tablePtr.p->primaryTableId; - req->fragmentationType = DictTabInfo::DistrKeyUniqueHashIndex; - } - else - { - jam(); - /* - Blob tables come here with primaryTableId != RNIL but we only need - it for creating the fragments so we set it to RNIL now that we got - what we wanted from it to avoid other side effects. - */ - parseRecord.tablePtr.p->primaryTableId = RNIL; - } - EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal, - CreateFragmentationReq::SignalLength); - jamEntry(); - if (signal->theData[0] != 0) - { - jam(); - parseRecord.errorCode= signal->theData[0]; - c_opCreateTable.release(createTabPtr); - releaseTableObject(parseRecord.tablePtr.i, true); - break; - } - createTabPtr.p->key = key; - c_opRecordSequence++; - c_opCreateTable.add(createTabPtr); - c_blockState = BS_CREATE_TAB; - return; - } while(0); - - /** - * Something went wrong - */ - - releaseSections(signal); - CreateTableRef * ref = (CreateTableRef*)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->masterNodeId = c_masterNodeId; - ref->errorCode = parseRecord.errorCode; - ref->errorLine = parseRecord.errorLine; - ref->errorKey = parseRecord.errorKey; - ref->status = parseRecord.status; - sendSignal(senderRef, GSN_CREATE_TABLE_REF, signal, - CreateTableRef::SignalLength, JBB); -} - -void -Dbdict::execBACKUP_FRAGMENT_REQ(Signal* signal) -{ - jamEntry(); - Uint32 tableId = signal->theData[0]; - Uint32 lock = signal->theData[1]; - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId, true); - - if(lock) - { - ndbrequire(tablePtr.p->tabState == TableRecord::DEFINED); - tablePtr.p->tabState = TableRecord::BACKUP_ONGOING; - } - else if(tablePtr.p->tabState == TableRecord::BACKUP_ONGOING) - { - tablePtr.p->tabState = TableRecord::DEFINED; - } -} - -bool -Dbdict::check_ndb_versions() const -{ - Uint32 node = 0; - Uint32 version = getNodeInfo(getOwnNodeId()).m_version; - while((node = c_aliveNodes.find(node + 1)) != BitmaskImpl::NotFound) - { - if(getNodeInfo(node).m_version != version) - { - return false; - } - } - return true; -} - -void -Dbdict::execALTER_TABLE_REQ(Signal* signal) -{ - // Received by master - jamEntry(); - if(!assembleFragments(signal)){ - return; - } - AlterTableReq* const req = (AlterTableReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 changeMask = req->changeMask; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - ParseDictTabInfoRecord* aParseRecord; - - // Get table definition - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId, 
false); - if(tablePtr.isNull()){ - jam(); - alterTableRef(signal, req, AlterTableRef::NoSuchTable); - return; - } - - if(getOwnNodeId() != c_masterNodeId){ - jam(); - alterTableRef(signal, req, AlterTableRef::NotMaster); - return; - } - - if(c_blockState == BS_NODE_RESTART){ - jam(); - alterTableRef(signal, req, AlterTableRef::BusyWithNR); - return; - } - - if(c_blockState != BS_IDLE){ - jam(); - alterTableRef(signal, req, AlterTableRef::Busy); - return; - } - - if (!check_ndb_versions()) - { - jam(); - alterTableRef(signal, req, AlterTableRef::IncompatibleVersions); - return; - } - - if (checkSingleUserMode(signal->getSendersBlockRef())) - { - jam(); - alterTableRef(signal, req, AlterTableRef::SingleUser); - return; - } - - const TableRecord::TabState tabState = tablePtr.p->tabState; - bool ok = false; - switch(tabState){ - case TableRecord::NOT_DEFINED: - case TableRecord::DEFINING: - jam(); - alterTableRef(signal, req, AlterTableRef::NoSuchTable); - return; - case TableRecord::DEFINED: - ok = true; - jam(); - break; - case TableRecord::BACKUP_ONGOING: - jam(); - alterTableRef(signal, req, AlterTableRef::BackupInProgress); - return; - case TableRecord::PREPARE_DROPPING: - case TableRecord::DROPPING: - jam(); - alterTableRef(signal, req, AlterTableRef::DropInProgress); - return; - } - ndbrequire(ok); - - if(tablePtr.p->tableVersion != tableVersion){ - jam(); - alterTableRef(signal, req, AlterTableRef::InvalidTableVersion); - return; - } - // Parse new table defintion - ParseDictTabInfoRecord parseRecord; - aParseRecord = &parseRecord; - - CreateTableRecordPtr alterTabPtr; // Reuse create table records - c_opCreateTable.seize(alterTabPtr); - - if(alterTabPtr.isNull()){ - jam(); - alterTableRef(signal, req, AlterTableRef::Busy); - return; - } - - alterTabPtr.p->m_changeMask = changeMask; - parseRecord.requestType = DictTabInfo::AlterTableFromAPI; - parseRecord.errorCode = 0; - - SegmentedSectionPtr ptr; - signal->getSection(ptr, AlterTableReq::DICT_TAB_INFO); - SimplePropertiesSectionReader r(ptr, getSectionSegmentPool()); - - handleTabInfoInit(r, &parseRecord, false); // Will not save info - - if(parseRecord.errorCode != 0){ - jam(); - c_opCreateTable.release(alterTabPtr); - alterTableRef(signal, req, - (AlterTableRef::ErrorCode) parseRecord.errorCode, - aParseRecord); - return; - } - - releaseSections(signal); - alterTabPtr.p->key = ++c_opRecordSequence; - c_opCreateTable.add(alterTabPtr); - ndbrequire(c_opCreateTable.find(alterTabPtr, alterTabPtr.p->key)); - alterTabPtr.p->m_errorCode = 0; - alterTabPtr.p->m_senderRef = senderRef; - alterTabPtr.p->m_senderData = senderData; - alterTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i; - alterTabPtr.p->m_alterTableFailed = false; - alterTabPtr.p->m_coordinatorRef = reference(); - alterTabPtr.p->m_fragmentsPtrI = RNIL; - alterTabPtr.p->m_dihAddFragPtr = RNIL; - alterTabPtr.p->m_alterTableId = tablePtr.p->tableId; - - // Send prepare request to all alive nodes - SimplePropertiesSectionWriter w(getSectionSegmentPool()); - packTableIntoPages(w, parseRecord.tablePtr); - - SegmentedSectionPtr tabInfoPtr; - w.getPtr(tabInfoPtr); - - alterTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i; - - // Alter table on all nodes - c_blockState = BS_BUSY; - - Mutex mutex(signal, c_mutexMgr, alterTabPtr.p->m_startLcpMutex); - Callback c = { safe_cast(&Dbdict::alterTable_backup_mutex_locked), - alterTabPtr.p->key }; - - ndbrequire(mutex.lock(c)); -} - -void -Dbdict::alterTable_backup_mutex_locked(Signal* signal, - Uint32 callbackData, - Uint32 retValue) -{ - jamEntry(); - - 
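
check_ndb_versions(), called in the ALTER path above, only passes when every alive node reports the same software version as this node. A minimal sketch of that rule using std::all_of (the container of alive-node versions is a hypothetical stand-in for the alive-node bitmask walk in the removed code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative only: versions reported by all currently alive nodes.
    bool allNodesSameVersion(uint32_t ownVersion,
                             const std::vector<uint32_t>& aliveNodeVersions)
    {
      return std::all_of(aliveNodeVersions.begin(), aliveNodeVersions.end(),
                         [ownVersion](uint32_t v) { return v == ownVersion; });
    }

Any mismatch makes the ALTER fail with AlterTableRef::IncompatibleVersions, which keeps online schema changes off the table during a rolling upgrade.
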
ndbrequire(retValue == 0); - - CreateTableRecordPtr alterTabPtr; - ndbrequire(c_opCreateTable.find(alterTabPtr, callbackData)); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, alterTabPtr.p->m_alterTableId, true); - - Mutex mutex(signal, c_mutexMgr, alterTabPtr.p->m_startLcpMutex); - mutex.unlock(); // ignore response - - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); - signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); - - alterTabPtr.p->m_tabInfoPtrI = RNIL; - - if(tablePtr.p->tabState == TableRecord::BACKUP_ONGOING) - { - jam(); - AlterTableReq* req = (AlterTableReq*)signal->getDataPtr(); - req->senderData = alterTabPtr.p->m_senderData; - req->senderRef = alterTabPtr.p->m_senderRef; - alterTableRef(signal, req, AlterTableRef::BackupInProgress); - - c_tableRecordPool.getPtr(tablePtr, alterTabPtr.p->m_tablePtrI); - releaseTableObject(tablePtr.i, false); - - c_opCreateTable.release(alterTabPtr); - c_blockState = BS_IDLE; - return; - } - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - alterTabPtr.p->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ; - SafeCounter safeCounter(c_counterMgr, - alterTabPtr.p->m_coordinatorData.m_counter); - safeCounter.init(rg, alterTabPtr.p->key); - - AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend(); - lreq->senderRef = reference(); - lreq->senderData = alterTabPtr.p->key; - lreq->clientRef = alterTabPtr.p->m_senderRef; - lreq->clientData = alterTabPtr.p->m_senderData; - lreq->changeMask = alterTabPtr.p->m_changeMask; - lreq->tableId = tablePtr.p->tableId; - lreq->tableVersion = alter_obj_inc_schema_version(tablePtr.p->tableVersion); - lreq->gci = tablePtr.p->gciTableCreated; - lreq->requestType = AlterTabReq::AlterTablePrepare; - - sendFragmentedSignal(rg, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); -} - -void Dbdict::alterTableRef(Signal * signal, - AlterTableReq * req, - AlterTableRef::ErrorCode errCode, - ParseDictTabInfoRecord* parseRecord) -{ - jam(); - releaseSections(signal); - AlterTableRef * ref = (AlterTableRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - ref->senderData = req->senderData; - ref->senderRef = reference(); - ref->masterNodeId = c_masterNodeId; - if (parseRecord) { - ref->errorCode = parseRecord->errorCode; - ref->errorLine = parseRecord->errorLine; - ref->errorKey = parseRecord->errorKey; - ref->status = parseRecord->status; - } - else { - ref->errorCode = errCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->status = 0; - } - sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal, - AlterTableRef::SignalLength, JBB); -} - -void -Dbdict::execALTER_TAB_REQ(Signal * signal) -{ - // Received in all nodes to handle change locally - jamEntry(); - - if(!assembleFragments(signal)){ - return; - } - AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 changeMask = req->changeMask; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - const Uint32 gci = req->gci; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) req->requestType; - - SegmentedSectionPtr tabInfoPtr; - signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); - - CreateTableRecordPtr alterTabPtr; // Reuse create table records - - if (senderRef != reference()) { - jam(); - c_blockState = BS_BUSY; - } - if ((requestType == AlterTabReq::AlterTablePrepare) - && (senderRef != reference())) { - jam(); - 
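
The coordinator above broadcasts ALTER_TAB_REQ(prepare) to every alive DICT and uses a SafeCounter to wait for exactly one CONF or REF per node before moving on. A rough standalone model of that bookkeeping (a std::set stands in for the node bitmask; this is not the real SafeCounter API):

    #include <cstdint>
    #include <set>

    // Tracks which participants still owe a reply for one operation.
    class WaitForAll {
    public:
      void init(const std::set<uint32_t>& nodes) { waiting_ = nodes; }

      // Called for each CONF/REF; returns true once every node has answered.
      bool clearWaitingFor(uint32_t nodeId) {
        waiting_.erase(nodeId);
        return waiting_.empty();
      }

      bool done() const { return waiting_.empty(); }

    private:
      std::set<uint32_t> waiting_;
    };

The removed code additionally remembers whether any node answered with a REF (m_alterTableFailed), so the final round can be a commit or a revert.
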
c_opCreateTable.seize(alterTabPtr); - if(!alterTabPtr.isNull()) - alterTabPtr.p->m_changeMask = changeMask; - } - else { - jam(); - ndbrequire(c_opCreateTable.find(alterTabPtr, senderData)); - } - if(alterTabPtr.isNull()){ - jam(); - alterTabRef(signal, req, AlterTableRef::Busy); - return; - } - - if (!check_ndb_versions()) - { - jam(); - alterTabRef(signal, req, AlterTableRef::IncompatibleVersions); - return; - } - - alterTabPtr.p->m_alterTableId = tableId; - alterTabPtr.p->m_coordinatorRef = senderRef; - - // Get table definition - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId, false); - if(tablePtr.isNull()){ - jam(); - alterTabRef(signal, req, AlterTableRef::NoSuchTable); - return; - } - - switch(requestType) { - case(AlterTabReq::AlterTablePrepare): { - ParseDictTabInfoRecord* aParseRecord; - - const TableRecord::TabState tabState = tablePtr.p->tabState; - bool ok = false; - switch(tabState){ - case TableRecord::NOT_DEFINED: - case TableRecord::DEFINING: - jam(); - alterTabRef(signal, req, AlterTableRef::NoSuchTable); - return; - case TableRecord::DEFINED: - ok = true; - jam(); - break; - case TableRecord::PREPARE_DROPPING: - case TableRecord::DROPPING: - jam(); - alterTabRef(signal, req, AlterTableRef::DropInProgress); - return; - case TableRecord::BACKUP_ONGOING: - jam(); - alterTabRef(signal, req, AlterTableRef::BackupInProgress); - return; - } - ndbrequire(ok); - - if(alter_obj_inc_schema_version(tablePtr.p->tableVersion) != tableVersion){ - jam(); - alterTabRef(signal, req, AlterTableRef::InvalidTableVersion); - return; - } - TableRecordPtr newTablePtr; - if (senderRef != reference()) { - jam(); - // Parse altered table defintion - ParseDictTabInfoRecord parseRecord; - aParseRecord = &parseRecord; - - parseRecord.requestType = DictTabInfo::AlterTableFromAPI; - parseRecord.errorCode = 0; - - SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool()); - - handleTabInfoInit(r, &parseRecord, false); // Will not save info - - if(parseRecord.errorCode != 0){ - jam(); - c_opCreateTable.release(alterTabPtr); - alterTabRef(signal, req, - (AlterTableRef::ErrorCode) parseRecord.errorCode, - aParseRecord); - return; - } - alterTabPtr.p->key = senderData; - c_opCreateTable.add(alterTabPtr); - alterTabPtr.p->m_errorCode = 0; - alterTabPtr.p->m_senderRef = senderRef; - alterTabPtr.p->m_senderData = senderData; - alterTabPtr.p->m_tablePtrI = parseRecord.tablePtr.i; - alterTabPtr.p->m_fragmentsPtrI = RNIL; - alterTabPtr.p->m_dihAddFragPtr = RNIL; - newTablePtr = parseRecord.tablePtr; - newTablePtr.p->tableVersion = tableVersion; - } - else { // (req->senderRef == reference()) - jam(); - c_tableRecordPool.getPtr(newTablePtr, alterTabPtr.p->m_tablePtrI); - newTablePtr.p->tableVersion = tableVersion; - } - if (handleAlterTab(req, alterTabPtr.p, tablePtr, newTablePtr) == -1) { - jam(); - c_opCreateTable.release(alterTabPtr); - alterTabRef(signal, req, AlterTableRef::UnsupportedChange); - return; - } - releaseSections(signal); - // Propagate alter table to other local blocks - AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = senderData; - req->changeMask = changeMask; - req->tableId = tableId; - req->tableVersion = tableVersion; - req->gci = gci; - req->requestType = requestType; - sendSignal(DBLQH_REF, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - return; - } - case(AlterTabReq::AlterTableCommit): { - jam(); - // Write schema for altered table to disk - SegmentedSectionPtr tabInfoPtr; - 
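
In the participant path above, a node that is not the coordinator seizes a fresh operation record and keys it with the coordinator's senderData, while the coordinator just looks its own record up again; later CONF/REF signals can then locate the operation by that key on every node. A loose sketch of that seize-or-find idiom with a std::map (the types are hypothetical, not the DICT record pools):

    #include <cstdint>
    #include <map>

    struct AlterOp {            // illustrative operation record
      uint32_t key = 0;
      uint32_t changeMask = 0;
    };

    using OpMap = std::map<uint32_t, AlterOp>;

    // Coordinator: the record must already exist. Participant: create it,
    // keyed by the coordinator's senderData so later signals can find it.
    AlterOp* seizeOrFind(OpMap& ops, uint32_t senderData, bool isCoordinator)
    {
      if (isCoordinator) {
        auto it = ops.find(senderData);
        return it == ops.end() ? nullptr : &it->second;
      }
      AlterOp& op = ops[senderData];
      op.key = senderData;
      return &op;
    }
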
signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); - alterTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i; - bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); - - signal->header.m_noOfSections = 0; - - // Update table record - tablePtr.p->packedSize = tabInfoPtr.sz; - tablePtr.p->tableVersion = tableVersion; - tablePtr.p->gciTableCreated = gci; - - SchemaFile::TableEntry tabEntry; - tabEntry.m_tableVersion = tableVersion; - tabEntry.m_tableType = tablePtr.p->tableType; - if (savetodisk) - tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED; - else - tabEntry.m_tableState = SchemaFile::TEMPORARY_TABLE_COMMITTED; - tabEntry.m_gcp = gci; - tabEntry.m_info_words = tabInfoPtr.sz; - memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused)); - - Callback callback; - callback.m_callbackData = senderData; - callback.m_callbackFunction = - safe_cast(&Dbdict::alterTab_writeSchemaConf); - - updateSchemaState(signal, tableId, &tabEntry, &callback, savetodisk); - break; - } - case(AlterTabReq::AlterTableRevert): { - jam(); - // Revert failed alter table - revertAlterTable(signal, changeMask, tableId, alterTabPtr.p); - // Acknowledge the reverted alter table - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->changeMask = changeMask; - conf->tableId = tableId; - conf->tableVersion = tableVersion; - conf->gci = gci; - conf->requestType = requestType; - sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal, - AlterTabConf::SignalLength, JBB); - break; - } - default: ndbrequire(false); - } -} - -void Dbdict::alterTabRef(Signal * signal, - AlterTabReq * req, - AlterTableRef::ErrorCode errCode, - ParseDictTabInfoRecord* parseRecord) -{ - jam(); - releaseSections(signal); - AlterTabRef * ref = (AlterTabRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - ref->senderData = req->senderData; - ref->senderRef = reference(); - if (parseRecord) { - jam(); - ref->errorCode = parseRecord->errorCode; - ref->errorLine = parseRecord->errorLine; - ref->errorKey = parseRecord->errorKey; - ref->errorStatus = parseRecord->status; - } - else { - jam(); - ref->errorCode = errCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->errorStatus = 0; - } - sendSignal(senderRef, GSN_ALTER_TAB_REF, signal, - AlterTabRef::SignalLength, JBB); - - c_blockState = BS_IDLE; -} - -void Dbdict::execALTER_TAB_REF(Signal * signal){ - jamEntry(); - - AlterTabRef * ref = (AlterTabRef*)signal->getDataPtr(); - - Uint32 senderRef = ref->senderRef; - Uint32 senderData = ref->senderData; - Uint32 errorCode = ref->errorCode; - Uint32 errorLine = ref->errorLine; - Uint32 errorKey = ref->errorKey; - Uint32 errorStatus = ref->errorStatus; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) ref->requestType; - CreateTableRecordPtr alterTabPtr; - ndbrequire(c_opCreateTable.find(alterTabPtr, senderData)); - Uint32 changeMask = alterTabPtr.p->m_changeMask; - SafeCounter safeCounter(c_counterMgr, alterTabPtr.p->m_coordinatorData.m_counter); - safeCounter.clearWaitingFor(refToNode(senderRef)); - switch (requestType) { - case(AlterTabReq::AlterTablePrepare): { - if (safeCounter.done()) { - jam(); - // Send revert request to all alive nodes - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, alterTabPtr.p->m_alterTableId); - Uint32 tableId = tablePtr.p->tableId; - Uint32 tableVersion = tablePtr.p->tableVersion; - Uint32 gci = tablePtr.p->gciTableCreated; - SimplePropertiesSectionWriter 
w(getSectionSegmentPool()); - packTableIntoPages(w, tablePtr); - SegmentedSectionPtr spDataPtr; - w.getPtr(spDataPtr); - signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - alterTabPtr.p->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ; - safeCounter.init(rg, alterTabPtr.p->key); - - AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend(); - lreq->senderRef = reference(); - lreq->senderData = alterTabPtr.p->key; - lreq->clientRef = alterTabPtr.p->m_senderRef; - lreq->clientData = alterTabPtr.p->m_senderData; - lreq->changeMask = changeMask; - lreq->tableId = tableId; - lreq->tableVersion = tableVersion; - lreq->gci = gci; - lreq->requestType = AlterTabReq::AlterTableRevert; - - sendSignal(rg, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - } - else { - jam(); - alterTabPtr.p->m_alterTableFailed = true; - } - break; - } - case(AlterTabReq::AlterTableCommit): - jam(); - case(AlterTabReq::AlterTableRevert): { - AlterTableRef * apiRef = (AlterTableRef*)signal->getDataPtrSend(); - - apiRef->senderData = senderData; - apiRef->senderRef = reference(); - apiRef->masterNodeId = c_masterNodeId; - apiRef->errorCode = errorCode; - apiRef->errorLine = errorLine; - apiRef->errorKey = errorKey; - apiRef->status = errorStatus; - if (safeCounter.done()) { - jam(); - sendSignal(senderRef, GSN_ALTER_TABLE_REF, signal, - AlterTableRef::SignalLength, JBB); - c_blockState = BS_IDLE; - } - else { - jam(); - alterTabPtr.p->m_alterTableFailed = true; - alterTabPtr.p->m_alterTableRef = *apiRef; - } - break; - } - default: ndbrequire(false); - } -} - -void -Dbdict::execALTER_TAB_CONF(Signal * signal){ - jamEntry(); - AlterTabConf * const conf = (AlterTabConf*)signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - Uint32 senderData = conf->senderData; - Uint32 changeMask = conf->changeMask; - Uint32 tableId = conf->tableId; - Uint32 tableVersion = conf->tableVersion; - Uint32 gci = conf->gci; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) conf->requestType; - CreateTableRecordPtr alterTabPtr; - ndbrequire(c_opCreateTable.find(alterTabPtr, senderData)); - - switch (requestType) { - case(AlterTabReq::AlterTablePrepare): { - switch(refToBlock(signal->getSendersBlockRef())) { - case DBLQH: { - jam(); - AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = senderData; - req->changeMask = changeMask; - req->tableId = tableId; - req->tableVersion = tableVersion; - req->gci = gci; - req->requestType = requestType; - sendSignal(DBDIH_REF, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - return; - } - case DBDIH: { - jam(); - AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = senderData; - req->changeMask = changeMask; - req->tableId = tableId; - req->tableVersion = tableVersion; - req->gci = gci; - req->requestType = requestType; - sendSignal(DBTC_REF, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - return; - } - case DBTC: { - jam(); - // Participant is done with prepare phase, send conf to coordinator - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->changeMask = changeMask; - conf->tableId = tableId; - conf->tableVersion = tableVersion; - conf->gci = gci; - conf->requestType = requestType; - sendSignal(alterTabPtr.p->m_coordinatorRef, GSN_ALTER_TAB_CONF, signal, - 
AlterTabConf::SignalLength, JBB); - return; - } - default :break; - } - // Coordinator only - SafeCounter safeCounter(c_counterMgr, alterTabPtr.p->m_coordinatorData.m_counter); - safeCounter.clearWaitingFor(refToNode(senderRef)); - if (safeCounter.done()) { - jam(); - // We have received all local confirmations - if (alterTabPtr.p->m_alterTableFailed) { - jam(); - // Send revert request to all alive nodes - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, alterTabPtr.p->m_alterTableId); - Uint32 tableId = tablePtr.p->tableId; - Uint32 tableVersion = tablePtr.p->tableVersion; - Uint32 gci = tablePtr.p->gciTableCreated; - SimplePropertiesSectionWriter w(getSectionSegmentPool()); - packTableIntoPages(w, tablePtr); - SegmentedSectionPtr spDataPtr; - w.getPtr(spDataPtr); - signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - alterTabPtr.p->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ; - safeCounter.init(rg, alterTabPtr.p->key); - - AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend(); - lreq->senderRef = reference(); - lreq->senderData = alterTabPtr.p->key; - lreq->clientRef = alterTabPtr.p->m_senderRef; - lreq->clientData = alterTabPtr.p->m_senderData; - lreq->changeMask = changeMask; - lreq->tableId = tableId; - lreq->tableVersion = tableVersion; - lreq->gci = gci; - lreq->requestType = AlterTabReq::AlterTableRevert; - - sendSignal(rg, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - } - else { - jam(); - // Send commit request to all alive nodes - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - SimplePropertiesSectionWriter w(getSectionSegmentPool()); - packTableIntoPages(w, tablePtr); - SegmentedSectionPtr spDataPtr; - w.getPtr(spDataPtr); - signal->setSection(spDataPtr, AlterTabReq::DICT_TAB_INFO); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - alterTabPtr.p->m_coordinatorData.m_gsn = GSN_ALTER_TAB_REQ; - safeCounter.init(rg, alterTabPtr.p->key); - - AlterTabReq * const lreq = (AlterTabReq*)signal->getDataPtrSend(); - lreq->senderRef = reference(); - lreq->senderData = alterTabPtr.p->key; - lreq->clientRef = alterTabPtr.p->m_senderRef; - lreq->clientData = alterTabPtr.p->m_senderData; - lreq->changeMask = changeMask; - lreq->tableId = tableId; - lreq->tableVersion = tableVersion; - lreq->gci = gci; - lreq->requestType = AlterTabReq::AlterTableCommit; - - sendFragmentedSignal(rg, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength, JBB); - } - } - else { - // (!safeCounter.done()) - jam(); - } - break; - } - case(AlterTabReq::AlterTableRevert): - jam(); - case(AlterTabReq::AlterTableCommit): { - SafeCounter safeCounter(c_counterMgr, alterTabPtr.p->m_coordinatorData.m_counter); - safeCounter.clearWaitingFor(refToNode(senderRef)); - if (safeCounter.done()) { - jam(); - // We have received all local confirmations - releaseSections(signal); - if (alterTabPtr.p->m_alterTableFailed) { - jam(); - AlterTableRef * apiRef = - (AlterTableRef*)signal->getDataPtrSend(); - *apiRef = alterTabPtr.p->m_alterTableRef; - sendSignal(alterTabPtr.p->m_senderRef, GSN_ALTER_TABLE_REF, signal, - AlterTableRef::SignalLength, JBB); - } - else { - jam(); - // Alter table completed, inform API - AlterTableConf * const apiConf = - (AlterTableConf*)signal->getDataPtrSend(); - apiConf->senderRef = reference(); - apiConf->senderData = alterTabPtr.p->m_senderData; - apiConf->tableId = tableId; - apiConf->tableVersion = tableVersion; - - //@todo check api failed - 
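
Once every participant has answered the prepare round, the coordinator above broadcasts either AlterTableCommit (all prepares succeeded) or AlterTableRevert (at least one REF arrived); the CREATE TABLE path later uses the same shape with CreateTableCommit/CreateTableDrop. A tiny sketch of that coordinator transition (the enum and helper are illustrative, only the phase names follow the removed code):

    #include <stdexcept>

    // Illustrative request types mirroring AlterTabReq::RequestType.
    enum class AlterPhase { Prepare, Commit, Revert, Done };

    // Coordinator's transition once all replies for the current phase are in.
    AlterPhase nextAlterPhase(AlterPhase current, bool anyParticipantFailed)
    {
      switch (current) {
      case AlterPhase::Prepare:
        return anyParticipantFailed ? AlterPhase::Revert : AlterPhase::Commit;
      case AlterPhase::Commit:   // all commits acknowledged: confirm to the API
      case AlterPhase::Revert:   // all reverts acknowledged: send the stored error
        return AlterPhase::Done;
      case AlterPhase::Done:
        break;
      }
      throw std::logic_error("no transition from Done");
    }
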
sendSignal(alterTabPtr.p->m_senderRef, GSN_ALTER_TABLE_CONF, signal, - AlterTableConf::SignalLength, JBB); - } - - // Release resources - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, alterTabPtr.p->m_tablePtrI); - releaseTableObject(tabPtr.i, false); - releaseCreateTableOp(signal,alterTabPtr); - c_blockState = BS_IDLE; - } - else { - // (!safeCounter.done()) - jam(); - } - break; - } - default: ndbrequire(false); - } -} - -// For debugging -inline -void Dbdict::printTables() -{ - DLHashTable::Iterator iter; - bool moreTables = c_obj_hash.first(iter); - printf("OBJECTS IN DICT:\n"); - char name[MAX_TAB_NAME_SIZE]; - while (moreTables) { - Ptr tablePtr = iter.curr; - ConstRope r(c_rope_pool, tablePtr.p->m_name); - r.copy(name); - printf("%s ", name); - moreTables = c_obj_hash.next(iter); - } - printf("\n"); -} - -int Dbdict::handleAlterTab(AlterTabReq * req, - CreateTableRecord * alterTabPtrP, - TableRecordPtr origTablePtr, - TableRecordPtr newTablePtr) -{ - bool supportedAlteration = false; - Uint32 changeMask = req->changeMask; - - if (AlterTableReq::getNameFlag(changeMask)) { - jam(); - // Table rename - supportedAlteration = true; - // Remove from hashtable - Ptr obj_ptr; - c_obj_pool.getPtr(obj_ptr, origTablePtr.p->m_obj_ptr_i); - c_obj_hash.remove(obj_ptr); - { - Rope org(c_rope_pool, origTablePtr.p->tableName); - org.copy(alterTabPtrP->previousTableName); - - ConstRope src(c_rope_pool, newTablePtr.p->tableName); - char tmp[MAX_TAB_NAME_SIZE]; - const int len = src.size(); - src.copy(tmp); - ndbrequire(org.assign(tmp, len)); - } - obj_ptr.p->m_name = origTablePtr.p->tableName; - // Put it back - c_obj_hash.add(obj_ptr); - } - - if (AlterTableReq::getFrmFlag(changeMask)) { - // Table definition changed (new frm) - supportedAlteration = true; - // Save old definition - Rope org(c_rope_pool, origTablePtr.p->frmData); - org.copy(alterTabPtrP->previousFrmData); - alterTabPtrP->previousFrmLen = org.size(); - - // Set new definition - ConstRope src(c_rope_pool, newTablePtr.p->frmData); - char tmp[MAX_FRM_DATA_SIZE]; - src.copy(tmp); - ndbrequire(org.assign(tmp, src.size())); - } - -/* - TODO RONM: Lite ny kod för FragmentData och RangeOrListData -*/ - if (supportedAlteration) - { - // Set new schema version - origTablePtr.p->tableVersion = newTablePtr.p->tableVersion; - return 0; - } - else - { - jam(); - return -1; - } -} - -void Dbdict::revertAlterTable(Signal * signal, - Uint32 changeMask, - Uint32 tableId, - CreateTableRecord * alterTabPtrP) -{ - bool supportedAlteration = false; - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - - if (AlterTableReq::getNameFlag(changeMask)) { - jam(); - // Table rename - supportedAlteration = true; - // Restore previous name - - Ptr obj_ptr; - c_obj_pool.getPtr(obj_ptr, tablePtr.p->m_obj_ptr_i); - c_obj_hash.remove(obj_ptr); - - { - // Restore name - Rope org(c_rope_pool, tablePtr.p->tableName); - ndbrequire(org.assign(alterTabPtrP->previousTableName)); - } - obj_ptr.p->m_name = tablePtr.p->tableName; - // Put it back - c_obj_hash.add(obj_ptr); - } - - if (AlterTableReq::getFrmFlag(changeMask)) - { - jam(); - // Table redefinition - supportedAlteration = true; - // Restore previous frm - Rope org(c_rope_pool, tablePtr.p->tableName); - ndbrequire(org.assign(alterTabPtrP->previousFrmData, - alterTabPtrP->previousFrmLen)); - - } - - - if (supportedAlteration) - { - tablePtr.p->tableVersion = - alter_obj_dec_schema_version(tablePtr.p->tableVersion); - return; - } - - ndbrequire(false); -} - -void 
-Dbdict::alterTab_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - Uint32 key = callbackData; - CreateTableRecordPtr alterTabPtr; - ndbrequire(c_opCreateTable.find(alterTabPtr, key)); - Uint32 tableId = alterTabPtr.p->m_alterTableId; - - Callback callback; - callback.m_callbackData = alterTabPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::alterTab_writeTableConf); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); - if (savetodisk) - { - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); - writeTableFile(signal, tableId, tabInfoPtr, &callback); - } - else - { - execute(signal, callback, 0); - } -} - -void -Dbdict::alterTab_writeTableConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - CreateTableRecordPtr alterTabPtr; - ndbrequire(c_opCreateTable.find(alterTabPtr, callbackData)); - Uint32 coordinatorRef = alterTabPtr.p->m_coordinatorRef; - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, alterTabPtr.p->m_alterTableId); - // Alter table commit request handled successfully - // Inform Suma so it can send events to any subscribers of the table - AlterTabReq * req = (AlterTabReq*)signal->getDataPtrSend(); - if (coordinatorRef == reference()) - req->senderRef = alterTabPtr.p->m_senderRef; - else - req->senderRef = 0; - req->senderData = callbackData; - req->tableId = tabPtr.p->tableId; - req->tableVersion = tabPtr.p->tableVersion; - req->gci = tabPtr.p->gciTableCreated; - req->requestType = AlterTabReq::AlterTableCommit; - req->changeMask = alterTabPtr.p->m_changeMask; - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); - signal->setSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); - EXECUTE_DIRECT(SUMA, GSN_ALTER_TAB_REQ, signal, - AlterTabReq::SignalLength); - releaseSections(signal); - alterTabPtr.p->m_tabInfoPtrI = RNIL; - jamEntry(); - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = callbackData; - conf->tableId = tabPtr.p->tableId; - conf->tableVersion = tabPtr.p->tableVersion; - conf->gci = tabPtr.p->gciTableCreated; - conf->requestType = AlterTabReq::AlterTableCommit; - conf->changeMask = alterTabPtr.p->m_changeMask; - sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal, - AlterTabConf::SignalLength, JBB); - - - { - ApiBroadcastRep* api= (ApiBroadcastRep*)signal->getDataPtrSend(); - api->gsn = GSN_ALTER_TABLE_REP; - api->minVersion = MAKE_VERSION(4,1,15); - - AlterTableRep* rep = (AlterTableRep*)api->theData; - rep->tableId = tabPtr.p->tableId; - rep->tableVersion = alter_obj_dec_schema_version(tabPtr.p->tableVersion); - rep->changeType = AlterTableRep::CT_ALTERED; - - LinearSectionPtr ptr[3]; - ptr[0].p = (Uint32*)alterTabPtr.p->previousTableName; - ptr[0].sz = (sizeof(alterTabPtr.p->previousTableName) + 3) >> 2; - - sendSignal(QMGR_REF, GSN_API_BROADCAST_REP, signal, - ApiBroadcastRep::SignalLength + AlterTableRep::SignalLength, - JBB, ptr,1); - } - - if(coordinatorRef != reference()) { - jam(); - // Release resources - c_tableRecordPool.getPtr(tabPtr, alterTabPtr.p->m_tablePtrI); - releaseTableObject(tabPtr.i, false); - releaseCreateTableOp(signal,alterTabPtr); - c_blockState = BS_IDLE; - } -} - -void -Dbdict::execCREATE_FRAGMENTATION_REF(Signal * signal){ - jamEntry(); - const Uint32 * theData = signal->getDataPtr(); - CreateFragmentationRef * const ref = 
(CreateFragmentationRef*)theData; - (void)ref; - ndbrequire(false); -} - -void -Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){ - jamEntry(); - const Uint32 * theData = signal->getDataPtr(); - CreateFragmentationConf * const conf = (CreateFragmentationConf*)theData; - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData)); - - ndbrequire(signal->getNoOfSections() == 1); - - SegmentedSectionPtr fragDataPtr; - signal->getSection(fragDataPtr, CreateFragmentationConf::FRAGMENTS); - signal->header.m_noOfSections = 0; - - /** - * Get table - */ - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - - /** - * Save fragment count - */ - tabPtr.p->fragmentCount = conf->noOfFragments; - - /** - * Update table version - */ - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i); - - tabPtr.p->tableVersion = - create_obj_inc_schema_version(tabEntry->m_tableVersion); - - /** - * Pack - */ - SimplePropertiesSectionWriter w(getSectionSegmentPool()); - packTableIntoPages(w, tabPtr); - - SegmentedSectionPtr spDataPtr; - w.getPtr(spDataPtr); - - signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO); - signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter); - createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ; - createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTablePrepare; - tmp.init(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key); - - CreateTabReq * const req = (CreateTabReq*)theData; - req->senderRef = reference(); - req->senderData = createTabPtr.p->key; - req->clientRef = createTabPtr.p->m_senderRef; - req->clientData = createTabPtr.p->m_senderData; - req->requestType = CreateTabReq::CreateTablePrepare; - - req->gci = 0; - req->tableId = tabPtr.i; - req->tableVersion = create_obj_inc_schema_version(tabEntry->m_tableVersion); - - sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal, - CreateTabReq::SignalLength, JBB); - - return; -} - -void -Dbdict::execCREATE_TAB_REF(Signal* signal){ - jamEntry(); - - CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData)); - - ndbrequire(createTabPtr.p->m_coordinatorRef == reference()); - ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ); - - if(ref->errorCode != CreateTabRef::NF_FakeErrorREF){ - createTabPtr.p->setErrorCode(ref->errorCode); - } - createTab_reply(signal, createTabPtr, refToNode(ref->senderRef)); -} - -void -Dbdict::execCREATE_TAB_CONF(Signal* signal){ - jamEntry(); - - ndbrequire(signal->getNoOfSections() == 0); - - CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData)); - - ndbrequire(createTabPtr.p->m_coordinatorRef == reference()); - ndbrequire(createTabPtr.p->m_coordinatorData.m_gsn == GSN_CREATE_TAB_REQ); - - createTab_reply(signal, createTabPtr, refToNode(conf->senderRef)); -} - -void -Dbdict::createTab_reply(Signal* signal, - CreateTableRecordPtr createTabPtr, - Uint32 nodeId) -{ - - SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter); - if(!tmp.clearWaitingFor(nodeId)){ - jam(); - return; - } - - 
switch(createTabPtr.p->m_coordinatorData.m_requestType){ - case CreateTabReq::CreateTablePrepare:{ - - if(createTabPtr.p->m_errorCode != 0){ - jam(); - /** - * Failed to prepare on atleast one node -> abort on all - */ - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ; - createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableDrop; - ndbrequire(tmp.init(rg, createTabPtr.p->key)); - - CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = createTabPtr.p->key; - req->requestType = CreateTabReq::CreateTableDrop; - - sendSignal(rg, GSN_CREATE_TAB_REQ, signal, - CreateTabReq::SignalLength, JBB); - return; - } - - /** - * Lock mutex before commiting table - */ - Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex); - Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_locked), - createTabPtr.p->key}; - - ndbrequire(mutex.lock(c)); - return; - } - case CreateTabReq::CreateTableCommit:{ - jam(); - ndbrequire(createTabPtr.p->m_errorCode == 0); - - /** - * Unlock mutex before commiting table - */ - Mutex mutex(signal, c_mutexMgr, createTabPtr.p->m_startLcpMutex); - Callback c = { safe_cast(&Dbdict::createTab_startLcpMutex_unlocked), - createTabPtr.p->key}; - mutex.unlock(c); - return; - } - case CreateTabReq::CreateTableDrop:{ - jam(); - CreateTableRef * const ref = (CreateTableRef*)signal->getDataPtr(); - ref->senderRef = reference(); - ref->senderData = createTabPtr.p->m_senderData; - ref->errorCode = createTabPtr.p->m_errorCode; - ref->masterNodeId = c_masterNodeId; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = 0; - - //@todo check api failed - sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal, - CreateTableRef::SignalLength, JBB); - releaseCreateTableOp(signal,createTabPtr); - c_blockState = BS_IDLE; - return; - } - } - ndbrequire(false); -} - -void -Dbdict::createTab_startLcpMutex_locked(Signal* signal, - Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ; - createTabPtr.p->m_coordinatorData.m_requestType = CreateTabReq::CreateTableCommit; - SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter); - tmp.init(rg, GSN_CREATE_TAB_REF, createTabPtr.p->key); - - CreateTabReq * const req = (CreateTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = createTabPtr.p->key; - req->requestType = CreateTabReq::CreateTableCommit; - - sendSignal(rg, GSN_CREATE_TAB_REQ, signal, - CreateTabReq::SignalLength, JBB); -} - -void -Dbdict::createTab_startLcpMutex_unlocked(Signal* signal, - Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - createTabPtr.p->m_startLcpMutex.release(c_mutexMgr); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - - CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createTabPtr.p->m_senderData; - conf->tableId = createTabPtr.p->m_tablePtrI; - conf->tableVersion = tabPtr.p->tableVersion; - - //@todo check api failed - 
sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_CONF, signal, - CreateTableConf::SignalLength, JBB); - releaseCreateTableOp(signal,createTabPtr); - c_blockState = BS_IDLE; - return; -} - -/*********************************************************** - * CreateTable participant code - **********************************************************/ -void -Dbdict::execCREATE_TAB_REQ(Signal* signal){ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - CreateTabReq * const req = (CreateTabReq*)signal->getDataPtr(); - - CreateTabReq::RequestType rt = (CreateTabReq::RequestType)req->requestType; - switch(rt){ - case CreateTabReq::CreateTablePrepare: - CRASH_INSERTION2(6003, getOwnNodeId() != c_masterNodeId); - createTab_prepare(signal, req); - return; - case CreateTabReq::CreateTableCommit: - CRASH_INSERTION2(6004, getOwnNodeId() != c_masterNodeId); - createTab_commit(signal, req); - return; - case CreateTabReq::CreateTableDrop: - CRASH_INSERTION2(6005, getOwnNodeId() != c_masterNodeId); - createTab_drop(signal, req); - return; - } - ndbrequire(false); -} - -void -Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){ - - const Uint32 gci = req->gci; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - - SegmentedSectionPtr tabInfoPtr; - signal->getSection(tabInfoPtr, CreateTabReq::DICT_TAB_INFO); - - CreateTableRecordPtr createTabPtr; - if(req->senderRef == reference()){ - jam(); - ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData)); - } else { - jam(); - c_opCreateTable.seize(createTabPtr); - - ndbrequire(!createTabPtr.isNull()); - - createTabPtr.p->key = req->senderData; - c_opCreateTable.add(createTabPtr); - createTabPtr.p->m_errorCode = 0; - createTabPtr.p->m_tablePtrI = tableId; - createTabPtr.p->m_coordinatorRef = req->senderRef; - createTabPtr.p->m_senderRef = req->clientRef; - createTabPtr.p->m_senderData = req->clientData; - createTabPtr.p->m_dihAddFragPtr = RNIL; - - /** - * Put data into table record - */ - ParseDictTabInfoRecord parseRecord; - parseRecord.requestType = DictTabInfo::AddTableFromDict; - parseRecord.errorCode = 0; - - SimplePropertiesSectionReader r(tabInfoPtr, getSectionSegmentPool()); - - handleTabInfoInit(r, &parseRecord); - - ndbrequire(parseRecord.errorCode == 0); - } - - ndbrequire(!createTabPtr.isNull()); - - SegmentedSectionPtr fragPtr; - signal->getSection(fragPtr, CreateTabReq::FRAGMENTATION); - - createTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i; - createTabPtr.p->m_fragmentsPtrI = fragPtr.i; - - signal->header.m_noOfSections = 0; - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, tableId); - tabPtr.p->packedSize = tabInfoPtr.sz; - tabPtr.p->tableVersion = tableVersion; - tabPtr.p->gciTableCreated = gci; - - SchemaFile::TableEntry tabEntry; - tabEntry.m_tableVersion = tableVersion; - tabEntry.m_tableType = tabPtr.p->tableType; - tabEntry.m_tableState = SchemaFile::ADD_STARTED; - tabEntry.m_gcp = gci; - tabEntry.m_info_words = tabInfoPtr.sz; - memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused)); - - Callback callback; - callback.m_callbackData = createTabPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::createTab_writeSchemaConf1); - - bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); - updateSchemaState(signal, tableId, &tabEntry, &callback, savetodisk); -} - -void getSection(SegmentedSectionPtr & ptr, Uint32 i); - -void -Dbdict::createTab_writeSchemaConf1(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - 
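
createTab_prepare above records the new table in the schema file as ADD_STARTED, but only writes anything to disk when the table is not flagged TR_Temporary; temporary tables skip the table-file write entirely and end up in TEMPORARY_TABLE_COMMITTED at commit time instead of TABLE_ADD_COMMITTED. A small sketch of that state/write selection (the state names follow the removed code, the helper itself is illustrative):

    // Schema-file states used by the removed code (subset).
    enum class SchemaState {
      AddStarted,
      TableAddCommitted,
      TemporaryTableCommitted
    };

    struct SchemaDecision {
      SchemaState state;
      bool        writeToDisk;   // whether the schema/table file is written
    };

    // Commit-time decision: durable tables become TABLE_ADD_COMMITTED and are
    // written to disk, temporary tables are only committed in memory.
    SchemaDecision commitNewTable(bool isTemporary)
    {
      if (isTemporary)
        return { SchemaState::TemporaryTableCommitted, false };
      return { SchemaState::TableAddCommitted, true };
    }
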
CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - Callback callback; - callback.m_callbackData = createTabPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::createTab_writeTableConf); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); - if (savetodisk) - { - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI); - writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback); - } - else - { - execute(signal, callback, 0); - } -#if 0 - createTabPtr.p->m_tabInfoPtrI = RNIL; - signal->setSection(tabInfoPtr, 0); - releaseSections(signal); -#endif -} - -void -Dbdict::createTab_writeTableConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - SegmentedSectionPtr fragDataPtr; - getSection(fragDataPtr, createTabPtr.p->m_fragmentsPtrI); - - Callback callback; - callback.m_callbackData = callbackData; - callback.m_callbackFunction = - safe_cast(&Dbdict::createTab_dihComplete); - - createTab_dih(signal, createTabPtr, fragDataPtr, &callback); -} - -void -Dbdict::createTab_dih(Signal* signal, - CreateTableRecordPtr createTabPtr, - SegmentedSectionPtr fragDataPtr, - Callback * c){ - jam(); - - createTabPtr.p->m_callback = * c; - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - - DiAddTabReq * req = (DiAddTabReq*)signal->getDataPtrSend(); - req->connectPtr = createTabPtr.p->key; - req->tableId = tabPtr.i; - req->fragType = tabPtr.p->fragmentType; - req->kValue = tabPtr.p->kValue; - req->noOfReplicas = 0; - req->loggedTable = !!(tabPtr.p->m_bits & TableRecord::TR_Logged); - req->tableType = tabPtr.p->tableType; - req->schemaVersion = tabPtr.p->tableVersion; - req->primaryTableId = tabPtr.p->primaryTableId; - req->temporaryTable = !!(tabPtr.p->m_bits & TableRecord::TR_Temporary); - -/* - Behöver fiska upp fragDataPtr från table object istället -*/ - if(!fragDataPtr.isNull()){ - signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION); - } - - sendSignal(DBDIH_REF, GSN_DIADDTABREQ, signal, - DiAddTabReq::SignalLength, JBB); - - /** - * Create KeyDescriptor - */ - KeyDescriptor* desc= g_key_descriptor_pool.getPtr(tabPtr.i); - new (desc) KeyDescriptor(); - - Uint32 key = 0; - Ptr attrPtr; - LocalDLFifoList list(c_attributeRecordPool, - tabPtr.p->m_attributes); - for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)) - { - AttributeRecord* aRec = attrPtr.p; - if (aRec->tupleKey) - { - Uint32 attr = aRec->attributeDescriptor; - - desc->noOfKeyAttr ++; - desc->keyAttr[key].attributeDescriptor = attr; - Uint32 csNumber = (aRec->extPrecision >> 16); - if (csNumber) - { - desc->keyAttr[key].charsetInfo = all_charsets[csNumber]; - ndbrequire(all_charsets[csNumber] != 0); - desc->hasCharAttr = 1; - } - else - { - desc->keyAttr[key].charsetInfo = 0; - } - if (AttributeDescriptor::getDKey(attr)) - { - desc->noOfDistrKeys ++; - } - if (AttributeDescriptor::getArrayType(attr) != NDB_ARRAYTYPE_FIXED) - { - desc->noOfVarKeys ++; - } - key++; - } - } - ndbrequire(key == tabPtr.p->noOfPrimkey); -} - -static -void -calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits, - Uint32 fid, Uint32 totalFragments) -{ - Uint32 distrBits = 0; - Uint32 pageBits = 0; - - Uint32 tmp = 1; - while (tmp < totalFragments) { - jam(); - tmp <<= 1; - 
distrBits++; - }//while -#ifdef ndb_classical_lhdistrbits - if (tmp != totalFragments) { - tmp >>= 1; - if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) { - distrBits--; - }//if - }//if -#endif - * lhPageBits = pageBits; - * lhDistrBits = distrBits; - -}//calcLHbits() - - -void -Dbdict::execADD_FRAGREQ(Signal* signal) { - jamEntry(); - - AddFragReq * const req = (AddFragReq*)signal->getDataPtr(); - - Uint32 dihPtr = req->dihPtr; - Uint32 senderData = req->senderData; - Uint32 tableId = req->tableId; - Uint32 fragId = req->fragmentId; - Uint32 node = req->nodeId; - Uint32 lcpNo = req->nextLCP; - Uint32 fragCount = req->totalFragments; - Uint32 requestInfo = req->requestInfo; - Uint32 startGci = req->startGci; - Uint32 logPart = req->logPartId; - - ndbrequire(node == getOwnNodeId()); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, senderData)); - - createTabPtr.p->m_dihAddFragPtr = dihPtr; - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, tableId); - -#if 0 - tabPtr.p->gciTableCreated = (startGci > tabPtr.p->gciTableCreated ? startGci: - startGci > tabPtr.p->gciTableCreated); -#endif - - /** - * Calc lh3PageBits - */ - Uint32 lhDistrBits = 0; - Uint32 lhPageBits = 0; - ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount); - - Uint64 maxRows = tabPtr.p->maxRowsLow + - (((Uint64)tabPtr.p->maxRowsHigh) << 32); - Uint64 minRows = tabPtr.p->minRowsLow + - (((Uint64)tabPtr.p->minRowsHigh) << 32); - maxRows = (maxRows + fragCount - 1) / fragCount; - minRows = (minRows + fragCount - 1) / fragCount; - - { - LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend(); - req->senderData = senderData; - req->senderRef = reference(); - req->fragmentId = fragId; - req->requestInfo = requestInfo; - req->tableId = tableId; - req->localKeyLength = tabPtr.p->localKeyLen; - req->maxLoadFactor = tabPtr.p->maxLoadFactor; - req->minLoadFactor = tabPtr.p->minLoadFactor; - req->kValue = tabPtr.p->kValue; - req->lh3DistrBits = 0; //lhDistrBits; - req->lh3PageBits = 0; //lhPageBits; - req->noOfAttributes = tabPtr.p->noOfAttributes; - req->noOfNullAttributes = tabPtr.p->noOfNullBits; - req->maxRowsLow = maxRows & 0xFFFFFFFF; - req->maxRowsHigh = maxRows >> 32; - req->minRowsLow = minRows & 0xFFFFFFFF; - req->minRowsHigh = minRows >> 32; - req->schemaVersion = tabPtr.p->tableVersion; - Uint32 keyLen = tabPtr.p->tupKeyLength; - req->keyLength = keyLen; // wl-2066 no more "long keys" - req->nextLCP = lcpNo; - - req->noOfKeyAttr = tabPtr.p->noOfPrimkey; - req->noOfCharsets = tabPtr.p->noOfCharsets; - req->checksumIndicator = 1; - req->GCPIndicator = 1; - req->startGci = startGci; - req->tableType = tabPtr.p->tableType; - req->primaryTableId = tabPtr.p->primaryTableId; - req->tablespace_id= tabPtr.p->m_tablespace_id; - req->logPartId = logPart; - req->forceVarPartFlag = !!(tabPtr.p->m_bits& TableRecord::TR_ForceVarPart); - sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal, - LqhFragReq::SignalLength, JBB); - } -} - -void -Dbdict::execLQHFRAGREF(Signal * signal){ - jamEntry(); - LqhFragRef * const ref = (LqhFragRef*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData)); - - createTabPtr.p->setErrorCode(ref->errorCode); - - { - AddFragRef * const ref = (AddFragRef*)signal->getDataPtr(); - ref->dihPtr = createTabPtr.p->m_dihAddFragPtr; - sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal, - AddFragRef::SignalLength, JBB); - } -} - -void -Dbdict::execLQHFRAGCONF(Signal * signal){ - jamEntry(); - 
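
Two small computations in the removed code above restate cleanly on their own: calcLHbits() derives the number of linear-hash distribution bits as the smallest power of two covering the fragment count, and execADD_FRAGREQ() splits the table's min/max row hints evenly across fragments with a rounding-up division. A self-contained sketch of both (plain C++, no NDB types; fragCount is assumed non-zero):

    #include <cstdint>

    // Smallest number of bits such that (1 << bits) >= totalFragments,
    // mirroring the while-loop in the removed calcLHbits().
    uint32_t lhDistrBits(uint32_t totalFragments)
    {
      uint32_t bits = 0;
      uint64_t tmp = 1;
      while (tmp < totalFragments) {
        tmp <<= 1;
        ++bits;
      }
      return bits;
    }

    // Rows-per-fragment hint, rounded up, as in execADD_FRAGREQ():
    // (rows + fragCount - 1) / fragCount.
    uint64_t rowsPerFragment(uint64_t totalRows, uint32_t fragCount)
    {
      return (totalRows + fragCount - 1) / fragCount;
    }

For example, lhDistrBits(6) is 3 and rowsPerFragment(100, 6) is 17, matching what the removed per-fragment LqhFragReq fields would carry.
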
LqhFragConf * const conf = (LqhFragConf*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData)); - - createTabPtr.p->m_lqhFragPtr = conf->lqhFragPtr; - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - sendLQHADDATTRREQ(signal, createTabPtr, tabPtr.p->m_attributes.firstItem); -} - -void -Dbdict::sendLQHADDATTRREQ(Signal* signal, - CreateTableRecordPtr createTabPtr, - Uint32 attributePtrI){ - jam(); - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtrSend(); - Uint32 i = 0; - for(i = 0; iattributes[i]; - entry.attrId = attrPtr.p->attributeId; - entry.attrDescriptor = attrPtr.p->attributeDescriptor; - entry.extTypeInfo = 0; - // charset number passed to TUP, TUX in upper half - entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF); - if (tabPtr.p->isIndex()) { - Uint32 primaryAttrId; - if (attrPtr.p->nextList != RNIL) { - getIndexAttr(tabPtr, attributePtrI, &primaryAttrId); - } else { - primaryAttrId = ZNIL; - if (tabPtr.p->isOrderedIndex()) - entry.attrId = 0; // attribute goes to TUP - } - entry.attrId |= (primaryAttrId << 16); - } - attributePtrI = attrPtr.p->nextList; - } - req->lqhFragPtr = createTabPtr.p->m_lqhFragPtr; - req->senderData = createTabPtr.p->key; - req->senderAttrPtr = attributePtrI; - req->noOfAttributes = i; - - sendSignal(DBLQH_REF, GSN_LQHADDATTREQ, signal, - LqhAddAttrReq::HeaderLength + LqhAddAttrReq::EntryLength * i, JBB); -} - -void -Dbdict::execLQHADDATTREF(Signal * signal){ - jamEntry(); - LqhAddAttrRef * const ref = (LqhAddAttrRef*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData)); - - createTabPtr.p->setErrorCode(ref->errorCode); - - { - AddFragRef * const ref = (AddFragRef*)signal->getDataPtr(); - ref->dihPtr = createTabPtr.p->m_dihAddFragPtr; - sendSignal(DBDIH_REF, GSN_ADD_FRAGREF, signal, - AddFragRef::SignalLength, JBB); - } - -} - -void -Dbdict::execLQHADDATTCONF(Signal * signal){ - jamEntry(); - LqhAddAttrConf * const conf = (LqhAddAttrConf*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData)); - - const Uint32 fragId = conf->fragId; - const Uint32 nextAttrPtr = conf->senderAttrPtr; - if(nextAttrPtr != RNIL){ - jam(); - sendLQHADDATTRREQ(signal, createTabPtr, nextAttrPtr); - return; - } - - { - AddFragConf * const conf = (AddFragConf*)signal->getDataPtr(); - conf->dihPtr = createTabPtr.p->m_dihAddFragPtr; - conf->fragId = fragId; - sendSignal(DBDIH_REF, GSN_ADD_FRAGCONF, signal, - AddFragConf::SignalLength, JBB); - } -} - -void -Dbdict::execDIADDTABREF(Signal* signal){ - jam(); - - DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, ref->senderData)); - - createTabPtr.p->setErrorCode(ref->errorCode); - execute(signal, createTabPtr.p->m_callback, 0); -} - -void -Dbdict::execDIADDTABCONF(Signal* signal){ - jam(); - - DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, conf->senderData)); - - signal->theData[0] = createTabPtr.p->key; - signal->theData[1] = reference(); - signal->theData[2] = createTabPtr.p->m_tablePtrI; - - if(createTabPtr.p->m_dihAddFragPtr != RNIL){ - jam(); - - /** - * We did 
perform at least one LQHFRAGREQ - */ - sendSignal(DBLQH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB); - return; - } else { - /** - * No local fragment (i.e. no LQHFRAGREQ) - */ - execute(signal, createTabPtr.p->m_callback, 0); - return; - //sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB); - } -} - -void -Dbdict::execTAB_COMMITREF(Signal* signal) { - jamEntry(); - ndbrequire(false); -}//execTAB_COMMITREF() - -void -Dbdict::execTAB_COMMITCONF(Signal* signal){ - jamEntry(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[0])); - - if(refToBlock(signal->getSendersBlockRef()) == DBLQH){ - - execute(signal, createTabPtr.p->m_callback, 0); - return; - } - - if(refToBlock(signal->getSendersBlockRef()) == DBDIH){ - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - - signal->theData[0] = tabPtr.i; - signal->theData[1] = tabPtr.p->tableVersion; - signal->theData[2] = (Uint32)!!(tabPtr.p->m_bits & TableRecord::TR_Logged); - signal->theData[3] = reference(); - signal->theData[4] = (Uint32)tabPtr.p->tableType; - signal->theData[5] = createTabPtr.p->key; - signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey; - signal->theData[7] = (Uint32)tabPtr.p->singleUserMode; - - sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 8, JBB); - return; - } - - ndbrequire(false); -} - -void -Dbdict::createTab_dihComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - //@todo check for master failed - - if(createTabPtr.p->m_errorCode == 0){ - jam(); - - CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createTabPtr.p->key; - sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF, - signal, CreateTabConf::SignalLength, JBB); - return; - } - - CreateTabRef * const ref = (CreateTabRef*)signal->getDataPtr(); - ref->senderRef = reference(); - ref->senderData = createTabPtr.p->key; - ref->errorCode = createTabPtr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->errorStatus = 0; - - sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_REF, - signal, CreateTabRef::SignalLength, JBB); -} - -void -Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, req->senderData)); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); - - SchemaFile::TableEntry tabEntry; - tabEntry.m_tableVersion = tabPtr.p->tableVersion; - tabEntry.m_tableType = tabPtr.p->tableType; - if (savetodisk) - tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED; - else - tabEntry.m_tableState = SchemaFile::TEMPORARY_TABLE_COMMITTED; - - tabEntry.m_gcp = tabPtr.p->gciTableCreated; - tabEntry.m_info_words = tabPtr.p->packedSize; - memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused)); - - Callback callback; - callback.m_callbackData = createTabPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::createTab_writeSchemaConf2); - - updateSchemaState(signal, tabPtr.i, &tabEntry, &callback, savetodisk); -} - -void -Dbdict::createTab_writeSchemaConf2(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - 
Callback c; - c.m_callbackData = callbackData; - c.m_callbackFunction = safe_cast(&Dbdict::createTab_alterComplete); - alterTab_activate(signal, createTabPtr, &c); -} - -void -Dbdict::createTab_alterComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - tabPtr.p->tabState = TableRecord::DEFINED; - - //@todo check error - //@todo check master failed - - CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createTabPtr.p->key; - { - CreateTabConf tmp= *conf; - conf->senderData = createTabPtr.p->m_tablePtrI; -#if 0 - signal->header.m_noOfSections = 1; - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI); - signal->setSection(tabInfoPtr, 0); -#endif - sendSignal(SUMA_REF, GSN_CREATE_TAB_CONF, signal, - CreateTabConf::SignalLength, JBB); - *conf= tmp; -#if 0 - signal->header.m_noOfSections = 0; -#endif - } - sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF, - signal, CreateTabConf::SignalLength, JBB); - - if(createTabPtr.p->m_coordinatorRef != reference()){ - jam(); - releaseCreateTableOp(signal,createTabPtr); - } -} - -void -Dbdict::createTab_drop(Signal* signal, CreateTabReq * req){ - jam(); - - const Uint32 key = req->senderData; - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, key)); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - tabPtr.p->tabState = TableRecord::DROPPING; - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.seize(dropTabPtr)); - - dropTabPtr.p->key = key; - c_opDropTable.add(dropTabPtr); - - dropTabPtr.p->m_errorCode = 0; - dropTabPtr.p->m_request.tableId = createTabPtr.p->m_tablePtrI; - dropTabPtr.p->m_requestType = DropTabReq::CreateTabDrop; - dropTabPtr.p->m_coordinatorRef = createTabPtr.p->m_coordinatorRef; - dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ; - - dropTabPtr.p->m_participantData.m_block = 0; - dropTabPtr.p->m_participantData.m_callback.m_callbackData = req->senderData; - dropTabPtr.p->m_participantData.m_callback.m_callbackFunction = - safe_cast(&Dbdict::createTab_dropComplete); - dropTab_nextStep(signal, dropTabPtr); - - if (tabPtr.p->m_tablespace_id != RNIL) - { - FilegroupPtr ptr; - ndbrequire(c_filegroup_hash.find(ptr, tabPtr.p->m_tablespace_id)); - decrease_ref_count(ptr.p->m_obj_ptr_i); - } -} - -void -Dbdict::createTab_dropComplete(Signal* signal, - Uint32 callbackData, - Uint32 returnCode){ - jam(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, callbackData)); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, callbackData)); - - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - - releaseTableObject(tabPtr.i); - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tabPtr.i); - tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; - - //@todo check error - //@todo check master failed - - CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createTabPtr.p->key; - sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF, - signal, 
CreateTabConf::SignalLength, JBB); - - if(createTabPtr.p->m_coordinatorRef != reference()){ - jam(); - releaseCreateTableOp(signal,createTabPtr); - } - - c_opDropTable.release(dropTabPtr); -} - -void -Dbdict::alterTab_activate(Signal* signal, CreateTableRecordPtr createTabPtr, - Callback * c){ - - createTabPtr.p->m_callback = * c; - - signal->theData[0] = createTabPtr.p->key; - signal->theData[1] = reference(); - signal->theData[2] = createTabPtr.p->m_tablePtrI; - sendSignal(DBDIH_REF, GSN_TAB_COMMITREQ, signal, 3, JBB); -} - -void -Dbdict::execTC_SCHVERCONF(Signal* signal){ - jamEntry(); - - CreateTableRecordPtr createTabPtr; - ndbrequire(c_opCreateTable.find(createTabPtr, signal->theData[1])); - - execute(signal, createTabPtr.p->m_callback, 0); -} - -#define tabRequire(cond, error) \ - if (!(cond)) { \ - jam(); \ - parseP->errorCode = error; parseP->errorLine = __LINE__; \ - parseP->errorKey = it.getKey(); \ - return; \ - }//if - -// handleAddTableFailure(signal, __LINE__, allocatedTable); - -Dbdict::DictObject * -Dbdict::get_object(const char * name, Uint32 len, Uint32 hash){ - DictObject key; - key.m_key.m_name_ptr = name; - key.m_key.m_name_len = len; - key.m_key.m_pool = &c_rope_pool; - key.m_name.m_hash = hash; - Ptr old_ptr; - c_obj_hash.find(old_ptr, key); - return old_ptr.p; -} - -void -Dbdict::release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p){ - Rope name(c_rope_pool, obj_ptr_p->m_name); - name.erase(); - - Ptr ptr = { obj_ptr_p, obj_ptr_i }; - c_obj_hash.release(ptr); -} - -void -Dbdict::increase_ref_count(Uint32 obj_ptr_i) -{ - DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i); - ptr->m_ref_count++; -} - -void -Dbdict::decrease_ref_count(Uint32 obj_ptr_i) -{ - DictObject* ptr = c_obj_pool.getPtr(obj_ptr_i); - ndbrequire(ptr->m_ref_count); - ptr->m_ref_count--; -} - -void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, - ParseDictTabInfoRecord * parseP, - bool checkExist) -{ -/* ---------------------------------------------------------------- */ -// We always start by handling table name since this must be the first -// item in the list. Through the table name we can derive if it is a -// correct name, a new name or an already existing table. -/* ---------------------------------------------------------------- */ - - it.first(); - - SimpleProperties::UnpackStatus status; - c_tableDesc.init(); - status = SimpleProperties::unpack(it, &c_tableDesc, - DictTabInfo::TableMapping, - DictTabInfo::TableMappingSize, - true, true); - - if(status != SimpleProperties::Break){ - parseP->errorCode = CreateTableRef::InvalidFormat; - parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - - if(parseP->requestType == DictTabInfo::AlterTableFromAPI) - { - ndbrequire(!checkExist); - } - if(!checkExist) - { - ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI); - } - - /* ---------------------------------------------------------------- */ - // Verify that table name is an allowed table name. 
- // TODO - /* ---------------------------------------------------------------- */ - const Uint32 tableNameLength = strlen(c_tableDesc.TableName) + 1; - const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength); - - if(checkExist){ - jam(); - tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0, - CreateTableRef::TableAlreadyExist); - } - - TableRecordPtr tablePtr; - switch (parseP->requestType) { - case DictTabInfo::CreateTableFromAPI: { - jam(); - } - case DictTabInfo::AlterTableFromAPI:{ - jam(); - tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId); - /* ---------------------------------------------------------------- */ - // Check if no free tables existed. - /* ---------------------------------------------------------------- */ - tabRequire(tablePtr.i != RNIL, CreateTableRef::NoMoreTableRecords); - - c_tableRecordPool.getPtr(tablePtr); - break; - } - case DictTabInfo::AddTableFromDict: - case DictTabInfo::ReadTableFromDiskSR: - case DictTabInfo::GetTabInfoConf: - { -/* ---------------------------------------------------------------- */ -// Get table id and check that table doesn't already exist -/* ---------------------------------------------------------------- */ - tablePtr.i = c_tableDesc.TableId; - - if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) { - ndbrequire(tablePtr.i == c_restartRecord.activeTable); - }//if - if (parseP->requestType == DictTabInfo::GetTabInfoConf) { - ndbrequire(tablePtr.i == c_restartRecord.activeTable); - }//if - - c_tableRecordPool.getPtr(tablePtr); - ndbrequire(tablePtr.p->tabState == TableRecord::NOT_DEFINED); - - //Uint32 oldTableVersion = tablePtr.p->tableVersion; - initialiseTableRecord(tablePtr); - if (parseP->requestType == DictTabInfo::AddTableFromDict) { - jam(); - tablePtr.p->tabState = TableRecord::DEFINING; - }//if - -/* ---------------------------------------------------------------- */ -// Set table version -/* ---------------------------------------------------------------- */ - Uint32 tableVersion = c_tableDesc.TableVersion; - tablePtr.p->tableVersion = tableVersion; - - break; - } - default: - ndbrequire(false); - break; - }//switch - parseP->tablePtr = tablePtr; - - { - Rope name(c_rope_pool, tablePtr.p->tableName); - tabRequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash), - CreateTableRef::OutOfStringBuffer); - } - - Ptr obj_ptr; - if (parseP->requestType != DictTabInfo::AlterTableFromAPI) { - jam(); - ndbrequire(c_obj_hash.seize(obj_ptr)); - obj_ptr.p->m_id = tablePtr.i; - obj_ptr.p->m_type = c_tableDesc.TableType; - obj_ptr.p->m_name = tablePtr.p->tableName; - obj_ptr.p->m_ref_count = 0; - c_obj_hash.add(obj_ptr); - tablePtr.p->m_obj_ptr_i = obj_ptr.i; - -#ifdef VM_TRACE - ndbout_c("Dbdict: name=%s,id=%u,obj_ptr_i=%d", - c_tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i); -#endif - } - - // Disallow logging of a temporary table. - tabRequire(!(c_tableDesc.TableTemporaryFlag && c_tableDesc.TableLoggedFlag), - CreateTableRef::NoLoggingTemporaryTable); - - tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes; - tablePtr.p->m_bits |= - (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0); - tablePtr.p->m_bits |= - (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0); - tablePtr.p->m_bits |= - (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0); - tablePtr.p->m_bits |= - (c_tableDesc.TableTemporaryFlag ? TableRecord::TR_Temporary : 0); - tablePtr.p->m_bits |= - (c_tableDesc.ForceVarPartFlag ? 
TableRecord::TR_ForceVarPart : 0); - tablePtr.p->minLoadFactor = c_tableDesc.MinLoadFactor; - tablePtr.p->maxLoadFactor = c_tableDesc.MaxLoadFactor; - tablePtr.p->fragmentType = (DictTabInfo::FragmentType)c_tableDesc.FragmentType; - tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType; - tablePtr.p->kValue = c_tableDesc.TableKValue; - tablePtr.p->fragmentCount = c_tableDesc.FragmentCount; - tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId; - tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow; - tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh; - tablePtr.p->minRowsLow = c_tableDesc.MinRowsLow; - tablePtr.p->minRowsHigh = c_tableDesc.MinRowsHigh; - tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; - tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; - tablePtr.p->singleUserMode = c_tableDesc.SingleUserMode; - - { - Rope frm(c_rope_pool, tablePtr.p->frmData); - tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen), - CreateTableRef::OutOfStringBuffer); - Rope range(c_rope_pool, tablePtr.p->rangeData); - tabRequire(range.assign(c_tableDesc.RangeListData, - c_tableDesc.RangeListDataLen), - CreateTableRef::OutOfStringBuffer); - Rope fd(c_rope_pool, tablePtr.p->ngData); - tabRequire(fd.assign((const char*)c_tableDesc.FragmentData, - c_tableDesc.FragmentDataLen), - CreateTableRef::OutOfStringBuffer); - Rope ts(c_rope_pool, tablePtr.p->tsData); - tabRequire(ts.assign((const char*)c_tableDesc.TablespaceData, - c_tableDesc.TablespaceDataLen), - CreateTableRef::OutOfStringBuffer); - } - - c_fragDataLen = c_tableDesc.FragmentDataLen; - memcpy(c_fragData, c_tableDesc.FragmentData, - c_tableDesc.FragmentDataLen); - - if(c_tableDesc.PrimaryTableId != RNIL) { - - tablePtr.p->primaryTableId = c_tableDesc.PrimaryTableId; - tablePtr.p->indexState = (TableRecord::IndexState)c_tableDesc.IndexState; - tablePtr.p->insertTriggerId = c_tableDesc.InsertTriggerId; - tablePtr.p->updateTriggerId = c_tableDesc.UpdateTriggerId; - tablePtr.p->deleteTriggerId = c_tableDesc.DeleteTriggerId; - tablePtr.p->customTriggerId = c_tableDesc.CustomTriggerId; - } else { - tablePtr.p->primaryTableId = RNIL; - tablePtr.p->indexState = TableRecord::IS_UNDEFINED; - tablePtr.p->insertTriggerId = RNIL; - tablePtr.p->updateTriggerId = RNIL; - tablePtr.p->deleteTriggerId = RNIL; - tablePtr.p->customTriggerId = RNIL; - } - tablePtr.p->buildTriggerId = RNIL; - tablePtr.p->indexLocal = 0; - - handleTabInfo(it, parseP, c_tableDesc); - - if(parseP->errorCode != 0) - { - /** - * Release table - */ - releaseTableObject(tablePtr.i, checkExist); - return; - } - - if (checkExist && tablePtr.p->m_tablespace_id != RNIL) - { - /** - * Increase ref count - */ - FilegroupPtr ptr; - ndbrequire(c_filegroup_hash.find(ptr, tablePtr.p->m_tablespace_id)); - increase_ref_count(ptr.p->m_obj_ptr_i); - } -}//handleTabInfoInit() - -void Dbdict::handleTabInfo(SimpleProperties::Reader & it, - ParseDictTabInfoRecord * parseP, - DictTabInfo::Table &tableDesc) -{ - TableRecordPtr tablePtr = parseP->tablePtr; - - SimpleProperties::UnpackStatus status; - - Uint32 keyCount = 0; - Uint32 keyLength = 0; - Uint32 attrCount = tablePtr.p->noOfAttributes; - Uint32 nullCount = 0; - Uint32 nullBits = 0; - Uint32 noOfCharsets = 0; - Uint16 charsets[128]; - Uint32 recordLength = 0; - AttributeRecordPtr attrPtr; - c_attributeRecordHash.removeAll(); - - LocalDLFifoList<AttributeRecord> list(c_attributeRecordPool, - tablePtr.p->m_attributes); - - Uint32 counts[] = {0,0,0,0,0}; - - for(Uint32 i = 0; i<attrCount; i++){ - /** - * Attribute Name - */ - DictTabInfo::Attribute attrDesc; attrDesc.init(); - status = SimpleProperties::unpack(it, &attrDesc, - DictTabInfo::AttributeMapping, - DictTabInfo::AttributeMappingSize, - true, true); - - if(status != SimpleProperties::Break){ - parseP->errorCode = CreateTableRef::InvalidFormat; -
parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - - /** - * Check that attribute is not defined twice - */ - const size_t len = strlen(attrDesc.AttributeName)+1; - const Uint32 name_hash = Rope::hash(attrDesc.AttributeName, len); - { - AttributeRecord key; - key.m_key.m_name_ptr = attrDesc.AttributeName; - key.m_key.m_name_len = len; - key.attributeName.m_hash = name_hash; - key.m_key.m_pool = &c_rope_pool; - Ptr old_ptr; - c_attributeRecordHash.find(old_ptr, key); - - if(old_ptr.i != RNIL){ - parseP->errorCode = CreateTableRef::AttributeNameTwice; - return; - } - } - - list.seize(attrPtr); - if(attrPtr.i == RNIL){ - jam(); - parseP->errorCode = CreateTableRef::NoMoreAttributeRecords; - return; - } - - new (attrPtr.p) AttributeRecord(); - attrPtr.p->attributeDescriptor = 0x00012255; //Default value - attrPtr.p->tupleKey = 0; - - /** - * TmpAttrib to Attribute mapping - */ - { - Rope name(c_rope_pool, attrPtr.p->attributeName); - if (!name.assign(attrDesc.AttributeName, len, name_hash)) - { - jam(); - parseP->errorCode = CreateTableRef::OutOfStringBuffer; - parseP->errorLine = __LINE__; - return; - } - } - attrPtr.p->attributeId = i; - //attrPtr.p->attributeId = attrDesc.AttributeId; - attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag; - - attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision; - attrPtr.p->extScale = attrDesc.AttributeExtScale; - attrPtr.p->extLength = attrDesc.AttributeExtLength; - // charset in upper half of precision - unsigned csNumber = (attrPtr.p->extPrecision >> 16); - if (csNumber != 0) { - /* - * A new charset is first accessed here on this node. - * TODO use separate thread (e.g. via NDBFS) if need to load from file - */ - CHARSET_INFO* cs = get_charset(csNumber, MYF(0)); - if (cs == NULL) { - parseP->errorCode = CreateTableRef::InvalidCharset; - parseP->errorLine = __LINE__; - return; - } - // XXX should be done somewhere in mysql - all_charsets[cs->number] = cs; - unsigned i = 0; - while (i < noOfCharsets) { - if (charsets[i] == csNumber) - break; - i++; - } - if (i == noOfCharsets) { - noOfCharsets++; - if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) { - parseP->errorCode = CreateTableRef::InvalidFormat; - parseP->errorLine = __LINE__; - return; - } - charsets[i] = csNumber; - } - } - - // compute attribute size and array size - bool translateOk = attrDesc.translateExtType(); - tabRequire(translateOk, CreateTableRef::Inconsistency); - - if(attrDesc.AttributeArraySize > 65535){ - parseP->errorCode = CreateTableRef::ArraySizeTooBig; - parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - - // XXX old test option, remove - if(!attrDesc.AttributeKeyFlag && - tablePtr.i > 1 && - !tablePtr.p->isIndex()) - { - //attrDesc.AttributeStorageType= NDB_STORAGETYPE_DISK; - } - - Uint32 desc = 0; - AttributeDescriptor::setType(desc, attrDesc.AttributeExtType); - AttributeDescriptor::setSize(desc, attrDesc.AttributeSize); - AttributeDescriptor::setArraySize(desc, attrDesc.AttributeArraySize); - AttributeDescriptor::setArrayType(desc, attrDesc.AttributeArrayType); - AttributeDescriptor::setNullable(desc, attrDesc.AttributeNullableFlag); - AttributeDescriptor::setDKey(desc, attrDesc.AttributeDKey); - AttributeDescriptor::setPrimaryKey(desc, attrDesc.AttributeKeyFlag); - AttributeDescriptor::setDiskBased(desc, attrDesc.AttributeStorageType == NDB_STORAGETYPE_DISK); - attrPtr.p->attributeDescriptor = desc; - attrPtr.p->autoIncrement = 
attrDesc.AttributeAutoIncrement; - { - Rope defaultValue(c_rope_pool, attrPtr.p->defaultValue); - defaultValue.assign(attrDesc.AttributeDefaultValue); - } - - keyCount += attrDesc.AttributeKeyFlag; - nullCount += attrDesc.AttributeNullableFlag; - - const Uint32 aSz = (1 << attrDesc.AttributeSize); - Uint32 sz; - if(aSz != 1) - { - sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5; - } - else - { - sz = 0; - nullBits += attrDesc.AttributeArraySize; - } - - if(attrDesc.AttributeArraySize == 0) - { - parseP->errorCode = CreateTableRef::InvalidArraySize; - parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - - recordLength += sz; - if(attrDesc.AttributeKeyFlag){ - keyLength += sz; - - if(attrDesc.AttributeNullableFlag){ - parseP->errorCode = CreateTableRef::NullablePrimaryKey; - parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - } - - c_attributeRecordHash.add(attrPtr); - - int a= AttributeDescriptor::getDiskBased(desc); - int b= AttributeDescriptor::getArrayType(desc); - Uint32 pos= 2*(a ? 1 : 0) + (b == NDB_ARRAYTYPE_FIXED ? 0 : 1); - counts[pos+1]++; - - if(b != NDB_ARRAYTYPE_FIXED && sz == 0) - { - parseP->errorCode = CreateTableRef::VarsizeBitfieldNotSupported; - parseP->status = status; - parseP->errorKey = it.getKey(); - parseP->errorLine = __LINE__; - return; - } - - if(!it.next()) - break; - - if(it.getKey() != DictTabInfo::AttributeName) - break; - }//while - - tablePtr.p->noOfPrimkey = keyCount; - tablePtr.p->noOfNullAttr = nullCount; - tablePtr.p->noOfCharsets = noOfCharsets; - tablePtr.p->tupKeyLength = keyLength; - tablePtr.p->noOfNullBits = nullCount + nullBits; - - tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS, - CreateTableRef::RecordTooBig); - tabRequire(keyLength <= MAX_KEY_SIZE_IN_WORDS, - CreateTableRef::InvalidPrimaryKeySize); - tabRequire(keyLength > 0, - CreateTableRef::InvalidPrimaryKeySize); - - if(tablePtr.p->m_tablespace_id != RNIL || counts[3] || counts[4]) - { - FilegroupPtr tablespacePtr; - if(!c_filegroup_hash.find(tablespacePtr, tablePtr.p->m_tablespace_id)) - { - tabRequire(false, CreateTableRef::InvalidTablespace); - } - - if(tablespacePtr.p->m_type != DictTabInfo::Tablespace) - { - tabRequire(false, CreateTableRef::NotATablespace); - } - - if(tablespacePtr.p->m_version != tableDesc.TablespaceVersion) - { - tabRequire(false, CreateTableRef::InvalidTablespaceVersion); - } - } -}//handleTabInfo() - - -/* ---------------------------------------------------------------- */ -// DICTTABCONF is sent when participants have received all DICTTABINFO -// and successfully handled it. -// Also sent to self (DICT master) when index table creation ready. -/* ---------------------------------------------------------------- */ -void Dbdict::execCREATE_TABLE_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - - CreateTableConf * const conf = (CreateTableConf *)signal->getDataPtr(); - // assume part of create index operation - OpCreateIndexPtr opPtr; - c_opCreateIndex.find(opPtr, conf->senderData); - ndbrequire(! 
opPtr.isNull()); - opPtr.p->m_request.setIndexId(conf->tableId); - opPtr.p->m_request.setIndexVersion(conf->tableVersion); - createIndex_fromCreateTable(signal, opPtr); -}//execCREATE_TABLE_CONF() - -void Dbdict::execCREATE_TABLE_REF(Signal* signal) -{ - jamEntry(); - - CreateTableRef * const ref = (CreateTableRef *)signal->getDataPtr(); - // assume part of create index operation - OpCreateIndexPtr opPtr; - c_opCreateIndex.find(opPtr, ref->senderData); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - createIndex_fromCreateTable(signal, opPtr); -}//execCREATE_TABLE_REF() - -/* ---------------------------------------------------------------- */ -// New global checkpoint created. -/* ---------------------------------------------------------------- */ -void Dbdict::execWAIT_GCP_CONF(Signal* signal) -{ -#if 0 - TableRecordPtr tablePtr; - jamEntry(); - WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0]; - c_tableRecordPool.getPtr(tablePtr, c_connRecord.connTableId); - tablePtr.p->gciTableCreated = conf->gcp; - sendUpdateSchemaState(signal, - tablePtr.i, - SchemaFile::TABLE_ADD_COMMITTED, - c_connRecord.noOfPagesForTable, - conf->gcp); -#endif -}//execWAIT_GCP_CONF() - -/* ---------------------------------------------------------------- */ -// Refused new global checkpoint. -/* ---------------------------------------------------------------- */ -void Dbdict::execWAIT_GCP_REF(Signal* signal) -{ - jamEntry(); - WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0]; -/* ---------------------------------------------------------------- */ -// Error Handling code needed -/* ---------------------------------------------------------------- */ - char buf[32]; - BaseString::snprintf(buf, sizeof(buf), "WAIT_GCP_REF ErrorCode=%d", - ref->errorCode); - progError(__LINE__, NDBD_EXIT_NDBREQUIRE, buf); -}//execWAIT_GCP_REF() - - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: DROP TABLE -------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code used to drop a table. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ -void -Dbdict::execDROP_TABLE_REQ(Signal* signal){ - jamEntry(); - DropTableReq* req = (DropTableReq*)signal->getDataPtr(); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, req->tableId, false); - if(tablePtr.isNull()){ - jam(); - dropTableRef(signal, req, DropTableRef::NoSuchTable); - return; - } - - if(getOwnNodeId() != c_masterNodeId){ - jam(); - dropTableRef(signal, req, DropTableRef::NotMaster); - return; - } - - if(c_blockState == BS_NODE_RESTART){ - jam(); - dropTableRef(signal, req, DropTableRef::BusyWithNR); - return; - } - - if(c_blockState != BS_IDLE){ - jam(); - dropTableRef(signal, req, DropTableRef::Busy); - return; - } - - if (checkSingleUserMode(signal->getSendersBlockRef())) - { - jam(); - dropTableRef(signal, req, DropTableRef::SingleUser); - return; - } - - const TableRecord::TabState tabState = tablePtr.p->tabState; - bool ok = false; - switch(tabState){ - case TableRecord::NOT_DEFINED: - case TableRecord::DEFINING: - jam(); - dropTableRef(signal, req, DropTableRef::NoSuchTable); - return; - case TableRecord::DEFINED: - ok = true; - jam(); - break; - case TableRecord::PREPARE_DROPPING: - case TableRecord::DROPPING: - jam(); - dropTableRef(signal, req, DropTableRef::DropInProgress); - return; - case TableRecord::BACKUP_ONGOING: - jam(); - dropTableRef(signal, req, DropTableRef::BackupInProgress); - return; - } - ndbrequire(ok); - - if(tablePtr.p->tableVersion != req->tableVersion){ - jam(); - dropTableRef(signal, req, DropTableRef::InvalidTableVersion); - return; - } - - /** - * Seems ok - */ - DropTableRecordPtr dropTabPtr; - c_opDropTable.seize(dropTabPtr); - - if(dropTabPtr.isNull()){ - jam(); - dropTableRef(signal, req, DropTableRef::NoDropTableRecordAvailable); - return; - } - - c_blockState = BS_BUSY; - - dropTabPtr.p->key = ++c_opRecordSequence; - c_opDropTable.add(dropTabPtr); - - dropTabPtr.p->m_request = * req; - dropTabPtr.p->m_errorCode = 0; - dropTabPtr.p->m_requestType = DropTabReq::OnlineDropTab; - dropTabPtr.p->m_coordinatorRef = reference(); - dropTabPtr.p->m_coordinatorData.m_gsn = GSN_PREP_DROP_TAB_REQ; - dropTabPtr.p->m_coordinatorData.m_block = 0; - - Mutex mutex(signal, c_mutexMgr, dropTabPtr.p->m_define_backup_mutex); - Callback c = { safe_cast(&Dbdict::dropTable_backup_mutex_locked), - dropTabPtr.p->key}; - - ndbrequire(mutex.lock(c)); - -} - -void -Dbdict::dropTable_backup_mutex_locked(Signal* signal, - Uint32 callbackData, - Uint32 retValue){ - jamEntry(); - - ndbrequire(retValue == 0); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, callbackData)); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId, true); - - Mutex mutex(signal, c_mutexMgr, dropTabPtr.p->m_define_backup_mutex); - mutex.unlock(); // ignore response - - if(tablePtr.p->tabState == TableRecord::BACKUP_ONGOING) - { - jam(); - dropTableRef(signal, &dropTabPtr.p->m_request, - DropTableRef::BackupInProgress); - - c_blockState = BS_IDLE; - c_opDropTable.release(dropTabPtr); - } - else - { - jam(); - tablePtr.p->tabState = TableRecord::PREPARE_DROPPING; - prepDropTab_nextStep(signal, dropTabPtr); - } -} - -void -Dbdict::dropTableRef(Signal * signal, - DropTableReq * req, DropTableRef::ErrorCode errCode){ - - Uint32 tableId = req->tableId; - Uint32 tabVersion = req->tableVersion; - Uint32 senderData = req->senderData; - Uint32 senderRef = 
req->senderRef; - - DropTableRef * ref = (DropTableRef*)signal->getDataPtrSend(); - ref->tableId = tableId; - ref->tableVersion = tabVersion; - ref->senderData = senderData; - ref->senderRef = reference(); - ref->errorCode = errCode; - ref->masterNodeId = c_masterNodeId; - sendSignal(senderRef, GSN_DROP_TABLE_REF, signal, - DropTableRef::SignalLength, JBB); -} - -void -Dbdict::prepDropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){ - - /** - * No errors currently allowed - */ - ndbrequire(dropTabPtr.p->m_errorCode == 0); - - Uint32 block = 0; - switch(dropTabPtr.p->m_coordinatorData.m_block){ - case 0: - jam(); - block = dropTabPtr.p->m_coordinatorData.m_block = DBDICT; - break; - case DBDICT: - jam(); - block = dropTabPtr.p->m_coordinatorData.m_block = DBLQH; - break; - case DBLQH: - jam(); - block = dropTabPtr.p->m_coordinatorData.m_block = DBTC; - break; - case DBTC: - jam(); - block = dropTabPtr.p->m_coordinatorData.m_block = DBDIH; - break; - case DBDIH: - jam(); - prepDropTab_complete(signal, dropTabPtr); - return; - default: - ndbrequire(false); - } - - PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend(); - prep->senderRef = reference(); - prep->senderData = dropTabPtr.p->key; - prep->tableId = dropTabPtr.p->m_request.tableId; - prep->requestType = dropTabPtr.p->m_requestType; - - dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(block, c_aliveNodes); - sendSignal(rg, GSN_PREP_DROP_TAB_REQ, signal, - PrepDropTabReq::SignalLength, JBB); - -#if 0 - for (Uint32 i = 1; i < MAX_NDB_NODES; i++){ - if(c_aliveNodes.get(i)){ - jam(); - BlockReference ref = numberToRef(block, i); - - dropTabPtr.p->m_coordinatorData.m_signalCounter.setWaitingFor(i); - } - } -#endif -} - -void -Dbdict::execPREP_DROP_TAB_CONF(Signal * signal){ - jamEntry(); - - PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData)); - - ndbrequire(dropTabPtr.p->m_coordinatorRef == reference()); - ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId); - ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ); - - Uint32 nodeId = refToNode(prep->senderRef); - dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId); - - if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){ - jam(); - return; - } - prepDropTab_nextStep(signal, dropTabPtr); -} - -void -Dbdict::execPREP_DROP_TAB_REF(Signal* signal){ - jamEntry(); - - PrepDropTabRef * prep = (PrepDropTabRef*)signal->getDataPtr(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData)); - - ndbrequire(dropTabPtr.p->m_coordinatorRef == reference()); - ndbrequire(dropTabPtr.p->m_request.tableId == prep->tableId); - ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_PREP_DROP_TAB_REQ); - - Uint32 nodeId = refToNode(prep->senderRef); - dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId); - - Uint32 block = refToBlock(prep->senderRef); - if((prep->errorCode == PrepDropTabRef::NoSuchTable && block == DBLQH) || - (prep->errorCode == PrepDropTabRef::NF_FakeErrorREF)){ - jam(); - /** - * Ignore errors: - * 1) no such table and LQH, it might not exists in different LQH's - * 2) node failure... 
- */ - } else { - dropTabPtr.p->setErrorCode((Uint32)prep->errorCode); - } - - if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){ - jam(); - return; - } - prepDropTab_nextStep(signal, dropTabPtr); -} - -void -Dbdict::prepDropTab_complete(Signal* signal, DropTableRecordPtr dropTabPtr){ - jam(); - - dropTabPtr.p->m_coordinatorData.m_gsn = GSN_DROP_TAB_REQ; - dropTabPtr.p->m_coordinatorData.m_block = DBDICT; - - DropTabReq * req = (DropTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = dropTabPtr.p->key; - req->tableId = dropTabPtr.p->m_request.tableId; - req->requestType = dropTabPtr.p->m_requestType; - - dropTabPtr.p->m_coordinatorData.m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_DROP_TAB_REQ, signal, - DropTabReq::SignalLength, JBB); -} - -void -Dbdict::execDROP_TAB_REF(Signal* signal){ - jamEntry(); - - DropTabRef * const req = (DropTabRef*)signal->getDataPtr(); - - Uint32 block = refToBlock(req->senderRef); - ndbrequire(req->errorCode == DropTabRef::NF_FakeErrorREF || - (req->errorCode == DropTabRef::NoSuchTable && - (block == DBTUP || block == DBACC || block == DBLQH))); - - if(block != DBDICT){ - jam(); - ndbrequire(refToNode(req->senderRef) == getOwnNodeId()); - dropTab_localDROP_TAB_CONF(signal); - return; - } - ndbrequire(false); -} - -void -Dbdict::execDROP_TAB_CONF(Signal* signal){ - jamEntry(); - - DropTabConf * const req = (DropTabConf*)signal->getDataPtr(); - - if(refToBlock(req->senderRef) != DBDICT){ - jam(); - ndbrequire(refToNode(req->senderRef) == getOwnNodeId()); - dropTab_localDROP_TAB_CONF(signal); - return; - } - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData)); - - ndbrequire(dropTabPtr.p->m_coordinatorRef == reference()); - ndbrequire(dropTabPtr.p->m_request.tableId == req->tableId); - ndbrequire(dropTabPtr.p->m_coordinatorData.m_gsn == GSN_DROP_TAB_REQ); - - Uint32 nodeId = refToNode(req->senderRef); - dropTabPtr.p->m_coordinatorData.m_signalCounter.clearWaitingFor(nodeId); - - if(!dropTabPtr.p->m_coordinatorData.m_signalCounter.done()){ - jam(); - return; - } - - DropTableConf* conf = (DropTableConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = dropTabPtr.p->m_request.senderData; - conf->tableId = dropTabPtr.p->m_request.tableId; - conf->tableVersion = dropTabPtr.p->m_request.tableVersion; - Uint32 ref = dropTabPtr.p->m_request.senderRef; - sendSignal(ref, GSN_DROP_TABLE_CONF, signal, - DropTableConf::SignalLength, JBB); - - c_opDropTable.release(dropTabPtr); - c_blockState = BS_IDLE; -} - -/** - * DROP TABLE PARTICIPANT CODE - */ -void -Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){ - jamEntry(); - PrepDropTabReq * prep = (PrepDropTabReq*)signal->getDataPtrSend(); - - DropTableRecordPtr dropTabPtr; - if(prep->senderRef == reference()){ - jam(); - ndbrequire(c_opDropTable.find(dropTabPtr, prep->senderData)); - ndbrequire(dropTabPtr.p->m_requestType == prep->requestType); - } else { - jam(); - c_opDropTable.seize(dropTabPtr); - if(!dropTabPtr.isNull()){ - dropTabPtr.p->key = prep->senderData; - c_opDropTable.add(dropTabPtr); - } - } - - ndbrequire(!dropTabPtr.isNull()); - - dropTabPtr.p->m_errorCode = 0; - dropTabPtr.p->m_request.tableId = prep->tableId; - dropTabPtr.p->m_requestType = prep->requestType; - dropTabPtr.p->m_coordinatorRef = prep->senderRef; - dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_REQ; - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, 
prep->tableId); - tablePtr.p->tabState = TableRecord::PREPARE_DROPPING; - - /** - * Modify schema - */ - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tablePtr.i); - SchemaFile::TableState tabState = - (SchemaFile::TableState)tableEntry->m_tableState; - ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED || - tabState == SchemaFile::ALTER_TABLE_COMMITTED || - tabState == SchemaFile::TEMPORARY_TABLE_COMMITTED); - tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED; - computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES); - - bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); - Callback callback; - callback.m_callbackData = dropTabPtr.p->key; - callback.m_callbackFunction = safe_cast(&Dbdict::prepDropTab_writeSchemaConf); - if (savetodisk) - { - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = false; - c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback = callback; - startWriteSchemaFile(signal); - } - else - { - execute(signal, callback, 0); - } -} - -void -Dbdict::prepDropTab_writeSchemaConf(Signal* signal, - Uint32 dropTabPtrI, - Uint32 returnCode){ - jam(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI)); - - ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_REQ); - - /** - * There probably should be node fail handlign here - * - * To check that coordinator hasn't died - */ - - PrepDropTabConf * prep = (PrepDropTabConf*)signal->getDataPtr(); - prep->senderRef = reference(); - prep->senderData = dropTabPtrI; - prep->tableId = dropTabPtr.p->m_request.tableId; - - dropTabPtr.p->m_participantData.m_gsn = GSN_PREP_DROP_TAB_CONF; - sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_PREP_DROP_TAB_CONF, signal, - PrepDropTabConf::SignalLength, JBB); -} - -void -Dbdict::execDROP_TAB_REQ(Signal* signal){ - jamEntry(); - DropTabReq * req = (DropTabReq*)signal->getDataPtrSend(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, req->senderData)); - - ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_PREP_DROP_TAB_CONF); - dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_REQ; - - ndbrequire(dropTabPtr.p->m_requestType == req->requestType); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId); - tablePtr.p->tabState = TableRecord::DROPPING; - - dropTabPtr.p->m_participantData.m_block = 0; - dropTabPtr.p->m_participantData.m_callback.m_callbackData = dropTabPtr.p->key; - dropTabPtr.p->m_participantData.m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropTab_complete); - dropTab_nextStep(signal, dropTabPtr); - - if (tablePtr.p->m_tablespace_id != RNIL) - { - FilegroupPtr ptr; - ndbrequire(c_filegroup_hash.find(ptr, tablePtr.p->m_tablespace_id)); - decrease_ref_count(ptr.p->m_obj_ptr_i); - } -} - -#include - -void -Dbdict::dropTab_nextStep(Signal* signal, DropTableRecordPtr dropTabPtr){ - - /** - * No errors currently allowed - */ - ndbrequire(dropTabPtr.p->m_errorCode == 0); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, dropTabPtr.p->m_request.tableId); - - Uint32 block = 0; - switch(dropTabPtr.p->m_participantData.m_block){ - case 0: - jam(); - block = DBTC; - break; - case DBTC: - jam(); - if (tablePtr.p->isTable() || 
tablePtr.p->isHashIndex()) - block = DBACC; - if (tablePtr.p->isOrderedIndex()) - block = DBTUP; - break; - case DBACC: - jam(); - block = DBTUP; - break; - case DBTUP: - jam(); - if (tablePtr.p->isTable() || tablePtr.p->isHashIndex()) - block = DBLQH; - if (tablePtr.p->isOrderedIndex()) - block = DBTUX; - break; - case DBTUX: - jam(); - block = DBLQH; - break; - case DBLQH: - jam(); - block = DBDIH; - break; - case DBDIH: - jam(); - execute(signal, dropTabPtr.p->m_participantData.m_callback, 0); - return; - } - ndbrequire(block != 0); - dropTabPtr.p->m_participantData.m_block = block; - - DropTabReq * req = (DropTabReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = dropTabPtr.p->key; - req->tableId = dropTabPtr.p->m_request.tableId; - req->requestType = dropTabPtr.p->m_requestType; - - const Uint32 nodeId = getOwnNodeId(); - dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor(); - dropTabPtr.p->m_participantData.m_signalCounter.setWaitingFor(nodeId); - BlockReference ref = numberToRef(block, 0); - sendSignal(ref, GSN_DROP_TAB_REQ, signal, DropTabReq::SignalLength, JBB); -} - -void -Dbdict::dropTab_localDROP_TAB_CONF(Signal* signal){ - jamEntry(); - - DropTabConf * conf = (DropTabConf*)signal->getDataPtr(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, conf->senderData)); - - ndbrequire(dropTabPtr.p->m_request.tableId == conf->tableId); - ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ); - - Uint32 nodeId = refToNode(conf->senderRef); - dropTabPtr.p->m_participantData.m_signalCounter.clearWaitingFor(nodeId); - - if(!dropTabPtr.p->m_participantData.m_signalCounter.done()){ - jam(); - ndbrequire(false); - return; - } - dropTab_nextStep(signal, dropTabPtr); -} - -void -Dbdict::dropTab_complete(Signal* signal, - Uint32 dropTabPtrI, - Uint32 returnCode){ - jam(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI)); - - Uint32 tableId = dropTabPtr.p->m_request.tableId; - - /** - * Write to schema file - */ - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); - SchemaFile::TableState tabState = - (SchemaFile::TableState)tableEntry->m_tableState; - ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED); - tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; - computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES); - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); - Callback callback; - callback.m_callbackData = dropTabPtr.p->key; - callback.m_callbackFunction = safe_cast(&Dbdict::dropTab_writeSchemaConf); - if (savetodisk) - { - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback = callback; - startWriteSchemaFile(signal); - } - else - { - execute(signal, callback, 0); - } -} - -void -Dbdict::dropTab_writeSchemaConf(Signal* signal, - Uint32 dropTabPtrI, - Uint32 returnCode){ - jam(); - - DropTableRecordPtr dropTabPtr; - ndbrequire(c_opDropTable.find(dropTabPtr, dropTabPtrI)); - - ndbrequire(dropTabPtr.p->m_participantData.m_gsn == GSN_DROP_TAB_REQ); - - dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF; - - 
releaseTableObject(dropTabPtr.p->m_request.tableId); - - DropTabConf * conf = (DropTabConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = dropTabPtrI; - conf->tableId = dropTabPtr.p->m_request.tableId; - { - DropTabConf tmp= *conf; - if (dropTabPtr.p->m_coordinatorRef == reference()) - conf->senderRef = dropTabPtr.p->m_request.senderRef; - else - conf->senderRef = 0; - EXECUTE_DIRECT(SUMA, GSN_DROP_TAB_CONF, signal, - DropTabConf::SignalLength); - jamEntry(); - *conf= tmp; - } - dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF; - sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_DROP_TAB_CONF, signal, - DropTabConf::SignalLength, JBB); - - if(dropTabPtr.p->m_coordinatorRef != reference()){ - c_opDropTable.release(dropTabPtr); - } -} - -void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash) -{ - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - if (removeFromHash) - { - jam(); - release_object(tablePtr.p->m_obj_ptr_i); - } - else - { - Rope tmp(c_rope_pool, tablePtr.p->tableName); - tmp.erase(); - } - - { - Rope tmp(c_rope_pool, tablePtr.p->frmData); - tmp.erase(); - } - - { - Rope tmp(c_rope_pool, tablePtr.p->tsData); - tmp.erase(); - } - - { - Rope tmp(c_rope_pool, tablePtr.p->ngData); - tmp.erase(); - } - - { - Rope tmp(c_rope_pool, tablePtr.p->rangeData); - tmp.erase(); - } - - tablePtr.p->tabState = TableRecord::NOT_DEFINED; - - LocalDLFifoList list(c_attributeRecordPool, - tablePtr.p->m_attributes); - AttributeRecordPtr attrPtr; - for(list.first(attrPtr); !attrPtr.isNull(); list.next(attrPtr)){ - Rope name(c_rope_pool, attrPtr.p->attributeName); - Rope def(c_rope_pool, attrPtr.p->defaultValue); - name.erase(); - def.erase(); - } - list.release(); -}//releaseTableObject() - -/** - * DICT receives these on index create and drop. - */ -void Dbdict::execDROP_TABLE_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - - DropTableConf * const conf = (DropTableConf *)signal->getDataPtr(); - // assume part of drop index operation - OpDropIndexPtr opPtr; - c_opDropIndex.find(opPtr, conf->senderData); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_request.getIndexId() == conf->tableId); - ndbrequire(opPtr.p->m_request.getIndexVersion() == conf->tableVersion); - dropIndex_fromDropTable(signal, opPtr); -} - -void Dbdict::execDROP_TABLE_REF(Signal* signal) -{ - jamEntry(); - - DropTableRef * const ref = (DropTableRef *)signal->getDataPtr(); - // assume part of drop index operation - OpDropIndexPtr opPtr; - c_opDropIndex.find(opPtr, ref->senderData); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - opPtr.p->m_errorLine = __LINE__; - dropIndex_fromDropTable(signal, opPtr); -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: EXTERNAL INTERFACE TO DATA -------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* This module contains the code that is used by other modules to. */ -/* access the data within DBDICT. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -void Dbdict::execGET_TABLEDID_REQ(Signal * signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 1); - GetTableIdReq const * req = (GetTableIdReq *)signal->getDataPtr(); - Uint32 senderData = req->senderData; - Uint32 senderRef = req->senderRef; - Uint32 len = req->len; - - if(len>MAX_TAB_NAME_SIZE) - { - jam(); - sendGET_TABLEID_REF((Signal*)signal, - (GetTableIdReq *)req, - GetTableIdRef::TableNameTooLong); - return; - } - - char tableName[MAX_TAB_NAME_SIZE]; - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr,GetTableIdReq::TABLE_NAME); - copy((Uint32*)tableName, ssPtr); - releaseSections(signal); - - DictObject * obj_ptr_p = get_object(tableName, len); - if(obj_ptr_p == 0 || !DictTabInfo::isTable(obj_ptr_p->m_type)){ - jam(); - sendGET_TABLEID_REF(signal, - (GetTableIdReq *)req, - GetTableIdRef::TableNotDefined); - return; - } - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id); - - GetTableIdConf * conf = (GetTableIdConf *)req; - conf->tableId = tablePtr.p->tableId; - conf->schemaVersion = tablePtr.p->tableVersion; - conf->senderData = senderData; - sendSignal(senderRef, GSN_GET_TABLEID_CONF, signal, - GetTableIdConf::SignalLength, JBB); -} - - -void Dbdict::sendGET_TABLEID_REF(Signal* signal, - GetTableIdReq * req, - GetTableIdRef::ErrorCode errorCode) -{ - GetTableIdRef * const ref = (GetTableIdRef *)req; - /** - * The format of GetTabInfo Req/Ref is the same - */ - BlockReference retRef = req->senderRef; - ref->err = errorCode; - sendSignal(retRef, GSN_GET_TABLEID_REF, signal, - GetTableIdRef::SignalLength, JBB); -}//sendGET_TABINFOREF() - -/* ---------------------------------------------------------------- */ -// Get a full table description. 
-/* ---------------------------------------------------------------- */ -void Dbdict::execGET_TABINFOREQ(Signal* signal) -{ - jamEntry(); - if(!assembleFragments(signal)) - { - return; - } - - GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0]; - - /** - * If I get a GET_TABINFO_REQ from myself - * it's is a one from the time queue - */ - bool fromTimeQueue = (signal->senderBlockRef() == reference()); - - if (c_retrieveRecord.busyState && fromTimeQueue == true) { - jam(); - - sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, - signal->length()); - return; - }//if - - const Uint32 MAX_WAITERS = 5; - - if(c_retrieveRecord.busyState && fromTimeQueue == false){ - jam(); - if(c_retrieveRecord.noOfWaiters < MAX_WAITERS){ - jam(); - c_retrieveRecord.noOfWaiters++; - - sendSignalWithDelay(reference(), GSN_GET_TABINFOREQ, signal, 30, - signal->length()); - return; - } - - sendGET_TABINFOREF(signal, req, GetTabInfoRef::Busy); - return; - } - - if(fromTimeQueue){ - jam(); - c_retrieveRecord.noOfWaiters--; - } - - const bool useLongSig = (req->requestType & GetTabInfoReq::LongSignalConf); - const Uint32 reqType = req->requestType & (~GetTabInfoReq::LongSignalConf); - - Uint32 obj_id = RNIL; - if(reqType == GetTabInfoReq::RequestByName){ - jam(); - ndbrequire(signal->getNoOfSections() == 1); - const Uint32 len = req->tableNameLen; - - if(len > MAX_TAB_NAME_SIZE){ - jam(); - releaseSections(signal); - sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNameTooLong); - return; - } - - char tableName[MAX_TAB_NAME_SIZE]; - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr,GetTabInfoReq::TABLE_NAME); - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); - r0.reset(); // undo implicit first() - if(!r0.getWords((Uint32*)tableName, (len+3)/4)){ - jam(); - releaseSections(signal); - sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); - return; - } - releaseSections(signal); - - DictObject * old_ptr_p = get_object(tableName, len); - if(old_ptr_p) - obj_id = old_ptr_p->m_id; - } else { - jam(); - obj_id = req->tableId; - } - - SchemaFile::TableEntry *objEntry = 0; - if(obj_id != RNIL){ - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - objEntry = getTableEntry(xsf, obj_id); - } - - // The table seached for was not found - if(objEntry == 0){ - jam(); - sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); - return; - }//if - - // If istable/index, allow ADD_STARTED (not to ref) - - if (objEntry->m_tableState != SchemaFile::TABLE_ADD_COMMITTED && - objEntry->m_tableState != SchemaFile::ALTER_TABLE_COMMITTED && - objEntry->m_tableState != SchemaFile::TEMPORARY_TABLE_COMMITTED){ - jam(); - sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); - return; - }//if - - if (DictTabInfo::isTable(objEntry->m_tableType) || - DictTabInfo::isIndex(objEntry->m_tableType)) - { - jam(); - TableRecordPtr tabPtr; - c_tableRecordPool.getPtr(tabPtr, obj_id); - if (tabPtr.p->tabState != TableRecord::DEFINED && - tabPtr.p->tabState != TableRecord::BACKUP_ONGOING) - { - jam(); - sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); - return; - } - ndbrequire(objEntry->m_tableState == SchemaFile::TEMPORARY_TABLE_COMMITTED || - !(tabPtr.p->m_bits & TableRecord::TR_Temporary)); - } - - c_retrieveRecord.busyState = true; - c_retrieveRecord.blockRef = req->senderRef; - c_retrieveRecord.m_senderData = req->senderData; - c_retrieveRecord.tableId = obj_id; - c_retrieveRecord.currentSent = 0; - c_retrieveRecord.m_useLongSig = 
useLongSig; - c_retrieveRecord.m_table_type = objEntry->m_tableType; - c_packTable.m_state = PackTable::PTS_GET_TAB; - - if(objEntry->m_tableType==DictTabInfo::Datafile) - { - jam(); - GetTabInfoReq *req= (GetTabInfoReq*)signal->getDataPtrSend(); - req->senderData= c_retrieveRecord.retrievePage; - req->senderRef= reference(); - req->requestType= GetTabInfoReq::RequestById; - req->tableId= obj_id; - - sendSignal(TSMAN_REF, GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - } - else if(objEntry->m_tableType==DictTabInfo::LogfileGroup) - { - jam(); - GetTabInfoReq *req= (GetTabInfoReq*)signal->getDataPtrSend(); - req->senderData= c_retrieveRecord.retrievePage; - req->senderRef= reference(); - req->requestType= GetTabInfoReq::RequestById; - req->tableId= obj_id; - - sendSignal(LGMAN_REF, GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - } - else - { - jam(); - signal->theData[0] = ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = obj_id; - signal->theData[2] = objEntry->m_tableType; - signal->theData[3] = c_retrieveRecord.retrievePage; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); - } - jam(); -}//execGET_TABINFOREQ() - -void Dbdict::sendGetTabResponse(Signal* signal) -{ - PageRecordPtr pagePtr; - DictTabInfo * const conf = (DictTabInfo *)&signal->theData[0]; - conf->senderRef = reference(); - conf->senderData = c_retrieveRecord.m_senderData; - conf->requestType = DictTabInfo::GetTabInfoConf; - conf->totalLen = c_retrieveRecord.retrievedNoOfWords; - - c_pageRecordArray.getPtr(pagePtr, c_retrieveRecord.retrievePage); - Uint32* pagePointer = (Uint32*)&pagePtr.p->word[0] + ZPAGE_HEADER_SIZE; - - if(c_retrieveRecord.m_useLongSig){ - jam(); - GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr(); - conf->gci = 0; - conf->tableId = c_retrieveRecord.tableId; - conf->senderData = c_retrieveRecord.m_senderData; - conf->totalLen = c_retrieveRecord.retrievedNoOfWords; - conf->tableType = c_retrieveRecord.m_table_type; - - Callback c = { safe_cast(&Dbdict::initRetrieveRecord), 0 }; - LinearSectionPtr ptr[3]; - ptr[0].p = pagePointer; - ptr[0].sz = c_retrieveRecord.retrievedNoOfWords; - sendFragmentedSignal(c_retrieveRecord.blockRef, - GSN_GET_TABINFO_CONF, - signal, - GetTabInfoConf::SignalLength, - JBB, - ptr, - 1, - c); - return; - } - - ndbrequire(false); -}//sendGetTabResponse() - -void Dbdict::sendGET_TABINFOREF(Signal* signal, - GetTabInfoReq * req, - GetTabInfoRef::ErrorCode errorCode) -{ - jamEntry(); - GetTabInfoRef * const ref = (GetTabInfoRef *)&signal->theData[0]; - /** - * The format of GetTabInfo Req/Ref is the same - */ - BlockReference retRef = req->senderRef; - ref->errorCode = errorCode; - - sendSignal(retRef, GSN_GET_TABINFOREF, signal, signal->length(), JBB); -}//sendGET_TABINFOREF() - -void -Dbdict::execLIST_TABLES_REQ(Signal* signal) -{ - jamEntry(); - ListTablesReq * req = (ListTablesReq*)signal->getDataPtr(); - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - // save req flags - const Uint32 reqTableId = req->getTableId(); - const Uint32 reqTableType = req->getTableType(); - const bool reqListNames = req->getListNames(); - const bool reqListIndexes = req->getListIndexes(); - // init the confs - ListTablesConf * conf = (ListTablesConf *)signal->getDataPtrSend(); - conf->senderData = senderData; - conf->counter = 0; - Uint32 pos = 0; - - DLHashTable::Iterator iter; - bool ok = c_obj_hash.first(iter); - for(; ok; ok = c_obj_hash.next(iter)){ - Uint32 type = iter.curr.p->m_type; - if ((reqTableType != 
(Uint32)0) && (reqTableType != type)) - continue; - - if (reqListIndexes && !DictTabInfo::isIndex(type)) - continue; - - TableRecordPtr tablePtr; - if (DictTabInfo::isTable(type) || DictTabInfo::isIndex(type)){ - c_tableRecordPool.getPtr(tablePtr, iter.curr.p->m_id); - - if(reqListIndexes && (reqTableId != tablePtr.p->primaryTableId)) - continue; - - conf->tableData[pos] = 0; - conf->setTableId(pos, tablePtr.i); // id - conf->setTableType(pos, type); // type - // state - - if(DictTabInfo::isTable(type)){ - switch (tablePtr.p->tabState) { - case TableRecord::DEFINING: - conf->setTableState(pos, DictTabInfo::StateBuilding); - break; - case TableRecord::PREPARE_DROPPING: - case TableRecord::DROPPING: - conf->setTableState(pos, DictTabInfo::StateDropping); - break; - case TableRecord::DEFINED: - conf->setTableState(pos, DictTabInfo::StateOnline); - break; - case TableRecord::BACKUP_ONGOING: - conf->setTableState(pos, DictTabInfo::StateBackup); - break; - default: - conf->setTableState(pos, DictTabInfo::StateBroken); - break; - } - } - if (tablePtr.p->isIndex()) { - switch (tablePtr.p->indexState) { - case TableRecord::IS_OFFLINE: - conf->setTableState(pos, DictTabInfo::StateOffline); - break; - case TableRecord::IS_BUILDING: - conf->setTableState(pos, DictTabInfo::StateBuilding); - break; - case TableRecord::IS_DROPPING: - conf->setTableState(pos, DictTabInfo::StateDropping); - break; - case TableRecord::IS_ONLINE: - conf->setTableState(pos, DictTabInfo::StateOnline); - break; - default: - conf->setTableState(pos, DictTabInfo::StateBroken); - break; - } - } - // Logging status - if (! (tablePtr.p->m_bits & TableRecord::TR_Logged)) { - conf->setTableStore(pos, DictTabInfo::StoreNotLogged); - } else { - conf->setTableStore(pos, DictTabInfo::StorePermanent); - } - // Temporary status - if (tablePtr.p->m_bits & TableRecord::TR_Temporary) { - conf->setTableTemp(pos, NDB_TEMP_TAB_TEMPORARY); - } else { - conf->setTableTemp(pos, NDB_TEMP_TAB_PERMANENT); - } - pos++; - } - if(DictTabInfo::isTrigger(type)){ - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, iter.curr.p->m_id); - - conf->tableData[pos] = 0; - conf->setTableId(pos, triggerPtr.i); - conf->setTableType(pos, type); - switch (triggerPtr.p->triggerState) { - case TriggerRecord::TS_OFFLINE: - conf->setTableState(pos, DictTabInfo::StateOffline); - break; - case TriggerRecord::TS_ONLINE: - conf->setTableState(pos, DictTabInfo::StateOnline); - break; - default: - conf->setTableState(pos, DictTabInfo::StateBroken); - break; - } - conf->setTableStore(pos, DictTabInfo::StoreNotLogged); - pos++; - } - if (DictTabInfo::isFilegroup(type)){ - jam(); - conf->tableData[pos] = 0; - conf->setTableId(pos, iter.curr.p->m_id); - conf->setTableType(pos, type); // type - conf->setTableState(pos, DictTabInfo::StateOnline); // XXX todo - pos++; - } - if (DictTabInfo::isFile(type)){ - jam(); - conf->tableData[pos] = 0; - conf->setTableId(pos, iter.curr.p->m_id); - conf->setTableType(pos, type); // type - conf->setTableState(pos, DictTabInfo::StateOnline); // XXX todo - pos++; - } - - if (pos >= ListTablesConf::DataLength) { - sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal, - ListTablesConf::SignalLength, JBB); - conf->counter++; - pos = 0; - } - - if (! 
reqListNames) - continue; - - Rope name(c_rope_pool, iter.curr.p->m_name); - const Uint32 size = name.size(); - conf->tableData[pos] = size; - pos++; - if (pos >= ListTablesConf::DataLength) { - sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal, - ListTablesConf::SignalLength, JBB); - conf->counter++; - pos = 0; - } - Uint32 i = 0; - char tmp[MAX_TAB_NAME_SIZE]; - name.copy(tmp); - while (i < size) { - char* p = (char*)&conf->tableData[pos]; - for (Uint32 j = 0; j < 4; j++) { - if (i < size) - *p++ = tmp[i++]; - else - *p++ = 0; - } - pos++; - if (pos >= ListTablesConf::DataLength) { - sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal, - ListTablesConf::SignalLength, JBB); - conf->counter++; - pos = 0; - } - } - } - // last signal must have less than max length - sendSignal(senderRef, GSN_LIST_TABLES_CONF, signal, - ListTablesConf::HeaderLength + pos, JBB); -} - -/** - * MODULE: Create index - * - * Create index in DICT via create table operation. Then invoke alter - * index opearation to online the index. - * - * Request type in CREATE_INDX signals: - * - * RT_USER - from API to DICT master - * RT_DICT_PREPARE - prepare participants - * RT_DICT_COMMIT - commit participants - * RT_TC - create index in TC (part of alter index operation) - */ - -void -Dbdict::execCREATE_INDX_REQ(Signal* signal) -{ - jamEntry(); - CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend(); - OpCreateIndexPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const CreateIndxReq::RequestType requestType = req->getRequestType(); - if (requestType == CreateIndxReq::RT_USER) { - jam(); - if (! assembleFragments(signal)) { - jam(); - return; - } - if (signal->getLength() == CreateIndxReq::SignalLength) { - jam(); - CreateIndxRef::ErrorCode tmperr = CreateIndxRef::NoError; - if (getOwnNodeId() != c_masterNodeId) { - jam(); - tmperr = CreateIndxRef::NotMaster; - } else if (c_blockState == BS_NODE_RESTART) { - jam(); - tmperr = CreateIndxRef::BusyWithNR; - } else if (c_blockState != BS_IDLE) { - jam(); - tmperr = CreateIndxRef::Busy; - } - else if (checkSingleUserMode(senderRef)) - { - jam(); - tmperr = CreateIndxRef::SingleUser; - } - if (tmperr != CreateIndxRef::NoError) { - releaseSections(signal); - OpCreateIndex opBusy; - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = 0; - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE; - opPtr.p->m_errorCode = tmperr; - opPtr.p->m_errorLine = __LINE__; - opPtr.p->m_errorNode = c_masterNodeId; - createIndex_sendReply(signal, opPtr, true); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_CREATE_INDX_REQ, - signal, CreateIndxReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == CreateIndxReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpCreateIndex opBusy; - if (! 
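LIST_TABLES_CONF above is streamed: entries (and then name bytes, four per word) are appended to a fixed-size data area, and whenever the area fills a signal is flushed, the counter is incremented and the position reset; the final signal is sent with less than the maximum length so the receiver knows the list is complete. A small sketch of that flush-on-full pattern, with a hypothetical per-signal capacity and payload type:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t DataLength = 22;     // hypothetical per-signal capacity in words

    struct ListConf { uint32_t counter = 0; uint32_t data[DataLength]; };

    static void flush(const ListConf& conf, uint32_t words, bool last)
    {
        std::printf("%s signal: %u words, counter %u\n",
                    last ? "final" : "full", words, conf.counter);
    }

    int main()
    {
        ListConf conf;
        uint32_t pos = 0;
        for (uint32_t entry = 0; entry < 100; entry++) {   // 100 hypothetical table entries
            conf.data[pos++] = entry;
            if (pos >= DataLength) {                       // area full: flush and continue
                flush(conf, pos, false);
                conf.counter++;
                pos = 0;
            }
        }
        flush(conf, pos, true);                            // short final signal ends the list
        return 0;
    }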
c_opCreateIndex.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::Busy; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opCreateIndex.add(opPtr); - // save attribute list - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION); - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); - r0.reset(); // undo implicit first() - if (! r0.getWord(&opPtr.p->m_attrList.sz) || - ! r0.getWords(opPtr.p->m_attrList.id, opPtr.p->m_attrList.sz)) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidName; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - // save name and index table properties - signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION); - SimplePropertiesSectionReader r1(ssPtr, getSectionSegmentPool()); - c_tableDesc.init(); - SimpleProperties::UnpackStatus status = SimpleProperties::unpack( - r1, &c_tableDesc, - DictTabInfo::TableMapping, DictTabInfo::TableMappingSize, - true, true); - if (status != SimpleProperties::Eof) { - opPtr.p->m_errorCode = CreateIndxRef::InvalidName; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - memcpy(opPtr.p->m_indexName, c_tableDesc.TableName, MAX_TAB_NAME_SIZE); - opPtr.p->m_loggedIndex = c_tableDesc.TableLoggedFlag; - opPtr.p->m_temporaryIndex = c_tableDesc.TableTemporaryFlag; - releaseSections(signal); - // master expects to hear from all - if (opPtr.p->m_isMaster) - opPtr.p->m_signalCounter = c_aliveNodes; - createIndex_slavePrepare(signal, opPtr); - createIndex_sendReply(signal, opPtr, false); - return; - } - c_opCreateIndex.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == CreateIndxReq::RT_DICT_COMMIT || - requestType == CreateIndxReq::RT_DICT_ABORT) { - jam(); - if (requestType == CreateIndxReq::RT_DICT_COMMIT) { - opPtr.p->m_request.setIndexId(req->getIndexId()); - opPtr.p->m_request.setIndexVersion(req->getIndexVersion()); - createIndex_slaveCommit(signal, opPtr); - } else { - createIndex_slaveAbort(signal, opPtr); - } - createIndex_sendReply(signal, opPtr, false); - // done in slave - if (! 
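In the RT_USER path above, the master checks that it is not busy, stamps the request with a fresh operation key, and re-broadcasts it to every alive DICT node (itself included); each participant then seizes an operation record under that key so the later PREPARE/COMMIT/ABORT rounds can find it again. A rough model of that fan-out and lookup, with stand-in types in place of NodeReceiverGroup and the real record pools:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <set>

    struct CreateIndxReq { uint32_t opKey; uint32_t tableId; };
    struct OpRecord      { CreateIndxReq request; bool isMaster; };

    static uint32_t opRecordSequence = 0;              // analogue of c_opRecordSequence
    static std::map<uint32_t, OpRecord> opRecords;     // analogue of c_opCreateIndex (per node in reality)

    // Participant side: seize an operation record keyed by the broadcast op key.
    static void onCreateIndxReq(const CreateIndxReq& req, bool isMaster)
    {
        opRecords[req.opKey] = OpRecord{req, isMaster};
    }

    // Master side: tag the user request and fan it out to all alive nodes.
    static void coordinatorFanOut(CreateIndxReq req, const std::set<int>& aliveNodes, int self)
    {
        req.opKey = ++opRecordSequence;
        for (int node : aliveNodes)
            onCreateIndxReq(req, node == self);        // models sendSignal(rg, GSN_CREATE_INDX_REQ, ...)
        std::printf("op %u broadcast to %zu nodes\n", req.opKey, aliveNodes.size());
    }

    int main()
    {
        coordinatorFanOut(CreateIndxReq{0, 12}, {1, 2, 3}, 1);
        return 0;
    }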
opPtr.p->m_isMaster) - c_opCreateIndex.release(opPtr); - return; - } - } - jam(); - // return to sender - releaseSections(signal); - OpCreateIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = CreateIndxRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - createIndex_sendReply(signal, opPtr, true); -} - -void -Dbdict::execCREATE_INDX_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - CreateIndxConf* conf = (CreateIndxConf*)signal->getDataPtrSend(); - createIndex_recvReply(signal, conf, 0); -} - -void -Dbdict::execCREATE_INDX_REF(Signal* signal) -{ - jamEntry(); - CreateIndxRef* ref = (CreateIndxRef*)signal->getDataPtrSend(); - createIndex_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::createIndex_recvReply(Signal* signal, const CreateIndxConf* conf, - const CreateIndxRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const CreateIndxReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == CreateIndxReq::RT_TC) { - jam(); - // part of alter index operation - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterIndex_fromCreateTc(signal, opPtr); - return; - } - OpCreateIndexPtr opPtr; - c_opCreateIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == CreateIndxReq::RT_DICT_COMMIT || - requestType == CreateIndxReq::RT_DICT_ABORT) { - jam(); - // send reply to user - createIndex_sendReply(signal, opPtr, true); - c_opCreateIndex.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT; - createIndex_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == CreateIndxReq::RT_DICT_PREPARE) { - jam(); - // start index table create - createIndex_toCreateTable(signal, opPtr); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT; - createIndex_sendSlaveReq(signal, opPtr); - return; - } - return; - } - ndbrequire(false); -} - -void -Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - if (ERROR_INSERTED(6006) && ! opPtr.p->m_isMaster) { - ndbrequire(false); - } -} - -void -Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) -{ - union { - char tableName[MAX_TAB_NAME_SIZE]; - char attributeName[MAX_ATTR_NAME_SIZE]; - }; - Uint32 k; - Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX]; - - jam(); - const CreateIndxReq* const req = &opPtr.p->m_request; - // signal data writer - Uint32* wbuffer = &c_indexPage.word[0]; - LinearWriter w(wbuffer, sizeof(c_indexPage) >> 2); - w.first(); - // get table being indexed - if (! (req->getTableId() < c_tableRecordPool.getSize())) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable; - opPtr.p->m_errorLine = __LINE__; - return; - } - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, req->getTableId()); - if (tablePtr.p->tabState != TableRecord::DEFINED && - tablePtr.p->tabState != TableRecord::BACKUP_ONGOING) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable; - opPtr.p->m_errorLine = __LINE__; - return; - } - if (! 
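The master-side reply handler above seeds a SignalCounter with all alive nodes, clears one bit per incoming CONF or REF, and only advances (to COMMIT, ABORT, or the final user reply) once the counter reports done. A simplified model of that bookkeeping, using a std::set in place of the NDB node bitmask:

    #include <cstdio>
    #include <set>

    // Stand-in for the NDB SignalCounter node bitmask.
    struct SignalCounter {
        std::set<int> waitingFor;
        void init(const std::set<int>& aliveNodes) { waitingFor = aliveNodes; }
        void clearWaitingFor(int node) { waitingFor.erase(node); }
        bool done() const { return waitingFor.empty(); }
    };

    int main()
    {
        SignalCounter counter;
        counter.init({1, 2, 3});               // m_signalCounter = c_aliveNodes
        const int replies[] = {2, 1, 3};       // CONF/REF replies arriving in any order
        for (int sender : replies) {
            counter.clearWaitingFor(sender);
            if (!counter.done())
                continue;                      // still waiting for other participants
            std::printf("all participants answered; move to commit/abort or reply\n");
        }
        return 0;
    }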
tablePtr.p->isTable()) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidPrimaryTable; - opPtr.p->m_errorLine = __LINE__; - return; - } - - // Check that the temporary status of index is compatible with table. - if (!opPtr.p->m_temporaryIndex && - tablePtr.p->m_bits & TableRecord::TR_Temporary) - { - jam(); - opPtr.p->m_errorCode= CreateIndxRef::TableIsTemporary; - opPtr.p->m_errorLine= __LINE__; - return; - } - if (opPtr.p->m_temporaryIndex && - !(tablePtr.p->m_bits & TableRecord::TR_Temporary)) - { - // This could be implemented later, but mysqld does currently not detect - // that the index disappears after SR, and it appears not too useful. - jam(); - opPtr.p->m_errorCode= CreateIndxRef::TableIsNotTemporary; - opPtr.p->m_errorLine= __LINE__; - return; - } - if (opPtr.p->m_temporaryIndex && opPtr.p->m_loggedIndex) - { - jam(); - opPtr.p->m_errorCode= CreateIndxRef::NoLoggingTemporaryIndex; - opPtr.p->m_errorLine= __LINE__; - return; - } - - // compute index table record - TableRecord indexRec; - TableRecordPtr indexPtr; - indexPtr.i = RNIL; // invalid - indexPtr.p = &indexRec; - initialiseTableRecord(indexPtr); - indexPtr.p->m_bits = TableRecord::TR_RowChecksum; - if (req->getIndexType() == DictTabInfo::UniqueHashIndex) { - indexPtr.p->m_bits |= (opPtr.p->m_loggedIndex ? TableRecord::TR_Logged:0); - indexPtr.p->m_bits |= - (opPtr.p->m_temporaryIndex ? TableRecord::TR_Temporary : 0); - indexPtr.p->fragmentType = DictTabInfo::DistrKeyUniqueHashIndex; - } else if (req->getIndexType() == DictTabInfo::OrderedIndex) { - // first version will not supported logging - if (opPtr.p->m_loggedIndex) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType; - opPtr.p->m_errorLine = __LINE__; - return; - } - indexPtr.p->m_bits |= - (opPtr.p->m_temporaryIndex ? 
TableRecord::TR_Temporary : 0); - indexPtr.p->fragmentType = DictTabInfo::DistrKeyOrderedIndex; - } else { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType; - opPtr.p->m_errorLine = __LINE__; - return; - } - indexPtr.p->tableType = (DictTabInfo::TableType)req->getIndexType(); - indexPtr.p->primaryTableId = req->getTableId(); - indexPtr.p->noOfAttributes = opPtr.p->m_attrList.sz; - indexPtr.p->tupKeyLength = 0; - if (indexPtr.p->noOfAttributes == 0) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType; - opPtr.p->m_errorLine = __LINE__; - return; - } - - if (indexPtr.p->isOrderedIndex()) { - // tree node size in words (make configurable later) - indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE; - } - - AttributeMask mask; - mask.clear(); - for (k = 0; k < opPtr.p->m_attrList.sz; k++) { - jam(); - unsigned current_id= opPtr.p->m_attrList.id[k]; - Uint32 tAttr = tablePtr.p->m_attributes.firstItem; - AttributeRecord* aRec = NULL; - for (; tAttr != RNIL; ) - { - aRec = c_attributeRecordPool.getPtr(tAttr); - if (aRec->attributeId != current_id) - { - tAttr= aRec->nextList; - continue; - } - jam(); - break; - } - if (tAttr == RNIL) { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - return; - } - if (mask.get(current_id)) - { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes; - opPtr.p->m_errorLine = __LINE__; - return; - } - const Uint32 a = aRec->attributeDescriptor; - - if (AttributeDescriptor::getDiskBased(a)) - { - jam(); - opPtr.p->m_errorCode = CreateIndxRef::IndexOnDiskAttributeError; - opPtr.p->m_errorLine = __LINE__; - return; - } - - mask.set(current_id); - unsigned kk= k; - if (indexPtr.p->isHashIndex()) { - const Uint32 s1 = AttributeDescriptor::getSize(a); - const Uint32 s2 = AttributeDescriptor::getArraySize(a); - indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5; - - for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--) - attrid_map[kk]= attrid_map[kk-1]; - } - attrid_map[kk]= k | (current_id << 16); - } - - indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes; - // plus concatenated primary table key attribute - indexPtr.p->noOfAttributes += 1; - indexPtr.p->noOfNullAttr = 0; - // write index table - w.add(DictTabInfo::TableName, opPtr.p->m_indexName); - w.add(DictTabInfo::TableLoggedFlag, !!(indexPtr.p->m_bits & TableRecord::TR_Logged)); - w.add(DictTabInfo::TableTemporaryFlag, !!(indexPtr.p->m_bits & TableRecord::TR_Temporary)); - w.add(DictTabInfo::FragmentTypeVal, indexPtr.p->fragmentType); - w.add(DictTabInfo::TableTypeVal, indexPtr.p->tableType); - Rope name(c_rope_pool, tablePtr.p->tableName); - name.copy(tableName); - w.add(DictTabInfo::PrimaryTable, tableName); - w.add(DictTabInfo::PrimaryTableId, tablePtr.i); - w.add(DictTabInfo::NoOfAttributes, indexPtr.p->noOfAttributes); - w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey); - w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr); - w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength); - w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE); - // write index key attributes - for (k = 0; k < opPtr.p->m_attrList.sz; k++) { - // insert the attributes in the order decided above in attrid_map - // k is new order, current_id is in previous order - // ToDo: make sure "current_id" is stored with the table and - // passed up to NdbDictionary - unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff]; - jam(); - for (Uint32 tAttr = tablePtr.p->m_attributes.firstItem; tAttr != RNIL; ) { 
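For hash indexes, the loop above builds attrid_map with a tiny insertion sort: each entry packs the original position in the low 16 bits and the attribute id in the high 16 bits, so the key attributes end up ordered by attribute id while still remembering where the user declared them. A stand-alone sketch of that packing and sort, assuming ids fit in 16 bits:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // Hypothetical user-supplied key attribute ids, in declaration order.
        const uint32_t attrIds[] = {7, 2, 5};
        const uint32_t n = sizeof(attrIds) / sizeof(attrIds[0]);
        uint32_t attridMap[n];

        for (uint32_t k = 0; k < n; k++) {
            uint32_t id = attrIds[k];
            uint32_t kk = k;
            // Shift larger ids up one slot at a time (insertion sort keyed on id).
            for (; kk > 0 && id < (attridMap[kk - 1] >> 16); kk--)
                attridMap[kk] = attridMap[kk - 1];
            attridMap[kk] = k | (id << 16);   // low half: original position, high half: id
        }

        for (uint32_t k = 0; k < n; k++)
            std::printf("slot %u -> original position %u, attribute id %u\n",
                        k, attridMap[k] & 0xffff, attridMap[k] >> 16);
        return 0;
    }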
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr); - tAttr = aRec->nextList; - if (aRec->attributeId != current_id) - continue; - jam(); - const Uint32 a = aRec->attributeDescriptor; - bool isNullable = AttributeDescriptor::getNullable(a); - Uint32 arrayType = AttributeDescriptor::getArrayType(a); - Rope attrName(c_rope_pool, aRec->attributeName); - attrName.copy(attributeName); - w.add(DictTabInfo::AttributeName, attributeName); - Uint32 attrType = AttributeDescriptor::getType(a); - // computed - w.add(DictTabInfo::AttributeId, k); - if (indexPtr.p->isHashIndex()) { - w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true); - w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false); - } - if (indexPtr.p->isOrderedIndex()) { - w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false); - w.add(DictTabInfo::AttributeNullableFlag, (Uint32)isNullable); - } - w.add(DictTabInfo::AttributeArrayType, arrayType); - w.add(DictTabInfo::AttributeExtType, attrType); - w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision); - w.add(DictTabInfo::AttributeExtScale, aRec->extScale); - w.add(DictTabInfo::AttributeExtLength, aRec->extLength); - w.add(DictTabInfo::AttributeEnd, (Uint32)true); - } - } - if (indexPtr.p->isHashIndex()) { - jam(); - - Uint32 key_type = NDB_ARRAYTYPE_FIXED; - AttributeRecordPtr attrPtr; - LocalDLFifoList alist(c_attributeRecordPool, - tablePtr.p->m_attributes); - for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) - { - const Uint32 desc = attrPtr.p->attributeDescriptor; - if (AttributeDescriptor::getPrimaryKey(desc) && - AttributeDescriptor::getArrayType(desc) != NDB_ARRAYTYPE_FIXED) - { - key_type = NDB_ARRAYTYPE_MEDIUM_VAR; - break; - } - } - - // write concatenated primary table key attribute i.e. keyinfo - w.add(DictTabInfo::AttributeName, "NDB$PK"); - w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz); - w.add(DictTabInfo::AttributeArrayType, key_type); - w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false); - w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false); - w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned); - w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength+1); - w.add(DictTabInfo::AttributeEnd, (Uint32)true); - } - if (indexPtr.p->isOrderedIndex()) { - jam(); - // write index tree node as Uint32 array attribute - w.add(DictTabInfo::AttributeName, "NDB$TNODE"); - w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz); - // should not matter but VAR crashes in TUP - w.add(DictTabInfo::AttributeArrayType, (Uint32)NDB_ARRAYTYPE_FIXED); - w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true); - w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false); - w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned); - w.add(DictTabInfo::AttributeExtLength, indexPtr.p->tupKeyLength); - w.add(DictTabInfo::AttributeEnd, (Uint32)true); - } - // finish - w.add(DictTabInfo::TableEnd, (Uint32)true); - // remember to... 
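The index table description above is not a fixed struct; it is a stream of (key, value) properties written with LinearWriter into a word buffer (TableName, FragmentTypeVal, one AttributeName/AttributeEnd group per column, then TableEnd) and later parsed back with SimpleProperties::unpack. A minimal sketch of such a tagged key-value stream, with made-up tag numbers in place of the real DictTabInfo constants:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical tags; the real ones are DictTabInfo::TableName, ::NoOfAttributes, ::TableEnd, ...
    enum Tag : uint32_t { TableName = 1, NoOfAttributes = 2, TableEnd = 99 };

    // Simplified writer: every property is a tag word followed by one value word.
    struct Writer {
        std::vector<uint32_t> words;
        void add(uint32_t tag, uint32_t value) { words.push_back(tag); words.push_back(value); }
    };

    int main()
    {
        Writer w;
        w.add(TableName, 0xBEEF);    // the real writer stores the name bytes, not a handle
        w.add(NoOfAttributes, 3);
        w.add(TableEnd, 1);

        // Reader side: walk the stream until TableEnd, dispatching on the tag.
        for (size_t i = 0; i + 1 < w.words.size(); i += 2) {
            std::printf("tag %u = %u\n", w.words[i], w.words[i + 1]);
            if (w.words[i] == TableEnd)
                break;
        }
        return 0;
    }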
- releaseSections(signal); - // send create index table request - CreateTableReq * const cre = (CreateTableReq*)signal->getDataPtrSend(); - cre->senderRef = reference(); - cre->senderData = opPtr.p->key; - LinearSectionPtr lsPtr[3]; - lsPtr[0].p = wbuffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ, - signal, CreateTableReq::SignalLength, JBB, lsPtr, 1); -} - -void -Dbdict::createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT; - createIndex_sendSlaveReq(signal, opPtr); - return; - } - if (! opPtr.p->m_request.getOnline()) { - jam(); - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT; - createIndex_sendSlaveReq(signal, opPtr); - return; - } - createIndex_toAlterIndex(signal, opPtr); -} - -void -Dbdict::createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(AlterIndxReq::RT_CREATE_INDEX); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setIndexId(opPtr.p->m_request.getIndexId()); - req->setIndexVersion(opPtr.p->m_request.getIndexVersion()); - req->setOnline(true); - sendSignal(reference(), GSN_ALTER_INDX_REQ, - signal, AlterIndxReq::SignalLength, JBB); -} - -void -Dbdict::createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_ABORT; - createIndex_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = CreateIndxReq::RT_DICT_COMMIT; - createIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - const Uint32 indexId = opPtr.p->m_request.getIndexId(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, indexId); - if (! opPtr.p->m_request.getOnline()) { - ndbrequire(indexPtr.p->indexState == TableRecord::IS_UNDEFINED); - indexPtr.p->indexState = TableRecord::IS_OFFLINE; - } else { - ndbrequire(indexPtr.p->indexState == TableRecord::IS_ONLINE); - } -} - -void -Dbdict::createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - CreateIndxReq* const req = &opPtr.p->m_request; - const Uint32 indexId = req->getIndexId(); - if (indexId >= c_tableRecordPool.getSize()) { - jam(); - return; - } - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, indexId); - if (! 
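Index creation above is driven as a chain of internal sub-operations: DICT sends itself CREATE_TABLE_REQ for the generated index table, then ALTER_INDX_REQ to online it, and after each step the accumulated error state decides between RT_DICT_COMMIT and RT_DICT_ABORT. A compact sketch of such a self-addressed callback chain, with a plain queue standing in for the job buffer:

    #include <cstdio>
    #include <functional>
    #include <queue>

    // Hypothetical operation record; the real OpCreateIndex carries far more state.
    struct Op { bool hasError = false; };

    // Plain queue standing in for "send a signal to ourselves and continue there".
    static std::queue<std::function<void()>> jobBuffer;

    static void sendToSelf(std::function<void()> handler) { jobBuffer.push(std::move(handler)); }

    static void commitOrAbort(Op& op)
    {
        std::printf(op.hasError ? "RT_DICT_ABORT\n" : "RT_DICT_COMMIT\n");
    }

    static void fromAlterIndex(Op& op) { commitOrAbort(op); }

    static void fromCreateTable(Op& op)
    {
        if (op.hasError) { commitOrAbort(op); return; }
        sendToSelf([&op] { fromAlterIndex(op); });    // models GSN_ALTER_INDX_REQ to self
    }

    int main()
    {
        Op op;
        sendToSelf([&op] { fromCreateTable(op); });   // models GSN_CREATE_TABLE_REQ to self
        while (!jobBuffer.empty()) {
            std::function<void()> next = std::move(jobBuffer.front());
            jobBuffer.pop();
            next();
        }
        return 0;
    }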
indexPtr.p->isIndex()) { - jam(); - return; - } - indexPtr.p->indexState = TableRecord::IS_BROKEN; -} - -void -Dbdict::createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr) -{ - jam(); - CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - opPtr.p->m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_CREATE_INDX_REQ, - signal, CreateIndxReq::SignalLength, JBB); -} - -void -Dbdict::createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr, - bool toUser) -{ - CreateIndxRef* rep = (CreateIndxRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_CREATE_INDX_CONF; - Uint32 length = CreateIndxConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == CreateIndxReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = CreateIndxConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - rep->setIndexVersion(opPtr.p->m_request.getIndexVersion()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) - opPtr.p->m_errorNode = getOwnNodeId(); - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_CREATE_INDX_REF; - length = CreateIndxRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Drop index. - * - * Drop index. First alters the index offline (i.e. drops metadata in - * other blocks) and then drops the index table. - */ - -void -Dbdict::execDROP_INDX_REQ(Signal* signal) -{ - jamEntry(); - DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend(); - OpDropIndexPtr opPtr; - - int err = DropIndxRef::BadRequestType; - const Uint32 senderRef = signal->senderBlockRef(); - const DropIndxReq::RequestType requestType = req->getRequestType(); - if (requestType == DropIndxReq::RT_USER) { - jam(); - if (signal->getLength() == DropIndxReq::SignalLength) { - jam(); - DropIndxRef::ErrorCode tmperr = DropIndxRef::NoError; - if (getOwnNodeId() != c_masterNodeId) { - jam(); - tmperr = DropIndxRef::NotMaster; - } else if (c_blockState == BS_NODE_RESTART) { - jam(); - tmperr = DropIndxRef::BusyWithNR; - } else if (c_blockState != BS_IDLE) { - jam(); - tmperr = DropIndxRef::Busy; - } - else if (checkSingleUserMode(senderRef)) - { - jam(); - tmperr = DropIndxRef::SingleUser; - } - if (tmperr != DropIndxRef::NoError) { - err = tmperr; - goto error; - } - // forward initial request plus operation key to all - Uint32 indexId= req->getIndexId(); - Uint32 indexVersion= req->getIndexVersion(); - - if(indexId >= c_tableRecordPool.getSize()) - { - err = DropIndxRef::IndexNotFound; - goto error; - } - - TableRecordPtr tmp; - c_tableRecordPool.getPtr(tmp, indexId); - if(tmp.p->tabState == TableRecord::NOT_DEFINED || - tmp.p->tableVersion != indexVersion) - { - err = DropIndxRef::InvalidIndexVersion; - goto error; - } - - if (! 
tmp.p->isIndex()) { - jam(); - err = DropIndxRef::NotAnIndex; - goto error; - } - - if (tmp.p->indexState != TableRecord::IS_ONLINE) - req->addRequestFlag(RequestFlag::RF_FORCE); - - tmp.p->indexState = TableRecord::IS_DROPPING; - - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_DROP_INDX_REQ, - signal, DropIndxReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == DropIndxReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpDropIndex opBusy; - if (! c_opDropIndex.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = DropIndxReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = DropIndxRef::Busy; - opPtr.p->m_errorLine = __LINE__; - dropIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opDropIndex.add(opPtr); - // master expects to hear from all - if (opPtr.p->m_isMaster) - opPtr.p->m_signalCounter = c_aliveNodes; - dropIndex_slavePrepare(signal, opPtr); - dropIndex_sendReply(signal, opPtr, false); - return; - } - c_opDropIndex.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == DropIndxReq::RT_DICT_COMMIT || - requestType == DropIndxReq::RT_DICT_ABORT) { - jam(); - if (requestType == DropIndxReq::RT_DICT_COMMIT) - dropIndex_slaveCommit(signal, opPtr); - else - dropIndex_slaveAbort(signal, opPtr); - dropIndex_sendReply(signal, opPtr, false); - // done in slave - if (! opPtr.p->m_isMaster) - c_opDropIndex.release(opPtr); - return; - } - } -error: - jam(); - // return to sender - OpDropIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err; - opPtr.p->m_errorLine = __LINE__; - opPtr.p->m_errorNode = c_masterNodeId; - dropIndex_sendReply(signal, opPtr, true); -} - -void -Dbdict::execDROP_INDX_CONF(Signal* signal) -{ - jamEntry(); - DropIndxConf* conf = (DropIndxConf*)signal->getDataPtrSend(); - dropIndex_recvReply(signal, conf, 0); -} - -void -Dbdict::execDROP_INDX_REF(Signal* signal) -{ - jamEntry(); - DropIndxRef* ref = (DropIndxRef*)signal->getDataPtrSend(); - dropIndex_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::dropIndex_recvReply(Signal* signal, const DropIndxConf* conf, - const DropIndxRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const DropIndxReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == DropIndxReq::RT_TC) { - jam(); - // part of alter index operation - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterIndex_fromDropTc(signal, opPtr); - return; - } - OpDropIndexPtr opPtr; - c_opDropIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! 
opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == DropIndxReq::RT_DICT_COMMIT || - requestType == DropIndxReq::RT_DICT_ABORT) { - jam(); - // send reply to user - dropIndex_sendReply(signal, opPtr, true); - c_opDropIndex.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT; - dropIndex_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == DropIndxReq::RT_DICT_PREPARE) { - jam(); - // start alter offline - dropIndex_toAlterIndex(signal, opPtr); - return; - } - ndbrequire(false); -} - -void -Dbdict::dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - DropIndxReq* const req = &opPtr.p->m_request; - // check index exists - TableRecordPtr indexPtr; - if (! (req->getIndexId() < c_tableRecordPool.getSize())) { - jam(); - opPtr.p->m_errorCode = DropIndxRef::IndexNotFound; - opPtr.p->m_errorLine = __LINE__; - return; - } - c_tableRecordPool.getPtr(indexPtr, req->getIndexId()); - if (indexPtr.p->tabState != TableRecord::DEFINED) { - jam(); - opPtr.p->m_errorCode = DropIndxRef::IndexNotFound; - opPtr.p->m_errorLine = __LINE__; - return; - } - if (! indexPtr.p->isIndex()) { - jam(); - opPtr.p->m_errorCode = DropIndxRef::NotAnIndex; - opPtr.p->m_errorLine = __LINE__; - return; - } - // ignore incoming primary table id - req->setTableId(indexPtr.p->primaryTableId); -} - -void -Dbdict::dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(AlterIndxReq::RT_DROP_INDEX); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setIndexId(opPtr.p->m_request.getIndexId()); - req->setIndexVersion(opPtr.p->m_request.getIndexVersion()); - req->setOnline(false); - sendSignal(reference(), GSN_ALTER_INDX_REQ, - signal, AlterIndxReq::SignalLength, JBB); -} - -void -Dbdict::dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT; - dropIndex_sendSlaveReq(signal, opPtr); - return; - } - dropIndex_toDropTable(signal, opPtr); -} - -void -Dbdict::dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - DropTableReq* const req = (DropTableReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = opPtr.p->key; - req->tableId = opPtr.p->m_request.getIndexId(); - req->tableVersion = opPtr.p->m_request.getIndexVersion(); - sendSignal(reference(), GSN_DROP_TABLE_REQ, - signal,DropTableReq::SignalLength, JBB); -} - -void -Dbdict::dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = DropIndxReq::RT_DICT_ABORT; - dropIndex_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = DropIndxReq::RT_DICT_COMMIT; - dropIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); -} - -void -Dbdict::dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr) -{ - jam(); - DropIndxReq* const req = &opPtr.p->m_request; - const Uint32 indexId = req->getIndexId(); - if (indexId >= c_tableRecordPool.getSize()) { - jam(); - return; - } - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, indexId); - indexPtr.p->indexState = TableRecord::IS_BROKEN; -} - -void 
-Dbdict::dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr) -{ - DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - opPtr.p->m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_DROP_INDX_REQ, - signal, DropIndxReq::SignalLength, JBB); -} - -void -Dbdict::dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, - bool toUser) -{ - DropIndxRef* rep = (DropIndxRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_DROP_INDX_CONF; - Uint32 length = DropIndxConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == DropIndxReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = DropIndxConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - rep->setIndexVersion(opPtr.p->m_request.getIndexVersion()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) - opPtr.p->m_errorNode = getOwnNodeId(); - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_DROP_INDX_REF; - length = DropIndxRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/***************************************************** - * - * Util signalling - * - *****************************************************/ - -int -Dbdict::sendSignalUtilReq(Callback *pcallback, - BlockReference ref, - GlobalSignalNumber gsn, - Signal* signal, - Uint32 length, - JobBufferLevel jbuf, - LinearSectionPtr ptr[3], - Uint32 noOfSections) -{ - jam(); - EVENT_TRACE; - OpSignalUtilPtr utilRecPtr; - - // Seize a Util Send record - if (!c_opSignalUtil.seize(utilRecPtr)) { - // Failed to allocate util record - return -1; - } - utilRecPtr.p->m_callback = *pcallback; - - // should work for all util signal classes - UtilPrepareReq *req = (UtilPrepareReq*)signal->getDataPtrSend(); - utilRecPtr.p->m_userData = req->getSenderData(); - req->setSenderData(utilRecPtr.i); - - if (ptr) { - jam(); - sendSignal(ref, gsn, signal, length, jbuf, ptr, noOfSections); - } else { - jam(); - sendSignal(ref, gsn, signal, length, jbuf); - } - - return 0; -} - -int -Dbdict::recvSignalUtilReq(Signal* signal, Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - UtilPrepareConf * const req = (UtilPrepareConf*)signal->getDataPtr(); - OpSignalUtilPtr utilRecPtr; - utilRecPtr.i = req->getSenderData(); - if ((utilRecPtr.p = c_opSignalUtil.getPtr(utilRecPtr.i)) == NULL) { - jam(); - return -1; - } - - req->setSenderData(utilRecPtr.p->m_userData); - Callback c = utilRecPtr.p->m_callback; - c_opSignalUtil.release(utilRecPtr); - - execute(signal, c, returnCode); - return 0; -} - -void Dbdict::execUTIL_PREPARE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} - -void -Dbdict::execUTIL_PREPARE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 1) 
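The two helpers above make DBUTIL calls look like local callbacks: the caller's callback and original senderData are parked in a seized record, the record index travels on the wire as senderData, and the CONF/REF handler restores the original data before executing the callback with the return code. A minimal sketch of that wrapping, assuming a simple vector-backed pool:

    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    struct UtilRecord {
        uint32_t userData;
        std::function<void(uint32_t /*returnCode*/)> callback;
    };

    static std::vector<UtilRecord> pool;     // stand-in for c_opSignalUtil

    // Outgoing: remember the caller's data, put our record index on the wire.
    static uint32_t wrapRequest(uint32_t senderData, std::function<void(uint32_t)> cb)
    {
        pool.push_back(UtilRecord{senderData, std::move(cb)});
        return (uint32_t)pool.size() - 1;    // goes out as req->senderData
    }

    // Incoming CONF/REF: restore the caller's data and run the saved callback.
    static void unwrapReply(uint32_t wireSenderData, uint32_t returnCode)
    {
        UtilRecord rec = pool.at(wireSenderData);
        std::printf("reply for user data %u, rc=%u\n", rec.userData, returnCode);
        rec.callback(returnCode);
    }

    int main()
    {
        uint32_t wire = wrapRequest(42, [](uint32_t rc) {
            std::printf(rc == 0 ? "prepare conf\n" : "prepare ref\n");
        });
        unwrapReply(wire, 0);                // models execUTIL_PREPARE_CONF
        return 0;
    }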
== 0); -} - -void Dbdict::execUTIL_EXECUTE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} - -void Dbdict::execUTIL_EXECUTE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - -#ifdef EVENT_DEBUG - UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend(); - - ndbout_c("execUTIL_EXECUTE_REF"); - ndbout_c("senderData %u",ref->getSenderData()); - ndbout_c("errorCode %u",ref->getErrorCode()); - ndbout_c("TCErrorCode %u",ref->getTCErrorCode()); -#endif - - ndbrequire(recvSignalUtilReq(signal, 1) == 0); -} -void Dbdict::execUTIL_RELEASE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(false); - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} -void Dbdict::execUTIL_RELEASE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(false); - ndbrequire(recvSignalUtilReq(signal, 1) == 0); -} - -/** - * MODULE: Create event - * - * Create event in DICT. - * - * - * Request type in CREATE_EVNT signals: - * - * Signalflow see Dbdict.txt - * - */ - -/***************************************************************** - * - * Systable stuff - * - */ - -const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = { - sizeof(((sysTab_NDBEVENTS_0*)0)->NAME), - sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE), - sizeof(((sysTab_NDBEVENTS_0*)0)->TABLEID), - sizeof(((sysTab_NDBEVENTS_0*)0)->TABLEVERSION), - sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME), - sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK), - sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID), - sizeof(((sysTab_NDBEVENTS_0*)0)->SUBKEY) -}; - -void -Dbdict::prepareTransactionEventSysTable (Callback *pcallback, - Signal* signal, - Uint32 senderData, - UtilPrepareReq::OperationTypeValue prepReq) -{ - // find table id for event system table - DictObject * opj_ptr_p = get_object(EVENT_SYSTEM_TABLE_NAME, - sizeof(EVENT_SYSTEM_TABLE_NAME)); - - ndbrequire(opj_ptr_p != 0); - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, opj_ptr_p->m_id); - ndbrequire(tablePtr.i != RNIL); // system table must exist - - Uint32 tableId = tablePtr.p->tableId; /* System table */ - Uint32 noAttr = tablePtr.p->noOfAttributes; - ndbrequire(noAttr == EVENT_SYSTEM_TABLE_LENGTH); - - switch (prepReq) { - case UtilPrepareReq::Update: - case UtilPrepareReq::Insert: - case UtilPrepareReq::Write: - case UtilPrepareReq::Read: - jam(); - break; - case UtilPrepareReq::Delete: - jam(); - noAttr = 1; // only involves Primary key which should be the first - break; - } - prepareUtilTransaction(pcallback, signal, senderData, tableId, NULL, - prepReq, noAttr, NULL, NULL); -} - -void -Dbdict::prepareUtilTransaction(Callback *pcallback, - Signal* signal, - Uint32 senderData, - Uint32 tableId, - const char* tableName, - UtilPrepareReq::OperationTypeValue prepReq, - Uint32 noAttr, - Uint32 attrIds[], - const char *attrNames[]) -{ - jam(); - EVENT_TRACE; - - UtilPrepareReq * utilPrepareReq = - (UtilPrepareReq *)signal->getDataPtrSend(); - - utilPrepareReq->setSenderRef(reference()); - utilPrepareReq->setSenderData(senderData); - - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, prepReq); - if (tableName) { - jam(); - w.add(UtilPrepareReq::TableName, tableName); - } else { - jam(); - w.add(UtilPrepareReq::TableId, tableId); - } - for(Uint32 i = 0; i < noAttr; i++) - if (tableName) { - jam(); - w.add(UtilPrepareReq::AttributeName, attrNames[i]); 
- } else { - if (attrIds) { - jam(); - w.add(UtilPrepareReq::AttributeId, attrIds[i]); - } else { - jam(); - w.add(UtilPrepareReq::AttributeId, i); - } - } -#ifdef EVENT_DEBUG - // Debugging - SimplePropertiesLinearReader reader(propPage, w.getWordsUsed()); - printf("Dict::prepareInsertTransactions: Sent SimpleProperties:\n"); - reader.printAll(ndbout); -#endif - - struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections]; - sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage; - sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed(); - - sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal, - UtilPrepareReq::SignalLength, JBB, - sectionsPtr, UtilPrepareReq::NoOfSections); -} - -/***************************************************************** - * - * CREATE_EVNT_REQ has three types RT_CREATE, RT_GET (from user) - * and RT_DICT_AFTER_GET send from master DICT to slaves - * - * This function just dscpaches these to - * - * createEvent_RT_USER_CREATE - * createEvent_RT_USER_GET - * createEvent_RT_DICT_AFTER_GET - * - * repectively - * - */ - -void -Dbdict::execCREATE_EVNT_REQ(Signal* signal) -{ - jamEntry(); - -#if 0 - { - SafeCounterHandle handle; - { - SafeCounter tmp(c_counterMgr, handle); - tmp.init(CMVMI, GSN_DUMP_STATE_ORD, /* senderData */ 13); - tmp.clearWaitingFor(); - tmp.setWaitingFor(3); - ndbrequire(!tmp.done()); - ndbout_c("Allocted"); - } - ndbrequire(!handle.done()); - { - SafeCounter tmp(c_counterMgr, handle); - tmp.clearWaitingFor(3); - ndbrequire(tmp.done()); - ndbout_c("Deallocted"); - } - ndbrequire(handle.done()); - } - { - NodeBitmask nodes; - nodes.clear(); - - nodes.set(2); - nodes.set(3); - nodes.set(4); - nodes.set(5); - - { - Uint32 i = 0; - while((i = nodes.find(i)) != NodeBitmask::NotFound){ - ndbout_c("1 Node id = %u", i); - i++; - } - } - - NodeReceiverGroup rg(DBDICT, nodes); - RequestTracker rt2; - ndbrequire(rt2.done()); - ndbrequire(!rt2.hasRef()); - ndbrequire(!rt2.hasConf()); - rt2.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13); - - RequestTracker rt3; - rt3.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportRef(c_counterMgr, 2); - rt3.reportConf(c_counterMgr, 2); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 3); - rt3.reportConf(c_counterMgr, 3); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 4); - rt3.reportConf(c_counterMgr, 4); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 5); - rt3.reportConf(c_counterMgr, 5); - - ndbrequire(rt2.done()); - ndbrequire(rt3.done()); - } -#endif - - if (! 
assembleFragments(signal)) { - jam(); - return; - } - - CreateEvntReq *req = (CreateEvntReq*)signal->getDataPtr(); - const CreateEvntReq::RequestType requestType = req->getRequestType(); - const Uint32 requestFlag = req->getRequestFlag(); - - if (refToBlock(signal->senderBlockRef()) != DBDICT && - getOwnNodeId() != c_masterNodeId) - { - jam(); - releaseSections(signal); - - CreateEvntRef * ref = (CreateEvntRef *)signal->getDataPtrSend(); - ref->setUserRef(reference()); - ref->setErrorCode(CreateEvntRef::NotMaster); - ref->setErrorLine(__LINE__); - ref->setErrorNode(reference()); - ref->setMasterNode(c_masterNodeId); - sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal, - CreateEvntRef::SignalLength2, JBB); - return; - } - - OpCreateEventPtr evntRecPtr; - // Seize a Create Event record - if (!c_opCreateEvent.seize(evntRecPtr)) { - // Failed to allocate event record - jam(); - releaseSections(signal); - - CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend(); - ret->senderRef = reference(); - ret->setErrorCode(747); - ret->setErrorLine(__LINE__); - ret->setErrorNode(reference()); - sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal, - CreateEvntRef::SignalLength, JBB); - return; - } - -#ifdef EVENT_DEBUG - ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i); -#endif - - ndbrequire(req->getUserRef() == signal->getSendersBlockRef()); - - evntRecPtr.p->init(req,this); - - if (requestFlag & (Uint32)CreateEvntReq::RT_DICT_AFTER_GET) { - jam(); - EVENT_TRACE; - createEvent_RT_DICT_AFTER_GET(signal, evntRecPtr); - return; - } - if (requestType == CreateEvntReq::RT_USER_GET) { - jam(); - EVENT_TRACE; - createEvent_RT_USER_GET(signal, evntRecPtr); - return; - } - if (requestType == CreateEvntReq::RT_USER_CREATE) { - jam(); - EVENT_TRACE; - createEvent_RT_USER_CREATE(signal, evntRecPtr); - return; - } - -#ifdef EVENT_DEBUG - ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ other" << endl; -#endif - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); -} - -/******************************************************************** - * - * Event creation - * - *****************************************************************/ - -void -Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr) -{ - jam(); - DBUG_ENTER("Dbdict::createEvent_RT_USER_CREATE"); - evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef()); - -#ifdef EVENT_DEBUG - ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl; - char buf[128] = {0}; - AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask(); - mask.getText(buf); - ndbout_c("mask = %s", buf); -#endif - - // Interpret the long signal - - SegmentedSectionPtr ssPtr; - // save name and event properties - signal->getSection(ssPtr, CreateEvntReq::EVENT_NAME_SECTION); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - // event name - if ((!r0.first()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; - } - r0.getString(evntRecPtr.p->m_eventRec.NAME); - { - int 
len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); -#ifdef EVENT_DEBUG - printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n", - evntRecPtr.p->m_eventRec.NAME, len); - for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++) - printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]); - printf("\n"); -#endif - } - // table name - if ((!r0.next()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; - } - r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME); - { - int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME); - memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len); - } - - releaseSections(signal); - - // Send request to SUMA - - CreateSubscriptionIdReq * sumaIdReq = - (CreateSubscriptionIdReq *)signal->getDataPtrSend(); - - // make sure we save the original sender for later - sumaIdReq->senderRef = reference(); - sumaIdReq->senderData = evntRecPtr.i; -#ifdef EVENT_DEBUG - ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl; -#endif - sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal, - CreateSubscriptionIdReq::SignalLength, JBB); - // we should now return in either execCREATE_SUBID_CONF - // or execCREATE_SUBID_REF - DBUG_VOID_RETURN; -} - -void Dbdict::execCREATE_SUBID_REF(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execCREATE_SUBID_REF"); - CreateSubscriptionIdRef * const ref = - (CreateSubscriptionIdRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->senderData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - if (ref->errorCode) - { - evntRecPtr.p->m_errorCode = ref->errorCode; - evntRecPtr.p->m_errorLine = __LINE__; - } - else - { - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - } - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; -} - -void Dbdict::execCREATE_SUBID_CONF(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execCREATE_SUBID_CONF"); - - CreateSubscriptionIdConf const * sumaIdConf = - (CreateSubscriptionIdConf *)signal->getDataPtr(); - - Uint32 evntRecId = sumaIdConf->senderData; - OpCreateEvent *evntRec; - - ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL); - - evntRec->m_request.setEventId(sumaIdConf->subscriptionId); - evntRec->m_request.setEventKey(sumaIdConf->subscriptionKey); - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecId, - UtilPrepareReq::Insert); - DBUG_VOID_RETURN; -} - -void -Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal, - OpCreateEventPtr evntRecPtr){ - jam(); - createEvent_sendReply(signal, evntRecPtr); -} - -/********************************************************************* - * - * UTIL_PREPARE, UTIL_EXECUTE - * - * insert or read systable NDB$EVENTS_0 - */ - -void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode, - Uint32& error, Uint32& line) -{ - DBUG_ENTER("interpretUtilPrepareErrorCode"); - switch (errorCode) { - case UtilPrepareRef::NO_ERROR: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - case UtilPrepareRef::PREPARE_SEIZE_ERROR: - jam(); - error = 748; - 
line = __LINE__; - DBUG_VOID_RETURN; - case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - case UtilPrepareRef::DICT_TAB_INFO_ERROR: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - case UtilPrepareRef::MISSING_PROPERTIES_SECTION: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - default: - jam(); - error = 1; - line = __LINE__; - DBUG_VOID_RETURN; - } - DBUG_VOID_RETURN; -} - -void -Dbdict::createEventUTIL_PREPARE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode == 0) { - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - jam(); - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 }; - - switch (evntRecPtr.p->m_requestType) { - case CreateEvntReq::RT_USER_GET: - jam(); - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Read); - break; - case CreateEvntReq::RT_USER_CREATE: - { - evntRecPtr.p->m_eventRec.EVENT_TYPE = - evntRecPtr.p->m_request.getEventType() | evntRecPtr.p->m_request.getReportFlags(); - evntRecPtr.p->m_eventRec.TABLEID = evntRecPtr.p->m_request.getTableId(); - evntRecPtr.p->m_eventRec.TABLEVERSION=evntRecPtr.p->m_request.getTableVersion(); - AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask(); - memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m, - sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK)); - evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId(); - evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey(); - DBUG_PRINT("info", - ("CREATE: event name: %s table name: %s table id: %u table version: %u", - evntRecPtr.p->m_eventRec.NAME, - evntRecPtr.p->m_eventRec.TABLE_NAME, - evntRecPtr.p->m_eventRec.TABLEID, - evntRecPtr.p->m_eventRec.TABLEVERSION)); - - } - jam(); - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Insert); - break; - default: -#ifdef EVENT_DEBUG - printf("type = %d\n", evntRecPtr.p->m_requestType); - printf("bet type = %d\n", CreateEvntReq::RT_USER_GET); - printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE); -#endif - ndbrequire(false); - } - } else { // returnCode != 0 - UtilPrepareRef* const ref = (UtilPrepareRef*)signal->getDataPtr(); - - const UtilPrepareRef::ErrorCode errorCode = - (UtilPrepareRef::ErrorCode)ref->getErrorCode(); - - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - interpretUtilPrepareErrorCode(errorCode, evntRecPtr.p->m_errorCode, - evntRecPtr.p->m_errorLine); - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - } -} - -void Dbdict::executeTransEventSysTable(Callback *pcallback, Signal *signal, - const Uint32 ptrI, - sysTab_NDBEVENTS_0& m_eventRec, - const Uint32 prepareId, - UtilPrepareReq::OperationTypeValue prepReq) -{ - jam(); - const Uint32 noAttr = EVENT_SYSTEM_TABLE_LENGTH; - Uint32 total_len = 0; - - Uint32* attrHdr = signal->theData + 25; - Uint32* attrPtr = attrHdr; - - Uint32 id=0; - // attribute 0 event name: Primary Key - { - 
AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]); - total_len += sysTab_NDBEVENTS_0_szs[id]; - attrPtr++; id++; - } - - switch (prepReq) { - case UtilPrepareReq::Read: - jam(); - EVENT_TRACE; - // no more - while ( id < noAttr ) - AttributeHeader::init(attrPtr++, id++, 0); - ndbrequire(id == (Uint32) noAttr); - break; - case UtilPrepareReq::Insert: - jam(); - EVENT_TRACE; - while ( id < noAttr ) { - AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]); - total_len += sysTab_NDBEVENTS_0_szs[id]; - attrPtr++; id++; - } - ndbrequire(id == (Uint32) noAttr); - break; - case UtilPrepareReq::Delete: - ndbrequire(id == 1); - break; - default: - ndbrequire(false); - } - - LinearSectionPtr headerPtr; - LinearSectionPtr dataPtr; - - headerPtr.p = attrHdr; - headerPtr.sz = noAttr; - - dataPtr.p = (Uint32*)&m_eventRec; - dataPtr.sz = total_len/4; - - ndbrequire((total_len == sysTab_NDBEVENTS_0_szs[0]) || - (total_len == sizeof(sysTab_NDBEVENTS_0))); - -#if 0 - printf("Header size %u\n", headerPtr.sz); - for(int i = 0; i < (int)headerPtr.sz; i++) - printf("H'%.8x ", attrHdr[i]); - printf("\n"); - - printf("Data size %u\n", dataPtr.sz); - for(int i = 0; i < (int)dataPtr.sz; i++) - printf("H'%.8x ", dataPage[i]); - printf("\n"); -#endif - - executeTransaction(pcallback, signal, - ptrI, - prepareId, - id, - headerPtr, - dataPtr); -} - -void Dbdict::executeTransaction(Callback *pcallback, - Signal* signal, - Uint32 senderData, - Uint32 prepareId, - Uint32 noAttr, - LinearSectionPtr headerPtr, - LinearSectionPtr dataPtr) -{ - jam(); - EVENT_TRACE; - - UtilExecuteReq * utilExecuteReq = - (UtilExecuteReq *)signal->getDataPtrSend(); - - utilExecuteReq->setSenderRef(reference()); - utilExecuteReq->setSenderData(senderData); - utilExecuteReq->setPrepareId(prepareId); - utilExecuteReq->setReleaseFlag(); // must be done after setting prepareId - -#if 0 - printf("Header size %u\n", headerPtr.sz); - for(int i = 0; i < (int)headerPtr.sz; i++) - printf("H'%.8x ", headerBuffer[i]); - printf("\n"); - - printf("Data size %u\n", dataPtr.sz); - for(int i = 0; i < (int)dataPtr.sz; i++) - printf("H'%.8x ", dataBuffer[i]); - printf("\n"); -#endif - - struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections]; - sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerPtr.p; - sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr; - sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataPtr.p; - sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz; - - sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal, - UtilExecuteReq::SignalLength, JBB, - sectionsPtr, UtilExecuteReq::NoOfSections); -} - -void Dbdict::parseReadEventSys(Signal* signal, sysTab_NDBEVENTS_0& m_eventRec) -{ - SegmentedSectionPtr headerPtr, dataPtr; - jam(); - signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION); - SectionReader headerReader(headerPtr, getSectionSegmentPool()); - - signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION); - SectionReader dataReader(dataPtr, getSectionSegmentPool()); - - AttributeHeader header; - Uint32 *dst = (Uint32*)&m_eventRec; - - for (int i = 0; i < EVENT_SYSTEM_TABLE_LENGTH; i++) { - headerReader.getWord((Uint32 *)&header); - int sz = header.getDataSize(); - for (int i=0; i < sz; i++) - dataReader.getWord(dst++); - } - - ndbrequire( ((char*)dst-(char*)&m_eventRec) == sizeof(m_eventRec) ); - - releaseSections(signal); -} - -void Dbdict::createEventUTIL_EXECUTE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode == 0) 
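The system-table access above sends two sections with UTIL_EXECUTE: a header section holding one AttributeHeader per column (attribute id plus data size in words) and a data section holding the packed NDB$EVENTS_0 row; a read keeps only the key's size non-zero, an insert carries the full row. A small sketch of that packing with a simplified AttributeHeader layout (id in the upper half, size in the lower half):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Simplified AttributeHeader; the real class packs more flags than id and word count.
    static uint32_t makeAttributeHeader(uint32_t attrId, uint32_t sizeInWords)
    {
        return (attrId << 16) | (sizeInWords & 0xffff);
    }

    int main()
    {
        // Hypothetical column sizes in words; column 0 is the primary key (event name).
        const uint32_t colSizes[] = {8, 1, 1, 1, 8, 1, 1, 1};
        const uint32_t noAttr = sizeof(colSizes) / sizeof(colSizes[0]);
        const bool isRead = false;           // true models UtilPrepareReq::Read

        std::vector<uint32_t> header;        // HEADER_SECTION: one word per column
        uint32_t dataWords = 0;              // length of the DATA_SECTION
        for (uint32_t id = 0; id < noAttr; id++) {
            uint32_t sz = (isRead && id > 0) ? 0 : colSizes[id];
            header.push_back(makeAttributeHeader(id, sz));
            dataWords += sz;
        }
        std::printf("header: %zu words, data: %u words\n", header.size(), dataWords);
        return 0;
    }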
{ - // Entry into system table all set - UtilExecuteConf* const conf = (UtilExecuteConf*)signal->getDataPtr(); - jam(); - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = conf->getSenderData(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - OpCreateEvent *evntRec = evntRecPtr.p; - - switch (evntRec->m_requestType) { - case CreateEvntReq::RT_USER_GET: { - parseReadEventSys(signal, evntRecPtr.p->m_eventRec); - - evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE); - evntRec->m_request.setReportFlags(evntRecPtr.p->m_eventRec.EVENT_TYPE); - evntRec->m_request.setTableId(evntRecPtr.p->m_eventRec.TABLEID); - evntRec->m_request.setTableVersion(evntRecPtr.p->m_eventRec.TABLEVERSION); - evntRec->m_request.setAttrListBitmask(*(AttributeMask*) - evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK); - evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID); - evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY); - - DBUG_PRINT("info", - ("GET: event name: %s table name: %s table id: %u table version: %u", - evntRecPtr.p->m_eventRec.NAME, - evntRecPtr.p->m_eventRec.TABLE_NAME, - evntRecPtr.p->m_eventRec.TABLEID, - evntRecPtr.p->m_eventRec.TABLEVERSION)); - - // find table id for event table - DictObject* obj_ptr_p = get_object(evntRecPtr.p->m_eventRec.TABLE_NAME); - if(!obj_ptr_p){ - jam(); - evntRecPtr.p->m_errorCode = 723; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, obj_ptr_p->m_id); - evntRec->m_request.setTableId(tablePtr.p->tableId); - evntRec->m_request.setTableVersion(tablePtr.p->tableVersion); - - createEventComplete_RT_USER_GET(signal, evntRecPtr); - return; - } - case CreateEvntReq::RT_USER_CREATE: { -#ifdef EVENT_DEBUG - printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE); -#endif - jam(); - createEventComplete_RT_USER_CREATE(signal, evntRecPtr); - return; - } - break; - default: - ndbrequire(false); - } - } else { // returnCode != 0 - UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - jam(); - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - - switch (ref->getErrorCode()) { - case UtilExecuteRef::TCError: - switch (ref->getTCErrorCode()) { - case ZNOT_FOUND: - jam(); - evntRecPtr.p->m_errorCode = 4710; - break; - case ZALREADYEXIST: - jam(); - evntRecPtr.p->m_errorCode = 746; - break; - default: - jam(); - evntRecPtr.p->m_errorCode = ref->getTCErrorCode(); - break; - } - break; - default: - jam(); - evntRecPtr.p->m_errorCode = ref->getErrorCode(); - break; - } - - createEvent_sendReply(signal, evntRecPtr); - } -} - -/*********************************************************************** - * - * NdbEventOperation, reading systable, creating event in suma - * - */ - -void -Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){ - jam(); - EVENT_TRACE; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REQ::RT_USER_GET evntRecPtr.i = (%d), ref = %u", evntRecPtr.i, evntRecPtr.p->m_request.getUserRef()); -#endif - - SegmentedSectionPtr ssPtr; - - signal->getSection(ssPtr, 0); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - if ((!r0.first()) || - 
(r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - - r0.getString(evntRecPtr.p->m_eventRec.NAME); - int len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Read); - /* - * Will read systable and fill an OpCreateEventPtr - * and return below - */ -} - -void -Dbdict::createEventComplete_RT_USER_GET(Signal* signal, - OpCreateEventPtr evntRecPtr){ - jam(); - - // Send to oneself and the other DICT's - CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend(); - - *req = evntRecPtr.p->m_request; - req->senderRef = reference(); - req->senderData = evntRecPtr.i; - - req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = evntRecPtr.p->m_reqTracker; - if (!p.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, - evntRecPtr.i)) - { - jam(); - evntRecPtr.p->m_errorCode = 701; - createEvent_sendReply(signal, evntRecPtr); - return; - } - - sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB); -} - -void -Dbdict::createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI, - Uint32 returnCode){ - OpCreateEventPtr evntRecPtr; - c_opCreateEvent.getPtr(evntRecPtr, eventRecPtrI); - createEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::execCREATE_EVNT_REF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->getUserData(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REF evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF){ - jam(); - evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(ref->senderRef)); - } else { - jam(); - evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(ref->senderRef)); - } - createEvent_sendReply(signal, evntRecPtr); - - return; -} - -void Dbdict::execCREATE_EVNT_CONF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - CreateEvntConf * const conf = (CreateEvntConf *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = conf->getUserData(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_CONF evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - evntRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(conf->senderRef)); - - // we will only have a valid tablename if it the master DICT sending this - // but that's ok - LinearSectionPtr ptr[1]; - ptr[0].p = (Uint32 *)evntRecPtr.p->m_eventRec.TABLE_NAME; - ptr[0].sz = - (strlen(evntRecPtr.p->m_eventRec.TABLE_NAME)+4)/4; // to make sure we have a null - - createEvent_sendReply(signal, evntRecPtr, ptr, 1); - - return; -} - 
-/************************************************ - * - * Participant stuff - * - */ - -void -Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){ - DBUG_ENTER("Dbdict::createEvent_RT_DICT_AFTER_GET"); - jam(); - evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef()); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - // the signal comes from the DICT block that got the first user request! - // This code runs on all DICT nodes, including oneself - - // Seize a Create Event record, the Coordinator will now have two seized - // but that's ok, it's like a recursion - - CRASH_INSERTION2(6009, getOwnNodeId() != c_masterNodeId); - - SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend(); - - sumaReq->senderRef = reference(); // reference to DICT - sumaReq->senderData = evntRecPtr.i; - sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId(); - sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey(); - sumaReq->subscriptionType = SubCreateReq::TableEvent; - if (evntRecPtr.p->m_request.getReportAll()) - sumaReq->subscriptionType|= SubCreateReq::ReportAll; - if (evntRecPtr.p->m_request.getReportSubscribe()) - sumaReq->subscriptionType|= SubCreateReq::ReportSubscribe; - sumaReq->tableId = evntRecPtr.p->m_request.getTableId(); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("sending GSN_SUB_CREATE_REQ"); -#endif - - sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal, - SubCreateReq::SignalLength, JBB); - DBUG_VOID_RETURN; -} - -void Dbdict::execSUB_CREATE_REF(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execSUB_CREATE_REF"); - - SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->senderData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - if (ref->errorCode == 1415) { - jam(); - createEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; - } - - if (ref->errorCode) - { - evntRecPtr.p->m_errorCode = ref->errorCode; - evntRecPtr.p->m_errorLine = __LINE__; - } - else - { - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - } - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; -} - -void Dbdict::execSUB_CREATE_CONF(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execSUB_CREATE_CONF"); - EVENT_TRACE; - - SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = sumaConf->senderData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - createEvent_sendReply(signal, evntRecPtr); - - DBUG_VOID_RETURN; -} - -/**************************************************** - * - * common create reply method - * - *******************************************************/ - -void Dbdict::createEvent_sendReply(Signal* signal, - OpCreateEventPtr evntRecPtr, - LinearSectionPtr *ptr, int noLSP) -{ - jam(); - EVENT_TRACE; - - // check if we're ready to sent reply - // if we are the master dict we might be waiting for conf/ref - - if (!evntRecPtr.p->m_reqTracker.done()) { - jam(); - return; // there's more to come - } - - if (evntRecPtr.p->m_reqTracker.hasRef()) { - ptr = NULL; // we don't want to return anything if there's an error - if (!evntRecPtr.p->hasError()) { - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - jam(); - } else - 
jam(); - } - - // reference to API if master DICT - // else reference to master DICT - Uint32 senderRef = evntRecPtr.p->m_request.getUserRef(); - Uint32 signalLength; - Uint32 gsn; - - if (evntRecPtr.p->hasError()) { - jam(); - EVENT_TRACE; - CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend(); - - ret->setEventId(evntRecPtr.p->m_request.getEventId()); - ret->setEventKey(evntRecPtr.p->m_request.getEventKey()); - ret->setUserData(evntRecPtr.p->m_request.getUserData()); - ret->senderRef = reference(); - ret->setTableId(evntRecPtr.p->m_request.getTableId()); - ret->setTableVersion(evntRecPtr.p->m_request.getTableVersion()); - ret->setEventType(evntRecPtr.p->m_request.getEventType()); - ret->setRequestType(evntRecPtr.p->m_request.getRequestType()); - - ret->setErrorCode(evntRecPtr.p->m_errorCode); - ret->setErrorLine(evntRecPtr.p->m_errorLine); - ret->setErrorNode(evntRecPtr.p->m_errorNode); - - signalLength = CreateEvntRef::SignalLength; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT sending GSN_CREATE_EVNT_REF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef); - ndbout_c("errorCode = %u", evntRecPtr.p->m_errorCode); - ndbout_c("errorLine = %u", evntRecPtr.p->m_errorLine); -#endif - gsn = GSN_CREATE_EVNT_REF; - - } else { - jam(); - EVENT_TRACE; - CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend(); - - evntConf->setEventId(evntRecPtr.p->m_request.getEventId()); - evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey()); - evntConf->setUserData(evntRecPtr.p->m_request.getUserData()); - evntConf->senderRef = reference(); - evntConf->setTableId(evntRecPtr.p->m_request.getTableId()); - evntConf->setTableVersion(evntRecPtr.p->m_request.getTableVersion()); - evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask()); - evntConf->setEventType(evntRecPtr.p->m_request.getEventType()); - evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType()); - - signalLength = CreateEvntConf::SignalLength; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT sending GSN_CREATE_EVNT_CONF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef); -#endif - gsn = GSN_CREATE_EVNT_CONF; - } - - if (ptr) { - jam(); - sendSignal(senderRef, gsn, signal, signalLength, JBB, ptr, noLSP); - } else { - jam(); - sendSignal(senderRef, gsn, signal, signalLength, JBB); - } - - c_opCreateEvent.release(evntRecPtr); -} - -/*************************************************************/ - -/******************************************************************** - * - * Start event - * - *******************************************************************/ - -void Dbdict::execSUB_START_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 origSenderRef = signal->senderBlockRef(); - - if (refToBlock(origSenderRef) != DBDICT && - getOwnNodeId() != c_masterNodeId) - { - /* - * Coordinator but not master - */ - SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->errorCode = SubStartRef::NotMaster; - ref->m_masterNodeId = c_masterNodeId; - sendSignal(origSenderRef, GSN_SUB_START_REF, signal, - SubStartRef::SignalLength2, JBB); - return; - } - OpSubEventPtr subbPtr; - Uint32 errCode = 0; - - DictLockPtr loopPtr; - if (c_dictLockQueue.first(loopPtr) && - loopPtr.p->lt->lockType == DictLockReq::NodeRestartLock) - { - jam(); - errCode = 1405; - goto busy; - } - - if (!c_opSubEvent.seize(subbPtr)) { - errCode = SubStartRef::Busy; -busy: - jam(); - SubStartRef * ref = (SubStartRef 
*)signal->getDataPtrSend(); - - { // fix - Uint32 subcriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef; - ref->subscriberRef = subcriberRef; - } - jam(); - // ret->setErrorCode(SubStartRef::SeizeError); - // ret->setErrorLine(__LINE__); - // ret->setErrorNode(reference()); - ref->senderRef = reference(); - ref->errorCode = errCode; - - sendSignal(origSenderRef, GSN_SUB_START_REF, signal, - SubStartRef::SignalLength2, JBB); - return; - } - - { - const SubStartReq* req = (SubStartReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = req->senderData; - subbPtr.p->m_errorCode = 0; - } - - if (refToBlock(origSenderRef) != DBDICT) { - /* - * Coordinator - */ - jam(); - - subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = subbPtr.p->m_reqTracker; - if (!p.init(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i)) - { - c_opSubEvent.release(subbPtr); - errCode = SubStartRef::Busy; - goto busy; - } - - SubStartReq* req = (SubStartReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i); -#endif - - sendSignal(rg, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB); - return; - } - /* - * Participant - */ - ndbrequire(refToBlock(origSenderRef) == DBDICT); - - CRASH_INSERTION(6007); - - { - SubStartReq* req = (SubStartReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i); -#endif - sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB); - } -} - -void Dbdict::execSUB_START_REF(Signal* signal) -{ - jamEntry(); - - const SubStartRef* ref = (SubStartRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - Uint32 err = ref->errorCode; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i); -#endif - - jam(); - SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - ref->errorCode = err; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, - signal, SubStartRef::SignalLength2, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i); -#endif - if (err == SubStartRef::NF_FakeErrorREF){ - jam(); - subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - if (subbPtr.p->m_errorCode == 0) - { - subbPtr.p->m_errorCode= err ? 
err : 1; - } - subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubStartReq(signal,subbPtr.i,0); -} - -void Dbdict::execSUB_START_CONF(Signal* signal) -{ - jamEntry(); - - const SubStartConf* conf = (SubStartConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - SubStartConf* conf = (SubStartConf*) signal->getDataPtrSend(); - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) got GSN_SUB_START_CONF = (%d)", subbPtr.i); -#endif - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF, - signal, SubStartConf::SignalLength2, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i); -#endif - subbPtr.p->m_sub_start_conf = *conf; - subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubStartReq(signal,subbPtr.i,0); -} - -/* - * Coordinator - */ -void Dbdict::completeSubStartReq(Signal* signal, - Uint32 ptrI, - Uint32 returnCode){ - jam(); - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ptrI); - - if (!subbPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (subbPtr.p->m_reqTracker.hasRef()) { - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_START_REF"); -#endif - SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->errorCode = subbPtr.p->m_errorCode; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, - signal, SubStartRef::SignalLength, JBB); - if (subbPtr.p->m_reqTracker.hasConf()) { - // stopStartedNodes(signal); - } - c_opSubEvent.release(subbPtr); - return; - } -#ifdef EVENT_DEBUG - ndbout_c("SUB_START_CONF"); -#endif - - SubStartConf* conf = (SubStartConf*)signal->getDataPtrSend(); - * conf = subbPtr.p->m_sub_start_conf; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF, - signal, SubStartConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); -} - -/******************************************************************** - * - * Stop event - * - *******************************************************************/ - -void Dbdict::execSUB_STOP_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 origSenderRef = signal->senderBlockRef(); - - if (refToBlock(origSenderRef) != DBDICT && - getOwnNodeId() != c_masterNodeId) - { - /* - * Coordinator but not master - */ - SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->errorCode = SubStopRef::NotMaster; - ref->m_masterNodeId = c_masterNodeId; - sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal, - SubStopRef::SignalLength2, JBB); - return; - } - OpSubEventPtr subbPtr; - Uint32 errCode = 0; - if (!c_opSubEvent.seize(subbPtr)) { - errCode = SubStopRef::Busy; -busy: - SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); - jam(); - // ret->setErrorCode(SubStartRef::SeizeError); - // ret->setErrorLine(__LINE__); - // ret->setErrorNode(reference()); - ref->senderRef = reference(); - ref->errorCode = errCode; - - sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal, - SubStopRef::SignalLength, JBB); - return; - } - - { - const SubStopReq* req = (SubStopReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = 
req->senderData; - subbPtr.p->m_errorCode = 0; - } - - if (refToBlock(origSenderRef) != DBDICT) { - /* - * Coordinator - */ - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REQ 1"); -#endif - subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = subbPtr.p->m_reqTracker; - if (!p.init(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i)) - { - jam(); - c_opSubEvent.release(subbPtr); - errCode = SubStopRef::Busy; - goto busy; - } - - SubStopReq* req = (SubStopReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(rg, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - return; - } - /* - * Participant - */ -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REQ 2"); -#endif - ndbrequire(refToBlock(origSenderRef) == DBDICT); - - CRASH_INSERTION(6008); - - { - SubStopReq* req = (SubStopReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - } -} - -void Dbdict::execSUB_STOP_REF(Signal* signal) -{ - jamEntry(); - const SubStopRef* ref = (SubStopRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - Uint32 err = ref->errorCode; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - ref->errorCode = err; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF, - signal, SubStopRef::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - if (err == SubStopRef::NF_FakeErrorREF){ - jam(); - subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - if (subbPtr.p->m_errorCode == 0) - { - subbPtr.p->m_errorCode= err ? 
err : 1; - } - subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubStopReq(signal,subbPtr.i,0); -} - -void Dbdict::execSUB_STOP_CONF(Signal* signal) -{ - jamEntry(); - - const SubStopConf* conf = (SubStopConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - SubStopConf* conf = (SubStopConf*) signal->getDataPtrSend(); - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF, - signal, SubStopConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - subbPtr.p->m_sub_stop_conf = *conf; - subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubStopReq(signal,subbPtr.i,0); -} - -/* - * Coordinator - */ -void Dbdict::completeSubStopReq(Signal* signal, - Uint32 ptrI, - Uint32 returnCode){ - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ptrI); - - if (!subbPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (subbPtr.p->m_reqTracker.hasRef()) { - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REF"); -#endif - SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend(); - - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - ref->errorCode = subbPtr.p->m_errorCode; - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF, - signal, SubStopRef::SignalLength, JBB); - if (subbPtr.p->m_reqTracker.hasConf()) { - // stopStartedNodes(signal); - } - c_opSubEvent.release(subbPtr); - return; - } -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_CONF"); -#endif - SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend(); - * conf = subbPtr.p->m_sub_stop_conf; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF, - signal, SubStopConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); -} - -/*************************************************************** - * MODULE: Drop event. - * - * Drop event. 
- * - * TODO - */ - -void -Dbdict::execDROP_EVNT_REQ(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execDROP_EVNT_REQ"); - - DropEvntReq *req = (DropEvntReq*)signal->getDataPtr(); - const Uint32 senderRef = signal->senderBlockRef(); - OpDropEventPtr evntRecPtr; - - if (refToBlock(senderRef) != DBDICT && - getOwnNodeId() != c_masterNodeId) - { - jam(); - releaseSections(signal); - - DropEvntRef * ref = (DropEvntRef *)signal->getDataPtrSend(); - ref->setUserRef(reference()); - ref->setErrorCode(DropEvntRef::NotMaster); - ref->setErrorLine(__LINE__); - ref->setErrorNode(reference()); - ref->setMasterNode(c_masterNodeId); - sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, - DropEvntRef::SignalLength2, JBB); - return; - } - - // Seize a Create Event record - if (!c_opDropEvent.seize(evntRecPtr)) { - // Failed to allocate event record - jam(); - releaseSections(signal); - - DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend(); - ret->setErrorCode(747); - ret->setErrorLine(__LINE__); - ret->setErrorNode(reference()); - sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, - DropEvntRef::SignalLength, JBB); - DBUG_VOID_RETURN; - } - -#ifdef EVENT_DEBUG - ndbout_c("DBDICT::execDROP_EVNT_REQ evntRecId = (%d)", evntRecPtr.i); -#endif - - OpDropEvent* evntRec = evntRecPtr.p; - evntRec->init(req); - - SegmentedSectionPtr ssPtr; - - signal->getSection(ssPtr, 0); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - // event name - if ((!r0.first()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = 1; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - dropEvent_sendReply(signal, evntRecPtr); - DBUG_VOID_RETURN; - } - r0.getString(evntRecPtr.p->m_eventRec.NAME); - { - int len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); -#ifdef EVENT_DEBUG - printf("DropEvntReq; EventName %s, len %u\n", - evntRecPtr.p->m_eventRec.NAME, len); - for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++) - printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]); - printf("\n"); -#endif - } - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Read); - DBUG_VOID_RETURN; -} - -void -Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilPrepareRef(signal, callbackData, returnCode); - return; - } - - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_READ), 0 }; - - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Read); -} - -void -Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilExecuteRef(signal, callbackData, returnCode); - return; - } - - OpDropEventPtr evntRecPtr; - UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr(); - jam(); - 
evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - parseReadEventSys(signal, evntRecPtr.p->m_eventRec); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = evntRecPtr.p->m_reqTracker; - if (!p.init(c_counterMgr, rg, GSN_SUB_REMOVE_REF, - evntRecPtr.i)) - { - evntRecPtr.p->m_errorCode = 701; - dropEvent_sendReply(signal, evntRecPtr); - return; - } - - SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = evntRecPtr.i; - req->subscriptionId = evntRecPtr.p->m_eventRec.SUBID; - req->subscriptionKey = evntRecPtr.p->m_eventRec.SUBKEY; - - sendSignal(rg, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB); -} - -/* - * Participant - */ - -void -Dbdict::execSUB_REMOVE_REQ(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execSUB_REMOVE_REQ"); - - Uint32 origSenderRef = signal->senderBlockRef(); - - OpSubEventPtr subbPtr; - if (!c_opSubEvent.seize(subbPtr)) { - SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend(); - jam(); - ref->senderRef = reference(); - ref->errorCode = SubRemoveRef::Busy; - - sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal, - SubRemoveRef::SignalLength, JBB); - DBUG_VOID_RETURN; - } - - { - const SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = req->senderData; - subbPtr.p->m_errorCode = 0; - } - - CRASH_INSERTION2(6010, getOwnNodeId() != c_masterNodeId); - - SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB); - DBUG_VOID_RETURN; -} - -/* - * Coordintor/Participant - */ - -void -Dbdict::execSUB_REMOVE_REF(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Dbdict::execSUB_REMOVE_REF"); - - const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - Uint32 err= ref->errorCode; - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - if (err == 1407) { - // conf this since this may occur if a nodefailure has occured - // earlier so that the systable was not cleared - SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF, - signal, SubRemoveConf::SignalLength, JBB); - } else { - SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - ref->errorCode = err; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF, - signal, SubRemoveRef::SignalLength, JBB); - } - c_opSubEvent.release(subbPtr); - DBUG_VOID_RETURN; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - OpDropEventPtr eventRecPtr; - c_opDropEvent.getPtr(eventRecPtr, ref->senderData); - if (err == SubRemoveRef::NF_FakeErrorREF){ - jam(); - eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - if (eventRecPtr.p->m_errorCode == 0) - { - eventRecPtr.p->m_errorCode= err ? 
err : 1; - eventRecPtr.p->m_errorLine= __LINE__; - eventRecPtr.p->m_errorNode= reference(); - } - eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubRemoveReq(signal,eventRecPtr.i,0); - DBUG_VOID_RETURN; -} - -void -Dbdict::execSUB_REMOVE_CONF(Signal* signal) -{ - jamEntry(); - const SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF, - signal, SubRemoveConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - OpDropEventPtr eventRecPtr; - c_opDropEvent.getPtr(eventRecPtr, conf->senderData); - eventRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubRemoveReq(signal,eventRecPtr.i,0); -} - -void -Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx) -{ - OpDropEventPtr evntRecPtr; - c_opDropEvent.getPtr(evntRecPtr, ptrI); - - if (!evntRecPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (evntRecPtr.p->m_reqTracker.hasRef()) { - jam(); - if ( evntRecPtr.p->m_errorCode == 0 ) - { - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorCode = 1; - } - dropEvent_sendReply(signal, evntRecPtr); - return; - } - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_DELETE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Delete); -} - -void -Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilPrepareRef(signal, callbackData, returnCode); - return; - } - - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - jam(); - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); -#ifdef EVENT_DEBUG - printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i); -#endif - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 }; - - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Delete); -} - -void -Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilExecuteRef(signal, callbackData, returnCode); - return; - } - - OpDropEventPtr evntRecPtr; - UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr(); - jam(); - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - dropEvent_sendReply(signal, evntRecPtr); -} - -void -Dbdict::dropEventUtilPrepareRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - UtilPrepareRef * const ref = (UtilPrepareRef *)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - 
interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(), - evntRecPtr.p->m_errorCode, evntRecPtr.p->m_errorLine); - evntRecPtr.p->m_errorNode = reference(); - - dropEvent_sendReply(signal, evntRecPtr); -} - -void -Dbdict::dropEventUtilExecuteRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - OpDropEventPtr evntRecPtr; - UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr(); - jam(); - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - - switch (ref->getErrorCode()) { - case UtilExecuteRef::TCError: - switch (ref->getTCErrorCode()) { - case ZNOT_FOUND: - jam(); - evntRecPtr.p->m_errorCode = 4710; - break; - default: - jam(); - evntRecPtr.p->m_errorCode = ref->getTCErrorCode(); - break; - } - break; - default: - jam(); - evntRecPtr.p->m_errorCode = ref->getErrorCode(); - break; - } - dropEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::dropEvent_sendReply(Signal* signal, - OpDropEventPtr evntRecPtr) -{ - jam(); - EVENT_TRACE; - Uint32 senderRef = evntRecPtr.p->m_request.getUserRef(); - - if (evntRecPtr.p->hasError()) { - jam(); - DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend(); - - ret->setUserData(evntRecPtr.p->m_request.getUserData()); - ret->setUserRef(evntRecPtr.p->m_request.getUserRef()); - - ret->setErrorCode(evntRecPtr.p->m_errorCode); - ret->setErrorLine(evntRecPtr.p->m_errorLine); - ret->setErrorNode(evntRecPtr.p->m_errorNode); - - sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, - DropEvntRef::SignalLength, JBB); - } else { - jam(); - DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend(); - - evntConf->setUserData(evntRecPtr.p->m_request.getUserData()); - evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef()); - - sendSignal(senderRef, GSN_DROP_EVNT_CONF, signal, - DropEvntConf::SignalLength, JBB); - } - - c_opDropEvent.release(evntRecPtr); -} - -/** - * MODULE: Alter index - * - * Alter index state. Alter online creates the index in each TC and - * then invokes create trigger and alter trigger protocols to activate - * the 3 triggers. Alter offline does the opposite. 
- * - * Request type received in REQ and returned in CONF/REF: - * - * RT_USER - from API to DICT master - * RT_CREATE_INDEX - part of create index operation - * RT_DROP_INDEX - part of drop index operation - * RT_NODERESTART - node restart, activate locally only - * RT_SYSTEMRESTART - system restart, activate and build if not logged - * RT_DICT_PREPARE - prepare participants - * RT_DICT_TC - to local TC via each participant - * RT_DICT_COMMIT - commit in each participant - */ - -void -Dbdict::execALTER_INDX_REQ(Signal* signal) -{ - jamEntry(); - AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend(); - OpAlterIndexPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const AlterIndxReq::RequestType requestType = req->getRequestType(); - if (requestType == AlterIndxReq::RT_USER || - requestType == AlterIndxReq::RT_CREATE_INDEX || - requestType == AlterIndxReq::RT_DROP_INDEX || - requestType == AlterIndxReq::RT_NODERESTART || - requestType == AlterIndxReq::RT_SYSTEMRESTART) { - jam(); - const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL; - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (isLocal) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - if (signal->getLength() == AlterIndxReq::SignalLength) { - jam(); - if (! isLocal && getOwnNodeId() != c_masterNodeId) { - jam(); - - releaseSections(signal); - OpAlterIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = AlterIndxRef::NotMaster; - opPtr.p->m_errorLine = __LINE__; - opPtr.p->m_errorNode = c_masterNodeId; - alterIndex_sendReply(signal, opPtr, true); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_ALTER_INDX_REQ, - signal, AlterIndxReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == AlterIndxReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpAlterIndex opBusy; - if (! c_opAlterIndex.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = AlterIndxRef::Busy; - opPtr.p->m_errorLine = __LINE__; - alterIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opAlterIndex.add(opPtr); - // master expects to hear from all - if (opPtr.p->m_isMaster) - opPtr.p->m_signalCounter = receiverNodes; - // check request in all participants - alterIndex_slavePrepare(signal, opPtr); - alterIndex_sendReply(signal, opPtr, false); - return; - } - c_opAlterIndex.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == AlterIndxReq::RT_DICT_TC) { - jam(); - if (opPtr.p->m_request.getOnline()) - alterIndex_toCreateTc(signal, opPtr); - else - alterIndex_toDropTc(signal, opPtr); - return; - } - if (requestType == AlterIndxReq::RT_DICT_COMMIT || - requestType == AlterIndxReq::RT_DICT_ABORT) { - jam(); - if (requestType == AlterIndxReq::RT_DICT_COMMIT) - alterIndex_slaveCommit(signal, opPtr); - else - alterIndex_slaveAbort(signal, opPtr); - alterIndex_sendReply(signal, opPtr, false); - // done in slave - if (! 
opPtr.p->m_isMaster) - c_opAlterIndex.release(opPtr); - return; - } - } - jam(); - // return to sender - OpAlterIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = AlterIndxRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - alterIndex_sendReply(signal, opPtr, true); -} - -void -Dbdict::execALTER_INDX_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - AlterIndxConf* conf = (AlterIndxConf*)signal->getDataPtrSend(); - alterIndex_recvReply(signal, conf, 0); -} - -void -Dbdict::execALTER_INDX_REF(Signal* signal) -{ - jamEntry(); - AlterIndxRef* ref = (AlterIndxRef*)signal->getDataPtrSend(); - alterIndex_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf, - const AlterIndxRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const AlterIndxReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == AlterIndxReq::RT_CREATE_INDEX) { - jam(); - // part of create index operation - OpCreateIndexPtr opPtr; - c_opCreateIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - createIndex_fromAlterIndex(signal, opPtr); - return; - } - if (requestType == AlterIndxReq::RT_DROP_INDEX) { - jam(); - // part of drop index operation - OpDropIndexPtr opPtr; - c_opDropIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - dropIndex_fromAlterIndex(signal, opPtr); - return; - } - if (requestType == AlterIndxReq::RT_TC || - requestType == AlterIndxReq::RT_TUX) { - jam(); - // part of build index operation - OpBuildIndexPtr opPtr; - c_opBuildIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - buildIndex_fromOnline(signal, opPtr); - return; - } - if (requestType == AlterIndxReq::RT_NODERESTART) { - jam(); - if (ref == 0) { - infoEvent("DICT: index %u activated", (unsigned)key); - } else { - warningEvent("DICT: index %u activation failed: code=%d line=%d", - (unsigned)key, - ref->getErrorCode(), ref->getErrorLine()); - } - activateIndexes(signal, key + 1); - return; - } - if (requestType == AlterIndxReq::RT_SYSTEMRESTART) { - jam(); - if (ref == 0) { - infoEvent("DICT: index %u activated done", (unsigned)key); - } else { - warningEvent("DICT: index %u activated failed: code=%d line=%d node=%d", - (unsigned)key, - ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode()); - } - activateIndexes(signal, key + 1); - return; - } - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! 
opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == AlterIndxReq::RT_DICT_COMMIT || - requestType == AlterIndxReq::RT_DICT_ABORT) { - jam(); - // send reply to user - alterIndex_sendReply(signal, opPtr, true); - c_opAlterIndex.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT; - alterIndex_sendSlaveReq(signal, opPtr); - return; - } - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - if (indexPtr.p->isHashIndex()) { - if (requestType == AlterIndxReq::RT_DICT_PREPARE) { - jam(); - if (opPtr.p->m_request.getOnline()) { - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC; - alterIndex_sendSlaveReq(signal, opPtr); - } else { - // start drop triggers - alterIndex_toDropTrigger(signal, opPtr); - } - return; - } - if (requestType == AlterIndxReq::RT_DICT_TC) { - jam(); - if (opPtr.p->m_request.getOnline()) { - // start create triggers - alterIndex_toCreateTrigger(signal, opPtr); - } else { - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT; - alterIndex_sendSlaveReq(signal, opPtr); - } - return; - } - } - if (indexPtr.p->isOrderedIndex()) { - if (requestType == AlterIndxReq::RT_DICT_PREPARE) { - jam(); - if (opPtr.p->m_request.getOnline()) { - // start create triggers - alterIndex_toCreateTrigger(signal, opPtr); - } else { - // start drop triggers - alterIndex_toDropTrigger(signal, opPtr); - } - return; - } - } - ndbrequire(false); -} - -void -Dbdict::alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - const AlterIndxReq* const req = &opPtr.p->m_request; - if (! (req->getIndexId() < c_tableRecordPool.getSize())) { - jam(); - opPtr.p->m_errorCode = AlterIndxRef::Inconsistency; - opPtr.p->m_errorLine = __LINE__; - return; - } - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, req->getIndexId()); - if (indexPtr.p->tabState != TableRecord::DEFINED) { - jam(); - opPtr.p->m_errorCode = AlterIndxRef::IndexNotFound; - opPtr.p->m_errorLine = __LINE__; - return; - } - if (! indexPtr.p->isIndex()) { - jam(); - opPtr.p->m_errorCode = AlterIndxRef::NotAnIndex; - opPtr.p->m_errorLine = __LINE__; - return; - } - if (req->getOnline()) - indexPtr.p->indexState = TableRecord::IS_BUILDING; - else - indexPtr.p->indexState = TableRecord::IS_DROPPING; -} - -void -Dbdict::alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // request to create index in local TC - CreateIndxReq* const req = (CreateIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(CreateIndxReq::RT_TC); - req->setIndexType(indexPtr.p->tableType); - req->setTableId(indexPtr.p->primaryTableId); - req->setIndexId(indexPtr.i); - req->setOnline(true); - getIndexAttrList(indexPtr, opPtr.p->m_attrList); - // send - LinearSectionPtr lsPtr[3]; - lsPtr[0].p = (Uint32*)&opPtr.p->m_attrList; - lsPtr[0].sz = 1 + opPtr.p->m_attrList.sz; - sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_CREATE_INDX_REQ, - signal, CreateIndxReq::SignalLength, JBB, lsPtr, 1); -} - -void -Dbdict::alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - // mark created in local TC - if (! 
opPtr.p->hasLastError()) { - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - indexPtr.p->indexLocal |= TableRecord::IL_CREATED_TC; - } - // forward CONF or REF to master - ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC); - alterIndex_sendReply(signal, opPtr, false); -} - -void -Dbdict::alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // broken index allowed if force - if (! (indexPtr.p->indexLocal & TableRecord::IL_CREATED_TC)) { - jam(); - ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE); - alterIndex_sendReply(signal, opPtr, false); - return; - } - // request to drop in local TC - DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(DropIndxReq::RT_TC); - req->setTableId(indexPtr.p->primaryTableId); - req->setIndexId(indexPtr.i); - req->setIndexVersion(indexPtr.p->tableVersion); - // send - sendSignal(calcTcBlockRef(getOwnNodeId()), GSN_DROP_INDX_REQ, - signal, DropIndxReq::SignalLength, JBB); -} - -void -Dbdict::alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - ndbrequire(opPtr.p->m_requestType == AlterIndxReq::RT_DICT_TC); - // mark dropped locally - if (! opPtr.p->hasLastError()) { - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - indexPtr.p->indexLocal &= ~TableRecord::IL_CREATED_TC; - } - // forward CONF or REF to master - alterIndex_sendReply(signal, opPtr, false); -} - -void -Dbdict::alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // start creation of index triggers - CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(CreateTrigReq::RT_ALTER_INDEX); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setIndexId(opPtr.p->m_request.getIndexId()); - req->setTriggerId(RNIL); - req->setTriggerActionTime(TriggerActionTime::TA_AFTER); - req->setMonitorAllAttributes(false); - req->setOnline(true); // alter online after create - req->setReceiverRef(0); // implicit for index triggers - getIndexAttrMask(indexPtr, req->getAttributeMask()); - // name section - char triggerName[MAX_TAB_NAME_SIZE]; - Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string - LinearWriter w(buffer, sizeof(buffer) >> 2); - LinearSectionPtr lsPtr[3]; - if (indexPtr.p->isHashIndex()) { - req->setTriggerType(TriggerType::SECONDARY_INDEX); - req->setMonitorReplicas(false); - req->setReportAllMonitoredAttributes(true); - // insert - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) - req->setTriggerId(indexPtr.p->insertTriggerId); - req->setTriggerEvent(TriggerEvent::TE_INSERT); - sprintf(triggerName, "NDB$INDEX_%u_INSERT", opPtr.p->m_request.getIndexId()); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = buffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(reference(), GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); - // update - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) - req->setTriggerId(indexPtr.p->updateTriggerId); - req->setTriggerEvent(TriggerEvent::TE_UPDATE); - 
sprintf(triggerName, "NDB$INDEX_%u_UPDATE", opPtr.p->m_request.getIndexId()); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = buffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(reference(), GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); - // delete - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) - req->setTriggerId(indexPtr.p->deleteTriggerId); - req->setTriggerEvent(TriggerEvent::TE_DELETE); - sprintf(triggerName, "NDB$INDEX_%u_DELETE", opPtr.p->m_request.getIndexId()); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = buffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(reference(), GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); - // triggers left to create - opPtr.p->m_triggerCounter = 3; - return; - } - if (indexPtr.p->isOrderedIndex()) { - req->addRequestFlag(RequestFlag::RF_NOTCTRIGGER); - req->setTriggerType(TriggerType::ORDERED_INDEX); - req->setTriggerActionTime(TriggerActionTime::TA_CUSTOM); - req->setMonitorReplicas(true); - req->setReportAllMonitoredAttributes(true); - // one trigger for 5 events (insert, update, delete, commit, abort) - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) - req->setTriggerId(indexPtr.p->customTriggerId); - req->setTriggerEvent(TriggerEvent::TE_CUSTOM); - sprintf(triggerName, "NDB$INDEX_%u_CUSTOM", opPtr.p->m_request.getIndexId()); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = buffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(reference(), GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); - // triggers left to create - opPtr.p->m_triggerCounter = 1; - return; - } - ndbrequire(false); -} - -void -Dbdict::alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - ndbrequire(opPtr.p->m_triggerCounter != 0); - if (--opPtr.p->m_triggerCounter != 0) { - jam(); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT; - alterIndex_sendSlaveReq(signal, opPtr); - return; - } - if(opPtr.p->m_requestType != AlterIndxReq::RT_SYSTEMRESTART){ - // send build request - alterIndex_toBuildIndex(signal, opPtr); - return; - } - - /** - * During system restart, - * leave index in activated but not build state. 
- * - * Build a bit later when REDO has been run - */ - alterIndex_sendReply(signal, opPtr, true); -} - -void -Dbdict::alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // start drop of index triggers - DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(DropTrigReq::RT_ALTER_INDEX); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setIndexId(opPtr.p->m_request.getIndexId()); - req->setTriggerInfo(0); // not used - opPtr.p->m_triggerCounter = 0; - if (indexPtr.p->isHashIndex()) { - // insert - req->setTriggerId(indexPtr.p->insertTriggerId); - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - opPtr.p->m_triggerCounter++; - // update - req->setTriggerId(indexPtr.p->updateTriggerId); - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - opPtr.p->m_triggerCounter++; - // delete - req->setTriggerId(indexPtr.p->deleteTriggerId); - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - opPtr.p->m_triggerCounter++; - // build - if (indexPtr.p->buildTriggerId != RNIL) { - req->setTriggerId(indexPtr.p->buildTriggerId); - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - opPtr.p->m_triggerCounter++; - } - return; - } - if (indexPtr.p->isOrderedIndex()) { - // custom - req->addRequestFlag(RequestFlag::RF_NOTCTRIGGER); - req->setTriggerId(indexPtr.p->customTriggerId); - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - opPtr.p->m_triggerCounter++; - return; - } - ndbrequire(false); -} - -void -Dbdict::alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - ndbrequire(opPtr.p->m_triggerCounter != 0); - if (--opPtr.p->m_triggerCounter != 0) { - jam(); - return; - } - // finally drop index in each TC - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - const bool isHashIndex = indexPtr.p->isHashIndex(); - const bool isOrderedIndex = indexPtr.p->isOrderedIndex(); - ndbrequire(isHashIndex != isOrderedIndex); // xor - if (isHashIndex) - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_TC; - if (isOrderedIndex) - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT; - alterIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - // get index and table records - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId); - // build request to self (short signal) - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(BuildIndxReq::RT_ALTER_INDEX); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setBuildId(0); // not used - req->setBuildKey(0); // not used - req->setIndexType(indexPtr.p->tableType); - req->setIndexId(indexPtr.i); - req->setTableId(indexPtr.p->primaryTableId); - req->setParallelism(16); - // send - sendSignal(reference(), GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); -} - -void -Dbdict::alterIndex_fromBuildIndex(Signal* signal, 
OpAlterIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_ABORT; - alterIndex_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = AlterIndxReq::RT_DICT_COMMIT; - alterIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - // get index record - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - indexPtr.p->indexState = TableRecord::IS_ONLINE; -} - -void -Dbdict::alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr) -{ - jam(); - // find index record - const Uint32 indexId = opPtr.p->m_request.getIndexId(); - if (indexId >= c_tableRecordPool.getSize()) - return; - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, indexId); - if (! indexPtr.p->isIndex()) - return; - // mark broken - indexPtr.p->indexState = TableRecord::IS_BROKEN; -} - -void -Dbdict::alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr) -{ - AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - opPtr.p->m_signalCounter = receiverNodes; - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_ALTER_INDX_REQ, - signal, AlterIndxReq::SignalLength, JBB); -} - -void -Dbdict::alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr, - bool toUser) -{ - AlterIndxRef* rep = (AlterIndxRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_ALTER_INDX_CONF; - Uint32 length = AlterIndxConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == AlterIndxReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = AlterIndxConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) - opPtr.p->m_errorNode = getOwnNodeId(); - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_ALTER_INDX_REF; - length = AlterIndxRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Build index - * - * Build index or all indexes on a table. 
Request type: - * - * RT_USER - normal user request, not yet used - * RT_ALTER_INDEX - from alter index - * RT_SYSTEM_RESTART - - * RT_DICT_PREPARE - prepare participants - * RT_DICT_TRIX - to participant on way to local TRIX - * RT_DICT_COMMIT - commit in each participant - * RT_DICT_ABORT - abort - * RT_TRIX - to local TRIX - */ - -void -Dbdict::execBUILDINDXREQ(Signal* signal) -{ - jamEntry(); - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - OpBuildIndexPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const BuildIndxReq::RequestType requestType = req->getRequestType(); - if (requestType == BuildIndxReq::RT_USER || - requestType == BuildIndxReq::RT_ALTER_INDEX || - requestType == BuildIndxReq::RT_SYSTEMRESTART) { - jam(); - - const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL; - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (isLocal) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - - if (signal->getLength() == BuildIndxReq::SignalLength) { - jam(); - - if (!isLocal && getOwnNodeId() != c_masterNodeId) { - jam(); - - releaseSections(signal); - OpBuildIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = BuildIndxRef::NotMaster; - opPtr.p->m_errorLine = __LINE__; - opPtr.p->m_errorNode = c_masterNodeId; - buildIndex_sendReply(signal, opPtr, true); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == BuildIndxReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpBuildIndex opBusy; - if (! c_opBuildIndex.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = BuildIndxRef::Busy; - opPtr.p->m_errorLine = __LINE__; - buildIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opBuildIndex.add(opPtr); - // master expects to hear from all - opPtr.p->m_signalCounter = receiverNodes; - buildIndex_sendReply(signal, opPtr, false); - return; - } - c_opBuildIndex.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == BuildIndxReq::RT_DICT_TRIX) { - jam(); - buildIndex_buildTrix(signal, opPtr); - return; - } - if (requestType == BuildIndxReq::RT_DICT_TC || - requestType == BuildIndxReq::RT_DICT_TUX) { - jam(); - buildIndex_toOnline(signal, opPtr); - return; - } - if (requestType == BuildIndxReq::RT_DICT_COMMIT || - requestType == BuildIndxReq::RT_DICT_ABORT) { - jam(); - buildIndex_sendReply(signal, opPtr, false); - // done in slave - if (! 
opPtr.p->m_isMaster) - c_opBuildIndex.release(opPtr); - return; - } - } - jam(); - // return to sender - OpBuildIndex opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = BuildIndxRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - buildIndex_sendReply(signal, opPtr, true); -} - -void -Dbdict::execBUILDINDXCONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - BuildIndxConf* conf = (BuildIndxConf*)signal->getDataPtrSend(); - buildIndex_recvReply(signal, conf, 0); -} - -void -Dbdict::execBUILDINDXREF(Signal* signal) -{ - jamEntry(); - BuildIndxRef* ref = (BuildIndxRef*)signal->getDataPtrSend(); - buildIndex_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf, - const BuildIndxRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const BuildIndxReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == BuildIndxReq::RT_ALTER_INDEX) { - jam(); - // part of alter index operation - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterIndex_fromBuildIndex(signal, opPtr); - return; - } - - if (requestType == BuildIndxReq::RT_SYSTEMRESTART) { - jam(); - if (ref == 0) { - infoEvent("DICT: index %u rebuild done", (unsigned)key); - } else { - warningEvent("DICT: index %u rebuild failed: code=%d line=%d node=%d", - (unsigned)key, ref->getErrorCode()); - } - rebuildIndexes(signal, key + 1); - return; - } - - OpBuildIndexPtr opPtr; - c_opBuildIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - if (requestType == BuildIndxReq::RT_TRIX) { - jam(); - // forward to master - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX; - buildIndex_sendReply(signal, opPtr, false); - return; - } - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == BuildIndxReq::RT_DICT_COMMIT || - requestType == BuildIndxReq::RT_DICT_ABORT) { - jam(); - // send reply to user - buildIndex_sendReply(signal, opPtr, true); - c_opBuildIndex.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - if (indexPtr.p->isHashIndex()) { - if (requestType == BuildIndxReq::RT_DICT_PREPARE) { - jam(); - if (! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) { - buildIndex_toCreateConstr(signal, opPtr); - } else { - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC; - buildIndex_sendSlaveReq(signal, opPtr); - } - return; - } - if (requestType == BuildIndxReq::RT_DICT_TRIX) { - jam(); - ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)); - buildIndex_toDropConstr(signal, opPtr); - return; - } - if (requestType == BuildIndxReq::RT_DICT_TC) { - jam(); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - } - if (indexPtr.p->isOrderedIndex()) { - if (requestType == BuildIndxReq::RT_DICT_PREPARE) { - jam(); - if (! 
(opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)) { - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX; - buildIndex_sendSlaveReq(signal, opPtr); - } else { - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX; - buildIndex_sendSlaveReq(signal, opPtr); - } - return; - } - if (requestType == BuildIndxReq::RT_DICT_TRIX) { - jam(); - ndbrequire(! (opPtr.p->m_requestFlag & RequestFlag::RF_NOBUILD)); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TUX; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == BuildIndxReq::RT_DICT_TUX) { - jam(); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_COMMIT; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - } - ndbrequire(false); -} - -void -Dbdict::buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // request to create constraint trigger - CreateTrigReq* req = (CreateTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(CreateTrigReq::RT_BUILD_INDEX); - req->addRequestFlag(0); // none - req->setTableId(indexPtr.i); - req->setIndexId(RNIL); - req->setTriggerId(RNIL); - req->setTriggerType(TriggerType::READ_ONLY_CONSTRAINT); - req->setTriggerActionTime(TriggerActionTime::TA_AFTER); - req->setTriggerEvent(TriggerEvent::TE_UPDATE); - req->setMonitorReplicas(false); - req->setMonitorAllAttributes(false); - req->setReportAllMonitoredAttributes(true); - req->setOnline(true); // alter online after create - req->setReceiverRef(0); // no receiver, REF-ed by TUP - req->getAttributeMask().clear(); - // NDB$PK is last attribute - req->getAttributeMask().set(indexPtr.p->noOfAttributes - 1); - // name section - char triggerName[MAX_TAB_NAME_SIZE]; - Uint32 buffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string - LinearWriter w(buffer, sizeof(buffer) >> 2); - LinearSectionPtr lsPtr[3]; - sprintf(triggerName, "NDB$INDEX_%u_BUILD", indexPtr.i); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = buffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(reference(), GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); -} - -void -Dbdict::buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TRIX; - buildIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId); - // build request - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(BuildIndxReq::RT_TRIX); - req->setBuildId(0); // not yet.. 
- req->setBuildKey(0); // ..in use - req->setIndexType(indexPtr.p->tableType); - req->setIndexId(indexPtr.i); - req->setTableId(indexPtr.p->primaryTableId); - req->setParallelism(16); - if (indexPtr.p->isHashIndex()) { - jam(); - getIndexAttrList(indexPtr, opPtr.p->m_attrList); - getTableKeyList(tablePtr, opPtr.p->m_tableKeyList); - // send - LinearSectionPtr lsPtr[3]; - lsPtr[0].sz = opPtr.p->m_attrList.sz; - lsPtr[0].p = opPtr.p->m_attrList.id; - lsPtr[1].sz = opPtr.p->m_tableKeyList.sz; - lsPtr[1].p = opPtr.p->m_tableKeyList.id; - sendSignal(calcTrixBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB, lsPtr, 2); - return; - } - if (indexPtr.p->isOrderedIndex()) { - jam(); - sendSignal(calcTupBlockRef(getOwnNodeId()), GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); - return; - } - ndbrequire(false); -} - -void -Dbdict::buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - // request to drop constraint trigger - DropTrigReq* req = (DropTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(DropTrigReq::RT_BUILD_INDEX); - req->addRequestFlag(0); // none - req->setTableId(indexPtr.i); - req->setIndexId(RNIL); - req->setTriggerId(opPtr.p->m_constrTriggerId); - req->setTriggerInfo(0); // not used - sendSignal(reference(), GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); -} - -void -Dbdict::buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_ABORT; - buildIndex_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = BuildIndxReq::RT_DICT_TC; - buildIndex_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, opPtr.p->m_request.getIndexId()); - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId); - // request to set index online in TC or TUX - AlterIndxReq* const req = (AlterIndxReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) { - jam(); - req->setRequestType(AlterIndxReq::RT_TC); - } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) { - jam(); - req->setRequestType(AlterIndxReq::RT_TUX); - } else { - ndbrequire(false); - } - req->setTableId(tablePtr.i); - req->setIndexId(indexPtr.i); - req->setIndexVersion(indexPtr.p->tableVersion); - req->setOnline(true); - BlockReference blockRef = 0; - if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TC) { - jam(); - blockRef = calcTcBlockRef(getOwnNodeId()); - } else if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_TUX) { - jam(); - blockRef = calcTuxBlockRef(getOwnNodeId()); - } else { - ndbrequire(false); - } - // send - sendSignal(blockRef, GSN_ALTER_INDX_REQ, - signal, BuildIndxReq::SignalLength, JBB); -} - -void -Dbdict::buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr) -{ - jam(); - // forward to master - buildIndex_sendReply(signal, opPtr, false); -} - -void -Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr) -{ - BuildIndxReq* const req = (BuildIndxReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - 
req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - if(opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) { - jam(); - opPtr.p->m_signalCounter.clearWaitingFor(); - opPtr.p->m_signalCounter.setWaitingFor(getOwnNodeId()); - sendSignal(reference(), GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); - } else { - jam(); - opPtr.p->m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); - } -} - -void -Dbdict::buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, - bool toUser) -{ - BuildIndxRef* rep = (BuildIndxRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_BUILDINDXCONF; - Uint32 length = BuildIndxConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == BuildIndxReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = BuildIndxConf::SignalLength; - } - rep->setIndexType(opPtr.p->m_request.getIndexType()); - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - if (sendRef) { - rep->setErrorCode(opPtr.p->m_errorCode); - rep->masterNodeId = opPtr.p->m_errorNode; - gsn = GSN_BUILDINDXREF; - length = BuildIndxRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Create trigger - * - * Create trigger in all DICT blocks. Optionally start alter trigger - * operation to set the trigger online. - * - * Request type received in REQ and returned in CONF/REF: - * - * RT_USER - normal user e.g. BACKUP - * RT_ALTER_INDEX - from alter index online - * RT_DICT_PREPARE - seize operation in each DICT - * RT_DICT_COMMIT - commit create in each DICT - * RT_TC - sending to TC (operation alter trigger) - * RT_LQH - sending to LQH (operation alter trigger) - */ - -void -Dbdict::execCREATE_TRIG_REQ(Signal* signal) -{ - jamEntry(); - CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend(); - OpCreateTriggerPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const CreateTrigReq::RequestType requestType = req->getRequestType(); - if (requestType == CreateTrigReq::RT_USER || - requestType == CreateTrigReq::RT_ALTER_INDEX || - requestType == CreateTrigReq::RT_BUILD_INDEX) { - jam(); - if (! assembleFragments(signal)) { - jam(); - return; - } - const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL; - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (isLocal) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - if (signal->getLength() == CreateTrigReq::SignalLength) { - jam(); - if (! 
isLocal && getOwnNodeId() != c_masterNodeId) { - jam(); - - releaseSections(signal); - OpCreateTrigger opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = CreateTrigRef::NotMaster; - opPtr.p->m_errorLine = __LINE__; - opPtr.p->m_errorNode = c_masterNodeId; - createTrigger_sendReply(signal, opPtr, true); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == CreateTrigReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpCreateTrigger opBusy; - if (! c_opCreateTrigger.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::Busy; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opCreateTrigger.add(opPtr); - { - // save name - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr, CreateTrigReq::TRIGGER_NAME_SECTION); - SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool()); - if (ssReader.getKey() != CreateTrigReq::TriggerNameKey || - ! ssReader.getString(opPtr.p->m_triggerName)) { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::InvalidName; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - } - releaseSections(signal); - if(get_object(opPtr.p->m_triggerName) != 0){ - jam(); - opPtr.p->m_errorCode = CreateTrigRef::TriggerExists; - opPtr.p->m_errorLine = __LINE__; - createTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - - // master expects to hear from all - if (opPtr.p->m_isMaster) - opPtr.p->m_signalCounter = receiverNodes; - // check request in all participants - createTrigger_slavePrepare(signal, opPtr); - createTrigger_sendReply(signal, opPtr, false); - return; - } - c_opCreateTrigger.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == CreateTrigReq::RT_DICT_CREATE) { - jam(); - // master has set trigger id - opPtr.p->m_request.setTriggerId(req->getTriggerId()); - createTrigger_slaveCreate(signal, opPtr); - createTrigger_sendReply(signal, opPtr, false); - return; - } - if (requestType == CreateTrigReq::RT_DICT_COMMIT || - requestType == CreateTrigReq::RT_DICT_ABORT) { - jam(); - if (requestType == CreateTrigReq::RT_DICT_COMMIT) - createTrigger_slaveCommit(signal, opPtr); - else - createTrigger_slaveAbort(signal, opPtr); - createTrigger_sendReply(signal, opPtr, false); - // done in slave - if (! 
opPtr.p->m_isMaster) - c_opCreateTrigger.release(opPtr); - return; - } - } - jam(); - // return to sender - releaseSections(signal); - OpCreateTrigger opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = CreateTrigRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - createTrigger_sendReply(signal, opPtr, true); -} - -void -Dbdict::execCREATE_TRIG_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(signal->getNoOfSections() == 0); - CreateTrigConf* conf = (CreateTrigConf*)signal->getDataPtrSend(); - createTrigger_recvReply(signal, conf, 0); -} - -void -Dbdict::execCREATE_TRIG_REF(Signal* signal) -{ - jamEntry(); - CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtrSend(); - createTrigger_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf, - const CreateTrigRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const CreateTrigReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == CreateTrigReq::RT_ALTER_INDEX) { - jam(); - // part of alter index operation - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterIndex_fromCreateTrigger(signal, opPtr); - return; - } - if (requestType == CreateTrigReq::RT_BUILD_INDEX) { - jam(); - // part of build index operation - OpBuildIndexPtr opPtr; - c_opBuildIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - // fill in trigger id - opPtr.p->m_constrTriggerId = conf->getTriggerId(); - buildIndex_fromCreateConstr(signal, opPtr); - return; - } - if (requestType == CreateTrigReq::RT_TC || - requestType == CreateTrigReq::RT_LQH) { - jam(); - // part of alter trigger operation - OpAlterTriggerPtr opPtr; - c_opAlterTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterTrigger_fromCreateLocal(signal, opPtr); - return; - } - OpCreateTriggerPtr opPtr; - c_opCreateTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! 
opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == CreateTrigReq::RT_DICT_COMMIT || - requestType == CreateTrigReq::RT_DICT_ABORT) { - jam(); - // send reply to user - createTrigger_sendReply(signal, opPtr, true); - c_opCreateTrigger.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT; - createTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == CreateTrigReq::RT_DICT_PREPARE) { - jam(); - // seize trigger id in master - createTrigger_masterSeize(signal, opPtr); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT; - createTrigger_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_CREATE; - createTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == CreateTrigReq::RT_DICT_CREATE) { - jam(); - if (opPtr.p->m_request.getOnline()) { - jam(); - // start alter online - createTrigger_toAlterTrigger(signal, opPtr); - return; - } - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT; - createTrigger_sendSlaveReq(signal, opPtr); - return; - } - ndbrequire(false); -} - -void -Dbdict::createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); - const CreateTrigReq* const req = &opPtr.p->m_request; - // check trigger type - if (req->getRequestType() == CreateTrigReq::RT_USER && - req->getTriggerType() == TriggerType::SUBSCRIPTION || - req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX && - req->getTriggerType() == TriggerType::SECONDARY_INDEX || - req->getRequestType() == CreateTrigReq::RT_ALTER_INDEX && - req->getTriggerType() == TriggerType::ORDERED_INDEX || - req->getRequestType() == CreateTrigReq::RT_BUILD_INDEX && - req->getTriggerType() == TriggerType::READ_ONLY_CONSTRAINT) { - ; - } else { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::UnsupportedTriggerType; - opPtr.p->m_errorLine = __LINE__; - return; - } - // check the table - const Uint32 tableId = req->getTableId(); - if (! 
(tableId < c_tableRecordPool.getSize())) { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::InvalidTable; - opPtr.p->m_errorLine = __LINE__; - return; - } - TableRecordPtr tablePtr; - c_tableRecordPool.getPtr(tablePtr, tableId); - if (tablePtr.p->tabState != TableRecord::DEFINED && - tablePtr.p->tabState != TableRecord::BACKUP_ONGOING) { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::InvalidTable; - opPtr.p->m_errorLine = __LINE__; - return; - } -} - -void -Dbdict::createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr) -{ - TriggerRecordPtr triggerPtr; - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) { - triggerPtr.i = opPtr.p->m_request.getTriggerId(); - } else { - triggerPtr.i = getFreeTriggerRecord(); - if (triggerPtr.i == RNIL) { - jam(); - opPtr.p->m_errorCode = CreateTrigRef::TooManyTriggers; - opPtr.p->m_errorLine = __LINE__; - return; - } - } - c_triggerRecordPool.getPtr(triggerPtr); - initialiseTriggerRecord(triggerPtr); - triggerPtr.p->triggerState = TriggerRecord::TS_DEFINING; - opPtr.p->m_request.setTriggerId(triggerPtr.i); -} - -void -Dbdict::createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); - const CreateTrigReq* const req = &opPtr.p->m_request; - // get the trigger record - const Uint32 triggerId = req->getTriggerId(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, triggerId); - initialiseTriggerRecord(triggerPtr); - // fill in trigger data - { - Rope name(c_rope_pool, triggerPtr.p->triggerName); - if(!name.assign(opPtr.p->m_triggerName)) - { - opPtr.p->m_errorCode = (CreateTrigRef::ErrorCode)CreateTableRef::OutOfStringBuffer; - return; - } - } - triggerPtr.p->triggerId = triggerId; - triggerPtr.p->tableId = req->getTableId(); - triggerPtr.p->indexId = RNIL; - triggerPtr.p->triggerType = req->getTriggerType(); - triggerPtr.p->triggerActionTime = req->getTriggerActionTime(); - triggerPtr.p->triggerEvent = req->getTriggerEvent(); - triggerPtr.p->monitorReplicas = req->getMonitorReplicas(); - triggerPtr.p->monitorAllAttributes = req->getMonitorAllAttributes(); - triggerPtr.p->reportAllMonitoredAttributes = req->getReportAllMonitoredAttributes(); - triggerPtr.p->attributeMask = req->getAttributeMask(); - triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE; - // add to hash table - // ndbout_c("++++++++++++ Adding trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName); - { - Ptr obj_ptr; - ndbrequire(c_obj_hash.seize(obj_ptr)); - obj_ptr.p->m_name = triggerPtr.p->triggerName; - obj_ptr.p->m_id = triggerId; - obj_ptr.p->m_type = triggerPtr.p->triggerType; - obj_ptr.p->m_ref_count = 0; - c_obj_hash.add(obj_ptr); - triggerPtr.p->m_obj_ptr_i = obj_ptr.i; - } - if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX || - triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) { - jam(); - // connect to index record XXX should be done in caller instead - triggerPtr.p->indexId = req->getIndexId(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId); - switch (triggerPtr.p->triggerEvent) { - case TriggerEvent::TE_INSERT: - indexPtr.p->insertTriggerId = triggerPtr.p->triggerId; - break; - case TriggerEvent::TE_UPDATE: - indexPtr.p->updateTriggerId = triggerPtr.p->triggerId; - break; - case TriggerEvent::TE_DELETE: - indexPtr.p->deleteTriggerId = triggerPtr.p->triggerId; - break; - case TriggerEvent::TE_CUSTOM: - indexPtr.p->customTriggerId = triggerPtr.p->triggerId; - break; - default: - ndbrequire(false); - break; - } - } - if (triggerPtr.p->triggerType 
== TriggerType::READ_ONLY_CONSTRAINT) { - jam(); - // connect to index record XXX should be done in caller instead - triggerPtr.p->indexId = req->getTableId(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId); - indexPtr.p->buildTriggerId = triggerPtr.p->triggerId; - } -} - -void -Dbdict::createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); - AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(AlterTrigReq::RT_CREATE_TRIGGER); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setTriggerId(opPtr.p->m_request.getTriggerId()); - req->setTriggerInfo(0); // not used - req->setOnline(true); - req->setReceiverRef(opPtr.p->m_request.getReceiverRef()); - sendSignal(reference(), GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); -} - -void -Dbdict::createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_ABORT; - createTrigger_sendSlaveReq(signal, opPtr); - return; - } - opPtr.p->m_requestType = CreateTrigReq::RT_DICT_COMMIT; - createTrigger_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); - const CreateTrigReq* const req = &opPtr.p->m_request; - // get the trigger record - const Uint32 triggerId = req->getTriggerId(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, triggerId); - if (! req->getOnline()) { - triggerPtr.p->triggerState = TriggerRecord::TS_OFFLINE; - } else { - ndbrequire(triggerPtr.p->triggerState == TriggerRecord::TS_ONLINE); - } -} - -void -Dbdict::createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr) -{ - jam(); -} - -void -Dbdict::createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr) -{ - CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - opPtr.p->m_signalCounter = receiverNodes; - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB); -} - -void -Dbdict::createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr, - bool toUser) -{ - CreateTrigRef* rep = (CreateTrigRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_CREATE_TRIG_CONF; - Uint32 length = CreateTrigConf::InternalLength; - bool sendRef; - if (! 
toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == CreateTrigReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = CreateTrigConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - rep->setTriggerId(opPtr.p->m_request.getTriggerId()); - rep->setTriggerInfo(opPtr.p->m_request.getTriggerInfo()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) - opPtr.p->m_errorNode = getOwnNodeId(); - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_CREATE_TRIG_REF; - length = CreateTrigRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Drop trigger. - */ - -void -Dbdict::execDROP_TRIG_REQ(Signal* signal) -{ - jamEntry(); - DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend(); - OpDropTriggerPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const DropTrigReq::RequestType requestType = req->getRequestType(); - - if (signal->getNoOfSections() > 0) { - ndbrequire(signal->getNoOfSections() == 1); - jam(); - char triggerName[MAX_TAB_NAME_SIZE]; - OpDropTrigger opTmp; - opPtr.p=&opTmp; - - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr, DropTrigReq::TRIGGER_NAME_SECTION); - SimplePropertiesSectionReader ssReader(ssPtr, getSectionSegmentPool()); - if (ssReader.getKey() != DropTrigReq::TriggerNameKey || - ! ssReader.getString(triggerName)) { - jam(); - opPtr.p->m_errorCode = DropTrigRef::InvalidName; - opPtr.p->m_errorLine = __LINE__; - releaseSections(signal); - dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - releaseSections(signal); - - //ndbout_c("++++++++++++++ Looking for trigger %s", keyRecord.triggerName); - DictObject * obj_ptr_p = get_object(triggerName); - if (obj_ptr_p == 0){ - jam(); - req->setTriggerId(RNIL); - } else { - jam(); - //ndbout_c("++++++++++ Found trigger %s", triggerPtr.p->triggerName); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, obj_ptr_p->m_id); - req->setTriggerId(triggerPtr.p->triggerId); - req->setTableId(triggerPtr.p->tableId); - } - } - if (requestType == DropTrigReq::RT_USER || - requestType == DropTrigReq::RT_ALTER_INDEX || - requestType == DropTrigReq::RT_BUILD_INDEX) { - jam(); - if (signal->getLength() == DropTrigReq::SignalLength) { - if (getOwnNodeId() != c_masterNodeId) { - jam(); - // forward to DICT master - sendSignal(calcDictBlockRef(c_masterNodeId), GSN_DROP_TRIG_REQ, - signal, signal->getLength(), JBB); - return; - } - if (!c_triggerRecordPool.findId(req->getTriggerId())) { - jam(); - // return to sender - OpDropTrigger opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - if (! 
(req->getRequestFlag() & RequestFlag::RF_FORCE)) { - opPtr.p->m_errorCode = DropTrigRef::TriggerNotFound; - opPtr.p->m_errorLine = __LINE__; - } - dropTrigger_sendReply(signal, opPtr, true); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == DropTrigReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpDropTrigger opBusy; - if (! c_opDropTrigger.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = DropTrigReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = DropTrigRef::Busy; - opPtr.p->m_errorLine = __LINE__; - dropTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opDropTrigger.add(opPtr); - // master expects to hear from all - if (opPtr.p->m_isMaster) - opPtr.p->m_signalCounter = c_aliveNodes; - dropTrigger_slavePrepare(signal, opPtr); - dropTrigger_sendReply(signal, opPtr, false); - return; - } - c_opDropTrigger.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == DropTrigReq::RT_DICT_COMMIT || - requestType == DropTrigReq::RT_DICT_ABORT) { - jam(); - if (requestType == DropTrigReq::RT_DICT_COMMIT) - dropTrigger_slaveCommit(signal, opPtr); - else - dropTrigger_slaveAbort(signal, opPtr); - dropTrigger_sendReply(signal, opPtr, false); - // done in slave - if (! opPtr.p->m_isMaster) - c_opDropTrigger.release(opPtr); - return; - } - } - jam(); - // return to sender - OpDropTrigger opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = DropTrigRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - dropTrigger_sendReply(signal, opPtr, true); -} - -void -Dbdict::execDROP_TRIG_CONF(Signal* signal) -{ - jamEntry(); - DropTrigConf* conf = (DropTrigConf*)signal->getDataPtrSend(); - dropTrigger_recvReply(signal, conf, 0); -} - -void -Dbdict::execDROP_TRIG_REF(Signal* signal) -{ - jamEntry(); - DropTrigRef* ref = (DropTrigRef*)signal->getDataPtrSend(); - dropTrigger_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf, - const DropTrigRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const DropTrigReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == DropTrigReq::RT_ALTER_INDEX) { - jam(); - // part of alter index operation - OpAlterIndexPtr opPtr; - c_opAlterIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - alterIndex_fromDropTrigger(signal, opPtr); - return; - } - if (requestType == DropTrigReq::RT_BUILD_INDEX) { - jam(); - // part of build index operation - OpBuildIndexPtr opPtr; - c_opBuildIndex.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - buildIndex_fromDropConstr(signal, opPtr); - return; - } - if (requestType == DropTrigReq::RT_TC || - requestType == DropTrigReq::RT_LQH) { - jam(); - // part of alter trigger operation - OpAlterTriggerPtr opPtr; - c_opAlterTrigger.find(opPtr, key); - ndbrequire(! 
opPtr.isNull()); - opPtr.p->setError(ref); - alterTrigger_fromDropLocal(signal, opPtr); - return; - } - OpDropTriggerPtr opPtr; - c_opDropTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == DropTrigReq::RT_DICT_COMMIT || - requestType == DropTrigReq::RT_DICT_ABORT) { - jam(); - // send reply to user - dropTrigger_sendReply(signal, opPtr, true); - c_opDropTrigger.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = DropTrigReq::RT_DICT_ABORT; - dropTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == DropTrigReq::RT_DICT_PREPARE) { - jam(); - // start alter offline - dropTrigger_toAlterTrigger(signal, opPtr); - return; - } - ndbrequire(false); -} - -void -Dbdict::dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr) -{ - jam(); -} - -void -Dbdict::dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr) -{ - jam(); - AlterTrigReq* req = (AlterTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(AlterTrigReq::RT_DROP_TRIGGER); - req->addRequestFlag(opPtr.p->m_requestFlag); - req->setTableId(opPtr.p->m_request.getTableId()); - req->setTriggerId(opPtr.p->m_request.getTriggerId()); - req->setTriggerInfo(0); // not used - req->setOnline(false); - req->setReceiverRef(0); - sendSignal(reference(), GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); -} - -void -Dbdict::dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr) -{ - jam(); - // remove in all - opPtr.p->m_requestType = DropTrigReq::RT_DICT_COMMIT; - dropTrigger_sendSlaveReq(signal, opPtr); -} - -void -Dbdict::dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr) -{ - DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - opPtr.p->m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); -} - -void -Dbdict::dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr) -{ - jam(); - const DropTrigReq* const req = &opPtr.p->m_request; - // get trigger record - const Uint32 triggerId = req->getTriggerId(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, triggerId); - if (triggerPtr.p->triggerType == TriggerType::SECONDARY_INDEX || - triggerPtr.p->triggerType == TriggerType::ORDERED_INDEX) { - jam(); - // disconnect from index if index trigger XXX move to drop index - triggerPtr.p->indexId = req->getIndexId(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId); - ndbrequire(! 
indexPtr.isNull()); - switch (triggerPtr.p->triggerEvent) { - case TriggerEvent::TE_INSERT: - indexPtr.p->insertTriggerId = RNIL; - break; - case TriggerEvent::TE_UPDATE: - indexPtr.p->updateTriggerId = RNIL; - break; - case TriggerEvent::TE_DELETE: - indexPtr.p->deleteTriggerId = RNIL; - break; - case TriggerEvent::TE_CUSTOM: - indexPtr.p->customTriggerId = RNIL; - break; - default: - ndbrequire(false); - break; - } - } - if (triggerPtr.p->triggerType == TriggerType::READ_ONLY_CONSTRAINT) { - jam(); - // disconnect from index record XXX should be done in caller instead - triggerPtr.p->indexId = req->getTableId(); - TableRecordPtr indexPtr; - c_tableRecordPool.getPtr(indexPtr, triggerPtr.p->indexId); - indexPtr.p->buildTriggerId = RNIL; - } - //remove trigger - //ndbout_c("++++++++++++ Removing trigger id %u, %s", triggerPtr.p->triggerId, triggerPtr.p->triggerName); - release_object(triggerPtr.p->m_obj_ptr_i); - triggerPtr.p->triggerState = TriggerRecord::TS_NOT_DEFINED; -} - -void -Dbdict::dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr) -{ - jam(); -} - -void -Dbdict::dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr, - bool toUser) -{ - DropTrigRef* rep = (DropTrigRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_DROP_TRIG_CONF; - Uint32 length = DropTrigConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == DropTrigReq::RT_DICT_ABORT) - sendRef = false; - } else { - sendRef = opPtr.p->hasError(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = DropTrigConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setIndexId(opPtr.p->m_request.getIndexId()); - rep->setTriggerId(opPtr.p->m_request.getTriggerId()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) - opPtr.p->m_errorNode = getOwnNodeId(); - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_DROP_TRIG_REF; - length = CreateTrigRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Alter trigger. - * - * Alter trigger state. Alter online creates the trigger first in all - * TC (if index trigger) and then in all LQH-TUP. - * - * Request type received in REQ and returned in CONF/REF: - * - * RT_USER - normal user e.g. 
BACKUP - * RT_CREATE_TRIGGER - from create trigger - * RT_DROP_TRIGGER - from drop trigger - * RT_DICT_PREPARE - seize operations and check request - * RT_DICT_TC - master to each DICT on way to TC - * RT_DICT_LQH - master to each DICT on way to LQH-TUP - * RT_DICT_COMMIT - commit state change in each DICT (no reply) - */ - -void -Dbdict::execALTER_TRIG_REQ(Signal* signal) -{ - jamEntry(); - AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend(); - OpAlterTriggerPtr opPtr; - const Uint32 senderRef = signal->senderBlockRef(); - const AlterTrigReq::RequestType requestType = req->getRequestType(); - if (requestType == AlterTrigReq::RT_USER || - requestType == AlterTrigReq::RT_CREATE_TRIGGER || - requestType == AlterTrigReq::RT_DROP_TRIGGER) { - jam(); - const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL; - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (isLocal) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } - if (signal->getLength() == AlterTrigReq::SignalLength) { - jam(); - if (! isLocal && getOwnNodeId() != c_masterNodeId) { - jam(); - // forward to DICT master - sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); - return; - } - // forward initial request plus operation key to all - req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength + 1, JBB); - return; - } - // seize operation record - ndbrequire(signal->getLength() == AlterTrigReq::SignalLength + 1); - const Uint32 opKey = req->getOpKey(); - OpAlterTrigger opBusy; - if (! c_opAlterTrigger.seize(opPtr)) - opPtr.p = &opBusy; - opPtr.p->save(req); - opPtr.p->m_coordinatorRef = senderRef; - opPtr.p->m_isMaster = (senderRef == reference()); - opPtr.p->key = opKey; - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_PREPARE; - if (opPtr.p == &opBusy) { - jam(); - opPtr.p->m_errorCode = AlterTrigRef::Busy; - opPtr.p->m_errorLine = __LINE__; - alterTrigger_sendReply(signal, opPtr, opPtr.p->m_isMaster); - return; - } - c_opAlterTrigger.add(opPtr); - // master expects to hear from all - if (opPtr.p->m_isMaster) { - opPtr.p->m_nodes = receiverNodes; - opPtr.p->m_signalCounter = receiverNodes; - } - alterTrigger_slavePrepare(signal, opPtr); - alterTrigger_sendReply(signal, opPtr, false); - return; - } - c_opAlterTrigger.find(opPtr, req->getConnectionPtr()); - if (! opPtr.isNull()) { - opPtr.p->m_requestType = requestType; - if (requestType == AlterTrigReq::RT_DICT_TC || - requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - if (req->getOnline()) - alterTrigger_toCreateLocal(signal, opPtr); - else - alterTrigger_toDropLocal(signal, opPtr); - return; - } - if (requestType == AlterTrigReq::RT_DICT_COMMIT || - requestType == AlterTrigReq::RT_DICT_ABORT) { - jam(); - if (requestType == AlterTrigReq::RT_DICT_COMMIT) - alterTrigger_slaveCommit(signal, opPtr); - else - alterTrigger_slaveAbort(signal, opPtr); - alterTrigger_sendReply(signal, opPtr, false); - // done in slave - if (! 
opPtr.p->m_isMaster) - c_opAlterTrigger.release(opPtr); - return; - } - } - jam(); - // return to sender - OpAlterTrigger opBad; - opPtr.p = &opBad; - opPtr.p->save(req); - opPtr.p->m_errorCode = AlterTrigRef::BadRequestType; - opPtr.p->m_errorLine = __LINE__; - alterTrigger_sendReply(signal, opPtr, true); - return; -} - -void -Dbdict::execALTER_TRIG_CONF(Signal* signal) -{ - jamEntry(); - AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtrSend(); - alterTrigger_recvReply(signal, conf, 0); -} - -void -Dbdict::execALTER_TRIG_REF(Signal* signal) -{ - jamEntry(); - AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtrSend(); - alterTrigger_recvReply(signal, ref->getConf(), ref); -} - -void -Dbdict::alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf, - const AlterTrigRef* ref) -{ - jam(); - const Uint32 senderRef = signal->senderBlockRef(); - const AlterTrigReq::RequestType requestType = conf->getRequestType(); - const Uint32 key = conf->getConnectionPtr(); - if (requestType == AlterTrigReq::RT_CREATE_TRIGGER) { - jam(); - // part of create trigger operation - OpCreateTriggerPtr opPtr; - c_opCreateTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - createTrigger_fromAlterTrigger(signal, opPtr); - return; - } - if (requestType == AlterTrigReq::RT_DROP_TRIGGER) { - jam(); - // part of drop trigger operation - OpDropTriggerPtr opPtr; - c_opDropTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - opPtr.p->setError(ref); - dropTrigger_fromAlterTrigger(signal, opPtr); - return; - } - OpAlterTriggerPtr opPtr; - c_opAlterTrigger.find(opPtr, key); - ndbrequire(! opPtr.isNull()); - ndbrequire(opPtr.p->m_isMaster); - ndbrequire(opPtr.p->m_requestType == requestType); - /* - * If refuse on drop trig, because of non-existent trigger, - * comes from anyone but the master node - ignore it and - * remove the node from forter ALTER_TRIG communication - * This will happen if a new node has started since the - * trigger whas created. - */ - if (ref && - refToNode(senderRef) != refToNode(reference()) && - opPtr.p->m_request.getRequestType() == AlterTrigReq::RT_DROP_TRIGGER && - ref->getErrorCode() == AlterTrigRef::TriggerNotFound) { - jam(); - ref = 0; // ignore this error - opPtr.p->m_nodes.clear(refToNode(senderRef)); // remove this from group - } - opPtr.p->setError(ref); - opPtr.p->m_signalCounter.clearWaitingFor(refToNode(senderRef)); - if (! opPtr.p->m_signalCounter.done()) { - jam(); - return; - } - if (requestType == AlterTrigReq::RT_DICT_COMMIT || - requestType == AlterTrigReq::RT_DICT_ABORT) { - jam(); - // send reply to user - alterTrigger_sendReply(signal, opPtr, true); - c_opAlterTrigger.release(opPtr); - return; - } - if (opPtr.p->hasError()) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_ABORT; - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (! 
(opPtr.p->m_request.getRequestFlag() & RequestFlag::RF_NOTCTRIGGER)) { - if (requestType == AlterTrigReq::RT_DICT_PREPARE) { - jam(); - if (opPtr.p->m_request.getOnline()) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC; - } else { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH; - } - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - if (opPtr.p->m_request.getOnline()) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH; - } else { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT; - } - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - if (opPtr.p->m_request.getOnline()) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT; - } else { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_TC; - } - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - } else { - if (requestType == AlterTrigReq::RT_DICT_PREPARE) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_LQH; - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - if (requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - opPtr.p->m_requestType = AlterTrigReq::RT_DICT_COMMIT; - alterTrigger_sendSlaveReq(signal, opPtr); - return; - } - } - ndbrequire(false); -} - -void -Dbdict::alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - const AlterTrigReq* const req = &opPtr.p->m_request; - const Uint32 triggerId = req->getTriggerId(); - TriggerRecordPtr triggerPtr; - if (! (triggerId < c_triggerRecordPool.getSize())) { - jam(); - opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound; - opPtr.p->m_errorLine = __LINE__; - return; - } - c_triggerRecordPool.getPtr(triggerPtr, triggerId); - if (triggerPtr.p->triggerState == TriggerRecord::TS_NOT_DEFINED) { - jam(); - opPtr.p->m_errorCode = AlterTrigRef::TriggerNotFound; - opPtr.p->m_errorLine = __LINE__; - return; - } - - if (triggerPtr.p->triggerType == TriggerType::SUBSCRIPTION) - { - opPtr.p->m_request.addRequestFlag(RequestFlag::RF_NOTCTRIGGER); - } -} - -void -Dbdict::alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - // find trigger record - const Uint32 triggerId = opPtr.p->m_request.getTriggerId(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, triggerId); - CreateTrigReq* const req = (CreateTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - req->setRequestType(CreateTrigReq::RT_TC); - } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - req->setRequestType(CreateTrigReq::RT_LQH); - } else { - ndbassert(false); - } - req->setTableId(triggerPtr.p->tableId); - req->setIndexId(triggerPtr.p->indexId); - req->setTriggerId(triggerPtr.i); - req->setTriggerType(triggerPtr.p->triggerType); - req->setTriggerActionTime(triggerPtr.p->triggerActionTime); - req->setTriggerEvent(triggerPtr.p->triggerEvent); - req->setMonitorReplicas(triggerPtr.p->monitorReplicas); - req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes); - req->setReportAllMonitoredAttributes(triggerPtr.p->reportAllMonitoredAttributes); - req->setOnline(true); - req->setReceiverRef(opPtr.p->m_request.getReceiverRef()); - BlockReference blockRef = 0; - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - blockRef = calcTcBlockRef(getOwnNodeId()); - } else if 
(opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - blockRef = calcLqhBlockRef(getOwnNodeId()); - } else { - ndbassert(false); - } - req->setAttributeMask(triggerPtr.p->attributeMask); - sendSignal(blockRef, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB); -} - -void -Dbdict::alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - if (! opPtr.p->hasLastError()) { - // mark created locally - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId()); - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_TC; - } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - triggerPtr.p->triggerLocal |= TriggerRecord::TL_CREATED_LQH; - } else { - ndbrequire(false); - } - } - // forward CONF or REF to master - alterTrigger_sendReply(signal, opPtr, false); -} - -void -Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId()); - DropTrigReq* const req = (DropTrigReq*)signal->getDataPtrSend(); - req->setUserRef(reference()); - req->setConnectionPtr(opPtr.p->key); - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - // broken trigger allowed if force - if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_TC)) { - jam(); - ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE); - alterTrigger_sendReply(signal, opPtr, false); - return; - } - req->setRequestType(DropTrigReq::RT_TC); - } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - // broken trigger allowed if force - if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_LQH)) { - jam(); - ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE); - alterTrigger_sendReply(signal, opPtr, false); - return; - } - req->setRequestType(DropTrigReq::RT_LQH); - } else { - ndbassert(false); - } - req->setTableId(triggerPtr.p->tableId); - req->setIndexId(triggerPtr.p->indexId); - req->setTriggerId(triggerPtr.i); - req->setTriggerType(triggerPtr.p->triggerType); - req->setTriggerActionTime(triggerPtr.p->triggerActionTime); - req->setTriggerEvent(triggerPtr.p->triggerEvent); - req->setMonitorReplicas(triggerPtr.p->monitorReplicas); - req->setMonitorAllAttributes(triggerPtr.p->monitorAllAttributes); - BlockReference blockRef = 0; - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - blockRef = calcTcBlockRef(getOwnNodeId()); - } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - blockRef = calcLqhBlockRef(getOwnNodeId()); - } else { - ndbassert(false); - } - sendSignal(blockRef, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); -} - -void -Dbdict::alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - if (! 
opPtr.p->hasLastError()) { - // mark dropped locally - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId()); - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_TC) { - jam(); - triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_TC; - } else if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_LQH) { - jam(); - triggerPtr.p->triggerLocal &= ~TriggerRecord::TL_CREATED_LQH; - } else { - ndbrequire(false); - } - } - // forward CONF or REF to master - alterTrigger_sendReply(signal, opPtr, false); -} - -void -Dbdict::alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); - TriggerRecordPtr triggerPtr; - c_triggerRecordPool.getPtr(triggerPtr, opPtr.p->m_request.getTriggerId()); - // set state - triggerPtr.p->triggerState = TriggerRecord::TS_ONLINE; -} - -void -Dbdict::alterTrigger_slaveAbort(Signal* signal, OpAlterTriggerPtr opPtr) -{ - jam(); -} - -void -Dbdict::alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr) -{ - AlterTrigReq* const req = (AlterTrigReq*)signal->getDataPtrSend(); - *req = opPtr.p->m_request; - req->setUserRef(opPtr.p->m_coordinatorRef); - req->setConnectionPtr(opPtr.p->key); - req->setRequestType(opPtr.p->m_requestType); - req->addRequestFlag(opPtr.p->m_requestFlag); - NdbNodeBitmask receiverNodes = c_aliveNodes; - if (opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) { - receiverNodes.clear(); - receiverNodes.set(getOwnNodeId()); - } else { - opPtr.p->m_nodes.bitAND(receiverNodes); - receiverNodes = opPtr.p->m_nodes; - } - opPtr.p->m_signalCounter = receiverNodes; - NodeReceiverGroup rg(DBDICT, receiverNodes); - sendSignal(rg, GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); -} - -void -Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr, - bool toUser) -{ - jam(); - AlterTrigRef* rep = (AlterTrigRef*)signal->getDataPtrSend(); - Uint32 gsn = GSN_ALTER_TRIG_CONF; - Uint32 length = AlterTrigConf::InternalLength; - bool sendRef; - if (! toUser) { - sendRef = opPtr.p->hasLastError(); - rep->setUserRef(opPtr.p->m_coordinatorRef); - rep->setConnectionPtr(opPtr.p->key); - rep->setRequestType(opPtr.p->m_requestType); - if (opPtr.p->m_requestType == AlterTrigReq::RT_DICT_ABORT) { - jam(); - sendRef = false; - } else { - jam(); - } - } else { - sendRef = opPtr.p->hasError(); - jam(); - rep->setUserRef(opPtr.p->m_request.getUserRef()); - rep->setConnectionPtr(opPtr.p->m_request.getConnectionPtr()); - rep->setRequestType(opPtr.p->m_request.getRequestType()); - length = AlterTrigConf::SignalLength; - } - rep->setTableId(opPtr.p->m_request.getTableId()); - rep->setTriggerId(opPtr.p->m_request.getTriggerId()); - if (sendRef) { - if (opPtr.p->m_errorNode == 0) { - jam(); - opPtr.p->m_errorNode = getOwnNodeId(); - } else { - jam(); - } - rep->setErrorCode(opPtr.p->m_errorCode); - rep->setErrorLine(opPtr.p->m_errorLine); - rep->setErrorNode(opPtr.p->m_errorNode); - gsn = GSN_ALTER_TRIG_REF; - length = AlterTrigRef::SignalLength; - } - sendSignal(rep->getUserRef(), gsn, signal, length, JBB); -} - -/** - * MODULE: Support routines for index and trigger. - */ - -/* - This routine is used to set-up the primary key attributes of the unique - hash index. Since we store fragment id as part of the primary key here - we insert the pseudo column for getting fragment id first in the array. - This routine is used as part of the building of the index. 
-*/ - -void -Dbdict::getTableKeyList(TableRecordPtr tablePtr, - Id_array& list) -{ - jam(); - list.sz = 0; - list.id[list.sz++] = AttributeHeader::FRAGMENT; - LocalDLFifoList alist(c_attributeRecordPool, - tablePtr.p->m_attributes); - AttributeRecordPtr attrPtr; - for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) { - if (attrPtr.p->tupleKey) { - list.id[list.sz++] = attrPtr.p->attributeId; - } - } - ndbrequire(list.sz == (uint)(tablePtr.p->noOfPrimkey + 1)); - ndbrequire(list.sz <= MAX_ATTRIBUTES_IN_INDEX + 1); -} - -// XXX should store the primary attribute id -void -Dbdict::getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id) -{ - jam(); - - Uint32 len; - char name[MAX_ATTR_NAME_SIZE]; - TableRecordPtr tablePtr; - AttributeRecordPtr attrPtr; - - c_tableRecordPool.getPtr(tablePtr, indexPtr.p->primaryTableId); - AttributeRecord* iaRec = c_attributeRecordPool.getPtr(itAttr); - { - ConstRope tmp(c_rope_pool, iaRec->attributeName); - tmp.copy(name); - len = tmp.size(); - } - LocalDLFifoList alist(c_attributeRecordPool, - tablePtr.p->m_attributes); - for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)){ - ConstRope tmp(c_rope_pool, attrPtr.p->attributeName); - if(tmp.compare(name, len) == 0){ - id[0] = attrPtr.p->attributeId; - return; - } - } - ndbrequire(false); -} - -void -Dbdict::getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list) -{ - jam(); - list.sz = 0; - memset(list.id, 0, sizeof(list.id)); - ndbrequire(indexPtr.p->noOfAttributes >= 2); - - LocalDLFifoList alist(c_attributeRecordPool, - indexPtr.p->m_attributes); - AttributeRecordPtr attrPtr; - for (alist.first(attrPtr); !attrPtr.isNull(); alist.next(attrPtr)) { - // skip last - AttributeRecordPtr tempPtr = attrPtr; - if (! alist.next(tempPtr)) - break; - getIndexAttr(indexPtr, attrPtr.i, &list.id[list.sz++]); - } - ndbrequire(indexPtr.p->noOfAttributes == list.sz + 1); -} - -void -Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask) -{ - jam(); - mask.clear(); - ndbrequire(indexPtr.p->noOfAttributes >= 2); - - AttributeRecordPtr attrPtr, currPtr; - LocalDLFifoList alist(c_attributeRecordPool, - indexPtr.p->m_attributes); - - - for (alist.first(attrPtr); currPtr = attrPtr, alist.next(attrPtr); ){ - Uint32 id; - getIndexAttr(indexPtr, currPtr.i, &id); - mask.set(id); - } -} - -// DICT lock master - -const Dbdict::DictLockType* -Dbdict::getDictLockType(Uint32 lockType) -{ - static const DictLockType lt[] = { - { DictLockReq::NodeRestartLock, BS_NODE_RESTART, "NodeRestart" } - }; - for (unsigned int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { - if ((Uint32) lt[i].lockType == lockType) - return <[i]; - } - return NULL; -} - -void -Dbdict::sendDictLockInfoEvent(Uint32 pollCount) -{ - DictLockPtr loopPtr; - c_dictLockQueue.first(loopPtr); - unsigned count = 0; - - char queue_buf[100]; - char *p = &queue_buf[0]; - const char *const q = &queue_buf[sizeof(queue_buf)]; - *p = 0; - - while (loopPtr.i != RNIL) { - jam(); - my_snprintf(p, q-p, "%s%u%s", - ++count == 1 ? "" : " ", - (unsigned)refToNode(loopPtr.p->req.userRef), - loopPtr.p->locked ? 
"L" : ""); - p += strlen(p); - c_dictLockQueue.next(loopPtr); - } - - infoEvent("DICT: lock bs: %d ops: %d poll: %d cnt: %d queue: %s", - (int)c_blockState, - c_opRecordPool.getSize() - c_opRecordPool.getNoOfFree(), - c_dictLockPoll, (int)pollCount, queue_buf); -} - -void -Dbdict::sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text) -{ - infoEvent("DICT: %s %u for %s", - text, - (unsigned)refToNode(lockPtr.p->req.userRef), lockPtr.p->lt->text); -} - -void -Dbdict::execDICT_LOCK_REQ(Signal* signal) -{ - jamEntry(); - const DictLockReq* req = (const DictLockReq*)&signal->theData[0]; - - // make sure bad request crashes slave, not master (us) - - if (getOwnNodeId() != c_masterNodeId) { - jam(); - sendDictLockRef(signal, *req, DictLockRef::NotMaster); - return; - } - - const DictLockType* lt = getDictLockType(req->lockType); - if (lt == NULL) { - jam(); - sendDictLockRef(signal, *req, DictLockRef::InvalidLockType); - return; - } - - if (req->userRef != signal->getSendersBlockRef() || - getNodeInfo(refToNode(req->userRef)).m_type != NodeInfo::DB) { - jam(); - sendDictLockRef(signal, *req, DictLockRef::BadUserRef); - return; - } - - if (c_aliveNodes.get(refToNode(req->userRef))) { - jam(); - sendDictLockRef(signal, *req, DictLockRef::TooLate); - return; - } - - DictLockPtr lockPtr; - if (! c_dictLockQueue.seize(lockPtr)) { - jam(); - sendDictLockRef(signal, *req, DictLockRef::TooManyRequests); - return; - } - - lockPtr.p->req = *req; - lockPtr.p->locked = false; - lockPtr.p->lt = lt; - - checkDictLockQueue(signal, false); - - if (! lockPtr.p->locked) - sendDictLockInfoEvent(lockPtr, "lock request by node"); -} - -// only table and index ops are checked -bool -Dbdict::hasDictLockSchemaOp() -{ - return - ! c_opCreateTable.isEmpty() || - ! c_opDropTable.isEmpty() || - ! c_opCreateIndex.isEmpty() || - ! c_opDropIndex.isEmpty(); -} - -void -Dbdict::checkDictLockQueue(Signal* signal, bool poll) -{ - Uint32 pollCount = ! poll ? 0 : signal->theData[1]; - - DictLockPtr lockPtr; - - do { - if (! c_dictLockQueue.first(lockPtr)) { - jam(); - setDictLockPoll(signal, false, pollCount); - return; - } - - if (lockPtr.p->locked) { - jam(); - ndbrequire(c_blockState == lockPtr.p->lt->blockState); - break; - } - - if (hasDictLockSchemaOp()) { - jam(); - break; - } - - if (c_blockState != BS_IDLE) - { - /** - * If state is BS_NODE_FAILURE, it might be that no op is running - */ - jam(); - break; - } - - ndbrequire(c_blockState == BS_IDLE); - lockPtr.p->locked = true; - c_blockState = lockPtr.p->lt->blockState; - sendDictLockConf(signal, lockPtr); - - sendDictLockInfoEvent(lockPtr, "locked by node"); - } while (0); - - // poll while first request is open - // this routine is called again when it is removed for any reason - - bool on = ! lockPtr.p->locked; - setDictLockPoll(signal, on, pollCount); -} - -void -Dbdict::execDICT_UNLOCK_ORD(Signal* signal) -{ - jamEntry(); - const DictUnlockOrd* ord = (const DictUnlockOrd*)&signal->theData[0]; - - DictLockPtr lockPtr; - c_dictLockQueue.getPtr(lockPtr, ord->lockPtr); - ndbrequire((Uint32) lockPtr.p->lt->lockType == ord->lockType); - - if (lockPtr.p->locked) { - jam(); - ndbrequire(c_blockState == lockPtr.p->lt->blockState); - ndbrequire(! hasDictLockSchemaOp()); - ndbrequire(! 
c_dictLockQueue.hasPrev(lockPtr)); - - c_blockState = BS_IDLE; - sendDictLockInfoEvent(lockPtr, "unlocked by node"); - } else { - sendDictLockInfoEvent(lockPtr, "lock request removed by node"); - } - - c_dictLockQueue.release(lockPtr); - - checkDictLockQueue(signal, false); -} - -void -Dbdict::sendDictLockConf(Signal* signal, DictLockPtr lockPtr) -{ - DictLockConf* conf = (DictLockConf*)&signal->theData[0]; - const DictLockReq& req = lockPtr.p->req; - - conf->userPtr = req.userPtr; - conf->lockType = req.lockType; - conf->lockPtr = lockPtr.i; - - sendSignal(req.userRef, GSN_DICT_LOCK_CONF, signal, - DictLockConf::SignalLength, JBB); -} - -void -Dbdict::sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode) -{ - DictLockRef* ref = (DictLockRef*)&signal->theData[0]; - - ref->userPtr = req.userPtr; - ref->lockType = req.lockType; - ref->errorCode = errorCode; - - sendSignal(req.userRef, GSN_DICT_LOCK_REF, signal, - DictLockRef::SignalLength, JBB); -} - -// control polling - -void -Dbdict::setDictLockPoll(Signal* signal, bool on, Uint32 pollCount) -{ - if (on) { - jam(); - signal->theData[0] = ZDICT_LOCK_POLL; - signal->theData[1] = pollCount + 1; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - } - - bool change = (c_dictLockPoll != on); - - if (change) { - jam(); - c_dictLockPoll = on; - } - - // avoid too many messages if master is stuck busy (BS_NODE_FAILURE) - bool periodic = - pollCount < 8 || - pollCount < 64 && pollCount % 8 == 0 || - pollCount < 512 && pollCount % 64 == 0 || - pollCount < 4096 && pollCount % 512 == 0 || - pollCount % 4096 == 0; // about every 6 minutes - - if (change || periodic) - sendDictLockInfoEvent(pollCount); -} - -// NF handling - -void -Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes) -{ - DictLockPtr loopPtr; - c_dictLockQueue.first(loopPtr); - - if (getOwnNodeId() != c_masterNodeId) { - ndbrequire(loopPtr.i == RNIL); - return; - } - - while (loopPtr.i != RNIL) { - jam(); - DictLockPtr lockPtr = loopPtr; - c_dictLockQueue.next(loopPtr); - - Uint32 nodeId = refToNode(lockPtr.p->req.userRef); - - if (NodeBitmask::get(theFailedNodes, nodeId)) { - if (lockPtr.p->locked) { - jam(); - ndbrequire(c_blockState == lockPtr.p->lt->blockState); - ndbrequire(! hasDictLockSchemaOp()); - ndbrequire(! c_dictLockQueue.hasPrev(lockPtr)); - - c_blockState = BS_IDLE; - - sendDictLockInfoEvent(lockPtr, "remove lock by failed node"); - } else { - sendDictLockInfoEvent(lockPtr, "remove lock request by failed node"); - } - - c_dictLockQueue.release(lockPtr); - } - } - - checkDictLockQueue(signal, false); -} - - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */ -/* ---------------------------------------------------------------- */ -/* */ -/* General module used to store the schema file on disk and */ -/* similar function to restore it from disk. 
*/ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -void -Dbdict::initSchemaFile(XSchemaFile * xsf, Uint32 firstPage, Uint32 lastPage, - bool initEntries) -{ - ndbrequire(lastPage <= xsf->noOfPages); - for (Uint32 n = firstPage; n < lastPage; n++) { - SchemaFile * sf = &xsf->schemaPage[n]; - if (initEntries) - memset(sf, 0, NDB_SF_PAGE_SIZE); - - Uint32 ndb_version = NDB_VERSION; - if (ndb_version < NDB_SF_VERSION_5_0_6) - ndb_version = NDB_SF_VERSION_5_0_6; - - memcpy(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)); - sf->ByteOrder = 0x12345678; - sf->NdbVersion = ndb_version; - sf->FileSize = xsf->noOfPages * NDB_SF_PAGE_SIZE; - sf->PageNumber = n; - sf->CheckSum = 0; - sf->NoOfTableEntries = NDB_SF_PAGE_ENTRIES; - - computeChecksum(xsf, n); - } -} - -void -Dbdict::resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages) -{ - ndbrequire(noOfPages <= NDB_SF_MAX_PAGES); - if (xsf->noOfPages < noOfPages) { - jam(); - Uint32 firstPage = xsf->noOfPages; - xsf->noOfPages = noOfPages; - initSchemaFile(xsf, 0, firstPage, false); - initSchemaFile(xsf, firstPage, xsf->noOfPages, true); - } - if (xsf->noOfPages > noOfPages) { - jam(); - Uint32 tableId = noOfPages * NDB_SF_PAGE_ENTRIES; - while (tableId < xsf->noOfPages * NDB_SF_PAGE_ENTRIES) { - SchemaFile::TableEntry * te = getTableEntry(xsf, tableId); - if (te->m_tableState != SchemaFile::INIT && - te->m_tableState != SchemaFile::DROP_TABLE_COMMITTED) { - ndbrequire(false); - } - tableId++; - } - xsf->noOfPages = noOfPages; - initSchemaFile(xsf, 0, xsf->noOfPages, false); - } -} - -void -Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){ - SchemaFile * sf = &xsf->schemaPage[pageNo]; - sf->CheckSum = 0; - sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS); -} - -bool -Dbdict::validateChecksum(const XSchemaFile * xsf){ - - for (Uint32 n = 0; n < xsf->noOfPages; n++) { - SchemaFile * sf = &xsf->schemaPage[n]; - Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS); - if ( c != 0) - return false; - } - return true; -} - -Uint32 -Dbdict::computeChecksum(const Uint32 * src, Uint32 len){ - Uint32 ret = 0; - for(Uint32 i = 0; i<len; i++) - ret ^= src[i]; - return ret; -} - -SchemaFile::TableEntry * -Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId) -{ - Uint32 n = tableId / NDB_SF_PAGE_ENTRIES; - Uint32 i = tableId % NDB_SF_PAGE_ENTRIES; - ndbrequire(n < xsf->noOfPages); - - SchemaFile * sf = &xsf->schemaPage[n]; - return &sf->TableEntries[i]; -} - -//****************************************** -void -Dbdict::execCREATE_FILE_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - CreateFileReq * req = (CreateFileReq*)signal->getDataPtr(); - CreateFileRef * ref = (CreateFileRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 type = req->objType; - Uint32 requestInfo = req->requestInfo; - - do { - if(getOwnNodeId() != c_masterNodeId){ - jam(); - ref->errorCode = CreateFileRef::NotMaster; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (c_blockState != BS_IDLE){ - jam(); - ref->errorCode = CreateFileRef::Busy; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (checkSingleUserMode(senderRef)) - { - ref->errorCode = CreateFileRef::SingleUser; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - Ptr<SchemaTransaction> trans_ptr; - if (! 
c_Trans.seize(trans_ptr)){ - jam(); - ref->errorCode = CreateFileRef::Busy; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - jam(); - const Uint32 trans_key = ++c_opRecordSequence; - trans_ptr.p->key = trans_key; - trans_ptr.p->m_senderRef = senderRef; - trans_ptr.p->m_senderData = senderData; - trans_ptr.p->m_nodes = c_aliveNodes; - trans_ptr.p->m_errorCode = 0; -// trans_ptr.p->m_nodes.clear(); -// trans_ptr.p->m_nodes.set(getOwnNodeId()); - c_Trans.add(trans_ptr); - - const Uint32 op_key = ++c_opRecordSequence; - trans_ptr.p->m_op.m_key = op_key; - trans_ptr.p->m_op.m_vt_index = 1; - trans_ptr.p->m_op.m_state = DictObjOp::Preparing; - - CreateObjReq* create_obj = (CreateObjReq*)signal->getDataPtrSend(); - create_obj->op_key = op_key; - create_obj->senderRef = reference(); - create_obj->senderData = trans_key; - create_obj->clientRef = senderRef; - create_obj->clientData = senderData; - - create_obj->objType = type; - create_obj->requestInfo = requestInfo; - - { - Uint32 objId = getFreeObjId(0); - if (objId == RNIL) { - jam(); - ref->errorCode = CreateFileRef::NoMoreObjectRecords; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - create_obj->objId = objId; - trans_ptr.p->m_op.m_obj_id = objId; - create_obj->gci = 0; - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry *objEntry = getTableEntry(xsf, objId); - create_obj->objVersion = - create_obj_inc_schema_version(objEntry->m_tableVersion); - } - - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - tmp.init(rg, GSN_CREATE_OBJ_REF, trans_key); - sendSignal(rg, GSN_CREATE_OBJ_REQ, signal, - CreateObjReq::SignalLength, JBB); - - c_blockState = BS_CREATE_TAB; - return; - } while(0); - - ref->senderData = senderData; - ref->masterNodeId = c_masterNodeId; - sendSignal(senderRef, GSN_CREATE_FILE_REF,signal, - CreateFileRef::SignalLength, JBB); -} - -void -Dbdict::execCREATE_FILEGROUP_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - CreateFilegroupReq * req = (CreateFilegroupReq*)signal->getDataPtr(); - CreateFilegroupRef * ref = (CreateFilegroupRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 type = req->objType; - - do { - if(getOwnNodeId() != c_masterNodeId){ - jam(); - ref->errorCode = CreateFilegroupRef::NotMaster; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (c_blockState != BS_IDLE){ - jam(); - ref->errorCode = CreateFilegroupRef::Busy; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (checkSingleUserMode(senderRef)) - { - ref->errorCode = CreateFilegroupRef::SingleUser; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - Ptr trans_ptr; - if (! 
c_Trans.seize(trans_ptr)){ - jam(); - ref->errorCode = CreateFilegroupRef::Busy; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - jam(); - const Uint32 trans_key = ++c_opRecordSequence; - trans_ptr.p->key = trans_key; - trans_ptr.p->m_senderRef = senderRef; - trans_ptr.p->m_senderData = senderData; - trans_ptr.p->m_nodes = c_aliveNodes; - trans_ptr.p->m_errorCode = 0; - c_Trans.add(trans_ptr); - - const Uint32 op_key = ++c_opRecordSequence; - trans_ptr.p->m_op.m_key = op_key; - trans_ptr.p->m_op.m_vt_index = 0; - trans_ptr.p->m_op.m_state = DictObjOp::Preparing; - - CreateObjReq* create_obj = (CreateObjReq*)signal->getDataPtrSend(); - create_obj->op_key = op_key; - create_obj->senderRef = reference(); - create_obj->senderData = trans_key; - create_obj->clientRef = senderRef; - create_obj->clientData = senderData; - - create_obj->objType = type; - - { - Uint32 objId = getFreeObjId(0); - if (objId == RNIL) { - jam(); - ref->errorCode = CreateFilegroupRef::NoMoreObjectRecords; - ref->status = 0; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - create_obj->objId = objId; - trans_ptr.p->m_op.m_obj_id = objId; - create_obj->gci = 0; - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry *objEntry = getTableEntry(xsf, objId); - create_obj->objVersion = - create_obj_inc_schema_version(objEntry->m_tableVersion); - } - - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - tmp.init(rg, GSN_CREATE_OBJ_REF, trans_key); - sendSignal(rg, GSN_CREATE_OBJ_REQ, signal, - CreateObjReq::SignalLength, JBB); - - c_blockState = BS_CREATE_TAB; - return; - } while(0); - - ref->senderData = senderData; - ref->masterNodeId = c_masterNodeId; - sendSignal(senderRef, GSN_CREATE_FILEGROUP_REF,signal, - CreateFilegroupRef::SignalLength, JBB); -} - -void -Dbdict::execDROP_FILE_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - DropFileReq * req = (DropFileReq*)signal->getDataPtr(); - DropFileRef * ref = (DropFileRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 objId = req->file_id; - Uint32 version = req->file_version; - - do { - if(getOwnNodeId() != c_masterNodeId){ - jam(); - ref->errorCode = DropFileRef::NotMaster; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (c_blockState != BS_IDLE) - { - jam(); - ref->errorCode = DropFileRef::Busy; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (checkSingleUserMode(senderRef)) - { - jam(); - ref->errorCode = DropFileRef::SingleUser; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - Ptr file_ptr; - if (!c_file_hash.find(file_ptr, objId)) - { - jam(); - ref->errorCode = DropFileRef::NoSuchFile; - ref->errorLine = __LINE__; - break; - } - - if (file_ptr.p->m_version != version) - { - jam(); - ref->errorCode = DropFileRef::InvalidSchemaObjectVersion; - ref->errorLine = __LINE__; - break; - } - - Ptr trans_ptr; - if (! 
c_Trans.seize(trans_ptr)) - { - jam(); - ref->errorCode = DropFileRef::Busy; - ref->errorLine = __LINE__; - break; - } - jam(); - - const Uint32 trans_key = ++c_opRecordSequence; - trans_ptr.p->key = trans_key; - trans_ptr.p->m_senderRef = senderRef; - trans_ptr.p->m_senderData = senderData; - trans_ptr.p->m_nodes = c_aliveNodes; - trans_ptr.p->m_errorCode = 0; - c_Trans.add(trans_ptr); - - const Uint32 op_key = ++c_opRecordSequence; - trans_ptr.p->m_op.m_key = op_key; - trans_ptr.p->m_op.m_vt_index = 2; - trans_ptr.p->m_op.m_state = DictObjOp::Preparing; - - DropObjReq* drop_obj = (DropObjReq*)signal->getDataPtrSend(); - drop_obj->op_key = op_key; - drop_obj->objVersion = version; - drop_obj->objId = objId; - drop_obj->objType = file_ptr.p->m_type; - trans_ptr.p->m_op.m_obj_id = objId; - - drop_obj->senderRef = reference(); - drop_obj->senderData = trans_key; - drop_obj->clientRef = senderRef; - drop_obj->clientData = senderData; - - drop_obj->requestInfo = 0; - - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - tmp.init(rg, GSN_DROP_OBJ_REF, trans_key); - sendSignal(rg, GSN_DROP_OBJ_REQ, signal, - DropObjReq::SignalLength, JBB); - - c_blockState = BS_CREATE_TAB; - return; - } while(0); - - ref->senderData = senderData; - ref->masterNodeId = c_masterNodeId; - sendSignal(senderRef, GSN_DROP_FILE_REF,signal, - DropFileRef::SignalLength, JBB); -} - -void -Dbdict::execDROP_FILEGROUP_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - DropFilegroupReq * req = (DropFilegroupReq*)signal->getDataPtr(); - DropFilegroupRef * ref = (DropFilegroupRef*)signal->getDataPtrSend(); - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 objId = req->filegroup_id; - Uint32 version = req->filegroup_version; - - do { - if(getOwnNodeId() != c_masterNodeId) - { - jam(); - ref->errorCode = DropFilegroupRef::NotMaster; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (c_blockState != BS_IDLE) - { - jam(); - ref->errorCode = DropFilegroupRef::Busy; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - if (checkSingleUserMode(senderRef)) - { - jam(); - ref->errorCode = DropFilegroupRef::SingleUser; - ref->errorKey = 0; - ref->errorLine = __LINE__; - break; - } - - Ptr filegroup_ptr; - if (!c_filegroup_hash.find(filegroup_ptr, objId)) - { - jam(); - ref->errorCode = DropFilegroupRef::NoSuchFilegroup; - ref->errorLine = __LINE__; - break; - } - - if (filegroup_ptr.p->m_version != version) - { - jam(); - ref->errorCode = DropFilegroupRef::InvalidSchemaObjectVersion; - ref->errorLine = __LINE__; - break; - } - - Ptr trans_ptr; - if (! 
c_Trans.seize(trans_ptr)) - { - jam(); - ref->errorCode = DropFilegroupRef::Busy; - ref->errorLine = __LINE__; - break; - } - jam(); - - const Uint32 trans_key = ++c_opRecordSequence; - trans_ptr.p->key = trans_key; - trans_ptr.p->m_senderRef = senderRef; - trans_ptr.p->m_senderData = senderData; - trans_ptr.p->m_nodes = c_aliveNodes; - trans_ptr.p->m_errorCode = 0; - c_Trans.add(trans_ptr); - - const Uint32 op_key = ++c_opRecordSequence; - trans_ptr.p->m_op.m_key = op_key; - trans_ptr.p->m_op.m_vt_index = 3; - trans_ptr.p->m_op.m_state = DictObjOp::Preparing; - - DropObjReq* drop_obj = (DropObjReq*)signal->getDataPtrSend(); - drop_obj->op_key = op_key; - drop_obj->objVersion = version; - drop_obj->objId = objId; - drop_obj->objType = filegroup_ptr.p->m_type; - trans_ptr.p->m_op.m_obj_id = objId; - - drop_obj->senderRef = reference(); - drop_obj->senderData = trans_key; - drop_obj->clientRef = senderRef; - drop_obj->clientData = senderData; - - drop_obj->requestInfo = 0; - - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - tmp.init(rg, GSN_DROP_OBJ_REF, trans_key); - sendSignal(rg, GSN_DROP_OBJ_REQ, signal, - DropObjReq::SignalLength, JBB); - - c_blockState = BS_CREATE_TAB; - return; - } while(0); - - ref->senderData = senderData; - ref->masterNodeId = c_masterNodeId; - sendSignal(senderRef, GSN_DROP_FILEGROUP_REF,signal, - DropFilegroupRef::SignalLength, JBB); -} - -void -Dbdict::execCREATE_OBJ_REF(Signal* signal) -{ - CreateObjRef * const ref = (CreateObjRef*)signal->getDataPtr(); - Ptr trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != CreateObjRef::NF_FakeErrorREF){ - jam(); - trans_ptr.p->setErrorCode(ref->errorCode); - } - Uint32 node = refToNode(ref->senderRef); - schemaOp_reply(signal, trans_ptr.p, node); -} - -void -Dbdict::execCREATE_OBJ_CONF(Signal* signal) -{ - Ptr trans_ptr; - CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); - schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); -} - -void -Dbdict::schemaOp_reply(Signal* signal, - SchemaTransaction * trans_ptr_p, - Uint32 nodeId) -{ - jam(); - { - SafeCounter tmp(c_counterMgr, trans_ptr_p->m_counter); - if(!tmp.clearWaitingFor(nodeId)){ - jam(); - return; - } - } - - switch(trans_ptr_p->m_op.m_state){ - case DictObjOp::Preparing:{ - if(trans_ptr_p->m_errorCode != 0) - { - /** - * Failed to prepare on atleast one node -> abort on all - */ - trans_ptr_p->m_op.m_state = DictObjOp::Aborting; - trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; - trans_ptr_p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::trans_abort_start_done); - - if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) - { - jam(); - (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_start) - (signal, trans_ptr_p); - } - else - { - jam(); - execute(signal, trans_ptr_p->m_callback, 0); - } - return; - } - - trans_ptr_p->m_op.m_state = DictObjOp::Prepared; - trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; - trans_ptr_p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::trans_commit_start_done); - - if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) - { - jam(); - (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_start) - (signal, trans_ptr_p); - } - else - { - jam(); - execute(signal, trans_ptr_p->m_callback, 0); - } - return; - } - case DictObjOp::Committing: { - 
ndbrequire(trans_ptr_p->m_errorCode == 0); - - trans_ptr_p->m_op.m_state = DictObjOp::Committed; - trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; - trans_ptr_p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::trans_commit_complete_done); - - if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) - { - jam(); - (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_commit_complete) - (signal, trans_ptr_p); - } - else - { - jam(); - execute(signal, trans_ptr_p->m_callback, 0); - } - return; - } - case DictObjOp::Aborting:{ - trans_ptr_p->m_op.m_state = DictObjOp::Committed; - trans_ptr_p->m_callback.m_callbackData = trans_ptr_p->key; - trans_ptr_p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::trans_abort_complete_done); - - if(f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) - { - jam(); - (this->*f_dict_op[trans_ptr_p->m_op.m_vt_index].m_trans_abort_complete) - (signal, trans_ptr_p); - } - else - { - jam(); - execute(signal, trans_ptr_p->m_callback, 0); - } - return; - } - case DictObjOp::Defined: - case DictObjOp::Prepared: - case DictObjOp::Committed: - case DictObjOp::Aborted: - jam(); - break; - } - ndbrequire(false); -} - -void -Dbdict::trans_commit_start_done(Signal* signal, - Uint32 callbackData, - Uint32 retValue) -{ - Ptr trans_ptr; - - jam(); - ndbrequire(retValue == 0); - ndbrequire(c_Trans.find(trans_ptr, callbackData)); - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - tmp.init(rg, GSN_DICT_COMMIT_REF, trans_ptr.p->key); - - DictCommitReq * const req = (DictCommitReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = trans_ptr.p->key; - req->op_key = trans_ptr.p->m_op.m_key; - sendSignal(rg, GSN_DICT_COMMIT_REQ, signal, DictCommitReq::SignalLength, - JBB); - trans_ptr.p->m_op.m_state = DictObjOp::Committing; -} - -void -Dbdict::trans_commit_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 retValue) -{ - Ptr trans_ptr; - - jam(); - ndbrequire(retValue == 0); - ndbrequire(c_Trans.find(trans_ptr, callbackData)); - - switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ - case GSN_CREATE_FILEGROUP_REQ:{ - FilegroupPtr fg_ptr; - jam(); - ndbrequire(c_filegroup_hash.find(fg_ptr, trans_ptr.p->m_op.m_obj_id)); - - CreateFilegroupConf * conf = (CreateFilegroupConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = trans_ptr.p->m_senderData; - conf->filegroupId = fg_ptr.p->key; - conf->filegroupVersion = fg_ptr.p->m_version; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_CONF, signal, - CreateFilegroupConf::SignalLength, JBB); - break; - } - case GSN_CREATE_FILE_REQ:{ - FilePtr f_ptr; - jam(); - ndbrequire(c_file_hash.find(f_ptr, trans_ptr.p->m_op.m_obj_id)); - CreateFileConf * conf = (CreateFileConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = trans_ptr.p->m_senderData; - conf->fileId = f_ptr.p->key; - conf->fileVersion = f_ptr.p->m_version; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_CONF, signal, - CreateFileConf::SignalLength, JBB); - break; - } - case GSN_DROP_FILE_REQ:{ - DropFileConf * conf = (DropFileConf*)signal->getDataPtr(); - jam(); - conf->senderRef = reference(); - conf->senderData = trans_ptr.p->m_senderData; - conf->fileId = trans_ptr.p->m_op.m_obj_id; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILE_CONF, signal, - 
DropFileConf::SignalLength, JBB); - break; - } - case GSN_DROP_FILEGROUP_REQ:{ - DropFilegroupConf * conf = (DropFilegroupConf*)signal->getDataPtr(); - jam(); - conf->senderRef = reference(); - conf->senderData = trans_ptr.p->m_senderData; - conf->filegroupId = trans_ptr.p->m_op.m_obj_id; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILEGROUP_CONF, signal, - DropFilegroupConf::SignalLength, JBB); - break; - } - default: - ndbrequire(false); - } - - c_Trans.release(trans_ptr); - ndbrequire(c_blockState == BS_CREATE_TAB); - c_blockState = BS_IDLE; - return; -} - -void -Dbdict::trans_abort_start_done(Signal* signal, - Uint32 callbackData, - Uint32 retValue) -{ - Ptr trans_ptr; - - jam(); - ndbrequire(retValue == 0); - ndbrequire(c_Trans.find(trans_ptr, callbackData)); - - NodeReceiverGroup rg(DBDICT, trans_ptr.p->m_nodes); - SafeCounter tmp(c_counterMgr, trans_ptr.p->m_counter); - ndbrequire(tmp.init(rg, trans_ptr.p->key)); - - DictAbortReq * const req = (DictAbortReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = trans_ptr.p->key; - req->op_key = trans_ptr.p->m_op.m_key; - - sendSignal(rg, GSN_DICT_ABORT_REQ, signal, DictAbortReq::SignalLength, JBB); -} - -void -Dbdict::trans_abort_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 retValue) -{ - Ptr trans_ptr; - - jam(); - ndbrequire(retValue == 0); - ndbrequire(c_Trans.find(trans_ptr, callbackData)); - - switch(f_dict_op[trans_ptr.p->m_op.m_vt_index].m_gsn_user_req){ - case GSN_CREATE_FILEGROUP_REQ: - { - // - CreateFilegroupRef * ref = (CreateFilegroupRef*)signal->getDataPtr(); - jam(); - ref->senderRef = reference(); - ref->senderData = trans_ptr.p->m_senderData; - ref->masterNodeId = c_masterNodeId; - ref->errorCode = trans_ptr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->status = 0; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILEGROUP_REF, signal, - CreateFilegroupRef::SignalLength, JBB); - break; - } - case GSN_CREATE_FILE_REQ: - { - CreateFileRef * ref = (CreateFileRef*)signal->getDataPtr(); - jam(); - ref->senderRef = reference(); - ref->senderData = trans_ptr.p->m_senderData; - ref->masterNodeId = c_masterNodeId; - ref->errorCode = trans_ptr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->status = 0; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_CREATE_FILE_REF, signal, - CreateFileRef::SignalLength, JBB); - break; - } - case GSN_DROP_FILE_REQ: - { - DropFileRef * ref = (DropFileRef*)signal->getDataPtr(); - jam(); - ref->senderRef = reference(); - ref->senderData = trans_ptr.p->m_senderData; - ref->masterNodeId = c_masterNodeId; - ref->errorCode = trans_ptr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILE_REF, signal, - DropFileRef::SignalLength, JBB); - break; - } - case GSN_DROP_FILEGROUP_REQ: - { - // - DropFilegroupRef * ref = (DropFilegroupRef*)signal->getDataPtr(); - jam(); - ref->senderRef = reference(); - ref->senderData = trans_ptr.p->m_senderData; - ref->masterNodeId = c_masterNodeId; - ref->errorCode = trans_ptr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - - //@todo check api failed - sendSignal(trans_ptr.p->m_senderRef, GSN_DROP_FILEGROUP_REF, signal, - DropFilegroupRef::SignalLength, JBB); - break; - } - default: - ndbrequire(false); - } - - c_Trans.release(trans_ptr); - ndbrequire(c_blockState == BS_CREATE_TAB); - c_blockState = BS_IDLE; - 
return; -} - -void -Dbdict::execCREATE_OBJ_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - CreateObjReq * const req = (CreateObjReq*)signal->getDataPtr(); - const Uint32 gci = req->gci; - const Uint32 objId = req->objId; - const Uint32 objVersion = req->objVersion; - const Uint32 objType = req->objType; - const Uint32 requestInfo = req->requestInfo; - - SegmentedSectionPtr objInfoPtr; - signal->getSection(objInfoPtr, CreateObjReq::DICT_OBJ_INFO); - - CreateObjRecordPtr createObjPtr; - ndbrequire(c_opCreateObj.seize(createObjPtr)); - - const Uint32 key = req->op_key; - createObjPtr.p->key = key; - c_opCreateObj.add(createObjPtr); - createObjPtr.p->m_errorCode = 0; - createObjPtr.p->m_senderRef = req->senderRef; - createObjPtr.p->m_senderData = req->senderData; - createObjPtr.p->m_clientRef = req->clientRef; - createObjPtr.p->m_clientData = req->clientData; - - createObjPtr.p->m_gci = gci; - createObjPtr.p->m_obj_id = objId; - createObjPtr.p->m_obj_type = objType; - createObjPtr.p->m_obj_version = objVersion; - createObjPtr.p->m_obj_info_ptr_i = objInfoPtr.i; - createObjPtr.p->m_obj_ptr_i = RNIL; - - createObjPtr.p->m_callback.m_callbackData = key; - createObjPtr.p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::createObj_prepare_start_done); - - createObjPtr.p->m_restart= 0; - switch(objType){ - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - jam(); - createObjPtr.p->m_vt_index = 0; - break; - case DictTabInfo::Datafile: - case DictTabInfo::Undofile: - /** - * Use restart code to impl. ForceCreateFile - */ - if (requestInfo & CreateFileReq::ForceCreateFile) - { - jam(); - createObjPtr.p->m_restart= 2; - } - jam(); - createObjPtr.p->m_vt_index = 1; - break; - default: - ndbrequire(false); - } - - signal->header.m_noOfSections = 0; - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_start) - (signal, createObjPtr.p); -} - -void -Dbdict::execDICT_COMMIT_REQ(Signal* signal) -{ - DictCommitReq* req = (DictCommitReq*)signal->getDataPtr(); - Ptr<SchemaOp> op; - - jamEntry(); - ndbrequire(c_schemaOp.find(op, req->op_key)); - (this->*f_dict_op[op.p->m_vt_index].m_commit)(signal, op.p); -} - -void -Dbdict::execDICT_ABORT_REQ(Signal* signal) -{ - DictAbortReq* req = (DictAbortReq*)signal->getDataPtr(); - Ptr<SchemaOp> op; - - jamEntry(); - ndbrequire(c_schemaOp.find(op, req->op_key)); - (this->*f_dict_op[op.p->m_vt_index].m_abort)(signal, op.p); -} - -void -Dbdict::execDICT_COMMIT_REF(Signal* signal) -{ - DictCommitRef * const ref = (DictCommitRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictCommitRef::NF_FakeErrorREF){ - jam(); - trans_ptr.p->setErrorCode(ref->errorCode); - } - Uint32 node = refToNode(ref->senderRef); - schemaOp_reply(signal, trans_ptr.p, node); -} - -void -Dbdict::execDICT_COMMIT_CONF(Signal* signal) -{ - Ptr<SchemaTransaction> trans_ptr; - DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); - schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); -} - -void -Dbdict::execDICT_ABORT_REF(Signal* signal) -{ - DictAbortRef * const ref = (DictAbortRef*)signal->getDataPtr(); - Ptr<SchemaTransaction> trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DictAbortRef::NF_FakeErrorREF){ - jam(); - trans_ptr.p->setErrorCode(ref->errorCode); - } - Uint32 node = refToNode(ref->senderRef); - schemaOp_reply(signal, trans_ptr.p, 
node); -} - -void -Dbdict::execDICT_ABORT_CONF(Signal* signal) -{ - DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); - Ptr trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); - schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); -} - -void -Dbdict::createObj_prepare_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - SegmentedSectionPtr objInfoPtr; - - ndbrequire(returnCode == 0); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - jam(); - getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - if(createObjPtr.p->m_errorCode != 0){ - jam(); - createObjPtr.p->m_obj_info_ptr_i= RNIL; - signal->setSection(objInfoPtr, 0); - releaseSections(signal); - createObj_prepare_complete_done(signal, callbackData, 0); - return; - } - - SchemaFile::TableEntry tabEntry; - bzero(&tabEntry, sizeof(tabEntry)); - tabEntry.m_tableVersion = createObjPtr.p->m_obj_version; - tabEntry.m_tableType = createObjPtr.p->m_obj_type; - tabEntry.m_tableState = SchemaFile::ADD_STARTED; - tabEntry.m_gcp = createObjPtr.p->m_gci; - tabEntry.m_info_words = objInfoPtr.sz; - - Callback cb; - cb.m_callbackData = createObjPtr.p->key; - cb.m_callbackFunction = safe_cast(&Dbdict::createObj_writeSchemaConf1); - - updateSchemaState(signal, createObjPtr.p->m_obj_id, &tabEntry, &cb); -} - -void -Dbdict::createObj_writeSchemaConf1(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - Callback callback; - SegmentedSectionPtr objInfoPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - callback.m_callbackData = createObjPtr.p->key; - callback.m_callbackFunction = safe_cast(&Dbdict::createObj_writeObjConf); - - getSection(objInfoPtr, createObjPtr.p->m_obj_info_ptr_i); - writeTableFile(signal, createObjPtr.p->m_obj_id, objInfoPtr, &callback); - - signal->setSection(objInfoPtr, 0); - releaseSections(signal); - createObjPtr.p->m_obj_info_ptr_i = RNIL; -} - -void -Dbdict::createObj_writeObjConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_prepare_complete_done); - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_prepare_complete) - (signal, createObjPtr.p); -} - -void -Dbdict::createObj_prepare_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - //@todo check for master failed - - if(createObjPtr.p->m_errorCode == 0){ - jam(); - - CreateObjConf * const conf = (CreateObjConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createObjPtr.p->m_senderData; - sendSignal(createObjPtr.p->m_senderRef, GSN_CREATE_OBJ_CONF, - signal, CreateObjConf::SignalLength, JBB); - return; - } - - CreateObjRef * const ref = (CreateObjRef*)signal->getDataPtr(); - ref->senderRef = reference(); - ref->senderData = createObjPtr.p->m_senderData; - ref->errorCode = createObjPtr.p->m_errorCode; - ref->errorLine = 0; - ref->errorKey = 0; - ref->errorStatus = 0; - - sendSignal(createObjPtr.p->m_senderRef, GSN_CREATE_OBJ_REF, - signal, CreateObjRef::SignalLength, JBB); -} - -void -Dbdict::createObj_commit(Signal * 
signal, SchemaOp * op) -{ - OpCreateObj * createObj = (OpCreateObj*)op; - - createObj->m_callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_commit_start_done); - if (f_dict_op[createObj->m_vt_index].m_commit_start) - { - jam(); - (this->*f_dict_op[createObj->m_vt_index].m_commit_start)(signal, createObj); - } - else - { - jam(); - execute(signal, createObj->m_callback, 0); - } -} - -void -Dbdict::createObj_commit_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - Uint32 objId = createObjPtr.p->m_obj_id; - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry objEntry = * getTableEntry(xsf, objId); - objEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED; - - Callback callback; - callback.m_callbackData = createObjPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_writeSchemaConf2); - - updateSchemaState(signal, createObjPtr.p->m_obj_id, &objEntry, &callback); - -} - -void -Dbdict::createObj_writeSchemaConf2(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_commit_complete_done); - if (f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) - { - jam(); - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_commit_complete) - (signal, createObjPtr.p); - } - else - { - jam(); - execute(signal, createObjPtr.p->m_callback, 0); - } - -} - -void -Dbdict::createObj_commit_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - //@todo check error - //@todo check master failed - - DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createObjPtr.p->m_senderData; - sendSignal(createObjPtr.p->m_senderRef, GSN_DICT_COMMIT_CONF, - signal, DictCommitConf::SignalLength, JBB); - - c_opCreateObj.release(createObjPtr); -} - -void -Dbdict::createObj_abort(Signal* signal, SchemaOp* op) -{ - OpCreateObj * createObj = (OpCreateObj*)op; - - createObj->m_callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_abort_start_done); - if (f_dict_op[createObj->m_vt_index].m_abort_start) - { - jam(); - (this->*f_dict_op[createObj->m_vt_index].m_abort_start)(signal, createObj); - } - else - { - jam(); - execute(signal, createObj->m_callback, 0); - } -} - -void -Dbdict::createObj_abort_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry objEntry = * getTableEntry(xsf, - createObjPtr.p->m_obj_id); - objEntry.m_tableState = SchemaFile::DROP_TABLE_COMMITTED; - - Callback callback; - callback.m_callbackData = createObjPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_abort_writeSchemaConf); - - updateSchemaState(signal, createObjPtr.p->m_obj_id, &objEntry, &callback); -} - -void -Dbdict::createObj_abort_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - 
ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - createObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::createObj_abort_complete_done); - - if (f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) - { - jam(); - (this->*f_dict_op[createObjPtr.p->m_vt_index].m_abort_complete) - (signal, createObjPtr.p); - } - else - { - jam(); - execute(signal, createObjPtr.p->m_callback, 0); - } -} - -void -Dbdict::createObj_abort_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - CreateObjRecordPtr createObjPtr; - - jam(); - ndbrequire(c_opCreateObj.find(createObjPtr, callbackData)); - - DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = createObjPtr.p->m_senderData; - sendSignal(createObjPtr.p->m_senderRef, GSN_DICT_ABORT_CONF, - signal, DictAbortConf::SignalLength, JBB); - - c_opCreateObj.release(createObjPtr); -} - -void -Dbdict::execDROP_OBJ_REQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - DropObjReq * const req = (DropObjReq*)signal->getDataPtr(); - - const Uint32 objId = req->objId; - const Uint32 objVersion = req->objVersion; - const Uint32 objType = req->objType; - - DropObjRecordPtr dropObjPtr; - ndbrequire(c_opDropObj.seize(dropObjPtr)); - - const Uint32 key = req->op_key; - dropObjPtr.p->key = key; - c_opDropObj.add(dropObjPtr); - dropObjPtr.p->m_errorCode = 0; - dropObjPtr.p->m_senderRef = req->senderRef; - dropObjPtr.p->m_senderData = req->senderData; - dropObjPtr.p->m_clientRef = req->clientRef; - dropObjPtr.p->m_clientData = req->clientData; - - dropObjPtr.p->m_obj_id = objId; - dropObjPtr.p->m_obj_type = objType; - dropObjPtr.p->m_obj_version = objVersion; - - dropObjPtr.p->m_callback.m_callbackData = key; - dropObjPtr.p->m_callback.m_callbackFunction= - safe_cast(&Dbdict::dropObj_prepare_start_done); - - switch(objType){ - case DictTabInfo::Tablespace: - case DictTabInfo::LogfileGroup: - { - Ptr fg_ptr; - jam(); - dropObjPtr.p->m_vt_index = 3; - ndbrequire(c_filegroup_hash.find(fg_ptr, objId)); - dropObjPtr.p->m_obj_ptr_i = fg_ptr.i; - break; - - } - case DictTabInfo::Datafile: - { - Ptr file_ptr; - jam(); - dropObjPtr.p->m_vt_index = 2; - ndbrequire(c_file_hash.find(file_ptr, objId)); - dropObjPtr.p->m_obj_ptr_i = file_ptr.i; - break; - } - case DictTabInfo::Undofile: - { - jam(); - dropObjPtr.p->m_vt_index = 4; - return; - } - default: - ndbrequire(false); - } - - signal->header.m_noOfSections = 0; - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_start) - (signal, dropObjPtr.p); -} - -void -Dbdict::dropObj_prepare_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - Callback cb; - - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - - cb.m_callbackData = callbackData; - cb.m_callbackFunction = - safe_cast(&Dbdict::dropObj_prepare_writeSchemaConf); - - if(dropObjPtr.p->m_errorCode != 0) - { - jam(); - dropObj_prepare_complete_done(signal, callbackData, 0); - return; - } - jam(); - Uint32 objId = dropObjPtr.p->m_obj_id; - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry objEntry = *getTableEntry(xsf, objId); - objEntry.m_tableState = SchemaFile::DROP_TABLE_STARTED; - updateSchemaState(signal, objId, &objEntry, &cb); -} - -void -Dbdict::dropObj_prepare_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - 
ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_prepare_complete_done); - if(f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) - { - jam(); - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_prepare_complete) - (signal, dropObjPtr.p); - } - else - { - jam(); - execute(signal, dropObjPtr.p->m_callback, 0); - } -} - -void -Dbdict::dropObj_prepare_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - jam(); - - //@todo check for master failed - - if(dropObjPtr.p->m_errorCode == 0){ - jam(); - - DropObjConf * const conf = (DropObjConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = dropObjPtr.p->m_senderData; - sendSignal(dropObjPtr.p->m_senderRef, GSN_DROP_OBJ_CONF, - signal, DropObjConf::SignalLength, JBB); - return; - } - - DropObjRef * const ref = (DropObjRef*)signal->getDataPtr(); - ref->senderRef = reference(); - ref->senderData = dropObjPtr.p->m_senderData; - ref->errorCode = dropObjPtr.p->m_errorCode; - - sendSignal(dropObjPtr.p->m_senderRef, GSN_DROP_OBJ_REF, - signal, DropObjRef::SignalLength, JBB); - -} - -void -Dbdict::dropObj_commit(Signal * signal, SchemaOp * op) -{ - OpDropObj * dropObj = (OpDropObj*)op; - - dropObj->m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_commit_start_done); - if (f_dict_op[dropObj->m_vt_index].m_commit_start) - { - jam(); - (this->*f_dict_op[dropObj->m_vt_index].m_commit_start)(signal, dropObj); - } - else - { - jam(); - execute(signal, dropObj->m_callback, 0); - } -} - -void -Dbdict::dropObj_commit_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - - Uint32 objId = dropObjPtr.p->m_obj_id; - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry objEntry = * getTableEntry(xsf, objId); - objEntry.m_tableState = SchemaFile::DROP_TABLE_COMMITTED; - - Callback callback; - callback.m_callbackData = dropObjPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_commit_writeSchemaConf); - - updateSchemaState(signal, objId, &objEntry, &callback); -} - -void -Dbdict::dropObj_commit_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_commit_complete_done); - - if(f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - { - jam(); - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_commit_complete) - (signal, dropObjPtr.p); - } - else - { - jam(); - execute(signal, dropObjPtr.p->m_callback, 0); - } -} - -void -Dbdict::dropObj_commit_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - jam(); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - - //@todo check error - //@todo check master failed - - DictCommitConf * const conf = (DictCommitConf*)signal->getDataPtr(); - conf->senderRef = reference(); - conf->senderData = dropObjPtr.p->m_senderData; - sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_COMMIT_CONF, - signal, DictCommitConf::SignalLength, JBB); - 
c_opDropObj.release(dropObjPtr); -} - -void -Dbdict::dropObj_abort(Signal * signal, SchemaOp * op) -{ - OpDropObj * dropObj = (OpDropObj*)op; - - dropObj->m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_abort_start_done); - if (f_dict_op[dropObj->m_vt_index].m_abort_start) - { - jam(); - (this->*f_dict_op[dropObj->m_vt_index].m_abort_start)(signal, dropObj); - } - else - { - jam(); - execute(signal, dropObj->m_callback, 0); - } -} - -void -Dbdict::dropObj_abort_start_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - jam(); - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - SchemaFile::TableEntry objEntry = * getTableEntry(xsf, - dropObjPtr.p->m_obj_id); - - Callback callback; - callback.m_callbackData = dropObjPtr.p->key; - callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_abort_writeSchemaConf); - - if (objEntry.m_tableState == SchemaFile::DROP_TABLE_STARTED) - { - jam(); - objEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED; - - updateSchemaState(signal, dropObjPtr.p->m_obj_id, &objEntry, &callback); - } - else - { - jam(); - execute(signal, callback, 0); - } -} - -void -Dbdict::dropObj_abort_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - - ndbrequire(returnCode == 0); - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - dropObjPtr.p->m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropObj_abort_complete_done); - - if(f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) - { - jam(); - (this->*f_dict_op[dropObjPtr.p->m_vt_index].m_abort_complete) - (signal, dropObjPtr.p); - } - else - { - jam(); - execute(signal, dropObjPtr.p->m_callback, 0); - } -} - -void -Dbdict::dropObj_abort_complete_done(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - DropObjRecordPtr dropObjPtr; - DictAbortConf * const conf = (DictAbortConf*)signal->getDataPtr(); - - ndbrequire(c_opDropObj.find(dropObjPtr, callbackData)); - jam(); - conf->senderRef = reference(); - conf->senderData = dropObjPtr.p->m_senderData; - sendSignal(dropObjPtr.p->m_senderRef, GSN_DICT_ABORT_CONF, - signal, DictAbortConf::SignalLength, JBB); - c_opDropObj.release(dropObjPtr); -} - -void -Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op) -{ - /** - * Put data into table record - */ - SegmentedSectionPtr objInfoPtr; - jam(); - getSection(objInfoPtr, ((OpCreateObj*)op)->m_obj_info_ptr_i); - SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool()); - - Ptr obj_ptr; obj_ptr.setNull(); - FilegroupPtr fg_ptr; fg_ptr.setNull(); - - SimpleProperties::UnpackStatus status; - DictFilegroupInfo::Filegroup fg; fg.init(); - do { - status = SimpleProperties::unpack(it, &fg, - DictFilegroupInfo::Mapping, - DictFilegroupInfo::MappingSize, - true, true); - - if(status != SimpleProperties::Eof) - { - jam(); - op->m_errorCode = CreateTableRef::InvalidFormat; - break; - } - - if(fg.FilegroupType == DictTabInfo::Tablespace) - { - if(!fg.TS_ExtentSize) - { - jam(); - op->m_errorCode = CreateFilegroupRef::InvalidExtentSize; - break; - } - } - else if(fg.FilegroupType == DictTabInfo::LogfileGroup) - { - /** - * undo_buffer_size can't be less than 96KB in LGMAN block - */ - if(fg.LF_UndoBufferSize < 3 * File_formats::NDB_PAGE_SIZE) - { - jam(); - op->m_errorCode = CreateFilegroupRef::InvalidUndoBufferSize; - break; - } - } - - Uint32 len = strlen(fg.FilegroupName) 
+ 1; - Uint32 hash = Rope::hash(fg.FilegroupName, len); - if(get_object(fg.FilegroupName, len, hash) != 0){ - jam(); - op->m_errorCode = CreateTableRef::TableAlreadyExist; - break; - } - - if(!c_obj_pool.seize(obj_ptr)){ - jam(); - op->m_errorCode = CreateTableRef::NoMoreTableRecords; - break; - } - - if(!c_filegroup_pool.seize(fg_ptr)){ - jam(); - op->m_errorCode = CreateTableRef::NoMoreTableRecords; - break; - } - - new (fg_ptr.p) Filegroup(); - - { - Rope name(c_rope_pool, obj_ptr.p->m_name); - if(!name.assign(fg.FilegroupName, len, hash)){ - jam(); - op->m_errorCode = CreateTableRef::OutOfStringBuffer; - break; - } - } - - fg_ptr.p->key = op->m_obj_id; - fg_ptr.p->m_obj_ptr_i = obj_ptr.i; - fg_ptr.p->m_type = fg.FilegroupType; - fg_ptr.p->m_version = op->m_obj_version; - fg_ptr.p->m_name = obj_ptr.p->m_name; - - switch(fg.FilegroupType){ - case DictTabInfo::Tablespace: - { - //fg.TS_DataGrow = group.m_grow_spec; - fg_ptr.p->m_tablespace.m_extent_size = fg.TS_ExtentSize; - fg_ptr.p->m_tablespace.m_default_logfile_group_id = fg.TS_LogfileGroupId; - - Ptr lg_ptr; - if (!c_filegroup_hash.find(lg_ptr, fg.TS_LogfileGroupId)) - { - jam(); - op->m_errorCode = CreateFilegroupRef::NoSuchLogfileGroup; - goto error; - } - - if (lg_ptr.p->m_version != fg.TS_LogfileGroupVersion) - { - jam(); - op->m_errorCode = CreateFilegroupRef::InvalidFilegroupVersion; - goto error; - } - increase_ref_count(lg_ptr.p->m_obj_ptr_i); - break; - } - case DictTabInfo::LogfileGroup: - { - jam(); - fg_ptr.p->m_logfilegroup.m_undo_buffer_size = fg.LF_UndoBufferSize; - fg_ptr.p->m_logfilegroup.m_files.init(); - //fg.LF_UndoGrow = ; - break; - } - default: - ndbrequire(false); - } - - obj_ptr.p->m_id = op->m_obj_id; - obj_ptr.p->m_type = fg.FilegroupType; - obj_ptr.p->m_ref_count = 0; - c_obj_hash.add(obj_ptr); - c_filegroup_hash.add(fg_ptr); - - op->m_obj_ptr_i = fg_ptr.i; - } while(0); - -error: - if (op->m_errorCode) - { - jam(); - if (!fg_ptr.isNull()) - { - jam(); - c_filegroup_pool.release(fg_ptr); - } - - if (!obj_ptr.isNull()) - { - jam(); - c_obj_pool.release(obj_ptr); - } - } - - execute(signal, op->m_callback, 0); -} - -void -Dbdict::create_fg_prepare_complete(Signal* signal, SchemaOp* op) -{ - /** - * CONTACT TSMAN LGMAN PGMAN - */ - CreateFilegroupImplReq* req = - (CreateFilegroupImplReq*)signal->getDataPtrSend(); - jam(); - req->senderData = op->key; - req->senderRef = reference(); - req->filegroup_id = op->m_obj_id; - req->filegroup_version = op->m_obj_version; - - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - - Uint32 ref= 0; - Uint32 len= 0; - switch(op->m_obj_type){ - case DictTabInfo::Tablespace: - { - jam(); - ref = TSMAN_REF; - len = CreateFilegroupImplReq::TablespaceLength; - req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; - req->tablespace.logfile_group_id = - fg_ptr.p->m_tablespace.m_default_logfile_group_id; - break; - } - case DictTabInfo::LogfileGroup: - { - jam(); - ref = LGMAN_REF; - len = CreateFilegroupImplReq::LogfileGroupLength; - req->logfile_group.buffer_size = - fg_ptr.p->m_logfilegroup.m_undo_buffer_size; - break; - } - default: - ndbrequire(false); - } - - sendSignal(ref, GSN_CREATE_FILEGROUP_REQ, signal, len, JBB); -} - -void -Dbdict::execCREATE_FILEGROUP_REF(Signal* signal) -{ - CreateFilegroupImplRef * ref = (CreateFilegroupImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; - jamEntry(); - ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); - op_ptr.p->m_errorCode = ref->errorCode; - - execute(signal, 
op_ptr.p->m_callback, 0); -} - -void -Dbdict::execCREATE_FILEGROUP_CONF(Signal* signal) -{ - CreateFilegroupImplConf * rep = - (CreateFilegroupImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; - jamEntry(); - ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); - - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::create_fg_abort_start(Signal* signal, SchemaOp* op){ - (void) signal->getDataPtrSend(); - - if (op->m_obj_ptr_i != RNIL) - { - jam(); - send_drop_fg(signal, op, DropFilegroupImplReq::Commit); - return; - } - jam(); - execute(signal, op->m_callback, 0); -} - -void -Dbdict::create_fg_abort_complete(Signal* signal, SchemaOp* op) -{ - if (op->m_obj_ptr_i != RNIL) - { - jam(); - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - - release_object(fg_ptr.p->m_obj_ptr_i); - c_filegroup_hash.release(fg_ptr); - } - jam(); - execute(signal, op->m_callback, 0); -} - -void -Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op) -{ - /** - * Put data into table record - */ - SegmentedSectionPtr objInfoPtr; - getSection(objInfoPtr, ((OpCreateObj*)op)->m_obj_info_ptr_i); - SimplePropertiesSectionReader it(objInfoPtr, getSectionSegmentPool()); - - Ptr obj_ptr; obj_ptr.setNull(); - FilePtr filePtr; filePtr.setNull(); - - DictFilegroupInfo::File f; f.init(); - SimpleProperties::UnpackStatus status; - status = SimpleProperties::unpack(it, &f, - DictFilegroupInfo::FileMapping, - DictFilegroupInfo::FileMappingSize, - true, true); - - do { - if(status != SimpleProperties::Eof){ - jam(); - op->m_errorCode = CreateFileRef::InvalidFormat; - break; - } - - // Get Filegroup - FilegroupPtr fg_ptr; - if(!c_filegroup_hash.find(fg_ptr, f.FilegroupId)){ - jam(); - op->m_errorCode = CreateFileRef::NoSuchFilegroup; - break; - } - - if(fg_ptr.p->m_version != f.FilegroupVersion){ - jam(); - op->m_errorCode = CreateFileRef::InvalidFilegroupVersion; - break; - } - - switch(f.FileType){ - case DictTabInfo::Datafile: - { - if(fg_ptr.p->m_type != DictTabInfo::Tablespace) - { - jam(); - op->m_errorCode = CreateFileRef::InvalidFileType; - } - jam(); - break; - } - case DictTabInfo::Undofile: - { - if(fg_ptr.p->m_type != DictTabInfo::LogfileGroup) - { - jam(); - op->m_errorCode = CreateFileRef::InvalidFileType; - } - jam(); - break; - } - default: - jam(); - op->m_errorCode = CreateFileRef::InvalidFileType; - } - - if(op->m_errorCode) - { - jam(); - break; - } - - Uint32 len = strlen(f.FileName) + 1; - Uint32 hash = Rope::hash(f.FileName, len); - if(get_object(f.FileName, len, hash) != 0){ - jam(); - op->m_errorCode = CreateFileRef::FilenameAlreadyExists; - break; - } - - { - Uint32 dl; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - if(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &dl) && dl) - { - jam(); - op->m_errorCode = CreateFileRef::NotSupportedWhenDiskless; - break; - } - } - - // Loop through all filenames... - if(!c_obj_pool.seize(obj_ptr)){ - jam(); - op->m_errorCode = CreateTableRef::NoMoreTableRecords; - break; - } - - if (! 
c_file_pool.seize(filePtr)){ - jam(); - op->m_errorCode = CreateFileRef::OutOfFileRecords; - break; - } - - new (filePtr.p) File(); - - { - Rope name(c_rope_pool, obj_ptr.p->m_name); - if(!name.assign(f.FileName, len, hash)){ - jam(); - op->m_errorCode = CreateTableRef::OutOfStringBuffer; - break; - } - } - - switch(fg_ptr.p->m_type){ - case DictTabInfo::Tablespace: - { - jam(); - increase_ref_count(fg_ptr.p->m_obj_ptr_i); - break; - } - case DictTabInfo::LogfileGroup: - { - jam(); - Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); - list.add(filePtr); - break; - } - default: - ndbrequire(false); - } - - /** - * Init file - */ - filePtr.p->key = op->m_obj_id; - filePtr.p->m_file_size = ((Uint64)f.FileSizeHi) << 32 | f.FileSizeLo; - filePtr.p->m_path = obj_ptr.p->m_name; - filePtr.p->m_obj_ptr_i = obj_ptr.i; - filePtr.p->m_filegroup_id = f.FilegroupId; - filePtr.p->m_type = f.FileType; - filePtr.p->m_version = op->m_obj_version; - - obj_ptr.p->m_id = op->m_obj_id; - obj_ptr.p->m_type = f.FileType; - obj_ptr.p->m_ref_count = 0; - c_obj_hash.add(obj_ptr); - c_file_hash.add(filePtr); - - op->m_obj_ptr_i = filePtr.i; - } while(0); - - if (op->m_errorCode) - { - jam(); - if (!filePtr.isNull()) - { - jam(); - c_file_pool.release(filePtr); - } - - if (!obj_ptr.isNull()) - { - jam(); - c_obj_pool.release(obj_ptr); - } - } - execute(signal, op->m_callback, 0); -} - - -void -Dbdict::create_file_prepare_complete(Signal* signal, SchemaOp* op) -{ - /** - * CONTACT TSMAN LGMAN PGMAN - */ - CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - - req->senderData = op->key; - req->senderRef = reference(); - switch(((OpCreateObj*)op)->m_restart){ - case 0: - { - jam(); - req->requestInfo = CreateFileImplReq::Create; - break; - } - case 1: - { - jam(); - req->requestInfo = CreateFileImplReq::Open; - break; - } - case 2: - { - jam(); - req->requestInfo = CreateFileImplReq::CreateForce; - break; - } - } - - req->file_id = f_ptr.p->key; - req->filegroup_id = f_ptr.p->m_filegroup_id; - req->filegroup_version = fg_ptr.p->m_version; - req->file_size_hi = f_ptr.p->m_file_size >> 32; - req->file_size_lo = f_ptr.p->m_file_size & 0xFFFFFFFF; - - Uint32 ref= 0; - Uint32 len= 0; - switch(op->m_obj_type){ - case DictTabInfo::Datafile: - { - jam(); - ref = TSMAN_REF; - len = CreateFileImplReq::DatafileLength; - req->tablespace.extent_size = fg_ptr.p->m_tablespace.m_extent_size; - break; - } - case DictTabInfo::Undofile: - { - jam(); - ref = LGMAN_REF; - len = CreateFileImplReq::UndofileLength; - break; - } - default: - ndbrequire(false); - } - - char name[MAX_TAB_NAME_SIZE]; - ConstRope tmp(c_rope_pool, f_ptr.p->m_path); - tmp.copy(name); - LinearSectionPtr ptr[3]; - ptr[0].p = (Uint32*)&name[0]; - ptr[0].sz = (strlen(name)+1+3)/4; - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, len, JBB, ptr, 1); -} - -void -Dbdict::execCREATE_FILE_REF(Signal* signal) -{ - CreateFileImplRef * ref = (CreateFileImplRef*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; - - jamEntry(); - ndbrequire(c_opCreateObj.find(op_ptr, ref->senderData)); - op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::execCREATE_FILE_CONF(Signal* signal) -{ - CreateFileImplConf * rep = - (CreateFileImplConf*)signal->getDataPtr(); - CreateObjRecordPtr op_ptr; - - jamEntry(); - 
ndbrequire(c_opCreateObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::create_file_commit_start(Signal* signal, SchemaOp* op) -{ - /** - * CONTACT TSMAN LGMAN PGMAN - */ - CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - - req->senderData = op->key; - req->senderRef = reference(); - req->requestInfo = CreateFileImplReq::Commit; - - req->file_id = f_ptr.p->key; - req->filegroup_id = f_ptr.p->m_filegroup_id; - req->filegroup_version = fg_ptr.p->m_version; - - Uint32 ref= 0; - switch(op->m_obj_type){ - case DictTabInfo::Datafile: - { - jam(); - ref = TSMAN_REF; - break; - } - case DictTabInfo::Undofile: - { - jam(); - ref = LGMAN_REF; - break; - } - default: - ndbrequire(false); - } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, - CreateFileImplReq::CommitLength, JBB); -} - -void -Dbdict::create_file_abort_start(Signal* signal, SchemaOp* op) -{ - CreateFileImplReq* req = (CreateFileImplReq*)signal->getDataPtrSend(); - - if (op->m_obj_ptr_i != RNIL) - { - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - - req->senderData = op->key; - req->senderRef = reference(); - req->requestInfo = CreateFileImplReq::Abort; - - req->file_id = f_ptr.p->key; - req->filegroup_id = f_ptr.p->m_filegroup_id; - req->filegroup_version = fg_ptr.p->m_version; - - Uint32 ref= 0; - switch(op->m_obj_type){ - case DictTabInfo::Datafile: - { - jam(); - ref = TSMAN_REF; - break; - } - case DictTabInfo::Undofile: - { - jam(); - ref = LGMAN_REF; - break; - } - default: - ndbrequire(false); - } - sendSignal(ref, GSN_CREATE_FILE_REQ, signal, - CreateFileImplReq::AbortLength, JBB); - return; - } - execute(signal, op->m_callback, 0); -} - -void -Dbdict::create_file_abort_complete(Signal* signal, SchemaOp* op) -{ - if (op->m_obj_ptr_i != RNIL) - { - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - switch(fg_ptr.p->m_type){ - case DictTabInfo::Tablespace: - { - jam(); - decrease_ref_count(fg_ptr.p->m_obj_ptr_i); - break; - } - case DictTabInfo::LogfileGroup: - { - jam(); - Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); - list.remove(f_ptr); - break; - } - default: - ndbrequire(false); - } - - release_object(f_ptr.p->m_obj_ptr_i); - c_file_hash.release(f_ptr); - } - execute(signal, op->m_callback, 0); -} - -void -Dbdict::drop_file_prepare_start(Signal* signal, SchemaOp* op) -{ - jam(); - send_drop_file(signal, op, DropFileImplReq::Prepare); -} - -void -Dbdict::drop_undofile_prepare_start(Signal* signal, SchemaOp* op) -{ - jam(); - op->m_errorCode = DropFileRef::DropUndoFileNotSupported; - execute(signal, op->m_callback, 0); -} - -void -Dbdict::drop_file_commit_start(Signal* signal, SchemaOp* op) -{ - jam(); - send_drop_file(signal, op, DropFileImplReq::Commit); -} - -void -Dbdict::drop_file_commit_complete(Signal* signal, SchemaOp* op) -{ - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - decrease_ref_count(fg_ptr.p->m_obj_ptr_i); - release_object(f_ptr.p->m_obj_ptr_i); - c_file_hash.release(f_ptr); - execute(signal, 
op->m_callback, 0); -} - -void -Dbdict::drop_undofile_commit_complete(Signal* signal, SchemaOp* op) -{ - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); - list.remove(f_ptr); - release_object(f_ptr.p->m_obj_ptr_i); - c_file_hash.release(f_ptr); - execute(signal, op->m_callback, 0); -} - -void -Dbdict::drop_file_abort_start(Signal* signal, SchemaOp* op) -{ - jam(); - send_drop_file(signal, op, DropFileImplReq::Abort); -} - -void -Dbdict::send_drop_file(Signal* signal, SchemaOp* op, - DropFileImplReq::RequestInfo type) -{ - DropFileImplReq* req = (DropFileImplReq*)signal->getDataPtrSend(); - FilePtr f_ptr; - FilegroupPtr fg_ptr; - - jam(); - c_file_pool.getPtr(f_ptr, op->m_obj_ptr_i); - ndbrequire(c_filegroup_hash.find(fg_ptr, f_ptr.p->m_filegroup_id)); - - req->senderData = op->key; - req->senderRef = reference(); - req->requestInfo = type; - - req->file_id = f_ptr.p->key; - req->filegroup_id = f_ptr.p->m_filegroup_id; - req->filegroup_version = fg_ptr.p->m_version; - - Uint32 ref= 0; - switch(op->m_obj_type){ - case DictTabInfo::Datafile: - { - jam(); - ref = TSMAN_REF; - break; - } - case DictTabInfo::Undofile: - { - jam(); - ref = LGMAN_REF; - break; - } - default: - ndbrequire(false); - } - sendSignal(ref, GSN_DROP_FILE_REQ, signal, - DropFileImplReq::SignalLength, JBB); -} - -void -Dbdict::execDROP_OBJ_REF(Signal* signal) -{ - DropObjRef * const ref = (DropObjRef*)signal->getDataPtr(); - Ptr trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, ref->senderData)); - if(ref->errorCode != DropObjRef::NF_FakeErrorREF){ - jam(); - trans_ptr.p->setErrorCode(ref->errorCode); - } - Uint32 node = refToNode(ref->senderRef); - schemaOp_reply(signal, trans_ptr.p, node); -} - -void -Dbdict::execDROP_OBJ_CONF(Signal* signal) -{ - DropObjConf * const conf = (DropObjConf*)signal->getDataPtr(); - Ptr trans_ptr; - - jamEntry(); - ndbrequire(c_Trans.find(trans_ptr, conf->senderData)); - schemaOp_reply(signal, trans_ptr.p, refToNode(conf->senderRef)); -} - -void -Dbdict::execDROP_FILE_REF(Signal* signal) -{ - DropFileImplRef * ref = (DropFileImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; - - jamEntry(); - ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); - op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::execDROP_FILE_CONF(Signal* signal) -{ - DropFileImplConf * rep = - (DropFileImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; - - jamEntry(); - ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::execDROP_FILEGROUP_REF(Signal* signal) -{ - DropFilegroupImplRef * ref = (DropFilegroupImplRef*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; - - jamEntry(); - ndbrequire(c_opDropObj.find(op_ptr, ref->senderData)); - op_ptr.p->m_errorCode = ref->errorCode; - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::execDROP_FILEGROUP_CONF(Signal* signal) -{ - DropFilegroupImplConf * rep = - (DropFilegroupImplConf*)signal->getDataPtr(); - DropObjRecordPtr op_ptr; - - jamEntry(); - ndbrequire(c_opDropObj.find(op_ptr, rep->senderData)); - execute(signal, op_ptr.p->m_callback, 0); -} - -void -Dbdict::drop_fg_prepare_start(Signal* signal, SchemaOp* op) -{ - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - - DictObject * obj = 
c_obj_pool.getPtr(fg_ptr.p->m_obj_ptr_i); - if (obj->m_ref_count) - { - jam(); - op->m_errorCode = DropFilegroupRef::FilegroupInUse; - execute(signal, op->m_callback, 0); - } - else - { - jam(); - send_drop_fg(signal, op, DropFilegroupImplReq::Prepare); - } -} - -void -Dbdict::drop_fg_commit_start(Signal* signal, SchemaOp* op) -{ - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - if (op->m_obj_type == DictTabInfo::LogfileGroup) - { - jam(); - /** - * Mark all undofiles as dropped - */ - Ptr filePtr; - Local_file_list list(c_file_pool, fg_ptr.p->m_logfilegroup.m_files); - XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; - for(list.first(filePtr); !filePtr.isNull(); list.next(filePtr)) - { - jam(); - Uint32 objId = filePtr.p->key; - SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, objId); - tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; - computeChecksum(xsf, objId / NDB_SF_PAGE_ENTRIES); - release_object(filePtr.p->m_obj_ptr_i); - c_file_hash.remove(filePtr); - } - list.release(); - } - else if(op->m_obj_type == DictTabInfo::Tablespace) - { - FilegroupPtr lg_ptr; - jam(); - ndbrequire(c_filegroup_hash. - find(lg_ptr, - fg_ptr.p->m_tablespace.m_default_logfile_group_id)); - - decrease_ref_count(lg_ptr.p->m_obj_ptr_i); - } - jam(); - send_drop_fg(signal, op, DropFilegroupImplReq::Commit); -} - -void -Dbdict::drop_fg_commit_complete(Signal* signal, SchemaOp* op) -{ - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - - jam(); - release_object(fg_ptr.p->m_obj_ptr_i); - c_filegroup_hash.release(fg_ptr); - execute(signal, op->m_callback, 0); -} - -void -Dbdict::drop_fg_abort_start(Signal* signal, SchemaOp* op) -{ - jam(); - send_drop_fg(signal, op, DropFilegroupImplReq::Abort); -} - -void -Dbdict::send_drop_fg(Signal* signal, SchemaOp* op, - DropFilegroupImplReq::RequestInfo type) -{ - DropFilegroupImplReq* req = (DropFilegroupImplReq*)signal->getDataPtrSend(); - - FilegroupPtr fg_ptr; - c_filegroup_pool.getPtr(fg_ptr, op->m_obj_ptr_i); - - req->senderData = op->key; - req->senderRef = reference(); - req->requestInfo = type; - - req->filegroup_id = fg_ptr.p->key; - req->filegroup_version = fg_ptr.p->m_version; - - Uint32 ref= 0; - switch(op->m_obj_type){ - case DictTabInfo::Tablespace: - ref = TSMAN_REF; - break; - case DictTabInfo::LogfileGroup: - ref = LGMAN_REF; - break; - default: - ndbrequire(false); - } - - sendSignal(ref, GSN_DROP_FILEGROUP_REQ, signal, - DropFilegroupImplReq::SignalLength, JBB); -} - -/* - return 1 if all of the below is true - a) node in single user mode - b) senderRef is not a db node - c) senderRef nodeid is not the singleUserApi -*/ -int Dbdict::checkSingleUserMode(Uint32 senderRef) -{ - Uint32 nodeId = refToNode(senderRef); - return - getNodeState().getSingleUserMode() && - (getNodeInfo(nodeId).m_type != NodeInfo::DB) && - (nodeId != getNodeState().getSingleUserApi()); -} - diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp deleted file mode 100644 index 6d738740e87..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ /dev/null @@ -1,2707 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBDICT_H -#define DBDICT_H - -/** - * Dict : Dictionary Block - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "SchemaFile.hpp" -#include -#include -#include -#include -#include -#include -#include - -#ifdef DBDICT_C -// Debug Macros - -/*--------------------------------------------------------------*/ -// Constants for CONTINUEB -/*--------------------------------------------------------------*/ -#define ZPACK_TABLE_INTO_PAGES 0 -#define ZSEND_GET_TAB_RESPONSE 3 -#define ZDICT_LOCK_POLL 4 - - -/*--------------------------------------------------------------*/ -// Other constants in alphabetical order -/*--------------------------------------------------------------*/ -#define ZNOMOREPHASES 255 - -/*--------------------------------------------------------------*/ -// Schema file defines -/*--------------------------------------------------------------*/ -#define ZSCHEMA_WORDS 4 - -/*--------------------------------------------------------------*/ -// Page constants -/*--------------------------------------------------------------*/ -#define ZBAT_SCHEMA_FILE 0 //Variable number of page for NDBFS -#define ZBAT_TABLE_FILE 1 //Variable number of page for NDBFS -#define ZPAGE_HEADER_SIZE 32 -#define ZPOS_PAGE_SIZE 16 -#define ZPOS_CHECKSUM 17 -#define ZPOS_VERSION 18 -#define ZPOS_PAGE_HEADER_SIZE 19 - -/*--------------------------------------------------------------*/ -// Size constants -/*--------------------------------------------------------------*/ -#define ZFS_CONNECT_SIZE 4 -#define ZSIZE_OF_PAGES_IN_WORDS 8192 -#define ZLOG_SIZE_OF_PAGES_IN_WORDS 13 -#define ZMAX_PAGES_OF_TABLE_DEFINITION 8 -#define ZNUMBER_OF_PAGES (ZMAX_PAGES_OF_TABLE_DEFINITION + 1) -#define ZNO_OF_FRAGRECORD 5 - -/*--------------------------------------------------------------*/ -// Error codes -/*--------------------------------------------------------------*/ -#define ZNODE_FAILURE_ERROR 704 -#endif - -/** - * Systable NDB$EVENTS_0 - */ -#define EVENT_SYSTEM_TABLE_LENGTH 8 - -struct sysTab_NDBEVENTS_0 { - char NAME[MAX_TAB_NAME_SIZE]; - Uint32 EVENT_TYPE; - Uint32 TABLEID; - Uint32 TABLEVERSION; - char TABLE_NAME[MAX_TAB_NAME_SIZE]; - Uint32 ATTRIBUTE_MASK[MAXNROFATTRIBUTESINWORDS]; - Uint32 SUBID; - Uint32 SUBKEY; -}; - -/** - * DICT - This blocks handles all metadata - */ -class Dbdict: public SimulatedBlock { -public: - /* - * 2.3 RECORD AND FILESIZES - */ - - /** - * Table attributes. Permanent data. - * - * Indexes have an attribute list which duplicates primary table - * attributes. This is wrong but convenient. 
- */ - struct AttributeRecord { - AttributeRecord(){} - - /* attribute id */ - Uint16 attributeId; - - /* Attribute number within tuple key (counted from 1) */ - Uint16 tupleKey; - - /* Attribute name (unique within table) */ - RopeHandle attributeName; - - /* Attribute description (old-style packed descriptor) */ - Uint32 attributeDescriptor; - - /* Extended attributes */ - Uint32 extType; - Uint32 extPrecision; - Uint32 extScale; - Uint32 extLength; - - /* Autoincrement flag, only for ODBC/SQL */ - bool autoIncrement; - - /* Default value as null-terminated string, only for ODBC/SQL */ - RopeHandle defaultValue; - - struct { - Uint32 m_name_len; - const char * m_name_ptr; - RopePool * m_pool; - } m_key; - - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - Uint32 nextHash; - Uint32 prevHash; - - Uint32 hashValue() const { return attributeName.hashValue();} - bool equal(const AttributeRecord& obj) const { - if(obj.hashValue() == hashValue()){ - ConstRope r(* m_key.m_pool, obj.attributeName); - return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0; - } - return false; - } - - /** Singly linked in internal (attributeId) order */ - // TODO use DL template when possible to have more than 1 - Uint32 nextAttributeIdPtrI; - }; - typedef Ptr AttributeRecordPtr; - ArrayPool c_attributeRecordPool; - DLHashTable c_attributeRecordHash; - - /** - * Shared table / index record. Most of this is permanent data stored - * on disk. Index trigger ids are volatile. - */ - struct TableRecord { - TableRecord(){} - Uint32 maxRowsLow; - Uint32 maxRowsHigh; - Uint32 minRowsLow; - Uint32 minRowsHigh; - /* Table id (array index in DICT and other blocks) */ - Uint32 tableId; - Uint32 m_obj_ptr_i; - - /* Table version (incremented when tableId is re-used) */ - Uint32 tableVersion; - - /* Table name (may not be unique under "alter table") */ - RopeHandle tableName; - - /* Type of table or index */ - DictTabInfo::TableType tableType; - - /* Is table or index online (this flag is not used in DICT) */ - bool online; - - /* Primary table of index otherwise RNIL */ - Uint32 primaryTableId; - - /* Type of fragmentation (small/medium/large) */ - DictTabInfo::FragmentType fragmentType; - - /* Global checkpoint identity when table created */ - Uint32 gciTableCreated; - - /* Is the table logged (i.e. data survives system restart) */ - enum Bits - { - TR_Logged = 0x1, - TR_RowGCI = 0x2, - TR_RowChecksum = 0x4, - TR_Temporary = 0x8, - TR_ForceVarPart = 0x10 - }; - Uint16 m_bits; - - /* Number of attibutes in table */ - Uint16 noOfAttributes; - - /* Number of null attributes in table (should be computed) */ - Uint16 noOfNullAttr; - - /* Number of primary key attributes (should be computed) */ - Uint16 noOfPrimkey; - - /* Length of primary key in words (should be computed) */ - /* For ordered index this is tree node size in words */ - Uint16 tupKeyLength; - - /** */ - Uint16 noOfCharsets; - - /* K value for LH**3 algorithm (only 6 allowed currently) */ - Uint8 kValue; - - /* Local key length in words (currently 1) */ - Uint8 localKeyLen; - - /* - * Parameter for hash algorithm that specifies the load factor in - * percentage of fill level in buckets. A high value means we are - * splitting early and that buckets are only lightly used. A high - * value means that we have fill the buckets more and get more - * likelihood of overflow buckets. 
- */ - Uint8 maxLoadFactor; - - /* - Flag to indicate default number of partitions - */ - bool defaultNoPartFlag; - - /* - Flag to indicate using linear hash function - */ - bool linearHashFlag; - - /* - * Used when shrinking to decide when to merge buckets. Hysteresis - * is thus possible. Should be smaller but not much smaller than - * maxLoadFactor - */ - Uint8 minLoadFactor; - - /* Convenience routines */ - bool isTable() const; - bool isIndex() const; - bool isUniqueIndex() const; - bool isNonUniqueIndex() const; - bool isHashIndex() const; - bool isOrderedIndex() const; - - /**************************************************** - * Support variables for table handling - ****************************************************/ - - /* Active page which is sent to disk */ - Uint32 activePage; - - /** File pointer received from disk */ - Uint32 filePtr[2]; - - /** Pointer to first attribute in table */ - DLFifoList::Head m_attributes; - - /* Pointer to first page of table description */ - Uint32 firstPage; - - Uint32 nextPool; - - enum TabState { - NOT_DEFINED = 0, - DEFINING = 2, - DEFINED = 4, - PREPARE_DROPPING = 5, - DROPPING = 6, - BACKUP_ONGOING = 7 - }; - TabState tabState; - - /* State when returning from TC_SCHVERREQ */ - enum TabReturnState { - TRS_IDLE = 0, - ADD_TABLE = 1, - SLAVE_SYSTEM_RESTART = 2, - MASTER_SYSTEM_RESTART = 3 - }; - TabReturnState tabReturnState; - - /** Number of words */ - Uint32 packedSize; - - /** Index state (volatile data) */ - enum IndexState { - IS_UNDEFINED = 0, // initial - IS_OFFLINE = 1, // index table created - IS_BUILDING = 2, // building (local state) - IS_DROPPING = 3, // dropping (local state) - IS_ONLINE = 4, // online - IS_BROKEN = 9 // build or drop aborted - }; - IndexState indexState; - - /** Trigger ids of index (volatile data) */ - Uint32 insertTriggerId; - Uint32 updateTriggerId; - Uint32 deleteTriggerId; - Uint32 customTriggerId; // ordered index - Uint32 buildTriggerId; // temp during build - - /** Index state in other blocks on this node */ - enum IndexLocal { - IL_CREATED_TC = 1 << 0 // created in TC - }; - Uint32 indexLocal; - - Uint32 noOfNullBits; - - /** frm data for this table */ - RopeHandle frmData; - RopeHandle tsData; - RopeHandle ngData; - RopeHandle rangeData; - - Uint32 fragmentCount; - Uint32 m_tablespace_id; - - /* - * Access rights to table during single user mode - */ - Uint8 singleUserMode; - }; - - typedef Ptr TableRecordPtr; - ArrayPool c_tableRecordPool; - - /** Node Group and Tablespace id+version + range or list data. - * This is only stored temporarily in DBDICT during an ongoing - * change. - * TODO RONM: Look into improvements of this - */ - Uint32 c_fragDataLen; - Uint16 c_fragData[MAX_NDB_PARTITIONS]; - Uint32 c_tsIdData[2*MAX_NDB_PARTITIONS]; - - /** - * Triggers. This is volatile data not saved on disk. Setting a - * trigger online creates the trigger in TC (if index) and LQH-TUP. 
- */ - struct TriggerRecord { - TriggerRecord() {} - - /** Trigger state */ - enum TriggerState { - TS_NOT_DEFINED = 0, - TS_DEFINING = 1, - TS_OFFLINE = 2, // created globally in DICT - TS_BUILDING = 3, - TS_DROPPING = 4, - TS_ONLINE = 5 // activated globally - }; - TriggerState triggerState; - - /** Trigger state in other blocks on this node */ - enum IndexLocal { - TL_CREATED_TC = 1 << 0, // created in TC - TL_CREATED_LQH = 1 << 1 // created in LQH-TUP - }; - Uint32 triggerLocal; - - /** Trigger name, used by DICT to identify the trigger */ - RopeHandle triggerName; - - /** Trigger id, used by TRIX, TC, LQH, and TUP to identify the trigger */ - Uint32 triggerId; - Uint32 m_obj_ptr_i; - - /** Table id, the table the trigger is defined on */ - Uint32 tableId; - - /** Trigger type, defines what the trigger is used for */ - TriggerType::Value triggerType; - - /** Trigger action time, defines when the trigger should fire */ - TriggerActionTime::Value triggerActionTime; - - /** Trigger event, defines what events the trigger should monitor */ - TriggerEvent::Value triggerEvent; - - /** Monitor all replicas */ - bool monitorReplicas; - - /** Monitor all, the trigger monitors changes of all attributes in table */ - bool monitorAllAttributes; - - /** Monitor all, the trigger monitors changes of all attributes in table */ - bool reportAllMonitoredAttributes; - - /** - * Attribute mask, defines what attributes are to be monitored. - * Can be seen as a compact representation of SQL column name list. - */ - AttributeMask attributeMask; - - /** Index id, only used by secondary_index triggers */ - Uint32 indexId; - - /** Pointer to the next attribute used by ArrayPool */ - Uint32 nextPool; - }; - - Uint32 c_maxNoOfTriggers; - typedef Ptr TriggerRecordPtr; - ArrayPool c_triggerRecordPool; - - /** - * Information for each FS connection. - ***************************************************************************/ - struct FsConnectRecord { - enum FsState { - IDLE = 0, - OPEN_WRITE_SCHEMA = 1, - WRITE_SCHEMA = 2, - CLOSE_WRITE_SCHEMA = 3, - OPEN_READ_SCHEMA1 = 4, - OPEN_READ_SCHEMA2 = 5, - READ_SCHEMA1 = 6, - READ_SCHEMA2 = 7, - CLOSE_READ_SCHEMA = 8, - OPEN_READ_TAB_FILE1 = 9, - OPEN_READ_TAB_FILE2 = 10, - READ_TAB_FILE1 = 11, - READ_TAB_FILE2 = 12, - CLOSE_READ_TAB_FILE = 13, - OPEN_WRITE_TAB_FILE = 14, - WRITE_TAB_FILE = 15, - CLOSE_WRITE_TAB_FILE = 16 - }; - /** File Pointer for this file system connection */ - Uint32 filePtr; - - /** Reference of owner record */ - Uint32 ownerPtr; - - /** State of file system connection */ - FsState fsState; - - /** Used by Array Pool for free list handling */ - Uint32 nextPool; - }; - - typedef Ptr FsConnectRecordPtr; - ArrayPool c_fsConnectRecordPool; - - /** - * This record stores all the information about a node and all its attributes - ***************************************************************************/ - struct NodeRecord { - enum NodeState { - API_NODE = 0, - NDB_NODE_ALIVE = 1, - NDB_NODE_DEAD = 2 - }; - bool hotSpare; - NodeState nodeState; - }; - - typedef Ptr NodeRecordPtr; - CArray c_nodes; - NdbNodeBitmask c_aliveNodes; - - struct PageRecord { - Uint32 word[8192]; - }; - - typedef Ptr PageRecordPtr; - CArray c_pageRecordArray; - - struct SchemaPageRecord { - Uint32 word[NDB_SF_PAGE_SIZE_IN_WORDS]; - }; - - CArray c_schemaPageRecordArray; - - DictTabInfo::Table c_tableDesc; - - /** - * A page for create index table signal. 
- */ - PageRecord c_indexPage; - - struct File { - File() {} - - Uint32 key; - Uint32 m_magic; - Uint32 m_version; - Uint32 m_obj_ptr_i; - Uint32 m_filegroup_id; - Uint32 m_type; - Uint64 m_file_size; - Uint64 m_file_free; - RopeHandle m_path; - - Uint32 nextList; - union { - Uint32 prevList; - Uint32 nextPool; - }; - Uint32 nextHash, prevHash; - - Uint32 hashValue() const { return key;} - bool equal(const File& obj) const { return key == obj.key;} - }; - typedef Ptr FilePtr; - typedef RecordPool File_pool; - typedef DLListImpl File_list; - typedef LocalDLListImpl Local_file_list; - typedef KeyTableImpl File_hash; - - struct Filegroup { - Filegroup(){} - - Uint32 key; - Uint32 m_obj_ptr_i; - Uint32 m_magic; - - Uint32 m_type; - Uint32 m_version; - RopeHandle m_name; - - union { - struct { - Uint32 m_extent_size; - Uint32 m_default_logfile_group_id; - } m_tablespace; - - struct { - Uint32 m_undo_buffer_size; - File_list::HeadPOD m_files; - } m_logfilegroup; - }; - - union { - Uint32 nextPool; - Uint32 nextList; - Uint32 nextHash; - }; - Uint32 prevHash; - - Uint32 hashValue() const { return key;} - bool equal(const Filegroup& obj) const { return key == obj.key;} - }; - typedef Ptr FilegroupPtr; - typedef RecordPool Filegroup_pool; - typedef KeyTableImpl Filegroup_hash; - - File_pool c_file_pool; - Filegroup_pool c_filegroup_pool; - File_hash c_file_hash; - Filegroup_hash c_filegroup_hash; - - RopePool c_rope_pool; - - struct DictObject { - DictObject() {} - Uint32 m_id; - Uint32 m_type; - Uint32 m_ref_count; - RopeHandle m_name; - union { - struct { - Uint32 m_name_len; - const char * m_name_ptr; - RopePool * m_pool; - } m_key; - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 nextHash; - Uint32 prevHash; - - Uint32 hashValue() const { return m_name.hashValue();} - bool equal(const DictObject& obj) const { - if(obj.hashValue() == hashValue()){ - ConstRope r(* m_key.m_pool, obj.m_name); - return r.compare(m_key.m_name_ptr, m_key.m_name_len) == 0; - } - return false; - } - }; - - DLHashTable c_obj_hash; // Name - ArrayPool c_obj_pool; - - DictObject * get_object(const char * name){ - return get_object(name, strlen(name) + 1); - } - - DictObject * get_object(const char * name, Uint32 len){ - return get_object(name, len, Rope::hash(name, len)); - } - - DictObject * get_object(const char * name, Uint32 len, Uint32 hash); - - void release_object(Uint32 obj_ptr_i){ - release_object(obj_ptr_i, c_obj_pool.getPtr(obj_ptr_i)); - } - - void release_object(Uint32 obj_ptr_i, DictObject* obj_ptr_p); - - void increase_ref_count(Uint32 obj_ptr_i); - void decrease_ref_count(Uint32 obj_ptr_i); - -public: - Dbdict(Block_context& ctx); - virtual ~Dbdict(); - -private: - BLOCK_DEFINES(Dbdict); - - // Signal receivers - void execDICTSTARTREQ(Signal* signal); - - void execGET_TABINFOREQ(Signal* signal); - void execGET_TABLEDID_REQ(Signal* signal); - void execGET_TABINFO_REF(Signal* signal); - void execGET_TABINFO_CONF(Signal* signal); - void execCONTINUEB(Signal* signal); - - void execDUMP_STATE_ORD(Signal* signal); - void execHOT_SPAREREP(Signal* signal); - void execDIADDTABCONF(Signal* signal); - void execDIADDTABREF(Signal* signal); - void execTAB_COMMITCONF(Signal* signal); - void execTAB_COMMITREF(Signal* signal); - void execGET_SCHEMA_INFOREQ(Signal* signal); - void execSCHEMA_INFO(Signal* signal); - void execSCHEMA_INFOCONF(Signal* signal); - void execREAD_NODESCONF(Signal* signal); - void execFSCLOSECONF(Signal* signal); - void execFSOPENCONF(Signal* signal); - void execFSOPENREF(Signal* signal); - void 
execFSREADCONF(Signal* signal); - void execFSREADREF(Signal* signal); - void execFSWRITECONF(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execSTTOR(Signal* signal); - void execTC_SCHVERCONF(Signal* signal); - void execNODE_FAILREP(Signal* signal); - void execINCL_NODEREQ(Signal* signal); - void execAPI_FAILREQ(Signal* signal); - - void execWAIT_GCP_REF(Signal* signal); - void execWAIT_GCP_CONF(Signal* signal); - - void execLIST_TABLES_REQ(Signal* signal); - - // Index signals - void execCREATE_INDX_REQ(Signal* signal); - void execCREATE_INDX_CONF(Signal* signal); - void execCREATE_INDX_REF(Signal* signal); - - void execALTER_INDX_REQ(Signal* signal); - void execALTER_INDX_CONF(Signal* signal); - void execALTER_INDX_REF(Signal* signal); - - void execCREATE_TABLE_CONF(Signal* signal); - void execCREATE_TABLE_REF(Signal* signal); - - void execDROP_INDX_REQ(Signal* signal); - void execDROP_INDX_CONF(Signal* signal); - void execDROP_INDX_REF(Signal* signal); - - void execDROP_TABLE_CONF(Signal* signal); - void execDROP_TABLE_REF(Signal* signal); - - void execBUILDINDXREQ(Signal* signal); - void execBUILDINDXCONF(Signal* signal); - void execBUILDINDXREF(Signal* signal); - - void execBACKUP_FRAGMENT_REQ(Signal*); - - // Util signals used by Event code - void execUTIL_PREPARE_CONF(Signal* signal); - void execUTIL_PREPARE_REF (Signal* signal); - void execUTIL_EXECUTE_CONF(Signal* signal); - void execUTIL_EXECUTE_REF (Signal* signal); - void execUTIL_RELEASE_CONF(Signal* signal); - void execUTIL_RELEASE_REF (Signal* signal); - - - // Event signals from API - void execCREATE_EVNT_REQ (Signal* signal); - void execCREATE_EVNT_CONF(Signal* signal); - void execCREATE_EVNT_REF (Signal* signal); - - void execDROP_EVNT_REQ (Signal* signal); - - void execSUB_START_REQ (Signal* signal); - void execSUB_START_CONF (Signal* signal); - void execSUB_START_REF (Signal* signal); - - void execSUB_STOP_REQ (Signal* signal); - void execSUB_STOP_CONF (Signal* signal); - void execSUB_STOP_REF (Signal* signal); - - // Event signals from SUMA - - void execCREATE_SUBID_CONF(Signal* signal); - void execCREATE_SUBID_REF (Signal* signal); - - void execSUB_CREATE_CONF(Signal* signal); - void execSUB_CREATE_REF (Signal* signal); - - void execSUB_REMOVE_REQ(Signal* signal); - void execSUB_REMOVE_CONF(Signal* signal); - void execSUB_REMOVE_REF(Signal* signal); - - // Trigger signals - void execCREATE_TRIG_REQ(Signal* signal); - void execCREATE_TRIG_CONF(Signal* signal); - void execCREATE_TRIG_REF(Signal* signal); - void execALTER_TRIG_REQ(Signal* signal); - void execALTER_TRIG_CONF(Signal* signal); - void execALTER_TRIG_REF(Signal* signal); - void execDROP_TRIG_REQ(Signal* signal); - void execDROP_TRIG_CONF(Signal* signal); - void execDROP_TRIG_REF(Signal* signal); - - void execDROP_TABLE_REQ(Signal* signal); - - void execPREP_DROP_TAB_REQ(Signal* signal); - void execPREP_DROP_TAB_REF(Signal* signal); - void execPREP_DROP_TAB_CONF(Signal* signal); - - void execDROP_TAB_REQ(Signal* signal); - void execDROP_TAB_REF(Signal* signal); - void execDROP_TAB_CONF(Signal* signal); - - void execCREATE_TABLE_REQ(Signal* signal); - void execALTER_TABLE_REQ(Signal* signal); - void execCREATE_FRAGMENTATION_REF(Signal*); - void execCREATE_FRAGMENTATION_CONF(Signal*); - void execCREATE_TAB_REQ(Signal* signal); - void execADD_FRAGREQ(Signal* signal); - void execLQHFRAGREF(Signal* signal); - void execLQHFRAGCONF(Signal* signal); - void execLQHADDATTREF(Signal* signal); - void 
execLQHADDATTCONF(Signal* signal); - void execCREATE_TAB_REF(Signal* signal); - void execCREATE_TAB_CONF(Signal* signal); - void execALTER_TAB_REQ(Signal* signal); - void execALTER_TAB_REF(Signal* signal); - void execALTER_TAB_CONF(Signal* signal); - bool check_ndb_versions() const; - - void execCREATE_FILE_REQ(Signal* signal); - void execCREATE_FILEGROUP_REQ(Signal* signal); - void execDROP_FILE_REQ(Signal* signal); - void execDROP_FILEGROUP_REQ(Signal* signal); - - // Internal - void execCREATE_FILE_REF(Signal* signal); - void execCREATE_FILE_CONF(Signal* signal); - void execCREATE_FILEGROUP_REF(Signal* signal); - void execCREATE_FILEGROUP_CONF(Signal* signal); - void execDROP_FILE_REF(Signal* signal); - void execDROP_FILE_CONF(Signal* signal); - void execDROP_FILEGROUP_REF(Signal* signal); - void execDROP_FILEGROUP_CONF(Signal* signal); - - void execDICT_LOCK_REQ(Signal* signal); - void execDICT_UNLOCK_ORD(Signal* signal); - - /* - * 2.4 COMMON STORED VARIABLES - */ - - /** - * This record stores all the state needed - * when the schema page is being sent to other nodes - ***************************************************************************/ - struct SendSchemaRecord { - /** Number of words of schema data */ - Uint32 noOfWords; - /** Page Id of schema data */ - Uint32 pageId; - - Uint32 nodeId; - SignalCounter m_SCHEMAINFO_Counter; - - Uint32 noOfWordsCurrentlySent; - Uint32 noOfSignalsSentSinceDelay; - - bool inUse; - }; - SendSchemaRecord c_sendSchemaRecord; - - /** - * This record stores all the state needed - * when a table file is being read from disk - ****************************************************************************/ - struct ReadTableRecord { - /** Number of Pages */ - Uint32 no_of_words; - /** Page Id*/ - Uint32 pageId; - /** Table Id of read table */ - Uint32 tableId; - - bool inUse; - Callback m_callback; - }; - ReadTableRecord c_readTableRecord; - - /** - * This record stores all the state needed - * when a table file is being written to disk - ****************************************************************************/ - struct WriteTableRecord { - /** Number of Pages */ - Uint32 no_of_words; - /** Page Id*/ - Uint32 pageId; - /** Table Files Handled, local state variable */ - Uint32 noOfTableFilesHandled; - /** Table Id of written table */ - Uint32 tableId; - /** State, indicates from where it was called */ - enum TableWriteState { - IDLE = 0, - WRITE_ADD_TABLE_MASTER = 1, - WRITE_ADD_TABLE_SLAVE = 2, - WRITE_RESTART_FROM_MASTER = 3, - WRITE_RESTART_FROM_OWN = 4, - TWR_CALLBACK = 5 - }; - TableWriteState tableWriteState; - Callback m_callback; - }; - WriteTableRecord c_writeTableRecord; - - /** - * This record stores all the state needed - * when a schema file is being read from disk - ****************************************************************************/ - struct ReadSchemaRecord { - /** Page Id of schema page */ - Uint32 pageId; - /** First page to read */ - Uint32 firstPage; - /** Number of pages to read */ - Uint32 noOfPages; - /** State, indicates from where it was called */ - enum SchemaReadState { - IDLE = 0, - INITIAL_READ_HEAD = 1, - INITIAL_READ = 2 - }; - SchemaReadState schemaReadState; - }; - ReadSchemaRecord c_readSchemaRecord; - - /** - * This record stores all the state needed - * when a schema file is being written to disk - ****************************************************************************/ - struct WriteSchemaRecord { - /** Page Id of schema page */ - Uint32 pageId; - /** Rewrite entire file */ - Uint32 newFile; - /** 
First page to write */ - Uint32 firstPage; - /** Number of pages to write */ - Uint32 noOfPages; - /** Schema Files Handled, local state variable */ - Uint32 noOfSchemaFilesHandled; - - bool inUse; - Callback m_callback; - }; - WriteSchemaRecord c_writeSchemaRecord; - - /** - * This record stores all the information needed - * when a file is being read from disk - ****************************************************************************/ - struct RestartRecord { - /** Global check point identity */ - Uint32 gciToRestart; - - /** The active table at restart process */ - Uint32 activeTable; - - /** The active table at restart process */ - BlockReference returnBlockRef; - - Uint32 m_pass; // 0 tablespaces/logfilegroups, 1 tables, 2 indexes - }; - RestartRecord c_restartRecord; - - /** - * This record stores all the information needed - * when a file is being read from disk - ****************************************************************************/ - struct RetrieveRecord { - RetrieveRecord(){ noOfWaiters = 0;} - - /** Only one retrieve table definition at a time */ - bool busyState; - - /** - * No of waiting in time queue - */ - Uint32 noOfWaiters; - - /** Block Reference of retriever */ - BlockReference blockRef; - - /** Id of retriever */ - Uint32 m_senderData; - - /** Table id of retrieved table */ - Uint32 tableId; - - Uint32 m_table_type; - - /** Starting page to retrieve data from */ - Uint32 retrievePage; - - /** Number of pages retrieved */ - Uint32 retrievedNoOfPages; - - /** Number of words retrieved */ - Uint32 retrievedNoOfWords; - - /** Number of words sent currently */ - Uint32 currentSent; - - /** - * Long signal stuff - */ - bool m_useLongSig; - }; - RetrieveRecord c_retrieveRecord; - - /** - * This record stores all the information needed - * when a file is being read from disk - * - * This is the info stored in one entry of the schema - * page. Each table has 4 words of info. - * Word 1: Schema version (upper 16 bits) - * Table State (lower 16 bits) - * Word 2: Number of pages of table description - * Word 3: Global checkpoint id table was created - * Word 4: Currently zero - ****************************************************************************/ - struct SchemaRecord { - /** Schema file first page (0) */ - Uint32 schemaPage; - - /** Old Schema file first page (used at node restart) */ - Uint32 oldSchemaPage; - - Callback m_callback; - }; - SchemaRecord c_schemaRecord; - - /* - * Schema file, list of schema pages. Use an array until a pool - * exists and NDBFS interface can use it. 
- */ - struct XSchemaFile { - SchemaFile* schemaPage; - Uint32 noOfPages; - }; - // 0-normal 1-old - XSchemaFile c_schemaFile[2]; - - void initSchemaFile(XSchemaFile *, Uint32 firstPage, Uint32 lastPage, - bool initEntries); - void resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages); - void computeChecksum(XSchemaFile *, Uint32 pageNo); - bool validateChecksum(const XSchemaFile *); - SchemaFile::TableEntry * getTableEntry(XSchemaFile *, Uint32 tableId); - - Uint32 computeChecksum(const Uint32 * src, Uint32 len); - - - /* ----------------------------------------------------------------------- */ - // Node References - /* ----------------------------------------------------------------------- */ - Uint16 c_masterNodeId; - - /* ----------------------------------------------------------------------- */ - // Various current system properties - /* ----------------------------------------------------------------------- */ - Uint16 c_numberNode; - Uint16 c_noHotSpareNodes; - Uint16 c_noNodesFailed; - Uint32 c_failureNr; - - /* ----------------------------------------------------------------------- */ - // State variables - /* ----------------------------------------------------------------------- */ - -#ifndef ndb_dbdict_log_block_state - enum BlockState { - BS_IDLE = 0, - BS_CREATE_TAB = 1, - BS_BUSY = 2, - BS_NODE_FAILURE = 3, - BS_NODE_RESTART = 4 - }; -#else // quick hack to log changes - enum { - BS_IDLE = 0, - BS_CREATE_TAB = 1, - BS_BUSY = 2, - BS_NODE_FAILURE = 3, - BS_NODE_RESTART = 4 - }; - struct BlockState; - friend struct BlockState; - struct BlockState { - BlockState() : - m_value(BS_IDLE) { - } - BlockState(int value) : - m_value(value) { - } - operator int() const { - return m_value; - } - BlockState& operator=(const BlockState& bs) { - Dbdict* dict = (Dbdict*)globalData.getBlock(DBDICT); - dict->infoEvent("DICT: bs %d->%d", m_value, bs.m_value); - globalSignalLoggers.log(DBDICT, "bs %d->%d", m_value, bs.m_value); - m_value = bs.m_value; - return *this; - } - int m_value; - }; -#endif - BlockState c_blockState; - - struct PackTable { - - enum PackTableState { - PTS_IDLE = 0, - PTS_GET_TAB = 3 - } m_state; - - } c_packTable; - - Uint32 c_startPhase; - Uint32 c_restartType; - bool c_initialStart; - bool c_systemRestart; - bool c_nodeRestart; - bool c_initialNodeRestart; - Uint32 c_tabinfoReceived; - - /** - * Temporary structure used when parsing table info - */ - struct ParseDictTabInfoRecord { - DictTabInfo::RequestType requestType; - Uint32 errorCode; - Uint32 errorLine; - - SimpleProperties::UnpackStatus status; - Uint32 errorKey; - TableRecordPtr tablePtr; - }; - - // Operation records - - /** - * Common part of operation records. Uses KeyTable2. Note that each - * seize/release invokes ctor/dtor automatically. 
- */ - struct OpRecordCommon { - OpRecordCommon() {} - Uint32 key; // key shared between master and slaves - Uint32 nextHash; - Uint32 prevHash; - Uint32 hashValue() const { - return key; - } - bool equal(const OpRecordCommon& rec) const { - return key == rec.key; - } - }; - - /** - * Create table record - */ - struct CreateTableRecord : OpRecordCommon { - CreateTableRecord() {} - Uint32 m_senderRef; - Uint32 m_senderData; - Uint32 m_coordinatorRef; - - Uint32 m_errorCode; - void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;} - - // For alter table - Uint32 m_changeMask; - bool m_alterTableFailed; - AlterTableRef m_alterTableRef; - Uint32 m_alterTableId; - - /* Previous table name (used for reverting failed table rename) */ - char previousTableName[MAX_TAB_NAME_SIZE]; - - /* Previous table definition, frm (used for reverting) */ - /** TODO Could preferrably be made dynamic size */ - Uint32 previousFrmLen; - char previousFrmData[MAX_FRM_DATA_SIZE]; - - Uint32 m_tablePtrI; - Uint32 m_tabInfoPtrI; - Uint32 m_fragmentsPtrI; - - Uint32 m_dihAddFragPtr; // Connect ptr towards DIH - Uint32 m_lqhFragPtr; // Connect ptr towards LQH - - Callback m_callback; // Who's using local create tab - MutexHandle2 m_startLcpMutex; - - struct CoordinatorData { - Uint32 m_gsn; - SafeCounterHandle m_counter; - CreateTabReq::RequestType m_requestType; - } m_coordinatorData; - }; - typedef Ptr CreateTableRecordPtr; - - /** - * Drop table record - */ - struct DropTableRecord : OpRecordCommon { - DropTableRecord() {} - DropTableReq m_request; - - Uint32 m_requestType; - Uint32 m_coordinatorRef; - - Uint32 m_errorCode; - void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;} - - MutexHandle2 m_define_backup_mutex; - - /** - * When sending stuff around - */ - struct CoordinatorData { - Uint32 m_gsn; - Uint32 m_block; - SignalCounter m_signalCounter; - } m_coordinatorData; - - struct ParticipantData { - Uint32 m_gsn; - Uint32 m_block; - SignalCounter m_signalCounter; - - Callback m_callback; - } m_participantData; - }; - typedef Ptr DropTableRecordPtr; - - /** - * Request flags passed in signals along with request type and - * propagated across operations. - */ - struct RequestFlag { - enum { - RF_LOCAL = 1 << 0, // create on local node only - RF_NOBUILD = 1 << 1, // no need to build index - RF_NOTCTRIGGER = 1 << 2, // alter trigger: no trigger in TC - RF_FORCE = 1 << 4 // force drop - }; - }; - - /** - * Operation record for create index. 
- */ - struct OpCreateIndex : OpRecordCommon { - // original request (index id will be added) - CreateIndxReq m_request; - AttributeList m_attrList; - char m_indexName[MAX_TAB_NAME_SIZE]; - bool m_loggedIndex; - bool m_temporaryIndex; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - CreateIndxReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - CreateIndxRef::ErrorCode m_lastError; - CreateIndxRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpCreateIndex() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = CreateIndxReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = CreateIndxRef::NoError; - m_errorCode = CreateIndxRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const CreateIndxReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != CreateIndxRef::NoError; - } - bool hasError() { - return m_errorCode != CreateIndxRef::NoError; - } - void setError(const CreateIndxRef* ref) { - m_lastError = CreateIndxRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const CreateTableRef* ref) { - m_lastError = CreateIndxRef::NoError; - if (ref != 0) { - switch (ref->getErrorCode()) { - case CreateTableRef::TableAlreadyExist: - m_lastError = CreateIndxRef::IndexExists; - break; - default: - m_lastError = (CreateIndxRef::ErrorCode)ref->getErrorCode(); - break; - } - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - } - } - } - void setError(const AlterIndxRef* ref) { - m_lastError = CreateIndxRef::NoError; - if (ref != 0) { - m_lastError = (CreateIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpCreateIndexPtr; - - /** - * Operation record for drop index. - */ - struct OpDropIndex : OpRecordCommon { - // original request - DropIndxReq m_request; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - DropIndxReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - DropIndxRef::ErrorCode m_lastError; - DropIndxRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpDropIndex() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = DropIndxReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = DropIndxRef::NoError; - m_errorCode = DropIndxRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const DropIndxReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != DropIndxRef::NoError; - } - bool hasError() { - return m_errorCode != DropIndxRef::NoError; - } - void setError(const DropIndxRef* ref) { - m_lastError = DropIndxRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! 
hasError()) { - m_errorCode = ref->getErrorCode(); - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const AlterIndxRef* ref) { - m_lastError = DropIndxRef::NoError; - if (ref != 0) { - m_lastError = (DropIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const DropTableRef* ref) { - m_lastError = DropIndxRef::NoError; - if (ref != 0) { - switch (ref->errorCode) { - case DropTableRef::Busy: - m_lastError = DropIndxRef::Busy; - break; - case DropTableRef::NoSuchTable: - m_lastError = DropIndxRef::IndexNotFound; - break; - case DropTableRef::DropInProgress: - m_lastError = DropIndxRef::Busy; - break; - case DropTableRef::NoDropTableRecordAvailable: - m_lastError = DropIndxRef::Busy; - break; - default: - m_lastError = (DropIndxRef::ErrorCode)ref->errorCode; - break; - } - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = 0; - m_errorNode = 0; - } - } - } - }; - typedef Ptr OpDropIndexPtr; - - /** - * Operation record for alter index. - */ - struct OpAlterIndex : OpRecordCommon { - // original request plus buffer for attribute lists - AlterIndxReq m_request; - AttributeList m_attrList; - AttributeList m_tableKeyList; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - AlterIndxReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - AlterIndxRef::ErrorCode m_lastError; - AlterIndxRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - Uint32 m_triggerCounter; - // ctor - OpAlterIndex() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = AlterIndxReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = AlterIndxRef::NoError; - m_errorCode = AlterIndxRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - m_triggerCounter = 0; - } - void save(const AlterIndxReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != AlterIndxRef::NoError; - } - bool hasError() { - return m_errorCode != AlterIndxRef::NoError; - } - void setError(const AlterIndxRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const CreateIndxRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = (AlterIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const DropIndxRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = (AlterIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const BuildIndxRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = (AlterIndxRef::ErrorCode)ref->getErrorCode(); - if (! 
hasError()) { - m_errorCode = m_lastError; - m_errorLine = 0; - m_errorNode = 0; - } - } - } - void setError(const CreateTrigRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = (AlterIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const DropTrigRef* ref) { - m_lastError = AlterIndxRef::NoError; - if (ref != 0) { - m_lastError = (AlterIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpAlterIndexPtr; - - /** - * Operation record for build index. - */ - struct OpBuildIndex : OpRecordCommon { - // original request plus buffer for attribute lists - BuildIndxReq m_request; - AttributeList m_attrList; - Id_array m_tableKeyList; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - BuildIndxReq::RequestType m_requestType; - Uint32 m_requestFlag; - Uint32 m_constrTriggerId; - // error info - BuildIndxRef::ErrorCode m_lastError; - BuildIndxRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpBuildIndex() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = BuildIndxReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = BuildIndxRef::NoError; - m_errorCode = BuildIndxRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const BuildIndxReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != BuildIndxRef::NoError; - } - bool hasError() { - return m_errorCode != BuildIndxRef::NoError; - } - void setError(const BuildIndxRef* ref) { - m_lastError = BuildIndxRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = 0; - m_errorNode = 0; - } - } - } - void setError(const AlterIndxRef* ref) { - m_lastError = BuildIndxRef::NoError; - if (ref != 0) { - m_lastError = (BuildIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const CreateTrigRef* ref) { - m_lastError = BuildIndxRef::NoError; - if (ref != 0) { - m_lastError = (BuildIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const DropTrigRef* ref) { - m_lastError = BuildIndxRef::NoError; - if (ref != 0) { - m_lastError = (BuildIndxRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpBuildIndexPtr; - - /** - * Operation record for Util Signals. 
- */ - struct OpSignalUtil : OpRecordCommon{ - Callback m_callback; - Uint32 m_userData; - }; - typedef Ptr OpSignalUtilPtr; - - /** - * Operation record for subscribe-start-stop - */ - struct OpSubEvent : OpRecordCommon { - Uint32 m_senderRef; - Uint32 m_senderData; - Uint32 m_errorCode; - union { - SubStartConf m_sub_start_conf; - SubStopConf m_sub_stop_conf; - }; - RequestTracker m_reqTracker; - }; - typedef Ptr OpSubEventPtr; - - static const Uint32 sysTab_NDBEVENTS_0_szs[]; - - /** - * Operation record for create event. - */ - struct OpCreateEvent : OpRecordCommon { - // original request (event id will be added) - CreateEvntReq m_request; - //AttributeMask m_attrListBitmask; - // AttributeList m_attrList; - sysTab_NDBEVENTS_0 m_eventRec; - // char m_eventName[MAX_TAB_NAME_SIZE]; - // char m_tableName[MAX_TAB_NAME_SIZE]; - - // coordinator DICT - RequestTracker m_reqTracker; - // state info - CreateEvntReq::RequestType m_requestType; - // error info - Uint32 m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; /* also used to store master node id - in case of NotMaster */ - // ctor - OpCreateEvent() { - memset(&m_request, 0, sizeof(m_request)); - m_requestType = CreateEvntReq::RT_UNDEFINED; - m_errorCode = CreateEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void init(const CreateEvntReq* req, Dbdict* dp) { - m_request = *req; - m_errorCode = CreateEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - m_requestType = req->getRequestType(); - } - bool hasError() { - return m_errorCode != CreateEvntRef::NoError; - } - void setError(const CreateEvntRef* ref) { - if (ref != 0 && ! hasError()) { - m_errorCode = ref->getErrorCode(); - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - - }; - typedef Ptr OpCreateEventPtr; - - /** - * Operation record for drop event. - */ - struct OpDropEvent : OpRecordCommon { - // original request - DropEvntReq m_request; - // char m_eventName[MAX_TAB_NAME_SIZE]; - sysTab_NDBEVENTS_0 m_eventRec; - RequestTracker m_reqTracker; - // error info - Uint32 m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // ctor - OpDropEvent() { - memset(&m_request, 0, sizeof(m_request)); - m_errorCode = 0; - m_errorLine = 0; - m_errorNode = 0; - } - void init(const DropEvntReq* req) { - m_request = *req; - m_errorCode = 0; - m_errorLine = 0; - m_errorNode = 0; - } - bool hasError() { - return m_errorCode != 0; - } - void setError(const DropEvntRef* ref) { - if (ref != 0 && ! hasError()) { - m_errorCode = ref->getErrorCode(); - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - }; - typedef Ptr OpDropEventPtr; - - /** - * Operation record for create trigger. 
- */ - struct OpCreateTrigger : OpRecordCommon { - // original request (trigger id will be added) - CreateTrigReq m_request; - char m_triggerName[MAX_TAB_NAME_SIZE]; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - CreateTrigReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - CreateTrigRef::ErrorCode m_lastError; - CreateTrigRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpCreateTrigger() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = CreateTrigReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = CreateTrigRef::NoError; - m_errorCode = CreateTrigRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const CreateTrigReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != CreateTrigRef::NoError; - } - bool hasError() { - return m_errorCode != CreateTrigRef::NoError; - } - void setError(const CreateTrigRef* ref) { - m_lastError = CreateTrigRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const AlterTrigRef* ref) { - m_lastError = CreateTrigRef::NoError; - if (ref != 0) { - m_lastError = (CreateTrigRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpCreateTriggerPtr; - - /** - * Operation record for drop trigger. - */ - struct OpDropTrigger : OpRecordCommon { - // original request - DropTrigReq m_request; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - DropTrigReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - DropTrigRef::ErrorCode m_lastError; - DropTrigRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpDropTrigger() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = DropTrigReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = DropTrigRef::NoError; - m_errorCode = DropTrigRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const DropTrigReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != DropTrigRef::NoError; - } - bool hasError() { - return m_errorCode != DropTrigRef::NoError; - } - void setError(const DropTrigRef* ref) { - m_lastError = DropTrigRef::NoError; - if (ref != 0) { - m_lastError = ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const AlterTrigRef* ref) { - m_lastError = DropTrigRef::NoError; - if (ref != 0) { - m_lastError = (DropTrigRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpDropTriggerPtr; - - /** - * Operation record for alter trigger. 
- */ - struct OpAlterTrigger : OpRecordCommon { - // original request - AlterTrigReq m_request; - // nodes participating in operation - NdbNodeBitmask m_nodes; - // coordinator DICT - Uint32 m_coordinatorRef; - bool m_isMaster; - // state info - AlterTrigReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - AlterTrigRef::ErrorCode m_lastError; - AlterTrigRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // counters - SignalCounter m_signalCounter; - // ctor - OpAlterTrigger() { - memset(&m_request, 0, sizeof(m_request)); - m_coordinatorRef = 0; - m_requestType = AlterTrigReq::RT_UNDEFINED; - m_requestFlag = 0; - m_lastError = AlterTrigRef::NoError; - m_errorCode = AlterTrigRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void save(const AlterTrigReq* req) { - m_request = *req; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasLastError() { - return m_lastError != AlterTrigRef::NoError; - } - bool hasError() { - return m_errorCode != AlterTrigRef::NoError; - } - void setError(const AlterTrigRef* ref) { - m_lastError = AlterTrigRef::NoError; - if (ref != 0) { - m_lastError = (AlterTrigRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const CreateTrigRef* ref) { - m_lastError = AlterTrigRef::NoError; - if (ref != 0) { - m_lastError = (AlterTrigRef::ErrorCode)ref->getErrorCode(); - if (! hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - void setError(const DropTrigRef* ref) { - m_lastError = AlterTrigRef::NoError; - if (ref != 0) { - m_lastError = (AlterTrigRef::ErrorCode)ref->getErrorCode(); - if (! 
hasError()) { - m_errorCode = m_lastError; - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - } - }; - typedef Ptr OpAlterTriggerPtr; - -public: - struct SchemaOp : OpRecordCommon { - - Uint32 m_clientRef; // API (for take-over) - Uint32 m_clientData;// API - - Uint32 m_senderRef; // - Uint32 m_senderData;// transaction key value - - Uint32 m_errorCode; - - Uint32 m_obj_id; - Uint32 m_obj_type; - Uint32 m_obj_version; - Uint32 m_obj_ptr_i; - Uint32 m_vt_index; - Callback m_callback; - }; - typedef Ptr SchemaOpPtr; - - struct SchemaTransaction : OpRecordCommon { - Uint32 m_senderRef; // API - Uint32 m_senderData;// API - - Callback m_callback; - SafeCounterHandle m_counter; - NodeBitmask m_nodes; - - Uint32 m_errorCode; - SchemaTransaction() {} - void setErrorCode(Uint32 c){ if(m_errorCode == 0) m_errorCode = c;} - - /** - * This should contain "lists" with operations - */ - struct { - Uint32 m_key; // Operation key - Uint32 m_vt_index; // Operation type - Uint32 m_obj_id; - DictObjOp::State m_state; - } m_op; - }; -private: - - struct OpCreateObj : public SchemaOp { - Uint32 m_gci; - Uint32 m_obj_info_ptr_i; - Uint32 m_restart; - }; - typedef Ptr CreateObjRecordPtr; - - struct OpDropObj : public SchemaOp - { - }; - typedef Ptr DropObjRecordPtr; - - /** - * Only used at coordinator/master - */ - // Common operation record pool -public: - STATIC_CONST( opCreateTableSize = sizeof(CreateTableRecord) ); - STATIC_CONST( opDropTableSize = sizeof(DropTableRecord) ); - STATIC_CONST( opCreateIndexSize = sizeof(OpCreateIndex) ); - STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) ); - STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) ); - STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) ); - STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) ); - STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) ); - STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) ); - STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) ); - STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) ); - STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) ); - STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) ); - STATIC_CONST( opCreateObjSize = sizeof(OpCreateObj) ); -private: -#define PTR_ALIGN(n) ((((n)+sizeof(void*)-1)>>2)&~((sizeof(void*)-1)>>2)) - union OpRecordUnion { - Uint32 u_opCreateTable [PTR_ALIGN(opCreateTableSize)]; - Uint32 u_opDropTable [PTR_ALIGN(opDropTableSize)]; - Uint32 u_opCreateIndex [PTR_ALIGN(opCreateIndexSize)]; - Uint32 u_opDropIndex [PTR_ALIGN(opDropIndexSize)]; - Uint32 u_opCreateEvent [PTR_ALIGN(opCreateEventSize)]; - Uint32 u_opSubEvent [PTR_ALIGN(opSubEventSize)]; - Uint32 u_opDropEvent [PTR_ALIGN(opDropEventSize)]; - Uint32 u_opSignalUtil [PTR_ALIGN(opSignalUtilSize)]; - Uint32 u_opAlterIndex [PTR_ALIGN(opAlterIndexSize)]; - Uint32 u_opBuildIndex [PTR_ALIGN(opBuildIndexSize)]; - Uint32 u_opCreateTrigger[PTR_ALIGN(opCreateTriggerSize)]; - Uint32 u_opDropTrigger [PTR_ALIGN(opDropTriggerSize)]; - Uint32 u_opAlterTrigger [PTR_ALIGN(opAlterTriggerSize)]; - Uint32 u_opCreateObj [PTR_ALIGN(opCreateObjSize)]; - Uint32 nextPool; - }; - ArrayPool c_opRecordPool; - - // Operation records - KeyTable2 c_opCreateTable; - KeyTable2 c_opDropTable; - KeyTable2 c_opCreateIndex; - KeyTable2 c_opDropIndex; - KeyTable2 c_opAlterIndex; - KeyTable2 c_opBuildIndex; - KeyTable2C c_opCreateEvent; - KeyTable2C c_opSubEvent; - KeyTable2C c_opDropEvent; - KeyTable2C c_opSignalUtil; - KeyTable2 c_opCreateTrigger; - KeyTable2 c_opDropTrigger; - KeyTable2 
c_opAlterTrigger; - KeyTable2 c_schemaOp; - KeyTable2 c_Trans; - KeyTable2Ref c_opCreateObj; - KeyTable2Ref c_opDropObj; - - // Unique key for operation XXX move to some system table - Uint32 c_opRecordSequence; - - /* - * Master DICT can be locked in 2 mutually exclusive ways: - * - * 1) for schema ops, via operation records - * 2) against schema ops, via a lock queue - * - * Current use of 2) is by a starting node, to prevent schema ops - * until started. The ops are refused (BlockState != BS_IDLE), - * not queued. - * - * Master failure is not handled, in node start case the starting - * node will crash too anyway. Use lock table in future.. - * - * The lock queue is "serial" but other behaviour is possible - * by checking lock types e.g. to allow parallel node starts. - * - * Checking release of last op record is not convenient with - * current structure (5.0). Instead we poll via continueB. - * - * XXX only table ops check BlockState - */ - struct DictLockType; - friend struct DictLockType; - - struct DictLockType { - DictLockReq::LockType lockType; - BlockState blockState; - const char* text; - }; - - struct DictLockRecord; - friend struct DictLockRecord; - - struct DictLockRecord { - DictLockReq req; - const DictLockType* lt; - bool locked; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - - typedef Ptr DictLockPtr; - ArrayPool c_dictLockPool; - DLFifoList c_dictLockQueue; - bool c_dictLockPoll; - - static const DictLockType* getDictLockType(Uint32 lockType); - void sendDictLockInfoEvent(Uint32 pollCount); - void sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text); - - // check if any schema op exists (conflicting with dict lock) - bool hasDictLockSchemaOp(); - - void checkDictLockQueue(Signal* signal, bool poll); - void sendDictLockConf(Signal* signal, DictLockPtr lockPtr); - void sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode); - - // control polling i.e. 
continueB loop - void setDictLockPoll(Signal* signal, bool on, Uint32 pollCount); - - // NF handling - void removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes); - - - // Statement blocks - - /* ------------------------------------------------------------ */ - // Start/Restart Handling - /* ------------------------------------------------------------ */ - void sendSTTORRY(Signal* signal); - void sendNDB_STTORRY(Signal* signal); - void initSchemaFile(Signal* signal); - - /* ------------------------------------------------------------ */ - // Drop Table Handling - /* ------------------------------------------------------------ */ - void releaseTableObject(Uint32 tableId, bool removeFromHash = true); - - /* ------------------------------------------------------------ */ - // General Stuff - /* ------------------------------------------------------------ */ - Uint32 getFreeObjId(Uint32 minId); - Uint32 getFreeTableRecord(Uint32 primaryTableId); - Uint32 getFreeTriggerRecord(); - bool getNewAttributeRecord(TableRecordPtr tablePtr, - AttributeRecordPtr & attrPtr); - void packTableIntoPages(Signal* signal); - void packTableIntoPages(SimpleProperties::Writer &, TableRecordPtr, Signal* =0); - void packFilegroupIntoPages(SimpleProperties::Writer &, - FilegroupPtr, - const Uint32 undo_free_hi, - const Uint32 undo_free_lo); - void packFileIntoPages(SimpleProperties::Writer &, FilePtr, const Uint32); - - void sendGET_TABINFOREQ(Signal* signal, - Uint32 tableId); - void sendTC_SCHVERREQ(Signal* signal, - Uint32 tableId, - BlockReference tcRef); - - /* ------------------------------------------------------------ */ - // System Restart Handling - /* ------------------------------------------------------------ */ - void initSendSchemaData(Signal* signal); - void sendSchemaData(Signal* signal); - Uint32 sendSCHEMA_INFO(Signal* signal, Uint32 nodeId, Uint32* pagePointer); - void checkSchemaStatus(Signal* signal); - void sendDIHSTARTTAB_REQ(Signal* signal); - - /* ------------------------------------------------------------ */ - // Receive Table Handling - /* ------------------------------------------------------------ */ - void handleTabInfoInit(SimpleProperties::Reader &, - ParseDictTabInfoRecord *, - bool checkExist = true); - void handleTabInfo(SimpleProperties::Reader & it, ParseDictTabInfoRecord *, - DictTabInfo::Table & tableDesc); - - void handleAddTableFailure(Signal* signal, - Uint32 failureLine, - Uint32 tableId); - bool verifyTableCorrect(Signal* signal, Uint32 tableId); - - /* ------------------------------------------------------------ */ - // Add Table Handling - /* ------------------------------------------------------------ */ - void releaseCreateTableOp(Signal* signal, CreateTableRecordPtr createTabPtr); - - /* ------------------------------------------------------------ */ - // Add Fragment Handling - /* ------------------------------------------------------------ */ - void sendLQHADDATTRREQ(Signal*, CreateTableRecordPtr, Uint32 attributePtrI); - - /* ------------------------------------------------------------ */ - // Read/Write Schema and Table files - /* ------------------------------------------------------------ */ - void updateSchemaState(Signal* signal, Uint32 tableId, - SchemaFile::TableEntry*, Callback*, - bool savetodisk = 1); - void startWriteSchemaFile(Signal* signal); - void openSchemaFile(Signal* signal, - Uint32 fileNo, - Uint32 fsPtr, - bool writeFlag, - bool newFile); - void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr); - void 
writeSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr); - void closeFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr); - void closeWriteSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr); - void initSchemaFile_conf(Signal* signal, Uint32 i, Uint32 returnCode); - - void writeTableFile(Signal* signal, Uint32 tableId, - SegmentedSectionPtr tabInfo, Callback*); - void startWriteTableFile(Signal* signal, Uint32 tableId); - void openTableFile(Signal* signal, - Uint32 fileNo, - Uint32 fsPtr, - Uint32 tableId, - bool writeFlag); - void writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr); - void writeTableConf(Signal* signal, - FsConnectRecordPtr fsPtr); - void closeWriteTableConf(Signal* signal, - FsConnectRecordPtr fsPtr); - - void startReadTableFile(Signal* signal, Uint32 tableId); - void openReadTableRef(Signal* signal, - FsConnectRecordPtr fsPtr); - void readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr); - void readTableConf(Signal* signal, - FsConnectRecordPtr fsPtr); - void readTableRef(Signal* signal, - FsConnectRecordPtr fsPtr); - void closeReadTableConf(Signal* signal, - FsConnectRecordPtr fsPtr); - - void startReadSchemaFile(Signal* signal); - void openReadSchemaRef(Signal* signal, - FsConnectRecordPtr fsPtr); - void readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr); - void readSchemaConf(Signal* signal, FsConnectRecordPtr fsPtr); - void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr); - void closeReadSchemaConf(Signal* signal, - FsConnectRecordPtr fsPtr); - bool convertSchemaFileTo_5_0_6(XSchemaFile*); - - /* ------------------------------------------------------------ */ - // Get table definitions - /* ------------------------------------------------------------ */ - void sendGET_TABINFOREF(Signal* signal, - GetTabInfoReq*, - GetTabInfoRef::ErrorCode errorCode); - - void sendGET_TABLEID_REF(Signal* signal, - GetTableIdReq * req, - GetTableIdRef::ErrorCode errorCode); - - void sendGetTabResponse(Signal* signal); - - /* ------------------------------------------------------------ */ - // Indexes and triggers - /* ------------------------------------------------------------ */ - - // reactivate and rebuild indexes on start up - void activateIndexes(Signal* signal, Uint32 i); - void rebuildIndexes(Signal* signal, Uint32 i); - - // create index - void createIndex_recvReply(Signal* signal, const CreateIndxConf* conf, - const CreateIndxRef* ref); - void createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_fromCreateTable(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_toAlterIndex(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_fromAlterIndex(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_slaveCommit(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_slaveAbort(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_sendSlaveReq(Signal* signal, OpCreateIndexPtr opPtr); - void createIndex_sendReply(Signal* signal, OpCreateIndexPtr opPtr, bool); - // drop index - void dropIndex_recvReply(Signal* signal, const DropIndxConf* conf, - const DropIndxRef* ref); - void dropIndex_slavePrepare(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_toAlterIndex(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_fromAlterIndex(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_toDropTable(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_fromDropTable(Signal* signal, OpDropIndexPtr 
opPtr); - void dropIndex_slaveCommit(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_slaveAbort(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_sendSlaveReq(Signal* signal, OpDropIndexPtr opPtr); - void dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, bool); - // alter index - void alterIndex_recvReply(Signal* signal, const AlterIndxConf* conf, - const AlterIndxRef* ref); - void alterIndex_slavePrepare(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_toCreateTc(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_fromCreateTc(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_fromDropTc(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_toCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_fromCreateTrigger(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_toDropTrigger(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_fromDropTrigger(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_toBuildIndex(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_fromBuildIndex(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_slaveCommit(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_slaveAbort(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_sendSlaveReq(Signal* signal, OpAlterIndexPtr opPtr); - void alterIndex_sendReply(Signal* signal, OpAlterIndexPtr opPtr, bool); - // build index - void buildIndex_recvReply(Signal* signal, const BuildIndxConf* conf, - const BuildIndxRef* ref); - void buildIndex_toCreateConstr(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_fromCreateConstr(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_buildTrix(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_toDropConstr(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_fromDropConstr(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_toOnline(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_fromOnline(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr); - void buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, bool); - - // Events - void - createEventUTIL_PREPARE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - createEventUTIL_EXECUTE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_PREPARE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_EXECUTE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_PREPARE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_EXECUTE_DELETE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUtilPrepareRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUtilExecuteRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - int - sendSignalUtilReq(Callback *c, - BlockReference ref, - GlobalSignalNumber gsn, - Signal* signal, - Uint32 length, - JobBufferLevel jbuf, - LinearSectionPtr ptr[3], - Uint32 noOfSections); - int - recvSignalUtilReq(Signal* signal, Uint32 returnCode); - - void completeSubStartReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - - void dropEvent_sendReply(Signal* signal, - 
OpDropEventPtr evntRecPtr); - - void createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr); - void createEventComplete_RT_USER_CREATE(Signal* signal, - OpCreateEventPtr evntRecPtr); - void createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - void createEventComplete_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - - void createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - - void createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI, - Uint32 returnCode); - void createEvent_sendReply(Signal* signal, OpCreateEventPtr evntRecPtr, - LinearSectionPtr *ptr = NULL, int noLSP = 0); - - void prepareTransactionEventSysTable (Callback *c, - Signal* signal, - Uint32 senderData, - UtilPrepareReq::OperationTypeValue prepReq); - void prepareUtilTransaction(Callback *c, - Signal* signal, - Uint32 senderData, - Uint32 tableId, - const char *tableName, - UtilPrepareReq::OperationTypeValue prepReq, - Uint32 noAttr, - Uint32 attrIds[], - const char *attrNames[]); - - void executeTransEventSysTable(Callback *c, - Signal *signal, - const Uint32 ptrI, - sysTab_NDBEVENTS_0& m_eventRec, - const Uint32 prepareId, - UtilPrepareReq::OperationTypeValue prepReq); - void executeTransaction(Callback *c, - Signal* signal, - Uint32 senderData, - Uint32 prepareId, - Uint32 noAttr, - LinearSectionPtr headerPtr, - LinearSectionPtr dataPtr); - - void parseReadEventSys(Signal *signal, sysTab_NDBEVENTS_0& m_eventRec); - - // create trigger - void createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf, - const CreateTrigRef* ref); - void createTrigger_slavePrepare(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_masterSeize(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_slaveCreate(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_toAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_fromAlterTrigger(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_slaveCommit(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_slaveAbort(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_sendSlaveReq(Signal* signal, OpCreateTriggerPtr opPtr); - void createTrigger_sendReply(Signal* signal, OpCreateTriggerPtr opPtr, bool); - // drop trigger - void dropTrigger_recvReply(Signal* signal, const DropTrigConf* conf, - const DropTrigRef* ref); - void dropTrigger_slavePrepare(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_toAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_fromAlterTrigger(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_slaveCommit(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_slaveAbort(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_sendSlaveReq(Signal* signal, OpDropTriggerPtr opPtr); - void dropTrigger_sendReply(Signal* signal, OpDropTriggerPtr opPtr, bool); - // alter trigger - void alterTrigger_recvReply(Signal* signal, const AlterTrigConf* conf, - const AlterTrigRef* ref); - void alterTrigger_slavePrepare(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_toCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_fromCreateLocal(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_fromDropLocal(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_slaveCommit(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_slaveAbort(Signal* 
signal, OpAlterTriggerPtr opPtr); - void alterTrigger_sendSlaveReq(Signal* signal, OpAlterTriggerPtr opPtr); - void alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr, bool); - // support - void getTableKeyList(TableRecordPtr, - Id_array& list); - void getIndexAttr(TableRecordPtr indexPtr, Uint32 itAttr, Uint32* id); - void getIndexAttrList(TableRecordPtr indexPtr, AttributeList& list); - void getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask); - - /* ------------------------------------------------------------ */ - // Initialisation - /* ------------------------------------------------------------ */ - void initCommonData(); - void initRecords(); - void initConnectRecord(); - void initRetrieveRecord(Signal*, Uint32, Uint32 returnCode); - void initSchemaRecord(); - void initRestartRecord(); - void initSendSchemaRecord(); - void initReadTableRecord(); - void initWriteTableRecord(); - void initReadSchemaRecord(); - void initWriteSchemaRecord(); - - void initNodeRecords(); - void initTableRecords(); - void initialiseTableRecord(TableRecordPtr tablePtr); - void initTriggerRecords(); - void initialiseTriggerRecord(TriggerRecordPtr triggerPtr); - void initPageRecords(); - - Uint32 getFsConnRecord(); - - bool getIsFailed(Uint32 nodeId) const; - - void dropTable_backup_mutex_locked(Signal* signal, Uint32, Uint32); - void dropTableRef(Signal * signal, DropTableReq *, DropTableRef::ErrorCode); - void printTables(); // For debugging only - int handleAlterTab(AlterTabReq * req, - CreateTableRecord * regAlterTabPtr, - TableRecordPtr origTablePtr, - TableRecordPtr newTablePtr); - void revertAlterTable(Signal * signal, - Uint32 changeMask, - Uint32 tableId, - CreateTableRecord * regAlterTabPtr); - void alterTable_backup_mutex_locked(Signal* signal, Uint32, Uint32); - void alterTableRef(Signal * signal, - AlterTableReq *, AlterTableRef::ErrorCode, - ParseDictTabInfoRecord* parseRecord = NULL); - void alterTabRef(Signal * signal, - AlterTabReq *, AlterTableRef::ErrorCode, - ParseDictTabInfoRecord* parseRecord = NULL); - void alterTab_writeSchemaConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void alterTab_writeTableConf(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - - void prepDropTab_nextStep(Signal* signal, DropTableRecordPtr); - void prepDropTab_complete(Signal* signal, DropTableRecordPtr); - void prepDropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32); - - void dropTab_localDROP_TAB_CONF(Signal* signal); - void dropTab_nextStep(Signal* signal, DropTableRecordPtr); - void dropTab_complete(Signal* signal, Uint32 dropTabPtrI, Uint32); - void dropTab_writeSchemaConf(Signal* signal, Uint32 dropTabPtrI, Uint32); - - void createTab_prepare(Signal* signal, CreateTabReq * req); - void createTab_writeSchemaConf1(Signal* signal, Uint32 callback, Uint32); - void createTab_writeTableConf(Signal* signal, Uint32 callbackData, Uint32); - void createTab_dih(Signal*, CreateTableRecordPtr, - SegmentedSectionPtr, Callback*); - void createTab_dihComplete(Signal* signal, Uint32 callbackData, Uint32); - - void createTab_startLcpMutex_locked(Signal* signal, Uint32, Uint32); - void createTab_startLcpMutex_unlocked(Signal* signal, Uint32, Uint32); - - void createTab_commit(Signal* signal, CreateTabReq * req); - void createTab_writeSchemaConf2(Signal* signal, Uint32 callbackData, Uint32); - void createTab_alterComplete(Signal*, Uint32 callbackData, Uint32); - - void createTab_drop(Signal* signal, CreateTabReq * req); - void createTab_dropComplete(Signal* 
signal, Uint32 callbackData, Uint32); - - void createTab_reply(Signal* signal, CreateTableRecordPtr, Uint32 nodeId); - void alterTab_activate(Signal*, CreateTableRecordPtr, Callback*); - - void restartCreateTab(Signal*, Uint32, - const SchemaFile::TableEntry *, - const SchemaFile::TableEntry *, bool); - void restartCreateTab_readTableConf(Signal* signal, Uint32 callback, Uint32); - void restartCreateTab_writeTableConf(Signal* signal, Uint32 callback, Uint32); - void restartCreateTab_dihComplete(Signal* signal, Uint32 callback, Uint32); - void restartCreateTab_activateComplete(Signal*, Uint32 callback, Uint32); - - void restartDropTab(Signal* signal, Uint32 tableId, - const SchemaFile::TableEntry *, - const SchemaFile::TableEntry *); - void restartDropTab_complete(Signal*, Uint32 callback, Uint32); - - void restartDropObj(Signal*, Uint32, const SchemaFile::TableEntry *); - void restartDropObj_prepare_start_done(Signal*, Uint32, Uint32); - void restartDropObj_prepare_complete_done(Signal*, Uint32, Uint32); - void restartDropObj_commit_start_done(Signal*, Uint32, Uint32); - void restartDropObj_commit_complete_done(Signal*, Uint32, Uint32); - - void restart_checkSchemaStatusComplete(Signal*, Uint32 callback, Uint32); - void restart_writeSchemaConf(Signal*, Uint32 callbackData, Uint32); - void masterRestart_checkSchemaStatusComplete(Signal*, Uint32, Uint32); - - void sendSchemaComplete(Signal*, Uint32 callbackData, Uint32); - - void execCREATE_OBJ_REQ(Signal* signal); - void execCREATE_OBJ_REF(Signal* signal); - void execCREATE_OBJ_CONF(Signal* signal); - - void createObj_prepare_start_done(Signal* signal, Uint32 callback, Uint32); - void createObj_writeSchemaConf1(Signal* signal, Uint32 callback, Uint32); - void createObj_writeObjConf(Signal* signal, Uint32 callbackData, Uint32); - void createObj_prepare_complete_done(Signal*, Uint32 callbackData, Uint32); - void createObj_commit_start_done(Signal* signal, Uint32 callback, Uint32); - void createObj_writeSchemaConf2(Signal* signal, Uint32 callbackData, Uint32); - void createObj_commit_complete_done(Signal*, Uint32 callbackData, Uint32); - void createObj_abort(Signal*, struct CreateObjReq*); - void createObj_abort_start_done(Signal*, Uint32 callbackData, Uint32); - void createObj_abort_writeSchemaConf(Signal*, Uint32 callbackData, Uint32); - void createObj_abort_complete_done(Signal*, Uint32 callbackData, Uint32); - - void schemaOp_reply(Signal* signal, SchemaTransaction *, Uint32); - void trans_commit_start_done(Signal*, Uint32 callbackData, Uint32); - void trans_commit_complete_done(Signal*, Uint32 callbackData, Uint32); - void trans_abort_start_done(Signal*, Uint32 callbackData, Uint32); - void trans_abort_complete_done(Signal*, Uint32 callbackData, Uint32); - - void execDROP_OBJ_REQ(Signal* signal); - void execDROP_OBJ_REF(Signal* signal); - void execDROP_OBJ_CONF(Signal* signal); - - void dropObj_prepare_start_done(Signal* signal, Uint32 callback, Uint32); - void dropObj_prepare_writeSchemaConf(Signal*, Uint32 callback, Uint32); - void dropObj_prepare_complete_done(Signal*, Uint32 callbackData, Uint32); - void dropObj_commit_start_done(Signal*, Uint32 callbackData, Uint32); - void dropObj_commit_writeSchemaConf(Signal*, Uint32 callback, Uint32); - void dropObj_commit_complete_done(Signal*, Uint32 callbackData, Uint32); - void dropObj_abort_start_done(Signal*, Uint32 callbackData, Uint32); - void dropObj_abort_writeSchemaConf(Signal*, Uint32 callback, Uint32); - void dropObj_abort_complete_done(Signal*, Uint32 callbackData, Uint32); - - void 
restartCreateObj(Signal*, Uint32, - const SchemaFile::TableEntry *, - const SchemaFile::TableEntry *, bool); - void restartCreateObj_readConf(Signal*, Uint32, Uint32); - void restartCreateObj_getTabInfoConf(Signal*); - void restartCreateObj_prepare_start_done(Signal*, Uint32, Uint32); - void restartCreateObj_write_complete(Signal*, Uint32, Uint32); - void restartCreateObj_prepare_complete_done(Signal*, Uint32, Uint32); - void restartCreateObj_commit_start_done(Signal*, Uint32, Uint32); - void restartCreateObj_commit_complete_done(Signal*, Uint32, Uint32); - - void execDICT_COMMIT_REQ(Signal*); - void execDICT_COMMIT_REF(Signal*); - void execDICT_COMMIT_CONF(Signal*); - - void execDICT_ABORT_REQ(Signal*); - void execDICT_ABORT_REF(Signal*); - void execDICT_ABORT_CONF(Signal*); - -public: - void createObj_commit(Signal*, struct SchemaOp*); - void createObj_abort(Signal*, struct SchemaOp*); - - void create_fg_prepare_start(Signal* signal, SchemaOp*); - void create_fg_prepare_complete(Signal* signal, SchemaOp*); - void create_fg_abort_start(Signal* signal, SchemaOp*); - void create_fg_abort_complete(Signal* signal, SchemaOp*); - - void create_file_prepare_start(Signal* signal, SchemaOp*); - void create_file_prepare_complete(Signal* signal, SchemaOp*); - void create_file_commit_start(Signal* signal, SchemaOp*); - void create_file_abort_start(Signal* signal, SchemaOp*); - void create_file_abort_complete(Signal* signal, SchemaOp*); - - void dropObj_commit(Signal*, struct SchemaOp*); - void dropObj_abort(Signal*, struct SchemaOp*); - void drop_file_prepare_start(Signal* signal, SchemaOp*); - void drop_file_commit_start(Signal* signal, SchemaOp*); - void drop_file_commit_complete(Signal* signal, SchemaOp*); - void drop_file_abort_start(Signal* signal, SchemaOp*); - void send_drop_file(Signal*, SchemaOp*, DropFileImplReq::RequestInfo); - - void drop_fg_prepare_start(Signal* signal, SchemaOp*); - void drop_fg_commit_start(Signal* signal, SchemaOp*); - void drop_fg_commit_complete(Signal* signal, SchemaOp*); - void drop_fg_abort_start(Signal* signal, SchemaOp*); - void send_drop_fg(Signal*, SchemaOp*, DropFilegroupImplReq::RequestInfo); - - void drop_undofile_prepare_start(Signal* signal, SchemaOp*); - void drop_undofile_commit_complete(Signal* signal, SchemaOp*); - - int checkSingleUserMode(Uint32 senderRef); -}; - -inline bool -Dbdict::TableRecord::isTable() const -{ - return DictTabInfo::isTable(tableType); -} - -inline bool -Dbdict::TableRecord::isIndex() const -{ - return DictTabInfo::isIndex(tableType); -} - -inline bool -Dbdict::TableRecord::isUniqueIndex() const -{ - return DictTabInfo::isUniqueIndex(tableType); -} - -inline bool -Dbdict::TableRecord::isNonUniqueIndex() const -{ - return DictTabInfo::isNonUniqueIndex(tableType); -} - -inline bool -Dbdict::TableRecord::isHashIndex() const -{ - return DictTabInfo::isHashIndex(tableType); -} - -inline bool -Dbdict::TableRecord::isOrderedIndex() const -{ - return DictTabInfo::isOrderedIndex(tableType); -} - - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt deleted file mode 100644 index 8d4267a1c42..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.txt +++ /dev/null @@ -1,88 +0,0 @@ - -Event creation - -USER DICT(Master) UTIL SUMA -================================================================================ -CREATE_EVENT_REQ::create ---------------------------> - - Get ID - CREATE_SUBID - -----------------------------------------------> - 
<----------------------------------------------- - - insert into system table - UTIL_PREPARE::insert - ------------------------> - <------------------------ - UTIL_EXECUTE - ------------------------> - <------------------------ -CREATE_EVENT_CONF -<-------------------------- - - -Event dropping - -USER DICT(Master) UTIL SUMA -================================================================================ -DROP_EVENT_REQ ---------------------------> - - remove from system table - UTIL_PREPARE::delete - ------------------------> - <------------------------ - UTIL_EXECUTE - ------------------------> - <------------------------ -DROP_EVENT_CONF -<-------------------------- - - - -create NdbEventOperation - -USER DICT(Master) (Slaves) UTIL -======================================================================= -CREATE_EVENT_REQ::get ---------------------------> - - read from system table - UTIL_PREPARE::read - ----------------------------------------> - <---------------------------------------- - UTIL_EXECUTE - ----------------------------------------> - <---------------------------------------- - SUMA - CREATE_EVENT_REQ::after_get ====== - ----------------------> - SUB_CREATE - ------------------> - <------------------ - SUB_SYNC - ------------------> - <------------------ - CREATE_EVENT_CONF - <---------------------- -CREATE_EVENT_CONF -<------------------------- - - - -USER DICT(Master) (Slaves) SUMA -======================================================================= -SUB_START_REQ ---------------------------> - SUB_START_REQ - ----------------------> - SUB_START - ------------------> - <------------------ - SUB_START_CONF - <---------------------- -SUB_START_CONF -<------------------------- - - -SUB_STOP analogous to SUB_STOP - - - diff --git a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt b/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt deleted file mode 100644 index 72e23ed15a5..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/DictLock.txt +++ /dev/null @@ -1,98 +0,0 @@ -Lock master DICT against schema operations - -Implementation --------------- - -[ see comments in Dbdict.hpp ] - -Use case: Node startup INR / NR -------------------------------- - -Master DICT (like any block) keeps list of alive nodes (c_aliveNodes). -These are participants in schema ops. - -(1) c_aliveNodes is initialized when DICT starts - in sp3 in READ_NODESCONF from CNTR - -(2) when slave node fails (in any sp of the slave node) - it is removed from c_aliveNodes in NODE_FAILREP - -(3) when slave starts, it is added to c_aliveNodes - in sp4 of the starting node in INCL_NODEREQ - -Slave DIH locks master DICT in sp2 and releases the lock when started. -Based on the constraints: - -- the lock is taken when master DICT is known - DIH reads this in sp2 in READ_NODESCONF - -- the lock is taken before (3) - -- the lock is taken before copying starts and held until it is done - in sp4 DIH meta, DICT meta, tuple data - -- on INR in sp2 in START_PERMREQ the LCP info of the slave is erased - in all DIH in invalidateNodeLCP() - not safe under schema ops - -Signals: - -All but DICT_LOCK are standard v5.0 signals. -s=starting node, m=master, a=all participants, l=local block. 
- -* sp2 - DICT_LOCK and START_PERM - -DIH/s - DICT_LOCK_REQ - DICT/m - DICT_LOCK_CONF -DIH/s - START_PERMREQ - DIH/m - START_INFOREQ - DIH/a - invalidateNodeLCP() if INR - DIH/a - START_INFOCONF - DIH/m - START_PERMCONF -DIH/s - -* sp4 - START_ME (copy metadata, no changes) - -DIH/s - START_MEREQ - DIH/m - COPY_TABREQ - DIH/s - COPY_TABCONF - DIH/m - DICTSTARTREQ - DICT/s - GET_SCHEMA_INFOREQ - DICT/m - SCHEMA_INFO - DICT/s - DICTSTARTCONF - DIH/m - INCL_NODEREQ - DIH/a - INCL_NODEREQ - ANY/l - INCL_NODECONF - DIH/a - INCL_NODECONF - DIH/m - START_MECONF -DIH/s - -* (copy data, omitted) - -* SL_STARTED - release DICT lock - -CNTR/s - NODE_START_REP - DIH/s - DICT_UNLOCK_ORD - DICT/m - -# vim: set et sw=4: diff --git a/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt b/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt deleted file mode 100644 index 8d364d15c57..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/DropTable.txt +++ /dev/null @@ -1,140 +0,0 @@ -DROP TABLE DESCRIPTION ----------------------- - -Drop table is controlled by DICT. - -Drop table is used in the following cases in some sort. - - Drop Table - - Abort Add Table - - Drop table in node restart - - Drop table in system restart - -Sequence of Drop Table: ------------------------ - -1) PREP_DROP_TAB_REQ -> all DICT - Update schema files on disk - Table status = DROPPING - -2) Controlling DICT only - Report Table Dropped secured but not yet completed. - ------- PREP DROP - -4) PREP_DROP_TAB_REQ -> all LQHs - -5) PREP_DROP_TAB_REQ -> all TCs - -6) PREP_DROP_TAB_REQ -> all DIHs - - ---- LQH::PREP_DROP_TAB_REQ - -*) Mark the table so that no new operations will start -*) Mark all fragments so that new LCP_FRAG_ORD gets replied directly - w.o actually checkpointing the fragment -2) Start waiting for completion -3) Reply PREP_DROP_TAB_CONF - -- After this LQH accepts WAIT_DROP_TAB_REQ - ---- TC::PREP_DROP_TAB_REQ - -1) Mark the table so that no new transactions will start on the table -2) Send WAIT_DROP_TAB_REQ -> all connected LQH's -3) Wait for CONF (including NF-handling) from LQH:s -4) Reply PREP_DROP_TAB_CONF - ---- DIH::PREP_DROP_TAB_REQ - -1) Mark the table so that no new LCP will start on the table -2) If master (unlink any queued LCP_FRAG_ORD) -3) Send WAIT_DROP_TAB_REQ -> all connected LQH's -4) Wait for CONF (including NF-handling) from LQH:s -5) Reply PREP_DROP_TAB_CONF - ---- LQH::WAIT_DROP_TAB_REQ - -1) Wait for running operations - Wait for running LCP - -2) Reply - ------- PREP_DROP - -7) DROP_TAB_REQ -> all DICT's - *) DROP_TAB_REQ -> TC - *) DROP_TAB_REQ -> ACC - *) DROP_TAB_REQ -> TUP - *) DROP_TAB_REQ -> DIH - *) DROP_TAB_REQ -> LQH - *) Update schema files on disk DROPPED - -8) DICT_SCHEMAREQ -> all DICT - Table status = DROPPED - ---------------------------------- - -Sequence of Drop table in node/system restart ---------------------------------------------- - -In both node and system restart the node receives the schema information from -the master. If the table is in a state where it needs to complete the drop -table activity then DBACC, DBTUP, DBDIH, DBDICT is contacted to drop all files -related to the table. After this the schema information is updated with the new -state. Since all nodes receive the same schema information there is no risk of -different behaviour in the various NDB nodes. 
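[Editor's sketch] The prepare phase described above is essentially a fence-then-drain step: each block (LQH, TC, DIH) first refuses new work on the table being dropped, then waits until the operations and checkpoints already in flight have completed before answering PREP_DROP_TAB_CONF. The snippet below is only an illustrative sketch of that idea, not the real kernel code: the struct and member names (TableFence, opsInFlight, prepareDrop) are invented for the example, and it uses a mutex/condition variable purely to make the drain step concrete, whereas the actual NDB blocks are single-threaded and signal-driven.

    // Sketch only: fence-then-drain, loosely modelled on the
    // PREP_DROP_TAB_REQ handling outlined above. All names hypothetical.
    #include <condition_variable>
    #include <mutex>

    struct TableFence {
      std::mutex m;
      std::condition_variable cv;
      bool dropping = false;     // set when the "prepare drop" request arrives
      unsigned opsInFlight = 0;  // operations started before the fence

      bool tryStartOp() {                 // a new operation asks to run
        std::lock_guard<std::mutex> g(m);
        if (dropping) return false;       // refused: table is being dropped
        ++opsInFlight;
        return true;
      }
      void finishOp() {                   // an in-flight operation completes
        std::lock_guard<std::mutex> g(m);
        if (--opsInFlight == 0) cv.notify_all();
      }
      void prepareDrop() {                // mark the table, then drain
        std::unique_lock<std::mutex> l(m);
        dropping = true;                  // no new operations from now on
        cv.wait(l, [this] { return opsInFlight == 0; });
        // at this point the real block would reply PREP_DROP_TAB_CONF
      }
    };
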
- -API Requirements for Drop Table -------------------------------- -Definition: - - Two tables are NOT the same if they were created with two create - tables at different points in time, even if the two create tables - had exactly the same definition. - -Requirements: - -1. Each operation in a transaction refering to a table (by name or by id) - should operate on the same table. (This is probably necessary.) - -2. Each operation in a transaction refering to a table (by name or by - id) should operate on the same table as were defined at the - startTransaction timepoint. (This is not strictly necessary for - API consistency.) - - Example 1: - - startTransaction() - - drop("TableName1") - create("TableName1") - - getNdbOperation("TableName1") - - execute(commit) - - - If both requirements 1 and 2 are fulfilled, then this should lead - to "Error: Invalid Schema Version" or similar error - - - If only requirement 1 is fulfilled, then this may be executed - without any errors. - - - Example 2: - - startTransaction() - - getNdbOperation("TableName1") - execute(NoCommit) - - drop("TableName1") - create("TableName1") - - getNdbOperation("TableName1") - - execute(commit) - - - This should always lead to "Error: Invalid Schema Version" or - similar error. - diff --git a/storage/ndb/src/kernel/blocks/dbdict/Event.txt b/storage/ndb/src/kernel/blocks/dbdict/Event.txt deleted file mode 100644 index 553c915d9c5..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Event.txt +++ /dev/null @@ -1,102 +0,0 @@ - -Event creation - -USER DICT(Master) UTIL SUMA -================================================================================ -CREATE_EVENT_REQ::create ---------------------------> - - Get ID - CREATE_SUBID - -----------------------------------------------> - <----------------------------------------------- - - insert into system table - UTIL_PREPARE::insert - ------------------------> - <------------------------ - UTIL_EXECUTE - ------------------------> - <------------------------ -CREATE_EVENT_CONF -<-------------------------- - - -Event dropping - -USER DICT(Master) (Slaves) UTIL SUMA -================================================================================ -DROP_EVENT_REQ ---------------------------> - - read from system table - UTIL_PREPARE::read - ------------------------------------> - <------------------------------------ - UTIL_EXECUTE - ------------------------------------> - <------------------------------------ - SUB_REMOVE_REQ - --------------------> - SUB_REMOVE - ------------------------------> - <------------------------------ - SUB_REMOVE_CONF - <-------------------- - - remove from system table - UTIL_PREPARE::delete - ------------------------------------> - <------------------------------------ - UTIL_EXECUTE - ------------------------------------> - <------------------------------------ -DROP_EVENT_CONF -<-------------------------- - - - -create NdbEventOperation - -USER DICT(Master) (Slaves) UTIL -======================================================================= -CREATE_EVENT_REQ::get ---------------------------> - - read from system table - UTIL_PREPARE::read - ----------------------------------------> - <---------------------------------------- - UTIL_EXECUTE - ----------------------------------------> - <---------------------------------------- - SUMA - CREATE_EVENT_REQ::after_get ====== - ----------------------> - SUB_CREATE - ------------------> - <------------------ - SUB_SYNC - ------------------> - <------------------ - CREATE_EVENT_CONF - 
<---------------------- -CREATE_EVENT_CONF -<------------------------- - - - -USER DICT(Master) (Slaves) SUMA -======================================================================= -SUB_START_REQ ---------------------------> - SUB_START_REQ - ----------------------> - SUB_START - ------------------> - <------------------ - SUB_START_CONF - <---------------------- -SUB_START_CONF -<------------------------- - - -SUB_STOP analogous to SUB_STOP - - - diff --git a/storage/ndb/src/kernel/blocks/dbdict/Makefile.am b/storage/ndb/src/kernel/blocks/dbdict/Makefile.am deleted file mode 100644 index 6b900369b10..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Makefile.am +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am - -LDADD += \ - $(top_builddir)/storage/ndb/src/common/util/libgeneral.la \ - $(top_builddir)/storage/ndb/src/common/portlib/libportlib.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a - -ndbtools_PROGRAMS = ndb_print_schema_file -ndb_print_schema_file_SOURCES = printSchemaFile.cpp -ndb_print_schema_file_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a diff --git a/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl deleted file mode 100644 index 69fbb973133..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Master_AddTable.sfl +++ /dev/null @@ -1,765 +0,0 @@ -// Copyright (C) 2004 MySQL AB -// Use is subject to license terms -// -// This program is free software; you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation; version 2 of the License. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// --------------------------------------------------------------------------- -// This file contains a signal log trace for DBDICT at the master for a -// create table. Another file contains the signal log for the participant -// node. Master node is 2, participant node 4 and api node is 3. 
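[Editor's sketch] Every signal in the trace below is logged as a "---- Received/Send ... Signal ----" marker followed by a receiver line (r.bn, r.proc, sigId, gsn, prio), a sender line (s.bn, s.proc, length, trace) and the raw signal words. The following is a rough sketch of pulling the interesting fields out of the receiver line of a "Received" entry (the "Send" variant puts sigId on the sender line instead). The field layout is read off the excerpts below; the struct, field and function names are assumptions made up for the example.

    // Sketch only: parse one 'r.bn: ...' header line from a signal log trace.
    // Layout follows the excerpts in this file; names are hypothetical.
    #include <cstdio>
    #include <string>

    struct SigHeader {
      int  recvBlock;      // r.bn, e.g. 250 = DBDICT
      char recvName[16];   // block name as quoted in the trace
      int  recvNode;       // r.proc, e.g. 2 = master node
      int  sigId;
      int  gsn;            // global signal number, e.g. 204 = DICTTABINFO
      char gsnName[24];
      int  prio;
    };

    static bool parseRecvHeader(const std::string& line, SigHeader& h) {
      // e.g. r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1
      return std::sscanf(line.c_str(),
                         "r.bn: %d \"%15[^\"]\", r.proc: %d, sigId: %d gsn: %d \"%23[^\"]\" prio: %d",
                         &h.recvBlock, h.recvName, &h.recvNode,
                         &h.sigId, &h.gsn, h.gsnName, &h.prio) == 7;
    }
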
- -// --------------------------------------------------------------------------- -// First arrives the table description in a number of DICTTABINFO signals. -// These have a header of 5 words (see DictTabInfo.hpp for details) and -// upto 20 words of property data per signal. The property data is packed -// by the SimpleProperties class. -// --------------------------------------------------------------------------- ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0 - H'00010003 H'00047700 H'00000001 H'00000042 H'00000000 H'4e444250 H'524f5053 - H'00010000 H'00000000 H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f - H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f - H'49440000 H'000103ee H'00000001 H'000203e8 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0 - H'00010003 H'00047700 H'00000001 H'00000042 H'00000014 H'00000007 H'56504e5f - H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52 - H'595f4e42 H'00000000 H'000103eb H'00000003 H'000103ed H'0000000a H'000103ec - H'00000002 H'000203e8 H'00000010 H'4c415354 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 25 trace: 0 - H'00010003 H'00047700 H'00000001 H'00000042 H'00000028 H'5f43414c H'4c5f5041 - H'52545900 H'000103eb H'00000003 H'000103ed H'0000000a H'000103ec H'00000002 - H'000203e8 H'00000006 H'44455343 H'52000000 H'000103eb H'00000003 H'000103ed - H'00000064 H'000103ec H'00000002 H'00010005 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 0 "API", s.proc: 3, s.sigId: 940284 length: 11 trace: 0 - H'00010003 H'00047700 H'00000001 H'00000042 H'0000003c H'00000002 H'00010006 - H'00000005 H'0001000c H'00000002 H'0000ffff - -// --------------------------------------------------------------------------- -// Send DICT_SCHEMAREQ to all nodes including ourselves to write the state -// ADD_STARTED in the schema file for the new table. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57069 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57077 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001 - -// --------------------------------------------------------------------------- -// Write both schema files with new state of table added. 
-// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57069 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57081 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57082 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 99 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57081 length: 8 trace: 0 - FilePointer: 99 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57090 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57091 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57090 length: 4 trace: 0 - FilePointer: 99 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57099 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57100 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57099 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57111 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57112 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 100 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57111 length: 8 trace: 0 - FilePointer: 100 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57123 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57124 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57123 length: 4 trace: 0 - FilePointer: 100 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57133 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 1 trace: 0 - 
H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57135 length: 1 trace: 0 - H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46718 length: 1 trace: 0 - H'00000004 - -// --------------------------------------------------------------------------- -// Pack Table description into pages in DICT using SimpleProperties class. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 3 trace: 0 - H'00000001 H'00000002 H'00000000 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57140 length: 3 trace: 0 - H'00000001 H'00000002 H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57141 length: 2 trace: 0 - H'00000002 H'00000002 - -// --------------------------------------------------------------------------- -// Send the table description over to the other NDB nodes. -// A CONTINUEB is sent for each signal sent to avoid overloading the -// transporters. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000000 H'4e444250 H'524f5053 - H'00002000 H'0000001c H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f - H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f - H'49440000 H'1cc03924 H'00000001 H'000203e8 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57142 length: 2 trace: 0 - H'00000002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000014 H'00000007 H'56504e5f - H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52 - H'595f4e42 H'00000000 H'000103eb H'00000003 H'524f4c46 H'00020001 H'0000000a - H'56504e5f H'55534552 H'53000010 H'00010002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57143 length: 2 trace: 0 - H'00000002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 
"DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000028 H'00000002 H'00010011 - H'00000003 H'00010003 H'00000001 H'00010005 H'00000002 H'00010006 H'00000005 - H'0001000a H'0000004b H'0001000c H'00000002 H'000203e8 H'00000007 H'56504e5f - H'49440064 H'000103e9 H'00000000 H'000103ee ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57144 length: 2 trace: 0 - H'00000002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'0000003c H'00000001 H'000203e8 - H'00000007 H'56504e5f H'4e420002 H'000103e9 H'00000001 H'000103ee H'00000001 - H'000203e8 H'0000000d H'44495245 H'43544f52 H'595f4e42 H'00000000 H'000103e9 - H'00000002 H'000103eb H'00000003 H'000103ec ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57145 length: 2 trace: 0 - H'00000002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000050 H'00000002 H'000103ed - H'0000000a H'000203e8 H'00000010 H'4c415354 H'5f43414c H'4c5f5041 H'52545900 - H'000103e9 H'00000003 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed - H'0000000a H'000203e8 H'00000006 H'44455343 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 2 trace: 0 - H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57132 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57146 length: 2 trace: 0 - H'00000002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 15 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000064 H'52000000 H'000103e9 - H'00000004 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed H'00000064 - H'0000ffff - -// --------------------------------------------------------------------------- -// In parallel with sending the table description to other nodes we will also -// write the table description to our local file system. 
-// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57132 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010401ff - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57165 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57166 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 101 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57165 length: 8 trace: 0 - FilePointer: 101 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000000, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57177 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57178 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57177 length: 4 trace: 0 - FilePointer: 101 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57186 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57187 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57186 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010402ff - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57195 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57196 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 102 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57195 length: 8 trace: 0 - FilePointer: 102 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000000, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57204 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57205 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57204 length: 4 trace: 0 - FilePointer: 102 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57218 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57219 length: 1 trace: 0 - UserPointer: H'00000000 - -// --------------------------------------------------------------------------- -// Completed writing to our file system the table description. 
-// --------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57229 gsn: 24 "DICTTABCONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46803 length: 2 trace: 0 - H'00000002 H'00000004 - -// --------------------------------------------------------------------------- -// Also the participant has completed writing the table description to file. -// --------------------------------------------------------------------------- - -// --------------------------------------------------------------------------- -// Write the state UPDATE_PAGE_COUNT to schema file for the new table. -// This also contains the number of pages used for the table description. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57229 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57234 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002 - -// --------------------------------------------------------------------------- -// Write schema file to disk -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57229 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57238 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57239 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 103 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57238 length: 8 trace: 0 - FilePointer: 103 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57247 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57248 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57247 length: 4 trace: 0 - FilePointer: 103 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57257 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57258 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 
250 "DBDICT", s.proc: 2, sigId: 57257 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57267 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57268 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 104 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57267 length: 8 trace: 0 - FilePointer: 104 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57279 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57283 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57279 length: 4 trace: 0 - FilePointer: 104 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57290 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 57291 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57290 length: 1 trace: 0 - H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57290 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57293 length: 1 trace: 0 - H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57299 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46860 length: 1 trace: 0 - H'00000004 - -// --------------------------------------------------------------------------- -// All schema files in the system have been updated. -// --------------------------------------------------------------------------- - -// --------------------------------------------------------------------------- -// Now control is given to DIH for adding the fragments needed by this table. -// We first seize a record in DIH and then we send the add table request with -// the needed table parameters. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 238 "DISEIZEREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57299 length: 2 trace: 0 - H'00000000 H'00fa0002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57299 gsn: 236 "DISEIZECONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57304 length: 2 trace: 0 - H'00000000 H'00000210 ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 187 "DIADDTABREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57299 length: 6 trace: 0 - H'00000210 H'00000002 H'00000000 H'00000006 H'00000000 H'00000001 - -// --------------------------------------------------------------------------- -// DIH requests us to add a certain fragment replica. 
-// --------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 195 "DICTFRAGSREQ" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57418 length: 7 trace: 0 - H'00000000 H'00000000 H'00000000 H'00000002 H'00150040 H'00000001 H'00000002 - -// --------------------------------------------------------------------------- -// We add the fragment by contacting LQH through sending a LQHFRAGREQ and -// a number of LQHADDATTREQ (in this case only one since not more than 8 -// attributes). -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 247 "DBLQH", r.proc: 2, gsn: 313 "LQHFRAGREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 17 trace: 0 - H'00000000 H'00fa0002 H'00000000 H'00000000 H'00000002 H'00000001 H'00000050 - H'0000004b H'00000006 H'00000001 H'00000000 H'00000005 H'00000000 H'00000000 - H'00000001 H'00000002 H'00000000 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 311 "LQHFRAGCONF" prio: 1 -s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 57428 length: 2 trace: 0 - H'00000000 H'00000000 ----- Send ----- Signal ---------------- -r.bn: 247 "DBLQH", r.proc: 2, gsn: 310 "LQHADDATTREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 12 trace: 0 - H'00000000 H'00000005 H'00000000 H'00012255 H'00000001 H'00012255 H'00000002 - H'000a2236 H'00000003 H'000a2236 H'00000004 H'00642236 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57400 gsn: 308 "LQHADDATTCONF" prio: 1 -s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 57450 length: 1 trace: 0 - H'00000000 - -// --------------------------------------------------------------------------- -// When we have completed adding the fragment we send DINEXTNODEREQ (should -// change name to DICTFRAGSCONF) to DIH to indicate we have completed the task. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 231 "DINEXTNODEREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57400 length: 4 trace: 0 - H'00000210 H'00000000 H'00000001 H'00000000 - -// --------------------------------------------------------------------------- -// We continue by performing the same task again for the next fragment replica. -// We skip these from this log since they contain no more interesting stuff. -// --------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 57618 gsn: 185 "DIADDTABCONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 57655 length: 2 trace: 0 - H'00000000 H'00000002 - -// --------------------------------------------------------------------------- -// Now that we have added all fragments DIH gives back control to DICT by -// sending DIADDTABCONF. -// --------------------------------------------------------------------------- - -// --------------------------------------------------------------------------- -// It is now time to decide in which global checkpoint this table will be born. 
-// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 499 "WAIT_GCP_REQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 57618 length: 3 trace: 0 - H'00fa0002 H'00000000 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58288 gsn: 501 "WAIT_GCP_CONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58296 length: 2 trace: 0 - H'00000000 H'0000000c - -// --------------------------------------------------------------------------- -// We can update all schema files in the system with this global checkpoint -// number. We are certain that no transaction will be performed on the table -// before this global checkpoint. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58288 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58298 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003 - -// --------------------------------------------------------------------------- -// Write schema files as usual when updating schema file state. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58288 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58304 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58305 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 117 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58304 length: 8 trace: 0 - FilePointer: 117 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58315 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58316 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58315 length: 4 trace: 0 - FilePointer: 117 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58326 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58327 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", 
r.proc: 2, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58326 length: 7 trace: 0 - UserReference: H'00fa0002, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58339 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58340 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 118 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58339 length: 8 trace: 0 - FilePointer: 118 - UserReference: H'00fa0002, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58348 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58349 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 2, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58348 length: 4 trace: 0 - FilePointer: 118 - UserReference: H'00fa0002, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 2, s.sigId: 58360 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0 - H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58364 length: 1 trace: 0 - H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 47846 length: 1 trace: 0 - H'00000004 - -// --------------------------------------------------------------------------- -// Commit the table for usage in DIH and LQH in all nodes. 
-// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 247 "DBLQH", r.proc: 2, gsn: 398 "TAB_COMMITREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00000000 H'00fa0002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 398 "TAB_COMMITREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00000001 H'00fa0002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 247 "DBLQH", r.proc: 4, gsn: 398 "TAB_COMMITREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00000000 H'00fa0002 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 4, gsn: 398 "TAB_COMMITREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00000001 H'00fa0002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1 -s.bn: 247 "DBLQH", s.proc: 2, s.sigId: 58370 length: 3 trace: 0 - H'00000000 H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58371 length: 3 trace: 0 - H'00000001 H'00000002 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1 -s.bn: 247 "DBLQH", s.proc: 4, s.sigId: 47846 length: 3 trace: 0 - H'00000000 H'00000004 H'00000002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 396 "TAB_COMMITCONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 4, s.sigId: 47846 length: 3 trace: 0 - H'00000001 H'00000004 H'00000002 - -// --------------------------------------------------------------------------- -// Finally also open the table for usage from TC in all nodes. -// After this signal is received in TC it is ok to execute transactions on -// this new empty table. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 245 "DBTC", r.proc: 2, gsn: 404 "TC_SCHVERREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 5 trace: 0 - H'00000002 H'00000001 H'00000001 H'00fa0002 H'00000000 ----- Send ----- Signal ---------------- -r.bn: 245 "DBTC", r.proc: 4, gsn: 404 "TC_SCHVERREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 5 trace: 0 - H'00000002 H'00000001 H'00000001 H'00fa0002 H'00000000 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 403 "TC_SCHVERCONF" prio: 1 -s.bn: 245 "DBTC", s.proc: 2, s.sigId: 58376 length: 2 trace: 0 - H'00000002 H'00000000 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 403 "TC_SCHVERCONF" prio: 1 -s.bn: 245 "DBTC", s.proc: 4, s.sigId: 47846 length: 2 trace: 0 - H'00000002 H'00000001 - -// --------------------------------------------------------------------------- -// Unblock dictionary to allow for another add table. 
-// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 444 "UNBLO_DICTREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0 - H'00fa0002 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 444 "UNBLO_DICTREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 1 trace: 0 - H'00fa0002 - -// --------------------------------------------------------------------------- -// Send the confirmation to the requesting application process. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 1 "API", r.proc: 3, gsn: 24 "DICTTABCONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00047700 H'00000002 H'00000001 - -// --------------------------------------------------------------------------- -// Also release the connection in DIH that was previously established. -// --------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 246 "DBDIH", r.proc: 2, gsn: 234 "DIRELEASEREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, sigId: 58359 length: 3 trace: 0 - H'00000210 H'00000000 H'00fa0002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 444 "UNBLO_DICTREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58378 length: 1 trace: 0 - H'00fa0002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, sigId: 58359 gsn: 232 "DIRELEASECONF" prio: 1 -s.bn: 246 "DBDIH", s.proc: 2, s.sigId: 58380 length: 1 trace: 0 - H'00000000 - -// --------------------------------------------------------------------------- -// Now all actions regarding this add table have completed. -// --------------------------------------------------------------------------- diff --git a/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp deleted file mode 100644 index 42c6e30792f..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBDICT_SCHEMA_FILE_HPP -#define DBDICT_SCHEMA_FILE_HPP - -#include -#include -#include - -#define NDB_SF_MAGIC "NDBSCHMA" - -// page size 4k -#define NDB_SF_PAGE_SIZE_IN_WORDS_LOG2 10 -#define NDB_SF_PAGE_SIZE_IN_WORDS (1 << NDB_SF_PAGE_SIZE_IN_WORDS_LOG2) -#define NDB_SF_PAGE_SIZE (NDB_SF_PAGE_SIZE_IN_WORDS << 2) - -// 4k = (1 + 127) * 32 -#define NDB_SF_PAGE_ENTRIES 127 - -// 160 pages = 20320 objects -#define NDB_SF_MAX_PAGES 160 - -// versions where format changed -#define NDB_SF_VERSION_5_0_6 MAKE_VERSION(5, 0, 6) - -// One page in schema file. 
-struct SchemaFile { - // header size 32 bytes - char Magic[8]; - Uint32 ByteOrder; - Uint32 NdbVersion; - Uint32 FileSize; // In bytes - Uint32 PageNumber; - Uint32 CheckSum; // Of this page - Uint32 NoOfTableEntries; // On this page (NDB_SF_PAGE_ENTRIES) - - enum TableState { - INIT = 0, - ADD_STARTED = 1, - TABLE_ADD_COMMITTED = 2, - DROP_TABLE_STARTED = 3, - DROP_TABLE_COMMITTED = 4, - ALTER_TABLE_COMMITTED = 5, - TEMPORARY_TABLE_COMMITTED = 6 - }; - - // entry size 32 bytes - struct TableEntry { - Uint32 m_tableState; - Uint32 m_tableVersion; - Uint32 m_tableType; - Uint32 m_info_words; - Uint32 m_gcp; - Uint32 m_unused[3]; - - bool operator==(const TableEntry& o) const { - return memcmp(this, &o, sizeof(* this))== 0; - } - }; - - // pre-5.0.6 - struct TableEntry_old { - Uint32 m_tableState; - Uint32 m_tableVersion; - Uint32 m_tableType; - Uint32 m_noOfPages; - Uint32 m_gcp; - }; - - union { - TableEntry TableEntries[NDB_SF_PAGE_ENTRIES]; - TableEntry_old TableEntries_old[1]; - }; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl b/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl deleted file mode 100644 index 20512b6f975..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/Slave_AddTable.sfl +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (C) 2004 MySQL AB -// Use is subject to license terms -// -// This program is free software; you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation; version 2 of the License. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// --------------------------------------------------------------------------- -// This file contains a signal log trace for DBDICT at the master for a -// create table. Another file contains the signal log for the participant -// node. Master node is 2, participant node 4 and api node is 3. - -// --------------------------------------------------------------------------- -// This file contains a signal log trace for DBDICT at the participant for a -// add table. Another file contains the signal log for the master -// node. Master node is 2, participant node 4 and api node is 3. -// - -// --------------------------------------------------------------------------- -//-------------------------------------------------------------------------- -// Master requests us to save a new state of the table in the schema file -// == ADD_STARTED -//-------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46661 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57069 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000000 H'00000000 H'00000001 - -//-------------------------------------------------------------------------- -// Write the new state to the schema files. 
-//-------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46661 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46669 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46670 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 99 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46669 length: 8 trace: 0 - FilePointer: 99 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46679 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46680 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46679 length: 4 trace: 0 - FilePointer: 99 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46690 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46691 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46690 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46700 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46701 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 100 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46700 length: 8 trace: 0 - FilePointer: 100 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46709 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46710 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46709 length: 4 trace: 0 - FilePointer: 100 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46719 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 1 trace: 0 - 
H'00000004 - -//-------------------------------------------------------------------------- -// We receive the table description from the master node. -// We set the data in the DICT block. (table and attribute records). -//-------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000000 H'4e444250 H'524f5053 - H'00002000 H'0000001c H'1c0a1203 H'524f4c46 H'00020001 H'0000000a H'56504e5f - H'55534552 H'53000000 H'0001000a H'0000004b H'000203e8 H'00000007 H'56504e5f - H'49440000 H'1cc03924 H'00000001 H'000203e8 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000014 H'00000007 H'56504e5f - H'4e420000 H'000103ee H'00000001 H'000203e8 H'0000000d H'44495245 H'43544f52 - H'595f4e42 H'00000000 H'000103eb H'00000003 H'524f4c46 H'00020001 H'0000000a - H'56504e5f H'55534552 H'53000010 H'00010002 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000028 H'00000002 H'00010011 - H'00000003 H'00010003 H'00000001 H'00010005 H'00000002 H'00010006 H'00000005 - H'0001000a H'0000004b H'0001000c H'00000002 H'000203e8 H'00000007 H'56504e5f - H'49440064 H'000103e9 H'00000000 H'000103ee ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'0000003c H'00000001 H'000203e8 - H'00000007 H'56504e5f H'4e420002 H'000103e9 H'00000001 H'000103ee H'00000001 - H'000203e8 H'0000000d H'44495245 H'43544f52 H'595f4e42 H'00000000 H'000103e9 - H'00000002 H'000103eb H'00000003 H'000103ec ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 25 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000050 H'00000002 H'000103ed - H'0000000a H'000203e8 H'00000010 H'4c415354 H'5f43414c H'4c5f5041 H'52545900 - H'000103e9 H'00000003 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed - H'0000000a H'000203e8 H'00000006 H'44455343 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 204 "DICTTABINFO" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57132 length: 15 trace: 0 - H'00fa0002 H'00000000 H'00000002 H'0000006e H'00000064 H'52000000 H'000103e9 - H'00000004 H'000103eb H'00000003 H'000103ec H'00000002 H'000103ed H'00000064 - H'0000ffff - -//-------------------------------------------------------------------------- -// Pack the table description into pages. 
-//-------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 3 trace: 0 - H'00000001 H'00000002 H'00000000 ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46718 gsn: 164 "CONTINUEB" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, s.sigId: 46730 length: 3 trace: 0 - H'00000001 H'00000002 H'00000000 - -//-------------------------------------------------------------------------- -// Write the pages of the table description to disk. -//-------------------------------------------------------------------------- - ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46718 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010401ff - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46748 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46749 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 101 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46748 length: 8 trace: 0 - FilePointer: 101 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000000, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46757 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46758 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46757 length: 4 trace: 0 - FilePointer: 101 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46766 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46767 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46766 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'00000002 H'ffffffff H'00000001 H'010402ff - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46783 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46784 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 102 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46783 length: 8 trace: 0 - FilePointer: 102 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000000, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46794 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46795 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal 
---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46794 length: 4 trace: 0 - FilePointer: 102 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46803 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46804 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 24 "DICTTABCONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46803 length: 2 trace: 0 - H'00000002 H'00000004 - -//-------------------------------------------------------------------------- -// Update schema file ín memory and on disk to UPDATE_PAGE_COUNT. -//-------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46803 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 57229 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'00000000 H'00000002 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46803 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46813 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46814 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 103 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46813 length: 8 trace: 0 - FilePointer: 103 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46823 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46824 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46823 length: 4 trace: 0 - FilePointer: 103 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46833 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46834 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46833 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46842 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46843 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 104 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46842 length: 8 trace: 0 - 
FilePointer: 104 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46851 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46852 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46851 length: 4 trace: 0 - FilePointer: 104 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 46860 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 46861 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 46860 length: 1 trace: 0 - H'00000004 - -//-------------------------------------------------------------------------- -// Update schema file with information about the starting global checkpoint -// identity. -//-------------------------------------------------------------------------- - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47782 gsn: 132 "DICT_SCHEMAREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58288 length: 7 trace: 0 - H'00010003 H'00047700 H'00000002 H'00000001 H'00000001 H'0000000c H'00000003 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47782 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050100 - FileFlags: H'00000311 Open write only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47793 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47794 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 117 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47793 length: 8 trace: 0 - FilePointer: 117 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47804 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47805 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47804 length: 4 trace: 0 - FilePointer: 117 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47817 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47818 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 261 "FSOPENREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47817 length: 7 trace: 0 - UserReference: H'00fa0004, userPointer: H'00000000 - FileNumber[1-4]: H'ffffffff H'ffffffff H'ffffffff H'01050200 - FileFlags: H'00000311 Open write 
only, Create new file, Truncate existing file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47826 gsn: 259 "FSOPENCONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47827 length: 3 trace: 0 - UserPointer: H'00000000 - FilePointer: 118 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 272 "FSWRITEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47826 length: 8 trace: 0 - FilePointer: 118 - UserReference: H'00fa0004, UserPointer: H'00000000 - Operation flag: H'00000011, Sync, Format=Array of pages - varIndex: 1 - numberOfPages: 1 - pageData: H'00000008, H'00000000 - ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47836 gsn: 270 "FSWRITECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47837 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 253 "NDBFS", r.proc: 4, gsn: 257 "FSCLOSEREQ" prio: 0 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47836 length: 4 trace: 0 - FilePointer: 118 - UserReference: H'00fa0004, userPointer: H'00000000 - Flags: H'00000000, Don't remove file ----- Received - Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47846 gsn: 255 "FSCLOSECONF" prio: 1 -s.bn: 253 "NDBFS", s.proc: 4, s.sigId: 47847 length: 1 trace: 0 - UserPointer: H'00000000 ----- Send ----- Signal ---------------- -r.bn: 250 "DBDICT", r.proc: 2, gsn: 133 "DICT_SCHEMACONF" prio: 1 -s.bn: 250 "DBDICT", s.proc: 4, sigId: 47846 length: 1 trace: 0 - H'00000004 ----- Received - Signal ---------------- - -//-------------------------------------------------------------------------- -// Finally unblock the DICT block so that it can handle add table as master -// if it becomes master in the future. -//-------------------------------------------------------------------------- - -r.bn: 250 "DBDICT", r.proc: 4, sigId: 47846 gsn: 444 "UNBLO_DICTREQ" prio: 1 -s.bn: 250 "DBDICT", s.proc: 2, s.sigId: 58359 length: 1 trace: 0 - H'00fa0002 - -//-------------------------------------------------------------------------- -// We completed the add table operation. -//-------------------------------------------------------------------------- - diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp deleted file mode 100644 index 55c42ee88e7..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp +++ /dev/null @@ -1,285 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#include - -#include -#include -#include -#include - -static const char* progname = 0; -static bool allflag = false; -static bool checkonly = false; -static bool equalcontents = false; -static bool okquiet = false; - -static void -usage() -{ - ndbout - << "Usage: " << progname << " [-aceq]" << " file ..." 
<< endl - << "-a print also unused slots" << endl - << "-c check only (return status 1 on error)" << endl - << "-e check also that the files have identical contents" << endl - << "-q no output if file is ok" << endl - << "Example: " << progname << " -ceq ndb_*_fs/D[12]/DBDICT/P0.SchemaLog" << endl; -} - -#ifdef NOT_USED - -static void -fill(const char * buf, int mod) -{ - int len = strlen(buf)+1; - ndbout << buf << " "; - while((len % mod) != 0){ - ndbout << " "; - len++; - } -} -#endif - -static const char* -version(Uint32 v) -{ - static char buf[40]; - sprintf(buf, "%d.%d.%d", v >> 16, (v >> 8) & 0xFF, v & 0xFF); - return buf; -} - -static int -print_head(const char * filename, const SchemaFile * sf) -{ - int retcode = 0; - - if (! checkonly) { - ndbout << "----- Schemafile: " << filename << " -----" << endl; - ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d", - (int) sizeof(sf->Magic), - sf->Magic, - sf->ByteOrder, - version(sf->NdbVersion), - sf->FileSize); - } - - if (memcmp(sf->Magic, "NDBSCHMA", sizeof(sf->Magic) != 0)) { - ndbout << filename << ": invalid header magic" << endl; - retcode = 1; - } - - if ((sf->NdbVersion >> 16) < 4 || (sf->NdbVersion >> 16) > 9) { - ndbout << filename << ": impossible version " << hex << sf->NdbVersion << endl; - retcode = 1; - } - - return retcode; -} - -inline -Uint32 -table_version_minor(Uint32 ver) -{ - return ver >> 24; -} - -static int -print_old(const char * filename, const SchemaFile * sf, Uint32 sz) -{ - int retcode = 0; - - if (print_head(filename, sf) != 0) - retcode = 1; - - for (Uint32 i = 0; i < sf->NoOfTableEntries; i++) { - SchemaFile::TableEntry_old te = sf->TableEntries_old[i]; - if (allflag || - (te.m_tableState != SchemaFile::INIT && - te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) { - if (! checkonly) - ndbout << "Table " << i << ":" - << " State = " << te.m_tableState - << " version = " << table_version_major(te.m_tableVersion) - << "(" << table_version_minor(te.m_tableVersion) << ")" - << " type = " << te.m_tableType - << " noOfPages = " << te.m_noOfPages - << " gcp: " << te.m_gcp << endl; - } - } - return retcode; -} - -static int -print(const char * filename, const SchemaFile * xsf, Uint32 sz) -{ - int retcode = 0; - - if (print_head(filename, xsf) != 0) - retcode = 1; - - assert(sizeof(SchemaFile) == NDB_SF_PAGE_SIZE); - if (xsf->FileSize != sz || xsf->FileSize % NDB_SF_PAGE_SIZE != 0) { - ndbout << filename << ": invalid FileSize " << xsf->FileSize << endl; - retcode = 1; - } - Uint32 noOfPages = xsf->FileSize / NDB_SF_PAGE_SIZE; - for (Uint32 n = 0; n < noOfPages; n++) { - if (! 
checkonly) { - ndbout << "----- Page: " << n << " (" << noOfPages << ") -----" << endl; - } - const SchemaFile * sf = &xsf[n]; - if (memcmp(sf->Magic, xsf->Magic, sizeof(sf->Magic)) != 0) { - ndbout << filename << ": page " << n << " invalid magic" << endl; - retcode = 1; - } - if (sf->FileSize != xsf->FileSize) { - ndbout << filename << ": page " << n << " FileSize changed to " << sf->FileSize << "!=" << xsf->FileSize << endl; - retcode = 1; - } - Uint32 cs = 0; - for (Uint32 j = 0; j < NDB_SF_PAGE_SIZE_IN_WORDS; j++) - cs ^= ((const Uint32*)sf)[j]; - if (cs != 0) { - ndbout << filename << ": page " << n << " invalid CheckSum" << endl; - retcode = 1; - } - if (sf->NoOfTableEntries != NDB_SF_PAGE_ENTRIES) { - ndbout << filename << ": page " << n << " invalid NoOfTableEntries " << sf->NoOfTableEntries << endl; - retcode = 1; - } - for (Uint32 i = 0; i < NDB_SF_PAGE_ENTRIES; i++) { - SchemaFile::TableEntry te = sf->TableEntries[i]; - Uint32 j = n * NDB_SF_PAGE_ENTRIES + i; - if (allflag || - (te.m_tableState != SchemaFile::INIT && - te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) { - if (! checkonly) - ndbout << "Table " << j << ":" - << " State = " << te.m_tableState - << " version = " << table_version_major(te.m_tableVersion) - << "(" << table_version_minor(te.m_tableVersion) << ")" - << " type = " << te.m_tableType - << " noOfWords = " << te.m_info_words - << " gcp: " << te.m_gcp << endl; - } - if (te.m_unused[0] != 0 || te.m_unused[1] != 0 || te.m_unused[2] != 0) { - ndbout << filename << ": entry " << j << " garbage in m_unused[3]" << endl; - retcode = 1; - } - } - } - - return retcode; -} - -NDB_COMMAND(printSchemafile, - "printSchemafile", "printSchemafile", "Prints a schemafile", 16384) -{ - progname = argv[0]; - int exitcode = 0; - - while (argc > 1 && argv[1][0] == '-') { - if (strchr(argv[1], 'a') != 0) - allflag = true; - if (strchr(argv[1], 'c') != 0) - checkonly = true; - if (strchr(argv[1], 'e') != 0) - equalcontents = true; - if (strchr(argv[1], 'q') != 0) - okquiet = true; - if (strchr(argv[1], 'h') != 0 || strchr(argv[1], '?') != 0) { - usage(); - return 0; - } - argc--, argv++; - } - - const char * prevfilename = 0; - Uint32 * prevbuf = 0; - Uint32 prevbytes = 0; - - while (argc > 1) { - const char * filename = argv[1]; - argc--, argv++; - - struct stat sbuf; - const int res = stat(filename, &sbuf); - if (res != 0) { - ndbout << filename << ": not found errno=" << errno << endl; - exitcode = 1; - continue; - } - const Uint32 bytes = sbuf.st_size; - - Uint32 * buf = new Uint32[bytes/4+1]; - - FILE * f = fopen(filename, "rb"); - if (f == 0) { - ndbout << filename << ": open failed errno=" << errno << endl; - delete [] buf; - exitcode = 1; - continue; - } - Uint32 sz = fread(buf, 1, bytes, f); - fclose(f); - if (sz != bytes) { - ndbout << filename << ": read failed errno=" << errno << endl; - delete [] buf; - exitcode = 1; - continue; - } - - if (sz < 32) { - ndbout << filename << ": too short (no header)" << endl; - delete [] buf; - exitcode = 1; - continue; - } - - SchemaFile* sf = (SchemaFile *)&buf[0]; - int ret; - if (sf->NdbVersion < NDB_SF_VERSION_5_0_6) - ret = print_old(filename, sf, sz); - else - ret = print(filename, sf, sz); - - if (ret != 0) { - ndbout << filename << ": check failed" - << " version=" << version(sf->NdbVersion) << endl; - exitcode = 1; - } else if (! 
okquiet) { - ndbout << filename << ": ok" - << " version=" << version(sf->NdbVersion) << endl; - } - - if (equalcontents && prevfilename != 0) { - if (prevbytes != bytes || memcmp(prevbuf, buf, bytes) != 0) { - ndbout << filename << ": differs from " << prevfilename << endl; - exitcode = 1; - } - } - - prevfilename = filename; - delete [] prevbuf; - prevbuf = buf; - prevbytes = bytes; - } - - delete [] prevbuf; - return exitcode; -} diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp deleted file mode 100644 index f27287b79a4..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ /dev/null @@ -1,1673 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBDIH_H -#define DBDIH_H - -#include -#include -#include -#include "Sysfile.hpp" -#include - -#include -#include -#include - -#ifdef DBDIH_C - -/*###################*/ -/* FILE SYSTEM FLAGS */ -/*###################*/ -#define ZLIST_OF_PAIRS 0 -#define ZLIST_OF_PAIRS_SYNCH 16 -#define ZOPEN_READ_WRITE 2 -#define ZCREATE_READ_WRITE 0x302 -#define ZCLOSE_NO_DELETE 0 -#define ZCLOSE_DELETE 1 - -/*###############*/ -/* NODE STATES */ -/*###############*/ -#define ZIDLE 0 -#define ZACTIVE 1 - -/*#########*/ -/* GENERAL */ -/*#########*/ -#define ZVAR_NO_WORD 1 -#define ZVAR_NO_CRESTART_INFO 20 -#define ZVAR_NO_CRESTART_INFO_TO_FILE 21 -#define ZVALID 1 -#define ZINVALID 2 - -/*###############*/ -/* ERROR CODES */ -/*###############*/ -// ------------------------------------------ -// Error Codes for Transactions (None sofar) -// ------------------------------------------ -#define ZUNDEFINED_FRAGMENT_ERROR 311 - -// -------------------------------------- -// Error Codes for Add Table -// -------------------------------------- -#define ZREPLERROR1 306 - -// -------------------------------------- -// Crash Codes -// -------------------------------------- -#define ZCOULD_NOT_OCCUR_ERROR 300 -#define ZNOT_MASTER_ERROR 301 -#define ZWRONG_FAILURE_NUMBER_ERROR 302 -#define ZWRONG_START_NODE_ERROR 303 -#define ZNO_REPLICA_FOUND_ERROR 304 - -// -------------------------------------- -// Codes from LQH -// -------------------------------------- -#define ZNODE_FAILURE_ERROR 400 - - -/*#########*/ -/* PHASES */ -/*#########*/ -#define ZNDB_SPH1 1 -#define ZNDB_SPH2 2 -#define ZNDB_SPH3 3 -#define ZNDB_SPH4 4 -#define ZNDB_SPH5 5 -#define ZNDB_SPH6 6 -#define ZNDB_SPH7 7 -#define ZNDB_SPH8 8 -/*#########*/ -/* SIZES */ -/*#########*/ -#define ZPAGEREC 100 -#define ZCREATE_REPLICA_FILE_SIZE 4 -#define ZPROXY_MASTER_FILE_SIZE 10 -#define ZPROXY_FILE_SIZE 10 -#endif - -class Dbdih: public SimulatedBlock { -public: - - // Records - - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ - * THE API CONNECT RECORD IS THE SAME RECORD POINTER AS USED IN THE TC BLOCK - * - * IT KEEPS TRACK OF 
ALL THE OPERATIONS CONNECTED TO THIS TRANSACTION. - * IT IS LINKED INTO A QUEUE IN CASE THE GLOBAL CHECKPOINT IS CURRENTLY - * ONGOING */ - struct ApiConnectRecord { - Uint32 apiGci; - Uint32 nextApi; - }; - typedef Ptr ApiConnectRecordPtr; - - /*############## CONNECT_RECORD ##############*/ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THE CONNECT RECORD IS CREATED WHEN A TRANSACTION HAS TO START. IT KEEPS - ALL INTERMEDIATE INFORMATION NECESSARY FOR THE TRANSACTION FROM THE - DISTRIBUTED MANAGER. THE RECORD KEEPS INFORMATION ABOUT THE - OPERATIONS THAT HAVE TO BE CARRIED OUT BY THE TRANSACTION AND - ALSO THE TRAIL OF NODES FOR EACH OPERATION IN THE THE - TRANSACTION. - */ - struct ConnectRecord { - enum ConnectState { - INUSE = 0, - FREE = 1, - STARTED = 2 - }; - Uint32 nodes[MAX_REPLICAS]; - ConnectState connectState; - Uint32 nfConnect; - Uint32 table; - Uint32 userpointer; - BlockReference userblockref; - }; - typedef Ptr ConnectRecordPtr; - - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THESE RECORDS ARE USED WHEN CREATING REPLICAS DURING SYSTEM */ - /* RESTART. I NEED A COMPLEX DATA STRUCTURE DESCRIBING THE REPLICAS */ - /* I WILL TRY TO CREATE FOR EACH FRAGMENT. */ - /* */ - /* I STORE A REFERENCE TO THE FOUR POSSIBLE CREATE REPLICA RECORDS */ - /* IN A COMMON STORED VARIABLE. I ALLOW A MAXIMUM OF 4 REPLICAS TO */ - /* BE RESTARTED PER FRAGMENT. */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - struct CreateReplicaRecord { - Uint32 logStartGci[MAX_LOG_EXEC]; - Uint32 logStopGci[MAX_LOG_EXEC]; - Uint16 logNodeId[MAX_LOG_EXEC]; - Uint32 createLcpId; - - bool hotSpareUse; - Uint32 replicaRec; - Uint16 dataNodeId; - Uint16 lcpNo; - Uint16 noLogNodes; - }; - typedef Ptr CreateReplicaRecordPtr; - - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THIS RECORD CONTAINS A FILE DESCRIPTION. THERE ARE TWO */ - /* FILES PER TABLE TO RAISE SECURITY LEVEL AGAINST DISK CRASHES. */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - struct FileRecord { - enum FileStatus { - CLOSED = 0, - CRASHED = 1, - OPEN = 2 - }; - enum FileType { - TABLE_FILE = 0, - GCP_FILE = 1 - }; - enum ReqStatus { - IDLE = 0, - CREATING_GCP = 1, - OPENING_GCP = 2, - OPENING_COPY_GCI = 3, - WRITING_COPY_GCI = 4, - CREATING_COPY_GCI = 5, - OPENING_TABLE = 6, - READING_GCP = 7, - READING_TABLE = 8, - WRITE_INIT_GCP = 9, - TABLE_CREATE = 10, - TABLE_WRITE = 11, - TABLE_CLOSE = 12, - CLOSING_GCP = 13, - CLOSING_TABLE_CRASH = 14, - CLOSING_TABLE_SR = 15, - CLOSING_GCP_CRASH = 16, - TABLE_OPEN_FOR_DELETE = 17, - TABLE_CLOSE_DELETE = 18 - }; - Uint32 fileName[4]; - Uint32 fileRef; - FileStatus fileStatus; - FileType fileType; - Uint32 nextFile; - ReqStatus reqStatus; - Uint32 tabRef; - }; - typedef Ptr FileRecordPtr; - - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THIS RECORD KEEPS THE STORAGE AND DECISIONS INFORMATION OF A FRAGMENT */ - /* AND ITS REPLICAS. IF FRAGMENT HAS MORE THAN ONE BACK UP */ - /* REPLICA THEN A LIST OF MORE NODES IS ATTACHED TO THIS RECORD. */ - /* EACH RECORD IN MORE LIST HAS INFORMATION ABOUT ONE BACKUP. THIS RECORD */ - /* ALSO HAVE THE STATUS OF THE FRAGMENT. 
*/ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* */ - /* FRAGMENTSTORE RECORD ALIGNED TO BE 64 BYTES */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - struct Fragmentstore { - Uint16 activeNodes[MAX_REPLICAS]; - Uint32 preferredPrimary; - - Uint32 oldStoredReplicas; /* "DEAD" STORED REPLICAS */ - Uint32 storedReplicas; /* "ALIVE" STORED REPLICAS */ - Uint32 nextFragmentChunk; - - Uint32 m_log_part_id; - - Uint8 distributionKey; - Uint8 fragReplicas; - Uint8 noOldStoredReplicas; /* NUMBER OF "DEAD" STORED REPLICAS */ - Uint8 noStoredReplicas; /* NUMBER OF "ALIVE" STORED REPLICAS*/ - Uint8 noLcpReplicas; ///< No of replicas remaining to be LCP:ed - }; - typedef Ptr FragmentstorePtr; - - /*########### PAGE RECORD ############*/ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THIS RECORD KEEPS INFORMATION ABOUT NODE GROUPS. */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - struct NodeGroupRecord { - Uint32 nodesInGroup[MAX_REPLICAS + 1]; - Uint32 nextReplicaNode; - Uint32 nodeCount; - bool activeTakeOver; - }; - typedef Ptr NodeGroupRecordPtr; - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* THIS RECORD KEEPS INFORMATION ABOUT NODES. */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - /* RECORD ALIGNED TO BE 64 BYTES. */ - /*¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤*/ - enum NodefailHandlingStep { - NF_REMOVE_NODE_FROM_TABLE = 1, - NF_GCP_TAKE_OVER = 2, - NF_LCP_TAKE_OVER = 4 - }; - - struct NodeRecord { - NodeRecord(); - - enum NodeStatus { - NOT_IN_CLUSTER = 0, - ALIVE = 1, - STARTING = 2, - DIED_NOW = 3, - DYING = 4, - DEAD = 5 - }; - - struct FragmentCheckpointInfo { - Uint32 tableId; - Uint32 fragId; - Uint32 replicaPtr; - }; - - enum GcpState { - READY = 0, - PREPARE_SENT = 1, - PREPARE_RECEIVED = 2, - COMMIT_SENT = 3, - NODE_FINISHED = 4, - SAVE_REQ_SENT = 5, - SAVE_RECEIVED = 6, - COPY_GCI_SENT = 7 - }; - - GcpState gcpstate; - Sysfile::ActiveStatus activeStatus; - - NodeStatus nodeStatus; - bool useInTransactions; - bool allowNodeStart; - bool copyCompleted; - bool m_inclDihLcp; - - FragmentCheckpointInfo startedChkpt[2]; - FragmentCheckpointInfo queuedChkpt[2]; - - Bitmask<1> m_nodefailSteps; - Uint32 activeTabptr; - Uint32 nextNode; - Uint32 nodeGroup; - - SignalCounter m_NF_COMPLETE_REP; - - Uint8 dbtcFailCompleted; - Uint8 dblqhFailCompleted; - Uint8 dbdihFailCompleted; - Uint8 dbdictFailCompleted; - Uint8 recNODE_FAILREP; - - Uint8 noOfQueuedChkpt; - Uint8 noOfStartedChkpt; - - MasterLCPConf::State lcpStateAtTakeOver; - Uint32 m_remove_node_from_table_lcp_id; - }; - typedef Ptr NodeRecordPtr; - /**********************************************************************/ - /* THIS RECORD KEEPS THE INFORMATION ABOUT A TABLE AND ITS FRAGMENTS */ - /**********************************************************************/ - struct PageRecord { - Uint32 word[2048]; - /* 8 KBYTE PAGE*/ - Uint32 nextfreepage; - }; - typedef Ptr PageRecordPtr; - - /************ REPLICA RECORD *************/ - /**********************************************************************/ - /* THIS RECORD KEEPS THE INFORMATION ABOUT A REPLICA OF A FRAGMENT */ - /**********************************************************************/ - struct ReplicaRecord { - /* -------------------------------------------------------------------- */ - /* THE GLOBAL CHECKPOINT IDENTITY WHEN THIS REPLICA WAS CREATED. 
*/ - /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN ANODE*/ - /* CRASH OCCURS. */ - /* -------------------------------------------------------------------- */ - Uint32 createGci[8]; - /* -------------------------------------------------------------------- */ - /* THE LAST GLOBAL CHECKPOINT IDENTITY WHICH HAS BEEN SAVED ON DISK. */ - /* THIS VARIABLE IS ONLY VALID FOR REPLICAS WHICH HAVE "DIED". A REPLICA*/ - /* "DIES" EITHER WHEN THE NODE CRASHES THAT KEPT THE REPLICA OR BY BEING*/ - /* STOPPED IN A CONTROLLED MANNER. */ - /* THERE IS ONE INDEX PER REPLICA. A REPLICA INDEX IS CREATED WHEN ANODE*/ - /* CRASH OCCURS. */ - /* -------------------------------------------------------------------- */ - Uint32 replicaLastGci[8]; - /* -------------------------------------------------------------------- */ - /* THE LOCAL CHECKPOINT IDENTITY OF A LOCAL CHECKPOINT. */ - /* -------------------------------------------------------------------- */ - Uint32 lcpId[MAX_LCP_STORED]; - /* -------------------------------------------------------------------- */ - /* THIS VARIABLE KEEPS TRACK OF THE MAXIMUM GLOBAL CHECKPOINT COMPLETED */ - /* FOR EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */ - /* -------------------------------------------------------------------- */ - Uint32 maxGciCompleted[MAX_LCP_STORED]; - /* -------------------------------------------------------------------- */ - /* THIS VARIABLE KEEPS TRACK OF THE MINIMUM GLOBAL CHECKPOINT STARTEDFOR*/ - /* EACH OF THE LOCAL CHECKPOINTS IN THIS FRAGMENT REPLICA. */ - /* -------------------------------------------------------------------- */ - Uint32 maxGciStarted[MAX_LCP_STORED]; - /* -------------------------------------------------------------------- */ - /* THE GLOBAL CHECKPOINT IDENTITY WHEN THE TABLE WAS CREATED. */ - /* -------------------------------------------------------------------- */ - Uint32 initialGci; - - /* -------------------------------------------------------------------- */ - /* THE REFERENCE TO THE NEXT REPLICA. EITHER IT REFERS TO THE NEXT IN */ - /* THE FREE LIST OR IT REFERS TO THE NEXT IN A LIST OF REPLICAS ON A */ - /* FRAGMENT. */ - /* -------------------------------------------------------------------- */ - Uint32 nextReplica; - - /* -------------------------------------------------------------------- */ - /* THE NODE ID WHERE THIS REPLICA IS STORED. */ - /* -------------------------------------------------------------------- */ - Uint16 procNode; - - /* -------------------------------------------------------------------- */ - /* The last local checkpoint id started or queued on this replica. */ - /* -------------------------------------------------------------------- */ - Uint32 lcpIdStarted; // Started or queued - - /* -------------------------------------------------------------------- */ - /* THIS VARIABLE SPECIFIES WHAT THE STATUS OF THE LOCAL CHECKPOINT IS.IT*/ - /* CAN EITHER BE VALID OR INVALID. AT CREATION OF A FRAGMENT REPLICA ALL*/ - /* LCP'S ARE INVALID. ALSO IF IF INDEX >= NO_LCP THEN THELOCALCHECKPOINT*/ - /* IS ALWAYS INVALID. IF THE LCP BEFORE THE NEXT_LCP HAS LCP_ID THAT */ - /* DIFFERS FROM THE LATEST LCP_ID STARTED THEN THE NEXT_LCP IS ALSO */ - /* INVALID */ - /* -------------------------------------------------------------------- */ - Uint8 lcpStatus[MAX_LCP_STORED]; - - /* -------------------------------------------------------------------- */ - /* THE NEXT LOCAL CHECKPOINT TO EXECUTE IN THIS FRAGMENT REPLICA. 
*/ - /* -------------------------------------------------------------------- */ - Uint8 nextLcp; - - /* -------------------------------------------------------------------- */ - /* THE NUMBER OF CRASHED REPLICAS IN THIS REPLICAS SO FAR. */ - /* -------------------------------------------------------------------- */ - Uint8 noCrashedReplicas; - - /** - * Is a LCP currently ongoing on fragment - */ - Uint8 lcpOngoingFlag; - }; - typedef Ptr ReplicaRecordPtr; - - /************************************************************************* - * TAB_DESCRIPTOR IS A DESCRIPTOR OF THE LOCATION OF THE FRAGMENTS BELONGING - * TO THE TABLE.THE INFORMATION ABOUT FRAGMENTS OF A TABLE ARE STORED IN - * CHUNKS OF FRAGMENTSTORE RECORDS. - * THIS RECORD ALSO HAS THE NECESSARY INFORMATION TO LOCATE A FRAGMENT AND - * TO LOCATE A FRAGMENT AND TO TRANSLATE A KEY OF A TUPLE TO THE FRAGMENT IT - * BELONGS - */ - struct TabRecord { - /** - * State for copying table description into pages - */ - enum CopyStatus { - CS_IDLE, - CS_SR_PHASE1_READ_PAGES, - CS_SR_PHASE2_READ_TABLE, - CS_SR_PHASE3_COPY_TABLE, - CS_REMOVE_NODE, - CS_LCP_READ_TABLE, - CS_COPY_TAB_REQ, - CS_COPY_NODE_STATE, - CS_ADD_TABLE_MASTER, - CS_ADD_TABLE_SLAVE, - CS_INVALIDATE_NODE_LCP - }; - /** - * State for copying pages to disk - */ - enum UpdateState { - US_IDLE, - US_LOCAL_CHECKPOINT, - US_REMOVE_NODE, - US_COPY_TAB_REQ, - US_ADD_TABLE_MASTER, - US_ADD_TABLE_SLAVE, - US_INVALIDATE_NODE_LCP - }; - enum TabLcpStatus { - TLS_ACTIVE = 1, - TLS_WRITING_TO_FILE = 2, - TLS_COMPLETED = 3 - }; - enum TabStatus { - TS_IDLE = 0, - TS_ACTIVE = 1, - TS_CREATING = 2, - TS_DROPPING = 3 - }; - enum Method { - LINEAR_HASH = 0, - NOTDEFINED = 1, - NORMAL_HASH = 2, - USER_DEFINED = 3 - }; - enum Storage { - ST_NOLOGGING = 0, // Table is not logged, but survives SR - ST_NORMAL = 1, // Normal table, logged and durable - ST_TEMPORARY = 2 // Table is lost after SR, not logged - }; - CopyStatus tabCopyStatus; - UpdateState tabUpdateState; - TabLcpStatus tabLcpStatus; - TabStatus tabStatus; - Method method; - Storage tabStorage; - - Uint32 pageRef[8]; -//----------------------------------------------------------------------------- -// Each entry in this array contains a reference to 16 fragment records in a -// row. Thus finding the correct record is very quick provided the fragment id. -//----------------------------------------------------------------------------- - Uint32 startFid[MAX_NDB_NODES]; - - Uint32 tabFile[2]; - Uint32 connectrec; - Uint32 hashpointer; - Uint32 mask; - Uint32 noOfWords; - Uint32 schemaVersion; - Uint32 tabRemoveNode; - Uint32 totalfragments; - Uint32 noOfFragChunks; - Uint32 tabErrorCode; - struct { - Uint32 tabUserRef; - Uint32 tabUserPtr; - } m_dropTab; - - struct DropTable { - Uint32 senderRef; - Uint32 senderData; - SignalCounter waitDropTabCount; - } m_prepDropTab; - - Uint8 kvalue; - Uint8 noOfBackups; - Uint8 noPages; - Uint16 tableType; - Uint16 primaryTableId; - }; - typedef Ptr TabRecordPtr; - - /***************************************************************************/ - /* THIS RECORD IS USED TO KEEP TRACK OF TAKE OVER AND STARTING A NODE. */ - /* WE KEEP IT IN A RECORD TO ENABLE IT TO BE PARALLELISED IN THE FUTURE. 
*/ - /**************************************************************************/ - struct TakeOverRecord { - enum ToMasterStatus { - IDLE = 0, - TO_WAIT_START_TAKE_OVER = 1, - TO_START_COPY = 2, - TO_START_COPY_ONGOING = 3, - TO_WAIT_START = 4, - STARTING = 5, - SELECTING_NEXT = 6, - TO_WAIT_PREPARE_CREATE = 9, - PREPARE_CREATE = 10, - COPY_FRAG = 11, - TO_WAIT_UPDATE_TO = 12, - TO_UPDATE_TO = 13, - COPY_ACTIVE = 14, - TO_WAIT_COMMIT_CREATE = 15, - LOCK_MUTEX = 23, - COMMIT_CREATE = 16, - TO_COPY_COMPLETED = 17, - WAIT_LCP = 18, - TO_END_COPY = 19, - TO_END_COPY_ONGOING = 20, - TO_WAIT_ENDING = 21, - ENDING = 22, - - STARTING_LOCAL_FRAGMENTS = 24, - PREPARE_COPY = 25 - }; - enum ToSlaveStatus { - TO_SLAVE_IDLE = 0, - TO_SLAVE_STARTED = 1, - TO_SLAVE_CREATE_PREPARE = 2, - TO_SLAVE_COPY_FRAG_COMPLETED = 3, - TO_SLAVE_CREATE_COMMIT = 4, - TO_SLAVE_COPY_COMPLETED = 5 - }; - Uint32 startGci; - Uint32 maxPage; - Uint32 toCopyNode; - Uint32 toCurrentFragid; - Uint32 toCurrentReplica; - Uint32 toCurrentTabref; - Uint32 toFailedNode; - Uint32 toStartingNode; - Uint32 nextTakeOver; - Uint32 prevTakeOver; - bool toNodeRestart; - ToMasterStatus toMasterStatus; - ToSlaveStatus toSlaveStatus; - MutexHandle2 m_switchPrimaryMutexHandle; - }; - typedef Ptr TakeOverRecordPtr; - -public: - Dbdih(Block_context& ctx); - virtual ~Dbdih(); - - struct RWFragment { - Uint32 pageIndex; - Uint32 wordIndex; - Uint32 fragId; - TabRecordPtr rwfTabPtr; - PageRecordPtr rwfPageptr; - }; - struct CopyTableNode { - Uint32 pageIndex; - Uint32 wordIndex; - Uint32 noOfWords; - TabRecordPtr ctnTabPtr; - PageRecordPtr ctnPageptr; - }; - -private: - BLOCK_DEFINES(Dbdih); - - void execDUMP_STATE_ORD(Signal *); - void execNDB_TAMPER(Signal *); - void execDEBUG_SIG(Signal *); - void execEMPTY_LCP_CONF(Signal *); - void execMASTER_GCPREF(Signal *); - void execMASTER_GCPREQ(Signal *); - void execMASTER_GCPCONF(Signal *); - void execMASTER_LCPREF(Signal *); - void execMASTER_LCPREQ(Signal *); - void execMASTER_LCPCONF(Signal *); - void execNF_COMPLETEREP(Signal *); - void execSTART_PERMREQ(Signal *); - void execSTART_PERMCONF(Signal *); - void execSTART_PERMREF(Signal *); - void execINCL_NODEREQ(Signal *); - void execINCL_NODECONF(Signal *); - void execEND_TOREQ(Signal *); - void execEND_TOCONF(Signal *); - void execSTART_TOREQ(Signal *); - void execSTART_TOCONF(Signal *); - void execSTART_MEREQ(Signal *); - void execSTART_MECONF(Signal *); - void execSTART_MEREF(Signal *); - void execSTART_COPYREQ(Signal *); - void execSTART_COPYCONF(Signal *); - void execSTART_COPYREF(Signal *); - void execCREATE_FRAGREQ(Signal *); - void execCREATE_FRAGCONF(Signal *); - void execDIVERIFYREQ(Signal *); - void execGCP_SAVECONF(Signal *); - void execGCP_PREPARECONF(Signal *); - void execGCP_PREPARE(Signal *); - void execGCP_NODEFINISH(Signal *); - void execGCP_COMMIT(Signal *); - void execDIHNDBTAMPER(Signal *); - void execCONTINUEB(Signal *); - void execCOPY_GCIREQ(Signal *); - void execCOPY_GCICONF(Signal *); - void execCOPY_TABREQ(Signal *); - void execCOPY_TABCONF(Signal *); - void execTCGETOPSIZECONF(Signal *); - void execTC_CLOPSIZECONF(Signal *); - - int handle_invalid_lcp_no(const class LcpFragRep*, ReplicaRecordPtr); - void execLCP_FRAG_REP(Signal *); - void execLCP_COMPLETE_REP(Signal *); - void execSTART_LCP_REQ(Signal *); - void execSTART_LCP_CONF(Signal *); - MutexHandle2 c_startLcpMutexHandle; - void startLcpMutex_locked(Signal* signal, Uint32, Uint32); - void startLcpMutex_unlocked(Signal* signal, Uint32, Uint32); - - MutexHandle2 
c_switchPrimaryMutexHandle; - void switchPrimaryMutex_locked(Signal* signal, Uint32, Uint32); - void switchPrimaryMutex_unlocked(Signal* signal, Uint32, Uint32); - void switch_primary_stop_node(Signal* signal, Uint32, Uint32); - - void execBLOCK_COMMIT_ORD(Signal *); - void execUNBLOCK_COMMIT_ORD(Signal *); - - void execDIH_SWITCH_REPLICA_REQ(Signal *); - void execDIH_SWITCH_REPLICA_REF(Signal *); - void execDIH_SWITCH_REPLICA_CONF(Signal *); - - void execSTOP_PERM_REQ(Signal *); - void execSTOP_PERM_REF(Signal *); - void execSTOP_PERM_CONF(Signal *); - - void execSTOP_ME_REQ(Signal *); - void execSTOP_ME_REF(Signal *); - void execSTOP_ME_CONF(Signal *); - - void execREAD_CONFIG_REQ(Signal *); - void execUNBLO_DICTCONF(Signal *); - void execCOPY_ACTIVECONF(Signal *); - void execTAB_COMMITREQ(Signal *); - void execNODE_FAILREP(Signal *); - void execCOPY_FRAGCONF(Signal *); - void execCOPY_FRAGREF(Signal *); - void execPREPARE_COPY_FRAG_REF(Signal*); - void execPREPARE_COPY_FRAG_CONF(Signal*); - void execDIADDTABREQ(Signal *); - void execDIGETNODESREQ(Signal *); - void execDIRELEASEREQ(Signal *); - void execDISEIZEREQ(Signal *); - void execSTTOR(Signal *); - void execDI_FCOUNTREQ(Signal *); - void execDIGETPRIMREQ(Signal *); - void execGCP_SAVEREF(Signal *); - void execGCP_TCFINISHED(Signal *); - void execREAD_NODESCONF(Signal *); - void execNDB_STTOR(Signal *); - void execDICTSTARTCONF(Signal *); - void execNDB_STARTREQ(Signal *); - void execGETGCIREQ(Signal *); - void execDIH_RESTARTREQ(Signal *); - void execSTART_RECCONF(Signal *); - void execSTART_FRAGREF(Signal *); - void execSTART_FRAGCONF(Signal *); - void execADD_FRAGCONF(Signal *); - void execADD_FRAGREF(Signal *); - void execFSOPENCONF(Signal *); - void execFSOPENREF(Signal *); - void execFSCLOSECONF(Signal *); - void execFSCLOSEREF(Signal *); - void execFSREADCONF(Signal *); - void execFSREADREF(Signal *); - void execFSWRITECONF(Signal *); - void execFSWRITEREF(Signal *); - void execCHECKNODEGROUPSREQ(Signal *); - void execSTART_INFOREQ(Signal*); - void execSTART_INFOREF(Signal*); - void execSTART_INFOCONF(Signal*); - void execWAIT_GCP_REQ(Signal* signal); - void execWAIT_GCP_REF(Signal* signal); - void execWAIT_GCP_CONF(Signal* signal); - void execUPDATE_TOREQ(Signal* signal); - void execUPDATE_TOCONF(Signal* signal); - - void execPREP_DROP_TAB_REQ(Signal* signal); - void execWAIT_DROP_TAB_REF(Signal* signal); - void execWAIT_DROP_TAB_CONF(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - - void execALTER_TAB_REQ(Signal* signal); - - void execCREATE_FRAGMENTATION_REQ(Signal*); - - void waitDropTabWritingToFile(Signal *, TabRecordPtr tabPtr); - void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr); - void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId); - - void execDICT_LOCK_CONF(Signal* signal); - void execDICT_LOCK_REF(Signal* signal); - - // Statement blocks -//------------------------------------ -// Methods that send signals -//------------------------------------ - void nullRoutine(Signal *, Uint32 nodeId); - void sendCOPY_GCIREQ(Signal *, Uint32 nodeId); - void sendDIH_SWITCH_REPLICA_REQ(Signal *, Uint32 nodeId); - void sendEMPTY_LCP_REQ(Signal *, Uint32 nodeId); - void sendEND_TOREQ(Signal *, Uint32 nodeId); - void sendGCP_COMMIT(Signal *, Uint32 nodeId); - void sendGCP_PREPARE(Signal *, Uint32 nodeId); - void sendGCP_SAVEREQ(Signal *, Uint32 nodeId); - void sendINCL_NODEREQ(Signal *, Uint32 nodeId); - void sendMASTER_GCPREQ(Signal *, Uint32 nodeId); - void sendMASTER_LCPREQ(Signal *, 
Uint32 nodeId); - void sendMASTER_LCPCONF(Signal * signal); - void sendSTART_RECREQ(Signal *, Uint32 nodeId); - void sendSTART_INFOREQ(Signal *, Uint32 nodeId); - void sendSTART_TOREQ(Signal *, Uint32 nodeId); - void sendSTOP_ME_REQ(Signal *, Uint32 nodeId); - void sendTC_CLOPSIZEREQ(Signal *, Uint32 nodeId); - void sendTCGETOPSIZEREQ(Signal *, Uint32 nodeId); - void sendUPDATE_TOREQ(Signal *, Uint32 nodeId); - void sendSTART_LCP_REQ(Signal *, Uint32 nodeId); - - void sendLCP_FRAG_ORD(Signal*, NodeRecord::FragmentCheckpointInfo info); - void sendLastLCP_FRAG_ORD(Signal *); - - void sendCopyTable(Signal *, CopyTableNode* ctn, - BlockReference ref, Uint32 reqinfo); - void sendCreateFragReq(Signal *, - Uint32 startGci, - Uint32 storedType, - Uint32 takeOverPtr); - void sendDihfragreq(Signal *, - TabRecordPtr regTabPtr, - Uint32 fragId); - void sendStartFragreq(Signal *, - TabRecordPtr regTabPtr, - Uint32 fragId); - void sendHOT_SPAREREP(Signal *); - void sendAddFragreq(Signal *, - TabRecordPtr regTabPtr, - Uint32 fragId, - Uint32 lcpNo, - Uint32 param); - - void sendAddFragreq(Signal*, ConnectRecordPtr, TabRecordPtr, Uint32 fragId); - void addTable_closeConf(Signal* signal, Uint32 tabPtrI); - void resetReplicaSr(TabRecordPtr tabPtr); - void resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci); - -//------------------------------------ -// Methods for LCP functionality -//------------------------------------ - void checkKeepGci(TabRecordPtr, Uint32, Fragmentstore*, Uint32); - void checkLcpStart(Signal *, Uint32 lineNo); - void checkStartMoreLcp(Signal *, Uint32 nodeId); - bool reportLcpCompletion(const class LcpFragRep *); - void sendLCP_COMPLETE_REP(Signal *); - -//------------------------------------ -// Methods for Delete Table Files -//------------------------------------ - void startDeleteFile(Signal* signal, TabRecordPtr tabPtr); - void openTableFileForDelete(Signal* signal, Uint32 fileIndex); - void tableOpenLab(Signal* signal, FileRecordPtr regFilePtr); - void tableDeleteLab(Signal* signal, FileRecordPtr regFilePtr); - -//------------------------------------ -// File Record specific methods -//------------------------------------ - void closeFile(Signal *, FileRecordPtr regFilePtr); - void closeFileDelete(Signal *, FileRecordPtr regFilePtr); - void createFileRw(Signal *, FileRecordPtr regFilePtr); - void openFileRw(Signal *, FileRecordPtr regFilePtr); - void openFileRo(Signal *, FileRecordPtr regFilePtr); - void seizeFile(FileRecordPtr& regFilePtr); - void releaseFile(Uint32 fileIndex); - -//------------------------------------ -// Methods called when completing file -// operation. 
-//------------------------------------ - void creatingGcpLab(Signal *, FileRecordPtr regFilePtr); - void openingGcpLab(Signal *, FileRecordPtr regFilePtr); - void openingTableLab(Signal *, FileRecordPtr regFilePtr); - void tableCreateLab(Signal *, FileRecordPtr regFilePtr); - void creatingGcpErrorLab(Signal *, FileRecordPtr regFilePtr); - void openingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr); - void creatingCopyGciErrorLab(Signal *, FileRecordPtr regFilePtr); - void openingGcpErrorLab(Signal *, FileRecordPtr regFilePtr); - void openingTableErrorLab(Signal *, FileRecordPtr regFilePtr); - void tableCreateErrorLab(Signal *, FileRecordPtr regFilePtr); - void closingGcpLab(Signal *, FileRecordPtr regFilePtr); - void closingGcpCrashLab(Signal *, FileRecordPtr regFilePtr); - void closingTableCrashLab(Signal *, FileRecordPtr regFilePtr); - void closingTableSrLab(Signal *, FileRecordPtr regFilePtr); - void tableCloseLab(Signal *, FileRecordPtr regFilePtr); - void tableCloseErrorLab(FileRecordPtr regFilePtr); - void readingGcpLab(Signal *, FileRecordPtr regFilePtr); - void readingTableLab(Signal *, FileRecordPtr regFilePtr); - void readingGcpErrorLab(Signal *, FileRecordPtr regFilePtr); - void readingTableErrorLab(Signal *, FileRecordPtr regFilePtr); - void writingCopyGciLab(Signal *, FileRecordPtr regFilePtr); - void writeInitGcpLab(Signal *, FileRecordPtr regFilePtr); - void tableWriteLab(Signal *, FileRecordPtr regFilePtr); - void writeInitGcpErrorLab(Signal *, FileRecordPtr regFilePtr); - - - void calculateHotSpare(); - void checkEscalation(); - void clearRestartInfoBits(Signal *); - void invalidateLcpInfoAfterSr(); - - bool isMaster(); - bool isActiveMaster(); - - void emptyverificbuffer(Signal *, bool aContintueB); - Uint32 findHotSpare(); - void handleGcpStateInMaster(Signal *, NodeRecordPtr failedNodeptr); - void initRestartInfo(); - void initRestorableGciFiles(); - void makeNodeGroups(Uint32 nodeArray[]); - void makePrnList(class ReadNodesConf * readNodes, Uint32 nodeArray[]); - void nodeResetStart(); - void releaseTabPages(Uint32 tableId); - void replication(Uint32 noOfReplicas, - NodeGroupRecordPtr NGPtr, - FragmentstorePtr regFragptr); - void selectMasterCandidateAndSend(Signal *); - void setInitialActiveStatus(); - void setLcpActiveStatusEnd(); - void setLcpActiveStatusStart(Signal *); - void setNodeActiveStatus(); - void setNodeGroups(); - void setNodeInfo(Signal *); - void setNodeLcpActiveStatus(); - void setNodeRestartInfoBits(); - void startGcp(Signal *); - - void readFragment(RWFragment* rf, FragmentstorePtr regFragptr); - Uint32 readPageWord(RWFragment* rf); - void readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr); - void readReplicas(RWFragment* rf, FragmentstorePtr regFragptr); - void readRestorableGci(Signal *, FileRecordPtr regFilePtr); - void readTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr); - void writeFragment(RWFragment* wf, FragmentstorePtr regFragptr); - void writePageWord(RWFragment* wf, Uint32 dataWord); - void writeReplicas(RWFragment* wf, Uint32 replicaStartIndex); - void writeRestorableGci(Signal *, FileRecordPtr regFilePtr); - void writeTabfile(Signal *, TabRecord* tab, FileRecordPtr regFilePtr); - void copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr); - - void gcpcommitreqLab(Signal *); - void gcpsavereqLab(Signal *); - void copyGciLab(Signal *, CopyGCIReq::CopyReason reason); - void storeNewLcpIdLab(Signal *); - void startLcpRoundLoopLab(Signal *, Uint32 startTableId, Uint32 startFragId); - - void 
nodeFailCompletedCheckLab(Signal*, NodeRecordPtr failedNodePtr); - - /** - * - */ - void setLocalNodefailHandling(Signal*, Uint32 failedNodeId, - NodefailHandlingStep step); - void checkLocalNodefailComplete(Signal*, Uint32 failedNodeId, - NodefailHandlingStep step); - - void ndbsttorry10Lab(Signal *, Uint32 _line); - void createMutexes(Signal* signal, Uint32 no); - void createMutex_done(Signal* signal, Uint32 no, Uint32 retVal); - void crashSystemAtGcpStop(Signal *, bool); - void sendFirstDictfragsreq(Signal *, TabRecordPtr regTabPtr); - void addtabrefuseLab(Signal *, ConnectRecordPtr regConnectPtr, Uint32 errorCode); - void GCP_SAVEhandling(Signal *, Uint32 nodeId); - void packTableIntoPagesLab(Signal *, Uint32 tableId); - void readPagesIntoTableLab(Signal *, Uint32 tableId); - void readPagesIntoFragLab(Signal *, RWFragment* rf); - void readTabDescriptionLab(Signal *, Uint32 tableId); - void copyTableLab(Signal *, Uint32 tableId); - void breakCopyTableLab(Signal *, - TabRecordPtr regTabPtr, - Uint32 nodeId); - void checkAddfragCompletedLab(Signal *, - TabRecordPtr regTabPtr, - Uint32 fragId); - void completeRestartLab(Signal *); - void readTableFromPagesLab(Signal *, TabRecordPtr regTabPtr); - void srPhase2ReadTableLab(Signal *, TabRecordPtr regTabPtr); - void checkTcCounterLab(Signal *); - void calculateKeepGciLab(Signal *, Uint32 tableId, Uint32 fragId); - void tableUpdateLab(Signal *, TabRecordPtr regTabPtr); - void checkLcpCompletedLab(Signal *); - void initLcpLab(Signal *, Uint32 masterRef, Uint32 tableId); - void startGcpLab(Signal *, Uint32 aWaitTime); - void checkGcpStopLab(Signal *); - void MASTER_GCPhandling(Signal *, Uint32 failedNodeId); - void MASTER_LCPhandling(Signal *, Uint32 failedNodeId); - void rnfTableNotReadyLab(Signal *, TabRecordPtr regTabPtr, Uint32 removeNodeId); - void startLcpTakeOverLab(Signal *, Uint32 failedNodeId); - - void startLcpMasterTakeOver(Signal *, Uint32 failedNodeId); - void startGcpMasterTakeOver(Signal *, Uint32 failedNodeId); - void checkGcpOutstanding(Signal*, Uint32 failedNodeId); - - void checkEmptyLcpComplete(Signal *); - void lcpBlockedLab(Signal *); - void breakCheckTabCompletedLab(Signal *, TabRecordPtr regTabptr); - void readGciFileLab(Signal *); - void openingCopyGciSkipInitLab(Signal *, FileRecordPtr regFilePtr); - void startLcpRoundLab(Signal *); - void gcpBlockedLab(Signal *); - void initialStartCompletedLab(Signal *); - void allNodesLcpCompletedLab(Signal *); - void nodeRestartPh2Lab(Signal *); - void nodeRestartPh2Lab2(Signal *); - void initGciFilesLab(Signal *); - void dictStartConfLab(Signal *); - void nodeDictStartConfLab(Signal *); - void ndbStartReqLab(Signal *, BlockReference ref); - void nodeRestartStartRecConfLab(Signal *); - void dihCopyCompletedLab(Signal *); - void release_connect(ConnectRecordPtr ptr); - void copyTableNode(Signal *, - CopyTableNode* ctn, - NodeRecordPtr regNodePtr); - void startFragment(Signal *, Uint32 tableId, Uint32 fragId); - bool checkLcpAllTablesDoneInLqh(); - - void lcpStateAtNodeFailureLab(Signal *, Uint32 nodeId); - void copyNodeLab(Signal *, Uint32 tableId); - void copyGciReqLab(Signal *); - void allLab(Signal *, - ConnectRecordPtr regConnectPtr, - TabRecordPtr regTabPtr); - void tableCopyNodeLab(Signal *, TabRecordPtr regTabPtr); - - void removeNodeFromTables(Signal *, Uint32 tableId, Uint32 nodeId); - void removeNodeFromTable(Signal *, Uint32 tableId, TabRecordPtr tabPtr); - void removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId); - - void packFragIntoPagesLab(Signal *, RWFragment* 
wf); - void startNextChkpt(Signal *); - void failedNodeLcpHandling(Signal*, NodeRecordPtr failedNodePtr); - void failedNodeSynchHandling(Signal *, NodeRecordPtr failedNodePtr); - void checkCopyTab(NodeRecordPtr failedNodePtr); - - void initCommonData(); - void initialiseRecordsLab(Signal *, Uint32 stepNo, Uint32, Uint32); - - void findReplica(ReplicaRecordPtr& regReplicaPtr, - Fragmentstore* fragPtrP, - Uint32 nodeId, - bool oldStoredReplicas = false); -//------------------------------------ -// Node failure handling methods -//------------------------------------ - void startRemoveFailedNode(Signal *, NodeRecordPtr failedNodePtr); - void handleGcpTakeOver(Signal *, NodeRecordPtr failedNodePtr); - void handleLcpTakeOver(Signal *, NodeRecordPtr failedNodePtr); - void handleNewMaster(Signal *, NodeRecordPtr failedNodePtr); - void checkTakeOverInMasterAllNodeFailure(Signal*, NodeRecordPtr failedNode); - void checkTakeOverInMasterCopyNodeFailure(Signal*, Uint32 failedNodeId); - void checkTakeOverInMasterStartNodeFailure(Signal*, Uint32 takeOverPtr); - void checkTakeOverInNonMasterStartNodeFailure(Signal*, Uint32 takeOverPtr); - void handleLcpMasterTakeOver(Signal *, Uint32 nodeId); - -//------------------------------------ -// Replica record specific methods -//------------------------------------ - Uint32 findLogInterval(ConstPtr regReplicaPtr, - Uint32 startGci); - void findMinGci(ReplicaRecordPtr fmgReplicaPtr, - Uint32& keeGci, - Uint32& oldestRestorableGci); - bool findStartGci(ConstPtr fstReplicaPtr, - Uint32 tfstStopGci, - Uint32& tfstStartGci, - Uint32& tfstLcp); - void newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr); - void packCrashedReplicas(ReplicaRecordPtr pcrReplicaPtr); - void releaseReplicas(Uint32 replicaPtr); - void removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr); - void removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr); - void seizeReplicaRec(ReplicaRecordPtr& replicaPtr); - -//------------------------------------ -// Methods operating on a fragment and -// its connected replicas and nodes. 
-//------------------------------------ - void allocStoredReplica(FragmentstorePtr regFragptr, - ReplicaRecordPtr& newReplicaPtr, - Uint32 nodeId); - Uint32 extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]); - bool findBestLogNode(CreateReplicaRecord* createReplica, - FragmentstorePtr regFragptr, - Uint32 startGci, - Uint32 stopGci, - Uint32 logNode, - Uint32& fblStopGci); - bool findLogNodes(CreateReplicaRecord* createReplica, - FragmentstorePtr regFragptr, - Uint32 startGci, - Uint32 stopGci); - void findToReplica(TakeOverRecord* regTakeOver, - Uint32 replicaType, - FragmentstorePtr regFragptr, - ReplicaRecordPtr& ftrReplicaPtr); - void initFragstore(FragmentstorePtr regFragptr); - void insertBackup(FragmentstorePtr regFragptr, Uint32 nodeId); - void insertfraginfo(FragmentstorePtr regFragptr, - Uint32 noOfBackups, - Uint32* nodeArray); - void linkOldStoredReplica(FragmentstorePtr regFragptr, - ReplicaRecordPtr replicaPtr); - void linkStoredReplica(FragmentstorePtr regFragptr, - ReplicaRecordPtr replicaPtr); - void prepareReplicas(FragmentstorePtr regFragptr); - void removeNodeFromStored(Uint32 nodeId, - FragmentstorePtr regFragptr, - ReplicaRecordPtr replicaPtr, - bool temporary); - void removeOldStoredReplica(FragmentstorePtr regFragptr, - ReplicaRecordPtr replicaPtr); - void removeStoredReplica(FragmentstorePtr regFragptr, - ReplicaRecordPtr replicaPtr); - void searchStoredReplicas(FragmentstorePtr regFragptr); - bool setup_create_replica(FragmentstorePtr, CreateReplicaRecord*, - ConstPtr); - void updateNodeInfo(FragmentstorePtr regFragptr); - -//------------------------------------ -// Fragment allocation, deallocation and -// find methods -//------------------------------------ - void allocFragments(Uint32 noOfFragments, TabRecordPtr regTabPtr); - void releaseFragments(TabRecordPtr regTabPtr); - void getFragstore(TabRecord *, Uint32 fragNo, FragmentstorePtr & ptr); - void initialiseFragstore(); - -//------------------------------------ -// Page Record specific methods -//------------------------------------ - void allocpage(PageRecordPtr& regPagePtr); - void releasePage(Uint32 pageIndex); - -//------------------------------------ -// Table Record specific methods -//------------------------------------ - void initTable(TabRecordPtr regTabPtr); - void initTableFile(TabRecordPtr regTabPtr); - void releaseTable(TabRecordPtr tabPtr); - Uint32 findTakeOver(Uint32 failedNodeId); - void handleTakeOverMaster(Signal *, Uint32 takeOverPtr); - void handleTakeOverNewMaster(Signal *, Uint32 takeOverPtr); - -//------------------------------------ -// TakeOver Record specific methods -//------------------------------------ - void initTakeOver(TakeOverRecordPtr regTakeOverptr); - void seizeTakeOver(TakeOverRecordPtr& regTakeOverptr); - void allocateTakeOver(TakeOverRecordPtr& regTakeOverptr); - void releaseTakeOver(Uint32 takeOverPtr); - bool anyActiveTakeOver(); - void checkToCopy(); - void checkToCopyCompleted(Signal *); - bool checkToInterrupted(TakeOverRecordPtr& regTakeOverptr); - Uint32 getStartNode(Uint32 takeOverPtr); - -//------------------------------------ -// Methods for take over functionality -//------------------------------------ - void changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver); - void endTakeOver(Uint32 takeOverPtr); - void initStartTakeOver(const class StartToReq *, - TakeOverRecordPtr regTakeOverPtr); - - void nodeRestartTakeOver(Signal *, Uint32 startNodeId); - void systemRestartTakeOverLab(Signal *); - void startTakeOver(Signal *, - Uint32 
takeOverPtr, - Uint32 startNode, - Uint32 toNode); - void sendStartTo(Signal *, Uint32 takeOverPtr); - void startNextCopyFragment(Signal *, Uint32 takeOverPtr); - void toCopyFragLab(Signal *, Uint32 takeOverPtr); - void toStartCopyFrag(Signal *, TakeOverRecordPtr); - void startHsAddFragConfLab(Signal *); - void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr); - void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState); - void toCopyCompletedLab(Signal *, TakeOverRecordPtr regTakeOverptr); - void takeOverCompleted(Uint32 aNodeId); - void sendEndTo(Signal *, Uint32 takeOverPtr); - -//------------------------------------ -// Node Record specific methods -//------------------------------------ - void checkStartTakeOver(Signal *); - void insertAlive(NodeRecordPtr newNodePtr); - void insertDeadNode(NodeRecordPtr removeNodePtr); - void removeAlive(NodeRecordPtr removeNodePtr); - void removeDeadNode(NodeRecordPtr removeNodePtr); - - NodeRecord::NodeStatus getNodeStatus(Uint32 nodeId); - void setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus); - Sysfile::ActiveStatus getNodeActiveStatus(Uint32 nodeId); - void setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus); - void setNodeLcpActiveStatus(Uint32 nodeId, bool newState); - bool getNodeLcpActiveStatus(Uint32 nodeId); - bool getAllowNodeStart(Uint32 nodeId); - void setAllowNodeStart(Uint32 nodeId, bool newState); - bool getNodeCopyCompleted(Uint32 nodeId); - void setNodeCopyCompleted(Uint32 nodeId, bool newState); - bool checkNodeAlive(Uint32 nodeId); - - void nr_start_fragments(Signal*, TakeOverRecordPtr); - void nr_start_fragment(Signal*, TakeOverRecordPtr, ReplicaRecordPtr); - void nr_run_redo(Signal*, TakeOverRecordPtr); - - // Initialisation - void initData(); - void initRecords(); - - // Variables to support record structures and their free lists - - ApiConnectRecord *apiConnectRecord; - Uint32 capiConnectFileSize; - - ConnectRecord *connectRecord; - Uint32 cfirstconnect; - Uint32 cconnectFileSize; - - CreateReplicaRecord *createReplicaRecord; - Uint32 cnoOfCreateReplicas; - - FileRecord *fileRecord; - Uint32 cfirstfreeFile; - Uint32 cfileFileSize; - - Fragmentstore *fragmentstore; - Uint32 cfirstfragstore; - Uint32 cfragstoreFileSize; - - Uint32 c_nextNodeGroup; - NodeGroupRecord *nodeGroupRecord; - Uint32 c_nextLogPart; - - NodeRecord *nodeRecord; - - PageRecord *pageRecord; - Uint32 cfirstfreepage; - Uint32 cpageFileSize; - - ReplicaRecord *replicaRecord; - Uint32 cfirstfreeReplica; - Uint32 cnoFreeReplicaRec; - Uint32 creplicaFileSize; - - TabRecord *tabRecord; - Uint32 ctabFileSize; - - TakeOverRecord *takeOverRecord; - Uint32 cfirstfreeTakeOver; - - /* - 2.4 C O M M O N S T O R E D V A R I A B L E S - ---------------------------------------------------- - */ - Uint32 cfirstVerifyQueue; - Uint32 clastVerifyQueue; - Uint32 cverifyQueueCounter; - - /*------------------------------------------------------------------------*/ - /* THIS VARIABLE KEEPS THE REFERENCES TO FILE RECORDS THAT DESCRIBE */ - /* THE TWO FILES THAT ARE USED TO STORE THE VARIABLE CRESTART_INFO */ - /* ON DISK. */ - /*------------------------------------------------------------------------*/ - Uint32 crestartInfoFile[2]; - /*------------------------------------------------------------------------*/ - /* THIS VARIABLE KEEPS TRACK OF THE STATUS OF A GLOBAL CHECKPOINT */ - /* PARTICIPANT. THIS IS NEEDED TO HANDLE A NODE FAILURE. WHEN A NODE*/ - /* FAILURE OCCURS IT IS EASY THAT THE PROTOCOL STOPS IF NO ACTION IS*/ - /* TAKEN TO PREVENT THIS. 
THIS VARIABLE ENSURES SUCH ACTION CAN BE */ - /* TAKEN. */ - /*------------------------------------------------------------------------*/ - enum GcpParticipantState { - GCP_PARTICIPANT_READY = 0, - GCP_PARTICIPANT_PREPARE_RECEIVED = 1, - GCP_PARTICIPANT_COMMIT_RECEIVED = 2, - GCP_PARTICIPANT_TC_FINISHED = 3, - GCP_PARTICIPANT_COPY_GCI_RECEIVED = 4 - }; - GcpParticipantState cgcpParticipantState; - /*------------------------------------------------------------------------*/ - /* THESE VARIABLES ARE USED TO CONTROL THAT GCP PROCESSING DO NOT */ - /*STOP FOR SOME REASON. */ - /*------------------------------------------------------------------------*/ - enum GcpStatus { - GCP_READY = 0, - GCP_PREPARE_SENT = 1, - GCP_COMMIT_SENT = 2, - GCP_NODE_FINISHED = 3, - GCP_SAVE_LQH_FINISHED = 4 - }; - GcpStatus cgcpStatus; - Uint32 cgcpStartCounter; - Uint32 coldGcpStatus; - Uint32 coldGcpId; - /*------------------------------------------------------------------------*/ - /* THIS VARIABLE KEEPS TRACK OF THE STATE OF THIS NODE AS MASTER. */ - /*------------------------------------------------------------------------*/ - enum MasterState { - MASTER_IDLE = 0, - MASTER_ACTIVE = 1, - MASTER_TAKE_OVER_GCP = 2 - }; - MasterState cmasterState; - Uint16 cmasterTakeOverNode; - /* NODE IS NOT MASTER */ - /* NODE IS ACTIVE AS MASTER */ - /* NODE IS TAKING OVER AS MASTER */ - - struct CopyGCIMaster { - CopyGCIMaster(){ m_copyReason = m_waiting = CopyGCIReq::IDLE;} - /*------------------------------------------------------------------------*/ - /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */ - /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */ - /* RESTART. */ - /*------------------------------------------------------------------------*/ - CopyGCIReq::CopyReason m_copyReason; - - /*------------------------------------------------------------------------*/ - /* COPYING RESTART INFO CAN BE STARTED BY LOCAL CHECKPOINTS AND BY */ - /* GLOBAL CHECKPOINTS. WE CAN HOWEVER ONLY HANDLE ONE SUCH COPY AT */ - /* THE TIME. THUS WE HAVE TO KEEP WAIT INFORMATION IN THIS VARIABLE.*/ - /*------------------------------------------------------------------------*/ - CopyGCIReq::CopyReason m_waiting; - } c_copyGCIMaster; - - struct CopyGCISlave { - CopyGCISlave(){ m_copyReason = CopyGCIReq::IDLE; m_expectedNextWord = 0;} - /*------------------------------------------------------------------------*/ - /* THIS STATE VARIABLE IS USED TO INDICATE IF COPYING OF RESTART */ - /* INFO WAS STARTED BY A LOCAL CHECKPOINT OR AS PART OF A SYSTEM */ - /* RESTART. THIS VARIABLE IS USED BY THE NODE THAT RECEIVES */ - /* COPY_GCI_REQ. */ - /*------------------------------------------------------------------------*/ - Uint32 m_senderData; - BlockReference m_senderRef; - CopyGCIReq::CopyReason m_copyReason; - - Uint32 m_expectedNextWord; - } c_copyGCISlave; - - /*------------------------------------------------------------------------*/ - /* THIS VARIABLE IS USED TO KEEP TRACK OF THE STATE OF LOCAL */ - /* CHECKPOINTS. 
*/ - /*------------------------------------------------------------------------*/ -public: - enum LcpStatus { - LCP_STATUS_IDLE = 0, - LCP_TCGET = 1, // Only master - LCP_STATUS_ACTIVE = 2, - LCP_CALCULATE_KEEP_GCI = 4, // Only master - LCP_COPY_GCI = 5, - LCP_INIT_TABLES = 6, - LCP_TC_CLOPSIZE = 7, // Only master - LCP_START_LCP_ROUND = 8, - LCP_TAB_COMPLETED = 9, - LCP_TAB_SAVED = 10 - }; -private: - - struct LcpState { - LcpState() {} - LcpStatus lcpStatus; - Uint32 lcpStatusUpdatedPlace; - - struct Save { - LcpStatus m_status; - Uint32 m_place; - } m_saveState[10]; - - void setLcpStatus(LcpStatus status, Uint32 line){ - for (Uint32 i = 9; i > 0; i--) - m_saveState[i] = m_saveState[i-1]; - m_saveState[0].m_status = lcpStatus; - m_saveState[0].m_place = lcpStatusUpdatedPlace; - - lcpStatus = status; - lcpStatusUpdatedPlace = line; - } - - Uint32 lcpStart; - Uint32 lcpStopGcp; - Uint32 keepGci; /* USED TO CALCULATE THE GCI TO KEEP AFTER A LCP */ - Uint32 oldestRestorableGci; - - struct CurrentFragment { - Uint32 tableId; - Uint32 fragmentId; - } currentFragment; - - Uint32 noOfLcpFragRepOutstanding; - - /*------------------------------------------------------------------------*/ - /* USED TO ENSURE THAT LCP'S ARE EXECUTED WITH CERTAIN TIMEINTERVALS*/ - /* EVEN WHEN SYSTEM IS NOT DOING ANYTHING. */ - /*------------------------------------------------------------------------*/ - Uint32 ctimer; - Uint32 ctcCounter; - Uint32 clcpDelay; /* MAX. 2^(CLCP_DELAY - 2) SEC BETWEEN LCP'S */ - - /*------------------------------------------------------------------------*/ - /* THIS STATE IS USED TO TELL IF THE FIRST LCP AFTER START/RESTART */ - /* HAS BEEN RUN. AFTER A NODE RESTART THE NODE DOES NOT ENTER */ - /* STARTED STATE BEFORE THIS IS DONE. */ - /*------------------------------------------------------------------------*/ - bool immediateLcpStart; - bool m_LCP_COMPLETE_REP_From_Master_Received; - SignalCounter m_LCP_COMPLETE_REP_Counter_DIH; - SignalCounter m_LCP_COMPLETE_REP_Counter_LQH; - SignalCounter m_LAST_LCP_FRAG_ORD; - NdbNodeBitmask m_participatingLQH; - NdbNodeBitmask m_participatingDIH; - - Uint32 m_masterLcpDihRef; - bool m_MASTER_LCPREQ_Received; - Uint32 m_MASTER_LCPREQ_FailedNodeId; - } c_lcpState; - - /*------------------------------------------------------------------------*/ - /* THIS VARIABLE KEEPS TRACK OF HOW MANY TABLES ARE ACTIVATED WHEN */ - /* STARTING A LOCAL CHECKPOINT WE SHOULD AVOID STARTING A CHECKPOINT*/ - /* WHEN NO TABLES ARE ACTIVATED. */ - /*------------------------------------------------------------------------*/ - Uint32 cnoOfActiveTables; - Uint32 cgcpDelay; /* Delay between global checkpoints */ - - BlockReference cdictblockref; /* DICTIONARY BLOCK REFERENCE */ - Uint32 cfailurenr; /* EVERY TIME WHEN A NODE FAILURE IS REPORTED - THIS NUMBER IS INCREMENTED. AT THE START OF - THE SYSTEM THIS NUMBER MUST BE INITIATED TO - ZERO */ - bool cgckptflag; /* A FLAG WHICH IS SET WHILE A NEW GLOBAL CHECK - POINT IS BEING CREATED. 
NO VERIFICATION IS ALLOWED - IF THE FLAG IS SET*/ - Uint32 cgcpOrderBlocked; - BlockReference clocallqhblockref; - BlockReference clocaltcblockref; - BlockReference cmasterdihref; - Uint16 cownNodeId; - Uint32 cnewgcp; - BlockReference cndbStartReqBlockref; - BlockReference cntrlblockref; - Uint32 cgcpSameCounter; - Uint32 coldgcp; - Uint32 con_lineNodes; - Uint32 creceivedfrag; - Uint32 cremainingfrags; - Uint32 cstarttype; - Uint32 csystemnodes; - Uint32 currentgcp; - Uint32 c_newest_restorable_gci; - Uint32 c_set_initial_start_flag; - - enum GcpMasterTakeOverState { - GMTOS_IDLE = 0, - GMTOS_INITIAL = 1, - ALL_READY = 2, - ALL_PREPARED = 3, - COMMIT_STARTED_NOT_COMPLETED = 4, - COMMIT_COMPLETED = 5, - PREPARE_STARTED_NOT_COMMITTED = 6, - SAVE_STARTED_NOT_COMPLETED = 7 - }; - GcpMasterTakeOverState cgcpMasterTakeOverState; - -public: - enum LcpMasterTakeOverState { - LMTOS_IDLE = 0, - LMTOS_WAIT_EMPTY_LCP = 1, // Currently doing empty LCP - LMTOS_WAIT_LCP_FRAG_REP = 2,// Currently waiting for outst. LCP_FRAG_REP - LMTOS_INITIAL = 3, - LMTOS_ALL_IDLE = 4, - LMTOS_ALL_ACTIVE = 5, - LMTOS_LCP_CONCLUDING = 6, - LMTOS_COPY_ONGOING = 7 - }; -private: - class MasterTakeOverState { - public: - MasterTakeOverState() {} - void set(LcpMasterTakeOverState s, Uint32 line) { - state = s; updatePlace = line; - } - - LcpMasterTakeOverState state; - Uint32 updatePlace; - - Uint32 minTableId; - Uint32 minFragId; - Uint32 failedNodeId; - } c_lcpMasterTakeOverState; - - Uint16 cmasterNodeId; - Uint8 cnoHotSpare; - - struct NodeStartMasterRecord { - Uint32 startNode; - Uint32 wait; - Uint32 failNr; - bool activeState; - bool blockLcp; - bool blockGcp; - Uint32 startInfoErrorCode; - Uint32 m_outstandingGsn; - }; - NodeStartMasterRecord c_nodeStartMaster; - - struct NodeStartSlaveRecord { - NodeStartSlaveRecord() { nodeId = 0;} - - Uint32 nodeId; - }; - NodeStartSlaveRecord c_nodeStartSlave; - - Uint32 cfirstAliveNode; - Uint32 cfirstDeadNode; - Uint32 cstartPhase; - Uint32 cnoReplicas; - - Uint32 c_startToLock; - Uint32 c_endToLock; - Uint32 c_createFragmentLock; - Uint32 c_updateToLock; - - bool cwaitLcpSr; - Uint32 cnoOfNodeGroups; - bool cstartGcpNow; - - Uint32 crestartGci; /* VALUE OF GCI WHEN SYSTEM RESTARTED OR STARTED */ - Uint32 cminHotSpareNodes; - - /** - * Counter variables keeping track of the number of outstanding signals - * for particular signals in various protocols. 
- */ - SignalCounter c_COPY_GCIREQ_Counter; - SignalCounter c_COPY_TABREQ_Counter; - SignalCounter c_CREATE_FRAGREQ_Counter; - SignalCounter c_DIH_SWITCH_REPLICA_REQ_Counter; - SignalCounter c_EMPTY_LCP_REQ_Counter; - SignalCounter c_END_TOREQ_Counter; - SignalCounter c_GCP_COMMIT_Counter; - SignalCounter c_GCP_PREPARE_Counter; - SignalCounter c_GCP_SAVEREQ_Counter; - SignalCounter c_INCL_NODEREQ_Counter; - SignalCounter c_MASTER_GCPREQ_Counter; - SignalCounter c_MASTER_LCPREQ_Counter; - SignalCounter c_START_INFOREQ_Counter; - SignalCounter c_START_RECREQ_Counter; - SignalCounter c_START_TOREQ_Counter; - SignalCounter c_STOP_ME_REQ_Counter; - SignalCounter c_TC_CLOPSIZEREQ_Counter; - SignalCounter c_TCGETOPSIZEREQ_Counter; - SignalCounter c_UPDATE_TOREQ_Counter; - SignalCounter c_START_LCP_REQ_Counter; - - bool c_blockCommit; - Uint32 c_blockCommitNo; - - bool getBlockCommit() const { - return c_blockCommit || cgckptflag; - } - - /** - * SwitchReplicaRecord - Should only be used by master - */ - struct SwitchReplicaRecord { - SwitchReplicaRecord() {} - void clear(){} - - Uint32 nodeId; - Uint32 tableId; - Uint32 fragNo; - }; - SwitchReplicaRecord c_switchReplicas; - - struct StopPermProxyRecord { - StopPermProxyRecord() { clientRef = 0; } - - Uint32 clientData; - BlockReference clientRef; - BlockReference masterRef; - }; - - struct StopPermMasterRecord { - StopPermMasterRecord() { clientRef = 0;} - - Uint32 returnValue; - - Uint32 clientData; - BlockReference clientRef; - }; - - StopPermProxyRecord c_stopPermProxy; - StopPermMasterRecord c_stopPermMaster; - - void checkStopPermProxy(Signal*, NodeId failedNodeId); - void checkStopPermMaster(Signal*, NodeRecordPtr failedNodePtr); - - void switchReplica(Signal*, - Uint32 nodeId, - Uint32 tableId, - Uint32 fragNo); - - void switchReplicaReply(Signal*, NodeId nodeId); - - /** - * Wait GCP (proxy) - */ - struct WaitGCPProxyRecord { - WaitGCPProxyRecord() { clientRef = 0;} - - Uint32 clientData; - BlockReference clientRef; - BlockReference masterRef; - - union { Uint32 nextPool; Uint32 nextList; }; - Uint32 prevList; - }; - typedef Ptr WaitGCPProxyPtr; - - /** - * Wait GCP (master) - */ - struct WaitGCPMasterRecord { - WaitGCPMasterRecord() { clientRef = 0;} - Uint32 clientData; - BlockReference clientRef; - - union { Uint32 nextPool; Uint32 nextList; }; - Uint32 prevList; - }; - typedef Ptr WaitGCPMasterPtr; - - /** - * Pool/list of WaitGCPProxyRecord record - */ - ArrayPool waitGCPProxyPool; - DLList c_waitGCPProxyList; - - /** - * Pool/list of WaitGCPMasterRecord record - */ - ArrayPool waitGCPMasterPool; - DLList c_waitGCPMasterList; - - void checkWaitGCPProxy(Signal*, NodeId failedNodeId); - void checkWaitGCPMaster(Signal*, NodeId failedNodeId); - void emptyWaitGCPMasterQueue(Signal*); - - /** - * Stop me - */ - struct StopMeRecord { - StopMeRecord() { clientRef = 0;} - - BlockReference clientRef; - Uint32 clientData; - }; - StopMeRecord c_stopMe; - - void checkStopMe(Signal *, NodeRecordPtr failedNodePtr); - -#define DIH_CDATA_SIZE 128 - /** - * This variable must be atleast the size of Sysfile::SYSFILE_SIZE32 - */ - Uint32 cdata[DIH_CDATA_SIZE]; /* TEMPORARY ARRAY VARIABLE */ - - /** - * Sys file data - */ - Uint32 sysfileData[DIH_CDATA_SIZE]; - Uint32 sysfileDataToFile[DIH_CDATA_SIZE]; - - /** - * When a node comes up without filesystem - * we have to clear all LCP for that node - */ - void invalidateNodeLCP(Signal *, Uint32 nodeId, Uint32 tableId); - void invalidateNodeLCP(Signal *, Uint32 nodeId, TabRecordPtr); - - /** - * Reply from 
nodeId - */ - void startInfoReply(Signal *, Uint32 nodeId); - - void dump_replica_info(); - - // DIH specifics for execNODE_START_REP (sendDictUnlockOrd) - void execNODE_START_REP(Signal* signal); - - /* - * Lock master DICT. Only current use is by starting node - * during NR. A pool of slave records is convenient anyway. - */ - struct DictLockSlaveRecord { - Uint32 lockPtr; - Uint32 lockType; - bool locked; - Callback callback; - Uint32 nextPool; - }; - - typedef Ptr DictLockSlavePtr; - ArrayPool c_dictLockSlavePool; - - // slave - void sendDictLockReq(Signal* signal, Uint32 lockType, Callback c); - void recvDictLockConf(Signal* signal); - void sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI); - - // NR - Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR - void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret); - - Uint32 c_error_7181_ref; -}; - -#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32) -#error "cdata is to small compared to Sysfile size" -#endif - -#endif - diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp deleted file mode 100644 index 5560c94cbfa..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBDIH_C -#include "Dbdih.hpp" -#include - -#define DEBUG(x) { ndbout << "DIH::" << x << endl; } - -void Dbdih::initData() -{ - cpageFileSize = ZPAGEREC; - - // Records with constant sizes - createReplicaRecord = (CreateReplicaRecord*) - allocRecord("CreateReplicaRecord", sizeof(CreateReplicaRecord), - ZCREATE_REPLICA_FILE_SIZE); - - nodeGroupRecord = (NodeGroupRecord*) - allocRecord("NodeGroupRecord", sizeof(NodeGroupRecord), MAX_NDB_NODES); - - nodeRecord = (NodeRecord*) - allocRecord("NodeRecord", sizeof(NodeRecord), MAX_NDB_NODES); - - Uint32 i; - for(i = 0; iword[0]; - bat[1].nrr = cpageFileSize; - bat[1].ClusterSize = sizeof(PageRecord); - bat[1].bits.q = 11; - bat[1].bits.v = 5; - bat[20].WA = &sysfileData[0]; - bat[20].nrr = 1; - bat[20].ClusterSize = sizeof(sysfileData); - bat[20].bits.q = 7; - bat[20].bits.v = 5; - bat[21].WA = &sysfileDataToFile[0]; - bat[21].nrr = 1; - bat[21].ClusterSize = sizeof(sysfileDataToFile); - bat[21].bits.q = 7; - bat[21].bits.v = 5; -}//Dbdih::initRecords() - -Dbdih::Dbdih(Block_context& ctx): - SimulatedBlock(DBDIH, ctx), - c_waitGCPProxyList(waitGCPProxyPool), - c_waitGCPMasterList(waitGCPMasterPool) -{ - BLOCK_CONSTRUCTOR(Dbdih); - - addRecSignal(GSN_DUMP_STATE_ORD, &Dbdih::execDUMP_STATE_ORD); - addRecSignal(GSN_NDB_TAMPER, &Dbdih::execNDB_TAMPER, true); - addRecSignal(GSN_DEBUG_SIG, &Dbdih::execDEBUG_SIG); - addRecSignal(GSN_MASTER_GCPREQ, &Dbdih::execMASTER_GCPREQ); - addRecSignal(GSN_MASTER_GCPREF, &Dbdih::execMASTER_GCPREF); - addRecSignal(GSN_MASTER_GCPCONF, &Dbdih::execMASTER_GCPCONF); - addRecSignal(GSN_EMPTY_LCP_CONF, &Dbdih::execEMPTY_LCP_CONF); - addRecSignal(GSN_MASTER_LCPREQ, &Dbdih::execMASTER_LCPREQ); - addRecSignal(GSN_MASTER_LCPREF, &Dbdih::execMASTER_LCPREF); - addRecSignal(GSN_MASTER_LCPCONF, &Dbdih::execMASTER_LCPCONF); - addRecSignal(GSN_NF_COMPLETEREP, &Dbdih::execNF_COMPLETEREP); - addRecSignal(GSN_START_PERMREQ, &Dbdih::execSTART_PERMREQ); - addRecSignal(GSN_START_PERMCONF, &Dbdih::execSTART_PERMCONF); - addRecSignal(GSN_START_PERMREF, &Dbdih::execSTART_PERMREF); - addRecSignal(GSN_INCL_NODEREQ, &Dbdih::execINCL_NODEREQ); - addRecSignal(GSN_INCL_NODECONF, &Dbdih::execINCL_NODECONF); - addRecSignal(GSN_END_TOREQ, &Dbdih::execEND_TOREQ); - addRecSignal(GSN_END_TOCONF, &Dbdih::execEND_TOCONF); - addRecSignal(GSN_START_TOREQ, &Dbdih::execSTART_TOREQ); - addRecSignal(GSN_START_TOCONF, &Dbdih::execSTART_TOCONF); - addRecSignal(GSN_START_MEREQ, &Dbdih::execSTART_MEREQ); - addRecSignal(GSN_START_MECONF, &Dbdih::execSTART_MECONF); - addRecSignal(GSN_START_MEREF, &Dbdih::execSTART_MEREF); - addRecSignal(GSN_START_COPYREQ, &Dbdih::execSTART_COPYREQ); - addRecSignal(GSN_START_COPYCONF, &Dbdih::execSTART_COPYCONF); - addRecSignal(GSN_START_COPYREF, &Dbdih::execSTART_COPYREF); - addRecSignal(GSN_CREATE_FRAGREQ, &Dbdih::execCREATE_FRAGREQ); - addRecSignal(GSN_CREATE_FRAGCONF, &Dbdih::execCREATE_FRAGCONF); - addRecSignal(GSN_DIVERIFYREQ, &Dbdih::execDIVERIFYREQ); - addRecSignal(GSN_GCP_SAVECONF, &Dbdih::execGCP_SAVECONF); - addRecSignal(GSN_GCP_PREPARECONF, &Dbdih::execGCP_PREPARECONF); - addRecSignal(GSN_GCP_PREPARE, &Dbdih::execGCP_PREPARE); - addRecSignal(GSN_GCP_NODEFINISH, &Dbdih::execGCP_NODEFINISH); - addRecSignal(GSN_GCP_COMMIT, &Dbdih::execGCP_COMMIT); - addRecSignal(GSN_DIHNDBTAMPER, 
&Dbdih::execDIHNDBTAMPER); - addRecSignal(GSN_CONTINUEB, &Dbdih::execCONTINUEB); - addRecSignal(GSN_COPY_GCIREQ, &Dbdih::execCOPY_GCIREQ); - addRecSignal(GSN_COPY_GCICONF, &Dbdih::execCOPY_GCICONF); - addRecSignal(GSN_COPY_TABREQ, &Dbdih::execCOPY_TABREQ); - addRecSignal(GSN_COPY_TABCONF, &Dbdih::execCOPY_TABCONF); - addRecSignal(GSN_TCGETOPSIZECONF, &Dbdih::execTCGETOPSIZECONF); - addRecSignal(GSN_TC_CLOPSIZECONF, &Dbdih::execTC_CLOPSIZECONF); - - addRecSignal(GSN_LCP_COMPLETE_REP, &Dbdih::execLCP_COMPLETE_REP); - addRecSignal(GSN_LCP_FRAG_REP, &Dbdih::execLCP_FRAG_REP); - addRecSignal(GSN_START_LCP_REQ, &Dbdih::execSTART_LCP_REQ); - addRecSignal(GSN_START_LCP_CONF, &Dbdih::execSTART_LCP_CONF); - - addRecSignal(GSN_READ_CONFIG_REQ, &Dbdih::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_UNBLO_DICTCONF, &Dbdih::execUNBLO_DICTCONF); - addRecSignal(GSN_COPY_ACTIVECONF, &Dbdih::execCOPY_ACTIVECONF); - addRecSignal(GSN_TAB_COMMITREQ, &Dbdih::execTAB_COMMITREQ); - addRecSignal(GSN_NODE_FAILREP, &Dbdih::execNODE_FAILREP); - addRecSignal(GSN_COPY_FRAGCONF, &Dbdih::execCOPY_FRAGCONF); - addRecSignal(GSN_COPY_FRAGREF, &Dbdih::execCOPY_FRAGREF); - addRecSignal(GSN_DIADDTABREQ, &Dbdih::execDIADDTABREQ); - addRecSignal(GSN_DIGETNODESREQ, &Dbdih::execDIGETNODESREQ); - addRecSignal(GSN_DIRELEASEREQ, &Dbdih::execDIRELEASEREQ); - addRecSignal(GSN_DISEIZEREQ, &Dbdih::execDISEIZEREQ); - addRecSignal(GSN_STTOR, &Dbdih::execSTTOR); - addRecSignal(GSN_DI_FCOUNTREQ, &Dbdih::execDI_FCOUNTREQ); - addRecSignal(GSN_DIGETPRIMREQ, &Dbdih::execDIGETPRIMREQ); - addRecSignal(GSN_GCP_SAVEREF, &Dbdih::execGCP_SAVEREF); - addRecSignal(GSN_GCP_TCFINISHED, &Dbdih::execGCP_TCFINISHED); - addRecSignal(GSN_READ_NODESCONF, &Dbdih::execREAD_NODESCONF); - addRecSignal(GSN_NDB_STTOR, &Dbdih::execNDB_STTOR); - addRecSignal(GSN_DICTSTARTCONF, &Dbdih::execDICTSTARTCONF); - addRecSignal(GSN_NDB_STARTREQ, &Dbdih::execNDB_STARTREQ); - addRecSignal(GSN_GETGCIREQ, &Dbdih::execGETGCIREQ); - addRecSignal(GSN_DIH_RESTARTREQ, &Dbdih::execDIH_RESTARTREQ); - addRecSignal(GSN_START_RECCONF, &Dbdih::execSTART_RECCONF); - addRecSignal(GSN_START_FRAGCONF, &Dbdih::execSTART_FRAGCONF); - addRecSignal(GSN_ADD_FRAGCONF, &Dbdih::execADD_FRAGCONF); - addRecSignal(GSN_ADD_FRAGREF, &Dbdih::execADD_FRAGREF); - addRecSignal(GSN_FSOPENCONF, &Dbdih::execFSOPENCONF); - addRecSignal(GSN_FSOPENREF, &Dbdih::execFSOPENREF, true); - addRecSignal(GSN_FSCLOSECONF, &Dbdih::execFSCLOSECONF); - addRecSignal(GSN_FSCLOSEREF, &Dbdih::execFSCLOSEREF, true); - addRecSignal(GSN_FSREADCONF, &Dbdih::execFSREADCONF); - addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true); - addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF); - addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true); - - addRecSignal(GSN_START_INFOREQ, - &Dbdih::execSTART_INFOREQ); - addRecSignal(GSN_START_INFOREF, - &Dbdih::execSTART_INFOREF); - addRecSignal(GSN_START_INFOCONF, - &Dbdih::execSTART_INFOCONF); - - addRecSignal(GSN_CHECKNODEGROUPSREQ, &Dbdih::execCHECKNODEGROUPSREQ); - - addRecSignal(GSN_BLOCK_COMMIT_ORD, - &Dbdih::execBLOCK_COMMIT_ORD); - addRecSignal(GSN_UNBLOCK_COMMIT_ORD, - &Dbdih::execUNBLOCK_COMMIT_ORD); - - addRecSignal(GSN_DIH_SWITCH_REPLICA_REQ, - &Dbdih::execDIH_SWITCH_REPLICA_REQ); - - addRecSignal(GSN_DIH_SWITCH_REPLICA_REF, - &Dbdih::execDIH_SWITCH_REPLICA_REF); - - addRecSignal(GSN_DIH_SWITCH_REPLICA_CONF, - &Dbdih::execDIH_SWITCH_REPLICA_CONF); - - addRecSignal(GSN_STOP_PERM_REQ, &Dbdih::execSTOP_PERM_REQ); - addRecSignal(GSN_STOP_PERM_REF, &Dbdih::execSTOP_PERM_REF); - 
addRecSignal(GSN_STOP_PERM_CONF, &Dbdih::execSTOP_PERM_CONF); - - addRecSignal(GSN_STOP_ME_REQ, &Dbdih::execSTOP_ME_REQ); - addRecSignal(GSN_STOP_ME_REF, &Dbdih::execSTOP_ME_REF); - addRecSignal(GSN_STOP_ME_CONF, &Dbdih::execSTOP_ME_CONF); - - addRecSignal(GSN_WAIT_GCP_REQ, &Dbdih::execWAIT_GCP_REQ); - addRecSignal(GSN_WAIT_GCP_REF, &Dbdih::execWAIT_GCP_REF); - addRecSignal(GSN_WAIT_GCP_CONF, &Dbdih::execWAIT_GCP_CONF); - - addRecSignal(GSN_UPDATE_TOREQ, &Dbdih::execUPDATE_TOREQ); - addRecSignal(GSN_UPDATE_TOCONF, &Dbdih::execUPDATE_TOCONF); - - addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbdih::execPREP_DROP_TAB_REQ); - addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbdih::execWAIT_DROP_TAB_REF); - addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbdih::execWAIT_DROP_TAB_CONF); - addRecSignal(GSN_DROP_TAB_REQ, &Dbdih::execDROP_TAB_REQ); - - addRecSignal(GSN_ALTER_TAB_REQ, &Dbdih::execALTER_TAB_REQ); - - addRecSignal(GSN_CREATE_FRAGMENTATION_REQ, - &Dbdih::execCREATE_FRAGMENTATION_REQ); - - addRecSignal(GSN_DICT_LOCK_CONF, &Dbdih::execDICT_LOCK_CONF); - addRecSignal(GSN_DICT_LOCK_REF, &Dbdih::execDICT_LOCK_REF); - addRecSignal(GSN_NODE_START_REP, &Dbdih::execNODE_START_REP, true); - - addRecSignal(GSN_START_FRAGREF, - &Dbdih::execSTART_FRAGREF); - - addRecSignal(GSN_PREPARE_COPY_FRAG_REF, - &Dbdih::execPREPARE_COPY_FRAG_REF); - addRecSignal(GSN_PREPARE_COPY_FRAG_CONF, - &Dbdih::execPREPARE_COPY_FRAG_CONF); - - apiConnectRecord = 0; - connectRecord = 0; - fileRecord = 0; - fragmentstore = 0; - pageRecord = 0; - replicaRecord = 0; - tabRecord = 0; - takeOverRecord = 0; - createReplicaRecord = 0; - nodeGroupRecord = 0; - nodeRecord = 0; - c_nextNodeGroup = 0; -}//Dbdih::Dbdih() - -Dbdih::~Dbdih() -{ - deallocRecord((void **)&apiConnectRecord, "ApiConnectRecord", - sizeof(ApiConnectRecord), - capiConnectFileSize); - - deallocRecord((void **)&connectRecord, "ConnectRecord", - sizeof(ConnectRecord), - cconnectFileSize); - - deallocRecord((void **)&fileRecord, "FileRecord", - sizeof(FileRecord), - cfileFileSize); - - deallocRecord((void **)&fragmentstore, "Fragmentstore", - sizeof(Fragmentstore), - cfragstoreFileSize); - - deallocRecord((void **)&pageRecord, "PageRecord", - sizeof(PageRecord), - cpageFileSize); - - deallocRecord((void **)&replicaRecord, "ReplicaRecord", - sizeof(ReplicaRecord), - creplicaFileSize); - - deallocRecord((void **)&tabRecord, "TabRecord", - sizeof(TabRecord), - ctabFileSize); - - // Records with constant sizes - deallocRecord((void **)&createReplicaRecord, - "CreateReplicaRecord", sizeof(CreateReplicaRecord), - ZCREATE_REPLICA_FILE_SIZE); - - deallocRecord((void **)&nodeGroupRecord, "NodeGroupRecord", - sizeof(NodeGroupRecord), MAX_NDB_NODES); - - deallocRecord((void **)&nodeRecord, "NodeRecord", - sizeof(NodeRecord), MAX_NDB_NODES); - - deallocRecord((void **)&takeOverRecord, "TakeOverRecord", - sizeof(TakeOverRecord), - MAX_NDB_NODES); - -}//Dbdih::~Dbdih() - -BLOCK_FUNCTIONS(Dbdih) - - - diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp deleted file mode 100644 index fb936fed464..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ /dev/null @@ -1,15878 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBDIH_C -#include -#include -#include - -#include "Dbdih.hpp" -#include "Configuration.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -extern EventLogger g_eventLogger; - -#define SYSFILE ((Sysfile *)&sysfileData[0]) - -#define RETURN_IF_NODE_NOT_ALIVE(node) \ - if (!checkNodeAlive((node))) { \ - jam(); \ - return; \ - } \ - -#define RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverIndex, regTOPtr) \ - regTOPtr.i = takeOverIndex; \ - ptrCheckGuard(regTOPtr, MAX_NDB_NODES, takeOverRecord); \ - if (checkToInterrupted(regTOPtr)) { \ - jam(); \ - return; \ - } \ - -#define receiveLoopMacro(sigName, receiveNodeId)\ -{ \ - c_##sigName##_Counter.clearWaitingFor(receiveNodeId); \ - if(c_##sigName##_Counter.done() == false){ \ - jam(); \ - return; \ - } \ -} - -#define sendLoopMacro(sigName, signalRoutine) \ -{ \ - c_##sigName##_Counter.clearWaitingFor(); \ - NodeRecordPtr specNodePtr; \ - specNodePtr.i = cfirstAliveNode; \ - do { \ - jam(); \ - ptrCheckGuard(specNodePtr, MAX_NDB_NODES, nodeRecord); \ - c_##sigName##_Counter.setWaitingFor(specNodePtr.i); \ - signalRoutine(signal, specNodePtr.i); \ - specNodePtr.i = specNodePtr.p->nextNode; \ - } while (specNodePtr.i != RNIL); \ -} - -static -Uint32 -prevLcpNo(Uint32 lcpNo){ - if(lcpNo == 0) - return MAX_LCP_STORED - 1; - return lcpNo - 1; -} - -static -Uint32 -nextLcpNo(Uint32 lcpNo){ - lcpNo++; - if(lcpNo == MAX_LCP_STORED) - return 0; - return lcpNo; -} - -#define gth(x, y) ndbrequire(((int)x)>((int)y)) - -void Dbdih::nullRoutine(Signal* signal, Uint32 nodeId) -{ -}//Dbdih::nullRoutine() - -void Dbdih::sendCOPY_GCIREQ(Signal* signal, Uint32 nodeId) -{ - ndbrequire(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE); - - const BlockReference ref = calcDihBlockRef(nodeId); - const Uint32 wordPerSignal = CopyGCIReq::DATA_SIZE; - const Uint32 noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) / - wordPerSignal); - - CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0]; - copyGCI->anyData = nodeId; - copyGCI->copyReason = c_copyGCIMaster.m_copyReason; - copyGCI->startWord = 0; - - for(Uint32 i = 0; i < noOfSignals; i++) { - jam(); - { // Do copy - const int startWord = copyGCI->startWord; - for(Uint32 j = 0; j < wordPerSignal; j++) { - copyGCI->data[j] = sysfileData[j+startWord]; - }//for - } - sendSignal(ref, GSN_COPY_GCIREQ, signal, 25, JBB); - copyGCI->startWord += wordPerSignal; - }//for -}//Dbdih::sendCOPY_GCIREQ() - - -void Dbdih::sendDIH_SWITCH_REPLICA_REQ(Signal* signal, Uint32 nodeId) -{ - const BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_DIH_SWITCH_REPLICA_REQ, signal, - DihSwitchReplicaReq::SignalLength, JBB); 
-}//Dbdih::sendDIH_SWITCH_REPLICA_REQ() - -void Dbdih::sendEMPTY_LCP_REQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcLqhBlockRef(nodeId); - sendSignal(ref, GSN_EMPTY_LCP_REQ, signal, EmptyLcpReq::SignalLength, JBB); -}//Dbdih::sendEMPTY_LCPREQ() - -void Dbdih::sendEND_TOREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_END_TOREQ, signal, EndToReq::SignalLength, JBB); -}//Dbdih::sendEND_TOREQ() - -void Dbdih::sendGCP_COMMIT(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - signal->theData[0] = cownNodeId; - signal->theData[1] = cnewgcp; - sendSignal(ref, GSN_GCP_COMMIT, signal, 2, JBA); -}//Dbdih::sendGCP_COMMIT() - -void Dbdih::sendGCP_PREPARE(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - signal->theData[0] = cownNodeId; - signal->theData[1] = cnewgcp; - sendSignal(ref, GSN_GCP_PREPARE, signal, 2, JBA); -}//Dbdih::sendGCP_PREPARE() - -void Dbdih::sendGCP_SAVEREQ(Signal* signal, Uint32 nodeId) -{ - GCPSaveReq * const saveReq = (GCPSaveReq*)&signal->theData[0]; - BlockReference ref = calcLqhBlockRef(nodeId); - saveReq->dihBlockRef = reference(); - saveReq->dihPtr = nodeId; - saveReq->gci = coldgcp; - sendSignal(ref, GSN_GCP_SAVEREQ, signal, GCPSaveReq::SignalLength, JBB); -}//Dbdih::sendGCP_SAVEREQ() - -void Dbdih::sendINCL_NODEREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference nodeDihRef = calcDihBlockRef(nodeId); - signal->theData[0] = reference(); - signal->theData[1] = c_nodeStartMaster.startNode; - signal->theData[2] = c_nodeStartMaster.failNr; - signal->theData[3] = 0; - signal->theData[4] = currentgcp; - sendSignal(nodeDihRef, GSN_INCL_NODEREQ, signal, 5, JBA); -}//Dbdih::sendINCL_NODEREQ() - -void Dbdih::sendMASTER_GCPREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_MASTER_GCPREQ, signal, MasterGCPReq::SignalLength, JBB); -}//Dbdih::sendMASTER_GCPREQ() - -void Dbdih::sendMASTER_LCPREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_MASTER_LCPREQ, signal, MasterLCPReq::SignalLength, JBB); -}//Dbdih::sendMASTER_LCPREQ() - -void Dbdih::sendSTART_INFOREQ(Signal* signal, Uint32 nodeId) -{ - const BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_START_INFOREQ, signal, StartInfoReq::SignalLength, JBB); -}//sendSTART_INFOREQ() - -void Dbdih::sendSTART_RECREQ(Signal* signal, Uint32 nodeId) -{ - StartRecReq * const req = (StartRecReq*)&signal->theData[0]; - BlockReference ref = calcLqhBlockRef(nodeId); - req->receivingNodeId = nodeId; - req->senderRef = reference(); - req->keepGci = SYSFILE->keepGCI; - req->lastCompletedGci = SYSFILE->lastCompletedGCI[nodeId]; - req->newestGci = SYSFILE->newestRestorableGCI; - sendSignal(ref, GSN_START_RECREQ, signal, StartRecReq::SignalLength, JBB); - - signal->theData[0] = NDB_LE_StartREDOLog; - signal->theData[1] = nodeId; - signal->theData[2] = SYSFILE->keepGCI; - signal->theData[3] = SYSFILE->lastCompletedGCI[nodeId]; - signal->theData[4] = SYSFILE->newestRestorableGCI; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 5, JBB); -}//Dbdih::sendSTART_RECREQ() - -void Dbdih::sendSTART_TOREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_START_TOREQ, signal, StartToReq::SignalLength, JBB); -}//Dbdih::sendSTART_TOREQ() - -void Dbdih::sendSTOP_ME_REQ(Signal* signal, Uint32 nodeId) -{ - if (nodeId != getOwnNodeId()) { - jam(); - 
const BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_STOP_ME_REQ, signal, StopMeReq::SignalLength, JBB); - }//if -}//Dbdih::sendSTOP_ME_REQ() - -void Dbdih::sendTC_CLOPSIZEREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcTcBlockRef(nodeId); - signal->theData[0] = nodeId; - signal->theData[1] = reference(); - sendSignal(ref, GSN_TC_CLOPSIZEREQ, signal, 2, JBB); -}//Dbdih::sendTC_CLOPSIZEREQ() - -void Dbdih::sendTCGETOPSIZEREQ(Signal* signal, Uint32 nodeId) -{ - BlockReference ref = calcTcBlockRef(nodeId); - signal->theData[0] = nodeId; - signal->theData[1] = reference(); - sendSignal(ref, GSN_TCGETOPSIZEREQ, signal, 2, JBB); -}//Dbdih::sendTCGETOPSIZEREQ() - -void Dbdih::sendUPDATE_TOREQ(Signal* signal, Uint32 nodeId) -{ - const BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_UPDATE_TOREQ, signal, UpdateToReq::SignalLength, JBB); -}//sendUPDATE_TOREQ() - -void Dbdih::execCONTINUEB(Signal* signal) -{ - jamEntry(); - switch ((DihContinueB::Type)signal->theData[0]) { - case DihContinueB::ZPACK_TABLE_INTO_PAGES: - { - jam(); - Uint32 tableId = signal->theData[1]; - packTableIntoPagesLab(signal, tableId); - return; - break; - } - case DihContinueB::ZPACK_FRAG_INTO_PAGES: - { - RWFragment wf; - jam(); - wf.rwfTabPtr.i = signal->theData[1]; - ptrCheckGuard(wf.rwfTabPtr, ctabFileSize, tabRecord); - wf.fragId = signal->theData[2]; - wf.pageIndex = signal->theData[3]; - wf.wordIndex = signal->theData[4]; - packFragIntoPagesLab(signal, &wf); - return; - break; - } - case DihContinueB::ZREAD_PAGES_INTO_TABLE: - { - jam(); - Uint32 tableId = signal->theData[1]; - readPagesIntoTableLab(signal, tableId); - return; - break; - } - case DihContinueB::ZREAD_PAGES_INTO_FRAG: - { - RWFragment rf; - jam(); - rf.rwfTabPtr.i = signal->theData[1]; - ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord); - rf.fragId = signal->theData[2]; - rf.pageIndex = signal->theData[3]; - rf.wordIndex = signal->theData[4]; - readPagesIntoFragLab(signal, &rf); - return; - break; - } - case DihContinueB::ZCOPY_TABLE: - { - jam(); - Uint32 tableId = signal->theData[1]; - copyTableLab(signal, tableId); - return; - } - case DihContinueB::ZCOPY_TABLE_NODE: - { - NodeRecordPtr nodePtr; - CopyTableNode ctn; - jam(); - ctn.ctnTabPtr.i = signal->theData[1]; - ptrCheckGuard(ctn.ctnTabPtr, ctabFileSize, tabRecord); - nodePtr.i = signal->theData[2]; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - ctn.pageIndex = signal->theData[3]; - ctn.wordIndex = signal->theData[4]; - ctn.noOfWords = signal->theData[5]; - copyTableNode(signal, &ctn, nodePtr); - return; - } - case DihContinueB::ZSTART_FRAGMENT: - { - jam(); - Uint32 tableId = signal->theData[1]; - Uint32 fragId = signal->theData[2]; - startFragment(signal, tableId, fragId); - return; - } - case DihContinueB::ZCOMPLETE_RESTART: - jam(); - completeRestartLab(signal); - return; - case DihContinueB::ZREAD_TABLE_FROM_PAGES: - { - TabRecordPtr tabPtr; - jam(); - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - readTableFromPagesLab(signal, tabPtr); - return; - } - case DihContinueB::ZSR_PHASE2_READ_TABLE: - { - TabRecordPtr tabPtr; - jam(); - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - srPhase2ReadTableLab(signal, tabPtr); - return; - } - case DihContinueB::ZCHECK_TC_COUNTER: - jam(); -#ifndef NO_LCP - checkTcCounterLab(signal); -#endif - return; - case DihContinueB::ZCALCULATE_KEEP_GCI: - { - jam(); - Uint32 tableId = signal->theData[1]; - Uint32 fragId = 
signal->theData[2]; - calculateKeepGciLab(signal, tableId, fragId); - return; - } - case DihContinueB::ZSTORE_NEW_LCP_ID: - jam(); - storeNewLcpIdLab(signal); - return; - case DihContinueB::ZTABLE_UPDATE: - { - TabRecordPtr tabPtr; - jam(); - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tableUpdateLab(signal, tabPtr); - return; - } - case DihContinueB::ZCHECK_LCP_COMPLETED: - { - jam(); - checkLcpCompletedLab(signal); - return; - } - case DihContinueB::ZINIT_LCP: - { - jam(); - Uint32 senderRef = signal->theData[1]; - Uint32 tableId = signal->theData[2]; - initLcpLab(signal, senderRef, tableId); - return; - } - case DihContinueB::ZADD_TABLE_MASTER_PAGES: - { - TabRecordPtr tabPtr; - jam(); - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_MASTER; - tableUpdateLab(signal, tabPtr); - return; - break; - } - case DihContinueB::ZDIH_ADD_TABLE_MASTER: - { - jam(); - addTable_closeConf(signal, signal->theData[1]); - return; - } - case DihContinueB::ZADD_TABLE_SLAVE_PAGES: - { - TabRecordPtr tabPtr; - jam(); - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->tabUpdateState = TabRecord::US_ADD_TABLE_SLAVE; - tableUpdateLab(signal, tabPtr); - return; - } - case DihContinueB::ZDIH_ADD_TABLE_SLAVE: - { - ndbrequire(false); - return; - } - case DihContinueB::ZSTART_GCP: - jam(); -#ifndef NO_GCP - startGcpLab(signal, signal->theData[1]); -#endif - return; - break; - case DihContinueB::ZCOPY_GCI:{ - jam(); - CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)signal->theData[1]; - ndbrequire(c_copyGCIMaster.m_copyReason == reason); - sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ); - return; - } - break; - case DihContinueB::ZEMPTY_VERIFY_QUEUE: - jam(); - emptyverificbuffer(signal, true); - return; - break; - case DihContinueB::ZCHECK_GCP_STOP: - jam(); -#ifndef NO_GCP - checkGcpStopLab(signal); -#endif - return; - break; - case DihContinueB::ZREMOVE_NODE_FROM_TABLE: - { - jam(); - Uint32 nodeId = signal->theData[1]; - Uint32 tableId = signal->theData[2]; - removeNodeFromTables(signal, nodeId, tableId); - return; - } - case DihContinueB::ZCOPY_NODE: - { - jam(); - Uint32 tableId = signal->theData[1]; - copyNodeLab(signal, tableId); - return; - } - case DihContinueB::ZSTART_TAKE_OVER: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - Uint32 startNode = signal->theData[2]; - Uint32 toNode = signal->theData[3]; - startTakeOver(signal, takeOverPtrI, startNode, toNode); - return; - break; - } - case DihContinueB::ZCHECK_START_TAKE_OVER: - jam(); - checkStartTakeOver(signal); - break; - case DihContinueB::ZTO_START_COPY_FRAG: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - startNextCopyFragment(signal, takeOverPtrI); - return; - } - case DihContinueB::ZINVALIDATE_NODE_LCP: - { - jam(); - const Uint32 nodeId = signal->theData[1]; - const Uint32 tableId = signal->theData[2]; - invalidateNodeLCP(signal, nodeId, tableId); - return; - } - case DihContinueB::ZINITIALISE_RECORDS: - jam(); - initialiseRecordsLab(signal, - signal->theData[1], - signal->theData[2], - signal->theData[3]); - return; - break; - case DihContinueB::ZSTART_PERMREQ_AGAIN: - jam(); - nodeRestartPh2Lab2(signal); - return; - break; - case DihContinueB::SwitchReplica: - { - jam(); - const Uint32 nodeId = signal->theData[1]; - const Uint32 tableId = signal->theData[2]; - const Uint32 fragNo = signal->theData[3]; - switchReplica(signal, nodeId, tableId, fragNo); - 
return; - } - case DihContinueB::ZSEND_START_TO: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - sendStartTo(signal, takeOverPtrI); - return; - } - case DihContinueB::ZSEND_ADD_FRAG: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - toCopyFragLab(signal, takeOverPtrI); - return; - } - case DihContinueB::ZSEND_UPDATE_TO: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - Uint32 updateState = signal->theData[4]; - sendUpdateTo(signal, takeOverPtrI, updateState); - return; - } - case DihContinueB::ZSEND_END_TO: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - sendEndTo(signal, takeOverPtrI); - return; - } - case DihContinueB::ZSEND_CREATE_FRAG: - { - jam(); - Uint32 takeOverPtrI = signal->theData[1]; - Uint32 storedType = signal->theData[2]; - Uint32 startGci = signal->theData[3]; - sendCreateFragReq(signal, startGci, storedType, takeOverPtrI); - return; - } - case DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE:{ - jam(); - TabRecordPtr tabPtr; - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - waitDropTabWritingToFile(signal, tabPtr); - return; - } - case DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH:{ - jam(); - Uint32 nodeId = signal->theData[1]; - Uint32 tableId = signal->theData[2]; - checkWaitDropTabFailedLqh(signal, nodeId, tableId); - return; - } - case DihContinueB::ZTO_START_FRAGMENTS: - { - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = signal->theData[1]; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - nr_start_fragments(signal, takeOverPtr); - return; - } - }//switch - - ndbrequire(false); - return; -}//Dbdih::execCONTINUEB() - -void Dbdih::execCOPY_GCIREQ(Signal* signal) -{ - CopyGCIReq * const copyGCI = (CopyGCIReq *)&signal->theData[0]; - jamEntry(); - CopyGCIReq::CopyReason reason = (CopyGCIReq::CopyReason)copyGCI->copyReason; - const Uint32 tstart = copyGCI->startWord; - - ndbrequire(cmasterdihref == signal->senderBlockRef()) ; - ndbrequire(c_copyGCISlave.m_copyReason == CopyGCIReq::IDLE); - ndbrequire(c_copyGCISlave.m_expectedNextWord == tstart); - ndbrequire(reason != CopyGCIReq::IDLE); - bool isdone = (tstart + CopyGCIReq::DATA_SIZE) >= Sysfile::SYSFILE_SIZE32; - - if (ERROR_INSERTED(7177)) - { - jam(); - - if (signal->getLength() == 3) - { - jam(); - goto done; - } - } - - arrGuard(tstart + CopyGCIReq::DATA_SIZE, sizeof(sysfileData)/4); - for(Uint32 i = 0; idata[i]; - - if (ERROR_INSERTED(7177) && isMaster() && isdone) - { - sendSignalWithDelay(reference(), GSN_COPY_GCIREQ, signal, 1000, 3); - return; - } - -done: - if (isdone) - { - jam(); - c_copyGCISlave.m_expectedNextWord = 0; - } - else - { - jam(); - c_copyGCISlave.m_expectedNextWord += CopyGCIReq::DATA_SIZE; - return; - } - - if (cmasterdihref != reference()) - { - jam(); - Uint32 tmp= SYSFILE->m_restart_seq; - memcpy(sysfileData, cdata, sizeof(sysfileData)); - SYSFILE->m_restart_seq = tmp; - - if (c_set_initial_start_flag) - { - jam(); - Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits); - } - } - - c_copyGCISlave.m_copyReason = reason; - c_copyGCISlave.m_senderRef = signal->senderBlockRef(); - c_copyGCISlave.m_senderData = copyGCI->anyData; - - CRASH_INSERTION2(7020, reason==CopyGCIReq::LOCAL_CHECKPOINT); - CRASH_INSERTION2(7008, reason==CopyGCIReq::GLOBAL_CHECKPOINT); - - /* -------------------------------------------------------------------------*/ - /* WE SET THE REQUESTER OF THE COPY GCI TO THE CURRENT MASTER. 
IF THE */ - /* CURRENT MASTER WE DO NOT WANT THE NEW MASTER TO RECEIVE CONFIRM OF */ - /* SOMETHING HE HAS NOT SENT. THE TAKE OVER MUST BE CAREFUL. */ - /* -------------------------------------------------------------------------*/ - bool ok = false; - switch(reason){ - case CopyGCIReq::IDLE: - ok = true; - jam(); - ndbrequire(false); - break; - case CopyGCIReq::LOCAL_CHECKPOINT: { - ok = true; - jam(); - c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__); - c_lcpState.m_masterLcpDihRef = cmasterdihref; - setNodeInfo(signal); - break; - } - case CopyGCIReq::RESTART: { - ok = true; - jam(); - coldgcp = SYSFILE->newestRestorableGCI; - crestartGci = SYSFILE->newestRestorableGCI; - c_newest_restorable_gci = SYSFILE->newestRestorableGCI; - Sysfile::setRestartOngoing(SYSFILE->systemRestartBits); - currentgcp = coldgcp + 1; - cnewgcp = coldgcp + 1; - setNodeInfo(signal); - if ((Sysfile::getLCPOngoing(SYSFILE->systemRestartBits))) { - jam(); - /* -------------------------------------------------------------------- */ - // IF THERE WAS A LOCAL CHECKPOINT ONGOING AT THE CRASH MOMENT WE WILL - // INVALIDATE THAT LOCAL CHECKPOINT. - /* -------------------------------------------------------------------- */ - invalidateLcpInfoAfterSr(); - }//if - break; - } - case CopyGCIReq::GLOBAL_CHECKPOINT: { - ok = true; - jam(); - cgcpParticipantState = GCP_PARTICIPANT_COPY_GCI_RECEIVED; - c_newest_restorable_gci = SYSFILE->newestRestorableGCI; - setNodeInfo(signal); - break; - }//if - case CopyGCIReq::INITIAL_START_COMPLETED: - ok = true; - jam(); - break; - } - ndbrequire(ok); - - CRASH_INSERTION(7183); - - if (ERROR_INSERTED(7185) && reason==CopyGCIReq::GLOBAL_CHECKPOINT) - { - jam(); - return; - } - - /* ----------------------------------------------------------------------- */ - /* WE START BY TRYING TO OPEN THE FIRST RESTORABLE GCI FILE. 
*/ - /* ----------------------------------------------------------------------- */ - FileRecordPtr filePtr; - filePtr.i = crestartInfoFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - if (filePtr.p->fileStatus == FileRecord::OPEN) { - jam(); - openingCopyGciSkipInitLab(signal, filePtr); - return; - }//if - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI; - return; -}//Dbdih::execCOPY_GCIREQ() - -void Dbdih::execDICTSTARTCONF(Signal* signal) -{ - jamEntry(); - Uint32 nodeId = refToNode(signal->getSendersBlockRef()); - if (nodeId != getOwnNodeId()) { - jam(); - nodeDictStartConfLab(signal); - } else { - jam(); - dictStartConfLab(signal); - }//if -}//Dbdih::execDICTSTARTCONF() - -void Dbdih::execFSCLOSECONF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - filePtr.p->fileStatus = FileRecord::CLOSED; - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::CLOSING_GCP: - jam(); - closingGcpLab(signal, filePtr); - break; - case FileRecord::CLOSING_GCP_CRASH: - jam(); - closingGcpCrashLab(signal, filePtr); - break; - case FileRecord::CLOSING_TABLE_CRASH: - jam(); - closingTableCrashLab(signal, filePtr); - break; - case FileRecord::CLOSING_TABLE_SR: - jam(); - closingTableSrLab(signal, filePtr); - break; - case FileRecord::TABLE_CLOSE: - jam(); - tableCloseLab(signal, filePtr); - break; - case FileRecord::TABLE_CLOSE_DELETE: - jam(); - tableDeleteLab(signal, filePtr); - break; - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbdih::execFSCLOSECONF() - -void Dbdih::execFSCLOSEREF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::CLOSING_GCP: - jam(); - break; - case FileRecord::CLOSING_GCP_CRASH: - jam(); - closingGcpCrashLab(signal, filePtr); - return; - case FileRecord::CLOSING_TABLE_CRASH: - jam(); - closingTableCrashLab(signal, filePtr); - return; - case FileRecord::CLOSING_TABLE_SR: - jam(); - break; - case FileRecord::TABLE_CLOSE: - jam(); - break; - case FileRecord::TABLE_CLOSE_DELETE: - jam(); - break; - default: - jam(); - break; - - }//switch - { - char msg[100]; - sprintf(msg, "File system close failed during FileRecord status %d", (Uint32)status); - fsRefError(signal,__LINE__,msg); - } - - return; -}//Dbdih::execFSCLOSEREF() - -void Dbdih::execFSOPENCONF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - filePtr.p->fileRef = signal->theData[1]; - filePtr.p->fileStatus = FileRecord::OPEN; - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::CREATING_GCP: - jam(); - creatingGcpLab(signal, filePtr); - break; - case FileRecord::OPENING_COPY_GCI: - jam(); - openingCopyGciSkipInitLab(signal, filePtr); - break; - case FileRecord::CREATING_COPY_GCI: - jam(); - openingCopyGciSkipInitLab(signal, filePtr); - break; - case FileRecord::OPENING_GCP: - jam(); - openingGcpLab(signal, filePtr); - break; - case FileRecord::OPENING_TABLE: - jam(); - openingTableLab(signal, filePtr); - break; - case FileRecord::TABLE_CREATE: - jam(); - tableCreateLab(signal, 
filePtr); - break; - case FileRecord::TABLE_OPEN_FOR_DELETE: - jam(); - tableOpenLab(signal, filePtr); - break; - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbdih::execFSOPENCONF() - -void Dbdih::execFSOPENREF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::CREATING_GCP: - /* --------------------------------------------------------------------- */ - /* WE DID NOT MANAGE TO CREATE A GLOBAL CHECKPOINT FILE. SERIOUS ERROR */ - /* WHICH CAUSES A SYSTEM RESTART. */ - /* --------------------------------------------------------------------- */ - jam(); - break; - case FileRecord::OPENING_COPY_GCI: - jam(); - openingCopyGciErrorLab(signal, filePtr); - return; - case FileRecord::CREATING_COPY_GCI: - jam(); - break; - case FileRecord::OPENING_GCP: - jam(); - openingGcpErrorLab(signal, filePtr); - return; - case FileRecord::OPENING_TABLE: - jam(); - openingTableErrorLab(signal, filePtr); - return; - case FileRecord::TABLE_CREATE: - jam(); - break; - case FileRecord::TABLE_OPEN_FOR_DELETE: - jam(); - tableDeleteLab(signal, filePtr); - return; - default: - jam(); - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system open failed during FileRecord status %d", (Uint32)status); - fsRefError(signal,__LINE__,msg); - } - return; -}//Dbdih::execFSOPENREF() - -void Dbdih::execFSREADCONF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::READING_GCP: - jam(); - readingGcpLab(signal, filePtr); - break; - case FileRecord::READING_TABLE: - jam(); - readingTableLab(signal, filePtr); - break; - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbdih::execFSREADCONF() - -void Dbdih::execFSREADREF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::READING_GCP: - jam(); - readingGcpErrorLab(signal, filePtr); - return; - case FileRecord::READING_TABLE: - jam(); - readingTableErrorLab(signal, filePtr); - return; - default: - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system read failed during FileRecord status %d", (Uint32)status); - fsRefError(signal,__LINE__,msg); - } -}//Dbdih::execFSREADREF() - -void Dbdih::execFSWRITECONF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::WRITING_COPY_GCI: - jam(); - writingCopyGciLab(signal, filePtr); - break; - case FileRecord::WRITE_INIT_GCP: - jam(); - writeInitGcpLab(signal, filePtr); - break; - case FileRecord::TABLE_WRITE: - jam(); - tableWriteLab(signal, filePtr); - break; - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbdih::execFSWRITECONF() - -void Dbdih::execFSWRITEREF(Signal* signal) -{ - FileRecordPtr filePtr; - jamEntry(); - filePtr.i = signal->theData[0]; - 
ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - FileRecord::ReqStatus status = filePtr.p->reqStatus; - filePtr.p->reqStatus = FileRecord::IDLE; - switch (status) { - case FileRecord::WRITING_COPY_GCI: - /* --------------------------------------------------------------------- */ - /* EVEN CREATING THE FILE DID NOT WORK. WE WILL THEN CRASH. */ - /* ERROR IN WRITING FILE. WE WILL NOT CONTINUE FROM HERE. */ - /* --------------------------------------------------------------------- */ - jam(); - break; - case FileRecord::WRITE_INIT_GCP: - /* --------------------------------------------------------------------- */ - /* AN ERROR OCCURRED IN WRITING A GCI FILE WHICH IS A SERIOUS ERROR */ - /* THAT CAUSE A SYSTEM RESTART. */ - /* --------------------------------------------------------------------- */ - jam(); - break; - case FileRecord::TABLE_WRITE: - jam(); - break; - default: - jam(); - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system write failed during FileRecord status %d", (Uint32)status); - fsRefError(signal,__LINE__,msg); - } - return; -}//Dbdih::execFSWRITEREF() - -void Dbdih::execGETGCIREQ(Signal* signal) -{ - - jamEntry(); - Uint32 userPtr = signal->theData[0]; - BlockReference userRef = signal->theData[1]; - - signal->theData[0] = userPtr; - signal->theData[1] = SYSFILE->newestRestorableGCI; - sendSignal(userRef, GSN_GETGCICONF, signal, 2, JBB); -}//Dbdih::execGETGCIREQ() - -void Dbdih::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequireErr(p != 0, NDBD_EXIT_INVALID_CONFIG); - - initData(); - - ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_API_CONNECT, - &capiConnectFileSize), - NDBD_EXIT_INVALID_CONFIG); - ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_CONNECT, - &cconnectFileSize), - NDBD_EXIT_INVALID_CONFIG); - ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_FRAG_CONNECT, - &cfragstoreFileSize), - NDBD_EXIT_INVALID_CONFIG); - ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_REPLICAS, - &creplicaFileSize), - NDBD_EXIT_INVALID_CONFIG); - ndbrequireErr(!ndb_mgm_get_int_parameter(p, CFG_DIH_TABLE, &ctabFileSize), - NDBD_EXIT_INVALID_CONFIG); - cfileFileSize = (2 * ctabFileSize) + 2; - initRecords(); - initialiseRecordsLab(signal, 0, ref, senderData); - return; -}//Dbdih::execSIZEALT_REP() - -void Dbdih::execSTART_COPYREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dbdih::execSTART_COPYREF() - -void Dbdih::execSTART_FRAGCONF(Signal* signal) -{ - (void)signal; // Don't want compiler warning - /* ********************************************************************* */ - /* If anyone wants to add functionality in this method, be aware that */ - /* for temporary tables no START_FRAGREQ is sent and therefore no */ - /* START_FRAGCONF signal will be received for those tables!! 
*/ - /* ********************************************************************* */ - jamEntry(); - return; -}//Dbdih::execSTART_FRAGCONF() - -void Dbdih::execSTART_FRAGREF(Signal* signal) -{ - jamEntry(); - - /** - * Kill starting node - */ - Uint32 errCode = signal->theData[1]; - Uint32 nodeId = signal->theData[2]; - - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::StartFragRefError; - sysErr->errorRef = reference(); - sysErr->data1 = errCode; - sysErr->data2 = 0; - sendSignal(calcNdbCntrBlockRef(nodeId), GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBB); - return; -}//Dbdih::execSTART_FRAGCONF() - -void Dbdih::execSTART_MEREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dbdih::execSTART_MEREF() - -void Dbdih::execTAB_COMMITREQ(Signal* signal) -{ - TabRecordPtr tabPtr; - jamEntry(); - Uint32 tdictPtr = signal->theData[0]; - BlockReference tdictBlockref = signal->theData[1]; - tabPtr.i = signal->theData[2]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_CREATING); - tabPtr.p->tabStatus = TabRecord::TS_ACTIVE; - signal->theData[0] = tdictPtr; - signal->theData[1] = cownNodeId; - signal->theData[2] = tabPtr.i; - sendSignal(tdictBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB); - return; -}//Dbdih::execTAB_COMMITREQ() - -/* - 3.2 S T A N D A R D S U B P R O G R A M S I N P L E X - ************************************************************* - */ -/* - 3.2.1 S T A R T / R E S T A R T - ********************************** - */ -/*****************************************************************************/ -/* ********** START / RESTART MODULE *************/ -/*****************************************************************************/ -/* - 3.2.1.1 LOADING O W N B L O C K R E F E R E N C E (ABSOLUTE PHASE 1) - ***************************************************************************** - */ -void Dbdih::execDIH_RESTARTREQ(Signal* signal) -{ - jamEntry(); - if (signal->theData[0]) - { - jam(); - cntrlblockref = signal->theData[0]; - if(m_ctx.m_config.getInitialStart()){ - sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); - } else { - readGciFileLab(signal); - } - } - else - { - /** - * Precondition, (not checked) - * atleast 1 node in each node group - */ - Uint32 i; - NdbNodeBitmask mask; - mask.assign(NdbNodeBitmask::Size, signal->theData + 1); - Uint32 *node_gcis = signal->theData+1+NdbNodeBitmask::Size; - Uint32 node_group_gcis[MAX_NDB_NODES+1]; - bzero(node_group_gcis, sizeof(node_group_gcis)); - for (i = 0; inodeGroups); - ndbrequire(ng < MAX_NDB_NODES); - Uint32 gci = node_gcis[i]; - if (gci < SYSFILE->lastCompletedGCI[i]) - { - jam(); - /** - * Handle case, where *I* know that node complete GCI - * but node does not...bug#29167 - * i.e node died before it wrote own sysfile - */ - gci = SYSFILE->lastCompletedGCI[i]; - } - - if (gci > node_group_gcis[ng]) - { - jam(); - node_group_gcis[ng] = gci; - } - } - } - for (i = 0; itheData[0] = i; - return; - } - } - signal->theData[0] = MAX_NDB_NODES; - return; - } - return; -}//Dbdih::execDIH_RESTARTREQ() - -void Dbdih::execSTTOR(Signal* signal) -{ - jamEntry(); - - signal->theData[0] = 0; - signal->theData[1] = 0; - signal->theData[2] = 0; - signal->theData[3] = 1; // Next start phase - signal->theData[4] = 255; // Next start phase - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB); - return; -}//Dbdih::execSTTOR() - -void Dbdih::initialStartCompletedLab(Signal* signal) -{ - 
/*-------------------------------------------------------------------------*/ - /* NOW THAT (RE)START IS COMPLETED WE CAN START THE LCP.*/ - /*-------------------------------------------------------------------------*/ - return; -}//Dbdih::initialStartCompletedLab() - -/* - * *************************************************************************** - * S E N D I N G R E P L Y T O S T A R T / R E S T A R T R E Q U E S T S - * **************************************************************************** - */ -void Dbdih::ndbsttorry10Lab(Signal* signal, Uint32 _line) -{ - /*-------------------------------------------------------------------------*/ - // AN NDB START PHASE HAS BEEN COMPLETED. WHEN START PHASE 6 IS COMPLETED WE - // RECORD THAT THE SYSTEM IS RUNNING. - /*-------------------------------------------------------------------------*/ - signal->theData[0] = reference(); - sendSignal(cntrlblockref, GSN_NDB_STTORRY, signal, 1, JBB); - return; -}//Dbdih::ndbsttorry10Lab() - -/* -**************************************** -I N T E R N A L P H A S E S -**************************************** -*/ -/*---------------------------------------------------------------------------*/ -/*NDB_STTOR START SIGNAL AT START/RESTART */ -/*---------------------------------------------------------------------------*/ -void Dbdih::execNDB_STTOR(Signal* signal) -{ - jamEntry(); - BlockReference cntrRef = signal->theData[0]; /* SENDERS BLOCK REFERENCE */ - Uint32 ownNodeId = signal->theData[1]; /* OWN PROCESSOR ID*/ - Uint32 phase = signal->theData[2]; /* INTERNAL START PHASE*/ - Uint32 typestart = signal->theData[3]; - - cstarttype = typestart; - cstartPhase = phase; - - switch (phase){ - case ZNDB_SPH1: - jam(); - /*----------------------------------------------------------------------*/ - /* Set the delay between local checkpoints in ndb startphase 1. */ - /*----------------------------------------------------------------------*/ - cownNodeId = ownNodeId; - /*-----------------------------------------------------------------------*/ - // Compute all static block references in this node as part of - // ndb start phase 1. - /*-----------------------------------------------------------------------*/ - cntrlblockref = cntrRef; - clocaltcblockref = calcTcBlockRef(ownNodeId); - clocallqhblockref = calcLqhBlockRef(ownNodeId); - cdictblockref = calcDictBlockRef(ownNodeId); - ndbsttorry10Lab(signal, __LINE__); - break; - - case ZNDB_SPH2: - jam(); - /*-----------------------------------------------------------------------*/ - // Set the number of replicas, maximum is 4 replicas. - // Read the ndb nodes from the configuration. - /*-----------------------------------------------------------------------*/ - - /*-----------------------------------------------------------------------*/ - // For node restarts we will also add a request for permission - // to continue the system restart. - // The permission is given by the master node in the alive set. - /*-----------------------------------------------------------------------*/ - createMutexes(signal, 0); - if (cstarttype == NodeState::ST_INITIAL_NODE_RESTART) - { - jam(); - c_set_initial_start_flag = TRUE; // In sysfile... - } - break; - - case ZNDB_SPH3: - jam(); - /*-----------------------------------------------------------------------*/ - // Non-master nodes performing an initial start will execute - // the start request here since the - // initial start do not synchronise so much from the master. 
- // In the master nodes the start - // request will be sent directly to dih (in ndb_startreq) when all - // nodes have completed phase 3 of the start. - /*-----------------------------------------------------------------------*/ - cmasterState = MASTER_IDLE; - if(cstarttype == NodeState::ST_INITIAL_START || - cstarttype == NodeState::ST_SYSTEM_RESTART){ - jam(); - cmasterState = isMaster() ? MASTER_ACTIVE : MASTER_IDLE; - } - if (!isMaster() && cstarttype == NodeState::ST_INITIAL_START) { - jam(); - ndbStartReqLab(signal, cntrRef); - return; - }//if - ndbsttorry10Lab(signal, __LINE__); - break; - - case ZNDB_SPH4: - jam(); - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - cmasterTakeOverNode = ZNIL; - switch(typestart){ - case NodeState::ST_INITIAL_START: - jam(); - ndbsttorry10Lab(signal, __LINE__); - return; - case NodeState::ST_SYSTEM_RESTART: - jam(); - if (isMaster()) { - jam(); - systemRestartTakeOverLab(signal); - if (anyActiveTakeOver()) - { - jam(); - return; - } - } - ndbsttorry10Lab(signal, __LINE__); - return; - case NodeState::ST_INITIAL_NODE_RESTART: - case NodeState::ST_NODE_RESTART: - jam(); - - /*********************************************************************** - * When starting nodes while system is operational we must be controlled - * by the master since only one node restart is allowed at a time. - * When this signal is confirmed the master has also copied the - * dictionary and the distribution information. - */ - StartMeReq * req = (StartMeReq*)&signal->theData[0]; - req->startingRef = reference(); - req->startingVersion = 0; // Obsolete - sendSignal(cmasterdihref, GSN_START_MEREQ, signal, - StartMeReq::SignalLength, JBB); - return; - } - ndbrequire(false); - break; - case ZNDB_SPH5: - jam(); - switch(typestart){ - case NodeState::ST_INITIAL_START: - case NodeState::ST_SYSTEM_RESTART: - jam(); - jam(); - /*---------------------------------------------------------------------*/ - // WE EXECUTE A LOCAL CHECKPOINT AS A PART OF A SYSTEM RESTART. - // THE IDEA IS THAT WE NEED TO - // ENSURE THAT WE CAN RECOVER FROM PROBLEMS CAUSED BY MANY NODE - // CRASHES THAT CAUSES THE LOG - // TO GROW AND THE NUMBER OF LOG ROUNDS TO EXECUTE TO GROW. - // THIS CAN OTHERWISE GET US INTO - // A SITUATION WHICH IS UNREPAIRABLE. THUS WE EXECUTE A CHECKPOINT - // BEFORE ALLOWING ANY TRANSACTIONS TO START. - /*---------------------------------------------------------------------*/ - if (!isMaster()) { - jam(); - ndbsttorry10Lab(signal, __LINE__); - return; - }//if - - c_lcpState.immediateLcpStart = true; - cwaitLcpSr = true; - checkLcpStart(signal, __LINE__); - return; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - jam(); - signal->theData[0] = cownNodeId; - signal->theData[1] = reference(); - sendSignal(cmasterdihref, GSN_START_COPYREQ, signal, 2, JBB); - return; - } - ndbrequire(false); - case ZNDB_SPH6: - jam(); - switch(typestart){ - case NodeState::ST_INITIAL_START: - case NodeState::ST_SYSTEM_RESTART: - jam(); - if(isMaster()){ - jam(); - startGcp(signal); - } - ndbsttorry10Lab(signal, __LINE__); - return; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - ndbsttorry10Lab(signal, __LINE__); - return; - } - ndbrequire(false); - break; - default: - jam(); - ndbsttorry10Lab(signal, __LINE__); - break; - }//switch -}//Dbdih::execNDB_STTOR() - -void -Dbdih::execNODE_START_REP(Signal* signal) -{ - /* - * Send DICT_UNLOCK_ORD when this node is SL_STARTED. 
- * - * Sending it before (sp 7) conflicts with code which assumes - * SL_STARTING means we are in copy phase of NR. - * - * NodeState::starting.restartType is not supposed to be used - * when SL_STARTED. Also it seems NODE_START_REP can arrive twice. - * - * For these reasons there are no consistency checks and - * we rely on c_dictLockSlavePtrI_nodeRestart alone. - */ - if (c_dictLockSlavePtrI_nodeRestart != RNIL) { - sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); - c_dictLockSlavePtrI_nodeRestart = RNIL; - } -} - -void -Dbdih::createMutexes(Signal * signal, Uint32 count){ - Callback c = { safe_cast(&Dbdih::createMutex_done), count }; - - switch(count){ - case 0:{ - Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle); - mutex.create(c); - return; - } - case 1:{ - Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle); - mutex.create(c); - return; - } - } - - signal->theData[0] = reference(); - sendSignal(cntrlblockref, GSN_READ_NODESREQ, signal, 1, JBB); -} - -void -Dbdih::createMutex_done(Signal* signal, Uint32 senderData, Uint32 retVal){ - jamEntry(); - ndbrequire(retVal == 0); - - switch(senderData){ - case 0:{ - Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle); - mutex.release(); - } - case 1:{ - Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle); - mutex.release(); - } - } - - createMutexes(signal, senderData + 1); -} - -/*****************************************************************************/ -/* ------------------------------------------------------------------------- */ -/* WE HAVE BEEN REQUESTED BY NDBCNTR TO PERFORM A RESTART OF THE */ -/* DATABASE TABLES. */ -/* THIS SIGNAL IS SENT AFTER COMPLETING PHASE 3 IN ALL BLOCKS IN A */ -/* SYSTEM RESTART. WE WILL ALSO JUMP TO THIS LABEL FROM PHASE 3 IN AN */ -/* INITIAL START. */ -/* ------------------------------------------------------------------------- */ -/*****************************************************************************/ -void Dbdih::execNDB_STARTREQ(Signal* signal) -{ - jamEntry(); - BlockReference ref = signal->theData[0]; - cstarttype = signal->theData[1]; - ndbStartReqLab(signal, ref); -}//Dbdih::execNDB_STARTREQ() - -void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref) -{ - cndbStartReqBlockref = ref; - if (cstarttype == NodeState::ST_INITIAL_START) { - jam(); - initRestartInfo(); - initGciFilesLab(signal); - return; - } - - NodeRecordPtr nodePtr; - Uint32 gci = SYSFILE->lastCompletedGCI[getOwnNodeId()]; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) - { - jam(); - ptrAss(nodePtr, nodeRecord); - if (SYSFILE->lastCompletedGCI[nodePtr.i] > gci) - { - jam(); - /** - * Since we're starting(is master) and there - * there are other nodes with higher GCI... - * there gci's must be invalidated... 
- * and they _must_ do an initial start - * indicate this by setting lastCompletedGCI = 0 - */ - SYSFILE->lastCompletedGCI[nodePtr.i] = 0; - ndbrequire(nodePtr.p->nodeStatus != NodeRecord::ALIVE); - warningEvent("Making filesystem for node %d unusable (need --initial)", - nodePtr.i); - } - else if (nodePtr.p->nodeStatus == NodeRecord::ALIVE && - SYSFILE->lastCompletedGCI[nodePtr.i] == 0) - { - jam(); - CRASH_INSERTION(7170); - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Cluster requires this node to be started " - " with --initial as partial start has been performed" - " and this filesystem is unusable"); - progError(__LINE__, - NDBD_EXIT_SR_RESTARTCONFLICT, - buf); - ndbrequire(false); - } - } - - /** - * This set which GCI we will try to restart to - */ - SYSFILE->newestRestorableGCI = gci; - - ndbrequire(isMaster()); - copyGciLab(signal, CopyGCIReq::RESTART); // We have already read the file! -}//Dbdih::ndbStartReqLab() - -void Dbdih::execREAD_NODESCONF(Signal* signal) -{ - unsigned i; - ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; - jamEntry(); - Uint32 nodeArray[MAX_NDB_NODES]; - - csystemnodes = readNodes->noOfNodes; - cmasterNodeId = readNodes->masterNodeId; - int index = 0; - NdbNodeBitmask tmp; tmp.assign(2, readNodes->allNodes); - for (i = 1; i < MAX_NDB_NODES; i++){ - jam(); - if(tmp.get(i)){ - jam(); - nodeArray[index] = i; - if(NodeBitmask::get(readNodes->inactiveNodes, i) == false){ - jam(); - con_lineNodes++; - }//if - index++; - }//if - }//for - - if(cstarttype == NodeState::ST_SYSTEM_RESTART || - cstarttype == NodeState::ST_NODE_RESTART){ - - for(i = 1; inodeStatus); - if(stat == Sysfile::NS_NotDefined && !tmp.get(i)){ - jam(); - continue; - } - - if(tmp.get(i) && stat != Sysfile::NS_NotDefined){ - jam(); - continue; - } - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Illegal configuration change." - " Initial start needs to be performed " - " when changing no of storage nodes (node %d)", i); - progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf); - } - } - - ndbrequire(csystemnodes >= 1 && csystemnodes < MAX_NDB_NODES); - if (cstarttype == NodeState::ST_INITIAL_START) { - jam(); - ndbrequire(cnoReplicas <= csystemnodes); - calculateHotSpare(); - ndbrequire(cnoReplicas <= (csystemnodes - cnoHotSpare)); - }//if - - cmasterdihref = calcDihBlockRef(cmasterNodeId); - /*-------------------------------------------------------------------------*/ - /* MAKE THE LIST OF PRN-RECORD WHICH IS ONE OF THE NODES-LIST IN THIS BLOCK*/ - /*-------------------------------------------------------------------------*/ - makePrnList(readNodes, nodeArray); - if (cstarttype == NodeState::ST_INITIAL_START) { - jam(); - /**---------------------------------------------------------------------- - * WHEN WE INITIALLY START A DATABASE WE WILL CREATE NODE GROUPS. - * ALL NODES ARE PUT INTO NODE GROUPS ALTHOUGH HOT SPARE NODES ARE PUT - * INTO A SPECIAL NODE GROUP. IN EACH NODE GROUP WE HAVE THE SAME AMOUNT - * OF NODES AS THERE ARE NUMBER OF REPLICAS. - * ONE POSSIBLE USAGE OF NODE GROUPS ARE TO MAKE A NODE GROUP A COMPLETE - * FRAGMENT OF THE DATABASE. THIS MEANS THAT ALL REPLICAS WILL BE STORED - * IN THE NODE GROUP. 
- *-----------------------------------------------------------------------*/ - makeNodeGroups(nodeArray); - }//if - ndbrequire(checkNodeAlive(cmasterNodeId)); - if (cstarttype == NodeState::ST_INITIAL_START) { - jam(); - /**----------------------------------------------------------------------- - * INITIALISE THE SECOND NODE-LIST AND SET NODE BITS AND SOME NODE STATUS. - * VERY CONNECTED WITH MAKE_NODE_GROUPS. CHANGING ONE WILL AFFECT THE - * OTHER AS WELL. - *-----------------------------------------------------------------------*/ - setInitialActiveStatus(); - } else if (cstarttype == NodeState::ST_SYSTEM_RESTART) { - jam(); - /*empty*/; - } else if ((cstarttype == NodeState::ST_NODE_RESTART) || - (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - nodeRestartPh2Lab(signal); - return; - } else { - ndbrequire(false); - }//if - /**------------------------------------------------------------------------ - * ESTABLISH CONNECTIONS WITH THE OTHER DIH BLOCKS AND INITIALISE THIS - * NODE-LIST THAT HANDLES CONNECTION WITH OTHER DIH BLOCKS. - *-------------------------------------------------------------------------*/ - ndbsttorry10Lab(signal, __LINE__); -}//Dbdih::execREAD_NODESCONF() - -/*---------------------------------------------------------------------------*/ -/* START NODE LOGIC FOR NODE RESTART */ -/*---------------------------------------------------------------------------*/ -void Dbdih::nodeRestartPh2Lab(Signal* signal) -{ - /* - * Lock master DICT to avoid metadata operations during INR/NR. - * Done just before START_PERMREQ. - * - * It would be more elegant to do this just before START_MEREQ. - * The problem is, on INR we end up in massive invalidateNodeLCP - * which is not fully protected against metadata ops. - */ - ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); - - // check that we are not yet taking part in schema ops - CRASH_INSERTION(7174); - - Uint32 lockType = DictLockReq::NodeRestartLock; - Callback c = { safe_cast(&Dbdih::recvDictLockConf_nodeRestart), 0 }; - sendDictLockReq(signal, lockType, c); -} - -void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret) -{ - ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); - ndbrequire(data != RNIL); - c_dictLockSlavePtrI_nodeRestart = data; - - nodeRestartPh2Lab2(signal); -} - -void Dbdih::nodeRestartPh2Lab2(Signal* signal) -{ - /*------------------------------------------------------------------------*/ - // REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY - // RUNNING SYSTEM. - /*------------------------------------------------------------------------*/ - StartPermReq * const req = (StartPermReq *)&signal->theData[0]; - - req->blockRef = reference(); - req->nodeId = cownNodeId; - req->startType = cstarttype; - sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB); -} - -void Dbdih::execSTART_PERMCONF(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(7121); - Uint32 nodeId = signal->theData[0]; - cfailurenr = signal->theData[1]; - ndbrequire(nodeId == cownNodeId); - ndbsttorry10Lab(signal, __LINE__); -}//Dbdih::execSTART_PERMCONF() - -void Dbdih::execSTART_PERMREF(Signal* signal) -{ - jamEntry(); - Uint32 errorCode = signal->theData[1]; - if (errorCode == StartPermRef::ZNODE_ALREADY_STARTING_ERROR || - errorCode == StartPermRef::ZNODE_START_DISALLOWED_ERROR) { - jam(); - /*-----------------------------------------------------------------------*/ - // The master was busy adding another node. We will wait for a second and - // try again. 
- /*-----------------------------------------------------------------------*/ - signal->theData[0] = DihContinueB::ZSTART_PERMREQ_AGAIN; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 3000, 1); - return; - }//if - - if (errorCode == StartPermRef::InitialStartRequired) - { - CRASH_INSERTION(7170); - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Cluster requires this node to be started " - " with --initial as partial start has been performed" - " and this filesystem is unusable"); - progError(__LINE__, - NDBD_EXIT_SR_RESTARTCONFLICT, - buf); - ndbrequire(false); - } - /*------------------------------------------------------------------------*/ - // Some node process in another node involving our node was still active. We - // will recover from this by crashing here. - // This is controlled restart using the - // already existing features of node crashes. It is not a bug getting here. - /*-------------------------------------------------------------------------*/ - ndbrequire(false); - return; -}//Dbdih::execSTART_PERMREF() - -/*---------------------------------------------------------------------------*/ -/* THIS SIGNAL IS RECEIVED IN THE STARTING NODE WHEN THE START_MEREQ */ -/* HAS BEEN EXECUTED IN THE MASTER NODE. */ -/*---------------------------------------------------------------------------*/ -void Dbdih::execSTART_MECONF(Signal* signal) -{ - jamEntry(); - StartMeConf * const startMe = (StartMeConf *)&signal->theData[0]; - Uint32 nodeId = startMe->startingNodeId; - const Uint32 startWord = startMe->startWord; - Uint32 i; - - CRASH_INSERTION(7130); - ndbrequire(nodeId == cownNodeId); - arrGuard(startWord + StartMeConf::DATA_SIZE, sizeof(cdata)/4); - for(i = 0; i < StartMeConf::DATA_SIZE; i++) - cdata[startWord+i] = startMe->data[i]; - - if(startWord + StartMeConf::DATA_SIZE < Sysfile::SYSFILE_SIZE32){ - jam(); - /** - * We are still waiting for data - */ - return; - } - jam(); - - /** - * Copy into sysfile - * - * But dont copy lastCompletedGCI:s - */ - Uint32 key = SYSFILE->m_restart_seq; - Uint32 tempGCP[MAX_NDB_NODES]; - for(i = 0; i < MAX_NDB_NODES; i++) - tempGCP[i] = SYSFILE->lastCompletedGCI[i]; - - for(i = 0; i < Sysfile::SYSFILE_SIZE32; i++) - sysfileData[i] = cdata[i]; - - SYSFILE->m_restart_seq = key; - for(i = 0; i < MAX_NDB_NODES; i++) - SYSFILE->lastCompletedGCI[i] = tempGCP[i]; - - setNodeActiveStatus(); - setNodeGroups(); - ndbsttorry10Lab(signal, __LINE__); -}//Dbdih::execSTART_MECONF() - -void Dbdih::execSTART_COPYCONF(Signal* signal) -{ - jamEntry(); - Uint32 nodeId = signal->theData[0]; - ndbrequire(nodeId == cownNodeId); - CRASH_INSERTION(7132); - ndbsttorry10Lab(signal, __LINE__); - return; -}//Dbdih::execSTART_COPYCONF() - -/*---------------------------------------------------------------------------*/ -/* MASTER LOGIC FOR NODE RESTART */ -/*---------------------------------------------------------------------------*/ -/* NODE RESTART PERMISSION REQUEST */ -/*---------------------------------------------------------------------------*/ -// A REQUEST FROM A STARTING NODE TO PERFORM A NODE RESTART. IF NO OTHER NODE -// IS ACTIVE IN PERFORMING A NODE RESTART AND THERE ARE NO ACTIVE PROCESSES IN -// THIS NODE INVOLVING THE STARTING NODE THIS REQUEST WILL BE GRANTED. 
-/*---------------------------------------------------------------------------*/ -void Dbdih::execSTART_PERMREQ(Signal* signal) -{ - StartPermReq * const req = (StartPermReq*)&signal->theData[0]; - jamEntry(); - const BlockReference retRef = req->blockRef; - const Uint32 nodeId = req->nodeId; - const Uint32 typeStart = req->startType; - CRASH_INSERTION(7122); - ndbrequire(isMaster()); - ndbrequire(refToNode(retRef) == nodeId); - if ((c_nodeStartMaster.activeState) || - (c_nodeStartMaster.wait != ZFALSE) || - ERROR_INSERTED_CLEAR(7175)) { - jam(); - signal->theData[0] = nodeId; - signal->theData[1] = StartPermRef::ZNODE_ALREADY_STARTING_ERROR; - sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB); - return; - }//if - if (getNodeStatus(nodeId) != NodeRecord::DEAD){ - g_eventLogger.error("nodeStatus in START_PERMREQ = %u", - (Uint32) getNodeStatus(nodeId)); - ndbrequire(false); - }//if - - if (SYSFILE->lastCompletedGCI[nodeId] == 0 && - typeStart != NodeState::ST_INITIAL_NODE_RESTART) - { - jam(); - signal->theData[0] = nodeId; - signal->theData[1] = StartPermRef::InitialStartRequired; - sendSignal(retRef, GSN_START_PERMREF, signal, 2, JBB); - return; - } - - /*---------------------------------------------------------------------- - * WE START THE INCLUSION PROCEDURE - * ---------------------------------------------------------------------*/ - c_nodeStartMaster.failNr = cfailurenr; - c_nodeStartMaster.wait = ZFALSE; - c_nodeStartMaster.startInfoErrorCode = 0; - c_nodeStartMaster.startNode = nodeId; - c_nodeStartMaster.activeState = true; - c_nodeStartMaster.m_outstandingGsn = GSN_START_INFOREQ; - - setNodeStatus(nodeId, NodeRecord::STARTING); - /** - * But if it's a NodeState::ST_INITIAL_NODE_RESTART - * - * We first have to clear LCP's - * For normal node restart we simply ensure that all nodes - * are informed of the node restart - */ - StartInfoReq *const r =(StartInfoReq*)&signal->theData[0]; - r->startingNodeId = nodeId; - r->typeStart = typeStart; - r->systemFailureNo = cfailurenr; - sendLoopMacro(START_INFOREQ, sendSTART_INFOREQ); -}//Dbdih::execSTART_PERMREQ() - -void Dbdih::execSTART_INFOREF(Signal* signal) -{ - StartInfoRef * ref = (StartInfoRef*)&signal->theData[0]; - if (getNodeStatus(ref->startingNodeId) != NodeRecord::STARTING) { - jam(); - return; - }//if - ndbrequire(c_nodeStartMaster.startNode == ref->startingNodeId); - c_nodeStartMaster.startInfoErrorCode = ref->errorCode; - startInfoReply(signal, ref->sendingNodeId); -}//Dbdih::execSTART_INFOREF() - -void Dbdih::execSTART_INFOCONF(Signal* signal) -{ - jamEntry(); - StartInfoConf * conf = (StartInfoConf*)&signal->theData[0]; - if (getNodeStatus(conf->startingNodeId) != NodeRecord::STARTING) { - jam(); - return; - }//if - ndbrequire(c_nodeStartMaster.startNode == conf->startingNodeId); - startInfoReply(signal, conf->sendingNodeId); -}//Dbdih::execSTART_INFOCONF() - -void Dbdih::startInfoReply(Signal* signal, Uint32 nodeId) -{ - receiveLoopMacro(START_INFOREQ, nodeId); - /** - * We're finished with the START_INFOREQ's - */ - if (c_nodeStartMaster.startInfoErrorCode == 0) { - jam(); - /** - * Everything has been a success so far - */ - StartPermConf * conf = (StartPermConf*)&signal->theData[0]; - conf->startingNodeId = c_nodeStartMaster.startNode; - conf->systemFailureNo = cfailurenr; - sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode), - GSN_START_PERMCONF, signal, StartPermConf::SignalLength, JBB); - c_nodeStartMaster.m_outstandingGsn = GSN_START_PERMCONF; - } else { - jam(); - StartPermRef * ref = 
(StartPermRef*)&signal->theData[0]; - ref->startingNodeId = c_nodeStartMaster.startNode; - ref->errorCode = c_nodeStartMaster.startInfoErrorCode; - sendSignal(calcDihBlockRef(c_nodeStartMaster.startNode), - GSN_START_PERMREF, signal, StartPermRef::SignalLength, JBB); - nodeResetStart(); - }//if -}//Dbdih::startInfoReply() - -/*---------------------------------------------------------------------------*/ -/* NODE RESTART CONTINUE REQUEST */ -/*---------------------------------------------------------------------------*/ -// THIS SIGNAL AND THE CODE BELOW IS EXECUTED BY THE MASTER WHEN IT HAS BEEN -// REQUESTED TO START UP A NEW NODE. The master instructs the starting node -// how to set up its log for continued execution. -/*---------------------------------------------------------------------------*/ -void Dbdih::execSTART_MEREQ(Signal* signal) -{ - StartMeReq * req = (StartMeReq*)&signal->theData[0]; - jamEntry(); - const BlockReference Tblockref = req->startingRef; - const Uint32 Tnodeid = refToNode(Tblockref); - - ndbrequire(isMaster()); - ndbrequire(c_nodeStartMaster.startNode == Tnodeid); - ndbrequire(getNodeStatus(Tnodeid) == NodeRecord::STARTING); - - c_nodeStartMaster.blockLcp = true; - if ((c_lcpState.lcpStatus != LCP_STATUS_IDLE) && - (c_lcpState.lcpStatus != LCP_TCGET)) { - jam(); - /*-----------------------------------------------------------------------*/ - // WE WILL NOT ALLOW A NODE RESTART TO COME IN WHEN A LOCAL CHECKPOINT IS - // ONGOING. IT WOULD COMPLICATE THE LCP PROTOCOL TOO MUCH. WE WILL ADD THIS - // LATER. - /*-----------------------------------------------------------------------*/ - return; - }//if - lcpBlockedLab(signal); -}//Dbdih::nodeRestartStartRecConfLab() - -void Dbdih::lcpBlockedLab(Signal* signal) -{ - ndbrequire(getNodeStatus(c_nodeStartMaster.startNode)==NodeRecord::STARTING); - /*------------------------------------------------------------------------*/ - // NOW WE HAVE COPIED ALL INFORMATION IN DICT WE ARE NOW READY TO COPY ALL - // INFORMATION IN DIH TO THE NEW NODE. - /*------------------------------------------------------------------------*/ - c_nodeStartMaster.wait = 10; - signal->theData[0] = DihContinueB::ZCOPY_NODE; - signal->theData[1] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - c_nodeStartMaster.m_outstandingGsn = GSN_COPY_TABREQ; -}//Dbdih::lcpBlockedLab() - -void Dbdih::nodeDictStartConfLab(Signal* signal) -{ - /*-------------------------------------------------------------------------*/ - // NOW WE HAVE COPIED BOTH DIH AND DICT INFORMATION. WE ARE NOW READY TO - // INTEGRATE THE NODE INTO THE LCP AND GCP PROTOCOLS AND TO ALLOW UPDATES OF - // THE DICTIONARY AGAIN. - /*-------------------------------------------------------------------------*/ - c_nodeStartMaster.wait = ZFALSE; - c_nodeStartMaster.blockGcp = true; - if (cgcpStatus != GCP_READY) { - /*-----------------------------------------------------------------------*/ - // The global checkpoint is executing. Wait until it is completed before we - // continue processing the node recovery. - /*-----------------------------------------------------------------------*/ - jam(); - return; - }//if - gcpBlockedLab(signal); - - /*-----------------------------------------------------------------*/ - // Report that node restart has completed copy of dictionary. 
- /*-----------------------------------------------------------------*/ - signal->theData[0] = NDB_LE_NR_CopyDict; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); -}//Dbdih::nodeDictStartConfLab() - -void Dbdih::dihCopyCompletedLab(Signal* signal) -{ - BlockReference ref = calcDictBlockRef(c_nodeStartMaster.startNode); - DictStartReq * req = (DictStartReq*)&signal->theData[0]; - req->restartGci = cnewgcp; - req->senderRef = reference(); - sendSignal(ref, GSN_DICTSTARTREQ, - signal, DictStartReq::SignalLength, JBB); - c_nodeStartMaster.m_outstandingGsn = GSN_DICTSTARTREQ; - c_nodeStartMaster.wait = 0; -}//Dbdih::dihCopyCompletedLab() - -void Dbdih::gcpBlockedLab(Signal* signal) -{ - /*-----------------------------------------------------------------*/ - // Report that node restart has completed copy of distribution info. - /*-----------------------------------------------------------------*/ - signal->theData[0] = NDB_LE_NR_CopyDistr; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); - - /** - * The node DIH will be part of LCP - */ - NodeRecordPtr nodePtr; - nodePtr.i = c_nodeStartMaster.startNode; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->m_inclDihLcp = true; - - /*-------------------------------------------------------------------------*/ - // NOW IT IS TIME TO INFORM ALL OTHER NODES IN THE CLUSTER OF THE STARTED - // NODE SUCH THAT THEY ALSO INCLUDE THE NODE IN THE NODE LISTS AND SO FORTH. - /*------------------------------------------------------------------------*/ - sendLoopMacro(INCL_NODEREQ, sendINCL_NODEREQ); - /*-------------------------------------------------------------------------*/ - // We also need to send to the starting node to ensure he is aware of the - // global checkpoint id and the correct state. We do not wait for any reply - // since the starting node will not send any. 
- /*-------------------------------------------------------------------------*/ - Uint32 startVersion = getNodeInfo(c_nodeStartMaster.startNode).m_version; - - if ((getMajor(startVersion) == 4 && - startVersion >= NDBD_INCL_NODECONF_VERSION_4) || - (getMajor(startVersion) == 5 && - startVersion >= NDBD_INCL_NODECONF_VERSION_5) || - (getMajor(startVersion) > 5)) - { - c_INCL_NODEREQ_Counter.setWaitingFor(c_nodeStartMaster.startNode); - } - - sendINCL_NODEREQ(signal, c_nodeStartMaster.startNode); -}//Dbdih::gcpBlockedLab() - -/*---------------------------------------------------------------------------*/ -// THIS SIGNAL IS EXECUTED IN BOTH SLAVES AND IN THE MASTER -/*---------------------------------------------------------------------------*/ -void Dbdih::execINCL_NODECONF(Signal* signal) -{ - jamEntry(); - Uint32 TstartNode = signal->theData[0]; - Uint32 TsendNodeId_or_blockref = signal->theData[1]; - - Uint32 blocklist[6]; - blocklist[0] = clocallqhblockref; - blocklist[1] = clocaltcblockref; - blocklist[2] = cdictblockref; - blocklist[3] = numberToRef(BACKUP, getOwnNodeId()); - blocklist[4] = numberToRef(SUMA, getOwnNodeId()); - blocklist[5] = 0; - - for (Uint32 i = 0; blocklist[i] != 0; i++) - { - if (TsendNodeId_or_blockref == blocklist[i]) - { - jam(); - - if (TstartNode != c_nodeStartSlave.nodeId) - { - jam(); - warningEvent("Recevied INCL_NODECONF for %u from %s" - " while %u is starting", - TstartNode, - getBlockName(refToBlock(TsendNodeId_or_blockref)), - c_nodeStartSlave.nodeId); - return; - } - - if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE && - blocklist[i+1] != 0) - { - /** - * Send to next in block list - */ - jam(); - signal->theData[0] = reference(); - signal->theData[1] = c_nodeStartSlave.nodeId; - sendSignal(blocklist[i+1], GSN_INCL_NODEREQ, signal, 2, JBB); - return; - } - else - { - /** - * All done, reply to master - */ - jam(); - signal->theData[0] = c_nodeStartSlave.nodeId; - signal->theData[1] = cownNodeId; - sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB); - - c_nodeStartSlave.nodeId = 0; - return; - } - } - } - - if (c_nodeStartMaster.startNode != TstartNode) - { - jam(); - warningEvent("Recevied INCL_NODECONF for %u from %u" - " while %u is starting", - TstartNode, - TsendNodeId_or_blockref, - c_nodeStartMaster.startNode); - return; - } - - ndbrequire(cmasterdihref = reference()); - receiveLoopMacro(INCL_NODEREQ, TsendNodeId_or_blockref); - - CRASH_INSERTION(7128); - /*-------------------------------------------------------------------------*/ - // Now that we have included the starting node in the node lists in the - // various blocks we are ready to start the global checkpoint protocol - /*------------------------------------------------------------------------*/ - c_nodeStartMaster.wait = 11; - c_nodeStartMaster.blockGcp = false; - - signal->theData[0] = reference(); - sendSignal(reference(), GSN_UNBLO_DICTCONF, signal, 1, JBB); -}//Dbdih::execINCL_NODECONF() - -void Dbdih::execUNBLO_DICTCONF(Signal* signal) -{ - jamEntry(); - c_nodeStartMaster.wait = ZFALSE; - if (!c_nodeStartMaster.activeState) { - jam(); - return; - }//if - - CRASH_INSERTION(7129); - /**----------------------------------------------------------------------- - * WE HAVE NOW PREPARED IT FOR INCLUSION IN THE LCP PROTOCOL. - * WE CAN NOW START THE LCP PROTOCOL AGAIN. - * WE HAVE ALSO MADE THIS FOR THE GCP PROTOCOL. - * WE ARE READY TO START THE PROTOCOLS AND RESPOND TO THE START REQUEST - * FROM THE STARTING NODE. 
- *------------------------------------------------------------------------*/ - - StartMeConf * const startMe = (StartMeConf *)&signal->theData[0]; - - const Uint32 wordPerSignal = StartMeConf::DATA_SIZE; - const int noOfSignals = ((Sysfile::SYSFILE_SIZE32 + (wordPerSignal - 1)) / - wordPerSignal); - - startMe->startingNodeId = c_nodeStartMaster.startNode; - startMe->startWord = 0; - - const Uint32 ref = calcDihBlockRef(c_nodeStartMaster.startNode); - for(int i = 0; i < noOfSignals; i++){ - jam(); - { // Do copy - const int startWord = startMe->startWord; - for(Uint32 j = 0; j < wordPerSignal; j++){ - startMe->data[j] = sysfileData[j+startWord]; - } - } - sendSignal(ref, GSN_START_MECONF, signal, StartMeConf::SignalLength, JBB); - startMe->startWord += wordPerSignal; - }//for - c_nodeStartMaster.m_outstandingGsn = GSN_START_MECONF; -}//Dbdih::execUNBLO_DICTCONF() - -/*---------------------------------------------------------------------------*/ -/* NODE RESTART COPY REQUEST */ -/*---------------------------------------------------------------------------*/ -// A NODE RESTART HAS REACHED ITS FINAL PHASE WHEN THE DATA IS TO BE COPIED -// TO THE NODE. START_COPYREQ IS EXECUTED BY THE MASTER NODE. -/*---------------------------------------------------------------------------*/ -void Dbdih::execSTART_COPYREQ(Signal* signal) -{ - jamEntry(); - Uint32 startNodeId = signal->theData[0]; - //BlockReference startingRef = signal->theData[1]; - ndbrequire(c_nodeStartMaster.startNode == startNodeId); - /*-------------------------------------------------------------------------*/ - // REPORT Copy process of node restart is now about to start up. - /*-------------------------------------------------------------------------*/ - signal->theData[0] = NDB_LE_NR_CopyFragsStarted; - signal->theData[1] = startNodeId; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - CRASH_INSERTION(7131); - nodeRestartTakeOver(signal, startNodeId); - // BlockReference ref = calcQmgrBlockRef(startNodeId); - // signal->theData[0] = cownNodeId; - // Remove comments as soon as I open up the Qmgr block - // TODO_RONM - // sendSignal(ref, GSN_ALLOW_NODE_CRASHORD, signal, 1, JBB); -}//Dbdih::execSTART_COPYREQ() - -/*---------------------------------------------------------------------------*/ -/* SLAVE LOGIC FOR NODE RESTART */ -/*---------------------------------------------------------------------------*/ -void Dbdih::execSTART_INFOREQ(Signal* signal) -{ - jamEntry(); - StartInfoReq *const req =(StartInfoReq*)&signal->theData[0]; - Uint32 startNode = req->startingNodeId; - if (cfailurenr != req->systemFailureNo) { - jam(); - //--------------------------------------------------------------- - // A failure occurred since master sent this request. We will ignore - // this request since the node is already dead that is starting. 
- //--------------------------------------------------------------- - return; - }//if - CRASH_INSERTION(7123); - if (isMaster()) { - jam(); - ndbrequire(getNodeStatus(startNode) == NodeRecord::STARTING); - } else { - jam(); - ndbrequire(getNodeStatus(startNode) == NodeRecord::DEAD); - }//if - if ((!getAllowNodeStart(startNode)) || - (c_nodeStartSlave.nodeId != 0) || - (ERROR_INSERTED(7124))) { - jam(); - StartInfoRef *const ref =(StartInfoRef*)&signal->theData[0]; - ref->startingNodeId = startNode; - ref->sendingNodeId = cownNodeId; - ref->errorCode = StartPermRef::ZNODE_START_DISALLOWED_ERROR; - sendSignal(cmasterdihref, GSN_START_INFOREF, signal, - StartInfoRef::SignalLength, JBB); - return; - }//if - setNodeStatus(startNode, NodeRecord::STARTING); - if (req->typeStart == NodeState::ST_INITIAL_NODE_RESTART) { - jam(); - setAllowNodeStart(startNode, false); - invalidateNodeLCP(signal, startNode, 0); - } else { - jam(); - StartInfoConf * c = (StartInfoConf*)&signal->theData[0]; - c->sendingNodeId = cownNodeId; - c->startingNodeId = startNode; - sendSignal(cmasterdihref, GSN_START_INFOCONF, signal, - StartInfoConf::SignalLength, JBB); - return; - }//if -}//Dbdih::execSTART_INFOREQ() - -void Dbdih::execINCL_NODEREQ(Signal* signal) -{ - jamEntry(); - Uint32 retRef = signal->theData[0]; - Uint32 nodeId = signal->theData[1]; - if (nodeId == getOwnNodeId() && ERROR_INSERTED(7165)) - { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(reference(), GSN_INCL_NODEREQ, signal, 5000, signal->getLength()); - return; - } - - Uint32 tnodeStartFailNr = signal->theData[2]; - currentgcp = signal->theData[4]; - CRASH_INSERTION(7127); - cnewgcp = currentgcp; - coldgcp = currentgcp - 1; - if (!isMaster()) { - jam(); - /*-----------------------------------------------------------------------*/ - // We don't want to change the state of the master since he can be in the - // state LCP_TCGET at this time. - /*-----------------------------------------------------------------------*/ - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - }//if - - /*-------------------------------------------------------------------------*/ - // When a node is restarted we must ensure that a lcp will be run - // as soon as possible and the reset the delay according to the original - // configuration. - // Without an initial local checkpoint the new node will not be available. - /*-------------------------------------------------------------------------*/ - if (getOwnNodeId() == nodeId) { - jam(); - /*-----------------------------------------------------------------------*/ - // We are the starting node. We came here only to set the global checkpoint - // id's and the lcp status. 
- /*-----------------------------------------------------------------------*/ - CRASH_INSERTION(7171); - Uint32 masterVersion = getNodeInfo(refToNode(cmasterdihref)).m_version; - - if ((NDB_VERSION_MAJOR == 4 && - masterVersion >= NDBD_INCL_NODECONF_VERSION_4) || - (NDB_VERSION_MAJOR == 5 && - masterVersion >= NDBD_INCL_NODECONF_VERSION_5) || - (NDB_VERSION_MAJOR > 5)) - { - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = getOwnNodeId(); - sendSignal(cmasterdihref, GSN_INCL_NODECONF, signal, 2, JBB); - } - return; - }//if - if (getNodeStatus(nodeId) != NodeRecord::STARTING) { - jam(); - return; - }//if - ndbrequire(cfailurenr == tnodeStartFailNr); - ndbrequire (c_nodeStartSlave.nodeId == 0); - c_nodeStartSlave.nodeId = nodeId; - - ndbrequire (retRef == cmasterdihref); - - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - Sysfile::ActiveStatus TsaveState = nodePtr.p->activeStatus; - Uint32 TnodeGroup = nodePtr.p->nodeGroup; - - new (nodePtr.p) NodeRecord(); - nodePtr.p->nodeGroup = TnodeGroup; - nodePtr.p->activeStatus = TsaveState; - nodePtr.p->nodeStatus = NodeRecord::ALIVE; - nodePtr.p->useInTransactions = true; - nodePtr.p->m_inclDihLcp = true; - - removeDeadNode(nodePtr); - insertAlive(nodePtr); - con_lineNodes++; - - /*-------------------------------------------------------------------------*/ - // WE WILL ALSO SEND THE INCLUDE NODE REQUEST TO THE LOCAL LQH BLOCK. - /*-------------------------------------------------------------------------*/ - signal->theData[0] = reference(); - signal->theData[1] = nodeId; - signal->theData[2] = currentgcp; - sendSignal(clocallqhblockref, GSN_INCL_NODEREQ, signal, 3, JBB); -}//Dbdih::execINCL_NODEREQ() - -/* ------------------------------------------------------------------------- */ -// execINCL_NODECONF() is found in the master logic part since it is used by -// both the master and the slaves. -/* ------------------------------------------------------------------------- */ - -/*****************************************************************************/ -/*********** TAKE OVER DECISION MODULE *************/ -/*****************************************************************************/ -// This module contains the subroutines that take the decision whether to take -// over a node now or not. -/* ------------------------------------------------------------------------- */ -/* MASTER LOGIC FOR SYSTEM RESTART */ -/* ------------------------------------------------------------------------- */ -// WE ONLY COME HERE IF WE ARE THE MASTER AND WE ARE PERFORMING A SYSTEM -// RESTART. WE ALSO COME HERE DURING THIS SYSTEM RESTART ONE TIME PER NODE -// THAT NEEDS TAKE OVER. -/*---------------------------------------------------------------------------*/ -// WE CHECK IF ANY NODE NEEDS TO BE TAKEN OVER AND THE TAKE OVER HAS NOT YET -// BEEN STARTED OR COMPLETED. 
-/*---------------------------------------------------------------------------*/ -void -Dbdih::systemRestartTakeOverLab(Signal* signal) -{ - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - jam(); - break; - /*---------------------------------------------------------------------*/ - // WE HAVE NOT REACHED A STATE YET WHERE THIS NODE NEEDS TO BE TAKEN OVER - /*---------------------------------------------------------------------*/ - case Sysfile::NS_ActiveMissed_2: - case Sysfile::NS_NotActive_NotTakenOver: - jam(); - /*---------------------------------------------------------------------*/ - // THIS NODE IS IN TROUBLE. - // WE MUST SUCCEED WITH A LOCAL CHECKPOINT WITH THIS NODE TO REMOVE THE - // DANGER. IF THE NODE IS NOT ALIVE THEN THIS WILL NOT BE - // POSSIBLE AND WE CAN START THE TAKE OVER IMMEDIATELY IF WE HAVE ANY - // NODES THAT CAN PERFORM A TAKE OVER. - /*---------------------------------------------------------------------*/ - if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) { - jam(); - Uint32 ThotSpareNode = findHotSpare(); - if (ThotSpareNode != RNIL) { - jam(); - startTakeOver(signal, RNIL, ThotSpareNode, nodePtr.i); - }//if - } else if(nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver){ - jam(); - /*-------------------------------------------------------------------*/ - // NOT ACTIVE NODES THAT HAVE NOT YET BEEN TAKEN OVER NEEDS TAKE OVER - // IMMEDIATELY. IF WE ARE ALIVE WE TAKE OVER OUR OWN NODE. - /*-------------------------------------------------------------------*/ - infoEvent("Take over of node %d started", - nodePtr.i); - startTakeOver(signal, RNIL, nodePtr.i, nodePtr.i); - }//if - break; - case Sysfile::NS_TakeOver: - /**------------------------------------------------------------------- - * WE MUST HAVE FAILED IN THE MIDDLE OF THE TAKE OVER PROCESS. - * WE WILL CONCLUDE THE TAKE OVER PROCESS NOW. - *-------------------------------------------------------------------*/ - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - Uint32 takeOverNode = Sysfile::getTakeOverNode(nodePtr.i, - SYSFILE->takeOver); - if(takeOverNode == 0){ - jam(); - warningEvent("Bug in take-over code restarting"); - takeOverNode = nodePtr.i; - } - startTakeOver(signal, RNIL, nodePtr.i, takeOverNode); - } else { - jam(); - /**------------------------------------------------------------------- - * We are not currently taking over, change our active status. - *-------------------------------------------------------------------*/ - nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - setNodeRestartInfoBits(); - }//if - break; - case Sysfile::NS_HotSpare: - jam(); - break; - /*---------------------------------------------------------------------*/ - // WE NEED NOT TAKE OVER NODES THAT ARE HOT SPARE. - /*---------------------------------------------------------------------*/ - case Sysfile::NS_NotDefined: - jam(); - break; - /*---------------------------------------------------------------------*/ - // WE NEED NOT TAKE OVER NODES THAT DO NOT EVEN EXIST IN THE CLUSTER. - /*---------------------------------------------------------------------*/ - default: - ndbrequire(false); - break; - }//switch - }//for - /*-------------------------------------------------------------------------*/ - /* NO TAKE OVER HAS BEEN INITIATED. 
*/ - /*-------------------------------------------------------------------------*/ -}//Dbdih::systemRestartTakeOverLab() - -/*---------------------------------------------------------------------------*/ -// This subroutine is called as part of node restart in the master node. -/*---------------------------------------------------------------------------*/ -void Dbdih::nodeRestartTakeOver(Signal* signal, Uint32 startNodeId) -{ - switch (getNodeActiveStatus(startNodeId)) { - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - case Sysfile::NS_ActiveMissed_2: - jam(); - /*-----------------------------------------------------------------------*/ - // AN ACTIVE NODE HAS BEEN STARTED. THE ACTIVE NODE MUST THEN GET ALL DATA - // IT HAD BEFORE ITS CRASH. WE START THE TAKE OVER IMMEDIATELY. - // SINCE WE ARE AN ACTIVE NODE WE WILL TAKE OVER OUR OWN NODE THAT - // PREVIOUSLY CRASHED. - /*-----------------------------------------------------------------------*/ - startTakeOver(signal, RNIL, startNodeId, startNodeId); - break; - case Sysfile::NS_HotSpare:{ - jam(); - /*-----------------------------------------------------------------------*/ - // WHEN STARTING UP A HOT SPARE WE WILL CHECK IF ANY NODE NEEDS TO TAKEN - // OVER. IF SO THEN WE WILL START THE TAKE OVER. - /*-----------------------------------------------------------------------*/ - bool takeOverStarted = false; - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) { - jam(); - takeOverStarted = true; - startTakeOver(signal, RNIL, startNodeId, nodePtr.i); - }//if - }//for - if (!takeOverStarted) { - jam(); - /*-------------------------------------------------------------------*/ - // NO TAKE OVER WAS NEEDED AT THE MOMENT WE START-UP AND WAIT UNTIL A - // TAKE OVER IS NEEDED. - /*-------------------------------------------------------------------*/ - BlockReference ref = calcDihBlockRef(startNodeId); - signal->theData[0] = startNodeId; - sendSignal(ref, GSN_START_COPYCONF, signal, 1, JBB); - }//if - break; - } - case Sysfile::NS_NotActive_NotTakenOver: - jam(); - /*-----------------------------------------------------------------------*/ - // ALL DATA IN THE NODE IS LOST BUT WE HAVE NOT TAKEN OVER YET. WE WILL - // TAKE OVER OUR OWN NODE - /*-----------------------------------------------------------------------*/ - startTakeOver(signal, RNIL, startNodeId, startNodeId); - break; - case Sysfile::NS_TakeOver:{ - jam(); - /*-------------------------------------------------------------------- - * We were in the process of taking over but it was not completed. - * We will complete it now instead. - *--------------------------------------------------------------------*/ - Uint32 takeOverNode = Sysfile::getTakeOverNode(startNodeId, - SYSFILE->takeOver); - if(takeOverNode == 0){ - jam(); - warningEvent("Bug in take-over code restarting"); - takeOverNode = startNodeId; - } - - startTakeOver(signal, RNIL, startNodeId, takeOverNode); - break; - } - default: - ndbrequire(false); - break; - }//switch - nodeResetStart(); -}//Dbdih::nodeRestartTakeOver() - -/*************************************************************************/ -// Ths routine is called when starting a local checkpoint. 
-/*************************************************************************/ -void Dbdih::checkStartTakeOver(Signal* signal) -{ - NodeRecordPtr csoNodeptr; - Uint32 tcsoHotSpareNode; - Uint32 tcsoTakeOverNode; - if (isMaster()) { - /*-----------------------------------------------------------------*/ - /* WE WILL ONLY START TAKE OVER IF WE ARE MASTER. */ - /*-----------------------------------------------------------------*/ - /* WE WILL ONLY START THE TAKE OVER IF THERE WERE A NEED OF */ - /* A TAKE OVER. */ - /*-----------------------------------------------------------------*/ - /* WE CAN ONLY PERFORM THE TAKE OVER IF WE HAVE A HOT SPARE */ - /* AVAILABLE. */ - /*-----------------------------------------------------------------*/ - tcsoTakeOverNode = 0; - tcsoHotSpareNode = 0; - for (csoNodeptr.i = 1; csoNodeptr.i < MAX_NDB_NODES; csoNodeptr.i++) { - ptrAss(csoNodeptr, nodeRecord); - if (csoNodeptr.p->activeStatus == Sysfile::NS_NotActive_NotTakenOver) { - jam(); - tcsoTakeOverNode = csoNodeptr.i; - } else { - jam(); - if (csoNodeptr.p->activeStatus == Sysfile::NS_HotSpare) { - jam(); - tcsoHotSpareNode = csoNodeptr.i; - }//if - }//if - }//for - if ((tcsoTakeOverNode != 0) && - (tcsoHotSpareNode != 0)) { - jam(); - startTakeOver(signal, RNIL, tcsoHotSpareNode, tcsoTakeOverNode); - }//if - }//if -}//Dbdih::checkStartTakeOver() - -/*****************************************************************************/ -/*********** NODE ADDING MODULE *************/ -/*********** CODE TO HANDLE TAKE OVER *************/ -/*****************************************************************************/ -// A take over can be initiated by a number of things: -// 1) A node restart, usually the node takes over itself but can also take -// over somebody else if its own data was already taken over -// 2) At system restart it is necessary to use the take over code to recover -// nodes which had too old checkpoints to be restorable by the usual -// restoration from disk. -// 3) When a node has missed too many local checkpoints and is decided by the -// master to be taken over by a hot spare node that sits around waiting -// for this to happen. -// -// To support multiple node failures efficiently the code is written such that -// only one take over can handle transitions in state but during a copy -// fragment other take over's can perform state transitions. 
-/*****************************************************************************/ -void Dbdih::startTakeOver(Signal* signal, - Uint32 takeOverPtrI, - Uint32 startNode, - Uint32 nodeTakenOver) -{ - NodeRecordPtr toNodePtr; - NodeGroupRecordPtr NGPtr; - toNodePtr.i = nodeTakenOver; - ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord); - NGPtr.i = toNodePtr.p->nodeGroup; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - TakeOverRecordPtr takeOverPtr; - if (takeOverPtrI == RNIL) { - jam(); - setAllowNodeStart(startNode, false); - seizeTakeOver(takeOverPtr); - if (startNode == c_nodeStartMaster.startNode) { - jam(); - takeOverPtr.p->toNodeRestart = true; - }//if - takeOverPtr.p->toStartingNode = startNode; - takeOverPtr.p->toFailedNode = nodeTakenOver; - } else { - jam(); - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - ndbrequire(takeOverPtr.p->toStartingNode == startNode); - ndbrequire(takeOverPtr.p->toFailedNode == nodeTakenOver); - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_WAIT_START_TAKE_OVER); - }//if - if ((NGPtr.p->activeTakeOver) || (ERROR_INSERTED(7157))) { - jam(); - /**------------------------------------------------------------------------ - * A take over is already active in this node group. We only allow one - * take over per node group. Otherwise we will overload the node group and - * also we will require much more checks when starting up copying of - * fragments. The parallelism for take over is mainly to ensure that we - * can handle take over efficiently in large systems with 4 nodes and above - * A typical case is a 8 node system executing on two 8-cpu boxes. - * A box crash in one of the boxes will mean 4 nodes crashes. - * We want to be able to restart those four nodes to some - * extent in parallel. - * - * We will wait for a few seconds and then try again. - */ - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START_TAKE_OVER; - signal->theData[0] = DihContinueB::ZSTART_TAKE_OVER; - signal->theData[1] = takeOverPtr.i; - signal->theData[2] = startNode; - signal->theData[3] = nodeTakenOver; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 5000, 4); - return; - }//if - NGPtr.p->activeTakeOver = true; - if (startNode == nodeTakenOver) { - jam(); - switch (getNodeActiveStatus(nodeTakenOver)) { - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - case Sysfile::NS_ActiveMissed_2: - jam(); - break; - case Sysfile::NS_NotActive_NotTakenOver: - case Sysfile::NS_TakeOver: - jam(); - setNodeActiveStatus(nodeTakenOver, Sysfile::NS_TakeOver); - break; - default: - ndbrequire(false); - }//switch - } else { - jam(); - setNodeActiveStatus(nodeTakenOver, Sysfile::NS_HotSpare); - setNodeActiveStatus(startNode, Sysfile::NS_TakeOver); - changeNodeGroups(startNode, nodeTakenOver); - }//if - setNodeRestartInfoBits(); - /* ---------------------------------------------------------------------- */ - /* WE SET THE RESTART INFORMATION TO INDICATE THAT WE ARE ABOUT TO TAKE */ - /* OVER THE FAILED NODE. WE SET THIS INFORMATION AND WAIT UNTIL THE */ - /* GLOBAL CHECKPOINT HAS WRITTEN THE RESTART INFORMATION. 
*/ - /* ---------------------------------------------------------------------- */ - Sysfile::setTakeOverNode(takeOverPtr.p->toFailedNode, SYSFILE->takeOver, - startNode); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY; - - if (getNodeState().getSystemRestartInProgress()) - { - jam(); - checkToCopy(); - checkToCopyCompleted(signal); - return; - } - cstartGcpNow = true; -}//Dbdih::startTakeOver() - -void Dbdih::changeNodeGroups(Uint32 startNode, Uint32 nodeTakenOver) -{ - NodeRecordPtr startNodePtr; - NodeRecordPtr toNodePtr; - startNodePtr.i = startNode; - ptrCheckGuard(startNodePtr, MAX_NDB_NODES, nodeRecord); - toNodePtr.i = nodeTakenOver; - ptrCheckGuard(toNodePtr, MAX_NDB_NODES, nodeRecord); - ndbrequire(startNodePtr.p->nodeGroup == ZNIL); - NodeGroupRecordPtr NGPtr; - - NGPtr.i = toNodePtr.p->nodeGroup; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - bool nodeFound = false; - for (Uint32 i = 0; i < NGPtr.p->nodeCount; i++) { - jam(); - if (NGPtr.p->nodesInGroup[i] == nodeTakenOver) { - jam(); - NGPtr.p->nodesInGroup[i] = startNode; - nodeFound = true; - }//if - }//for - ndbrequire(nodeFound); - Sysfile::setNodeGroup(startNodePtr.i, SYSFILE->nodeGroups, toNodePtr.p->nodeGroup); - startNodePtr.p->nodeGroup = toNodePtr.p->nodeGroup; - Sysfile::setNodeGroup(toNodePtr.i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID); - toNodePtr.p->nodeGroup = ZNIL; -}//Dbdih::changeNodeGroups() - -void Dbdih::checkToCopy() -{ - TakeOverRecordPtr takeOverPtr; - for (takeOverPtr.i = 0;takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) { - ptrAss(takeOverPtr, takeOverRecord); - /*----------------------------------------------------------------------*/ - // TAKE OVER HANDLING WRITES RESTART INFORMATION THROUGH - // THE GLOBAL CHECKPOINT - // PROTOCOL. WE CHECK HERE BEFORE STARTING A WRITE OF THE RESTART - // INFORMATION. - /*-----------------------------------------------------------------------*/ - if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_START_COPY_ONGOING; - } else if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY_ONGOING; - }//if - }//for -}//Dbdih::checkToCopy() - -void Dbdih::checkToCopyCompleted(Signal* signal) -{ - /* ------------------------------------------------------------------------*/ - /* WE CHECK HERE IF THE WRITING OF TAKE OVER INFORMATION ALSO HAS BEEN */ - /* COMPLETED. 
*/ - /* ------------------------------------------------------------------------*/ - TakeOverRecordPtr toPtr; - for (toPtr.i = 0; toPtr.i < MAX_NDB_NODES; toPtr.i++) { - ptrAss(toPtr, takeOverRecord); - if (toPtr.p->toMasterStatus == TakeOverRecord::TO_START_COPY_ONGOING){ - jam(); - sendStartTo(signal, toPtr.i); - } else if (toPtr.p->toMasterStatus == TakeOverRecord::TO_END_COPY_ONGOING){ - jam(); - sendEndTo(signal, toPtr.i); - } else { - jam(); - }//if - }//for -}//Dbdih::checkToCopyCompleted() - -bool Dbdih::checkToInterrupted(TakeOverRecordPtr& takeOverPtr) -{ - if (checkNodeAlive(takeOverPtr.p->toStartingNode)) { - jam(); - return false; - } else { - jam(); - endTakeOver(takeOverPtr.i); - return true; - }//if -}//Dbdih::checkToInterrupted() - -void Dbdih::sendStartTo(Signal* signal, Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - CRASH_INSERTION(7155); - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - if ((c_startToLock != RNIL) || (ERROR_INSERTED(7158))) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_START; - signal->theData[0] = DihContinueB::ZSEND_START_TO; - signal->theData[1] = takeOverPtrI; - signal->theData[2] = takeOverPtr.p->toStartingNode; - signal->theData[3] = takeOverPtr.p->toFailedNode; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4); - return; - }//if - c_startToLock = takeOverPtrI; - - takeOverPtr.p->toMasterStatus = TakeOverRecord::STARTING; - StartToReq * const req = (StartToReq *)&signal->theData[0]; - req->userPtr = takeOverPtr.i; - req->userRef = reference(); - req->startingNodeId = takeOverPtr.p->toStartingNode; - req->nodeTakenOver = takeOverPtr.p->toFailedNode; - req->nodeRestart = takeOverPtr.p->toNodeRestart; - sendLoopMacro(START_TOREQ, sendSTART_TOREQ); -}//Dbdih::sendStartTo() - -void Dbdih::execSTART_TOREQ(Signal* signal) -{ - TakeOverRecordPtr takeOverPtr; - jamEntry(); - const StartToReq * const req = (StartToReq *)&signal->theData[0]; - takeOverPtr.i = req->userPtr; - BlockReference ref = req->userRef; - Uint32 startingNode = req->startingNodeId; - - CRASH_INSERTION(7133); - RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId); - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - allocateTakeOver(takeOverPtr); - initStartTakeOver(req, takeOverPtr); - - StartToConf * const conf = (StartToConf *)&signal->theData[0]; - conf->userPtr = takeOverPtr.i; - conf->sendingNodeId = cownNodeId; - conf->startingNodeId = startingNode; - sendSignal(ref, GSN_START_TOCONF, signal, StartToConf::SignalLength, JBB); -}//Dbdih::execSTART_TOREQ() - -void Dbdih::execSTART_TOCONF(Signal* signal) -{ - TakeOverRecordPtr takeOverPtr; - jamEntry(); - const StartToConf * const conf = (StartToConf *)&signal->theData[0]; - - CRASH_INSERTION(7147); - - RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId); - - takeOverPtr.i = conf->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::STARTING); - ndbrequire(takeOverPtr.p->toStartingNode == conf->startingNodeId); - receiveLoopMacro(START_TOREQ, conf->sendingNodeId); - CRASH_INSERTION(7134); - c_startToLock = RNIL; - - if (takeOverPtr.p->toNodeRestart) - { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::STARTING_LOCAL_FRAGMENTS; - nr_start_fragments(signal, takeOverPtr); - return; - } - - startNextCopyFragment(signal, takeOverPtr.i); -}//Dbdih::execSTART_TOCONF() - -void -Dbdih::nr_start_fragments(Signal* signal, - TakeOverRecordPtr takeOverPtr) -{ - Uint32 loopCount = 0 ; - TabRecordPtr 
tabPtr; - while (loopCount++ < 100) { - tabPtr.i = takeOverPtr.p->toCurrentTabref; - if (tabPtr.i >= ctabFileSize) { - jam(); - nr_run_redo(signal, takeOverPtr); - return; - }//if - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE || - tabPtr.p->tabStorage != TabRecord::ST_NORMAL) - { - jam(); - takeOverPtr.p->toCurrentFragid = 0; - takeOverPtr.p->toCurrentTabref++; - continue; - }//if - Uint32 fragId = takeOverPtr.p->toCurrentFragid; - if (fragId >= tabPtr.p->totalfragments) { - jam(); - takeOverPtr.p->toCurrentFragid = 0; - takeOverPtr.p->toCurrentTabref++; - continue; - }//if - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - ReplicaRecordPtr loopReplicaPtr; - loopReplicaPtr.i = fragPtr.p->oldStoredReplicas; - while (loopReplicaPtr.i != RNIL) { - ptrCheckGuard(loopReplicaPtr, creplicaFileSize, replicaRecord); - if (loopReplicaPtr.p->procNode == takeOverPtr.p->toStartingNode) { - jam(); - nr_start_fragment(signal, takeOverPtr, loopReplicaPtr); - break; - } else { - jam(); - loopReplicaPtr.i = loopReplicaPtr.p->nextReplica; - }//if - }//while - takeOverPtr.p->toCurrentFragid++; - }//while - signal->theData[0] = DihContinueB::ZTO_START_FRAGMENTS; - signal->theData[1] = takeOverPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); -} - -void -Dbdih::nr_start_fragment(Signal* signal, - TakeOverRecordPtr takeOverPtr, - ReplicaRecordPtr replicaPtr) -{ - Uint32 i, j = 0; - Uint32 maxLcpId = 0; - Uint32 maxLcpIndex = ~0; - - Uint32 restorableGCI = 0; - - ndbout_c("tab: %d frag: %d replicaP->nextLcp: %d", - takeOverPtr.p->toCurrentTabref, - takeOverPtr.p->toCurrentFragid, - replicaPtr.p->nextLcp); - - Uint32 idx = replicaPtr.p->nextLcp; - for(i = 0; i<MAX_LCP_STORED; i++, idx = nextLcpNo(idx)) - { - ndbout_c("scanning idx: %d lcpId: %d", idx, replicaPtr.p->lcpId[idx]); - if (replicaPtr.p->lcpStatus[idx] == ZVALID) - { - ndbrequire(replicaPtr.p->lcpId[idx] > maxLcpId); - Uint32 stopGci = replicaPtr.p->maxGciStarted[idx]; - for (;j < replicaPtr.p->noCrashedReplicas; j++) - { - ndbout_c("crashed replica: %d(%d) replicaLastGci: %d", - j, - replicaPtr.p->noCrashedReplicas, - replicaPtr.p->replicaLastGci[j]); - if (replicaPtr.p->replicaLastGci[j] > stopGci) - { - maxLcpId = replicaPtr.p->lcpId[idx]; - maxLcpIndex = idx; - restorableGCI = replicaPtr.p->replicaLastGci[j]; - break; - } - } - } - } - - if (maxLcpIndex == ~ (Uint32) 0) - { - ndbout_c("Didnt find any LCP for node: %d tab: %d frag: %d", - takeOverPtr.p->toStartingNode, - takeOverPtr.p->toCurrentTabref, - takeOverPtr.p->toCurrentFragid); - replicaPtr.p->lcpIdStarted = 0; - BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode); - StartFragReq *req = (StartFragReq *)signal->getDataPtrSend(); - req->userPtr = 0; - req->userRef = reference(); - req->lcpNo = ZNIL; - req->lcpId = 0; - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragId = takeOverPtr.p->toCurrentFragid; - req->noOfLogNodes = 0; - sendSignal(ref, GSN_START_FRAGREQ, signal, - StartFragReq::SignalLength, JBB); - } - else - { - ndbout_c("Found LCP: %d(%d) maxGciStarted: %d maxGciCompleted: %d restorable: %d(%d) newestRestorableGCI: %d", - maxLcpId, - maxLcpIndex, - replicaPtr.p->maxGciStarted[maxLcpIndex], - replicaPtr.p->maxGciCompleted[maxLcpIndex], - restorableGCI, - SYSFILE->lastCompletedGCI[takeOverPtr.p->toStartingNode], - SYSFILE->newestRestorableGCI); - - replicaPtr.p->lcpIdStarted = restorableGCI; - BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode); - StartFragReq *req = (StartFragReq *)signal->getDataPtrSend(); - req->userPtr = 0; - req->userRef = reference(); - req->lcpNo =
maxLcpIndex; - req->lcpId = maxLcpId; - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragId = takeOverPtr.p->toCurrentFragid; - req->noOfLogNodes = 1; - req->lqhLogNode[0] = takeOverPtr.p->toStartingNode; - req->startGci[0] = replicaPtr.p->maxGciCompleted[maxLcpIndex]; - req->lastGci[0] = restorableGCI; - sendSignal(ref, GSN_START_FRAGREQ, signal, - StartFragReq::SignalLength, JBB); - } -} - -void -Dbdih::nr_run_redo(Signal* signal, TakeOverRecordPtr takeOverPtr) -{ - takeOverPtr.p->toCurrentTabref = 0; - takeOverPtr.p->toCurrentFragid = 0; - sendSTART_RECREQ(signal, takeOverPtr.p->toStartingNode); -} - -void Dbdih::initStartTakeOver(const StartToReq * req, - TakeOverRecordPtr takeOverPtr) -{ - takeOverPtr.p->toCurrentTabref = 0; - takeOverPtr.p->toCurrentFragid = 0; - takeOverPtr.p->toStartingNode = req->startingNodeId; - takeOverPtr.p->toFailedNode = req->nodeTakenOver; - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_STARTED; - takeOverPtr.p->toCopyNode = RNIL; - takeOverPtr.p->toCurrentReplica = RNIL; - takeOverPtr.p->toNodeRestart = req->nodeRestart; -}//Dbdih::initStartTakeOver() - -void Dbdih::startNextCopyFragment(Signal* signal, Uint32 takeOverPtrI) -{ - TabRecordPtr tabPtr; - TakeOverRecordPtr takeOverPtr; - Uint32 loopCount; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT; - loopCount = 0; - if (ERROR_INSERTED(7159)) { - loopCount = 100; - }//if - while (loopCount++ < 100) { - tabPtr.i = takeOverPtr.p->toCurrentTabref; - if (tabPtr.i >= ctabFileSize) { - jam(); - CRASH_INSERTION(7136); - sendUpdateTo(signal, takeOverPtr.i, UpdateToReq::TO_COPY_COMPLETED); - return; - }//if - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){ - jam(); - takeOverPtr.p->toCurrentFragid = 0; - takeOverPtr.p->toCurrentTabref++; - continue; - }//if - Uint32 fragId = takeOverPtr.p->toCurrentFragid; - if (fragId >= tabPtr.p->totalfragments) { - jam(); - takeOverPtr.p->toCurrentFragid = 0; - takeOverPtr.p->toCurrentTabref++; - if (ERROR_INSERTED(7135)) { - if (takeOverPtr.p->toCurrentTabref == 1) { - ndbrequire(false); - }//if - }//if - continue; - }//if - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - ReplicaRecordPtr loopReplicaPtr; - loopReplicaPtr.i = fragPtr.p->oldStoredReplicas; - while (loopReplicaPtr.i != RNIL) { - ptrCheckGuard(loopReplicaPtr, creplicaFileSize, replicaRecord); - if (loopReplicaPtr.p->procNode == takeOverPtr.p->toFailedNode) { - jam(); - /* ----------------------------------------------------------------- */ - /* WE HAVE FOUND A REPLICA THAT BELONGED THE FAILED NODE THAT NEEDS */ - /* TAKE OVER. WE TAKE OVER THIS REPLICA TO THE NEW NODE. */ - /* ----------------------------------------------------------------- */ - takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i; - toCopyFragLab(signal, takeOverPtr.i); - return; - } else if (loopReplicaPtr.p->procNode == takeOverPtr.p->toStartingNode) { - jam(); - /* ----------------------------------------------------------------- */ - /* WE HAVE OBVIOUSLY STARTED TAKING OVER THIS WITHOUT COMPLETING IT. */ - /* WE */ - /* NEED TO COMPLETE THE TAKE OVER OF THIS REPLICA. 
*/ - /* ----------------------------------------------------------------- */ - takeOverPtr.p->toCurrentReplica = loopReplicaPtr.i; - toCopyFragLab(signal, takeOverPtr.i); - return; - } else { - jam(); - loopReplicaPtr.i = loopReplicaPtr.p->nextReplica; - }//if - }//while - takeOverPtr.p->toCurrentFragid++; - }//while - signal->theData[0] = DihContinueB::ZTO_START_COPY_FRAG; - signal->theData[1] = takeOverPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); -}//Dbdih::startNextCopyFragment() - -void Dbdih::toCopyFragLab(Signal* signal, - Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - - /** - * Inform starting node that TakeOver is about to start - */ - Uint32 nodeId = takeOverPtr.p->toStartingNode; - - Uint32 version = getNodeInfo(nodeId).m_version; - if (ndb_check_prep_copy_frag_version(version)) - { - jam(); - TabRecordPtr tabPtr; - tabPtr.i = takeOverPtr.p->toCurrentTabref; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr); - Uint32 nodes[MAX_REPLICAS]; - extractNodeInfo(fragPtr.p, nodes); - - PrepareCopyFragReq* req= (PrepareCopyFragReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = takeOverPtrI; - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragId = takeOverPtr.p->toCurrentFragid; - req->copyNodeId = nodes[0]; // Src - req->startingNodeId = takeOverPtr.p->toStartingNode; // Dst - Uint32 ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode); - - sendSignal(ref, GSN_PREPARE_COPY_FRAG_REQ, signal, - PrepareCopyFragReq::SignalLength, JBB); - - takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_COPY; - return; - } - - takeOverPtr.p->maxPage = RNIL; - toStartCopyFrag(signal, takeOverPtr); -} - -void -Dbdih::execPREPARE_COPY_FRAG_REF(Signal* signal) -{ - jamEntry(); - PrepareCopyFragRef ref = *(PrepareCopyFragRef*)signal->getDataPtr(); - - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(ref.senderData, takeOverPtr); - - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_COPY); - - /** - * Treat this as copy frag ref - */ - CopyFragRef * cfref = (CopyFragRef*)signal->getDataPtrSend(); - cfref->userPtr = ref.senderData; - cfref->startingNodeId = ref.startingNodeId; - cfref->errorCode = ref.errorCode; - cfref->tableId = ref.tableId; - cfref->fragId = ref.fragId; - cfref->sendingNodeId = ref.copyNodeId; - takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG; - execCOPY_FRAGREF(signal); -} - -void -Dbdih::execPREPARE_COPY_FRAG_CONF(Signal* signal) -{ - PrepareCopyFragConf conf = *(PrepareCopyFragConf*)signal->getDataPtr(); - - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(conf.senderData, takeOverPtr); - - Uint32 version = getNodeInfo(refToNode(conf.senderRef)).m_version; - if (ndb_check_prep_copy_frag_version(version) >= 2) - { - jam(); - takeOverPtr.p->maxPage = conf.maxPageNo; - } - else - { - jam(); - takeOverPtr.p->maxPage = RNIL; - } - toStartCopyFrag(signal, takeOverPtr); -} - -void -Dbdih::toStartCopyFrag(Signal* signal, TakeOverRecordPtr takeOverPtr) -{ - CreateReplicaRecordPtr createReplicaPtr; - createReplicaPtr.i = 0; - ptrAss(createReplicaPtr, createReplicaRecord); - - ReplicaRecordPtr replicaPtr; - replicaPtr.i = takeOverPtr.p->toCurrentReplica; - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - - TabRecordPtr tabPtr; - tabPtr.i = takeOverPtr.p->toCurrentTabref; - ptrCheckGuard(tabPtr, 
ctabFileSize, tabRecord); - /* ----------------------------------------------------------------------- */ - /* WE HAVE FOUND A REPLICA THAT NEEDS TAKE OVER. WE WILL START THIS TAKE */ - /* OVER BY ADDING THE FRAGMENT WHEREAFTER WE WILL ORDER THE PRIMARY */ - /* REPLICA TO COPY ITS CONTENT TO THE NEW STARTING REPLICA. */ - /* THIS OPERATION IS A SINGLE USER OPERATION UNTIL WE HAVE SENT */ - /* COPY_FRAGREQ. AFTER SENDING COPY_FRAGREQ WE ARE READY TO START A NEW */ - /* FRAGMENT REPLICA. WE WILL NOT IMPLEMENT THIS IN THE FIRST PHASE. */ - /* ----------------------------------------------------------------------- */ - cnoOfCreateReplicas = 1; - createReplicaPtr.p->hotSpareUse = true; - createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode; - - prepareSendCreateFragReq(signal, takeOverPtr.i); -}//Dbdih::toStartCopy() - -void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - - TabRecordPtr tabPtr; - tabPtr.i = takeOverPtr.p->toCurrentTabref; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - FragmentstorePtr fragPtr; - - getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr); - Uint32 nodes[MAX_REPLICAS]; - extractNodeInfo(fragPtr.p, nodes); - takeOverPtr.p->toCopyNode = nodes[0]; - sendCreateFragReq(signal, 0, CreateFragReq::STORED, takeOverPtr.i); -}//Dbdih::prepareSendCreateFragReq() - -void Dbdih::sendCreateFragReq(Signal* signal, - Uint32 startGci, - Uint32 replicaType, - Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - if ((c_createFragmentLock != RNIL) || - ((ERROR_INSERTED(7161))&&(replicaType == CreateFragReq::STORED)) || - ((ERROR_INSERTED(7162))&&(replicaType == CreateFragReq::COMMIT_STORED))){ - if (replicaType == CreateFragReq::STORED) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_PREPARE_CREATE; - } else { - ndbrequire(replicaType == CreateFragReq::COMMIT_STORED); - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_COMMIT_CREATE; - }//if - signal->theData[0] = DihContinueB::ZSEND_CREATE_FRAG; - signal->theData[1] = takeOverPtr.i; - signal->theData[2] = replicaType; - signal->theData[3] = startGci; - signal->theData[4] = takeOverPtr.p->toStartingNode; - signal->theData[5] = takeOverPtr.p->toFailedNode; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 50, 6); - return; - }//if - c_createFragmentLock = takeOverPtr.i; - sendLoopMacro(CREATE_FRAGREQ, nullRoutine); - - CreateFragReq * const req = (CreateFragReq *)&signal->theData[0]; - req->userPtr = takeOverPtr.i; - req->userRef = reference(); - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragId = takeOverPtr.p->toCurrentFragid; - req->startingNodeId = takeOverPtr.p->toStartingNode; - req->copyNodeId = takeOverPtr.p->toCopyNode; - req->startGci = startGci; - req->replicaType = replicaType; - - NodeRecordPtr nodePtr; - nodePtr.i = cfirstAliveNode; - do { - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - BlockReference ref = calcDihBlockRef(nodePtr.i); - sendSignal(ref, GSN_CREATE_FRAGREQ, signal, - CreateFragReq::SignalLength, JBB); - nodePtr.i = nodePtr.p->nextNode; - } while (nodePtr.i != RNIL); - - if (replicaType == CreateFragReq::STORED) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_CREATE; - } else { - ndbrequire(replicaType == CreateFragReq::COMMIT_STORED); - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::COMMIT_CREATE; - } 
-}//Dbdih::sendCreateFragReq() - -/* --------------------------------------------------------------------------*/ -/* AN ORDER TO START OR COMMIT THE REPLICA CREATION ARRIVED FROM THE */ -/* MASTER. */ -/* --------------------------------------------------------------------------*/ -void Dbdih::execCREATE_FRAGREQ(Signal* signal) -{ - jamEntry(); - CreateFragReq * const req = (CreateFragReq *)&signal->theData[0]; - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = req->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - BlockReference retRef = req->userRef; - - TabRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - Uint32 fragId = req->fragId; - Uint32 tdestNodeid = req->startingNodeId; - Uint32 tsourceNodeid = req->copyNodeId; - Uint32 startGci = req->startGci; - Uint32 replicaType = req->replicaType; - - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - RETURN_IF_NODE_NOT_ALIVE(tdestNodeid); - ReplicaRecordPtr frReplicaPtr; - findToReplica(takeOverPtr.p, replicaType, fragPtr, frReplicaPtr); - ndbrequire(frReplicaPtr.i != RNIL); - - switch (replicaType) { - case CreateFragReq::STORED: - jam(); - CRASH_INSERTION(7138); - /* ----------------------------------------------------------------------*/ - /* HERE WE ARE INSERTING THE NEW BACKUP NODE IN THE EXECUTION OF ALL */ - /* OPERATIONS. FROM HERE ON ALL OPERATIONS ON THIS FRAGMENT WILL INCLUDE*/ - /* USE OF THE NEW REPLICA. */ - /* --------------------------------------------------------------------- */ - insertBackup(fragPtr, tdestNodeid); - takeOverPtr.p->toCopyNode = tsourceNodeid; - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_PREPARE; - - fragPtr.p->distributionKey++; - fragPtr.p->distributionKey &= 255; - break; - case CreateFragReq::COMMIT_STORED: - jam(); - CRASH_INSERTION(7139); - /* ----------------------------------------------------------------------*/ - /* HERE WE ARE MOVING THE REPLICA TO THE STORED SECTION SINCE IT IS NOW */ - /* FULLY LOADED WITH ALL DATA NEEDED. */ - // We also update the order of the replicas here so that if the new - // replica is the desired primary we insert it as primary. - /* ----------------------------------------------------------------------*/ - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_CREATE_COMMIT; - removeOldStoredReplica(fragPtr, frReplicaPtr); - linkStoredReplica(fragPtr, frReplicaPtr); - updateNodeInfo(fragPtr); - break; - default: - ndbrequire(false); - break; - }//switch - - /* ------------------------------------------------------------------------*/ - /* THE NEW NODE OF THIS REPLICA IS THE STARTING NODE. */ - /* ------------------------------------------------------------------------*/ - if (frReplicaPtr.p->procNode != takeOverPtr.p->toStartingNode) { - jam(); - /* ---------------------------------------------------------------------*/ - /* IF WE ARE STARTING A TAKE OVER NODE WE MUST INVALIDATE ALL LCP'S. */ - /* OTHERWISE WE WILL TRY TO START LCP'S THAT DO NOT EXIST. 
*/ - /* ---------------------------------------------------------------------*/ - frReplicaPtr.p->procNode = takeOverPtr.p->toStartingNode; - frReplicaPtr.p->noCrashedReplicas = 0; - frReplicaPtr.p->createGci[0] = startGci; - ndbrequire(startGci != 0xF1F1F1F1); - frReplicaPtr.p->replicaLastGci[0] = (Uint32)-1; - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { - frReplicaPtr.p->lcpStatus[i] = ZINVALID; - }//for - } else { - jam(); - const Uint32 noCrashed = frReplicaPtr.p->noCrashedReplicas; - arrGuard(noCrashed, 8); - frReplicaPtr.p->createGci[noCrashed] = startGci; - ndbrequire(startGci != 0xF1F1F1F1); - frReplicaPtr.p->replicaLastGci[noCrashed] = (Uint32)-1; - }//if - takeOverPtr.p->toCurrentTabref = tabPtr.i; - takeOverPtr.p->toCurrentFragid = fragId; - CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0]; - conf->userPtr = takeOverPtr.i; - conf->tableId = tabPtr.i; - conf->fragId = fragId; - conf->sendingNodeId = cownNodeId; - conf->startingNodeId = tdestNodeid; - sendSignal(retRef, GSN_CREATE_FRAGCONF, signal, - CreateFragConf::SignalLength, JBB); -}//Dbdih::execCREATE_FRAGREQ() - -void Dbdih::execCREATE_FRAGCONF(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(7148); - const CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0]; - Uint32 fragId = conf->fragId; - - RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId); - - TabRecordPtr tabPtr; - tabPtr.i = conf->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = conf->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(tabPtr.i == takeOverPtr.p->toCurrentTabref); - ndbrequire(fragId == takeOverPtr.p->toCurrentFragid); - receiveLoopMacro(CREATE_FRAGREQ, conf->sendingNodeId); - c_createFragmentLock = RNIL; - - if (takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_CREATE) { - jam(); - CRASH_INSERTION(7140); - /* --------------------------------------------------------------------- */ - /* ALL NODES HAVE PREPARED THE INTRODUCTION OF THIS NEW NODE AND IT IS */ - /* ALREADY IN USE. WE CAN NOW START COPYING THE FRAGMENT. */ - /*---------------------------------------------------------------------- */ - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - Uint32 gci = 0; - if (takeOverPtr.p->toNodeRestart) - { - ReplicaRecordPtr replicaPtr; - findReplica(replicaPtr, fragPtr.p, takeOverPtr.p->toStartingNode, true); - gci = replicaPtr.p->lcpIdStarted; - replicaPtr.p->lcpIdStarted = 0; - } - takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG; - BlockReference ref = calcLqhBlockRef(takeOverPtr.p->toCopyNode); - CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0]; - copyFragReq->userPtr = takeOverPtr.i; - copyFragReq->userRef = reference(); - copyFragReq->tableId = tabPtr.i; - copyFragReq->fragId = fragId; - copyFragReq->nodeId = takeOverPtr.p->toStartingNode; - copyFragReq->schemaVersion = tabPtr.p->schemaVersion; - copyFragReq->distributionKey = fragPtr.p->distributionKey; - copyFragReq->gci = gci; - Uint32 len = copyFragReq->nodeCount = - extractNodeInfo(fragPtr.p, - copyFragReq->nodeList); - copyFragReq->nodeList[len] = takeOverPtr.p->maxPage; - sendSignal(ref, GSN_COPY_FRAGREQ, signal, - CopyFragReq::SignalLength + len, JBB); - } else { - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE); - jam(); - CRASH_INSERTION(7141); - /* --------------------------------------------------------------------- */ - // REPORT that copy of fragment has been completed. 
- /* --------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NR_CopyFragDone; - signal->theData[1] = takeOverPtr.p->toStartingNode; - signal->theData[2] = tabPtr.i; - signal->theData[3] = takeOverPtr.p->toCurrentFragid; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - /* --------------------------------------------------------------------- */ - /* WE HAVE NOW CREATED THIS NEW REPLICA AND WE ARE READY TO TAKE THE */ - /* THE NEXT REPLICA. */ - /* --------------------------------------------------------------------- */ - - Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle); - mutex.unlock(); // ignore result - - takeOverPtr.p->toCurrentFragid++; - startNextCopyFragment(signal, takeOverPtr.i); - }//if -}//Dbdih::execCREATE_FRAGCONF() - -void Dbdih::execCOPY_FRAGREF(Signal* signal) -{ - const CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0]; - jamEntry(); - Uint32 takeOverPtrI = ref->userPtr; - Uint32 startingNodeId = ref->startingNodeId; - Uint32 errorCode = ref->errorCode; - - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - ndbrequire(errorCode != ZNODE_FAILURE_ERROR); - ndbrequire(ref->tableId == takeOverPtr.p->toCurrentTabref); - ndbrequire(ref->fragId == takeOverPtr.p->toCurrentFragid); - ndbrequire(ref->startingNodeId == takeOverPtr.p->toStartingNode); - ndbrequire(ref->sendingNodeId == takeOverPtr.p->toCopyNode); - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG); - endTakeOver(takeOverPtrI); - //-------------------------------------------------------------------------- - // For some reason we did not succeed in copying a fragment. We treat this - // as a serious failure and crash the starting node. 
- //-------------------------------------------------------------------------- - BlockReference cntrRef = calcNdbCntrBlockRef(startingNodeId); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::CopyFragRefError; - sysErr->errorRef = reference(); - sysErr->data1 = errorCode; - sysErr->data2 = 0; - sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBB); - return; -}//Dbdih::execCOPY_FRAGREF() - -void Dbdih::execCOPY_FRAGCONF(Signal* signal) -{ - const CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0]; - jamEntry(); - CRASH_INSERTION(7142); - - TakeOverRecordPtr takeOverPtr; - Uint32 takeOverPtrI = conf->userPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - - ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref); - ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid); - ndbrequire(conf->startingNodeId == takeOverPtr.p->toStartingNode); - ndbrequire(conf->sendingNodeId == takeOverPtr.p->toCopyNode); - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG); - sendUpdateTo(signal, takeOverPtr.i, - (Uint32)UpdateToReq::TO_COPY_FRAG_COMPLETED); -}//Dbdih::execCOPY_FRAGCONF() - -void Dbdih::sendUpdateTo(Signal* signal, - Uint32 takeOverPtrI, Uint32 updateState) -{ - TakeOverRecordPtr takeOverPtr; - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - if ((c_updateToLock != RNIL) || - ((ERROR_INSERTED(7163)) && - (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED)) || - ((ERROR_INSERTED(7169)) && - (updateState == UpdateToReq::TO_COPY_COMPLETED))) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_UPDATE_TO; - signal->theData[0] = DihContinueB::ZSEND_UPDATE_TO; - signal->theData[1] = takeOverPtrI; - signal->theData[2] = takeOverPtr.p->toStartingNode; - signal->theData[3] = takeOverPtr.p->toFailedNode; - signal->theData[4] = updateState; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 5); - return; - }//if - c_updateToLock = takeOverPtrI; - if (updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_UPDATE_TO; - } else { - jam(); - ndbrequire(updateState == UpdateToReq::TO_COPY_COMPLETED); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_COPY_COMPLETED; - }//if - - UpdateToReq * const req = (UpdateToReq *)&signal->theData[0]; - req->userPtr = takeOverPtr.i; - req->userRef = reference(); - req->updateState = (UpdateToReq::UpdateState)updateState; - req->startingNodeId = takeOverPtr.p->toStartingNode; - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragmentNo = takeOverPtr.p->toCurrentFragid; - sendLoopMacro(UPDATE_TOREQ, sendUPDATE_TOREQ); -}//Dbdih::sendUpdateTo() - -void Dbdih::execUPDATE_TOREQ(Signal* signal) -{ - jamEntry(); - const UpdateToReq * const req = (UpdateToReq *)&signal->theData[0]; - BlockReference ref = req->userRef; - ndbrequire(cmasterdihref == ref); - - CRASH_INSERTION(7154); - RETURN_IF_NODE_NOT_ALIVE(req->startingNodeId); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = req->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(req->startingNodeId == takeOverPtr.p->toStartingNode); - if (req->updateState == UpdateToReq::TO_COPY_FRAG_COMPLETED) { - jam(); - ndbrequire(takeOverPtr.p->toSlaveStatus == TakeOverRecord::TO_SLAVE_CREATE_PREPARE); - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED; - takeOverPtr.p->toCurrentTabref = req->tableId; - takeOverPtr.p->toCurrentFragid = 
req->fragmentNo; - } else { - jam(); - ndbrequire(req->updateState == UpdateToReq::TO_COPY_COMPLETED); - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_COPY_COMPLETED; - setNodeCopyCompleted(takeOverPtr.p->toStartingNode, true); - }//if - - - UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0]; - conf->userPtr = takeOverPtr.i; - conf->sendingNodeId = cownNodeId; - conf->startingNodeId = takeOverPtr.p->toStartingNode; - sendSignal(ref, GSN_UPDATE_TOCONF, signal, UpdateToConf::SignalLength, JBB); -}//Dbdih::execUPDATE_TOREQ() - -void Dbdih::execUPDATE_TOCONF(Signal* signal) -{ - const UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0]; - CRASH_INSERTION(7152); - - RETURN_IF_NODE_NOT_ALIVE(conf->startingNodeId); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = conf->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - receiveLoopMacro(UPDATE_TOREQ, conf->sendingNodeId); - CRASH_INSERTION(7153); - c_updateToLock = RNIL; - - if (takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_COPY_COMPLETED) { - jam(); - toCopyCompletedLab(signal, takeOverPtr); - return; - } else { - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::TO_UPDATE_TO); - }//if - TabRecordPtr tabPtr; - tabPtr.i = takeOverPtr.p->toCurrentTabref; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr); - takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE; - BlockReference lqhRef = calcLqhBlockRef(takeOverPtr.p->toStartingNode); - CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0]; - req->userPtr = takeOverPtr.i; - req->userRef = reference(); - req->tableId = takeOverPtr.p->toCurrentTabref; - req->fragId = takeOverPtr.p->toCurrentFragid; - req->distributionKey = fragPtr.p->distributionKey; - - sendSignal(lqhRef, GSN_COPY_ACTIVEREQ, signal, - CopyActiveReq::SignalLength, JBB); -}//Dbdih::execUPDATE_TOCONF() - -void Dbdih::execCOPY_ACTIVECONF(Signal* signal) -{ - const CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0]; - jamEntry(); - CRASH_INSERTION(7143); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = conf->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(conf->tableId == takeOverPtr.p->toCurrentTabref); - ndbrequire(conf->fragId == takeOverPtr.p->toCurrentFragid); - ndbrequire(checkNodeAlive(conf->startingNodeId)); - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_ACTIVE); - - takeOverPtr.p->startGci = conf->startGci; - takeOverPtr.p->toMasterStatus = TakeOverRecord::LOCK_MUTEX; - - Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle); - Callback c = { safe_cast(&Dbdih::switchPrimaryMutex_locked), takeOverPtr.i }; - ndbrequire(mutex.lock(c)); -}//Dbdih::execCOPY_ACTIVECONF() - -void -Dbdih::switchPrimaryMutex_locked(Signal* signal, Uint32 toPtrI, Uint32 retVal){ - jamEntry(); - ndbrequire(retVal == 0); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = toPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::LOCK_MUTEX); - - if (!checkNodeAlive((takeOverPtr.p->toStartingNode))) { - // We have mutex - Mutex mutex(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle); - mutex.unlock(); // Ignore result - - c_createFragmentLock = RNIL; - c_CREATE_FRAGREQ_Counter.clearWaitingFor(); - endTakeOver(takeOverPtr.i); - return; - } - - takeOverPtr.p->toMasterStatus = 
TakeOverRecord::COMMIT_CREATE; - sendCreateFragReq(signal, takeOverPtr.p->startGci, - CreateFragReq::COMMIT_STORED, takeOverPtr.i); -} - -void Dbdih::toCopyCompletedLab(Signal * signal, TakeOverRecordPtr takeOverPtr) -{ - signal->theData[0] = NDB_LE_NR_CopyFragsCompleted; - signal->theData[1] = takeOverPtr.p->toStartingNode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - if (getNodeState().getSystemRestartInProgress()) - { - jam(); - infoEvent("Take over of node %d complete", takeOverPtr.p->toStartingNode); - setNodeActiveStatus(takeOverPtr.p->toStartingNode, Sysfile::NS_Active); - takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP; - takeOverCompleted(takeOverPtr.p->toStartingNode); - checkToCopy(); - checkToCopyCompleted(signal); - return; - } - - c_lcpState.immediateLcpStart = true; - takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP; - - /*-----------------------------------------------------------------------*/ - /* NOW WE CAN ALLOW THE NEW NODE TO PARTICIPATE IN LOCAL CHECKPOINTS. */ - /* WHEN THE FIRST LOCAL CHECKPOINT IS READY WE DECLARE THE TAKE OVER AS */ - /* COMPLETED. SINCE LOCAL CHECKPOINTS HAVE BEEN BLOCKED DURING THE COPY */ - /* PROCESS WE MUST ALSO START A NEW LOCAL CHECKPOINT PROCESS BY ENSURING */ - /* THAT IT LOOKS LIKE IT IS TIME FOR A NEW LOCAL CHECKPOINT AND BY */ - /* UNBLOCKING THE LOCAL CHECKPOINT AGAIN. */ - /* --------------------------------------------------------------------- */ -}//Dbdih::toCopyCompletedLab() - -void Dbdih::sendEndTo(Signal* signal, Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - CRASH_INSERTION(7156); - RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr); - if ((c_endToLock != RNIL) || (ERROR_INSERTED(7164))) { - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_WAIT_ENDING; - signal->theData[0] = DihContinueB::ZSEND_END_TO; - signal->theData[1] = takeOverPtrI; - signal->theData[2] = takeOverPtr.p->toStartingNode; - signal->theData[3] = takeOverPtr.p->toFailedNode; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 30, 4); - return; - }//if - c_endToLock = takeOverPtr.i; - takeOverPtr.p->toMasterStatus = TakeOverRecord::ENDING; - EndToReq * const req = (EndToReq *)&signal->theData[0]; - req->userPtr = takeOverPtr.i; - req->userRef = reference(); - req->startingNodeId = takeOverPtr.p->toStartingNode; - sendLoopMacro(END_TOREQ, sendEND_TOREQ); -}//Dbdih::sendStartTo() - -void Dbdih::execEND_TOREQ(Signal* signal) -{ - jamEntry(); - const EndToReq * const req = (EndToReq *)&signal->theData[0]; - BlockReference ref = req->userRef; - Uint32 startingNodeId = req->startingNodeId; - - CRASH_INSERTION(7144); - RETURN_IF_NODE_NOT_ALIVE(startingNodeId); - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = req->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(startingNodeId == takeOverPtr.p->toStartingNode); - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE; - - if (!isMaster()) { - jam(); - endTakeOver(takeOverPtr.i); - }//if - - EndToConf * const conf = (EndToConf *)&signal->theData[0]; - conf->userPtr = takeOverPtr.i; - conf->sendingNodeId = cownNodeId; - conf->startingNodeId = startingNodeId; - sendSignal(ref, GSN_END_TOCONF, signal, EndToConf::SignalLength, JBB); -}//Dbdih::execEND_TOREQ() - -void Dbdih::execEND_TOCONF(Signal* signal) -{ - const EndToConf * const conf = (EndToConf *)&signal->theData[0]; - jamEntry(); - - const Uint32 nodeId = conf->startingNodeId; - CRASH_INSERTION(7145); - - RETURN_IF_NODE_NOT_ALIVE(nodeId); - - 
TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = conf->userPtr; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::ENDING); - ndbrequire(nodeId == takeOverPtr.p->toStartingNode); - - receiveLoopMacro(END_TOREQ, conf->sendingNodeId); - CRASH_INSERTION(7146); - c_endToLock = RNIL; - - /* -----------------------------------------------------------------------*/ - /* WE HAVE FINALLY COMPLETED THE TAKE OVER. WE RESET THE STATUS AND CHECK*/ - /* IF ANY MORE TAKE OVERS ARE NEEDED AT THE MOMENT. */ - /* FIRST WE CHECK IF A RESTART IS ONGOING. IN THAT CASE WE RESTART PHASE */ - /* 4 AND CHECK IF ANY MORE TAKE OVERS ARE NEEDED BEFORE WE START NDB */ - /* CLUSTER. THIS CAN ONLY HAPPEN IN A SYSTEM RESTART. */ - /* ---------------------------------------------------------------------- */ - if (takeOverPtr.p->toNodeRestart) { - jam(); - /* ----------------------------------------------------------------------*/ - /* THE TAKE OVER NODE WAS A STARTING NODE. WE WILL SEND START_COPYCONF */ - /* TO THE STARTING NODE SUCH THAT THE NODE CAN COMPLETE THE START-UP. */ - /* --------------------------------------------------------------------- */ - BlockReference ref = calcDihBlockRef(takeOverPtr.p->toStartingNode); - signal->theData[0] = takeOverPtr.p->toStartingNode; - sendSignal(ref, GSN_START_COPYCONF, signal, 1,JBB); - }//if - endTakeOver(takeOverPtr.i); - - if (cstartPhase == ZNDB_SPH4) { - jam(); - if (anyActiveTakeOver()) { - jam(); - return; - }//if - ndbsttorry10Lab(signal, __LINE__); - return; - }//if - checkStartTakeOver(signal); -}//Dbdih::execEND_TOCONF() - -void Dbdih::allocateTakeOver(TakeOverRecordPtr& takeOverPtr) -{ - if (isMaster()) { - jam(); - //-------------------------------------------- - // Master already seized the take over record. 
- //-------------------------------------------- - return; - }//if - if (takeOverPtr.i == cfirstfreeTakeOver) { - jam(); - seizeTakeOver(takeOverPtr); - } else { - TakeOverRecordPtr nextTakeOverptr; - TakeOverRecordPtr prevTakeOverptr; - nextTakeOverptr.i = takeOverPtr.p->nextTakeOver; - prevTakeOverptr.i = takeOverPtr.p->prevTakeOver; - if (prevTakeOverptr.i != RNIL) { - jam(); - ptrCheckGuard(prevTakeOverptr, MAX_NDB_NODES, takeOverRecord); - prevTakeOverptr.p->nextTakeOver = nextTakeOverptr.i; - }//if - if (nextTakeOverptr.i != RNIL) { - jam(); - ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord); - nextTakeOverptr.p->prevTakeOver = prevTakeOverptr.i; - }//if - }//if -}//Dbdih::allocateTakeOver() - -void Dbdih::seizeTakeOver(TakeOverRecordPtr& takeOverPtr) -{ - TakeOverRecordPtr nextTakeOverptr; - ndbrequire(cfirstfreeTakeOver != RNIL); - takeOverPtr.i = cfirstfreeTakeOver; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - cfirstfreeTakeOver = takeOverPtr.p->nextTakeOver; - nextTakeOverptr.i = takeOverPtr.p->nextTakeOver; - if (nextTakeOverptr.i != RNIL) { - jam(); - ptrCheckGuard(nextTakeOverptr, MAX_NDB_NODES, takeOverRecord); - nextTakeOverptr.p->prevTakeOver = RNIL; - }//if - takeOverPtr.p->nextTakeOver = RNIL; - takeOverPtr.p->prevTakeOver = RNIL; -}//Dbdih::seizeTakeOver() - -void Dbdih::endTakeOver(Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - if ((takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) && - (takeOverPtr.p->toMasterStatus != TakeOverRecord::TO_WAIT_START_TAKE_OVER)) { - jam(); - NodeGroupRecordPtr NGPtr; - NodeRecordPtr nodePtr; - nodePtr.i = takeOverPtr.p->toStartingNode; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - NGPtr.i = nodePtr.p->nodeGroup; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - NGPtr.p->activeTakeOver = false; - }//if - setAllowNodeStart(takeOverPtr.p->toStartingNode, true); - initTakeOver(takeOverPtr); - releaseTakeOver(takeOverPtrI); -}//Dbdih::endTakeOver() - -void Dbdih::releaseTakeOver(Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - takeOverPtr.p->nextTakeOver = cfirstfreeTakeOver; - cfirstfreeTakeOver = takeOverPtr.i; -}//Dbdih::releaseTakeOver() - -void Dbdih::initTakeOver(TakeOverRecordPtr takeOverPtr) -{ - takeOverPtr.p->toCopyNode = RNIL; - takeOverPtr.p->toCurrentFragid = RNIL; - takeOverPtr.p->toCurrentReplica = RNIL; - takeOverPtr.p->toCurrentTabref = RNIL; - takeOverPtr.p->toFailedNode = RNIL; - takeOverPtr.p->toStartingNode = RNIL; - takeOverPtr.p->prevTakeOver = RNIL; - takeOverPtr.p->nextTakeOver = RNIL; - takeOverPtr.p->toNodeRestart = false; - takeOverPtr.p->toMasterStatus = TakeOverRecord::IDLE; - takeOverPtr.p->toSlaveStatus = TakeOverRecord::TO_SLAVE_IDLE; -}//Dbdih::initTakeOver() - -bool Dbdih::anyActiveTakeOver() -{ - TakeOverRecordPtr takeOverPtr; - for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) { - ptrAss(takeOverPtr, takeOverRecord); - if (takeOverPtr.p->toMasterStatus != TakeOverRecord::IDLE) { - jam(); - return true; - }//if - }//for - return false; -}//Dbdih::anyActiveTakeOver() - -/*****************************************************************************/ -/* ------------------------------------------------------------------------- */ -/* WE HAVE BEEN REQUESTED TO PERFORM A SYSTEM RESTART. WE START BY */ -/* READING THE GCI FILES. 
THIS REQUEST WILL ONLY BE SENT TO THE MASTER */ -/* DIH. THAT MEANS WE HAVE TO REPLICATE THE INFORMATION WE READ FROM */ -/* OUR FILES TO ENSURE THAT ALL NODES HAVE THE SAME DISTRIBUTION */ -/* INFORMATION. */ -/* ------------------------------------------------------------------------- */ -/*****************************************************************************/ -void Dbdih::readGciFileLab(Signal* signal) -{ - FileRecordPtr filePtr; - filePtr.i = crestartInfoFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - filePtr.p->reqStatus = FileRecord::OPENING_GCP; - - openFileRo(signal, filePtr); -}//Dbdih::readGciFileLab() - -void Dbdih::openingGcpLab(Signal* signal, FileRecordPtr filePtr) -{ - /* ----------------------------------------------------------------------- */ - /* WE HAVE SUCCESSFULLY OPENED A FILE CONTAINING INFORMATION ABOUT */ - /* THE GLOBAL CHECKPOINTS THAT ARE POSSIBLE TO RESTART. */ - /* ----------------------------------------------------------------------- */ - readRestorableGci(signal, filePtr); - filePtr.p->reqStatus = FileRecord::READING_GCP; -}//Dbdih::openingGcpLab() - -void Dbdih::readingGcpLab(Signal* signal, FileRecordPtr filePtr) -{ - /* ----------------------------------------------------------------------- */ - /* WE HAVE NOW SUCCESSFULLY MANAGED TO READ IN THE GLOBAL CHECKPOINT */ - /* INFORMATION FROM FILE. LATER WE WILL ADD SOME FUNCTIONALITY THAT */ - /* CHECKS THE RESTART TIMERS TO DEDUCE FROM WHERE TO RESTART. */ - /* NOW WE WILL SIMPLY RESTART FROM THE NEWEST GLOBAL CHECKPOINT */ - /* POSSIBLE TO RESTORE. */ - /* */ - /* BEFORE WE INVOKE DICT WE NEED TO COPY CRESTART_INFO TO ALL NODES. */ - /* WE ALSO COPY TO OUR OWN NODE. TO ENABLE US TO DO THIS PROPERLY WE */ - /* START BY CLOSING THIS FILE. */ - /* ----------------------------------------------------------------------- */ - globalData.m_restart_seq = ++SYSFILE->m_restart_seq; - closeFile(signal, filePtr); - filePtr.p->reqStatus = FileRecord::CLOSING_GCP; -}//Dbdih::readingGcpLab() - -void Dbdih::closingGcpLab(Signal* signal, FileRecordPtr filePtr) -{ - if (Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) == false){ - jam(); - selectMasterCandidateAndSend(signal); - return; - } else { - jam(); - sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); - return; - }//if -}//Dbdih::closingGcpLab() - -/* ------------------------------------------------------------------------- */ -/* SELECT THE MASTER CANDIDATE TO BE USED IN SYSTEM RESTARTS. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::selectMasterCandidateAndSend(Signal* signal) -{ - setNodeGroups(); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = SYSFILE->lastCompletedGCI[getOwnNodeId()]; - sendSignal(cntrlblockref, GSN_DIH_RESTARTCONF, signal, 2, JBB); - - NodeRecordPtr nodePtr; - Uint32 node_groups[MAX_NDB_NODES]; - memset(node_groups, 0, sizeof(node_groups)); - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - const Uint32 ng = Sysfile::getNodeGroup(nodePtr.i, SYSFILE->nodeGroups); - if(ng != NO_NODE_GROUP_ID){ - ndbrequire(ng < MAX_NDB_NODES); - node_groups[ng]++; - } - } - - for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - Uint32 count = node_groups[nodePtr.i]; - if(count != 0 && count != cnoReplicas){ - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Illegal configuration change." 
- " Initial start needs to be performed " - " when changing no of replicas (%d != %d)", - node_groups[nodePtr.i], cnoReplicas); - progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf); - } - } -}//Dbdih::selectMasterCandidate() - -/* ------------------------------------------------------------------------- */ -/* ERROR HANDLING DURING READING RESTORABLE GCI FROM FILE. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::openingGcpErrorLab(Signal* signal, FileRecordPtr filePtr) -{ - filePtr.p->fileStatus = FileRecord::CRASHED; - filePtr.p->reqStatus = FileRecord::IDLE; - if (crestartInfoFile[0] == filePtr.i) { - jam(); - /* --------------------------------------------------------------------- */ - /* THE FIRST FILE WAS NOT ABLE TO BE OPENED. SET STATUS TO CRASHED AND */ - /* TRY OPEN THE NEXT FILE. */ - /* --------------------------------------------------------------------- */ - filePtr.i = crestartInfoFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRo(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_GCP; - } else { - jam(); - /* --------------------------------------------------------------------- */ - /* WE FAILED IN OPENING THE SECOND FILE. BOTH FILES WERE CORRUPTED. WE */ - /* CANNOT CONTINUE THE RESTART IN THIS CASE. TELL NDBCNTR OF OUR */ - /* FAILURE. */ - /*---------------------------------------------------------------------- */ - sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); - return; - }//if -}//Dbdih::openingGcpErrorLab() - -void Dbdih::readingGcpErrorLab(Signal* signal, FileRecordPtr filePtr) -{ - filePtr.p->fileStatus = FileRecord::CRASHED; - /* ----------------------------------------------------------------------- */ - /* WE FAILED IN READING THE FILE AS WELL. WE WILL CLOSE THIS FILE. */ - /* ----------------------------------------------------------------------- */ - closeFile(signal, filePtr); - filePtr.p->reqStatus = FileRecord::CLOSING_GCP_CRASH; -}//Dbdih::readingGcpErrorLab() - -void Dbdih::closingGcpCrashLab(Signal* signal, FileRecordPtr filePtr) -{ - if (crestartInfoFile[0] == filePtr.i) { - jam(); - /* --------------------------------------------------------------------- */ - /* ERROR IN FIRST FILE, TRY THE SECOND FILE. */ - /* --------------------------------------------------------------------- */ - filePtr.i = crestartInfoFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_GCP; - return; - }//if - /* ----------------------------------------------------------------------- */ - /* WE DISCOVERED A FAILURE WITH THE SECOND FILE AS WELL. THIS IS A */ - /* SERIOUS PROBLEM. REPORT FAILURE TO NDBCNTR. */ - /* ----------------------------------------------------------------------- */ - sendSignal(cntrlblockref, GSN_DIH_RESTARTREF, signal, 1, JBB); -}//Dbdih::closingGcpCrashLab() - -/*****************************************************************************/ -/* ------------------------------------------------------------------------- */ -/* THIS IS AN INITIAL RESTART. WE WILL CREATE THE TWO FILES DESCRIBING */ -/* THE GLOBAL CHECKPOINTS THAT ARE RESTORABLE. 
*/ -/* ------------------------------------------------------------------------- */ -/*****************************************************************************/ -void Dbdih::initGciFilesLab(Signal* signal) -{ - FileRecordPtr filePtr; - filePtr.i = crestartInfoFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - createFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::CREATING_GCP; -}//Dbdih::initGciFilesLab() - -/* ------------------------------------------------------------------------- */ -/* GLOBAL CHECKPOINT FILE HAVE BEEN SUCCESSFULLY CREATED. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::creatingGcpLab(Signal* signal, FileRecordPtr filePtr) -{ - if (filePtr.i == crestartInfoFile[0]) { - jam(); - /* --------------------------------------------------------------------- */ - /* IF CREATED FIRST THEN ALSO CREATE THE SECOND FILE. */ - /* --------------------------------------------------------------------- */ - filePtr.i = crestartInfoFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - createFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::CREATING_GCP; - } else { - jam(); - /* --------------------------------------------------------------------- */ - /* BOTH FILES HAVE BEEN CREATED. NOW WRITE THE INITIAL DATA TO BOTH */ - /* OF THE FILES. */ - /* --------------------------------------------------------------------- */ - filePtr.i = crestartInfoFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - writeRestorableGci(signal, filePtr); - filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP; - }//if -}//Dbdih::creatingGcpLab() - -/* ------------------------------------------------------------------------- */ -/* WE HAVE SUCCESSFULLY WRITTEN A GCI FILE. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::writeInitGcpLab(Signal* signal, FileRecordPtr filePtr) -{ - filePtr.p->reqStatus = FileRecord::IDLE; - if (filePtr.i == crestartInfoFile[0]) { - jam(); - /* --------------------------------------------------------------------- */ - /* WE HAVE WRITTEN THE FIRST FILE NOW ALSO WRITE THE SECOND FILE. */ - /* --------------------------------------------------------------------- */ - filePtr.i = crestartInfoFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - writeRestorableGci(signal, filePtr); - filePtr.p->reqStatus = FileRecord::WRITE_INIT_GCP; - } else { - /* --------------------------------------------------------------------- */ - /* WE HAVE WRITTEN BOTH FILES. LEAVE BOTH FILES OPEN AND CONFIRM OUR */ - /* PART OF THE INITIAL START. */ - /* --------------------------------------------------------------------- */ - if (isMaster()) { - jam(); - /*---------------------------------------------------------------------*/ - // IN MASTER NODES THE START REQUEST IS RECEIVED FROM NDBCNTR AND WE MUST - // RESPOND WHEN COMPLETED. 
- /*---------------------------------------------------------------------*/ - signal->theData[0] = reference(); - sendSignal(cndbStartReqBlockref, GSN_NDB_STARTCONF, signal, 1, JBB); - } else { - jam(); - ndbsttorry10Lab(signal, __LINE__); - return; - }//if - }//if -}//Dbdih::writeInitGcpLab() - -/*****************************************************************************/ -/* ********** NODES DELETION MODULE *************/ -/*****************************************************************************/ -/*---------------------------------------------------------------------------*/ -/* LOGIC FOR NODE FAILURE */ -/*---------------------------------------------------------------------------*/ -void Dbdih::execNODE_FAILREP(Signal* signal) -{ - Uint32 i; - Uint32 failedNodes[MAX_NDB_NODES]; - jamEntry(); - NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; - - cfailurenr = nodeFail->failNo; - Uint32 newMasterId = nodeFail->masterNodeId; - const Uint32 noOfFailedNodes = nodeFail->noOfNodes; - - if (ERROR_INSERTED(7179)) - { - CLEAR_ERROR_INSERT_VALUE; - } - - if (ERROR_INSERTED(7184)) - { - SET_ERROR_INSERT_VALUE(7000); - } - - /*-------------------------------------------------------------------------*/ - // The first step is to convert from a bit mask to an array of failed nodes. - /*-------------------------------------------------------------------------*/ - Uint32 index = 0; - for (i = 1; i < MAX_NDB_NODES; i++) { - jam(); - if(NodeBitmask::get(nodeFail->theNodes, i)){ - jam(); - failedNodes[index] = i; - index++; - }//if - }//for - ndbrequire(noOfFailedNodes == index); - ndbrequire(noOfFailedNodes - 1 < MAX_NDB_NODES); - - /*-------------------------------------------------------------------------*/ - // The second step is to update the node status of the failed nodes, remove - // them from the alive node list and put them into the dead node list. Also - // update the number of nodes on-line. - // We also set certain state variables ensuring that the node no longer is - // used in transactions and also mark that we received this signal. - /*-------------------------------------------------------------------------*/ - for (i = 0; i < noOfFailedNodes; i++) { - jam(); - NodeRecordPtr TNodePtr; - TNodePtr.i = failedNodes[i]; - ptrCheckGuard(TNodePtr, MAX_NDB_NODES, nodeRecord); - TNodePtr.p->useInTransactions = false; - TNodePtr.p->m_inclDihLcp = false; - TNodePtr.p->recNODE_FAILREP = ZTRUE; - if (TNodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - con_lineNodes--; - TNodePtr.p->nodeStatus = NodeRecord::DIED_NOW; - removeAlive(TNodePtr); - insertDeadNode(TNodePtr); - }//if - }//for - - /*-------------------------------------------------------------------------*/ - // Verify that we can continue to operate the cluster. If we cannot we will - // not return from checkEscalation. - /*-------------------------------------------------------------------------*/ - checkEscalation(); - - /*------------------------------------------------------------------------*/ - // Verify that a starting node has also crashed. Reset the node start record. - /*-------------------------------------------------------------------------*/ -#if 0 - /** - * Node will crash by itself... - * nodeRestart is run then... 
- */ - if (false && c_nodeStartMaster.startNode != RNIL && getNodeStatus(c_nodeStartMaster.startNode) == NodeRecord::ALIVE) - { - BlockReference cntrRef = calcNdbCntrBlockRef(c_nodeStartMaster.startNode); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::StartInProgressError; - sysErr->errorRef = reference(); - sysErr->data1= 0; - sysErr->data2= __LINE__; - sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal, SystemError::SignalLength, JBA); - nodeResetStart(); - }//if -#endif - - /*--------------------------------------------------*/ - /* */ - /* WE CHANGE THE REFERENCE TO MASTER DIH */ - /* BLOCK AND POINTER AT THIS PLACE IN THE CODE*/ - /*--------------------------------------------------*/ - Uint32 oldMasterId = cmasterNodeId; - BlockReference oldMasterRef = cmasterdihref; - cmasterdihref = calcDihBlockRef(newMasterId); - cmasterNodeId = newMasterId; - - const bool masterTakeOver = (oldMasterId != newMasterId); - - for(i = 0; i < noOfFailedNodes; i++) { - NodeRecordPtr failedNodePtr; - failedNodePtr.i = failedNodes[i]; - ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); - Uint32 activeTakeOverPtr = findTakeOver(failedNodes[i]); - if (oldMasterRef == reference()) { - /*-------------------------------------------------------*/ - // Functions that need to be called only for master nodes. - /*-------------------------------------------------------*/ - checkCopyTab(failedNodePtr); - checkStopPermMaster(signal, failedNodePtr); - checkWaitGCPMaster(signal, failedNodes[i]); - checkTakeOverInMasterAllNodeFailure(signal, failedNodePtr); - checkTakeOverInMasterCopyNodeFailure(signal, failedNodePtr.i); - checkTakeOverInMasterStartNodeFailure(signal, activeTakeOverPtr); - checkGcpOutstanding(signal, failedNodePtr.i); - } else { - jam(); - /*-----------------------------------------------------------*/ - // Functions that need to be called only for nodes that were - // not master before these failures. - /*-----------------------------------------------------------*/ - checkStopPermProxy(signal, failedNodes[i]); - checkWaitGCPProxy(signal, failedNodes[i]); - if (isMaster()) { - /*-----------------------------------------------------------*/ - // We take over as master since old master has failed - /*-----------------------------------------------------------*/ - handleTakeOverNewMaster(signal, activeTakeOverPtr); - } else { - /*-----------------------------------------------------------*/ - // We are not master and will not become master. - /*-----------------------------------------------------------*/ - checkTakeOverInNonMasterStartNodeFailure(signal, activeTakeOverPtr); - }//if - }//if - /*--------------------------------------------------*/ - // Functions that need to be called for all nodes. 
- /*--------------------------------------------------*/ - checkStopMe(signal, failedNodePtr); - failedNodeLcpHandling(signal, failedNodePtr); - checkWaitDropTabFailedLqh(signal, failedNodePtr.i, 0); // 0 = start w/ tab 0 - startRemoveFailedNode(signal, failedNodePtr); - - /** - * This is the last function called - * It modifies failedNodePtr.p->nodeStatus - */ - failedNodeSynchHandling(signal, failedNodePtr); - }//for - - if(masterTakeOver){ - jam(); - startLcpMasterTakeOver(signal, oldMasterId); - startGcpMasterTakeOver(signal, oldMasterId); - - if(getNodeState().getNodeRestartInProgress()){ - jam(); - progError(__LINE__, NDBD_EXIT_MASTER_FAILURE_DURING_NR); - } - } - - - if (isMaster()) { - jam(); - setNodeRestartInfoBits(); - }//if -}//Dbdih::execNODE_FAILREP() - -void Dbdih::checkCopyTab(NodeRecordPtr failedNodePtr) -{ - jam(); - - if(c_nodeStartMaster.startNode != failedNodePtr.i){ - jam(); - return; - } - - switch(c_nodeStartMaster.m_outstandingGsn){ - case GSN_COPY_TABREQ: - jam(); - ndbrequire(c_COPY_TABREQ_Counter.isWaitingFor(failedNodePtr.i)); - releaseTabPages(failedNodePtr.p->activeTabptr); - c_COPY_TABREQ_Counter.clearWaitingFor(failedNodePtr.i); - c_nodeStartMaster.wait = ZFALSE; - break; - case GSN_START_INFOREQ: - case GSN_START_PERMCONF: - case GSN_DICTSTARTREQ: - case GSN_START_MECONF: - jam(); - break; - default: - g_eventLogger.error("outstanding gsn: %s(%d)", - getSignalName(c_nodeStartMaster.m_outstandingGsn), - c_nodeStartMaster.m_outstandingGsn); - ndbrequire(false); - } - - nodeResetStart(); -}//Dbdih::checkCopyTab() - -void Dbdih::checkStopMe(Signal* signal, NodeRecordPtr failedNodePtr) -{ - jam(); - if (c_STOP_ME_REQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - ndbrequire(c_stopMe.clientRef != 0); - StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0]; - stopMeConf->senderRef = calcDihBlockRef(failedNodePtr.i); - stopMeConf->senderData = c_stopMe.clientData; - sendSignal(reference(), GSN_STOP_ME_CONF, signal, - StopMeConf::SignalLength, JBB); - }//if -}//Dbdih::checkStopMe() - -void Dbdih::checkStopPermMaster(Signal* signal, NodeRecordPtr failedNodePtr) -{ - DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0]; - jam(); - if (c_DIH_SWITCH_REPLICA_REQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - ndbrequire(c_stopPermMaster.clientRef != 0); - ref->senderNode = failedNodePtr.i; - ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure; - sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REF, signal, - DihSwitchReplicaRef::SignalLength, JBB); - return; - }//if -}//Dbdih::checkStopPermMaster() - -void Dbdih::checkStopPermProxy(Signal* signal, NodeId failedNodeId) -{ - jam(); - if(c_stopPermProxy.clientRef != 0 && - refToNode(c_stopPermProxy.masterRef) == failedNodeId){ - - /** - * The master has failed report to proxy-client - */ - jam(); - StopPermRef* const ref = (StopPermRef*)&signal->theData[0]; - - ref->senderData = c_stopPermProxy.clientData; - ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure; - sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB); - c_stopPermProxy.clientRef = 0; - }//if -}//Dbdih::checkStopPermProxy() - -void -Dbdih::checkTakeOverInMasterAllNodeFailure(Signal* signal, - NodeRecordPtr failedNodePtr) -{ - //------------------------------------------------------------------------ - // This code is used to handle the failure of "all" nodes during the - // take over when "all" nodes are informed about state changes in - // the take over protocol. 
- //-------------------------------------------------------------------------- - if (c_START_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - StartToConf * const conf = (StartToConf *)&signal->theData[0]; - conf->userPtr = c_startToLock; - conf->sendingNodeId = failedNodePtr.i; - conf->startingNodeId = getStartNode(c_startToLock); - sendSignal(reference(), GSN_START_TOCONF, signal, - StartToConf::SignalLength, JBB); - }//if - if (c_CREATE_FRAGREQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - CreateFragConf * const conf = (CreateFragConf *)&signal->theData[0]; - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = c_createFragmentLock; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - conf->userPtr = takeOverPtr.i; - conf->tableId = takeOverPtr.p->toCurrentTabref; - conf->fragId = takeOverPtr.p->toCurrentFragid; - conf->sendingNodeId = failedNodePtr.i; - conf->startingNodeId = takeOverPtr.p->toStartingNode; - sendSignal(reference(), GSN_CREATE_FRAGCONF, signal, - CreateFragConf::SignalLength, JBB); - }//if - if (c_UPDATE_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - UpdateToConf * const conf = (UpdateToConf *)&signal->theData[0]; - conf->userPtr = c_updateToLock; - conf->sendingNodeId = failedNodePtr.i; - conf->startingNodeId = getStartNode(c_updateToLock); - sendSignal(reference(), GSN_UPDATE_TOCONF, signal, - UpdateToConf::SignalLength, JBB); - }//if - - if (c_END_TOREQ_Counter.isWaitingFor(failedNodePtr.i)){ - jam(); - EndToConf * const conf = (EndToConf *)&signal->theData[0]; - conf->userPtr = c_endToLock; - conf->sendingNodeId = failedNodePtr.i; - conf->startingNodeId = getStartNode(c_endToLock); - sendSignal(reference(), GSN_END_TOCONF, signal, - EndToConf::SignalLength, JBB); - }//if -}//Dbdih::checkTakeOverInMasterAllNodeFailure() - -void Dbdih::checkTakeOverInMasterCopyNodeFailure(Signal* signal, - Uint32 failedNodeId) -{ - //--------------------------------------------------------------------------- - // This code is used to handle failure of the copying node during a take over - //--------------------------------------------------------------------------- - TakeOverRecordPtr takeOverPtr; - for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { - jam(); - takeOverPtr.i = i; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) && - (takeOverPtr.p->toCopyNode == failedNodeId)) { - jam(); - /** - * The copying node failed but the system is still operational. - * We restart the copy process by selecting a new copy node. - * We do not need to add a fragment however since it is already added. - * We start again from the prepare create fragment phase. - */ - prepareSendCreateFragReq(signal, takeOverPtr.i); - }//if - }//for -}//Dbdih::checkTakeOverInMasterCopyNodeFailure() - -void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal, - Uint32 takeOverPtrI) -{ - jam(); - ndbout_c("checkTakeOverInMasterStartNodeFailure %x", - takeOverPtrI); - if (takeOverPtrI == RNIL) { - jam(); - return; - } - //----------------------------------------------------------------------- - // We are the master and the starting node has failed during a take over. - // We need to handle this failure in different ways depending on the state. 
- //----------------------------------------------------------------------- - - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - - ndbout_c("takeOverPtr.p->toMasterStatus: %x", - takeOverPtr.p->toMasterStatus); - - bool ok = false; - switch (takeOverPtr.p->toMasterStatus) { - case TakeOverRecord::IDLE: - //----------------------------------------------------------------------- - // The state cannot be idle when it has a starting node. - //----------------------------------------------------------------------- - ndbrequire(false); - break; - case TakeOverRecord::TO_WAIT_START_TAKE_OVER: - jam(); - case TakeOverRecord::TO_START_COPY: - jam(); - case TakeOverRecord::TO_START_COPY_ONGOING: - jam(); - case TakeOverRecord::TO_WAIT_START: - jam(); - case TakeOverRecord::TO_WAIT_PREPARE_CREATE: - jam(); - case TakeOverRecord::TO_WAIT_UPDATE_TO: - jam(); - case TakeOverRecord::TO_WAIT_COMMIT_CREATE: - jam(); - case TakeOverRecord::TO_END_COPY: - jam(); - case TakeOverRecord::TO_END_COPY_ONGOING: - jam(); - case TakeOverRecord::TO_WAIT_ENDING: - jam(); - //----------------------------------------------------------------------- - // We will not do anything since an internal signal process is outstanding. - // When the signal arrives the take over will be released. - //----------------------------------------------------------------------- - ok = true; - break; - case TakeOverRecord::STARTING: - jam(); - ok = true; - c_startToLock = RNIL; - c_START_TOREQ_Counter.clearWaitingFor(); - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::TO_UPDATE_TO: - jam(); - ok = true; - c_updateToLock = RNIL; - c_UPDATE_TOREQ_Counter.clearWaitingFor(); - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::ENDING: - jam(); - ok = true; - c_endToLock = RNIL; - c_END_TOREQ_Counter.clearWaitingFor(); - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::COMMIT_CREATE: - ok = true; - jam(); - {// We have mutex - Mutex m(signal, c_mutexMgr, takeOverPtr.p->m_switchPrimaryMutexHandle); - m.unlock(); // Ignore result - } - // Fall through - case TakeOverRecord::PREPARE_CREATE: - ok = true; - jam(); - c_createFragmentLock = RNIL; - c_CREATE_FRAGREQ_Counter.clearWaitingFor(); - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::LOCK_MUTEX: - ok = true; - jam(); - // Lock mutex will return and do endTakeOver - break; - - //----------------------------------------------------------------------- - // Signals are outstanding to external nodes. These signals carry the node - // id of the starting node and will not use the take over record if the - // starting node has failed. - //----------------------------------------------------------------------- - case TakeOverRecord::COPY_FRAG: - ok = true; - jam(); - //----------------------------------------------------------------------- - // The copying node will discover the problem. We will receive either - // COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over - // record and end the process. If the copying node should also die then - // we will try to send prepare create fragment and will then discover - // that the starting node has failed. - //----------------------------------------------------------------------- - break; - case TakeOverRecord::PREPARE_COPY: - ok = true; - jam(); - /** - * We're waiting for the starting node...which just died... 
- * endTakeOver - */ - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::COPY_ACTIVE: - ok = true; - jam(); - //----------------------------------------------------------------------- - // In this we are waiting for a signal from the starting node. Thus we - // can release the take over record and end the process. - //----------------------------------------------------------------------- - endTakeOver(takeOverPtr.i); - break; - case TakeOverRecord::WAIT_LCP: - ok = true; - jam(); - //----------------------------------------------------------------------- - //----------------------------------------------------------------------- - endTakeOver(takeOverPtr.i); - break; - - case TakeOverRecord::STARTING_LOCAL_FRAGMENTS: - ok = true; - jam(); - endTakeOver(takeOverPtr.i); - break; - - /** - * The following are states that it should not be possible to "be" in - */ - case TakeOverRecord::SELECTING_NEXT: - jam(); - case TakeOverRecord::TO_COPY_COMPLETED: - jam(); - ndbrequire(false); - } - if(!ok){ - jamLine(takeOverPtr.p->toSlaveStatus); - ndbrequire(ok); - } -}//Dbdih::checkTakeOverInMasterStartNodeFailure() - -void Dbdih::checkTakeOverInNonMasterStartNodeFailure(Signal* signal, - Uint32 takeOverPtrI) -{ - jam(); - if (takeOverPtrI == RNIL) { - jam(); - return; - } - //----------------------------------------------------------------------- - // We are not master and not taking over as master. A take over was ongoing - // but the starting node has now failed. Handle it according to the state - // of the take over. - //----------------------------------------------------------------------- - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - bool ok = false; - switch (takeOverPtr.p->toSlaveStatus) { - case TakeOverRecord::TO_SLAVE_IDLE: - ndbrequire(false); - break; - case TakeOverRecord::TO_SLAVE_STARTED: - jam(); - case TakeOverRecord::TO_SLAVE_CREATE_PREPARE: - jam(); - case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED: - jam(); - case TakeOverRecord::TO_SLAVE_CREATE_COMMIT: - jam(); - case TakeOverRecord::TO_SLAVE_COPY_COMPLETED: - jam(); - ok = true; - endTakeOver(takeOverPtr.i); - break; - }//switch - if(!ok){ - jamLine(takeOverPtr.p->toSlaveStatus); - ndbrequire(ok); - } -}//Dbdih::checkTakeOverInNonMasterStartNodeFailure() - -void Dbdih::failedNodeSynchHandling(Signal* signal, - NodeRecordPtr failedNodePtr) -{ - jam(); - /*----------------------------------------------------*/ - /* INITIALISE THE VARIABLES THAT KEEP TRACK OF */ - /* WHEN A NODE FAILURE IS COMPLETED. 
*/ - /*----------------------------------------------------*/ - failedNodePtr.p->dbdictFailCompleted = ZFALSE; - failedNodePtr.p->dbtcFailCompleted = ZFALSE; - failedNodePtr.p->dbdihFailCompleted = ZFALSE; - failedNodePtr.p->dblqhFailCompleted = ZFALSE; - - failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor(); - - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - /** - * We'r waiting for nodePtr.i to complete - * handling of failedNodePtr.i's death - */ - - failedNodePtr.p->m_NF_COMPLETE_REP.setWaitingFor(nodePtr.i); - } else { - jam(); - if ((nodePtr.p->nodeStatus == NodeRecord::DYING) && - (nodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(failedNodePtr.i))){ - jam(); - /*----------------------------------------------------*/ - /* THE NODE FAILED BEFORE REPORTING THE FAILURE */ - /* HANDLING COMPLETED ON THIS FAILED NODE. */ - /* REPORT THAT NODE FAILURE HANDLING WAS */ - /* COMPLETED ON THE NEW FAILED NODE FOR THIS */ - /* PARTICULAR OLD FAILED NODE. */ - /*----------------------------------------------------*/ - NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0]; - nf->blockNo = 0; - nf->nodeId = failedNodePtr.i; - nf->failedNodeId = nodePtr.i; - nf->from = __LINE__; - sendSignal(reference(), GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - }//if - }//if - }//for - if (failedNodePtr.p->nodeStatus == NodeRecord::DIED_NOW) { - jam(); - failedNodePtr.p->nodeStatus = NodeRecord::DYING; - } else { - jam(); - /*----------------------------------------------------*/ - // No more processing needed when node not even started - // yet. We give the node status to DEAD since we do not - // care whether all nodes complete the node failure - // handling. The node have not been included in the - // node failure protocols. - /*----------------------------------------------------*/ - failedNodePtr.p->nodeStatus = NodeRecord::DEAD; - /**----------------------------------------------------------------------- - * WE HAVE COMPLETED HANDLING THE NODE FAILURE IN DIH. WE CAN REPORT THIS - * TO DIH THAT WAIT FOR THE OTHER BLOCKS TO BE CONCLUDED AS WELL. - *-----------------------------------------------------------------------*/ - NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0]; - nf->blockNo = DBDIH; - nf->nodeId = cownNodeId; - nf->failedNodeId = failedNodePtr.i; - nf->from = __LINE__; - sendSignal(reference(), GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - }//if -}//Dbdih::failedNodeSynchHandling() - -Uint32 Dbdih::findTakeOver(Uint32 failedNodeId) -{ - for (Uint32 i = 0; i < MAX_NDB_NODES; i++) { - jam(); - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = i; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - if (takeOverPtr.p->toStartingNode == failedNodeId) { - jam(); - return i; - }//if - }//for - return RNIL; -}//Dbdih::findTakeOver() - -Uint32 Dbdih::getStartNode(Uint32 takeOverPtrI) -{ - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - return takeOverPtr.p->toStartingNode; -}//Dbdih::getStartNode() - -void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr) -{ - jam(); - const Uint32 nodeId = failedNodePtr.i; - - if (isMaster() && c_lcpState.m_participatingLQH.get(failedNodePtr.i)) - { - /*----------------------------------------------------*/ - /* THE NODE WAS INVOLVED IN A LOCAL CHECKPOINT. 
WE */ - /* MUST UPDATE THE ACTIVE STATUS TO INDICATE THAT */ - /* THE NODE HAVE MISSED A LOCAL CHECKPOINT. */ - /*----------------------------------------------------*/ - - /** - * Bug#28717, Only master should do this, as this status is copied - * to other nodes - */ - switch (failedNodePtr.p->activeStatus) { - case Sysfile::NS_Active: - jam(); - failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1; - break; - case Sysfile::NS_ActiveMissed_1: - jam(); - failedNodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2; - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - break; - case Sysfile::NS_TakeOver: - jam(); - failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - break; - default: - g_eventLogger.error("activeStatus = %u " - "at failure after NODE_FAILREP of node = %u", - (Uint32) failedNodePtr.p->activeStatus, - failedNodePtr.i); - ndbrequire(false); - break; - }//switch - }//if - - c_lcpState.m_participatingDIH.clear(failedNodePtr.i); - c_lcpState.m_participatingLQH.clear(failedNodePtr.i); - - if(c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.isWaitingFor(failedNodePtr.i)){ - jam(); - LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend(); - rep->nodeId = failedNodePtr.i; - rep->lcpId = SYSFILE->latestLCP_ID; - rep->blockNo = DBDIH; - sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal, - LcpCompleteRep::SignalLength, JBB); - } - - /** - * Check if we'r waiting for the failed node's LQH to complete - * - * Note that this is ran "before" LCP master take over - */ - if(c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId)){ - jam(); - - LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend(); - rep->nodeId = nodeId; - rep->lcpId = SYSFILE->latestLCP_ID; - rep->blockNo = DBLQH; - sendSignal(reference(), GSN_LCP_COMPLETE_REP, signal, - LcpCompleteRep::SignalLength, JBB); - - if(c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId)){ - jam(); - /** - * Make sure we're ready to accept it - */ - c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodeId); - } - } - - if (c_TCGETOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) { - jam(); - signal->theData[0] = failedNodePtr.i; - signal->theData[1] = 0; - sendSignal(reference(), GSN_TCGETOPSIZECONF, signal, 2, JBB); - }//if - - if (c_TC_CLOPSIZEREQ_Counter.isWaitingFor(failedNodePtr.i)) { - jam(); - signal->theData[0] = failedNodePtr.i; - sendSignal(reference(), GSN_TC_CLOPSIZECONF, signal, 1, JBB); - }//if - - if (c_START_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) { - jam(); - StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend(); - conf->senderRef = numberToRef(DBLQH, failedNodePtr.i); - conf->lcpId = SYSFILE->latestLCP_ID; - sendSignal(reference(), GSN_START_LCP_CONF, signal, - StartLcpConf::SignalLength, JBB); - }//if - - if (c_EMPTY_LCP_REQ_Counter.isWaitingFor(failedNodePtr.i)) { - jam(); - EmptyLcpConf * const rep = (EmptyLcpConf *)&signal->theData[0]; - rep->senderNodeId = failedNodePtr.i; - rep->tableId = ~0; - rep->fragmentId = ~0; - rep->lcpNo = 0; - rep->lcpId = SYSFILE->latestLCP_ID; - rep->idle = true; - sendSignal(reference(), GSN_EMPTY_LCP_CONF, signal, - EmptyLcpConf::SignalLength, JBB); - }//if - - if (c_MASTER_LCPREQ_Counter.isWaitingFor(failedNodePtr.i)) { - jam(); - MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0]; - ref->senderNodeId = failedNodePtr.i; - ref->failedNodeId = cmasterTakeOverNode; - sendSignal(reference(), GSN_MASTER_LCPREF, signal, - MasterLCPRef::SignalLength, JBB); - }//if 
- -}//Dbdih::failedNodeLcpHandling() - -void Dbdih::checkGcpOutstanding(Signal* signal, Uint32 failedNodeId){ - if (c_GCP_PREPARE_Counter.isWaitingFor(failedNodeId)){ - jam(); - signal->theData[0] = failedNodeId; - signal->theData[1] = cnewgcp; - sendSignal(reference(), GSN_GCP_PREPARECONF, signal, 2, JBB); - }//if - - if (c_GCP_COMMIT_Counter.isWaitingFor(failedNodeId)) { - jam(); - signal->theData[0] = failedNodeId; - signal->theData[1] = coldgcp; - signal->theData[2] = cfailurenr; - sendSignal(reference(), GSN_GCP_NODEFINISH, signal, 3, JBB); - }//if - - if (c_GCP_SAVEREQ_Counter.isWaitingFor(failedNodeId)) { - jam(); - GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0]; - saveRef->dihPtr = failedNodeId; - saveRef->nodeId = failedNodeId; - saveRef->gci = coldgcp; - saveRef->errorCode = GCPSaveRef::FakedSignalDueToNodeFailure; - sendSignal(reference(), GSN_GCP_SAVEREF, signal, - GCPSaveRef::SignalLength, JBB); - }//if - - if (c_COPY_GCIREQ_Counter.isWaitingFor(failedNodeId)) { - jam(); - signal->theData[0] = failedNodeId; - sendSignal(reference(), GSN_COPY_GCICONF, signal, 1, JBB); - }//if - - if (c_MASTER_GCPREQ_Counter.isWaitingFor(failedNodeId)){ - jam(); - MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0]; - ref->senderNodeId = failedNodeId; - ref->failedNodeId = cmasterTakeOverNode; - sendSignal(reference(), GSN_MASTER_GCPREF, signal, - MasterGCPRef::SignalLength, JBB); - }//if -}//Dbdih::handleGcpStateInMaster() - - -void -Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){ - jam(); - - Uint32 oldNode = c_lcpMasterTakeOverState.failedNodeId; - - c_lcpMasterTakeOverState.minTableId = ~0; - c_lcpMasterTakeOverState.minFragId = ~0; - c_lcpMasterTakeOverState.failedNodeId = nodeId; - - c_lcpMasterTakeOverState.set(LMTOS_WAIT_EMPTY_LCP, __LINE__); - - if(c_EMPTY_LCP_REQ_Counter.done()){ - jam(); - c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(); - - EmptyLcpReq* req = (EmptyLcpReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - sendLoopMacro(EMPTY_LCP_REQ, sendEMPTY_LCP_REQ); - ndbrequire(!c_EMPTY_LCP_REQ_Counter.done()); - } else { - /** - * Node failure during master take over... - */ - g_eventLogger.info("Nodefail during master take over (old: %d)", oldNode); - } - - NodeRecordPtr nodePtr; - nodePtr.i = oldNode; - if (oldNode > 0 && oldNode < MAX_NDB_NODES) - { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->m_nodefailSteps.get(NF_LCP_TAKE_OVER)) - { - jam(); - checkLocalNodefailComplete(signal, oldNode, NF_LCP_TAKE_OVER); - } - } - - setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER); -} - -void Dbdih::startGcpMasterTakeOver(Signal* signal, Uint32 oldMasterId){ - jam(); - /*--------------------------------------------------*/ - /* */ - /* THE MASTER HAVE FAILED AND WE WERE ELECTED */ - /* TO BE THE NEW MASTER NODE. WE NEED TO QUERY*/ - /* ALL THE OTHER NODES ABOUT THEIR STATUS IN */ - /* ORDER TO BE ABLE TO TAKE OVER CONTROL OF */ - /* THE GLOBAL CHECKPOINT PROTOCOL AND THE */ - /* LOCAL CHECKPOINT PROTOCOL. 
*/ - /*--------------------------------------------------*/ - if(!isMaster()){ - jam(); - return; - } - cmasterState = MASTER_TAKE_OVER_GCP; - cmasterTakeOverNode = oldMasterId; - MasterGCPReq * const req = (MasterGCPReq *)&signal->theData[0]; - req->masterRef = reference(); - req->failedNodeId = oldMasterId; - sendLoopMacro(MASTER_GCPREQ, sendMASTER_GCPREQ); - cgcpMasterTakeOverState = GMTOS_INITIAL; - - signal->theData[0] = NDB_LE_GCP_TakeoverStarted; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); - - setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER); -}//Dbdih::handleNewMaster() - -void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI) -{ - jam(); - if (takeOverPtrI != RNIL) { - jam(); - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = takeOverPtrI; - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - bool ok = false; - switch (takeOverPtr.p->toSlaveStatus) { - case TakeOverRecord::TO_SLAVE_IDLE: - ndbrequire(false); - break; - case TakeOverRecord::TO_SLAVE_STARTED: - jam(); - case TakeOverRecord::TO_SLAVE_CREATE_PREPARE: - jam(); - case TakeOverRecord::TO_SLAVE_COPY_FRAG_COMPLETED: - jam(); - case TakeOverRecord::TO_SLAVE_CREATE_COMMIT: - jam(); - ok = true; - infoEvent("Unhandled MasterTO of TO slaveStatus=%d killing node %d", - takeOverPtr.p->toSlaveStatus, - takeOverPtr.p->toStartingNode); - takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_ACTIVE; - - { - BlockReference cntrRef = calcNdbCntrBlockRef(takeOverPtr.p->toStartingNode); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::CopyFragRefError; - sysErr->errorRef = reference(); - sysErr->data1= 0; - sysErr->data2= __LINE__; - sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBB); - } - break; - case TakeOverRecord::TO_SLAVE_COPY_COMPLETED: - ok = true; - jam(); - takeOverPtr.p->toMasterStatus = TakeOverRecord::WAIT_LCP; - break; - } - ndbrequire(ok); - endTakeOver(takeOverPtr.i); - }//if -}//Dbdih::handleTakeOverNewMaster() - -void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr) -{ - Uint32 nodeId = failedNodePtr.i; - if(failedNodePtr.p->nodeStatus != NodeRecord::DIED_NOW){ - jam(); - /** - * Is node isn't alive. It can't be part of LCP - */ - ndbrequire(!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(nodeId)); - - /** - * And there is no point in removing any replicas - * It's dead... 
- */ - return; - } - - /** - * If node has node complete LCP - * we need to remove it as undo might not be complete - * bug#31257 - */ - failedNodePtr.p->m_remove_node_from_table_lcp_id = RNIL; - if (c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(failedNodePtr.i)) - { - jam(); - failedNodePtr.p->m_remove_node_from_table_lcp_id = SYSFILE->latestLCP_ID; - } - - jam(); - - if (!ERROR_INSERTED(7194)) - { - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = failedNodePtr.i; - signal->theData[2] = 0; // Tab id - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - } - else - { - ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE"); - } - - setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE); -}//Dbdih::startRemoveFailedNode() - -/*--------------------------------------------------*/ -/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/ -/* QUERYING THIS NODE ABOUT THE STATE OF THE */ -/* GLOBAL CHECKPOINT PROTOCOL */ -/*--------------------------------------------------*/ -void Dbdih::execMASTER_GCPREQ(Signal* signal) -{ - NodeRecordPtr failedNodePtr; - MasterGCPReq * const masterGCPReq = (MasterGCPReq *)&signal->theData[0]; - jamEntry(); - const BlockReference newMasterBlockref = masterGCPReq->masterRef; - const Uint32 failedNodeId = masterGCPReq->failedNodeId; - if (c_copyGCISlave.m_copyReason != CopyGCIReq::IDLE) { - jam(); - /*--------------------------------------------------*/ - /* WE ARE CURRENTLY WRITING THE RESTART INFO */ - /* IN THIS NODE. SINCE ONLY ONE PROCESS IS */ - /* ALLOWED TO DO THIS AT A TIME WE MUST ENSURE*/ - /* THAT THIS IS NOT ONGOING WHEN THE NEW */ - /* MASTER TAKES OVER CONTROL. IF NOT ALL NODES*/ - /* RECEIVE THE SAME RESTART INFO DUE TO THE */ - /* FAILURE OF THE MASTER IT IS TAKEN CARE OF */ - /* BY THE NEW MASTER. */ - /*--------------------------------------------------*/ - sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ, - signal, 10, MasterGCPReq::SignalLength); - return; - }//if - failedNodePtr.i = failedNodeId; - ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); - if (failedNodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - /*--------------------------------------------------*/ - /* ENSURE THAT WE HAVE PROCESSED THE SIGNAL */ - /* NODE_FAILURE BEFORE WE PROCESS THIS REQUEST*/ - /* FROM THE NEW MASTER. THIS ENSURES THAT WE */ - /* HAVE REMOVED THE FAILED NODE FROM THE LIST */ - /* OF ACTIVE NODES AND SO FORTH. */ - /*--------------------------------------------------*/ - sendSignalWithDelay(reference(), GSN_MASTER_GCPREQ, - signal, 10, MasterGCPReq::SignalLength); - return; - } else { - ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING); - }//if - - if (ERROR_INSERTED(7181)) - { - ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ"); - CLEAR_ERROR_INSERT_VALUE; - signal->theData[0] = c_error_7181_ref; - signal->theData[1] = coldgcp; - execGCP_TCFINISHED(signal); - } - - MasterGCPConf::State gcpState; - switch (cgcpParticipantState) { - case GCP_PARTICIPANT_READY: - jam(); - /*--------------------------------------------------*/ - /* THE GLOBAL CHECKPOINT IS NOT ACTIVE SINCE */ - /* THE PREVIOUS GLOBAL CHECKPOINT IS COMPLETED*/ - /* AND THE NEW HAVE NOT STARTED YET. */ - /*--------------------------------------------------*/ - gcpState = MasterGCPConf::GCP_READY; - break; - case GCP_PARTICIPANT_PREPARE_RECEIVED: - jam(); - /*--------------------------------------------------*/ - /* GCP_PREPARE HAVE BEEN RECEIVED AND RESPONSE*/ - /* HAVE BEEN SENT. 
*/ - /*--------------------------------------------------*/ - gcpState = MasterGCPConf::GCP_PREPARE_RECEIVED; - break; - case GCP_PARTICIPANT_COMMIT_RECEIVED: - jam(); - /*------------------------------------------------*/ - /* GCP_COMMIT HAVE BEEN RECEIVED BUT NOT YET*/ - /* GCP_TCFINISHED FROM LOCAL TC. */ - /*------------------------------------------------*/ - gcpState = MasterGCPConf::GCP_COMMIT_RECEIVED; - break; - case GCP_PARTICIPANT_TC_FINISHED: - jam(); - /*------------------------------------------------*/ - /* GCP_COMMIT HAS BEEN RECEIVED AND ALSO */ - /* GCP_TCFINISHED HAVE BEEN RECEIVED. */ - /*------------------------------------------------*/ - gcpState = MasterGCPConf::GCP_TC_FINISHED; - break; - case GCP_PARTICIPANT_COPY_GCI_RECEIVED: - /*--------------------------------------------------*/ - /* COPY RESTART INFORMATION HAS BEEN RECEIVED */ - /* BUT NOT YET COMPLETED. */ - /*--------------------------------------------------*/ - ndbrequire(false); - gcpState= MasterGCPConf::GCP_READY; // remove warning - break; - default: - /*------------------------------------------------*/ - /* */ - /* THIS SHOULD NOT OCCUR SINCE THE ABOVE */ - /* STATES ARE THE ONLY POSSIBLE STATES AT A */ - /* NODE WHICH WAS NOT A MASTER NODE. */ - /*------------------------------------------------*/ - ndbrequire(false); - gcpState= MasterGCPConf::GCP_READY; // remove warning - break; - }//switch - MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0]; - masterGCPConf->gcpState = gcpState; - masterGCPConf->senderNodeId = cownNodeId; - masterGCPConf->failedNodeId = failedNodeId; - masterGCPConf->newGCP = cnewgcp; - masterGCPConf->latestLCP = SYSFILE->latestLCP_ID; - masterGCPConf->oldestRestorableGCI = SYSFILE->oldestRestorableGCI; - masterGCPConf->keepGCI = SYSFILE->keepGCI; - for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++) - masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i]; - sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal, - MasterGCPConf::SignalLength, JBB); - - if (ERROR_INSERTED(7182)) - { - ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ"); - CLEAR_ERROR_INSERT_VALUE; - signal->theData[0] = c_error_7181_ref; - signal->theData[1] = coldgcp; - execGCP_TCFINISHED(signal); - } -}//Dbdih::execMASTER_GCPREQ() - -void Dbdih::execMASTER_GCPCONF(Signal* signal) -{ - NodeRecordPtr senderNodePtr; - MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0]; - jamEntry(); - senderNodePtr.i = masterGCPConf->senderNodeId; - ptrCheckGuard(senderNodePtr, MAX_NDB_NODES, nodeRecord); - - MasterGCPConf::State gcpState = (MasterGCPConf::State)masterGCPConf->gcpState; - const Uint32 failedNodeId = masterGCPConf->failedNodeId; - const Uint32 newGcp = masterGCPConf->newGCP; - const Uint32 latestLcpId = masterGCPConf->latestLCP; - const Uint32 oldestRestorableGci = masterGCPConf->oldestRestorableGCI; - const Uint32 oldestKeepGci = masterGCPConf->keepGCI; - if (latestLcpId > SYSFILE->latestLCP_ID) { - jam(); -#if 0 - g_eventLogger.info("Dbdih: Setting SYSFILE->latestLCP_ID to %d", - latestLcpId); - SYSFILE->latestLCP_ID = latestLcpId; -#endif - SYSFILE->keepGCI = oldestKeepGci; - SYSFILE->oldestRestorableGCI = oldestRestorableGci; - for(Uint32 i = 0; i < NdbNodeBitmask::Size; i++) - SYSFILE->lcpActive[i] = masterGCPConf->lcpActive[i]; - }//if - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - senderNodePtr.p->gcpstate = NodeRecord::READY; - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - senderNodePtr.p->gcpstate = 
NodeRecord::PREPARE_RECEIVED; - cnewgcp = newGcp; - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - jam(); - senderNodePtr.p->gcpstate = NodeRecord::COMMIT_SENT; - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - senderNodePtr.p->gcpstate = NodeRecord::NODE_FINISHED; - break; - default: - ndbrequire(false); - break; - }//switch - switch (cgcpMasterTakeOverState) { - case GMTOS_INITIAL: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - cgcpMasterTakeOverState = ALL_READY; - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - cgcpMasterTakeOverState = ALL_PREPARED; - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - jam(); - cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED; - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - cgcpMasterTakeOverState = COMMIT_COMPLETED; - break; - default: - ndbrequire(false); - break; - }//switch - break; - case ALL_READY: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - /*empty*/; - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED; - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - ndbrequire(false); - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED; - break; - default: - ndbrequire(false); - break; - }//switch - break; - case PREPARE_STARTED_NOT_COMMITTED: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - ndbrequire(false); - break; - case MasterGCPConf::GCP_TC_FINISHED: - ndbrequire(false); - break; - default: - ndbrequire(false); - break; - }//switch - break; - case ALL_PREPARED: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - cgcpMasterTakeOverState = PREPARE_STARTED_NOT_COMMITTED; - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - jam(); - cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED; - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED; - break; - default: - ndbrequire(false); - break; - }//switch - break; - case COMMIT_STARTED_NOT_COMPLETED: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - ndbrequire(false); - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - jam(); - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - break; - default: - ndbrequire(false); - break; - }//switch - break; - case COMMIT_COMPLETED: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - cgcpMasterTakeOverState = SAVE_STARTED_NOT_COMPLETED; - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - jam(); - cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED; - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - jam(); - cgcpMasterTakeOverState = COMMIT_STARTED_NOT_COMPLETED; - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - break; - default: - ndbrequire(false); - break; - }//switch - break; - case SAVE_STARTED_NOT_COMPLETED: - switch (gcpState) { - case MasterGCPConf::GCP_READY: - jam(); - break; - case MasterGCPConf::GCP_PREPARE_RECEIVED: - ndbrequire(false); - break; - case MasterGCPConf::GCP_COMMIT_RECEIVED: - ndbrequire(false); - break; - case MasterGCPConf::GCP_TC_FINISHED: - jam(); - break; - default: - ndbrequire(false); - break; - }//switch - break; - default: - ndbrequire(false); - break; - 
}//switch - receiveLoopMacro(MASTER_GCPREQ, senderNodePtr.i); - /*-------------------------------------------------------------------------*/ - // We have now received all responses and are ready to take over the GCP - // protocol as master. - /*-------------------------------------------------------------------------*/ - MASTER_GCPhandling(signal, failedNodeId); - return; -}//Dbdih::execMASTER_GCPCONF() - -void Dbdih::execMASTER_GCPREF(Signal* signal) -{ - const MasterGCPRef * const ref = (MasterGCPRef *)&signal->theData[0]; - jamEntry(); - receiveLoopMacro(MASTER_GCPREQ, ref->senderNodeId); - /*-------------------------------------------------------------------------*/ - // We have now received all responses and are ready to take over the GCP - // protocol as master. - /*-------------------------------------------------------------------------*/ - MASTER_GCPhandling(signal, ref->failedNodeId); -}//Dbdih::execMASTER_GCPREF() - -void Dbdih::MASTER_GCPhandling(Signal* signal, Uint32 failedNodeId) -{ - NodeRecordPtr failedNodePtr; - cmasterState = MASTER_ACTIVE; - /*----------------------------------------------------------*/ - /* REMOVE ALL ACTIVE STATUS ON ALREADY FAILED NODES */ - /* THIS IS PERFORMED HERE SINCE WE GET THE LCP ACTIVE */ - /* STATUS AS PART OF THE COPY RESTART INFO AND THIS IS*/ - /* HANDLED BY THE MASTER GCP TAKE OVER PROTOCOL. */ - /*----------------------------------------------------------*/ - - failedNodePtr.i = failedNodeId; - ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); - switch (cgcpMasterTakeOverState) { - case ALL_READY: - jam(); - startGcp(signal); - break; - case PREPARE_STARTED_NOT_COMMITTED: - { - NodeRecordPtr nodePtr; - jam(); - c_GCP_PREPARE_Counter.clearWaitingFor(); - nodePtr.i = cfirstAliveNode; - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->gcpstate == NodeRecord::READY) { - jam(); - c_GCP_PREPARE_Counter.setWaitingFor(nodePtr.i); - sendGCP_PREPARE(signal, nodePtr.i); - }//if - nodePtr.i = nodePtr.p->nextNode; - } while(nodePtr.i != RNIL); - if (c_GCP_PREPARE_Counter.done()) { - jam(); - gcpcommitreqLab(signal); - }//if - break; - } - case ALL_PREPARED: - jam(); - gcpcommitreqLab(signal); - break; - case COMMIT_STARTED_NOT_COMPLETED: - { - NodeRecordPtr nodePtr; - jam(); - c_GCP_COMMIT_Counter.clearWaitingFor(); - nodePtr.i = cfirstAliveNode; - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->gcpstate == NodeRecord::PREPARE_RECEIVED) { - jam(); - sendGCP_COMMIT(signal, nodePtr.i); - c_GCP_COMMIT_Counter.setWaitingFor(nodePtr.i); - } else { - ndbrequire((nodePtr.p->gcpstate == NodeRecord::NODE_FINISHED) || - (nodePtr.p->gcpstate == NodeRecord::COMMIT_SENT)); - }//if - nodePtr.i = nodePtr.p->nextNode; - } while(nodePtr.i != RNIL); - if (c_GCP_COMMIT_Counter.done()){ - jam(); - gcpsavereqLab(signal); - }//if - break; - } - case COMMIT_COMPLETED: - jam(); - gcpsavereqLab(signal); - break; - case SAVE_STARTED_NOT_COMPLETED: - { - NodeRecordPtr nodePtr; - jam(); - SYSFILE->newestRestorableGCI = coldgcp; - nodePtr.i = cfirstAliveNode; - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - SYSFILE->lastCompletedGCI[nodePtr.i] = coldgcp; - nodePtr.i = nodePtr.p->nextNode; - } while (nodePtr.i != RNIL); - /**------------------------------------------------------------------- - * THE FAILED NODE DID ALSO PARTICIPATE IN THIS GLOBAL CHECKPOINT - * WHICH IS RECORDED. 
- *-------------------------------------------------------------------*/ - SYSFILE->lastCompletedGCI[failedNodeId] = coldgcp; - copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT); - break; - } - default: - ndbrequire(false); - break; - }//switch - - signal->theData[0] = NDB_LE_GCP_TakeoverCompleted; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); - - /*--------------------------------------------------*/ - /* WE SEPARATE HANDLING OF GLOBAL CHECKPOINTS */ - /* AND LOCAL CHECKPOINTS HERE. LCP'S HAVE TO */ - /* REMOVE ALL FAILED FRAGMENTS BEFORE WE CAN */ - /* HANDLE THE LCP PROTOCOL. */ - /*--------------------------------------------------*/ - checkLocalNodefailComplete(signal, failedNodeId, NF_GCP_TAKE_OVER); - - return; -}//Dbdih::masterGcpConfFromFailedLab() - -void -Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, Uint32 tableId) -{ - jamEntry(); - TabRecordPtr tabPtr; - tabPtr.i = tableId; - const Uint32 RT_BREAK = 64; - if (ERROR_INSERTED(7125)) { - return; - }//if - for (Uint32 i = 0; i= ctabFileSize){ - jam(); - /** - * Ready with entire loop - * Return to master - */ - setAllowNodeStart(nodeId, true); - if (getNodeStatus(nodeId) == NodeRecord::STARTING) { - jam(); - StartInfoConf * conf = (StartInfoConf*)&signal->theData[0]; - conf->sendingNodeId = cownNodeId; - conf->startingNodeId = nodeId; - sendSignal(cmasterdihref, GSN_START_INFOCONF, signal, - StartInfoConf::SignalLength, JBB); - }//if - return; - }//if - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) { - jam(); - invalidateNodeLCP(signal, nodeId, tabPtr); - return; - }//if - tabPtr.i++; - }//for - signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -}//Dbdih::invalidateNodeLCP() - -void -Dbdih::invalidateNodeLCP(Signal* signal, Uint32 nodeId, TabRecordPtr tabPtr) -{ - /** - * Check so that no one else is using the tab descriptior - */ - if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) { - jam(); - signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3); - return; - }//if - - /** - * For each fragment - */ - bool modified = false; - FragmentstorePtr fragPtr; - for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){ - jam(); - getFragstore(tabPtr.p, fragNo, fragPtr); - /** - * For each of replica record - */ - ReplicaRecordPtr replicaPtr; - for(replicaPtr.i = fragPtr.p->oldStoredReplicas; replicaPtr.i != RNIL; - replicaPtr.i = replicaPtr.p->nextReplica) { - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if(replicaPtr.p->procNode == nodeId){ - jam(); - /** - * Found one with correct node id - */ - /** - * Invalidate all LCP's - */ - modified = true; - for(int i = 0; i < MAX_LCP_STORED; i++) { - replicaPtr.p->lcpStatus[i] = ZINVALID; - }//if - /** - * And reset nextLcp - */ - replicaPtr.p->nextLcp = 0; - replicaPtr.p->noCrashedReplicas = 0; - }//if - }//for - }//for - - if (modified) { - jam(); - /** - * Save table description to disk - */ - tabPtr.p->tabCopyStatus = TabRecord::CS_INVALIDATE_NODE_LCP; - tabPtr.p->tabUpdateState = TabRecord::US_INVALIDATE_NODE_LCP; - tabPtr.p->tabRemoveNode = nodeId; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - } - - jam(); - /** - * Move to 
next table - */ - tabPtr.i++; - signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; -}//Dbdih::invalidateNodeLCP() - -/*------------------------------------------------*/ -/* INPUT: TABPTR */ -/* TNODEID */ -/*------------------------------------------------*/ -void Dbdih::removeNodeFromTables(Signal* signal, - Uint32 nodeId, Uint32 tableId) -{ - jamEntry(); - TabRecordPtr tabPtr; - tabPtr.i = tableId; - const Uint32 RT_BREAK = 64; - for (Uint32 i = 0; i= ctabFileSize){ - jam(); - removeNodeFromTablesComplete(signal, nodeId); - return; - }//if - - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) { - jam(); - removeNodeFromTable(signal, nodeId, tabPtr); - return; - }//if - tabPtr.i++; - }//for - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -} - -void Dbdih::removeNodeFromTable(Signal* signal, - Uint32 nodeId, TabRecordPtr tabPtr){ - - /** - * Check so that no one else is using the tab descriptior - */ - if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) { - jam(); - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3); - return; - }//if - - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - const Uint32 lcpId = nodePtr.p->m_remove_node_from_table_lcp_id; - - /** - * For each fragment - */ - Uint32 noOfRemovedReplicas = 0; // No of replicas removed - Uint32 noOfRemovedLcpReplicas = 0; // No of replicas in LCP removed - Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining - - const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE); - const bool unlogged = (tabPtr.p->tabStorage != TabRecord::ST_NORMAL); - - FragmentstorePtr fragPtr; - for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){ - jam(); - getFragstore(tabPtr.p, fragNo, fragPtr); - - /** - * For each of replica record - */ - bool found = false; - ReplicaRecordPtr replicaPtr; - for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL; - replicaPtr.i = replicaPtr.p->nextReplica) { - jam(); - - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if(replicaPtr.p->procNode == nodeId){ - jam(); - found = true; - noOfRemovedReplicas++; - removeNodeFromStored(nodeId, fragPtr, replicaPtr, unlogged); - if(replicaPtr.p->lcpOngoingFlag){ - jam(); - /** - * This replica is currently LCP:ed - */ - ndbrequire(fragPtr.p->noLcpReplicas > 0); - fragPtr.p->noLcpReplicas --; - - noOfRemovedLcpReplicas ++; - replicaPtr.p->lcpOngoingFlag = false; - } - - if (lcpId != RNIL) - { - jam(); - Uint32 lcpNo = prevLcpNo(replicaPtr.p->nextLcp); - if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID && - replicaPtr.p->lcpId[lcpNo] == SYSFILE->latestLCP_ID) - { - jam(); - replicaPtr.p->lcpStatus[lcpNo] = ZINVALID; - replicaPtr.p->lcpId[lcpNo] = 0; - replicaPtr.p->nextLcp = lcpNo; - ndbout_c("REMOVING lcp: %u from table: %u frag: %u node: %u", - SYSFILE->latestLCP_ID, - tabPtr.i, fragNo, nodeId); - } - } - } - } - if (!found) - { - jam(); - /** - * Run updateNodeInfo to remove any dead nodes from list of activeNodes - * see bug#15587 - */ - updateNodeInfo(fragPtr); - } - noOfRemainingLcpReplicas += fragPtr.p->noLcpReplicas; 
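Both invalidateNodeLCP() and removeNodeFromTables() above rely on the same real-time-break pattern: scan at most RT_BREAK tables, then post a CONTINUEB signal to self and resume later, so the single-threaded block never starves other signals. A simplified, self-contained sketch of that pattern (a plain job queue stands in for the signal scheduler; names are illustrative):

    #include <cstdio>
    #include <functional>
    #include <queue>

    static std::queue<std::function<void()>> jobs;   // stand-in for the signal queue

    void scanTables(unsigned nextTable, unsigned tableCount) {
      const unsigned RT_BREAK = 64;                   // batch size per slice
      for (unsigned i = 0; i < RT_BREAK; i++, nextTable++) {
        if (nextTable >= tableCount) {
          std::printf("scan complete\n");
          return;                                     // whole loop finished
        }
        // ... per-table work would go here ...
      }
      // Batch exhausted: reschedule ourselves instead of hogging the thread.
      jobs.push([=] { scanTables(nextTable, tableCount); });
    }

    int main() {
      jobs.push([] { scanTables(0, 200); });
      while (!jobs.empty()) { auto j = jobs.front(); jobs.pop(); j(); }
      return 0;
    }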
- } - - if(noOfRemovedReplicas == 0){ - jam(); - /** - * The table had no replica on the failed node - * continue with next table - */ - tabPtr.i++; - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; - } - - /** - * We did remove at least one replica - */ - bool ok = false; - switch(tabPtr.p->tabLcpStatus){ - case TabRecord::TLS_COMPLETED: - ok = true; - jam(); - /** - * WE WILL WRITE THE TABLE DESCRIPTION TO DISK AT THIS TIME - * INDEPENDENT OF WHAT THE LOCAL CHECKPOINT NEEDED. - * THIS IS TO ENSURE THAT THE FAILED NODES ARE ALSO UPDATED ON DISK - * IN THE DIH DATA STRUCTURES BEFORE WE COMPLETE HANDLING OF THE - * NODE FAILURE. - */ - ndbrequire(noOfRemovedLcpReplicas == 0); - - tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE; - tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE; - tabPtr.p->tabRemoveNode = nodeId; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - break; - case TabRecord::TLS_ACTIVE: - ok = true; - jam(); - /** - * The table is participating in an LCP currently - */ - // Fall through - break; - case TabRecord::TLS_WRITING_TO_FILE: - ok = true; - jam(); - /** - * This should never happen since we in the beginning of this function - * checks the tabCopyStatus - */ - ndbrequire(lcpOngoingFlag); - ndbrequire(false); - break; - } - ndbrequire(ok); - - /** - * The table is participating in an LCP currently - * and we removed some replicas that should have been checkpointed - */ - ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE); - ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE); - - /** - * Save the table - */ - tabPtr.p->tabCopyStatus = TabRecord::CS_REMOVE_NODE; - tabPtr.p->tabUpdateState = TabRecord::US_REMOVE_NODE; - tabPtr.p->tabRemoveNode = nodeId; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - - if(noOfRemainingLcpReplicas == 0){ - jam(); - /** - * The removal on the failed node made the LCP complete - */ - tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE; - checkLcpAllTablesDoneInLqh(); - } -} - -void -Dbdih::removeNodeFromTablesComplete(Signal* signal, Uint32 nodeId){ - jam(); - - /** - * Check if we "accidently" completed a LCP - */ - checkLcpCompletedLab(signal); - - /** - * Check if we (DIH) are finished with node fail handling - */ - checkLocalNodefailComplete(signal, nodeId, NF_REMOVE_NODE_FROM_TABLE); -} - -void -Dbdih::checkLocalNodefailComplete(Signal* signal, Uint32 failedNodeId, - NodefailHandlingStep step){ - jam(); - - NodeRecordPtr nodePtr; - nodePtr.i = failedNodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - ndbrequire(nodePtr.p->m_nodefailSteps.get(step)); - nodePtr.p->m_nodefailSteps.clear(step); - - if(nodePtr.p->m_nodefailSteps.count() > 0){ - jam(); - return; - } - - if (ERROR_INSERTED(7030)) - { - g_eventLogger.info("Reenable GCP_PREPARE"); - CLEAR_ERROR_INSERT_VALUE; - } - - NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0]; - nf->blockNo = DBDIH; - nf->nodeId = cownNodeId; - nf->failedNodeId = failedNodeId; - nf->from = __LINE__; - sendSignal(reference(), GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); -} - - -void -Dbdih::setLocalNodefailHandling(Signal* signal, Uint32 failedNodeId, - NodefailHandlingStep step){ - 
jam(); - - NodeRecordPtr nodePtr; - nodePtr.i = failedNodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - ndbrequire(!nodePtr.p->m_nodefailSteps.get(step)); - nodePtr.p->m_nodefailSteps.set(step); -} - -void Dbdih::startLcpTakeOverLab(Signal* signal, Uint32 failedNodeId) -{ - /*--------------------------------------------------------------------*/ - // Start LCP master take over process. Consists of the following steps. - // 1) Ensure that all LQH's have reported all fragments they have been - // told to checkpoint. Can be a fairly long step time-wise. - // 2) Query all nodes about their LCP status. - // During the query process we do not want our own state to change. - // This can change due to delayed reception of LCP_REPORT, completed - // save of table on disk or reception of DIH_LCPCOMPLETE from other - // node. - /*--------------------------------------------------------------------*/ -}//Dbdih::startLcpTakeOver() - -void Dbdih::execEMPTY_LCP_CONF(Signal* signal) -{ - jamEntry(); - - ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_EMPTY_LCP); - - const EmptyLcpConf * const conf = (EmptyLcpConf *)&signal->theData[0]; - Uint32 nodeId = conf->senderNodeId; - - if(!conf->idle){ - jam(); - if (conf->tableId < c_lcpMasterTakeOverState.minTableId) { - jam(); - c_lcpMasterTakeOverState.minTableId = conf->tableId; - c_lcpMasterTakeOverState.minFragId = conf->fragmentId; - } else if (conf->tableId == c_lcpMasterTakeOverState.minTableId && - conf->fragmentId < c_lcpMasterTakeOverState.minFragId) { - jam(); - c_lcpMasterTakeOverState.minFragId = conf->fragmentId; - }//if - if(isMaster()){ - jam(); - c_lcpState.m_LAST_LCP_FRAG_ORD.setWaitingFor(nodeId); - } - } - - receiveLoopMacro(EMPTY_LCP_REQ, nodeId); - /*--------------------------------------------------------------------*/ - // Received all EMPTY_LCPCONF. We can continue with next phase of the - // take over LCP master process. - /*--------------------------------------------------------------------*/ - c_lcpMasterTakeOverState.set(LMTOS_WAIT_LCP_FRAG_REP, __LINE__); - checkEmptyLcpComplete(signal); - return; -}//Dbdih::execEMPTY_LCPCONF() - -void -Dbdih::checkEmptyLcpComplete(Signal *signal){ - - ndbrequire(c_lcpMasterTakeOverState.state == LMTOS_WAIT_LCP_FRAG_REP); - - if(c_lcpState.noOfLcpFragRepOutstanding > 0){ - jam(); - return; - } - - if(isMaster()){ - jam(); - - signal->theData[0] = NDB_LE_LCP_TakeoverStarted; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); - - signal->theData[0] = 7012; - execDUMP_STATE_ORD(signal); - - if (ERROR_INSERTED(7194)) - { - ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE"); - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId; - signal->theData[2] = 0; // Tab id - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - } - - c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__); - MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0]; - req->masterRef = reference(); - req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId; - sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ); - - } else { - sendMASTER_LCPCONF(signal); - } -} - -/*--------------------------------------------------*/ -/* THE MASTER HAS FAILED AND THE NEW MASTER IS*/ -/* QUERYING THIS NODE ABOUT THE STATE OF THE */ -/* LOCAL CHECKPOINT PROTOCOL. 
*/ -/*--------------------------------------------------*/ -void Dbdih::execMASTER_LCPREQ(Signal* signal) -{ - const MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0]; - jamEntry(); - const BlockReference newMasterBlockref = req->masterRef; - - if (newMasterBlockref != cmasterdihref) - { - jam(); - ndbout_c("resending GSN_MASTER_LCPREQ"); - sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal, - 50, signal->getLength()); - return; - } - Uint32 failedNodeId = req->failedNodeId; - - /** - * There can be no take over with the same master - */ - ndbrequire(c_lcpState.m_masterLcpDihRef != newMasterBlockref); - c_lcpState.m_masterLcpDihRef = newMasterBlockref; - c_lcpState.m_MASTER_LCPREQ_Received = true; - c_lcpState.m_MASTER_LCPREQ_FailedNodeId = failedNodeId; - - if(newMasterBlockref != cmasterdihref){ - jam(); - ndbrequire(0); - } - - sendMASTER_LCPCONF(signal); -}//Dbdih::execMASTER_LCPREQ() - -void -Dbdih::sendMASTER_LCPCONF(Signal * signal){ - - if(!c_EMPTY_LCP_REQ_Counter.done()){ - /** - * Have not received all EMPTY_LCP_REP - * dare not answer MASTER_LCP_CONF yet - */ - jam(); - return; - } - - if(!c_lcpState.m_MASTER_LCPREQ_Received){ - jam(); - /** - * Has not received MASTER_LCPREQ yet - */ - return; - } - - if(c_lcpState.lcpStatus == LCP_INIT_TABLES){ - jam(); - /** - * Still aborting old initLcpLab - */ - return; - } - - if(c_lcpState.lcpStatus == LCP_COPY_GCI){ - jam(); - /** - * Restart it - */ - //Uint32 lcpId = SYSFILE->latestLCP_ID; - SYSFILE->latestLCP_ID--; - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); -#if 0 - if(c_copyGCISlave.m_copyReason == CopyGCIReq::LOCAL_CHECKPOINT){ - g_eventLogger.info("Dbdih: Also resetting c_copyGCISlave"); - c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE; - c_copyGCISlave.m_expectedNextWord = 0; - } -#endif - } - - MasterLCPConf::State lcpState; - switch (c_lcpState.lcpStatus) { - case LCP_STATUS_IDLE: - jam(); - /*------------------------------------------------*/ - /* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */ - /* SINCE NO COPY OF RESTART INFORMATION HAVE*/ - /* BEEN RECEIVED YET. ALSO THE PREVIOUS */ - /* CHECKPOINT HAVE BEEN FULLY COMPLETED. */ - /*------------------------------------------------*/ - lcpState = MasterLCPConf::LCP_STATUS_IDLE; - break; - case LCP_STATUS_ACTIVE: - jam(); - /*--------------------------------------------------*/ - /* COPY OF RESTART INFORMATION HAS BEEN */ - /* PERFORMED AND ALSO RESPONSE HAVE BEEN SENT.*/ - /*--------------------------------------------------*/ - lcpState = MasterLCPConf::LCP_STATUS_ACTIVE; - break; - case LCP_TAB_COMPLETED: - jam(); - /*--------------------------------------------------------*/ - /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */ - /* ALL TABLES. SAVE OF AT LEAST ONE TABLE IS */ - /* ONGOING YET. */ - /*--------------------------------------------------------*/ - lcpState = MasterLCPConf::LCP_TAB_COMPLETED; - break; - case LCP_TAB_SAVED: - jam(); - /*--------------------------------------------------------*/ - /* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */ - /* ALL TABLES. ALL TABLES HAVE ALSO BEEN SAVED */ - /* ALL OTHER NODES ARE NOT YET FINISHED WITH */ - /* THE LOCAL CHECKPOINT. 
*/ - /*--------------------------------------------------------*/ - lcpState = MasterLCPConf::LCP_TAB_SAVED; - break; - case LCP_TCGET: - case LCP_CALCULATE_KEEP_GCI: - case LCP_TC_CLOPSIZE: - case LCP_START_LCP_ROUND: - /** - * These should only exists on the master - * but since this is master take over - * it not allowed - */ - ndbrequire(false); - lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning - break; - case LCP_COPY_GCI: - case LCP_INIT_TABLES: - /** - * These two states are handled by if statements above - */ - ndbrequire(false); - lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning - break; - default: - ndbrequire(false); - lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning - }//switch - - Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId; - MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0]; - conf->senderNodeId = cownNodeId; - conf->lcpState = lcpState; - conf->failedNodeId = failedNodeId; - sendSignal(c_lcpState.m_masterLcpDihRef, GSN_MASTER_LCPCONF, - signal, MasterLCPConf::SignalLength, JBB); - - // Answer to MASTER_LCPREQ sent, reset flag so - // that it's not sent again before another request comes in - c_lcpState.m_MASTER_LCPREQ_Received = false; - - if(c_lcpState.lcpStatus == LCP_TAB_SAVED){ -#ifdef VM_TRACE - g_eventLogger.info("Sending extra GSN_LCP_COMPLETE_REP to new master"); -#endif - sendLCP_COMPLETE_REP(signal); - } - - if(!isMaster()){ - c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__); - checkLocalNodefailComplete(signal, failedNodeId, NF_LCP_TAKE_OVER); - } - - return; -} - -NdbOut& -operator<<(NdbOut& out, const Dbdih::LcpMasterTakeOverState state){ - switch(state){ - case Dbdih::LMTOS_IDLE: - out << "LMTOS_IDLE"; - break; - case Dbdih::LMTOS_WAIT_EMPTY_LCP: - out << "LMTOS_WAIT_EMPTY_LCP"; - break; - case Dbdih::LMTOS_WAIT_LCP_FRAG_REP: - out << "LMTOS_WAIT_EMPTY_LCP"; - break; - case Dbdih::LMTOS_INITIAL: - out << "LMTOS_INITIAL"; - break; - case Dbdih::LMTOS_ALL_IDLE: - out << "LMTOS_ALL_IDLE"; - break; - case Dbdih::LMTOS_ALL_ACTIVE: - out << "LMTOS_ALL_ACTIVE"; - break; - case Dbdih::LMTOS_LCP_CONCLUDING: - out << "LMTOS_LCP_CONCLUDING"; - break; - case Dbdih::LMTOS_COPY_ONGOING: - out << "LMTOS_COPY_ONGOING"; - break; - } - return out; -} - -struct MASTERLCP_StateTransitions { - Dbdih::LcpMasterTakeOverState CurrentState; - MasterLCPConf::State ParticipantState; - Dbdih::LcpMasterTakeOverState NewState; -}; - -static const -MASTERLCP_StateTransitions g_masterLCPTakeoverStateTransitions[] = { - /** - * Current = LMTOS_INITIAL - */ - { Dbdih::LMTOS_INITIAL, - MasterLCPConf::LCP_STATUS_IDLE, - Dbdih::LMTOS_ALL_IDLE }, - - { Dbdih::LMTOS_INITIAL, - MasterLCPConf::LCP_STATUS_ACTIVE, - Dbdih::LMTOS_ALL_ACTIVE }, - - { Dbdih::LMTOS_INITIAL, - MasterLCPConf::LCP_TAB_COMPLETED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_INITIAL, - MasterLCPConf::LCP_TAB_SAVED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - /** - * Current = LMTOS_ALL_IDLE - */ - { Dbdih::LMTOS_ALL_IDLE, - MasterLCPConf::LCP_STATUS_IDLE, - Dbdih::LMTOS_ALL_IDLE }, - - { Dbdih::LMTOS_ALL_IDLE, - MasterLCPConf::LCP_STATUS_ACTIVE, - Dbdih::LMTOS_COPY_ONGOING }, - - { Dbdih::LMTOS_ALL_IDLE, - MasterLCPConf::LCP_TAB_COMPLETED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_ALL_IDLE, - MasterLCPConf::LCP_TAB_SAVED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - /** - * Current = LMTOS_COPY_ONGOING - */ - { Dbdih::LMTOS_COPY_ONGOING, - MasterLCPConf::LCP_STATUS_IDLE, - Dbdih::LMTOS_COPY_ONGOING }, - - { Dbdih::LMTOS_COPY_ONGOING, - 
MasterLCPConf::LCP_STATUS_ACTIVE, - Dbdih::LMTOS_COPY_ONGOING }, - - /** - * Current = LMTOS_ALL_ACTIVE - */ - { Dbdih::LMTOS_ALL_ACTIVE, - MasterLCPConf::LCP_STATUS_IDLE, - Dbdih::LMTOS_COPY_ONGOING }, - - { Dbdih::LMTOS_ALL_ACTIVE, - MasterLCPConf::LCP_STATUS_ACTIVE, - Dbdih::LMTOS_ALL_ACTIVE }, - - { Dbdih::LMTOS_ALL_ACTIVE, - MasterLCPConf::LCP_TAB_COMPLETED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_ALL_ACTIVE, - MasterLCPConf::LCP_TAB_SAVED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - /** - * Current = LMTOS_LCP_CONCLUDING - */ - { Dbdih::LMTOS_LCP_CONCLUDING, - MasterLCPConf::LCP_STATUS_IDLE, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_LCP_CONCLUDING, - MasterLCPConf::LCP_STATUS_ACTIVE, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_LCP_CONCLUDING, - MasterLCPConf::LCP_TAB_COMPLETED, - Dbdih::LMTOS_LCP_CONCLUDING }, - - { Dbdih::LMTOS_LCP_CONCLUDING, - MasterLCPConf::LCP_TAB_SAVED, - Dbdih::LMTOS_LCP_CONCLUDING } -}; - -const Uint32 g_masterLCPTakeoverStateTransitionsRows = -sizeof(g_masterLCPTakeoverStateTransitions) / sizeof(struct MASTERLCP_StateTransitions); - -void Dbdih::execMASTER_LCPCONF(Signal* signal) -{ - const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0]; - jamEntry(); - - if (ERROR_INSERTED(7194)) - { - ndbout_c("delaying MASTER_LCPCONF due to error 7194"); - sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal, - 300, signal->getLength()); - return; - } - - Uint32 senderNodeId = conf->senderNodeId; - MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState; - const Uint32 failedNodeId = conf->failedNodeId; - NodeRecordPtr nodePtr; - nodePtr.i = senderNodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->lcpStateAtTakeOver = lcpState; - - CRASH_INSERTION(7180); - -#ifdef VM_TRACE - g_eventLogger.info("MASTER_LCPCONF"); - printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0); -#endif - - bool found = false; - for(Uint32 i = 0; iCurrentState == c_lcpMasterTakeOverState.state && - valid->ParticipantState == lcpState){ - jam(); - found = true; - c_lcpMasterTakeOverState.set(valid->NewState, __LINE__); - break; - } - } - ndbrequire(found); - - bool ok = false; - switch(lcpState){ - case MasterLCPConf::LCP_STATUS_IDLE: - ok = true; - break; - case MasterLCPConf::LCP_STATUS_ACTIVE: - case MasterLCPConf::LCP_TAB_COMPLETED: - case MasterLCPConf::LCP_TAB_SAVED: - ok = true; - c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.setWaitingFor(nodePtr.i); - break; - } - ndbrequire(ok); - - receiveLoopMacro(MASTER_LCPREQ, senderNodeId); - /*-------------------------------------------------------------------------*/ - // We have now received all responses and are ready to take over the LCP - // protocol as master. - /*-------------------------------------------------------------------------*/ - MASTER_LCPhandling(signal, failedNodeId); -}//Dbdih::execMASTER_LCPCONF() - -void Dbdih::execMASTER_LCPREF(Signal* signal) -{ - const MasterLCPRef * const ref = (MasterLCPRef *)&signal->theData[0]; - jamEntry(); - receiveLoopMacro(MASTER_LCPREQ, ref->senderNodeId); - /*-------------------------------------------------------------------------*/ - // We have now received all responses and are ready to take over the LCP - // protocol as master. 
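The removed g_masterLCPTakeoverStateTransitions table above drives how the new master folds each participant's reported LCP state into its own takeover state: execMASTER_LCPCONF() looks up the (current state, reported state) pair and requires that a matching row exists. A rough standalone sketch of that table-driven fold (enum values and rows are invented for illustration, not NDB's real table):

    #include <cassert>
    #include <cstdio>

    enum TakeoverState { INITIAL, ALL_IDLE, COPY_ONGOING, CONCLUDING };
    enum ReportedState { IDLE, ACTIVE, TAB_COMPLETED, TAB_SAVED };

    struct Transition { TakeoverState cur; ReportedState rep; TakeoverState next; };

    static const Transition table[] = {
      { INITIAL,  IDLE,          ALL_IDLE     },
      { INITIAL,  ACTIVE,        COPY_ONGOING },
      { INITIAL,  TAB_COMPLETED, CONCLUDING   },
      { ALL_IDLE, IDLE,          ALL_IDLE     },
      { ALL_IDLE, ACTIVE,        COPY_ONGOING },
    };

    TakeoverState fold(TakeoverState cur, ReportedState rep) {
      for (const Transition &t : table)
        if (t.cur == cur && t.rep == rep)
          return t.next;
      assert(!"no matching transition");        // mirrors ndbrequire(found)
      return cur;
    }

    int main() {
      TakeoverState s = INITIAL;
      s = fold(s, IDLE);      // first node reports an idle LCP
      s = fold(s, ACTIVE);    // second node reports an ongoing LCP
      std::printf("state=%d\n", (int)s);
      return 0;
    }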
- /*-------------------------------------------------------------------------*/ - MASTER_LCPhandling(signal, ref->failedNodeId); -}//Dbdih::execMASTER_LCPREF() - -void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId) -{ - /*------------------------------------------------------------------------- - * - * WE ARE NOW READY TO CONCLUDE THE TAKE OVER AS MASTER. - * WE HAVE ENOUGH INFO TO START UP ACTIVITIES IN THE PROPER PLACE. - * ALSO SET THE PROPER STATE VARIABLES. - *------------------------------------------------------------------------*/ - c_lcpState.currentFragment.tableId = c_lcpMasterTakeOverState.minTableId; - c_lcpState.currentFragment.fragmentId = c_lcpMasterTakeOverState.minFragId; - c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH; - - NodeRecordPtr failedNodePtr; - failedNodePtr.i = failedNodeId; - ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); - - switch (c_lcpMasterTakeOverState.state) { - case LMTOS_ALL_IDLE: - jam(); - /* --------------------------------------------------------------------- */ - // All nodes were idle in the LCP protocol. Start checking for start of LCP - // protocol. - /* --------------------------------------------------------------------- */ -#ifdef VM_TRACE - g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_IDLE -> checkLcpStart"); -#endif - checkLcpStart(signal, __LINE__); - break; - case LMTOS_COPY_ONGOING: - jam(); - /* --------------------------------------------------------------------- */ - // We were in the starting process of the LCP protocol. We will restart the - // protocol by calculating the keep gci and storing the new lcp id. - /* --------------------------------------------------------------------- */ -#ifdef VM_TRACE - g_eventLogger.info("MASTER_LCPhandling:: LMTOS_COPY_ONGOING -> storeNewLcpId"); -#endif - if (c_lcpState.lcpStatus == LCP_STATUS_ACTIVE) { - jam(); - /*---------------------------------------------------------------------*/ - /* WE NEED TO DECREASE THE LATEST LCP ID SINCE WE HAVE ALREADY */ - /* STARTED THIS */ - /* LOCAL CHECKPOINT. */ - /*---------------------------------------------------------------------*/ - Uint32 lcpId = SYSFILE->latestLCP_ID; -#ifdef VM_TRACE - g_eventLogger.info("Decreasing latestLCP_ID from %d to %d", lcpId, lcpId - 1); -#endif - SYSFILE->latestLCP_ID--; - }//if - storeNewLcpIdLab(signal); - break; - case LMTOS_ALL_ACTIVE: - { - jam(); - /* ------------------------------------------------------------------- - * Everybody was in the active phase. We will restart sending - * LCP_FRAGORD to the nodes from the new master. - * We also need to set dihLcpStatus to ZACTIVE - * in the master node since the master will wait for all nodes to - * complete before finalising the LCP process. - * ------------------------------------------------------------------ */ -#ifdef VM_TRACE - g_eventLogger.info("MASTER_LCPhandling:: LMTOS_ALL_ACTIVE -> " - "startLcpRoundLoopLab(table=%u, fragment=%u)", - c_lcpMasterTakeOverState.minTableId, - c_lcpMasterTakeOverState.minFragId); -#endif - - c_lcpState.keepGci = SYSFILE->keepGCI; - startLcpRoundLoopLab(signal, 0, 0); - break; - } - case LMTOS_LCP_CONCLUDING: - { - jam(); - /* ------------------------------------------------------------------- */ - // The LCP process is in the finalisation phase. We simply wait for it to - // complete with signals arriving in. We need to check also if we should - // change state due to table write completion during state - // collection phase. 
- /* ------------------------------------------------------------------- */ - ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE); - startLcpRoundLoopLab(signal, 0, 0); - break; - } - default: - ndbrequire(false); - break; - }//switch - signal->theData[0] = NDB_LE_LCP_TakeoverCompleted; - signal->theData[1] = c_lcpMasterTakeOverState.state; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - signal->theData[0] = 7012; - execDUMP_STATE_ORD(signal); - - c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__); - - checkLocalNodefailComplete(signal, failedNodePtr.i, NF_LCP_TAKE_OVER); -} - -/* ------------------------------------------------------------------------- */ -/* A BLOCK OR A NODE HAS COMPLETED THE HANDLING OF THE NODE FAILURE. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::execNF_COMPLETEREP(Signal* signal) -{ - NodeRecordPtr failedNodePtr; - NFCompleteRep * const nfCompleteRep = (NFCompleteRep *)&signal->theData[0]; - jamEntry(); - const Uint32 blockNo = nfCompleteRep->blockNo; - Uint32 nodeId = nfCompleteRep->nodeId; - failedNodePtr.i = nfCompleteRep->failedNodeId; - - ptrCheckGuard(failedNodePtr, MAX_NDB_NODES, nodeRecord); - switch (blockNo) { - case DBTC: - jam(); - ndbrequire(failedNodePtr.p->dbtcFailCompleted == ZFALSE); - /* -------------------------------------------------------------------- */ - // Report the event that DBTC completed node failure handling. - /* -------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = DBTC; - signal->theData[2] = failedNodePtr.i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - - failedNodePtr.p->dbtcFailCompleted = ZTRUE; - break; - case DBDICT: - jam(); - ndbrequire(failedNodePtr.p->dbdictFailCompleted == ZFALSE); - /* --------------------------------------------------------------------- */ - // Report the event that DBDICT completed node failure handling. - /* --------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = DBDICT; - signal->theData[2] = failedNodePtr.i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - - failedNodePtr.p->dbdictFailCompleted = ZTRUE; - break; - case DBDIH: - jam(); - ndbrequire(failedNodePtr.p->dbdihFailCompleted == ZFALSE); - /* --------------------------------------------------------------------- */ - // Report the event that DBDIH completed node failure handling. - /* --------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = DBDIH; - signal->theData[2] = failedNodePtr.i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - - failedNodePtr.p->dbdihFailCompleted = ZTRUE; - break; - case DBLQH: - jam(); - ndbrequire(failedNodePtr.p->dblqhFailCompleted == ZFALSE); - /* --------------------------------------------------------------------- */ - // Report the event that DBDIH completed node failure handling. 
- /* --------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = DBLQH; - signal->theData[2] = failedNodePtr.i; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - - failedNodePtr.p->dblqhFailCompleted = ZTRUE; - break; - case 0: /* Node has finished */ - jam(); - ndbrequire(nodeId < MAX_NDB_NODES); - - if (failedNodePtr.p->recNODE_FAILREP == ZFALSE) { - jam(); - /* ------------------------------------------------------------------- */ - // We received a report about completion of node failure before we - // received the message about the NODE failure ourselves. - // We will send the signal to ourselves with a small delay - // (10 milliseconds). - /* ------------------------------------------------------------------- */ - //nf->from = __LINE__; - sendSignalWithDelay(reference(), GSN_NF_COMPLETEREP, signal, 10, - signal->length()); - return; - }//if - - if (!failedNodePtr.p->m_NF_COMPLETE_REP.isWaitingFor(nodeId)){ - jam(); - return; - } - - failedNodePtr.p->m_NF_COMPLETE_REP.clearWaitingFor(nodeId);; - - /* -------------------------------------------------------------------- */ - // Report the event that nodeId has completed node failure handling. - /* -------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = 0; - signal->theData[2] = failedNodePtr.i; - signal->theData[3] = nodeId; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - - nodeFailCompletedCheckLab(signal, failedNodePtr); - return; - break; - default: - ndbrequire(false); - return; - break; - }//switch - if (failedNodePtr.p->dbtcFailCompleted == ZFALSE) { - jam(); - return; - }//if - if (failedNodePtr.p->dbdictFailCompleted == ZFALSE) { - jam(); - return; - }//if - if (failedNodePtr.p->dbdihFailCompleted == ZFALSE) { - jam(); - return; - }//if - if (failedNodePtr.p->dblqhFailCompleted == ZFALSE) { - jam(); - return; - }//if - /* ----------------------------------------------------------------------- */ - /* ALL BLOCKS IN THIS NODE HAVE COMPLETED THEIR PART OF HANDLING THE */ - /* NODE FAILURE. WE CAN NOW REPORT THIS COMPLETION TO ALL OTHER NODES. */ - /* ----------------------------------------------------------------------- */ - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - BlockReference ref = calcDihBlockRef(nodePtr.i); - NFCompleteRep * const nf = (NFCompleteRep *)&signal->theData[0]; - nf->blockNo = 0; - nf->nodeId = cownNodeId; - nf->failedNodeId = failedNodePtr.i; - nf->from = __LINE__; - sendSignal(ref, GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - }//if - }//for - return; -}//Dbdih::execNF_COMPLETEREP() - -void Dbdih::nodeFailCompletedCheckLab(Signal* signal, - NodeRecordPtr failedNodePtr) -{ - jam(); - if (!failedNodePtr.p->m_NF_COMPLETE_REP.done()){ - jam(); - return; - }//if - /* ---------------------------------------------------------------------- */ - /* ALL BLOCKS IN ALL NODES HAVE NOW REPORTED COMPLETION OF THE NODE */ - /* FAILURE HANDLING. WE ARE NOW READY TO ACCEPT THAT THIS NODE STARTS */ - /* AGAIN. 
*/ - /* ---------------------------------------------------------------------- */ - jam(); - failedNodePtr.p->nodeStatus = NodeRecord::DEAD; - failedNodePtr.p->recNODE_FAILREP = ZFALSE; - - /* ---------------------------------------------------------------------- */ - // Report the event that all nodes completed node failure handling. - /* ---------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_NodeFailCompleted; - signal->theData[1] = 0; - signal->theData[2] = failedNodePtr.i; - signal->theData[3] = 0; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - - /* ---------------------------------------------------------------------- */ - // Report to QMGR that we have concluded recovery handling of this node. - /* ---------------------------------------------------------------------- */ - signal->theData[0] = failedNodePtr.i; - sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB); - - if (isMaster()) { - jam(); - /* --------------------------------------------------------------------- */ - /* IF WE ARE MASTER WE MUST CHECK IF COPY FRAGMENT WAS INTERRUPTED */ - /* BY THE FAILED NODES. */ - /* --------------------------------------------------------------------- */ - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = 0; - ptrAss(takeOverPtr, takeOverRecord); - if ((takeOverPtr.p->toMasterStatus == TakeOverRecord::COPY_FRAG) && - (failedNodePtr.i == takeOverPtr.p->toCopyNode)) { - jam(); -#ifdef VM_TRACE - ndbrequire("Tell jonas" == 0); -#endif - /*------------------------------------------------------------------*/ - /* WE ARE CURRENTLY IN THE PROCESS OF COPYING A FRAGMENT. WE */ - /* WILL CHECK IF THE COPY NODE HAVE FAILED. */ - /*------------------------------------------------------------------*/ - takeOverPtr.p->toMasterStatus = TakeOverRecord::SELECTING_NEXT; - startNextCopyFragment(signal, takeOverPtr.i); - return; - }//if - checkStartTakeOver(signal); - }//if - return; -}//Dbdih::nodeFailCompletedCheckLab() - -/*****************************************************************************/ -/* ********** SEIZING / RELEASING MODULE *************/ -/*****************************************************************************/ -/* - 3.4 L O C A L N O D E S E I Z E - ************************************ - */ -/* - 3.4.1 L O C A L N O D E S E I Z E R E Q U E S T - ****************************************************** - */ -void Dbdih::execDISEIZEREQ(Signal* signal) -{ - ConnectRecordPtr connectPtr; - jamEntry(); - Uint32 userPtr = signal->theData[0]; - BlockReference userRef = signal->theData[1]; - ndbrequire(cfirstconnect != RNIL); - connectPtr.i = cfirstconnect; - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - cfirstconnect = connectPtr.p->nfConnect; - connectPtr.p->nfConnect = RNIL; - connectPtr.p->userpointer = userPtr; - connectPtr.p->userblockref = userRef; - connectPtr.p->connectState = ConnectRecord::INUSE; - signal->theData[0] = connectPtr.p->userpointer; - signal->theData[1] = connectPtr.i; - sendSignal(userRef, GSN_DISEIZECONF, signal, 2, JBB); -}//Dbdih::execDISEIZEREQ() - -/* - 3.5 L O C A L N O D E R E L E A S E - **************************************** - */ -/* - 3.5.1 L O C A L N O D E R E L E A S E R E Q U E S T - *******************************************************= - */ -void Dbdih::execDIRELEASEREQ(Signal* signal) -{ - ConnectRecordPtr connectPtr; - jamEntry(); - connectPtr.i = signal->theData[0]; - Uint32 userRef = signal->theData[2]; - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - 
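The seize/release pair here (execDISEIZEREQ / execDIRELEASEREQ) manages connect records as a fixed array whose free entries are chained through an index field, with cfirstconnect heading the chain: seizing pops the head, releasing pushes the record back. A minimal sketch of that intrusive free list, with invented names and an RNIL-style end marker:

    #include <cstdio>
    #include <vector>

    static const unsigned NIL = 0xFFFFFFFF;     // end-of-chain marker (like RNIL)

    struct ConnectRecord { unsigned next = NIL; bool inUse = false; };

    struct Pool {
      std::vector<ConnectRecord> rec;
      unsigned firstFree = NIL;

      explicit Pool(unsigned n) : rec(n) {
        for (unsigned i = 0; i < n; i++) rec[i].next = (i + 1 < n) ? i + 1 : NIL;
        firstFree = n ? 0 : NIL;
      }
      unsigned seize() {                        // pop head of the free chain
        unsigned i = firstFree;
        if (i == NIL) return NIL;
        firstFree = rec[i].next;
        rec[i].next = NIL;
        rec[i].inUse = true;
        return i;
      }
      void release(unsigned i) {                // push back onto the free chain
        rec[i].inUse = false;
        rec[i].next = firstFree;
        firstFree = i;
      }
    };

    int main() {
      Pool pool(4);
      unsigned a = pool.seize();
      unsigned b = pool.seize();
      pool.release(a);
      std::printf("a=%u b=%u reseized=%u\n", a, b, pool.seize());
      return 0;
    }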
ndbrequire(connectPtr.p->connectState != ConnectRecord::FREE); - ndbrequire(connectPtr.p->userblockref == userRef); - signal->theData[0] = connectPtr.p->userpointer; - sendSignal(connectPtr.p->userblockref, GSN_DIRELEASECONF, signal, 1, JBB); - release_connect(connectPtr); -}//Dbdih::execDIRELEASEREQ() - -/* - 3.7 A D D T A B L E - **********************= - */ -/*****************************************************************************/ -/* ********** TABLE ADDING MODULE *************/ -/*****************************************************************************/ -/* - 3.7.1 A D D T A B L E M A I N L Y - *************************************** - */ - -static inline void inc_node_or_group(Uint32 &node, Uint32 max_node) -{ - Uint32 next = node + 1; - node = (next == max_node ? 0 : next); -} - -/* - Spread fragments in backwards compatible mode -*/ -static void set_default_node_groups(Signal *signal, Uint32 noFrags) -{ - Uint16 *node_group_array = (Uint16*)&signal->theData[25]; - Uint32 i; - node_group_array[0] = 0; - for (i = 1; i < noFrags; i++) - node_group_array[i] = UNDEF_NODEGROUP; -} -void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal) -{ - Uint16 node_group_id[MAX_NDB_PARTITIONS]; - jamEntry(); - CreateFragmentationReq * const req = - (CreateFragmentationReq*)signal->getDataPtr(); - - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - Uint32 noOfFragments = req->noOfFragments; - const Uint32 fragType = req->fragmentationType; - const Uint32 primaryTableId = req->primaryTableId; - - Uint32 err = 0; - - do { - NodeGroupRecordPtr NGPtr; - TabRecordPtr primTabPtr; - Uint32 count = 2; - Uint16 noOfReplicas = cnoReplicas; - Uint16 *fragments = (Uint16*)(signal->theData+25); - if (primaryTableId == RNIL) { - jam(); - switch ((DictTabInfo::FragmentType)fragType) - { - /* - Backward compatability and for all places in code not changed. - */ - case DictTabInfo::AllNodesSmallTable: - jam(); - noOfFragments = csystemnodes; - set_default_node_groups(signal, noOfFragments); - break; - case DictTabInfo::AllNodesMediumTable: - jam(); - noOfFragments = 2 * csystemnodes; - set_default_node_groups(signal, noOfFragments); - break; - case DictTabInfo::AllNodesLargeTable: - jam(); - noOfFragments = 4 * csystemnodes; - set_default_node_groups(signal, noOfFragments); - break; - case DictTabInfo::SingleFragment: - jam(); - noOfFragments = 1; - set_default_node_groups(signal, noOfFragments); - break; - case DictTabInfo::DistrKeyHash: - jam(); - case DictTabInfo::DistrKeyLin: - jam(); - if (noOfFragments == 0) - { - jam(); - noOfFragments = csystemnodes; - set_default_node_groups(signal, noOfFragments); - } - break; - default: - jam(); - if (noOfFragments == 0) - { - jam(); - err = CreateFragmentationRef::InvalidFragmentationType; - } - break; - } - if (err) - break; - /* - When we come here the the exact partition is specified - and there is an array of node groups sent along as well. 
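The default fragmentation above derives the fragment count from the table type (one, two or four fragments per data node, or a single fragment), and the loop that follows spreads those fragments over the node groups round-robin, placing each fragment's replicas on consecutive nodes within its group. A standalone sketch of that spreading idea (group layout and helper names are made up for illustration):

    #include <cstdio>
    #include <vector>

    static void incWrap(unsigned &v, unsigned max) { v = (v + 1 == max) ? 0 : v + 1; }

    int main() {
      const unsigned noOfReplicas = 2;
      std::vector<std::vector<unsigned>> nodeGroups = {{1, 2}, {3, 4}};  // nodes per group
      const unsigned noOfFragments = 4;               // e.g. "small table": one per node

      unsigned nextGroup = 0;
      std::vector<unsigned> nextInGroup(nodeGroups.size(), 0);

      for (unsigned frag = 0; frag < noOfFragments; frag++) {
        unsigned g = nextGroup;
        unsigned pos = nextInGroup[g];
        std::printf("fragment %u:", frag);
        for (unsigned r = 0; r < noOfReplicas; r++) {  // replicas inside the group
          std::printf(" node %u", nodeGroups[g][pos]);
          incWrap(pos, (unsigned)nodeGroups[g].size());
        }
        std::printf("\n");
        incWrap(nextInGroup[g], (unsigned)nodeGroups[g].size()); // rotate primary next time
        incWrap(nextGroup, (unsigned)nodeGroups.size());         // next fragment, next group
      }
      return 0;
    }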
- */ - memcpy(&node_group_id[0], &signal->theData[25], 2 * noOfFragments); - Uint16 next_replica_node[MAX_NDB_NODES]; - memset(next_replica_node,0,sizeof(next_replica_node)); - Uint32 default_node_group= c_nextNodeGroup; - for(Uint32 fragNo = 0; fragNo < noOfFragments; fragNo++) - { - jam(); - NGPtr.i = node_group_id[fragNo]; - if (NGPtr.i == UNDEF_NODEGROUP) - { - jam(); - NGPtr.i = default_node_group; - } - if (NGPtr.i > cnoOfNodeGroups) - { - jam(); - err = CreateFragmentationRef::InvalidNodeGroup; - break; - } - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - const Uint32 max = NGPtr.p->nodeCount; - - fragments[count++] = c_nextLogPart++; // Store logpart first - Uint32 tmp= next_replica_node[NGPtr.i]; - for(Uint32 replicaNo = 0; replicaNo < noOfReplicas; replicaNo++) - { - jam(); - const Uint16 nodeId = NGPtr.p->nodesInGroup[tmp]; - fragments[count++]= nodeId; - inc_node_or_group(tmp, max); - } - inc_node_or_group(tmp, max); - next_replica_node[NGPtr.i]= tmp; - - /** - * Next node group for next fragment - */ - inc_node_or_group(default_node_group, cnoOfNodeGroups); - } - if (err) - { - jam(); - break; - } - else - { - jam(); - c_nextNodeGroup = default_node_group; - } - } else { - if (primaryTableId >= ctabFileSize) { - jam(); - err = CreateFragmentationRef::InvalidPrimaryTable; - break; - } - primTabPtr.i = primaryTableId; - ptrAss(primTabPtr, tabRecord); - if (primTabPtr.p->tabStatus != TabRecord::TS_ACTIVE) { - jam(); - err = CreateFragmentationRef::InvalidPrimaryTable; - break; - } - noOfFragments= primTabPtr.p->totalfragments; - for (Uint32 fragNo = 0; - fragNo < noOfFragments; fragNo++) { - jam(); - FragmentstorePtr fragPtr; - ReplicaRecordPtr replicaPtr; - getFragstore(primTabPtr.p, fragNo, fragPtr); - fragments[count++] = fragPtr.p->m_log_part_id; - fragments[count++] = fragPtr.p->preferredPrimary; - for (replicaPtr.i = fragPtr.p->storedReplicas; - replicaPtr.i != RNIL; - replicaPtr.i = replicaPtr.p->nextReplica) { - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) { - jam(); - fragments[count++]= replicaPtr.p->procNode; - } - } - for (replicaPtr.i = fragPtr.p->oldStoredReplicas; - replicaPtr.i != RNIL; - replicaPtr.i = replicaPtr.p->nextReplica) { - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) { - jam(); - fragments[count++]= replicaPtr.p->procNode; - } - } - } - } - if(count != (2U + (1 + noOfReplicas) * noOfFragments)){ - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Illegal configuration change: NoOfReplicas." 
- " Can't be applied online "); - progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, buf); - } - - CreateFragmentationConf * const conf = - (CreateFragmentationConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->noOfReplicas = (Uint32)noOfReplicas; - conf->noOfFragments = (Uint32)noOfFragments; - - fragments[0]= noOfReplicas; - fragments[1]= noOfFragments; - - if(senderRef != 0) - { - jam(); - LinearSectionPtr ptr[3]; - ptr[0].p = (Uint32*)&fragments[0]; - ptr[0].sz = (count + 1) / 2; - sendSignal(senderRef, - GSN_CREATE_FRAGMENTATION_CONF, - signal, - CreateFragmentationConf::SignalLength, - JBB, - ptr, - 1); - } - // Always ACK/NACK (here ACK) - signal->theData[0] = 0; - return; - } while(false); - // Always ACK/NACK (here NACK) - signal->theData[0] = err; -} - -void Dbdih::execDIADDTABREQ(Signal* signal) -{ - Uint32 fragType; - jamEntry(); - - DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr(); - - // Seize connect record - ndbrequire(cfirstconnect != RNIL); - ConnectRecordPtr connectPtr; - connectPtr.i = cfirstconnect; - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - cfirstconnect = connectPtr.p->nfConnect; - - const Uint32 userPtr = req->connectPtr; - const BlockReference userRef = signal->getSendersBlockRef(); - connectPtr.p->nfConnect = RNIL; - connectPtr.p->userpointer = userPtr; - connectPtr.p->userblockref = userRef; - connectPtr.p->connectState = ConnectRecord::INUSE; - connectPtr.p->table = req->tableId; - - TabRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->connectrec = connectPtr.i; - tabPtr.p->tableType = req->tableType; - fragType= req->fragType; - tabPtr.p->schemaVersion = req->schemaVersion; - tabPtr.p->primaryTableId = req->primaryTableId; - - if(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE){ - jam(); - tabPtr.p->tabStatus = TabRecord::TS_CREATING; - sendAddFragreq(signal, connectPtr, tabPtr, 0); - return; - } - - if(getNodeState().getSystemRestartInProgress() && - tabPtr.p->tabStatus == TabRecord::TS_IDLE){ - jam(); - - ndbrequire(cmasterNodeId == getOwnNodeId()); - tabPtr.p->tabStatus = TabRecord::TS_CREATING; - - initTableFile(tabPtr); - FileRecordPtr filePtr; - filePtr.i = tabPtr.p->tabFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_TABLE; - return; - } - - /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ - /* AT THE TIME OF INITIATING THE FILE OF TABLE */ - /* DESCRIPTION IS CREATED FOR APPROPRIATE SIZE. EACH */ - /* EACH RECORD IN THIS FILE HAS THE INFORMATION ABOUT */ - /* ONE TABLE. THE POINTER TO THIS RECORD IS THE TABLE */ - /* REFERENCE. 
IN THE BEGINNING ALL RECORDS ARE CREATED          */
-  /* BUT THEY DO NOT HAVE ANY INFORMATION ABOUT ANY TABLE*/
-  /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
-  tabPtr.p->tabStatus = TabRecord::TS_CREATING;
-  if(req->loggedTable)
-    tabPtr.p->tabStorage= TabRecord::ST_NORMAL;
-  else if(req->temporaryTable)
-    tabPtr.p->tabStorage= TabRecord::ST_TEMPORARY;
-  else
-    tabPtr.p->tabStorage= TabRecord::ST_NOLOGGING;
-  tabPtr.p->kvalue = req->kValue;
-
-  switch ((DictTabInfo::FragmentType)fragType)
-  {
-    case DictTabInfo::AllNodesSmallTable:
-    case DictTabInfo::AllNodesMediumTable:
-    case DictTabInfo::AllNodesLargeTable:
-    case DictTabInfo::SingleFragment:
-      jam();
-    case DictTabInfo::DistrKeyLin:
-      jam();
-      tabPtr.p->method= TabRecord::LINEAR_HASH;
-      break;
-    case DictTabInfo::DistrKeyHash:
-    case DictTabInfo::DistrKeyUniqueHashIndex:
-    case DictTabInfo::DistrKeyOrderedIndex:
-      jam();
-      tabPtr.p->method= TabRecord::NORMAL_HASH;
-      break;
-    case DictTabInfo::UserDefined:
-      jam();
-      tabPtr.p->method= TabRecord::USER_DEFINED;
-      break;
-    default:
-      ndbrequire(false);
-  }
-
-  union {
-    Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
-    Uint32 align;
-  };
-  SegmentedSectionPtr fragDataPtr;
-  LINT_INIT(fragDataPtr.i);
-  LINT_INIT(fragDataPtr.sz);
-  signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
-  copy((Uint32*)fragments, fragDataPtr);
-  releaseSections(signal);
-
-  const Uint32 noReplicas = fragments[0];
-  const Uint32 noFragments = fragments[1];
-
-  tabPtr.p->noOfBackups = noReplicas - 1;
-  tabPtr.p->totalfragments = noFragments;
-  ndbrequire(noReplicas == cnoReplicas); // Only allowed
-
-  if (ERROR_INSERTED(7173)) {
-    CLEAR_ERROR_INSERT_VALUE;
-    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
-    return;
-  }
-  if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
-    jam();
-    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
-    return;
-  }//if
-  if (noFragments > cremainingfrags) {
-    jam();
-    addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
-    return;
-  }//if
-
-  Uint32 logTotalFragments = 1;
-  while (logTotalFragments <= tabPtr.p->totalfragments) {
-    jam();
-    logTotalFragments <<= 1;
-  }
-  logTotalFragments >>= 1;
-  tabPtr.p->mask = logTotalFragments - 1;
-  tabPtr.p->hashpointer = tabPtr.p->totalfragments - logTotalFragments;
-  allocFragments(tabPtr.p->totalfragments, tabPtr);
-
-  Uint32 index = 2;
-  for (Uint32 fragId = 0; fragId < noFragments; fragId++) {
-    jam();
-    FragmentstorePtr fragPtr;
-    Uint32 activeIndex = 0;
-    getFragstore(tabPtr.p, fragId, fragPtr);
-    fragPtr.p->m_log_part_id = fragments[index++];
-    fragPtr.p->preferredPrimary = fragments[index];
-
-    for (Uint32 i = 0; i < noReplicas; i++) {
-      const Uint32 nodeId = fragments[index++];
-      ReplicaRecordPtr replicaPtr;
-      allocStoredReplica(fragPtr, replicaPtr, nodeId);
-      if (getNodeStatus(nodeId) == NodeRecord::ALIVE) {
-        jam();
-        ndbrequire(activeIndex < MAX_REPLICAS);
-        fragPtr.p->activeNodes[activeIndex] = nodeId;
-        activeIndex++;
-      } else {
-        jam();
-        removeStoredReplica(fragPtr, replicaPtr);
-        linkOldStoredReplica(fragPtr, replicaPtr);
-      }//if
-    }//for
-    fragPtr.p->fragReplicas = activeIndex;
-    ndbrequire(activeIndex > 0 && fragPtr.p->storedReplicas != RNIL);
-  }
-  initTableFile(tabPtr);
-  tabPtr.p->tabCopyStatus = TabRecord::CS_ADD_TABLE_MASTER;
-  signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES;
-  signal->theData[1] = tabPtr.i;
-  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-}
-
-void
-Dbdih::addTable_closeConf(Signal * signal, Uint32 tabPtrI){
-  TabRecordPtr tabPtr;
-  tabPtr.i = tabPtrI;
-  ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
-  ConnectRecordPtr connectPtr;
-  connectPtr.i = tabPtr.p->connectrec;
-  ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
-  sendAddFragreq(signal, connectPtr, tabPtr, 0);
-}
-
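The mask/hashpointer pair set up in execDIADDTABREQ above drives the LINEAR_HASH fragment lookup that execDIGETNODESREQ performs further down. A minimal standalone sketch of that mapping, assuming a hypothetical table with 6 fragments (the helper name and the printf driver are illustrative, not part of the removed block):

#include <cstdio>

// Illustration only: maps a hash value onto one of `totalfragments` fragments
// the way DIH does for TabRecord::LINEAR_HASH tables.
static unsigned linearHashFragment(unsigned hashValue,
                                   unsigned mask,        // 2^k - 1
                                   unsigned hashpointer) // totalfragments - 2^k
{
  unsigned fragId = hashValue & mask;
  if (fragId < hashpointer)
    fragId = hashValue & ((mask << 1) + 1);  // low fragment ids use one more bit
  return fragId;
}

int main()
{
  const unsigned totalfragments = 6;                   // assumed example value
  unsigned pow2 = 1;
  while (pow2 <= totalfragments) pow2 <<= 1;
  pow2 >>= 1;                                          // largest power of two <= 6, i.e. 4
  const unsigned mask = pow2 - 1;                      // 3
  const unsigned hashpointer = totalfragments - pow2;  // 2
  for (unsigned h = 0; h < 16; h++)
    std::printf("hash %2u -> fragment %u\n",
                h, linearHashFragment(h, mask, hashpointer));
  return 0;
}

With these values every hash lands on a fragment id below 6, which is the linear-hashing property the mask/hashpointer computation is after.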
-void
-Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr,
-                      TabRecordPtr tabPtr, Uint32 fragId){
-  jam();
-  const Uint32 fragCount = tabPtr.p->totalfragments;
-  ReplicaRecordPtr replicaPtr;
-  LINT_INIT(replicaPtr.p);
-  replicaPtr.i = RNIL;
-  FragmentstorePtr fragPtr;
-  for(; fragId < fragCount; fragId++){
-    jam();
-    getFragstore(tabPtr.p, fragId, fragPtr);
-
-    replicaPtr.i = fragPtr.p->storedReplicas;
-    while(replicaPtr.i != RNIL){
-      jam();
-      ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
-      if(replicaPtr.p->procNode == getOwnNodeId()){
-        break;
-      }
-      replicaPtr.i = replicaPtr.p->nextReplica;
-    }
-
-    if(replicaPtr.i != RNIL){
-      jam();
-      break;
-    }
-
-    replicaPtr.i = fragPtr.p->oldStoredReplicas;
-    while(replicaPtr.i != RNIL){
-      jam();
-      ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
-      if(replicaPtr.p->procNode == getOwnNodeId()){
-        break;
-      }
-      replicaPtr.i = replicaPtr.p->nextReplica;
-    }
-
-    if(replicaPtr.i != RNIL){
-      jam();
-      break;
-    }
-  }
-
-  if(replicaPtr.i != RNIL){
-    jam();
-    ndbrequire(fragId < fragCount);
-    ndbrequire(replicaPtr.p->procNode == getOwnNodeId());
-
-    Uint32 requestInfo = 0;
-    if(tabPtr.p->tabStorage != TabRecord::ST_NORMAL){
-      requestInfo |= LqhFragReq::TemporaryTable;
-    }
-
-    if(getNodeState().getNodeRestartInProgress()){
-      requestInfo |= LqhFragReq::CreateInRunning;
-    }
-
-    AddFragReq* const req = (AddFragReq*)signal->getDataPtr();
-    req->dihPtr = connectPtr.i;
-    req->senderData = connectPtr.p->userpointer;
-    req->fragmentId = fragId;
-    req->requestInfo = requestInfo;
-    req->tableId = tabPtr.i;
-    req->nextLCP = 0;
-    req->nodeId = getOwnNodeId();
-    req->totalFragments = fragCount;
-    req->startGci = SYSFILE->newestRestorableGCI;
-    req->logPartId = fragPtr.p->m_log_part_id;
-    sendSignal(DBDICT_REF, GSN_ADD_FRAGREQ, signal,
-               AddFragReq::SignalLength, JBB);
-    return;
-  }
-
-  // Done
-  DiAddTabConf * const conf = (DiAddTabConf*)signal->getDataPtr();
-  conf->senderData = connectPtr.p->userpointer;
-  sendSignal(connectPtr.p->userblockref, GSN_DIADDTABCONF, signal,
-             DiAddTabConf::SignalLength, JBB);
-
-  // Release
-  release_connect(connectPtr);
-}
-void
-Dbdih::release_connect(ConnectRecordPtr ptr)
-{
-  ptr.p->userblockref = ZNIL;
-  ptr.p->userpointer = RNIL;
-  ptr.p->connectState = ConnectRecord::FREE;
-  ptr.p->nfConnect = cfirstconnect;
-  cfirstconnect = ptr.i;
-}
-
-void
-Dbdih::execADD_FRAGCONF(Signal* signal){
-  jamEntry();
-  AddFragConf * const conf = (AddFragConf*)signal->getDataPtr();
-
-  ConnectRecordPtr connectPtr;
-  connectPtr.i = conf->dihPtr;
-  ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
-  TabRecordPtr tabPtr;
-  tabPtr.i = connectPtr.p->table;
-  ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
-
-  sendAddFragreq(signal, connectPtr, tabPtr, conf->fragId + 1);
-}
-
-void
-Dbdih::execADD_FRAGREF(Signal* signal){
-  jamEntry();
-  AddFragRef * const ref = (AddFragRef*)signal->getDataPtr();
-
-  ConnectRecordPtr connectPtr;
-  connectPtr.i = ref->dihPtr;
-  ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
-
-  {
-    DiAddTabRef * const ref = (DiAddTabRef*)signal->getDataPtr();
-    ref->senderData = connectPtr.p->userpointer;
-    ref->errorCode = ~0;
-    sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal,
-               DiAddTabRef::SignalLength, JBB);
-  }
-
-  // Release
-  release_connect(connectPtr);
-}
-
-/*
-  3.7.1.3   R E F U S E
-  *********************
-  */
-void Dbdih::addtabrefuseLab(Signal* signal, ConnectRecordPtr connectPtr, Uint32 errorCode)
-{
-  signal->theData[0] = connectPtr.p->userpointer;
-  signal->theData[1] = errorCode;
-  sendSignal(connectPtr.p->userblockref, GSN_DIADDTABREF, signal, 2, JBB);
-
release_connect(connectPtr); - return; -}//Dbdih::addtabrefuseLab() - -/* - 3.7.2 A D D T A B L E D U P L I C A T I O N - ************************************************* - */ -/* - 3.7.2.1 A D D T A B L E D U P L I C A T I O N R E Q U E S T - *******************************************************************= - */ - -/* - D E L E T E T A B L E - **********************= - */ -/*****************************************************************************/ -/*********** DELETE TABLE MODULE *************/ -/*****************************************************************************/ -void -Dbdih::execDROP_TAB_REQ(Signal* signal){ - jamEntry(); - DropTabReq* req = (DropTabReq*)signal->getDataPtr(); - - TabRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - tabPtr.p->m_dropTab.tabUserRef = req->senderRef; - tabPtr.p->m_dropTab.tabUserPtr = req->senderData; - - DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType; - - switch(rt){ - case DropTabReq::OnlineDropTab: - jam(); - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING); - releaseTable(tabPtr); - break; - case DropTabReq::CreateTabDrop: - jam(); - releaseTable(tabPtr); - break; - case DropTabReq::RestartDropTab: - break; - } - - startDeleteFile(signal, tabPtr); -} - -void Dbdih::startDeleteFile(Signal* signal, TabRecordPtr tabPtr) -{ - if (tabPtr.p->tabFile[0] == RNIL) { - jam(); - initTableFile(tabPtr); - }//if - openTableFileForDelete(signal, tabPtr.p->tabFile[0]); -}//Dbdih::startDeleteFile() - -void Dbdih::openTableFileForDelete(Signal* signal, Uint32 fileIndex) -{ - FileRecordPtr filePtr; - filePtr.i = fileIndex; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_OPEN_FOR_DELETE; -}//Dbdih::openTableFileForDelete() - -void Dbdih::tableOpenLab(Signal* signal, FileRecordPtr filePtr) -{ - closeFileDelete(signal, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_CLOSE_DELETE; - return; -}//Dbdih::tableOpenLab() - -void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if (filePtr.i == tabPtr.p->tabFile[0]) { - jam(); - openTableFileForDelete(signal, tabPtr.p->tabFile[1]); - return; - }//if - ndbrequire(filePtr.i == tabPtr.p->tabFile[1]); - - releaseFile(tabPtr.p->tabFile[0]); - releaseFile(tabPtr.p->tabFile[1]); - tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL; - - tabPtr.p->tabStatus = TabRecord::TS_IDLE; - - DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend(); - dropConf->senderRef = reference(); - dropConf->senderData = tabPtr.p->m_dropTab.tabUserPtr; - dropConf->tableId = tabPtr.i; - sendSignal(tabPtr.p->m_dropTab.tabUserRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); - - tabPtr.p->m_dropTab.tabUserPtr = RNIL; - tabPtr.p->m_dropTab.tabUserRef = 0; -}//Dbdih::tableDeleteLab() - - -void Dbdih::releaseTable(TabRecordPtr tabPtr) -{ - FragmentstorePtr fragPtr; - if (tabPtr.p->noOfFragChunks > 0) { - for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) { - jam(); - getFragstore(tabPtr.p, fragId, fragPtr); - releaseReplicas(fragPtr.p->storedReplicas); - releaseReplicas(fragPtr.p->oldStoredReplicas); - }//for - releaseFragments(tabPtr); - } - if (tabPtr.p->tabFile[0] != RNIL) { - jam(); - releaseFile(tabPtr.p->tabFile[0]); - releaseFile(tabPtr.p->tabFile[1]); - tabPtr.p->tabFile[0] = tabPtr.p->tabFile[1] = RNIL; - 
}//if -}//Dbdih::releaseTable() - -void Dbdih::releaseReplicas(Uint32 replicaPtrI) -{ - ReplicaRecordPtr replicaPtr; - replicaPtr.i = replicaPtrI; - jam(); - while (replicaPtr.i != RNIL) { - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - Uint32 tmp = replicaPtr.p->nextReplica; - replicaPtr.p->nextReplica = cfirstfreeReplica; - cfirstfreeReplica = replicaPtr.i; - replicaPtr.i = tmp; - cnoFreeReplicaRec++; - }//while -}//Dbdih::releaseReplicas() - -void Dbdih::seizeReplicaRec(ReplicaRecordPtr& replicaPtr) -{ - replicaPtr.i = cfirstfreeReplica; - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - cfirstfreeReplica = replicaPtr.p->nextReplica; - cnoFreeReplicaRec--; - replicaPtr.p->nextReplica = RNIL; -}//Dbdih::seizeReplicaRec() - -void Dbdih::releaseFile(Uint32 fileIndex) -{ - FileRecordPtr filePtr; - filePtr.i = fileIndex; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - filePtr.p->nextFile = cfirstfreeFile; - cfirstfreeFile = filePtr.i; -}//Dbdih::releaseFile() - - -void Dbdih::execALTER_TAB_REQ(Signal * signal) -{ - AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 changeMask = req->changeMask; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - const Uint32 gci = req->gci; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) req->requestType; - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->schemaVersion = tableVersion; - - // Request handled successfully - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->changeMask = changeMask; - conf->tableId = tableId; - conf->tableVersion = tableVersion; - conf->gci = gci; - conf->requestType = requestType; - sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal, - AlterTabConf::SignalLength, JBB); -} - -/* - G E T N O D E S - **********************= - */ -/*****************************************************************************/ -/* ********** TRANSACTION HANDLING MODULE *************/ -/*****************************************************************************/ -/* - 3.8.1 G E T N O D E S R E Q U E S T - ****************************************** - Asks what nodes should be part of a transaction. 
-*/ -void Dbdih::execDIGETNODESREQ(Signal* signal) -{ - const DiGetNodesReq * const req = (DiGetNodesReq *)&signal->theData[0]; - FragmentstorePtr fragPtr; - TabRecordPtr tabPtr; - tabPtr.i = req->tableId; - Uint32 hashValue = req->hashValue; - Uint32 ttabFileSize = ctabFileSize; - Uint32 fragId; - DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0]; - TabRecord* regTabDesc = tabRecord; - jamEntry(); - ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc); - if (tabPtr.p->method == TabRecord::LINEAR_HASH) - { - jam(); - fragId = hashValue & tabPtr.p->mask; - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE); - if (fragId < tabPtr.p->hashpointer) { - jam(); - fragId = hashValue & ((tabPtr.p->mask << 1) + 1); - }//if - } - else if (tabPtr.p->method == TabRecord::NORMAL_HASH) - { - jam(); - fragId= hashValue % tabPtr.p->totalfragments; - } - else - { - jam(); - ndbassert(tabPtr.p->method == TabRecord::USER_DEFINED); - fragId= hashValue; - if (fragId >= tabPtr.p->totalfragments) - { - jam(); - conf->zero= 1; //Indicate error; - signal->theData[1]= ZUNDEFINED_FRAGMENT_ERROR; - return; - } - } - getFragstore(tabPtr.p, fragId, fragPtr); - Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes); - Uint32 sig2 = (nodeCount - 1) + - (fragPtr.p->distributionKey << 16); - conf->zero = 0; - conf->reqinfo = sig2; - conf->fragId = fragId; -}//Dbdih::execDIGETNODESREQ() - -Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[]) -{ - Uint32 nodeCount = 0; - for (Uint32 i = 0; i < fragPtr->fragReplicas; i++) { - jam(); - NodeRecordPtr nodePtr; - ndbrequire(i < MAX_REPLICAS); - nodePtr.i = fragPtr->activeNodes[i]; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->useInTransactions) { - jam(); - nodes[nodeCount] = nodePtr.i; - nodeCount++; - }//if - }//for - ndbrequire(nodeCount > 0); - return nodeCount; -}//Dbdih::extractNodeInfo() - -void -Dbdih::getFragstore(TabRecord * tab, //In parameter - Uint32 fragNo, //In parameter - FragmentstorePtr & fragptr) //Out parameter -{ - FragmentstorePtr fragPtr; - Uint32 chunkNo = fragNo >> LOG_NO_OF_FRAGS_PER_CHUNK; - Uint32 chunkIndex = fragNo & (NO_OF_FRAGS_PER_CHUNK - 1); - Uint32 TfragstoreFileSize = cfragstoreFileSize; - Fragmentstore* TfragStore = fragmentstore; - if (chunkNo < MAX_NDB_NODES) { - fragPtr.i = tab->startFid[chunkNo] + chunkIndex; - ptrCheckGuard(fragPtr, TfragstoreFileSize, TfragStore); - fragptr = fragPtr; - return; - }//if - ndbrequire(false); -}//Dbdih::getFragstore() - -void Dbdih::allocFragments(Uint32 noOfFragments, TabRecordPtr tabPtr) -{ - FragmentstorePtr fragPtr; - Uint32 noOfChunks = (noOfFragments + (NO_OF_FRAGS_PER_CHUNK - 1)) >> LOG_NO_OF_FRAGS_PER_CHUNK; - ndbrequire(cremainingfrags >= noOfFragments); - for (Uint32 i = 0; i < noOfChunks; i++) { - jam(); - Uint32 baseFrag = cfirstfragstore; - tabPtr.p->startFid[i] = baseFrag; - fragPtr.i = baseFrag; - ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); - cfirstfragstore = fragPtr.p->nextFragmentChunk; - cremainingfrags -= NO_OF_FRAGS_PER_CHUNK; - for (Uint32 j = 0; j < NO_OF_FRAGS_PER_CHUNK; j++) { - jam(); - fragPtr.i = baseFrag + j; - ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); - initFragstore(fragPtr); - }//if - }//for - tabPtr.p->noOfFragChunks = noOfChunks; -}//Dbdih::allocFragments() - -void Dbdih::releaseFragments(TabRecordPtr tabPtr) -{ - FragmentstorePtr fragPtr; - for (Uint32 i = 0; i < tabPtr.p->noOfFragChunks; i++) { - jam(); - Uint32 baseFrag = tabPtr.p->startFid[i]; - fragPtr.i = baseFrag; - 
ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); - fragPtr.p->nextFragmentChunk = cfirstfragstore; - cfirstfragstore = baseFrag; - tabPtr.p->startFid[i] = RNIL; - cremainingfrags += NO_OF_FRAGS_PER_CHUNK; - }//for - tabPtr.p->noOfFragChunks = 0; -}//Dbdih::releaseFragments() - -void Dbdih::initialiseFragstore() -{ - Uint32 i; - FragmentstorePtr fragPtr; - for (i = 0; i < cfragstoreFileSize; i++) { - fragPtr.i = i; - ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); - initFragstore(fragPtr); - }//for - Uint32 noOfChunks = cfragstoreFileSize >> LOG_NO_OF_FRAGS_PER_CHUNK; - fragPtr.i = 0; - cfirstfragstore = RNIL; - cremainingfrags = 0; - for (i = 0; i < noOfChunks; i++) { - refresh_watch_dog(); - ptrCheckGuard(fragPtr, cfragstoreFileSize, fragmentstore); - fragPtr.p->nextFragmentChunk = cfirstfragstore; - cfirstfragstore = fragPtr.i; - fragPtr.i += NO_OF_FRAGS_PER_CHUNK; - cremainingfrags += NO_OF_FRAGS_PER_CHUNK; - }//for -}//Dbdih::initialiseFragstore() - -/* - 3.9 V E R I F I C A T I O N - ****************************= - */ -/****************************************************************************/ -/* ********** VERIFICATION SUB-MODULE *************/ -/****************************************************************************/ -/* - 3.9.1 R E C E I V I N G O F V E R I F I C A T I O N R E Q U E S T - ************************************************************************* - */ -void Dbdih::execDIVERIFYREQ(Signal* signal) -{ - - jamEntry(); - if ((getBlockCommit() == false) && - (cfirstVerifyQueue == RNIL)) { - jam(); - /*-----------------------------------------------------------------------*/ - // We are not blocked and the verify queue was empty currently so we can - // simply reply back to TC immediately. The method was called with - // EXECUTE_DIRECT so we reply back by setting signal data and returning. - // theData[0] already contains the correct information so - // we need not touch it. - /*-----------------------------------------------------------------------*/ - signal->theData[1] = currentgcp; - signal->theData[2] = 0; - return; - }//if - /*-------------------------------------------------------------------------*/ - // Since we are blocked we need to put this operation last in the verify - // queue to ensure that operation starts up in the correct order. 
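The verify queue handled below is an intrusive FIFO: each ApiConnectRecord carries a nextApi link, and DIH keeps head and tail indexes in cfirstVerifyQueue/clastVerifyQueue. A minimal sketch of that pattern, using a plain vector and a stand-in RNIL value rather than the block's own record pool:

#include <cstdint>
#include <vector>

static const uint32_t RNIL_ = 0xffffffff;      // stand-in for the kernel's RNIL
struct ApiRec { uint32_t nextApi = RNIL_; };   // only the intrusive link matters here

struct VerifyQueue {
  std::vector<ApiRec>* pool;                   // plays the role of apiConnectRecord[]
  uint32_t first = RNIL_;                      // cfirstVerifyQueue
  uint32_t last  = RNIL_;                      // clastVerifyQueue

  void enqueue(uint32_t i) {                   // the tail insert done in execDIVERIFYREQ
    (*pool)[i].nextApi = RNIL_;
    if (last == RNIL_) first = i;
    else (*pool)[last].nextApi = i;
    last = i;
  }
  uint32_t dequeue() {                         // what emptyverificbuffer() does per entry
    uint32_t i = first;
    first = (*pool)[i].nextApi;
    if (first == RNIL_) last = RNIL_;
    return i;
  }
};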
- /*-------------------------------------------------------------------------*/ - ApiConnectRecordPtr tmpApiConnectptr; - ApiConnectRecordPtr localApiConnectptr; - - cverifyQueueCounter++; - localApiConnectptr.i = signal->theData[0]; - tmpApiConnectptr.i = clastVerifyQueue; - ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord); - localApiConnectptr.p->apiGci = cnewgcp; - localApiConnectptr.p->nextApi = RNIL; - clastVerifyQueue = localApiConnectptr.i; - if (tmpApiConnectptr.i == RNIL) { - jam(); - cfirstVerifyQueue = localApiConnectptr.i; - } else { - jam(); - ptrCheckGuard(tmpApiConnectptr, capiConnectFileSize, apiConnectRecord); - tmpApiConnectptr.p->nextApi = localApiConnectptr.i; - }//if - emptyverificbuffer(signal, false); - signal->theData[2] = 1; // Indicate no immediate return - return; -}//Dbdih::execDIVERIFYREQ() - -void Dbdih::execDI_FCOUNTREQ(Signal* signal) -{ - DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtr(); - ConnectRecordPtr connectPtr; - TabRecordPtr tabPtr; - const BlockReference senderRef = signal->senderBlockRef(); - const Uint32 senderData = req->m_senderData; - jamEntry(); - connectPtr.i = req->m_connectionData; - tabPtr.i = req->m_tableRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) - { - DihFragCountRef* ref = (DihFragCountRef*)signal->getDataPtrSend(); - //connectPtr.i == RNIL -> question without connect record - if(connectPtr.i == RNIL) - ref->m_connectionData = RNIL; - else - { - jam(); - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - ref->m_connectionData = connectPtr.p->userpointer; - } - ref->m_tableRef = tabPtr.i; - ref->m_senderData = senderData; - ref->m_error = DihFragCountRef::ErroneousTableState; - ref->m_tableStatus = tabPtr.p->tabStatus; - sendSignal(senderRef, GSN_DI_FCOUNTREF, signal, - DihFragCountRef::SignalLength, JBB); - return; - } - - if(connectPtr.i != RNIL){ - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - if (connectPtr.p->connectState == ConnectRecord::INUSE) { - jam(); - DihFragCountConf* conf = (DihFragCountConf*)signal->getDataPtrSend(); - conf->m_connectionData = connectPtr.p->userpointer; - conf->m_tableRef = tabPtr.i; - conf->m_senderData = senderData; - conf->m_fragmentCount = tabPtr.p->totalfragments; - conf->m_noOfBackups = tabPtr.p->noOfBackups; - sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTCONF, signal, - DihFragCountConf::SignalLength, JBB); - return; - }//if - DihFragCountRef* ref = (DihFragCountRef*)signal->getDataPtrSend(); - ref->m_connectionData = connectPtr.p->userpointer; - ref->m_tableRef = tabPtr.i; - ref->m_senderData = senderData; - ref->m_error = DihFragCountRef::ErroneousTableState; - ref->m_tableStatus = tabPtr.p->tabStatus; - sendSignal(connectPtr.p->userblockref, GSN_DI_FCOUNTREF, signal, - DihFragCountRef::SignalLength, JBB); - return; - }//if - DihFragCountConf* conf = (DihFragCountConf*)signal->getDataPtrSend(); - //connectPtr.i == RNIL -> question without connect record - conf->m_connectionData = RNIL; - conf->m_tableRef = tabPtr.i; - conf->m_senderData = senderData; - conf->m_fragmentCount = tabPtr.p->totalfragments; - conf->m_noOfBackups = tabPtr.p->noOfBackups; - sendSignal(senderRef, GSN_DI_FCOUNTCONF, signal, - DihFragCountConf::SignalLength, JBB); -}//Dbdih::execDI_FCOUNTREQ() - -void Dbdih::execDIGETPRIMREQ(Signal* signal) -{ - FragmentstorePtr fragPtr; - ConnectRecordPtr connectPtr; - TabRecordPtr tabPtr; - jamEntry(); - Uint32 passThrough = signal->theData[1]; - 
tabPtr.i = signal->theData[2]; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if (DictTabInfo::isOrderedIndex(tabPtr.p->tableType)) { - jam(); - tabPtr.i = tabPtr.p->primaryTableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - } - Uint32 fragId = signal->theData[3]; - - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE); - connectPtr.i = signal->theData[0]; - if(connectPtr.i != RNIL) - { - jam(); - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - signal->theData[0] = connectPtr.p->userpointer; - } - else - { - jam(); - signal->theData[0] = RNIL; - } - - Uint32 nodes[MAX_REPLICAS]; - getFragstore(tabPtr.p, fragId, fragPtr); - Uint32 count = extractNodeInfo(fragPtr.p, nodes); - - signal->theData[1] = passThrough; - signal->theData[2] = nodes[0]; - signal->theData[3] = nodes[1]; - signal->theData[4] = nodes[2]; - signal->theData[5] = nodes[3]; - signal->theData[6] = count; - signal->theData[7] = tabPtr.i; - signal->theData[8] = fragId; - - const BlockReference senderRef = signal->senderBlockRef(); - sendSignal(senderRef, GSN_DIGETPRIMCONF, signal, 9, JBB); -}//Dbdih::execDIGETPRIMREQ() - -/****************************************************************************/ -/* ********** GLOBAL-CHECK-POINT HANDLING MODULE *************/ -/****************************************************************************/ -/* - 3.10 G L O B A L C H E C K P O I N T ( IN M A S T E R R O L E) - ******************************************************************* - */ -void Dbdih::checkGcpStopLab(Signal* signal) -{ - Uint32 tgcpStatus; - - tgcpStatus = cgcpStatus; - if (tgcpStatus == coldGcpStatus) { - jam(); - if (coldGcpId == cnewgcp) { - jam(); - if (cgcpStatus != GCP_READY) { - jam(); - cgcpSameCounter++; - if (cgcpSameCounter == 1200) { - jam(); -#ifdef VM_TRACE - g_eventLogger.error("System crash due to GCP Stop in state = %u", - (Uint32) cgcpStatus); -#endif - crashSystemAtGcpStop(signal, false); - return; - }//if - } else { - jam(); - if (cgcpOrderBlocked == 0) { - jam(); - cgcpSameCounter++; - if (cgcpSameCounter == 1200) { - jam(); -#ifdef VM_TRACE - g_eventLogger.error("System crash due to GCP Stop in state = %u", - (Uint32) cgcpStatus); -#endif - crashSystemAtGcpStop(signal, false); - return; - }//if - } else { - jam(); - cgcpSameCounter = 0; - }//if - }//if - } else { - jam(); - cgcpSameCounter = 0; - }//if - } else { - jam(); - cgcpSameCounter = 0; - }//if - signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP; - signal->theData[1] = coldGcpStatus; - signal->theData[2] = cgcpStatus; - signal->theData[3] = coldGcpId; - signal->theData[4] = cnewgcp; - signal->theData[5] = cgcpSameCounter; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 6); - coldGcpStatus = cgcpStatus; - coldGcpId = cnewgcp; - return; -}//Dbdih::checkGcpStopLab() - -void Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime) -{ - if ((cgcpOrderBlocked == 1) || - (c_nodeStartMaster.blockGcp == true) || - (cfirstVerifyQueue != RNIL)) { - /*************************************************************************/ - // 1: Global Checkpoint has been stopped by management command - // 2: Global Checkpoint is blocked by node recovery activity - // 3: Previous global checkpoint is not yet completed. - // All this means that global checkpoint cannot start now. - /*************************************************************************/ - jam(); - cgcpStartCounter++; - signal->theData[0] = DihContinueB::ZSTART_GCP; - signal->theData[1] = aWaitTime > 100 ? 
(aWaitTime - 100) : 0; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - return; - }//if - if (cstartGcpNow == false && aWaitTime > 100){ - /*************************************************************************/ - // We still have more than 100 milliseconds before we start the next and - // nobody has ordered immediate start of a global checkpoint. - // During initial start we will use continuos global checkpoints to - // speed it up since we need to complete a global checkpoint after - // inserting a lot of records. - /*************************************************************************/ - jam(); - cgcpStartCounter++; - signal->theData[0] = DihContinueB::ZSTART_GCP; - signal->theData[1] = (aWaitTime - 100); - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - return; - }//if - cgcpStartCounter = 0; - cstartGcpNow = false; - /***************************************************************************/ - // Report the event that a global checkpoint has started. - /***************************************************************************/ - signal->theData[0] = NDB_LE_GlobalCheckpointStarted; //Event type - signal->theData[1] = cnewgcp; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - CRASH_INSERTION(7000); - cnewgcp++; - signal->setTrace(TestOrd::TraceGlobalCheckpoint); - sendLoopMacro(GCP_PREPARE, sendGCP_PREPARE); - cgcpStatus = GCP_PREPARE_SENT; -}//Dbdih::startGcpLab() - -void Dbdih::execGCP_PREPARECONF(Signal* signal) -{ - jamEntry(); - Uint32 senderNodeId = signal->theData[0]; - Uint32 gci = signal->theData[1]; - ndbrequire(gci == cnewgcp); - receiveLoopMacro(GCP_PREPARE, senderNodeId); - //------------------------------------------------------------- - // We have now received all replies. We are ready to continue - // with committing the global checkpoint. - //------------------------------------------------------------- - gcpcommitreqLab(signal); -}//Dbdih::execGCP_PREPARECONF() - -void Dbdih::gcpcommitreqLab(Signal* signal) -{ - CRASH_INSERTION(7001); - sendLoopMacro(GCP_COMMIT, sendGCP_COMMIT); - cgcpStatus = GCP_COMMIT_SENT; - return; -}//Dbdih::gcpcommitreqLab() - -void Dbdih::execGCP_NODEFINISH(Signal* signal) -{ - jamEntry(); - const Uint32 senderNodeId = signal->theData[0]; - const Uint32 gci = signal->theData[1]; - const Uint32 failureNr = signal->theData[2]; - if (!isMaster()) { - jam(); - ndbrequire(failureNr > cfailurenr); - //------------------------------------------------------------- - // Another node thinks we are master. This could happen when he - // has heard of a node failure which I have not heard of. Ignore - // signal in this case since we will discover it by sending - // MASTER_GCPREQ to the node. - //------------------------------------------------------------- - return; - } else if (cmasterState == MASTER_TAKE_OVER_GCP) { - jam(); - //------------------------------------------------------------- - // We are currently taking over as master. Ignore - // signal in this case since we will discover it in reception of - // MASTER_GCPCONF. - //------------------------------------------------------------- - return; - } else { - ndbrequire(cmasterState == MASTER_ACTIVE); - }//if - ndbrequire(gci == coldgcp); - receiveLoopMacro(GCP_COMMIT, senderNodeId); - //------------------------------------------------------------- - // We have now received all replies. We are ready to continue - // with saving the global checkpoint to disk. 
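For orientation, the master-side global checkpoint round that the handlers in this section drive can be reduced to the cgcpStatus transitions below; a compact sketch in which the enum and function names are illustrative only:

// Phase names mirror the cgcpStatus values used in this file.
enum class GcpPhase { READY, PREPARE_SENT, COMMIT_SENT, NODE_FINISHED, SAVE_LQH_FINISHED };

// Advance one phase once all participants have replied to the current one.
static GcpPhase nextGcpPhase(GcpPhase p)
{
  switch (p) {
  case GcpPhase::READY:             return GcpPhase::PREPARE_SENT;      // startGcpLab: GCP_PREPARE sent
  case GcpPhase::PREPARE_SENT:      return GcpPhase::COMMIT_SENT;       // all GCP_PREPARECONF received
  case GcpPhase::COMMIT_SENT:       return GcpPhase::NODE_FINISHED;     // all GCP_NODEFINISH received
  case GcpPhase::NODE_FINISHED:     return GcpPhase::SAVE_LQH_FINISHED; // all GCP_SAVECONF/GCP_SAVEREF in
  case GcpPhase::SAVE_LQH_FINISHED: return GcpPhase::READY;             // COPY_GCIREQ round completed
  }
  return GcpPhase::READY;
}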
- //------------------------------------------------------------- - CRASH_INSERTION(7002); - gcpsavereqLab(signal); - return; -}//Dbdih::execGCP_NODEFINISH() - -void Dbdih::gcpsavereqLab(Signal* signal) -{ - sendLoopMacro(GCP_SAVEREQ, sendGCP_SAVEREQ); - cgcpStatus = GCP_NODE_FINISHED; -}//Dbdih::gcpsavereqLab() - -void Dbdih::execGCP_SAVECONF(Signal* signal) -{ - jamEntry(); - const GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0]; - ndbrequire(saveConf->gci == coldgcp); - ndbrequire(saveConf->nodeId == saveConf->dihPtr); - SYSFILE->lastCompletedGCI[saveConf->nodeId] = saveConf->gci; - GCP_SAVEhandling(signal, saveConf->nodeId); -}//Dbdih::execGCP_SAVECONF() - -void Dbdih::execGCP_SAVEREF(Signal* signal) -{ - jamEntry(); - const GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0]; - ndbrequire(saveRef->gci == coldgcp); - ndbrequire(saveRef->nodeId == saveRef->dihPtr); - /** - * Only allow reason not to save - */ - ndbrequire(saveRef->errorCode == GCPSaveRef::NodeShutdownInProgress || - saveRef->errorCode == GCPSaveRef::FakedSignalDueToNodeFailure || - saveRef->errorCode == GCPSaveRef::NodeRestartInProgress); - GCP_SAVEhandling(signal, saveRef->nodeId); -}//Dbdih::execGCP_SAVEREF() - -void Dbdih::GCP_SAVEhandling(Signal* signal, Uint32 nodeId) -{ - receiveLoopMacro(GCP_SAVEREQ, nodeId); - /*-------------------------------------------------------------------------*/ - // All nodes have replied. We are ready to update the system file. - /*-------------------------------------------------------------------------*/ - cgcpStatus = GCP_SAVE_LQH_FINISHED; - CRASH_INSERTION(7003); - checkToCopy(); - /**------------------------------------------------------------------------ - * SET NEW RECOVERABLE GCI. ALSO RESET RESTART COUNTER TO ZERO. - * THIS INDICATES THAT THE SYSTEM HAS BEEN RECOVERED AND SURVIVED AT - * LEAST ONE GLOBAL CHECKPOINT PERIOD. WE WILL USE THIS PARAMETER TO - * SET BACK THE RESTART GCI IF WE ENCOUNTER MORE THAN ONE UNSUCCESSFUL - * RESTART. 
- *------------------------------------------------------------------------*/ - SYSFILE->newestRestorableGCI = coldgcp; - if(Sysfile::getInitialStartOngoing(SYSFILE->systemRestartBits) && - getNodeState().startLevel == NodeState::SL_STARTED){ - jam(); -#if 0 - g_eventLogger.info("Dbdih: Clearing initial start ongoing"); -#endif - Sysfile::clearInitialStartOngoing(SYSFILE->systemRestartBits); - } - copyGciLab(signal, CopyGCIReq::GLOBAL_CHECKPOINT); -}//Dbdih::GCP_SAVEhandling() - -/* - 3.11 G L O B A L C H E C K P O I N T (N O T - M A S T E R) - ************************************************************* - */ -void Dbdih::execGCP_PREPARE(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(7005); - - if (ERROR_INSERTED(7030)) - { - cgckptflag = true; - g_eventLogger.info("Delayed GCP_PREPARE 5s"); - sendSignalWithDelay(reference(), GSN_GCP_PREPARE, signal, 5000, - signal->getLength()); - return; - } - - Uint32 masterNodeId = signal->theData[0]; - Uint32 gci = signal->theData[1]; - BlockReference retRef = calcDihBlockRef(masterNodeId); - - ndbrequire (cmasterdihref == retRef); - ndbrequire (cgcpParticipantState == GCP_PARTICIPANT_READY); - ndbrequire (gci == (currentgcp + 1)); - - cgckptflag = true; - cgcpParticipantState = GCP_PARTICIPANT_PREPARE_RECEIVED; - cnewgcp = gci; - - if (ERROR_INSERTED(7031)) - { - g_eventLogger.info("Crashing delayed in GCP_PREPARE 3s"); - signal->theData[0] = 9999; - sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 3000, 1); - return; - } - - signal->theData[0] = cownNodeId; - signal->theData[1] = gci; - sendSignal(retRef, GSN_GCP_PREPARECONF, signal, 2, JBA); - return; -}//Dbdih::execGCP_PREPARE() - -void Dbdih::execGCP_COMMIT(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(7006); - Uint32 masterNodeId = signal->theData[0]; - Uint32 gci = signal->theData[1]; - - ndbrequire(gci == (currentgcp + 1)); - ndbrequire(masterNodeId = cmasterNodeId); - ndbrequire(cgcpParticipantState == GCP_PARTICIPANT_PREPARE_RECEIVED); - - coldgcp = currentgcp; - currentgcp = cnewgcp; - cgckptflag = false; - emptyverificbuffer(signal, true); - cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED; - signal->theData[0] = calcDihBlockRef(masterNodeId); - signal->theData[1] = coldgcp; - sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB); - return; -}//Dbdih::execGCP_COMMIT() - -void Dbdih::execGCP_TCFINISHED(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(7007); - Uint32 retRef = signal->theData[0]; - Uint32 gci = signal->theData[1]; - ndbrequire(gci == coldgcp); - - if (ERROR_INSERTED(7181) || ERROR_INSERTED(7182)) - { - c_error_7181_ref = retRef; // Save ref - ndbout_c("killing %d", refToNode(cmasterdihref)); - signal->theData[0] = 9999; - sendSignal(numberToRef(CMVMI, refToNode(cmasterdihref)), - GSN_NDB_TAMPER, signal, 1, JBB); - return; - } - - cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED; - signal->theData[0] = cownNodeId; - signal->theData[1] = coldgcp; - signal->theData[2] = cfailurenr; - sendSignal(retRef, GSN_GCP_NODEFINISH, signal, 3, JBB); -}//Dbdih::execGCP_TCFINISHED() - -/*****************************************************************************/ -//****** RECEIVING TAMPER REQUEST FROM NDBAPI ****** -/*****************************************************************************/ -void Dbdih::execDIHNDBTAMPER(Signal* signal) -{ - jamEntry(); - Uint32 tcgcpblocked = signal->theData[0]; - /* ACTION TO BE TAKEN BY DIH */ - Uint32 tuserpointer = signal->theData[1]; - BlockReference tuserblockref = signal->theData[2]; - switch (tcgcpblocked) 
{ - case 1: - jam(); - if (isMaster()) { - jam(); - cgcpOrderBlocked = 1; - } else { - jam(); - /* TRANSFER THE REQUEST */ - /* TO MASTER*/ - signal->theData[0] = tcgcpblocked; - signal->theData[1] = tuserpointer; - signal->theData[2] = tuserblockref; - sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB); - }//if - break; - case 2: - jam(); - if (isMaster()) { - jam(); - cgcpOrderBlocked = 0; - } else { - jam(); - /* TRANSFER THE REQUEST */ - /* TO MASTER*/ - signal->theData[0] = tcgcpblocked; - signal->theData[1] = tuserpointer; - signal->theData[2] = tuserblockref; - sendSignal(cmasterdihref, GSN_DIHNDBTAMPER, signal, 3, JBB); - }//if - break; - case 3: - ndbrequire(false); - return; - break; - case 4: - jam(); - signal->theData[0] = tuserpointer; - signal->theData[1] = crestartGci; - sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 2, JBB); - break; -#ifdef ERROR_INSERT - case 5: - jam(); - if(tuserpointer == 0) - { - jam(); - signal->theData[0] = 0; - sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB); - return; - } - /*----------------------------------------------------------------------*/ - // Insert errors. - /*----------------------------------------------------------------------*/ - if (tuserpointer < 1000) { - /*--------------------------------------------------------------------*/ - // Insert errors into QMGR. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = QMGR_REF; - } else if (tuserpointer < 2000) { - /*--------------------------------------------------------------------*/ - // Insert errors into NDBCNTR. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = NDBCNTR_REF; - } else if (tuserpointer < 3000) { - /*--------------------------------------------------------------------*/ - // Insert errors into NDBFS. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = NDBFS_REF; - } else if (tuserpointer < 4000) { - /*--------------------------------------------------------------------*/ - // Insert errors into DBACC. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = DBACC_REF; - } else if (tuserpointer < 5000) { - /*--------------------------------------------------------------------*/ - // Insert errors into DBTUP. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = DBTUP_REF; - } else if (tuserpointer < 6000) { - /*---------------------------------------------------------------------*/ - // Insert errors into DBLQH. - /*---------------------------------------------------------------------*/ - jam(); - tuserblockref = DBLQH_REF; - } else if (tuserpointer < 7000) { - /*---------------------------------------------------------------------*/ - // Insert errors into DBDICT. 
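This chain of range checks simply routes an inserted error number to a target block by thousand-range. A compact equivalent of the first ten ranges, with block names only (the helper is hypothetical; the real code resolves to block references such as QMGR_REF):

#include <cstdint>

// Illustration of the dispatch: error numbers 1..9999 pick a target block by
// their thousand-range; higher ranges (BACKUP, DBTUX, SUMA, ..., and the
// 30000/40000 master/non-master DIH redirects) follow the same pattern.
static const char* tamperTargetBlock(uint32_t errorNo)
{
  static const char* const blocks[10] = {
    "QMGR", "NDBCNTR", "NDBFS", "DBACC", "DBTUP",
    "DBLQH", "DBDICT", "DBDIH", "DBTC", "CMVMI"
  };
  if (errorNo >= 1 && errorNo < 10000)
    return blocks[errorNo / 1000];
  return "other";
}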
- /*---------------------------------------------------------------------*/ - jam(); - tuserblockref = DBDICT_REF; - } else if (tuserpointer < 8000) { - /*---------------------------------------------------------------------*/ - // Insert errors into DBDIH. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = DBDIH_REF; - } else if (tuserpointer < 9000) { - /*--------------------------------------------------------------------*/ - // Insert errors into DBTC. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = DBTC_REF; - } else if (tuserpointer < 10000) { - /*--------------------------------------------------------------------*/ - // Insert errors into CMVMI. - /*--------------------------------------------------------------------*/ - jam(); - tuserblockref = CMVMI_REF; - } else if (tuserpointer < 11000) { - jam(); - tuserblockref = BACKUP_REF; - } else if (tuserpointer < 12000) { - // DBUTIL_REF ? - jam(); - } else if (tuserpointer < 13000) { - jam(); - tuserblockref = DBTUX_REF; - } else if (tuserpointer < 14000) { - jam(); - tuserblockref = SUMA_REF; - } else if (tuserpointer < 15000) { - jam(); - tuserblockref = DBDICT_REF; - } else if (tuserpointer < 16000) { - jam(); - tuserblockref = LGMAN_REF; - } else if (tuserpointer < 17000) { - jam(); - tuserblockref = TSMAN_REF; - } else if (tuserpointer < 30000) { - /*--------------------------------------------------------------------*/ - // Ignore errors in the 20000-range. - /*--------------------------------------------------------------------*/ - jam(); - return; - } else if (tuserpointer < 40000) { - jam(); - /*--------------------------------------------------------------------*/ - // Redirect errors to master DIH in the 30000-range. - /*--------------------------------------------------------------------*/ - tuserblockref = cmasterdihref; - tuserpointer -= 30000; - signal->theData[0] = 5; - signal->theData[1] = tuserpointer; - signal->theData[2] = tuserblockref; - sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB); - return; - } else if (tuserpointer < 50000) { - NodeRecordPtr localNodeptr; - Uint32 Tfound = 0; - jam(); - /*--------------------------------------------------------------------*/ - // Redirect errors to non-master DIH in the 40000-range. - /*--------------------------------------------------------------------*/ - tuserpointer -= 40000; - for (localNodeptr.i = 1; - localNodeptr.i < MAX_NDB_NODES; - localNodeptr.i++) { - jam(); - ptrAss(localNodeptr, nodeRecord); - if ((localNodeptr.p->nodeStatus == NodeRecord::ALIVE) && - (localNodeptr.i != cmasterNodeId)) { - jam(); - tuserblockref = calcDihBlockRef(localNodeptr.i); - Tfound = 1; - break; - }//if - }//for - if (Tfound == 0) { - jam(); - /*-------------------------------------------------------------------*/ - // Ignore since no non-master node existed. 
- /*-------------------------------------------------------------------*/ - return; - }//if - signal->theData[0] = 5; - signal->theData[1] = tuserpointer; - signal->theData[2] = tuserblockref; - sendSignal(tuserblockref, GSN_DIHNDBTAMPER, signal, 3, JBB); - return; - } else { - jam(); - return; - }//if - signal->theData[0] = tuserpointer; - if (tuserpointer != 0) { - sendSignal(tuserblockref, GSN_NDB_TAMPER, signal, 1, JBB); - } else { - sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB); - sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB); - }//if - break; -#endif - default: - ndbrequire(false); - break; - }//switch - return; -}//Dbdih::execDIHNDBTAMPER() - -/*****************************************************************************/ -/* ********** FILE HANDLING MODULE *************/ -/*****************************************************************************/ -void Dbdih::copyGciLab(Signal* signal, CopyGCIReq::CopyReason reason) -{ - if(c_copyGCIMaster.m_copyReason != CopyGCIReq::IDLE){ - /** - * There can currently only be one waiting - */ - ndbrequire(c_copyGCIMaster.m_waiting == CopyGCIReq::IDLE); - c_copyGCIMaster.m_waiting = reason; - return; - } - c_copyGCIMaster.m_copyReason = reason; - sendLoopMacro(COPY_GCIREQ, sendCOPY_GCIREQ); - -}//Dbdih::copyGciLab() - -/* ------------------------------------------------------------------------- */ -/* COPY_GCICONF RESPONSE TO COPY_GCIREQ */ -/* ------------------------------------------------------------------------- */ -void Dbdih::execCOPY_GCICONF(Signal* signal) -{ - jamEntry(); - NodeRecordPtr senderNodePtr; - senderNodePtr.i = signal->theData[0]; - receiveLoopMacro(COPY_GCIREQ, senderNodePtr.i); - - CopyGCIReq::CopyReason waiting = c_copyGCIMaster.m_waiting; - CopyGCIReq::CopyReason current = c_copyGCIMaster.m_copyReason; - - c_copyGCIMaster.m_copyReason = CopyGCIReq::IDLE; - c_copyGCIMaster.m_waiting = CopyGCIReq::IDLE; - - bool ok = false; - switch(current){ - case CopyGCIReq::RESTART:{ - ok = true; - jam(); - DictStartReq * req = (DictStartReq*)&signal->theData[0]; - req->restartGci = SYSFILE->newestRestorableGCI; - req->senderRef = reference(); - sendSignal(cdictblockref, GSN_DICTSTARTREQ, - signal, DictStartReq::SignalLength, JBB); - break; - } - case CopyGCIReq::LOCAL_CHECKPOINT:{ - ok = true; - jam(); - startLcpRoundLab(signal); - break; - } - case CopyGCIReq::GLOBAL_CHECKPOINT: - ok = true; - jam(); - checkToCopyCompleted(signal); - - /************************************************************************/ - // Report the event that a global checkpoint has completed. 
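copyGciLab() above allows only one COPY_GCIREQ round in flight and parks at most one further request in c_copyGCIMaster.m_waiting, which execCOPY_GCICONF re-issues via CONTINUEB once the current round finishes. A minimal model of that one-deep pending slot (the type and member names here are illustrative):

#include <cassert>

enum class CopyReason { IDLE, RESTART, LOCAL_CHECKPOINT, GLOBAL_CHECKPOINT, INITIAL_START_COMPLETED };

struct CopyGciMaster {
  CopyReason current = CopyReason::IDLE;     // c_copyGCIMaster.m_copyReason
  CopyReason waiting = CopyReason::IDLE;     // c_copyGCIMaster.m_waiting

  // copyGciLab(): start now, or park exactly one pending reason.
  bool request(CopyReason r) {
    if (current != CopyReason::IDLE) {
      assert(waiting == CopyReason::IDLE);   // "There can currently only be one waiting"
      waiting = r;
      return false;                          // queued
    }
    current = r;
    return true;                             // started immediately
  }
  // End of execCOPY_GCICONF(): finish the round and return what to re-issue.
  CopyReason complete() {
    CopyReason next = waiting;
    current = CopyReason::IDLE;
    waiting = CopyReason::IDLE;
    return next;                             // IDLE means nothing was queued
  }
};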
- /************************************************************************/ - signal->setTrace(0); - signal->theData[0] = NDB_LE_GlobalCheckpointCompleted; //Event type - signal->theData[1] = coldgcp; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - c_newest_restorable_gci = coldgcp; - - CRASH_INSERTION(7004); - emptyWaitGCPMasterQueue(signal); - cgcpStatus = GCP_READY; - signal->theData[0] = DihContinueB::ZSTART_GCP; - signal->theData[1] = cgcpDelay; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - if (c_nodeStartMaster.blockGcp == true) { - jam(); - /* ------------------------------------------------------------------ */ - /* A NEW NODE WANTS IN AND WE MUST ALLOW IT TO COME IN NOW SINCE THE */ - /* GCP IS COMPLETED. */ - /* ------------------------------------------------------------------ */ - gcpBlockedLab(signal); - }//if - break; - case CopyGCIReq::INITIAL_START_COMPLETED: - ok = true; - jam(); - initialStartCompletedLab(signal); - break; - case CopyGCIReq::IDLE: - ok = false; - jam(); - } - ndbrequire(ok); - - /** - * Pop queue - */ - if(waiting != CopyGCIReq::IDLE){ - c_copyGCIMaster.m_copyReason = waiting; - signal->theData[0] = DihContinueB::ZCOPY_GCI; - signal->theData[1] = waiting; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - } -}//Dbdih::execCOPY_GCICONF() - -void Dbdih::invalidateLcpInfoAfterSr() -{ - NodeRecordPtr nodePtr; - SYSFILE->latestLCP_ID--; - Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits); - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (!NdbNodeBitmask::get(SYSFILE->lcpActive, nodePtr.i)){ - jam(); - /* ------------------------------------------------------------------- */ - // The node was not active in the local checkpoint. - // To avoid that we step the active status too fast to not - // active we step back one step from Sysfile::NS_ActiveMissed_x. - /* ------------------------------------------------------------------- */ - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - /* ----------------------------------------------------------------- */ - // When not active in ongoing LCP and still active is a contradiction. - /* ----------------------------------------------------------------- */ - ndbrequire(false); - case Sysfile::NS_ActiveMissed_1: - jam(); - nodePtr.p->activeStatus = Sysfile::NS_Active; - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1; - break; - default: - jam(); - break; - }//switch - }//if - }//for - setNodeRestartInfoBits(); -}//Dbdih::invalidateLcpInfoAfterSr() - -/* ------------------------------------------------------------------------- */ -/* THE NEXT STEP IS TO WRITE THE FILE. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::openingCopyGciSkipInitLab(Signal* signal, FileRecordPtr filePtr) -{ - writeRestorableGci(signal, filePtr); - filePtr.p->reqStatus = FileRecord::WRITING_COPY_GCI; - return; -}//Dbdih::openingCopyGciSkipInitLab() - -void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr) -{ - /* ----------------------------------------------------------------------- */ - /* WE HAVE NOW WRITTEN THIS FILE. WRITE ALSO NEXT FILE IF THIS IS NOT */ - /* ALREADY THE LAST. 
*/ - /* ----------------------------------------------------------------------- */ - filePtr.p->reqStatus = FileRecord::IDLE; - if (filePtr.i == crestartInfoFile[0]) { - jam(); - filePtr.i = crestartInfoFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - if (filePtr.p->fileStatus == FileRecord::OPEN) { - jam(); - openingCopyGciSkipInitLab(signal, filePtr); - return; - }//if - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_COPY_GCI; - return; - }//if - /* ----------------------------------------------------------------------- */ - /* WE HAVE COMPLETED WRITING BOTH FILES SUCCESSFULLY. NOW REPORT OUR */ - /* SUCCESS TO THE MASTER DIH. BUT FIRST WE NEED TO RESET A NUMBER OF */ - /* VARIABLES USED BY THE LOCAL CHECKPOINT PROCESS (ONLY IF TRIGGERED */ - /* BY LOCAL CHECKPOINT PROCESS. */ - /* ----------------------------------------------------------------------- */ - CopyGCIReq::CopyReason reason = c_copyGCISlave.m_copyReason; - - if (reason == CopyGCIReq::GLOBAL_CHECKPOINT) { - jam(); - cgcpParticipantState = GCP_PARTICIPANT_READY; - - SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr(); - rep->gci = coldgcp; - sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal, - SubGcpCompleteRep::SignalLength, JBB); - - EXECUTE_DIRECT(LGMAN, GSN_SUB_GCP_COMPLETE_REP, signal, - SubGcpCompleteRep::SignalLength); - jamEntry(); - } - - jam(); - c_copyGCISlave.m_copyReason = CopyGCIReq::IDLE; - - if(c_copyGCISlave.m_senderRef == cmasterdihref){ - jam(); - /** - * Only if same master - */ - signal->theData[0] = c_copyGCISlave.m_senderData; - sendSignal(c_copyGCISlave.m_senderRef, GSN_COPY_GCICONF, signal, 1, JBB); - - } - return; -}//Dbdih::writingCopyGciLab() - -void Dbdih::execSTART_LCP_REQ(Signal* signal){ - StartLcpReq * req = (StartLcpReq*)signal->getDataPtr(); - - CRASH_INSERTION2(7021, isMaster()); - CRASH_INSERTION2(7022, !isMaster()); - - ndbrequire(c_lcpState.m_masterLcpDihRef = req->senderRef); - c_lcpState.m_participatingDIH = req->participatingDIH; - c_lcpState.m_participatingLQH = req->participatingLQH; - - c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH = req->participatingLQH; - if(isMaster()){ - jam(); - ndbrequire(isActiveMaster()); - c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH = req->participatingDIH; - - } else { - c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor(); - } - - c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = false; - - c_lcpState.setLcpStatus(LCP_INIT_TABLES, __LINE__); - - signal->theData[0] = DihContinueB::ZINIT_LCP; - signal->theData[1] = c_lcpState.m_masterLcpDihRef; - signal->theData[2] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -} - -void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId) -{ - TabRecordPtr tabPtr; - tabPtr.i = tableId; - - if(c_lcpState.m_masterLcpDihRef != senderRef){ - jam(); - /** - * This is LCP master takeover - */ -#ifdef VM_TRACE - g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 1"); -#endif - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - sendMASTER_LCPCONF(signal); - return; - } - - if(c_lcpState.m_masterLcpDihRef != cmasterdihref){ - jam(); - /** - * Master take over but has not yet received MASTER_LCPREQ - */ -#ifdef VM_TRACE - g_eventLogger.info("initLcpLab aborted due to LCP master takeover - 2"); -#endif - return; - } - - //const Uint32 lcpId = SYSFILE->latestLCP_ID; - - for(; tabPtr.i < ctabFileSize; tabPtr.i++){ - - ptrAss(tabPtr, tabRecord); - - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) { - jam(); - 
tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - continue; - } - - if (tabPtr.p->tabStorage != TabRecord::ST_NORMAL) { - /** - * Table is not logged - */ - jam(); - tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - continue; - } - - if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) { - /* ----------------------------------------------------------------- */ - // We protect the updates of table data structures by this variable. - /* ----------------------------------------------------------------- */ - jam(); - signal->theData[0] = DihContinueB::ZINIT_LCP; - signal->theData[1] = senderRef; - signal->theData[2] = tabPtr.i; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 20, 3); - return; - }//if - - /** - * Found a table - */ - tabPtr.p->tabLcpStatus = TabRecord::TLS_ACTIVE; - - /** - * For each fragment - */ - for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) { - jam(); - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - - /** - * For each of replica record - */ - Uint32 replicaCount = 0; - ReplicaRecordPtr replicaPtr; - for(replicaPtr.i = fragPtr.p->storedReplicas; replicaPtr.i != RNIL; - replicaPtr.i = replicaPtr.p->nextReplica) { - jam(); - - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - Uint32 nodeId = replicaPtr.p->procNode; - if(c_lcpState.m_participatingLQH.get(nodeId)){ - jam(); - replicaCount++; - replicaPtr.p->lcpOngoingFlag = true; - } - } - - fragPtr.p->noLcpReplicas = replicaCount; - }//for - - signal->theData[0] = DihContinueB::ZINIT_LCP; - signal->theData[1] = senderRef; - signal->theData[2] = tabPtr.i + 1; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; - } - - /** - * No more tables - */ - jam(); - - if (c_lcpState.m_masterLcpDihRef != reference()){ - jam(); - ndbrequire(!isMaster()); - c_lcpState.setLcpStatus(LCP_STATUS_ACTIVE, __LINE__); - } else { - jam(); - ndbrequire(isMaster()); - } - - CRASH_INSERTION2(7023, isMaster()); - CRASH_INSERTION2(7024, !isMaster()); - - jam(); - StartLcpConf * conf = (StartLcpConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - sendSignal(c_lcpState.m_masterLcpDihRef, GSN_START_LCP_CONF, signal, - StartLcpConf::SignalLength, JBB); - return; -}//Dbdih::initLcpLab() - -/* ------------------------------------------------------------------------- */ -/* ERROR HANDLING FOR COPY RESTORABLE GCI FILE. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::openingCopyGciErrorLab(Signal* signal, FileRecordPtr filePtr) -{ - createFileRw(signal, filePtr); - /* ------------------------------------------------------------------------- */ - /* ERROR IN OPENING FILE. WE WILL TRY BY CREATING FILE INSTEAD. */ - /* ------------------------------------------------------------------------- */ - filePtr.p->reqStatus = FileRecord::CREATING_COPY_GCI; - return; -}//Dbdih::openingCopyGciErrorLab() - -/* ------------------------------------------------------------------------- */ -/* ENTER DICTSTARTCONF WITH */ -/* TBLOCKREF */ -/* ------------------------------------------------------------------------- */ -void Dbdih::dictStartConfLab(Signal* signal) -{ - /* ----------------------------------------------------------------------- */ - /* WE HAVE NOW RECEIVED ALL THE TABLES TO RESTART. 
*/ - /* ----------------------------------------------------------------------- */ - signal->theData[0] = DihContinueB::ZSTART_FRAGMENT; - signal->theData[1] = 0; /* START WITH TABLE 0 */ - signal->theData[2] = 0; /* AND FRAGMENT 0 */ - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; -}//Dbdih::dictStartConfLab() - - -void Dbdih::openingTableLab(Signal* signal, FileRecordPtr filePtr) -{ - /* ---------------------------------------------------------------------- */ - /* SUCCESSFULLY OPENED A FILE. READ THE FIRST PAGE OF THIS FILE. */ - /* ---------------------------------------------------------------------- */ - TabRecordPtr tabPtr; - PageRecordPtr pagePtr; - - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->noPages = 1; - allocpage(pagePtr); - tabPtr.p->pageRef[0] = pagePtr.i; - readTabfile(signal, tabPtr.p, filePtr); - filePtr.p->reqStatus = FileRecord::READING_TABLE; - return; -}//Dbdih::openingTableLab() - -void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - /* ---------------------------------------------------------------------- */ - /* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */ - /* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. */ - /* ---------------------------------------------------------------------- */ - if (filePtr.i == tabPtr.p->tabFile[0]) - { - filePtr.i = tabPtr.p->tabFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_TABLE; - } - else - { - char buf[256]; - BaseString::snprintf(buf, sizeof(buf), - "Error opening DIH schema files for table: %d", - tabPtr.i); - progError(__LINE__, NDBD_EXIT_AFS_NO_SUCH_FILE, buf); - } -}//Dbdih::openingTableErrorLab() - -void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - PageRecordPtr pagePtr; - /* ---------------------------------------------------------------------- */ - /* WE HAVE SUCCESSFULLY READ A NUMBER OF PAGES IN THE TABLE FILE. IF */ - /* MORE PAGES EXIST IN THE FILE THEN READ ALL PAGES IN THE FILE. */ - /* ---------------------------------------------------------------------- */ - filePtr.p->reqStatus = FileRecord::IDLE; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - pagePtr.i = tabPtr.p->pageRef[0]; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - Uint32 noOfStoredPages = pagePtr.p->word[33]; - if (tabPtr.p->noPages < noOfStoredPages) { - jam(); - ndbrequire(noOfStoredPages <= 8); - for (Uint32 i = tabPtr.p->noPages; i < noOfStoredPages; i++) { - jam(); - allocpage(pagePtr); - tabPtr.p->pageRef[i] = pagePtr.i; - }//for - tabPtr.p->noPages = noOfStoredPages; - readTabfile(signal, tabPtr.p, filePtr); - filePtr.p->reqStatus = FileRecord::READING_TABLE; - } else { - ndbrequire(tabPtr.p->noPages == pagePtr.p->word[33]); - ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE); - jam(); - /* --------------------------------------------------------------------- */ - /* WE HAVE READ ALL PAGES. NOW READ FROM PAGES INTO TABLE AND FRAGMENT */ - /* DATA STRUCTURES. 
*/
- /* --------------------------------------------------------------------- */
- tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE1_READ_PAGES;
- signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE;
- signal->theData[1] = tabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- }//if
- return;
-}//Dbdih::readingTableLab()
-
-void Dbdih::readTableFromPagesLab(Signal* signal, TabRecordPtr tabPtr)
-{
- FileRecordPtr filePtr;
- filePtr.i = tabPtr.p->tabFile[0];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- /* ---------------------------------------------------------------------- */
- /* WE HAVE NOW COPIED TO OUR NODE. WE HAVE NOW COMPLETED RESTORING */
- /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */
- /* WE ALSO NEED TO CLOSE THE TABLE FILE. */
- /* ---------------------------------------------------------------------- */
- if (filePtr.p->fileStatus != FileRecord::OPEN) {
- jam();
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- }//if
- closeFile(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_SR;
- return;
-}//Dbdih::readTableFromPagesLab()
-
-void Dbdih::closingTableSrLab(Signal* signal, FileRecordPtr filePtr)
-{
- /**
- * Update table/fragment info
- */
- TabRecordPtr tabPtr;
- tabPtr.i = filePtr.p->tabRef;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- resetReplicaSr(tabPtr);
-
- signal->theData[0] = DihContinueB::ZCOPY_TABLE;
- signal->theData[1] = filePtr.p->tabRef;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
-
- return;
-}//Dbdih::closingTableSrLab()
-
-void
-Dbdih::resetReplicaSr(TabRecordPtr tabPtr){
-
- const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI;
-
- for(Uint32 i = 0; i < tabPtr.p->totalfragments; i++){
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, i, fragPtr);
-
- /**
- * 1) Start by moving all replicas into oldStoredReplicas
- */
- prepareReplicas(fragPtr);
-
- /**
- * 2) Move all "alive" replicas into storedReplicas
- * + update noCrashedReplicas...
- */
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (replicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- const Uint32 nextReplicaPtrI = replicaPtr.p->nextReplica;
-
- NodeRecordPtr nodePtr;
- nodePtr.i = replicaPtr.p->procNode;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
-
- const Uint32 noCrashedReplicas = replicaPtr.p->noCrashedReplicas;
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- jam();
- switch (nodePtr.p->activeStatus) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:{
- jam();
- /* --------------------------------------------------------------- */
- /* THE NODE IS ALIVE AND KICKING AND ACTIVE, LET'S USE IT. */
- /* --------------------------------------------------------------- */
- arrGuardErr(noCrashedReplicas, 8, NDBD_EXIT_MAX_CRASHED_REPLICAS);
- Uint32 lastGci = replicaPtr.p->replicaLastGci[noCrashedReplicas];
- if(lastGci >= newestRestorableGCI){
- jam();
- /** -------------------------------------------------------------
- * THE REPLICA WAS ALIVE AT THE SYSTEM FAILURE. WE WILL SET THE
- * LAST REPLICA GCI TO MINUS ONE SINCE IT HASN'T FAILED YET IN THE
- * NEW SYSTEM.
- *-------------------------------------------------------------- */ - replicaPtr.p->replicaLastGci[noCrashedReplicas] = (Uint32)-1; - } else { - jam(); - /*-------------------------------------------------------------- - * SINCE IT WAS NOT ALIVE AT THE TIME OF THE SYSTEM CRASH THIS IS - * A COMPLETELY NEW REPLICA. WE WILL SET THE CREATE GCI TO BE THE - * NEXT GCI TO BE EXECUTED. - *--------_----------------------------------------------------- */ - const Uint32 nextCrashed = noCrashedReplicas + 1; - replicaPtr.p->noCrashedReplicas = nextCrashed; - arrGuardErr(nextCrashed, 8, NDBD_EXIT_MAX_CRASHED_REPLICAS); - replicaPtr.p->createGci[nextCrashed] = newestRestorableGCI + 1; - ndbrequire(newestRestorableGCI + 1 != 0xF1F1F1F1); - replicaPtr.p->replicaLastGci[nextCrashed] = (Uint32)-1; - }//if - - resetReplicaLcp(replicaPtr.p, newestRestorableGCI); - - /** - * Make sure we can also find REDO for restoring replica... - */ - { - CreateReplicaRecord createReplica; - ConstPtr constReplicaPtr; - constReplicaPtr.i = replicaPtr.i; - constReplicaPtr.p = replicaPtr.p; - if (tabPtr.p->tabStorage != TabRecord::ST_NORMAL || - setup_create_replica(fragPtr, - &createReplica, constReplicaPtr)) - { - jam(); - removeOldStoredReplica(fragPtr, replicaPtr); - linkStoredReplica(fragPtr, replicaPtr); - } - else - { - jam(); - infoEvent("Forcing take-over of node %d due to unsufficient REDO" - " for table %d fragment: %d", - nodePtr.i, tabPtr.i, i); - - setNodeActiveStatus(nodePtr.i, - Sysfile::NS_NotActive_NotTakenOver); - } - } - } - default: - jam(); - /*empty*/; - break; - } - } - replicaPtr.i = nextReplicaPtrI; - }//while - updateNodeInfo(fragPtr); - } -} - -void -Dbdih::resetReplicaLcp(ReplicaRecord * replicaP, Uint32 stopGci){ - - Uint32 lcpNo = replicaP->nextLcp; - const Uint32 startLcpNo = lcpNo; - do { - lcpNo = prevLcpNo(lcpNo); - ndbrequire(lcpNo < MAX_LCP_STORED); - if (replicaP->lcpStatus[lcpNo] == ZVALID) { - if (replicaP->maxGciStarted[lcpNo] < stopGci) { - jam(); - /* ----------------------------------------------------------------- */ - /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */ - /* RESTARTING THIS FRAGMENT REPLICA. */ - /* ----------------------------------------------------------------- */ - return ; - }//if - }//if - - /** - * WE COULD NOT USE THIS LOCAL CHECKPOINT. IT WAS TOO - * RECENT OR SIMPLY NOT A VALID CHECKPOINT. - * WE SHOULD THUS REMOVE THIS LOCAL CHECKPOINT SINCE IT WILL NEVER - * AGAIN BE USED. SET LCP_STATUS TO INVALID. - */ - replicaP->nextLcp = lcpNo; - replicaP->lcpId[lcpNo] = 0; - replicaP->lcpStatus[lcpNo] = ZINVALID; - } while (lcpNo != startLcpNo); - - replicaP->nextLcp = 0; -} - -void Dbdih::readingTableErrorLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - /* ---------------------------------------------------------------------- */ - /* READING THIS FILE FAILED. CLOSE IT AFTER RELEASING ALL PAGES. 
*/ - /* ---------------------------------------------------------------------- */ - ndbrequire(tabPtr.p->noPages <= 8); - for (Uint32 i = 0; i < tabPtr.p->noPages; i++) { - jam(); - releasePage(tabPtr.p->pageRef[i]); - }//for - closeFile(signal, filePtr); - filePtr.p->reqStatus = FileRecord::CLOSING_TABLE_CRASH; - return; -}//Dbdih::readingTableErrorLab() - -void Dbdih::closingTableCrashLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - /* ---------------------------------------------------------------------- */ - /* WE HAVE NOW CLOSED A FILE WHICH WE HAD A READ ERROR WITH. PROCEED */ - /* WITH NEXT FILE IF NOT THE LAST OTHERWISE REPORT ERROR. */ - /* ---------------------------------------------------------------------- */ - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - ndbrequire(filePtr.i == tabPtr.p->tabFile[0]); - filePtr.i = tabPtr.p->tabFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - openFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::OPENING_TABLE; -}//Dbdih::closingTableCrashLab() - -/*****************************************************************************/ -/* ********** COPY TABLE MODULE *************/ -/*****************************************************************************/ -void Dbdih::execCOPY_TABREQ(Signal* signal) -{ - CRASH_INSERTION(7172); - - TabRecordPtr tabPtr; - PageRecordPtr pagePtr; - jamEntry(); - BlockReference ref = signal->theData[0]; - Uint32 reqinfo = signal->theData[1]; - tabPtr.i = signal->theData[2]; - Uint32 schemaVersion = signal->theData[3]; - Uint32 noOfWords = signal->theData[4]; - ndbrequire(ref == cmasterdihref); - ndbrequire(!isMaster()); - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if (reqinfo == 1) { - jam(); - tabPtr.p->schemaVersion = schemaVersion; - initTableFile(tabPtr); - }//if - ndbrequire(tabPtr.p->noPages < 8); - if (tabPtr.p->noOfWords == 0) { - jam(); - allocpage(pagePtr); - tabPtr.p->pageRef[tabPtr.p->noPages] = pagePtr.i; - tabPtr.p->noPages++; - } else { - jam(); - pagePtr.i = tabPtr.p->pageRef[tabPtr.p->noPages - 1]; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - }//if - ndbrequire(tabPtr.p->noOfWords + 15 < 2048); - ndbrequire(tabPtr.p->noOfWords < 2048); - MEMCOPY_NO_WORDS(&pagePtr.p->word[tabPtr.p->noOfWords], &signal->theData[5], 16); - tabPtr.p->noOfWords += 16; - if (tabPtr.p->noOfWords == 2048) { - jam(); - tabPtr.p->noOfWords = 0; - }//if - if (noOfWords > 16) { - jam(); - return; - }//if - tabPtr.p->noOfWords = 0; - ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE); - tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_TAB_REQ; - signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_TABLE; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); -}//Dbdih::execCOPY_TABREQ() - -void -Dbdih::copyTabReq_complete(Signal* signal, TabRecordPtr tabPtr){ - if (!isMaster()) { - jam(); - //---------------------------------------------------------------------------- - // In this particular case we do not release table pages if we are master. The - // reason is that the master could still be sending the table info to another - // node. 
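
// As a standalone illustration (not code from this file): execCOPY_TABREQ
// above receives a table description in fixed 16-word chunks and accumulates
// them into 2048-word pages, opening a new page whenever the running word
// count wraps back to zero; a final signal carrying noOfWords <= 16 ends the
// transfer. A minimal C++ analogue of that accumulation arithmetic, using
// invented container types rather than the real Signal/PageRecord classes:

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the 2048-word DIH page and the 16-word payload
// carried by each COPY_TABREQ signal.
static const uint32_t WORDS_PER_PAGE   = 2048;
static const uint32_t WORDS_PER_SIGNAL = 16;

struct TableDescription {
  std::vector<std::vector<uint32_t>> pages; // each page holds 2048 words
  uint32_t wordsInLastPage = 0;             // mirrors TabRecord::noOfWords
};

// Append one 16-word chunk, starting a new page when the previous one is full.
void appendChunk(TableDescription& tab, const uint32_t* chunk)
{
  if (tab.wordsInLastPage == 0)             // previous page full (or first chunk)
    tab.pages.push_back(std::vector<uint32_t>(WORDS_PER_PAGE, 0));

  std::vector<uint32_t>& page = tab.pages.back();
  for (uint32_t i = 0; i < WORDS_PER_SIGNAL; i++)
    page[tab.wordsInLastPage + i] = chunk[i];

  tab.wordsInLastPage += WORDS_PER_SIGNAL;
  if (tab.wordsInLastPage == WORDS_PER_PAGE) // wrap: next chunk opens a new page
    tab.wordsInLastPage = 0;
}

int main()
{
  TableDescription tab;
  uint32_t chunk[WORDS_PER_SIGNAL] = {0};
  for (uint32_t n = 0; n < 300; n++)        // 300 chunks = 4800 words
    appendChunk(tab, chunk);
  std::printf("pages: %zu, words in last page: %u\n",
              tab.pages.size(), tab.wordsInLastPage);
  return 0;
}
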
- //---------------------------------------------------------------------------- - releaseTabPages(tabPtr.i); - tabPtr.p->tabStatus = TabRecord::TS_ACTIVE; - for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) { - jam(); - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - updateNodeInfo(fragPtr); - }//for - }//if - signal->theData[0] = cownNodeId; - signal->theData[1] = tabPtr.i; - sendSignal(cmasterdihref, GSN_COPY_TABCONF, signal, 2, JBB); -} - -/*****************************************************************************/ -/* ****** READ FROM A NUMBER OF PAGES INTO THE TABLE DATA STRUCTURES ********/ -/*****************************************************************************/ -void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId) -{ - RWFragment rf; - rf.wordIndex = 35; - rf.pageIndex = 0; - rf.rwfTabPtr.i = tableId; - ptrCheckGuard(rf.rwfTabPtr, ctabFileSize, tabRecord); - rf.rwfPageptr.i = rf.rwfTabPtr.p->pageRef[0]; - ptrCheckGuard(rf.rwfPageptr, cpageFileSize, pageRecord); - rf.rwfTabPtr.p->totalfragments = readPageWord(&rf); - rf.rwfTabPtr.p->noOfBackups = readPageWord(&rf); - rf.rwfTabPtr.p->hashpointer = readPageWord(&rf); - rf.rwfTabPtr.p->kvalue = readPageWord(&rf); - rf.rwfTabPtr.p->mask = readPageWord(&rf); - rf.rwfTabPtr.p->method = (TabRecord::Method)readPageWord(&rf); - /* ------------- */ - /* Type of table */ - /* ------------- */ - rf.rwfTabPtr.p->tabStorage = (TabRecord::Storage)(readPageWord(&rf)); - - Uint32 noOfFrags = rf.rwfTabPtr.p->totalfragments; - ndbrequire(noOfFrags > 0); - ndbrequire((noOfFrags * (rf.rwfTabPtr.p->noOfBackups + 1)) <= cnoFreeReplicaRec); - allocFragments(noOfFrags, rf.rwfTabPtr); - - signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG; - signal->theData[1] = rf.rwfTabPtr.i; - signal->theData[2] = 0; - signal->theData[3] = rf.pageIndex; - signal->theData[4] = rf.wordIndex; - sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); - return; -}//Dbdih::readPagesIntoTableLab() - -void Dbdih::readPagesIntoFragLab(Signal* signal, RWFragment* rf) -{ - ndbrequire(rf->pageIndex < 8); - rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex]; - ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord); - FragmentstorePtr fragPtr; - getFragstore(rf->rwfTabPtr.p, rf->fragId, fragPtr); - readFragment(rf, fragPtr); - readReplicas(rf, fragPtr); - rf->fragId++; - if (rf->fragId == rf->rwfTabPtr.p->totalfragments) { - jam(); - switch (rf->rwfTabPtr.p->tabCopyStatus) { - case TabRecord::CS_SR_PHASE1_READ_PAGES: - jam(); - releaseTabPages(rf->rwfTabPtr.i); - rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - signal->theData[0] = DihContinueB::ZREAD_TABLE_FROM_PAGES; - signal->theData[1] = rf->rwfTabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - break; - case TabRecord::CS_COPY_TAB_REQ: - jam(); - rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - if(getNodeState().getSystemRestartInProgress()){ - jam(); - copyTabReq_complete(signal, rf->rwfTabPtr); - return; - } - rf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - rf->rwfTabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ; - signal->theData[0] = DihContinueB::ZTABLE_UPDATE; - signal->theData[1] = rf->rwfTabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - break; - default: - ndbrequire(false); - return; - break; - }//switch - } else { - jam(); - signal->theData[0] = DihContinueB::ZREAD_PAGES_INTO_FRAG; - signal->theData[1] = rf->rwfTabPtr.i; - signal->theData[2] = rf->fragId; - 
signal->theData[3] = rf->pageIndex; - signal->theData[4] = rf->wordIndex; - sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); - }//if - return; -}//Dbdih::readPagesIntoFragLab() - -/*****************************************************************************/ -/***** WRITING FROM TABLE DATA STRUCTURES INTO A SET OF PAGES ******/ -// execCONTINUEB(ZPACK_TABLE_INTO_PAGES) -/*****************************************************************************/ -void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId) -{ - RWFragment wf; - TabRecordPtr tabPtr; - allocpage(wf.rwfPageptr); - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - tabPtr.p->pageRef[0] = wf.rwfPageptr.i; - tabPtr.p->noPages = 1; - wf.wordIndex = 35; - wf.pageIndex = 0; - writePageWord(&wf, tabPtr.p->totalfragments); - writePageWord(&wf, tabPtr.p->noOfBackups); - writePageWord(&wf, tabPtr.p->hashpointer); - writePageWord(&wf, tabPtr.p->kvalue); - writePageWord(&wf, tabPtr.p->mask); - writePageWord(&wf, tabPtr.p->method); - writePageWord(&wf, tabPtr.p->tabStorage); - - signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES; - signal->theData[1] = tabPtr.i; - signal->theData[2] = 0; - signal->theData[3] = wf.pageIndex; - signal->theData[4] = wf.wordIndex; - sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); -}//Dbdih::packTableIntoPagesLab() - -/*****************************************************************************/ -// execCONTINUEB(ZPACK_FRAG_INTO_PAGES) -/*****************************************************************************/ -void Dbdih::packFragIntoPagesLab(Signal* signal, RWFragment* wf) -{ - ndbrequire(wf->pageIndex < 8); - wf->rwfPageptr.i = wf->rwfTabPtr.p->pageRef[wf->pageIndex]; - ptrCheckGuard(wf->rwfPageptr, cpageFileSize, pageRecord); - FragmentstorePtr fragPtr; - getFragstore(wf->rwfTabPtr.p, wf->fragId, fragPtr); - writeFragment(wf, fragPtr); - writeReplicas(wf, fragPtr.p->storedReplicas); - writeReplicas(wf, fragPtr.p->oldStoredReplicas); - wf->fragId++; - if (wf->fragId == wf->rwfTabPtr.p->totalfragments) { - jam(); - PageRecordPtr pagePtr; - pagePtr.i = wf->rwfTabPtr.p->pageRef[0]; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - pagePtr.p->word[33] = wf->rwfTabPtr.p->noPages; - pagePtr.p->word[34] = ((wf->rwfTabPtr.p->noPages - 1) * 2048) + wf->wordIndex; - switch (wf->rwfTabPtr.p->tabCopyStatus) { - case TabRecord::CS_SR_PHASE2_READ_TABLE: - /* -------------------------------------------------------------------*/ - // We are performing a system restart and we are now ready to copy the - // table from this node (the master) to all other nodes. 
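
// As a standalone illustration (not code from this file): packTableIntoPagesLab
// and packFragIntoPagesLab above serialise a table into 2048-word pages, with
// the payload starting at word 35 of the first page; once the last fragment is
// written, word 33 of the first page records the page count and word 34 the
// total serialised length, (noPages - 1) * 2048 + wordIndex. A small C++ check
// of that arithmetic, with names invented for the example:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the value stored in word[34] of the first
// page: total words written across all 2048-word pages.
static uint32_t totalSerializedWords(uint32_t noPages, uint32_t lastWordIndex)
{
  return (noPages - 1) * 2048 + lastWordIndex;
}

int main()
{
  // e.g. a description spanning 3 pages that ends at word 412 of the last one
  uint32_t total = totalSerializedWords(3, 412);
  assert(total == 2 * 2048 + 412);
  std::printf("header in words 33/34, payload from word 35, total %u words\n",
              total);
  return 0;
}
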
- /* -------------------------------------------------------------------*/
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZSR_PHASE2_READ_TABLE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_COPY_NODE_STATE:
- jam();
- tableCopyNodeLab(signal, wf->rwfTabPtr);
- return;
- break;
- case TabRecord::CS_LCP_READ_TABLE:
- jam();
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_REMOVE_NODE:
- case TabRecord::CS_INVALIDATE_NODE_LCP:
- jam();
- signal->theData[0] = DihContinueB::ZTABLE_UPDATE;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_ADD_TABLE_MASTER:
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZADD_TABLE_MASTER_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- case TabRecord::CS_ADD_TABLE_SLAVE:
- jam();
- wf->rwfTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE;
- signal->theData[0] = DihContinueB::ZADD_TABLE_SLAVE_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
- return;
- break;
- default:
- ndbrequire(false);
- return;
- break;
- }//switch
- } else {
- jam();
- signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
- signal->theData[1] = wf->rwfTabPtr.i;
- signal->theData[2] = wf->fragId;
- signal->theData[3] = wf->pageIndex;
- signal->theData[4] = wf->wordIndex;
- sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
- }//if
- return;
-}//Dbdih::packFragIntoPagesLab()
-
-/*****************************************************************************/
-/* ********** START FRAGMENT MODULE *************/
-/*****************************************************************************/
-void
-Dbdih::dump_replica_info()
-{
- TabRecordPtr tabPtr;
- FragmentstorePtr fragPtr;
-
- for(tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++)
- {
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE)
- continue;
-
- for(Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++)
- {
- getFragstore(tabPtr.p, fid, fragPtr);
- ndbout_c("tab: %d frag: %d gci: %d\n -- storedReplicas:",
- tabPtr.i, fid, SYSFILE->newestRestorableGCI);
-
- Uint32 i;
- ReplicaRecordPtr replicaPtr;
- replicaPtr.i = fragPtr.p->storedReplicas;
- for(; replicaPtr.i != RNIL; replicaPtr.i = replicaPtr.p->nextReplica)
- {
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- ndbout_c(" node: %d initialGci: %d nextLcp: %d noCrashedReplicas: %d",
- replicaPtr.p->procNode,
- replicaPtr.p->initialGci,
- replicaPtr.p->nextLcp,
- replicaPtr.p->noCrashedReplicas);
- for(i = 0; i < MAX_LCP_STORED; i++)
- {
- ndbout_c(" lcp: %d %s lcpId: %d maxGciCompleted: %d maxGciStarted: %d",
- i,
- (replicaPtr.p->lcpStatus[i] == ZVALID ?"VALID":"INVALID"),
- replicaPtr.p->lcpId[i],
- replicaPtr.p->maxGciCompleted[i],
- replicaPtr.p->maxGciStarted[i]);
- }
-
- for (i = 0; i < 8; i++)
- {
- ndbout_c(" crashed replica: %d replicaLastGci: %d createGci: %d",
- i,
- replicaPtr.p->replicaLastGci[i],
- replicaPtr.p->createGci[i]);
- }
- }
- ndbout_c(" -- oldStoredReplicas");
- replicaPtr.i = fragPtr.p->oldStoredReplicas;
- for(; replicaPtr.i != RNIL; replicaPtr.i = replicaPtr.p->nextReplica)
- {
- ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- for(i = 0; i < MAX_LCP_STORED; i++)
- {
- ndbout_c(" lcp: %d %s lcpId: %d maxGciCompleted: %d maxGciStarted: %d",
- i,
- (replicaPtr.p->lcpStatus[i] == ZVALID ?"VALID":"INVALID"),
- replicaPtr.p->lcpId[i],
- replicaPtr.p->maxGciCompleted[i],
- replicaPtr.p->maxGciStarted[i]);
- }
-
- for (i = 0; i < 8; i++)
- {
- ndbout_c(" crashed replica: %d replicaLastGci: %d createGci: %d",
- i,
- replicaPtr.p->replicaLastGci[i],
- replicaPtr.p->createGci[i]);
- }
- }
- }
- }
-}
-
-void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
-{
- Uint32 TloopCount = 0;
- TabRecordPtr tabPtr;
- while (true) {
- if (TloopCount > 100) {
- jam();
- signal->theData[0] = DihContinueB::ZSTART_FRAGMENT;
- signal->theData[1] = tableId;
- signal->theData[2] = 0;
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
- return;
- }
-
- if (tableId >= ctabFileSize) {
- jam();
- signal->theData[0] = DihContinueB::ZCOMPLETE_RESTART;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- return;
- }//if
-
- tabPtr.i = tableId;
- ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
- if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE){
- jam();
- TloopCount++;
- tableId++;
- fragId = 0;
- continue;
- }
-
- if(tabPtr.p->tabStorage != TabRecord::ST_NORMAL){
- jam();
- TloopCount++;
- tableId++;
- fragId = 0;
- continue;
- }
-
- jam();
- break;
- }//while
-
- FragmentstorePtr fragPtr;
- getFragstore(tabPtr.p, fragId, fragPtr);
- /* ----------------------------------------------------------------------- */
- /* WE NEED TO RESET THE REPLICA DATA STRUCTURES. THIS MEANS THAT WE */
- /* MUST REMOVE REPLICAS THAT WAS NOT STARTED AT THE GCI TO RESTORE. WE */
- /* NEED TO PUT ALL STORED REPLICAS ON THE LIST OF OLD STORED REPLICAS */
- /* RESET THE NUMBER OF REPLICAS TO CREATE. */
- /* ----------------------------------------------------------------------- */
- cnoOfCreateReplicas = 0;
- /* ----------------------------------------------------------------------- */
- /* WE WILL NEVER START MORE THAN FOUR FRAGMENT REPLICAS WHATEVER THE */
- /* DESIRED REPLICATION IS. */
- /* ----------------------------------------------------------------------- */
- ndbrequire(tabPtr.p->noOfBackups < 4);
- /* ----------------------------------------------------------------------- */
- /* SEARCH FOR STORED REPLICAS THAT CAN BE USED TO RESTART THE SYSTEM. */
- /* ----------------------------------------------------------------------- */
- searchStoredReplicas(fragPtr);
-
- if (cnoOfCreateReplicas == 0) {
- /* --------------------------------------------------------------------- */
- /* THERE WERE NO STORED REPLICAS AVAILABLE THAT CAN SERVE AS REPLICA TO*/
- /* RESTART THE SYSTEM FROM. IN A LATER RELEASE WE WILL ADD */
- /* FUNCTIONALITY TO CHECK IF THERE ARE ANY STANDBY NODES THAT COULD DO */
- /* THIS TASK INSTEAD IN THIS IMPLEMENTATION WE SIMPLY CRASH THE SYSTEM.*/
- /* THIS WILL DECREASE THE GCI TO RESTORE WHICH HOPEFULLY WILL MAKE IT */
- /* POSSIBLE TO RESTORE THE SYSTEM. */
- /* --------------------------------------------------------------------- */
- char buf[64];
- BaseString::snprintf(buf, sizeof(buf), "table: %d fragment: %d gci: %d",
- tableId, fragId, SYSFILE->newestRestorableGCI);
-
- ndbout_c(buf);
- dump_replica_info();
-
- progError(__LINE__, NDBD_EXIT_NO_RESTORABLE_REPLICA, buf);
- ndbrequire(false);
- return;
- }//if
-
- /* ----------------------------------------------------------------------- */
- /* WE HAVE CHANGED THE NODE TO BE PRIMARY REPLICA AND THE NODES TO BE */
- /* BACKUP NODES. WE MUST UPDATE THIS NODES DATA STRUCTURE SINCE WE */
- /* WILL NOT COPY THE TABLE DATA TO OURSELF.
*/ - /* ----------------------------------------------------------------------- */ - updateNodeInfo(fragPtr); - /* ----------------------------------------------------------------------- */ - /* NOW WE HAVE COLLECTED ALL THE REPLICAS WE COULD GET. WE WILL NOW */ - /* RESTART THE FRAGMENT REPLICAS WE HAVE FOUND IRRESPECTIVE OF IF THERE*/ - /* ARE ENOUGH ACCORDING TO THE DESIRED REPLICATION. */ - /* ----------------------------------------------------------------------- */ - /* WE START BY SENDING ADD_FRAGREQ FOR THOSE REPLICAS THAT NEED IT. */ - /* ----------------------------------------------------------------------- */ - CreateReplicaRecordPtr createReplicaPtr; - for (createReplicaPtr.i = 0; - createReplicaPtr.i < cnoOfCreateReplicas; - createReplicaPtr.i++) { - jam(); - ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord); - createReplicaPtr.p->hotSpareUse = false; - }//for - - sendStartFragreq(signal, tabPtr, fragId); - - /** - * Don't wait for START_FRAGCONF - */ - fragId++; - if (fragId >= tabPtr.p->totalfragments) { - jam(); - tabPtr.i++; - fragId = 0; - }//if - signal->theData[0] = DihContinueB::ZSTART_FRAGMENT; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragId; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - - return; -}//Dbdih::startFragmentLab() - - -/*****************************************************************************/ -/* ********** COMPLETE RESTART MODULE *************/ -/*****************************************************************************/ -void Dbdih::completeRestartLab(Signal* signal) -{ - sendLoopMacro(START_RECREQ, sendSTART_RECREQ); -}//completeRestartLab() - -/* ------------------------------------------------------------------------- */ -// SYSTEM RESTART: -/* A NODE HAS COMPLETED RESTORING ALL DATABASE FRAGMENTS. */ -// NODE RESTART: -// THE STARTING NODE HAS PREPARED ITS LOG FILES TO ENABLE EXECUTION -// OF TRANSACTIONS. -// Precondition: -// This signal must be received by the master node. -/* ------------------------------------------------------------------------- */ -void Dbdih::execSTART_RECCONF(Signal* signal) -{ - jamEntry(); - Uint32 senderNodeId = signal->theData[0]; - ndbrequire(isMaster()); - if (getNodeState().startLevel >= NodeState::SL_STARTED){ - /* --------------------------------------------------------------------- */ - // Since our node is already up and running this must be a node restart. - // This means that we should be the master node, - // otherwise we have a problem. - /* --------------------------------------------------------------------- */ - jam(); - ndbout_c("startNextCopyFragment"); - startNextCopyFragment(signal, findTakeOver(senderNodeId)); - return; - } else { - /* --------------------------------------------------------------------- */ - // This was the system restart case. We set the state indicating that the - // node has completed restoration of all fragments. - /* --------------------------------------------------------------------- */ - receiveLoopMacro(START_RECREQ, senderNodeId); - - signal->theData[0] = reference(); - sendSignal(cntrlblockref, GSN_NDB_STARTCONF, signal, 1, JBB); - return; - }//if -}//Dbdih::execSTART_RECCONF() - -void Dbdih::copyNodeLab(Signal* signal, Uint32 tableId) -{ - /* ----------------------------------------------------------------------- */ - // This code is executed by the master to assist a node restart in receiving - // the data in the master. 
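
// As a standalone illustration (not code from this file): copyNodeLab above,
// like startFragment and calculateKeepGciLab, bounds the work done per
// invocation and then sends CONTINUEB to itself carrying the current position,
// so long scans are split into short slices of the block thread. A toy C++
// analogue of that self-continuation idiom, using a plain closure queue in
// place of the NDB job buffer:

#include <cstdint>
#include <cstdio>
#include <deque>
#include <functional>

// Toy stand-in for the block's job buffer: "CONTINUEB" becomes pushing a closure.
static std::deque<std::function<void()>> jobBuffer;

// Process items starting at 'index', but yield after 100 per invocation,
// re-posting ourselves with the saved position - the real-time-break pattern.
void processTables(uint32_t index, uint32_t tableCount)
{
  uint32_t done = 0;
  while (index < tableCount) {
    if (done >= 100) {                      // real-time break
      uint32_t resumeAt = index;
      jobBuffer.push_back([resumeAt, tableCount] {
        processTables(resumeAt, tableCount);
      });
      return;
    }
    /* ... handle table 'index' here ... */
    index++;
    done++;
  }
  std::printf("all %u tables processed\n", tableCount);
}

int main()
{
  jobBuffer.push_back([] { processTables(0, 350); });
  while (!jobBuffer.empty()) {              // toy scheduler loop
    std::function<void()> job = jobBuffer.front();
    jobBuffer.pop_front();
    job();
  }
  return 0;
}
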
- /* ----------------------------------------------------------------------- */ - Uint32 TloopCount = 0; - - if (!c_nodeStartMaster.activeState) { - jam(); - /* --------------------------------------------------------------------- */ - // Obviously the node crashed in the middle of its node restart. We will - // stop this process simply by returning after resetting the wait indicator. - /* ---------------------------------------------------------------------- */ - c_nodeStartMaster.wait = ZFALSE; - return; - }//if - TabRecordPtr tabPtr; - tabPtr.i = tableId; - while (tabPtr.i < ctabFileSize) { - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) { - /* -------------------------------------------------------------------- */ - // The table is defined. We will start by packing the table into pages. - // The tabCopyStatus indicates to the CONTINUEB(ZPACK_TABLE_INTO_PAGES) - // who called it. After packing the table into page(s) it will be sent to - // the starting node by COPY_TABREQ signals. After returning from the - // starting node we will return to this subroutine and continue - // with the next table. - /* -------------------------------------------------------------------- */ - ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE); - tabPtr.p->tabCopyStatus = TabRecord::CS_COPY_NODE_STATE; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - } else { - jam(); - if (TloopCount > 100) { - /* ------------------------------------------------------------------ */ - // Introduce real-time break after looping through 100 not copied tables - /* ----------------------------------------------------------------- */ - jam(); - signal->theData[0] = DihContinueB::ZCOPY_NODE; - signal->theData[1] = tabPtr.i + 1; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - } else { - jam(); - TloopCount++; - tabPtr.i++; - }//if - }//if - }//while - dihCopyCompletedLab(signal); - return; -}//Dbdih::copyNodeLab() - -void Dbdih::tableCopyNodeLab(Signal* signal, TabRecordPtr tabPtr) -{ - /* ----------------------------------------------------------------------- */ - /* COPY PAGES READ TO STARTING NODE. */ - /* ----------------------------------------------------------------------- */ - if (!c_nodeStartMaster.activeState) { - jam(); - releaseTabPages(tabPtr.i); - c_nodeStartMaster.wait = ZFALSE; - return; - }//if - NodeRecordPtr copyNodePtr; - PageRecordPtr pagePtr; - copyNodePtr.i = c_nodeStartMaster.startNode; - ptrCheckGuard(copyNodePtr, MAX_NDB_NODES, nodeRecord); - - copyNodePtr.p->activeTabptr = tabPtr.i; - pagePtr.i = tabPtr.p->pageRef[0]; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - - signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE; - signal->theData[1] = tabPtr.i; - signal->theData[2] = copyNodePtr.i; - signal->theData[3] = 0; - signal->theData[4] = 0; - signal->theData[5] = pagePtr.p->word[34]; - sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB); -}//Dbdih::tableCopyNodeLab() - -/* ------------------------------------------------------------------------- */ -// execCONTINUEB(ZCOPY_TABLE) -// This routine is used to copy the table descriptions from the master to -// other nodes. It is used in the system restart to copy from master to all -// starting nodes. 
-/* ------------------------------------------------------------------------- */ -void Dbdih::copyTableLab(Signal* signal, Uint32 tableId) -{ - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrAss(tabPtr, tabRecord); - - ndbrequire(tabPtr.p->tabCopyStatus == TabRecord::CS_IDLE); - tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE2_READ_TABLE; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dbdih::copyTableLab() - -/* ------------------------------------------------------------------------- */ -// execCONTINUEB(ZSR_PHASE2_READ_TABLE) -/* ------------------------------------------------------------------------- */ -void Dbdih::srPhase2ReadTableLab(Signal* signal, TabRecordPtr tabPtr) -{ - /* ----------------------------------------------------------------------- */ - // We set the sendCOPY_TABREQState to ZACTIVE for all nodes since it is a long - // process to send off all table descriptions. Thus we ensure that we do - // not encounter race conditions where one node is completed before the - // sending process is completed. This could lead to that we start off the - // system before we actually finished all copying of table descriptions - // and could lead to strange errors. - /* ----------------------------------------------------------------------- */ - - //sendLoopMacro(COPY_TABREQ, nullRoutine); - - breakCopyTableLab(signal, tabPtr, cfirstAliveNode); - return; -}//Dbdih::srPhase2ReadTableLab() - -/* ------------------------------------------------------------------------- */ -/* COPY PAGES READ TO ALL NODES. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::breakCopyTableLab(Signal* signal, TabRecordPtr tabPtr, Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - while (nodePtr.i != RNIL) { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.i == getOwnNodeId()){ - jam(); - /* ------------------------------------------------------------------- */ - /* NOT NECESSARY TO COPY TO MY OWN NODE. I ALREADY HAVE THE PAGES. */ - /* I DO HOWEVER NEED TO STORE THE TABLE DESCRIPTION ONTO DISK. */ - /* ------------------------------------------------------------------- */ - /* IF WE ARE MASTER WE ONLY NEED TO SAVE THE TABLE ON DISK. WE ALREADY */ - /* HAVE THE TABLE DESCRIPTION IN THE DATA STRUCTURES. */ - // AFTER COMPLETING THE WRITE TO DISK THE MASTER WILL ALSO SEND - // COPY_TABCONF AS ALL THE OTHER NODES. - /* ------------------------------------------------------------------- */ - c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i); - tabPtr.p->tabUpdateState = TabRecord::US_COPY_TAB_REQ; - signal->theData[0] = DihContinueB::ZTABLE_UPDATE; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - nodePtr.i = nodePtr.p->nextNode; - } else { - PageRecordPtr pagePtr; - /* -------------------------------------------------------------------- */ - // RATHER THAN SENDING ALL COPY_TABREQ IN PARALLEL WE WILL SERIALISE THIS - // ACTIVITY AND WILL THUS CALL breakCopyTableLab AGAIN WHEN COMPLETED THE - // SENDING OF COPY_TABREQ'S. 
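
// As a standalone illustration (not code from this file): the COPY_TABREQ
// round described above is tracked with a per-node bitmask
// (c_COPY_TABREQ_Counter): setWaitingFor() when the request is sent,
// clearWaitingFor() when COPY_TABCONF arrives, and the next step runs once the
// mask is empty. A stripped-down C++ analogue of that bookkeeping, using
// std::bitset instead of the real SignalCounter/NdbNodeBitmask classes:

#include <bitset>
#include <cstdint>
#include <cstdio>

// Hypothetical analogue of the per-node "waiting for reply" mask.
struct ReplyTracker {
  std::bitset<256> waiting;                     // 256 = toy upper bound on node ids

  void setWaitingFor(uint32_t nodeId)   { waiting.set(nodeId); }
  void clearWaitingFor(uint32_t nodeId) { waiting.reset(nodeId); }
  bool done() const                     { return waiting.none(); }
};

int main()
{
  ReplyTracker copyTabReq;
  const uint32_t nodes[] = {2, 3, 5};

  for (uint32_t n : nodes)                      // "send" COPY_TABREQ to each node
    copyTabReq.setWaitingFor(n);

  for (uint32_t n : nodes) {                    // "receive" COPY_TABCONF replies
    copyTabReq.clearWaitingFor(n);
    if (copyTabReq.done())
      std::printf("all nodes confirmed, continue with the next table\n");
  }
  return 0;
}
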
- /* -------------------------------------------------------------------- */ - jam(); - tabPtr.p->tabCopyStatus = TabRecord::CS_SR_PHASE3_COPY_TABLE; - pagePtr.i = tabPtr.p->pageRef[0]; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE; - signal->theData[1] = tabPtr.i; - signal->theData[2] = nodePtr.i; - signal->theData[3] = 0; - signal->theData[4] = 0; - signal->theData[5] = pagePtr.p->word[34]; - sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB); - return; - }//if - }//while - /* ----------------------------------------------------------------------- */ - /* WE HAVE NOW SENT THE TABLE PAGES TO ALL NODES. EXIT AND WAIT FOR ALL */ - /* REPLIES. */ - /* ----------------------------------------------------------------------- */ - return; -}//Dbdih::breakCopyTableLab() - -/* ------------------------------------------------------------------------- */ -// execCONTINUEB(ZCOPY_TABLE_NODE) -/* ------------------------------------------------------------------------- */ -void Dbdih::copyTableNode(Signal* signal, - CopyTableNode* ctn, NodeRecordPtr nodePtr) -{ - if (getNodeState().startLevel >= NodeState::SL_STARTED){ - /* --------------------------------------------------------------------- */ - // We are in the process of performing a node restart and are copying a - // table description to a starting node. We will check that no nodes have - // crashed in this process. - /* --------------------------------------------------------------------- */ - if (!c_nodeStartMaster.activeState) { - jam(); - /** ------------------------------------------------------------------ - * The starting node crashed. We will release table pages and stop this - * copy process and allow new node restarts to start. - * ------------------------------------------------------------------ */ - releaseTabPages(ctn->ctnTabPtr.i); - c_nodeStartMaster.wait = ZFALSE; - return; - }//if - }//if - ndbrequire(ctn->pageIndex < 8); - ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex]; - ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord); - /** - * If first page & firstWord reqinfo = 1 (first signal) - */ - Uint32 reqinfo = (ctn->pageIndex == 0) && (ctn->wordIndex == 0); - if(reqinfo == 1){ - c_COPY_TABREQ_Counter.setWaitingFor(nodePtr.i); - } - - for (Uint32 i = 0; i < 16; i++) { - jam(); - sendCopyTable(signal, ctn, calcDihBlockRef(nodePtr.i), reqinfo); - reqinfo = 0; - if (ctn->noOfWords <= 16) { - jam(); - switch (ctn->ctnTabPtr.p->tabCopyStatus) { - case TabRecord::CS_SR_PHASE3_COPY_TABLE: - /* ------------------------------------------------------------------ */ - // We have copied the table description to this node. - // We will now proceed - // with sending the table description to the next node in the node list. 
- /* ------------------------------------------------------------------ */ - jam(); - ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - breakCopyTableLab(signal, ctn->ctnTabPtr, nodePtr.p->nextNode); - return; - break; - case TabRecord::CS_COPY_NODE_STATE: - jam(); - ctn->ctnTabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - return; - break; - default: - ndbrequire(false); - break; - }//switch - } else { - jam(); - ctn->wordIndex += 16; - if (ctn->wordIndex == 2048) { - jam(); - ctn->wordIndex = 0; - ctn->pageIndex++; - ndbrequire(ctn->pageIndex < 8); - ctn->ctnPageptr.i = ctn->ctnTabPtr.p->pageRef[ctn->pageIndex]; - ptrCheckGuard(ctn->ctnPageptr, cpageFileSize, pageRecord); - }//if - ctn->noOfWords -= 16; - }//if - }//for - signal->theData[0] = DihContinueB::ZCOPY_TABLE_NODE; - signal->theData[1] = ctn->ctnTabPtr.i; - signal->theData[2] = nodePtr.i; - signal->theData[3] = ctn->pageIndex; - signal->theData[4] = ctn->wordIndex; - signal->theData[5] = ctn->noOfWords; - sendSignal(reference(), GSN_CONTINUEB, signal, 6, JBB); -}//Dbdih::copyTableNodeLab() - -void Dbdih::sendCopyTable(Signal* signal, CopyTableNode* ctn, - BlockReference ref, Uint32 reqinfo) -{ - signal->theData[0] = reference(); - signal->theData[1] = reqinfo; - signal->theData[2] = ctn->ctnTabPtr.i; - signal->theData[3] = ctn->ctnTabPtr.p->schemaVersion; - signal->theData[4] = ctn->noOfWords; - ndbrequire(ctn->wordIndex + 15 < 2048); - MEMCOPY_NO_WORDS(&signal->theData[5], &ctn->ctnPageptr.p->word[ctn->wordIndex], 16); - sendSignal(ref, GSN_COPY_TABREQ, signal, 21, JBB); -}//Dbdih::sendCopyTable() - -void Dbdih::execCOPY_TABCONF(Signal* signal) -{ - NodeRecordPtr nodePtr; - jamEntry(); - nodePtr.i = signal->theData[0]; - Uint32 tableId = signal->theData[1]; - if (getNodeState().startLevel >= NodeState::SL_STARTED){ - /* --------------------------------------------------------------------- */ - // We are in the process of performing a node restart. Continue by copying - // the next table to the starting node. - /* --------------------------------------------------------------------- */ - jam(); - NodeRecordPtr nodePtr; - nodePtr.i = signal->theData[0]; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - c_COPY_TABREQ_Counter.clearWaitingFor(nodePtr.i); - - releaseTabPages(tableId); - signal->theData[0] = DihContinueB::ZCOPY_NODE; - signal->theData[1] = tableId + 1; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - } else { - /* --------------------------------------------------------------------- */ - // We are in the process of performing a system restart. Check if all nodes - // have saved the new table description to file and then continue with the - // next table. - /* --------------------------------------------------------------------- */ - receiveLoopMacro(COPY_TABREQ, nodePtr.i); - /* --------------------------------------------------------------------- */ - /* WE HAVE NOW COPIED TO ALL NODES. WE HAVE NOW COMPLETED RESTORING */ - /* THIS TABLE. CONTINUE WITH THE NEXT TABLE. */ - /* WE NEED TO RELEASE THE PAGES IN THE TABLE IN THIS NODE HERE. */ - /* WE ALSO NEED TO CLOSE THE TABLE FILE. 
*/ - /* --------------------------------------------------------------------- */ - releaseTabPages(tableId); - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - ConnectRecordPtr connectPtr; - connectPtr.i = tabPtr.p->connectrec; - ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord); - - sendAddFragreq(signal, connectPtr, tabPtr, 0); - return; - }//if -}//Dbdih::execCOPY_TABCONF() - -/* - 3.13 L O C A L C H E C K P O I N T (M A S T E R) - **************************************************** - */ -/*****************************************************************************/ -/* ********** LOCAL-CHECK-POINT-HANDLING MODULE *************/ -/*****************************************************************************/ -/* ------------------------------------------------------------------------- */ -/* IT IS TIME TO CHECK IF IT IS TIME TO START A LOCAL CHECKPOINT. */ -/* WE WILL EITHER START AFTER 1 MILLION WORDS HAVE ARRIVED OR WE WILL */ -/* EXECUTE AFTER ABOUT 16 MINUTES HAVE PASSED BY. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::checkTcCounterLab(Signal* signal) -{ - CRASH_INSERTION(7009); - if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) { - g_eventLogger.error("lcpStatus = %u" - "lcpStatusUpdatedPlace = %d", - (Uint32) c_lcpState.lcpStatus, - c_lcpState.lcpStatusUpdatedPlace); - ndbrequire(false); - return; - }//if - c_lcpState.ctimer += 32; - if ((c_nodeStartMaster.blockLcp == true) || - (c_lcpState.lcpStopGcp >= c_newest_restorable_gci)) { - jam(); - /* --------------------------------------------------------------------- */ - // No reason to start juggling the states and checking for start of LCP if - // we are blocked to start an LCP anyway. - // We also block LCP start if we have not completed one global checkpoints - // before starting another local checkpoint. - /* --------------------------------------------------------------------- */ - signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER; - signal->theData[1] = __LINE__; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2); - return; - }//if - c_lcpState.setLcpStatus(LCP_TCGET, __LINE__); - - c_lcpState.ctcCounter = c_lcpState.ctimer; - sendLoopMacro(TCGETOPSIZEREQ, sendTCGETOPSIZEREQ); -}//Dbdih::checkTcCounterLab() - -void Dbdih::checkLcpStart(Signal* signal, Uint32 lineNo) -{ - /* ----------------------------------------------------------------------- */ - // Verify that we are not attempting to start another instance of the LCP - // when it is not alright to do so. 
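
// As a standalone illustration (not code from this file): the comments at the
// top of this module describe the LCP trigger used by checkTcCounterLab and
// execTCGETOPSIZECONF below: TC operation sizes (plus a small per-round
// constant) accumulate in a counter, and a new local checkpoint starts once
// the counter reaches 2^clcpDelay words (about one million words for
// clcpDelay = 20) or an immediate start is requested. A toy C++ version of
// just that threshold test; the per-round numbers are made up for the example:

#include <cstdint>
#include <cstdio>

// Illustrative threshold check: mirrors "ctcCounter < (1u << clcpDelay)".
static bool shouldStartLcp(uint64_t ctcCounter, uint32_t clcpDelay,
                           bool immediateLcpStart)
{
  return immediateLcpStart || ctcCounter >= (uint64_t(1) << clcpDelay);
}

int main()
{
  const uint32_t clcpDelay = 20;          // assumed: threshold of 2^20 ~ 1M words
  uint64_t counter = 0;

  // Simulate polling rounds: 32 words of "timer credit" plus reported TC load.
  for (int round = 0;
       round < 2000 && !shouldStartLcp(counter, clcpDelay, false);
       round++)
    counter += 32 + 800;                  // 800 = made-up per-round TC op size

  std::printf("LCP triggered after accumulating %llu words\n",
              (unsigned long long)counter);
  return 0;
}
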
- /* ----------------------------------------------------------------------- */ - ndbrequire(c_lcpState.lcpStart == ZIDLE); - c_lcpState.lcpStart = ZACTIVE; - signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER; - signal->theData[1] = lineNo; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); -}//Dbdih::checkLcpStart() - -/* ------------------------------------------------------------------------- */ -/*TCGETOPSIZECONF HOW MUCH OPERATION SIZE HAVE BEEN EXECUTED BY TC */ -/* ------------------------------------------------------------------------- */ -void Dbdih::execTCGETOPSIZECONF(Signal* signal) -{ - jamEntry(); - Uint32 senderNodeId = signal->theData[0]; - c_lcpState.ctcCounter += signal->theData[1]; - - receiveLoopMacro(TCGETOPSIZEREQ, senderNodeId); - - ndbrequire(c_lcpState.lcpStatus == LCP_TCGET); - ndbrequire(c_lcpState.lcpStart == ZACTIVE); - /* ----------------------------------------------------------------------- */ - // We are not actively starting another LCP, still we receive this signal. - // This is not ok. - /* ---------------------------------------------------------------------- */ - /* ALL TC'S HAVE RESPONDED NOW. NOW WE WILL CHECK IF ENOUGH OPERATIONS */ - /* HAVE EXECUTED TO ENABLE US TO START A NEW LOCAL CHECKPOINT. */ - /* WHILE COPYING DICTIONARY AND DISTRIBUTION INFO TO A STARTING NODE */ - /* WE WILL ALSO NOT ALLOW THE LOCAL CHECKPOINT TO PROCEED. */ - /*----------------------------------------------------------------------- */ - if (c_lcpState.immediateLcpStart == false) { - if ((c_lcpState.ctcCounter < - ((Uint32)1 << c_lcpState.clcpDelay)) || - (c_nodeStartMaster.blockLcp == true)) { - jam(); - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - - signal->theData[0] = DihContinueB::ZCHECK_TC_COUNTER; - signal->theData[1] = __LINE__; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1 * 100, 2); - return; - }//if - }//if - c_lcpState.lcpStart = ZIDLE; - c_lcpState.immediateLcpStart = false; - /* ----------------------------------------------------------------------- - * Now the initial lcp is started, - * we can reset the delay to its orginal value - * --------------------------------------------------------------------- */ - CRASH_INSERTION(7010); - /* ----------------------------------------------------------------------- */ - /* IF MORE THAN 1 MILLION WORDS PASSED THROUGH THE TC'S THEN WE WILL */ - /* START A NEW LOCAL CHECKPOINT. CLEAR CTIMER. START CHECKPOINT */ - /* ACTIVITY BY CALCULATING THE KEEP GLOBAL CHECKPOINT. */ - // Also remember the current global checkpoint to ensure that we run at least - // one global checkpoints between each local checkpoint that we start up. - /* ----------------------------------------------------------------------- */ - c_lcpState.ctimer = 0; - c_lcpState.keepGci = coldgcp; - /* ----------------------------------------------------------------------- */ - /* UPDATE THE NEW LATEST LOCAL CHECKPOINT ID. 
*/ - /* ----------------------------------------------------------------------- */ - cnoOfActiveTables = 0; - c_lcpState.setLcpStatus(LCP_CALCULATE_KEEP_GCI, __LINE__); - c_lcpState.oldestRestorableGci = SYSFILE->oldestRestorableGCI; - ndbrequire(((int)c_lcpState.oldestRestorableGci) > 0); - - if (ERROR_INSERTED(7011)) { - signal->theData[0] = NDB_LE_LCPStoppedInCalcKeepGci; - signal->theData[1] = 0; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - return; - }//if - signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI; - signal->theData[1] = 0; /* TABLE ID = 0 */ - signal->theData[2] = 0; /* FRAGMENT ID = 0 */ - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; -}//Dbdih::execTCGETOPSIZECONF() - -/* ------------------------------------------------------------------------- */ -/* WE NEED TO CALCULATE THE OLDEST GLOBAL CHECKPOINT THAT WILL BE */ -/* COMPLETELY RESTORABLE AFTER EXECUTING THIS LOCAL CHECKPOINT. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId) -{ - TabRecordPtr tabPtr; - Uint32 TloopCount = 1; - tabPtr.i = tableId; - do { - if (tabPtr.i >= ctabFileSize) { - if (cnoOfActiveTables > 0) { - jam(); - signal->theData[0] = DihContinueB::ZSTORE_NEW_LCP_ID; - sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB); - return; - } else { - jam(); - /* ------------------------------------------------------------------ */ - /* THERE ARE NO TABLES TO CHECKPOINT. WE STOP THE CHECKPOINT ALREADY */ - /* HERE TO AVOID STRANGE PROBLEMS LATER. */ - /* ------------------------------------------------------------------ */ - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - checkLcpStart(signal, __LINE__); - return; - }//if - }//if - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE || - tabPtr.p->tabStorage != TabRecord::ST_NORMAL) { - if (TloopCount > 100) { - jam(); - signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI; - signal->theData[1] = tabPtr.i + 1; - signal->theData[2] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; - } else { - jam(); - TloopCount++; - tabPtr.i++; - }//if - } else { - jam(); - TloopCount = 0; - }//if - } while (TloopCount != 0); - cnoOfActiveTables++; - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - checkKeepGci(tabPtr, fragId, fragPtr.p, fragPtr.p->storedReplicas); - checkKeepGci(tabPtr, fragId, fragPtr.p, fragPtr.p->oldStoredReplicas); - fragId++; - if (fragId >= tabPtr.p->totalfragments) { - jam(); - tabPtr.i++; - fragId = 0; - }//if - signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragId; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; -}//Dbdih::calculateKeepGciLab() - -/* ------------------------------------------------------------------------- */ -/* WE NEED TO STORE ON DISK THE FACT THAT WE ARE STARTING THIS LOCAL */ -/* CHECKPOINT ROUND. THIS WILL INVALIDATE ALL THE LOCAL CHECKPOINTS */ -/* THAT WILL EVENTUALLY BE OVERWRITTEN AS PART OF THIS LOCAL CHECKPOINT*/ -/* ------------------------------------------------------------------------- */ -void Dbdih::storeNewLcpIdLab(Signal* signal) -{ - /***************************************************************************/ - // Report the event that a local checkpoint has started. 
- /***************************************************************************/ - signal->theData[0] = NDB_LE_LocalCheckpointStarted; //Event type - signal->theData[1] = SYSFILE->latestLCP_ID + 1; - signal->theData[2] = c_lcpState.keepGci; - signal->theData[3] = c_lcpState.oldestRestorableGci; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - - signal->setTrace(TestOrd::TraceLocalCheckpoint); - - CRASH_INSERTION(7013); - SYSFILE->keepGCI = c_lcpState.keepGci; - //Uint32 lcpId = SYSFILE->latestLCP_ID; - SYSFILE->latestLCP_ID++; - SYSFILE->oldestRestorableGCI = c_lcpState.oldestRestorableGci; - - const Uint32 oldestRestorableGCI = SYSFILE->oldestRestorableGCI; - //const Uint32 newestRestorableGCI = SYSFILE->newestRestorableGCI; - //ndbrequire(newestRestorableGCI >= oldestRestorableGCI); - - Int32 val = oldestRestorableGCI; - ndbrequire(val > 0); - - /* ----------------------------------------------------------------------- */ - /* SET BIT INDICATING THAT LOCAL CHECKPOINT IS ONGOING. THIS IS CLEARED */ - /* AT THE END OF A LOCAL CHECKPOINT. */ - /* ----------------------------------------------------------------------- */ - SYSFILE->setLCPOngoing(SYSFILE->systemRestartBits); - /* ---------------------------------------------------------------------- */ - /* CHECK IF ANY NODE MUST BE TAKEN OUT OF SERVICE AND REFILLED WITH */ - /* NEW FRESH DATA FROM AN ACTIVE NODE. */ - /* ---------------------------------------------------------------------- */ - setLcpActiveStatusStart(signal); - c_lcpState.setLcpStatus(LCP_COPY_GCI, __LINE__); - //#ifdef VM_TRACE - // infoEvent("LocalCheckpoint %d started", SYSFILE->latestLCP_ID); - // signal->theData[0] = 7012; - // execDUMP_STATE_ORD(signal); - //#endif - - copyGciLab(signal, CopyGCIReq::LOCAL_CHECKPOINT); -}//Dbdih::storeNewLcpIdLab() - -void Dbdih::startLcpRoundLab(Signal* signal) { - jam(); - - Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle); - Callback c = { safe_cast(&Dbdih::startLcpMutex_locked), 0 }; - ndbrequire(mutex.lock(c)); -} - -void -Dbdih::startLcpMutex_locked(Signal* signal, Uint32 senderData, Uint32 retVal){ - jamEntry(); - ndbrequire(retVal == 0); - - StartLcpReq* req = (StartLcpReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->lcpId = SYSFILE->latestLCP_ID; - req->participatingLQH = c_lcpState.m_participatingLQH; - req->participatingDIH = c_lcpState.m_participatingDIH; - sendLoopMacro(START_LCP_REQ, sendSTART_LCP_REQ); -} -void -Dbdih::sendSTART_LCP_REQ(Signal* signal, Uint32 nodeId){ - BlockReference ref = calcDihBlockRef(nodeId); - sendSignal(ref, GSN_START_LCP_REQ, signal, StartLcpReq::SignalLength, JBB); -} - -void -Dbdih::execSTART_LCP_CONF(Signal* signal){ - StartLcpConf * conf = (StartLcpConf*)signal->getDataPtr(); - - Uint32 nodeId = refToNode(conf->senderRef); - receiveLoopMacro(START_LCP_REQ, nodeId); - - Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle); - Callback c = { safe_cast(&Dbdih::startLcpMutex_unlocked), 0 }; - mutex.unlock(c); -} - -void -Dbdih::startLcpMutex_unlocked(Signal* signal, Uint32 data, Uint32 retVal){ - jamEntry(); - ndbrequire(retVal == 0); - - Mutex mutex(signal, c_mutexMgr, c_startLcpMutexHandle); - mutex.release(); - - CRASH_INSERTION(7014); - c_lcpState.setLcpStatus(LCP_TC_CLOPSIZE, __LINE__); - sendLoopMacro(TC_CLOPSIZEREQ, sendTC_CLOPSIZEREQ); -} - -void Dbdih::execTC_CLOPSIZECONF(Signal* signal) { - jamEntry(); - Uint32 senderNodeId = signal->theData[0]; - receiveLoopMacro(TC_CLOPSIZEREQ, senderNodeId); - - ndbrequire(c_lcpState.lcpStatus == 
LCP_TC_CLOPSIZE); - /* ----------------------------------------------------------------------- */ - /* ALL TC'S HAVE CLEARED THEIR OPERATION SIZE COUNTERS. NOW PROCEED BY */ - /* STARTING THE LOCAL CHECKPOINT IN EACH LQH. */ - /* ----------------------------------------------------------------------- */ - c_lcpState.m_LAST_LCP_FRAG_ORD = c_lcpState.m_participatingLQH; - - CRASH_INSERTION(7015); - c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__); - startLcpRoundLoopLab(signal, 0, 0); -}//Dbdih::execTC_CLOPSIZECONF() - -void Dbdih::startLcpRoundLoopLab(Signal* signal, - Uint32 startTableId, Uint32 startFragId) -{ - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - ndbrequire(nodePtr.p->noOfStartedChkpt == 0); - ndbrequire(nodePtr.p->noOfQueuedChkpt == 0); - }//if - }//if - c_lcpState.currentFragment.tableId = startTableId; - c_lcpState.currentFragment.fragmentId = startFragId; - startNextChkpt(signal); -}//Dbdih::startLcpRoundLoopLab() - -void Dbdih::startNextChkpt(Signal* signal) -{ - Uint32 lcpId = SYSFILE->latestLCP_ID; - - NdbNodeBitmask busyNodes; - busyNodes.clear(); - const Uint32 lcpNodes = c_lcpState.m_participatingLQH.count(); - - bool save = true; - LcpState::CurrentFragment curr = c_lcpState.currentFragment; - - while (curr.tableId < ctabFileSize) { - TabRecordPtr tabPtr; - tabPtr.i = curr.tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if ((tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) || - (tabPtr.p->tabLcpStatus != TabRecord::TLS_ACTIVE)) { - curr.tableId++; - curr.fragmentId = 0; - continue; - }//if - - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, curr.fragmentId, fragPtr); - - ReplicaRecordPtr replicaPtr; - for(replicaPtr.i = fragPtr.p->storedReplicas; - replicaPtr.i != RNIL ; - replicaPtr.i = replicaPtr.p->nextReplica){ - - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - - NodeRecordPtr nodePtr; - nodePtr.i = replicaPtr.p->procNode; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - if (c_lcpState.m_participatingLQH.get(nodePtr.i)) - { - if (replicaPtr.p->lcpOngoingFlag && - replicaPtr.p->lcpIdStarted < lcpId) - { - jam(); - //------------------------------------------------------------------- - // We have found a replica on a node that performs local checkpoint - // that is alive and that have not yet been started. 
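
// As a standalone illustration (not code from this file): startNextChkpt
// (which continues below) hands LCP_FRAG_ORD work to the owning LQH nodes but
// caps each node at two fragment checkpoints in flight plus two queued; once
// every participating node is saturated it stops issuing work until
// LCP_FRAG_REP frees a slot. A small C++ sketch of that per-node throttle,
// with invented types:

#include <cstdint>
#include <cstdio>

// Invented stand-ins for the per-node checkpoint bookkeeping in NodeRecord.
struct Node {
  uint32_t started = 0;   // LCP_FRAG_ORDs sent and not yet reported back
  uint32_t queued  = 0;   // LCP_FRAG_ORDs waiting for a free "started" slot
};

// Try to hand one fragment checkpoint to a node; returns false when the node
// is saturated (two started and two queued), i.e. the caller should back off.
bool offerFragmentCheckpoint(Node& node)
{
  if (node.started < 2) { node.started++; return true; }
  if (node.queued  < 2) { node.queued++;  return true; }
  return false;                                 // node is busy
}

// When an LCP_FRAG_REP arrives, promote a queued request if one is waiting.
void fragmentCheckpointCompleted(Node& node)
{
  node.started--;
  if (node.queued > 0) { node.queued--; node.started++; }
}

int main()
{
  Node lqh;
  uint32_t accepted = 0;
  while (offerFragmentCheckpoint(lqh))          // fills 2 started + 2 queued
    accepted++;
  std::printf("accepted %u, then blocked\n", accepted);

  fragmentCheckpointCompleted(lqh);             // one report frees a slot
  std::printf("after one report: started=%u queued=%u\n",
              lqh.started, lqh.queued);
  return 0;
}
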
- //------------------------------------------------------------------- - - if (nodePtr.p->noOfStartedChkpt < 2) - { - jam(); - /** - * Send LCP_FRAG_ORD to LQH - */ - - /** - * Mark the replica so with lcpIdStarted == true - */ - replicaPtr.p->lcpIdStarted = lcpId; - - Uint32 i = nodePtr.p->noOfStartedChkpt; - nodePtr.p->startedChkpt[i].tableId = tabPtr.i; - nodePtr.p->startedChkpt[i].fragId = curr.fragmentId; - nodePtr.p->startedChkpt[i].replicaPtr = replicaPtr.i; - nodePtr.p->noOfStartedChkpt = i + 1; - - sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]); - } - else if (nodePtr.p->noOfQueuedChkpt < 2) - { - jam(); - /** - * Put LCP_FRAG_ORD "in queue" - */ - - /** - * Mark the replica so with lcpIdStarted == true - */ - replicaPtr.p->lcpIdStarted = lcpId; - - Uint32 i = nodePtr.p->noOfQueuedChkpt; - nodePtr.p->queuedChkpt[i].tableId = tabPtr.i; - nodePtr.p->queuedChkpt[i].fragId = curr.fragmentId; - nodePtr.p->queuedChkpt[i].replicaPtr = replicaPtr.i; - nodePtr.p->noOfQueuedChkpt = i + 1; - } - else - { - jam(); - - if(save) - { - /** - * Stop increasing value on first that was "full" - */ - c_lcpState.currentFragment = curr; - save = false; - } - - busyNodes.set(nodePtr.i); - if(busyNodes.count() == lcpNodes) - { - /** - * There were no possibility to start the local checkpoint - * and it was not possible to queue it up. In this case we - * stop the start of local checkpoints until the nodes with a - * backlog have performed more checkpoints. We will return and - * will not continue the process of starting any more checkpoints. - */ - return; - }//if - }//if - } - }//while - } - curr.fragmentId++; - if (curr.fragmentId >= tabPtr.p->totalfragments) { - jam(); - curr.fragmentId = 0; - curr.tableId++; - }//if - }//while - - sendLastLCP_FRAG_ORD(signal); -}//Dbdih::startNextChkpt() - -void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal) -{ - LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0]; - lcpFragOrd->tableId = RNIL; - lcpFragOrd->fragmentId = 0; - lcpFragOrd->lcpId = SYSFILE->latestLCP_ID; - lcpFragOrd->lcpNo = 0; - lcpFragOrd->keepGci = c_lcpState.keepGci; - lcpFragOrd->lastFragmentFlag = true; - - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - - if(nodePtr.p->noOfQueuedChkpt == 0 && - nodePtr.p->noOfStartedChkpt == 0 && - c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodePtr.i)){ - jam(); - - CRASH_INSERTION(7028); - - /** - * Nothing queued or started <=> Complete on that node - * - */ - c_lcpState.m_LAST_LCP_FRAG_ORD.clearWaitingFor(nodePtr.i); - if(ERROR_INSERTED(7075)){ - continue; - } - - CRASH_INSERTION(7193); - BlockReference ref = calcLqhBlockRef(nodePtr.i); - sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB); - } - } - if(ERROR_INSERTED(7075)){ - if(c_lcpState.m_LAST_LCP_FRAG_ORD.done()) - CRASH_INSERTION(7075); - } -}//Dbdih::sendLastLCP_FRAGORD() - -/* ------------------------------------------------------------------------- */ -/* A FRAGMENT REPLICA HAS COMPLETED EXECUTING ITS LOCAL CHECKPOINT. */ -/* CHECK IF ALL REPLICAS IN THE TABLE HAVE COMPLETED. IF SO STORE THE */ -/* THE TABLE DISTRIBUTION ON DISK. ALSO SEND LCP_REPORT TO ALL OTHER */ -/* NODES SO THAT THEY CAN STORE THE TABLE ONTO DISK AS WELL. 
*/ -/* ------------------------------------------------------------------------- */ -void Dbdih::execLCP_FRAG_REP(Signal* signal) -{ - jamEntry(); - ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE); - -#if 0 - printLCP_FRAG_REP(stdout, - signal->getDataPtr(), - signal->length(), number()); -#endif - - LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0]; - Uint32 nodeId = lcpReport->nodeId; - Uint32 tableId = lcpReport->tableId; - Uint32 fragId = lcpReport->fragId; - - jamEntry(); - - if (ERROR_INSERTED(7178) && nodeId != getOwnNodeId()) - { - jam(); - Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups); - Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups); - if (owng == nodeg) - { - jam(); - ndbout_c("throwing away LCP_FRAG_REP from (and killing) %d", nodeId); - SET_ERROR_INSERT_VALUE(7179); - signal->theData[0] = 9999; - sendSignal(numberToRef(CMVMI, nodeId), - GSN_NDB_TAMPER, signal, 1, JBA); - return; - } - } - - if (ERROR_INSERTED(7179) && nodeId != getOwnNodeId()) - { - jam(); - Uint32 owng =Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups); - Uint32 nodeg = Sysfile::getNodeGroup(nodeId, SYSFILE->nodeGroups); - if (owng == nodeg) - { - jam(); - ndbout_c("throwing away LCP_FRAG_REP from %d", nodeId); - return; - } - } - - CRASH_INSERTION2(7025, isMaster()); - CRASH_INSERTION2(7016, !isMaster()); - - bool fromTimeQueue = (signal->senderBlockRef() == reference()); - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if(tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) { - jam(); - /*-----------------------------------------------------------------------*/ - // If the table is currently copied to disk we also - // stop already here to avoid strange half-way updates - // of the table data structures. - /*-----------------------------------------------------------------------*/ - /* - We need to send this signal without a delay since we have discovered - that we have run out of space in the short time queue. This problem - is very erunlikely to happen but it has and it results in a node crash. - This should be considered a "quick fix" and not a permanent solution. - A cleaner/better way would be to check the time queue if it is full or - not before sending this signal. 
- */ - sendSignal(reference(), GSN_LCP_FRAG_REP, signal, signal->length(), JBB); - /* Kept here for reference - sendSignalWithDelay(reference(), GSN_LCP_FRAG_REP, - signal, 20, signal->length()); - */ - - if(!fromTimeQueue){ - c_lcpState.noOfLcpFragRepOutstanding++; - } - - return; - }//if - - if(fromTimeQueue){ - jam(); - - ndbrequire(c_lcpState.noOfLcpFragRepOutstanding > 0); - c_lcpState.noOfLcpFragRepOutstanding--; - } - - bool tableDone = reportLcpCompletion(lcpReport); - - Uint32 started = lcpReport->maxGciStarted; - Uint32 completed = lcpReport->maxGciCompleted; - - if (started > c_lcpState.lcpStopGcp) - { - jam(); - c_lcpState.lcpStopGcp = started; - } - - if(tableDone){ - jam(); - - if(tabPtr.p->tabStatus == TabRecord::TS_DROPPING){ - jam(); - g_eventLogger.info("TS_DROPPING - Neglecting to save Table: %d Frag: %d - ", - tableId, fragId); - } else { - jam(); - /** - * Write table description to file - */ - tabPtr.p->tabLcpStatus = TabRecord::TLS_WRITING_TO_FILE; - tabPtr.p->tabCopyStatus = TabRecord::CS_LCP_READ_TABLE; - tabPtr.p->tabUpdateState = TabRecord::US_LOCAL_CHECKPOINT; - signal->theData[0] = DihContinueB::ZPACK_TABLE_INTO_PAGES; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - - checkLcpAllTablesDoneInLqh(); - } - } - -#ifdef VM_TRACE - /* --------------------------------------------------------------------- */ - // REPORT that local checkpoint have completed this fragment. - /* --------------------------------------------------------------------- */ - signal->theData[0] = NDB_LE_LCPFragmentCompleted; - signal->theData[1] = nodeId; - signal->theData[2] = tableId; - signal->theData[3] = fragId; - signal->theData[4] = started; - signal->theData[5] = completed; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB); -#endif - - bool ok = false; - switch(c_lcpMasterTakeOverState.state){ - case LMTOS_IDLE: - ok = true; - jam(); - /** - * Fall through - */ - break; - case LMTOS_WAIT_EMPTY_LCP: // LCP Take over waiting for EMPTY_LCPCONF - jam(); - return; - case LMTOS_WAIT_LCP_FRAG_REP: - jam(); - checkEmptyLcpComplete(signal); - return; - case LMTOS_INITIAL: - case LMTOS_ALL_IDLE: - case LMTOS_ALL_ACTIVE: - case LMTOS_LCP_CONCLUDING: - case LMTOS_COPY_ONGOING: - ndbrequire(false); - } - ndbrequire(ok); - - /* ----------------------------------------------------------------------- */ - // Check if there are more LCP's to start up. 
- /* ----------------------------------------------------------------------- */ - if(isMaster()){ - jam(); - - /** - * Remove from "running" array - */ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - const Uint32 outstanding = nodePtr.p->noOfStartedChkpt; - ndbrequire(outstanding > 0); - if(nodePtr.p->startedChkpt[0].tableId != tableId || - nodePtr.p->startedChkpt[0].fragId != fragId){ - jam(); - ndbrequire(outstanding > 1); - ndbrequire(nodePtr.p->startedChkpt[1].tableId == tableId); - ndbrequire(nodePtr.p->startedChkpt[1].fragId == fragId); - } else { - jam(); - nodePtr.p->startedChkpt[0] = nodePtr.p->startedChkpt[1]; - } - nodePtr.p->noOfStartedChkpt--; - checkStartMoreLcp(signal, nodeId); - } -} - -bool -Dbdih::checkLcpAllTablesDoneInLqh(){ - TabRecordPtr tabPtr; - - /** - * Check if finished with all tables - */ - for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) { - jam(); - ptrAss(tabPtr, tabRecord); - if ((tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) && - (tabPtr.p->tabLcpStatus == TabRecord::TLS_ACTIVE)) { - jam(); - /** - * Nope, not finished with all tables - */ - return false; - }//if - }//for - - CRASH_INSERTION2(7026, isMaster()); - CRASH_INSERTION2(7017, !isMaster()); - - c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__); - - if (ERROR_INSERTED(7194)) - { - ndbout_c("CLEARING 7194"); - CLEAR_ERROR_INSERT_VALUE; - } - - return true; -} - -void Dbdih::findReplica(ReplicaRecordPtr& replicaPtr, - Fragmentstore* fragPtrP, - Uint32 nodeId, - bool old) -{ - replicaPtr.i = old ? fragPtrP->oldStoredReplicas : fragPtrP->storedReplicas; - while(replicaPtr.i != RNIL){ - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if (replicaPtr.p->procNode == nodeId) { - jam(); - return; - } else { - jam(); - replicaPtr.i = replicaPtr.p->nextReplica; - }//if - }; - -#ifdef VM_TRACE - g_eventLogger.info("Fragment Replica(node=%d) not found", nodeId); - replicaPtr.i = fragPtrP->oldStoredReplicas; - while(replicaPtr.i != RNIL){ - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - if (replicaPtr.p->procNode == nodeId) { - jam(); - break; - } else { - jam(); - replicaPtr.i = replicaPtr.p->nextReplica; - }//if - }; - if(replicaPtr.i != RNIL){ - g_eventLogger.info("...But was found in oldStoredReplicas"); - } else { - g_eventLogger.info("...And wasn't found in oldStoredReplicas"); - } -#endif - ndbrequire(false); -}//Dbdih::findReplica() - - -int -Dbdih::handle_invalid_lcp_no(const LcpFragRep* rep, - ReplicaRecordPtr replicaPtr) -{ - ndbrequire(!isMaster()); - Uint32 lcpNo = rep->lcpNo; - Uint32 lcpId = rep->lcpId; - - warningEvent("Detected previous node failure of %d during lcp", - rep->nodeId); - replicaPtr.p->nextLcp = lcpNo; - replicaPtr.p->lcpId[lcpNo] = 0; - replicaPtr.p->lcpStatus[lcpNo] = ZINVALID; - - for (Uint32 i = lcpNo; i != lcpNo; i = nextLcpNo(i)) - { - jam(); - if (replicaPtr.p->lcpStatus[i] == ZVALID && - replicaPtr.p->lcpId[i] >= lcpId) - { - ndbout_c("i: %d lcpId: %d", i, replicaPtr.p->lcpId[i]); - ndbrequire(false); - } - } - - return 0; -} - -/** - * Return true if table is all fragment replicas have been checkpointed - * to disk (in all LQHs) - * false otherwise - */ -bool -Dbdih::reportLcpCompletion(const LcpFragRep* lcpReport) -{ - Uint32 lcpNo = lcpReport->lcpNo; - Uint32 lcpId = lcpReport->lcpId; - Uint32 maxGciStarted = lcpReport->maxGciStarted; - Uint32 maxGciCompleted = lcpReport->maxGciCompleted; - Uint32 tableId = lcpReport->tableId; - Uint32 fragId = lcpReport->fragId; - Uint32 nodeId 
= lcpReport->nodeId; - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - - ReplicaRecordPtr replicaPtr; - findReplica(replicaPtr, fragPtr.p, nodeId); - - ndbrequire(replicaPtr.p->lcpOngoingFlag == true); - if(lcpNo != replicaPtr.p->nextLcp){ - if (handle_invalid_lcp_no(lcpReport, replicaPtr)) - { - g_eventLogger.error("lcpNo = %d replicaPtr.p->nextLcp = %d", - lcpNo, replicaPtr.p->nextLcp); - ndbrequire(false); - } - } - ndbrequire(lcpNo == replicaPtr.p->nextLcp); - ndbrequire(lcpNo < MAX_LCP_STORED); - ndbrequire(replicaPtr.p->lcpId[lcpNo] != lcpId); - - replicaPtr.p->lcpIdStarted = lcpId; - replicaPtr.p->lcpOngoingFlag = false; - - removeOldCrashedReplicas(replicaPtr); - replicaPtr.p->lcpId[lcpNo] = lcpId; - replicaPtr.p->lcpStatus[lcpNo] = ZVALID; - replicaPtr.p->maxGciStarted[lcpNo] = maxGciStarted; - gth(maxGciStarted + 1, 0); - replicaPtr.p->maxGciCompleted[lcpNo] = maxGciCompleted; - replicaPtr.p->nextLcp = nextLcpNo(replicaPtr.p->nextLcp); - - ndbrequire(fragPtr.p->noLcpReplicas > 0); - fragPtr.p->noLcpReplicas --; - - if(fragPtr.p->noLcpReplicas > 0){ - jam(); - return false; - } - - for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) { - jam(); - getFragstore(tabPtr.p, fid, fragPtr); - if (fragPtr.p->noLcpReplicas > 0){ - jam(); - /* ----------------------------------------------------------------- */ - // Not all fragments in table have been checkpointed. - /* ----------------------------------------------------------------- */ - if(0) - g_eventLogger.info("reportLcpCompletion: fragment %d not ready", fid); - return false; - }//if - }//for - return true; -}//Dbdih::reportLcpCompletion() - -void Dbdih::checkStartMoreLcp(Signal* signal, Uint32 nodeId) -{ - ndbrequire(isMaster()); - - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - ndbrequire(nodePtr.p->noOfStartedChkpt < 2); - - if (nodePtr.p->noOfQueuedChkpt > 0) { - jam(); - nodePtr.p->noOfQueuedChkpt--; - Uint32 i = nodePtr.p->noOfStartedChkpt; - nodePtr.p->startedChkpt[i] = nodePtr.p->queuedChkpt[0]; - nodePtr.p->queuedChkpt[0] = nodePtr.p->queuedChkpt[1]; - //------------------------------------------------------------------- - // We can send a LCP_FRAGORD to the node ordering it to perform a - // local checkpoint on this fragment replica. - //------------------------------------------------------------------- - nodePtr.p->noOfStartedChkpt = i + 1; - - sendLCP_FRAG_ORD(signal, nodePtr.p->startedChkpt[i]); - } - - /* ----------------------------------------------------------------------- */ - // When there are no more outstanding LCP reports and there are no one queued - // in at least one node, then we are ready to make sure all nodes have at - // least two outstanding LCP requests per node and at least two queued for - // sending. 
- /* ----------------------------------------------------------------------- */ - startNextChkpt(signal); -}//Dbdih::checkStartMoreLcp() - -void -Dbdih::sendLCP_FRAG_ORD(Signal* signal, - NodeRecord::FragmentCheckpointInfo info){ - - ReplicaRecordPtr replicaPtr; - replicaPtr.i = info.replicaPtr; - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - - BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode); - - if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId()) - { - return; - } - - LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0]; - lcpFragOrd->tableId = info.tableId; - lcpFragOrd->fragmentId = info.fragId; - lcpFragOrd->lcpId = SYSFILE->latestLCP_ID; - lcpFragOrd->lcpNo = replicaPtr.p->nextLcp; - lcpFragOrd->keepGci = c_lcpState.keepGci; - lcpFragOrd->lastFragmentFlag = false; - sendSignal(ref, GSN_LCP_FRAG_ORD, signal, LcpFragOrd::SignalLength, JBB); -} - -void Dbdih::checkLcpCompletedLab(Signal* signal) -{ - if(c_lcpState.lcpStatus < LCP_TAB_COMPLETED){ - jam(); - return; - } - - TabRecordPtr tabPtr; - for (tabPtr.i = 0; tabPtr.i < ctabFileSize; tabPtr.i++) { - jam(); - ptrAss(tabPtr, tabRecord); - if (tabPtr.p->tabStatus == TabRecord::TS_ACTIVE) { - if (tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED) { - jam(); - return; - }//if - }//if - }//for - - CRASH_INSERTION2(7027, isMaster()); - CRASH_INSERTION2(7018, !isMaster()); - - if(c_lcpState.lcpStatus == LCP_TAB_COMPLETED){ - /** - * We'r done - */ - c_lcpState.setLcpStatus(LCP_TAB_SAVED, __LINE__); - sendLCP_COMPLETE_REP(signal); - return; - } - - ndbrequire(c_lcpState.lcpStatus == LCP_TAB_SAVED); - allNodesLcpCompletedLab(signal); - return; -}//Dbdih::checkLcpCompletedLab() - -void -Dbdih::sendLCP_COMPLETE_REP(Signal* signal){ - jam(); - LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend(); - rep->nodeId = getOwnNodeId(); - rep->lcpId = SYSFILE->latestLCP_ID; - rep->blockNo = DBDIH; - - sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal, - LcpCompleteRep::SignalLength, JBB); - - /** - * Say that an initial node restart does not need to be redone - * once node has been part of first LCP - */ - if (c_set_initial_start_flag && - c_lcpState.m_participatingLQH.get(getOwnNodeId())) - { - jam(); - c_set_initial_start_flag = FALSE; - } -} - -/*-------------------------------------------------------------------------- */ -/* COMP_LCP_ROUND A LQH HAS COMPLETED A LOCAL CHECKPOINT */ -/*------------------------------------------------------------------------- */ -void Dbdih::execLCP_COMPLETE_REP(Signal* signal) -{ - jamEntry(); - - CRASH_INSERTION(7191); - -#if 0 - g_eventLogger.info("LCP_COMPLETE_REP"); - printLCP_COMPLETE_REP(stdout, - signal->getDataPtr(), - signal->length(), number()); -#endif - - LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtr(); - Uint32 lcpId = rep->lcpId; - Uint32 nodeId = rep->nodeId; - Uint32 blockNo = rep->blockNo; - - if(c_lcpMasterTakeOverState.state > LMTOS_WAIT_LCP_FRAG_REP){ - jam(); - /** - * Don't allow LCP_COMPLETE_REP to arrive during - * LCP master take over - */ - ndbrequire(isMaster()); - ndbrequire(blockNo == DBDIH); - sendSignalWithDelay(reference(), GSN_LCP_COMPLETE_REP, signal, 100, - signal->length()); - return; - } - - ndbrequire(c_lcpState.lcpStatus != LCP_STATUS_IDLE); - - switch(blockNo){ - case DBLQH: - jam(); - c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.clearWaitingFor(nodeId); - ndbrequire(!c_lcpState.m_LAST_LCP_FRAG_ORD.isWaitingFor(nodeId)); - break; - case DBDIH: - jam(); - ndbrequire(isMaster()); 
- c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.clearWaitingFor(nodeId); - break; - case 0: - jam(); - ndbrequire(!isMaster()); - ndbrequire(c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false); - c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received = true; - break; - default: - ndbrequire(false); - } - ndbrequire(lcpId == SYSFILE->latestLCP_ID); - - allNodesLcpCompletedLab(signal); - return; -} - -void Dbdih::allNodesLcpCompletedLab(Signal* signal) -{ - jam(); - - if (c_lcpState.lcpStatus != LCP_TAB_SAVED) { - jam(); - /** - * We have not sent LCP_COMPLETE_REP to master DIH yet - */ - return; - }//if - - if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.done()){ - jam(); - return; - } - - if (!c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.done()){ - jam(); - return; - } - - if (!isMaster() && - c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received == false){ - jam(); - /** - * Wait until master DIH has signaled lcp is complete - */ - return; - } - - if(c_lcpMasterTakeOverState.state != LMTOS_IDLE){ - jam(); -#ifdef VM_TRACE - g_eventLogger.info("Exiting from allNodesLcpCompletedLab"); -#endif - return; - } - - - /*------------------------------------------------------------------------ */ - /* WE HAVE NOW COMPLETED A LOCAL CHECKPOINT. WE ARE NOW READY TO WAIT */ - /* FOR THE NEXT LOCAL CHECKPOINT. SEND WITHOUT TIME-OUT SINCE IT MIGHT */ - /* BE TIME TO START THE NEXT LOCAL CHECKPOINT IMMEDIATELY. */ - /* CLEAR BIT 3 OF SYSTEM RESTART BITS TO INDICATE THAT THERE IS NO */ - /* LOCAL CHECKPOINT ONGOING. THIS WILL BE WRITTEN AT SOME LATER TIME */ - /* DURING A GLOBAL CHECKPOINT. IT IS NOT NECESSARY TO WRITE IT */ - /* IMMEDIATELY. WE WILL ALSO CLEAR BIT 2 OF SYSTEM RESTART BITS IF ALL */ - /* CURRENTLY ACTIVE NODES COMPLETED THE LOCAL CHECKPOINT. */ - /*------------------------------------------------------------------------ */ - CRASH_INSERTION(7019); - signal->setTrace(0); - - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - setLcpActiveStatusEnd(); - Sysfile::clearLCPOngoing(SYSFILE->systemRestartBits); - - if(!isMaster()){ - jam(); - /** - * We're not master, be content - */ - return; - } - - // Send LCP_COMPLETE_REP to all other nodes - // allowing them to set their lcpStatus to LCP_STATUS_IDLE - LcpCompleteRep * rep = (LcpCompleteRep*)signal->getDataPtrSend(); - rep->nodeId = getOwnNodeId(); - rep->lcpId = SYSFILE->latestLCP_ID; - rep->blockNo = 0; // 0 = Sent from master - - NodeRecordPtr nodePtr; - nodePtr.i = cfirstAliveNode; - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.i != cownNodeId){ - BlockReference ref = calcDihBlockRef(nodePtr.i); - sendSignal(ref, GSN_LCP_COMPLETE_REP, signal, - LcpCompleteRep::SignalLength, JBB); - } - nodePtr.i = nodePtr.p->nextNode; - } while (nodePtr.i != RNIL); - - - jam(); - /***************************************************************************/ - // Report the event that a local checkpoint has completed. 
- /***************************************************************************/ - signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type - signal->theData[1] = SYSFILE->latestLCP_ID; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - if (c_newest_restorable_gci > c_lcpState.lcpStopGcp) - { - jam(); - c_lcpState.lcpStopGcp = c_newest_restorable_gci; - } - - /** - * Start checking for next LCP - */ - checkLcpStart(signal, __LINE__); - - if (cwaitLcpSr == true) { - jam(); - cwaitLcpSr = false; - ndbsttorry10Lab(signal, __LINE__); - return; - }//if - - if (c_nodeStartMaster.blockLcp == true) { - jam(); - lcpBlockedLab(signal); - return; - }//if - return; -}//Dbdih::allNodesLcpCompletedLab() - -/******************************************************************************/ -/* ********** TABLE UPDATE MODULE *************/ -/* ****************************************************************************/ -/* ------------------------------------------------------------------------- */ -/* THIS MODULE IS USED TO UPDATE THE TABLE DESCRIPTION. IT STARTS BY */ -/* CREATING THE FIRST TABLE FILE, THEN UPDATES THIS FILE AND CLOSES IT.*/ -/* AFTER THAT THE SAME HAPPENS WITH THE SECOND FILE. AFTER THAT THE */ -/* TABLE DISTRIBUTION HAS BEEN UPDATED. */ -/* */ -/* THE REASON FOR CREATING THE FILE AND NOT OPENING IT IS TO ENSURE */ -/* THAT WE DO NOT GET A MIX OF OLD AND NEW INFORMATION IN THE FILE IN */ -/* ERROR SITUATIONS. */ -/* ------------------------------------------------------------------------- */ -void Dbdih::tableUpdateLab(Signal* signal, TabRecordPtr tabPtr) { - FileRecordPtr filePtr; - if(tabPtr.p->tabStorage == TabRecord::ST_TEMPORARY) { - // For temporary tables we do not write to disk. Mark both copies 0 and 1 - // as done, and go straight to the after-close code. 
- filePtr.i = tabPtr.p->tabFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - tableCloseLab(signal, filePtr); - return; - } - filePtr.i = tabPtr.p->tabFile[0]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - createFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_CREATE; - return; -}//Dbdih::tableUpdateLab() - -void Dbdih::tableCreateLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - writeTabfile(signal, tabPtr.p, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_WRITE; - return; -}//Dbdih::tableCreateLab() - -void Dbdih::tableWriteLab(Signal* signal, FileRecordPtr filePtr) -{ - closeFile(signal, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_CLOSE; - return; -}//Dbdih::tableWriteLab() - -void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr) -{ - TabRecordPtr tabPtr; - tabPtr.i = filePtr.p->tabRef; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - if (filePtr.i == tabPtr.p->tabFile[0]) { - jam(); - filePtr.i = tabPtr.p->tabFile[1]; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - createFileRw(signal, filePtr); - filePtr.p->reqStatus = FileRecord::TABLE_CREATE; - return; - }//if - switch (tabPtr.p->tabUpdateState) { - case TabRecord::US_LOCAL_CHECKPOINT: - jam(); - releaseTabPages(tabPtr.i); - signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED; - sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB); - - tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - return; - break; - case TabRecord::US_REMOVE_NODE: - jam(); - releaseTabPages(tabPtr.i); - for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) { - jam(); - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragId, fragPtr); - updateNodeInfo(fragPtr); - }//for - tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - if (tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE) { - jam(); - tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - signal->theData[0] = DihContinueB::ZCHECK_LCP_COMPLETED; - sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB); - }//if - signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE; - signal->theData[1] = tabPtr.p->tabRemoveNode; - signal->theData[2] = tabPtr.i + 1; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; - break; - case TabRecord::US_INVALIDATE_NODE_LCP: - jam(); - releaseTabPages(tabPtr.i); - tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - - signal->theData[0] = DihContinueB::ZINVALIDATE_NODE_LCP; - signal->theData[1] = tabPtr.p->tabRemoveNode; - signal->theData[2] = tabPtr.i + 1; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - return; - case TabRecord::US_COPY_TAB_REQ: - jam(); - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - copyTabReq_complete(signal, tabPtr); - return; - break; - case TabRecord::US_ADD_TABLE_MASTER: - jam(); - releaseTabPages(tabPtr.i); - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_MASTER; - signal->theData[1] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - break; - case TabRecord::US_ADD_TABLE_SLAVE: - jam(); - releaseTabPages(tabPtr.i); - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - signal->theData[0] = DihContinueB::ZDIH_ADD_TABLE_SLAVE; - signal->theData[1] = 
tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - break; - default: - ndbrequire(false); - return; - break; - }//switch -}//Dbdih::tableCloseLab() - -/** - * GCP stop detected, - * send SYSTEM_ERROR to all other alive nodes - */ -void Dbdih::crashSystemAtGcpStop(Signal* signal, bool local) -{ - if (local) - goto dolocal; - - switch(cgcpStatus){ - case GCP_PREPARE_SENT: - { - jam(); - /** - * We're waiting for a GCP PREPARE CONF - */ - infoEvent("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_PREPARE_Counter.getText()); - ndbout_c("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_PREPARE_Counter.getText()); - - { - NodeReceiverGroup rg(DBDIH, c_GCP_PREPARE_Counter); - signal->theData[0] = 7022; - sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA); - } - - { - NodeReceiverGroup rg(NDBCNTR, c_GCP_PREPARE_Counter); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::GCPStopDetected; - sysErr->errorRef = reference(); - sysErr->data1 = cgcpStatus; - sysErr->data2 = cgcpOrderBlocked; - sendSignal(rg, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBA); - } - ndbrequire(!c_GCP_PREPARE_Counter.done()); - return; - } - case GCP_COMMIT_SENT: - { - jam(); - /** - * We're waiting for a GCP_NODEFINISH - */ - infoEvent("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_COMMIT_Counter.getText()); - ndbout_c("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_COMMIT_Counter.getText()); - - { - NodeReceiverGroup rg(DBDIH, c_GCP_COMMIT_Counter); - signal->theData[0] = 7022; - sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA); - } - - { - NodeReceiverGroup rg(NDBCNTR, c_GCP_COMMIT_Counter); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::GCPStopDetected; - sysErr->errorRef = reference(); - sysErr->data1 = cgcpStatus; - sysErr->data2 = cgcpOrderBlocked; - sendSignal(rg, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBA); - } - ndbrequire(!c_GCP_COMMIT_Counter.done()); - return; - } - case GCP_NODE_FINISHED: - { - jam(); - /** - * We're waiting for a GCP save conf - */ - NodeReceiverGroup rg(DBLQH, c_GCP_SAVEREQ_Counter); - signal->theData[0] = 2305; - sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBB); - - infoEvent("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_SAVEREQ_Counter.getText()); - ndbout_c("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_GCP_SAVEREQ_Counter.getText()); - ndbrequire(!c_GCP_SAVEREQ_Counter.done()); - return; - } - case GCP_SAVE_LQH_FINISHED: - { - jam(); - /** - * We're waiting for a COPY_GCICONF - */ - infoEvent("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_COPY_GCIREQ_Counter.getText()); - ndbout_c("Detected GCP stop(%d)...sending kill to %s", - cgcpStatus, c_COPY_GCIREQ_Counter.getText()); - - { - NodeReceiverGroup rg(DBDIH, c_COPY_GCIREQ_Counter); - signal->theData[0] = 7022; - sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBA); - } - - { - NodeReceiverGroup rg(NDBCNTR, c_COPY_GCIREQ_Counter); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::GCPStopDetected; - sysErr->errorRef = reference(); - sysErr->data1 = cgcpStatus; - sysErr->data2 = cgcpOrderBlocked; - sendSignal(rg, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBA); - } - ndbrequire(!c_COPY_GCIREQ_Counter.done()); - return; - } - case GCP_READY: (void)1; - } - -dolocal: - ndbout_c("m_copyReason: %d 
m_waiting: %d", - c_copyGCIMaster.m_copyReason, - c_copyGCIMaster.m_waiting); - - ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d", - c_copyGCISlave.m_senderData, - c_copyGCISlave.m_senderRef, - c_copyGCISlave.m_copyReason, - c_copyGCISlave.m_expectedNextWord); - - FileRecordPtr file0Ptr; - file0Ptr.i = crestartInfoFile[0]; - ptrCheckGuard(file0Ptr, cfileFileSize, fileRecord); - FileRecordPtr file1Ptr; - file1Ptr.i = crestartInfoFile[1]; - ptrCheckGuard(file1Ptr, cfileFileSize, fileRecord); - - ndbout_c("file[0] status: %d type: %d reqStatus: %d file1: %d %d %d", - file0Ptr.p->fileStatus, file0Ptr.p->fileType, file0Ptr.p->reqStatus, - file1Ptr.p->fileStatus, file1Ptr.p->fileType, file1Ptr.p->reqStatus - ); - - signal->theData[0] = 404; - signal->theData[1] = file0Ptr.p->fileRef; - EXECUTE_DIRECT(NDBFS, GSN_DUMP_STATE_ORD, signal, 2); - - signal->theData[0] = 404; - signal->theData[1] = file1Ptr.p->fileRef; - EXECUTE_DIRECT(NDBFS, GSN_DUMP_STATE_ORD, signal, 2); - - ndbout_c("c_COPY_GCIREQ_Counter = %s", - c_COPY_GCIREQ_Counter.getText()); - ndbout_c("c_COPY_TABREQ_Counter = %s", - c_COPY_TABREQ_Counter.getText()); - ndbout_c("c_CREATE_FRAGREQ_Counter = %s", - c_CREATE_FRAGREQ_Counter.getText()); - ndbout_c("c_DIH_SWITCH_REPLICA_REQ_Counter = %s", - c_DIH_SWITCH_REPLICA_REQ_Counter.getText()); - ndbout_c("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText()); - ndbout_c("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText()); - ndbout_c("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText()); - ndbout_c("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText()); - ndbout_c("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText()); - ndbout_c("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText()); - ndbout_c("c_MASTER_GCPREQ_Counter = %s", - c_MASTER_GCPREQ_Counter.getText()); - ndbout_c("c_MASTER_LCPREQ_Counter = %s", - c_MASTER_LCPREQ_Counter.getText()); - ndbout_c("c_START_INFOREQ_Counter = %s", - c_START_INFOREQ_Counter.getText()); - ndbout_c("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText()); - ndbout_c("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText()); - ndbout_c("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText()); - ndbout_c("c_TC_CLOPSIZEREQ_Counter = %s", - c_TC_CLOPSIZEREQ_Counter.getText()); - ndbout_c("c_TCGETOPSIZEREQ_Counter = %s", - c_TCGETOPSIZEREQ_Counter.getText()); - ndbout_c("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText()); - - if (local == false) - { - jam(); - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - const BlockReference ref = - numberToRef(refToBlock(cntrlblockref), nodePtr.i); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::GCPStopDetected; - sysErr->errorRef = reference(); - sysErr->data1 = cgcpStatus; - sysErr->data2 = cgcpOrderBlocked; - sendSignal(ref, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBA); - }//if - }//for - } - else - { - jam(); - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::GCPStopDetected; - sysErr->errorRef = reference(); - sysErr->data1 = cgcpStatus; - sysErr->data2 = cgcpOrderBlocked; - EXECUTE_DIRECT(NDBCNTR, GSN_SYSTEM_ERROR, - signal, SystemError::SignalLength); - ndbrequire(false); - } - return; -}//Dbdih::crashSystemAtGcpStop() - 
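[Editor's note] The block above dumps a set of signal counters (c_GCP_PREPARE_Counter, c_COPY_GCIREQ_Counter and the rest) before escalating a GCP stop. They all implement the same idea used throughout this LCP/GCP code: a per-signal bitmask of the node ids the block is still waiting on, cleared as each CONF/REP arrives and queried with done(). The following is a minimal, hypothetical sketch of that bookkeeping using std::bitset; it is not NDB's actual SignalCounter or NdbNodeBitmask class, and the names are illustrative only.

    #include <bitset>
    #include <cstddef>
    #include <string>

    // Hypothetical stand-in for the "waiting for nodes" counters dumped above.
    class WaitingForNodes {
      std::bitset<256> m_nodes;                    // one bit per possible node id
    public:
      void setWaitingFor(unsigned nodeId)      { m_nodes.set(nodeId); }
      void clearWaitingFor(unsigned nodeId)    { m_nodes.reset(nodeId); }
      bool isWaitingFor(unsigned nodeId) const { return m_nodes.test(nodeId); }
      bool done() const { return m_nodes.none(); } // no outstanding nodes left
      std::string getText() const {                // crude dump, like getText() above
        std::string s;
        for (std::size_t i = 0; i < m_nodes.size(); i++)
          if (m_nodes.test(i)) { s += std::to_string(i); s += ' '; }
        return s.empty() ? std::string("<none>") : s;
      }
    };

In sendLastLCP_FRAG_ORD above, for example, a node with nothing queued and nothing started gets clearWaitingFor() called for it, and once done() reports true the master can conclude the round; crashSystemAtGcpStop prints the still-set bits to show which nodes were holding up the global checkpoint.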
-/*************************************************************************/ -/* */ -/* MODULE: ALLOCPAGE */ -/* DESCRIPTION: THE SUBROUTINE IS CALLED WITH POINTER TO PAGE */ -/* RECORD. A PAGE RECORD IS TAKEN FROM */ -/* THE FREE PAGE LIST */ -/*************************************************************************/ -void Dbdih::allocpage(PageRecordPtr& pagePtr) -{ - ndbrequire(cfirstfreepage != RNIL); - pagePtr.i = cfirstfreepage; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - cfirstfreepage = pagePtr.p->nextfreepage; - pagePtr.p->nextfreepage = RNIL; -}//Dbdih::allocpage() - -/*************************************************************************/ -/* */ -/* MODULE: ALLOC_STORED_REPLICA */ -/* DESCRIPTION: THE SUBROUTINE IS CALLED TO GET A REPLICA RECORD, */ -/* TO INITIALISE IT AND TO LINK IT INTO THE FRAGMENT */ -/* STORE RECORD. USED FOR STORED REPLICAS. */ -/*************************************************************************/ -void Dbdih::allocStoredReplica(FragmentstorePtr fragPtr, - ReplicaRecordPtr& newReplicaPtr, - Uint32 nodeId) -{ - Uint32 i; - ReplicaRecordPtr arrReplicaPtr; - ReplicaRecordPtr arrPrevReplicaPtr; - - seizeReplicaRec(newReplicaPtr); - for (i = 0; i < MAX_LCP_STORED; i++) { - newReplicaPtr.p->maxGciCompleted[i] = 0; - newReplicaPtr.p->maxGciStarted[i] = 0; - newReplicaPtr.p->lcpId[i] = 0; - newReplicaPtr.p->lcpStatus[i] = ZINVALID; - }//for - newReplicaPtr.p->noCrashedReplicas = 0; - newReplicaPtr.p->initialGci = currentgcp; - for (i = 0; i < 8; i++) { - newReplicaPtr.p->replicaLastGci[i] = (Uint32)-1; - newReplicaPtr.p->createGci[i] = 0; - }//for - newReplicaPtr.p->createGci[0] = currentgcp; - ndbrequire(currentgcp != 0xF1F1F1F1); - newReplicaPtr.p->nextLcp = 0; - newReplicaPtr.p->procNode = nodeId; - newReplicaPtr.p->lcpOngoingFlag = false; - newReplicaPtr.p->lcpIdStarted = 0; - - arrPrevReplicaPtr.i = RNIL; - arrReplicaPtr.i = fragPtr.p->storedReplicas; - while (arrReplicaPtr.i != RNIL) { - jam(); - ptrCheckGuard(arrReplicaPtr, creplicaFileSize, replicaRecord); - arrPrevReplicaPtr = arrReplicaPtr; - arrReplicaPtr.i = arrReplicaPtr.p->nextReplica; - }//while - if (arrPrevReplicaPtr.i == RNIL) { - jam(); - fragPtr.p->storedReplicas = newReplicaPtr.i; - } else { - jam(); - arrPrevReplicaPtr.p->nextReplica = newReplicaPtr.i; - }//if - fragPtr.p->noStoredReplicas++; -}//Dbdih::allocStoredReplica() - -/*************************************************************************/ -/* CALCULATE HOW MANY HOT SPARES THAT ARE TO BE ASSIGNED IN THIS SYSTEM */ -/*************************************************************************/ -void Dbdih::calculateHotSpare() -{ - Uint32 tchsTmp; - Uint32 tchsNoNodes; - - switch (cnoReplicas) { - case 1: - jam(); - cnoHotSpare = 0; - break; - case 2: - case 3: - case 4: - jam(); - if (csystemnodes > cnoReplicas) { - jam(); - /* --------------------------------------------------------------------- */ - /* WITH MORE NODES THAN REPLICAS WE WILL ALWAYS USE AT LEAST ONE HOT */ - /* SPARE IF THAT HAVE BEEN REQUESTED BY THE CONFIGURATION FILE. THE */ - /* NUMBER OF NODES TO BE USED FOR NORMAL OPERATION IS ALWAYS */ - /* A MULTIPLE OF THE NUMBER OF REPLICAS SINCE WE WILL ORGANISE NODES */ - /* INTO NODE GROUPS. THE REMAINING NODES WILL BE HOT SPARE NODES. 
*/ - /* --------------------------------------------------------------------- */ - if ((csystemnodes - cnoReplicas) >= cminHotSpareNodes) { - jam(); - /* --------------------------------------------------------------------- */ - // We set the minimum number of hot spares according to users request - // through the configuration file. - /* --------------------------------------------------------------------- */ - tchsNoNodes = csystemnodes - cminHotSpareNodes; - cnoHotSpare = cminHotSpareNodes; - } else if (cminHotSpareNodes > 0) { - jam(); - /* --------------------------------------------------------------------- */ - // The user requested at least one hot spare node and we will support him - // in that. - /* --------------------------------------------------------------------- */ - tchsNoNodes = csystemnodes - 1; - cnoHotSpare = 1; - } else { - jam(); - /* --------------------------------------------------------------------- */ - // The user did not request any hot spare nodes so in this case we will - // only use hot spare nodes if the number of nodes is such that we cannot - // use all nodes as normal nodes. - /* --------------------------------------------------------------------- */ - tchsNoNodes = csystemnodes; - cnoHotSpare = 0; - }//if - } else { - jam(); - /* --------------------------------------------------------------------- */ - // We only have enough to support the replicas. We will not have any hot - // spares. - /* --------------------------------------------------------------------- */ - tchsNoNodes = csystemnodes; - cnoHotSpare = 0; - }//if - tchsTmp = tchsNoNodes - (cnoReplicas * (tchsNoNodes / cnoReplicas)); - cnoHotSpare = cnoHotSpare + tchsTmp; - break; - default: - jam(); - ndbrequire(false); - break; - }//switch -}//Dbdih::calculateHotSpare() - -/*************************************************************************/ -/* CHECK IF THE NODE CRASH IS TO ESCALATE INTO A SYSTEM CRASH. WE COULD */ -/* DO THIS BECAUSE ALL REPLICAS OF SOME FRAGMENT ARE LOST. WE COULD ALSO */ -/* DO IT AFTER MANY NODE FAILURES THAT MAKE IT VERY DIFFICULT TO RESTORE */ -/* DATABASE AFTER A SYSTEM CRASH. IT MIGHT EVEN BE IMPOSSIBLE AND THIS */ -/* MUST BE AVOIDED EVEN MORE THAN AVOIDING SYSTEM CRASHES. */ -/*************************************************************************/ -void Dbdih::checkEscalation() -{ - Uint32 TnodeGroup[MAX_NDB_NODES]; - NodeRecordPtr nodePtr; - Uint32 i; - for (i = 0; i < MAX_NDB_NODES; i++) { - TnodeGroup[i] = ZFALSE; - }//for - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE && - nodePtr.p->activeStatus == Sysfile::NS_Active){ - ndbrequire(nodePtr.p->nodeGroup < MAX_NDB_NODES); - TnodeGroup[nodePtr.p->nodeGroup] = ZTRUE; - } - } - for (i = 0; i < cnoOfNodeGroups; i++) { - jam(); - if (TnodeGroup[i] == ZFALSE) { - jam(); - progError(__LINE__, NDBD_EXIT_LOST_NODE_GROUP, "Lost node group"); - }//if - }//for -}//Dbdih::checkEscalation() - -/*************************************************************************/ -/* */ -/* MODULE: CHECK_KEEP_GCI */ -/* DESCRIPTION: CHECK FOR MINIMUM GCI RESTORABLE WITH NEW LOCAL */ -/* CHECKPOINT. 
*/ -/*************************************************************************/ -void Dbdih::checkKeepGci(TabRecordPtr tabPtr, Uint32 fragId, Fragmentstore*, - Uint32 replicaStartIndex) -{ - ReplicaRecordPtr ckgReplicaPtr; - ckgReplicaPtr.i = replicaStartIndex; - while (ckgReplicaPtr.i != RNIL) { - jam(); - ptrCheckGuard(ckgReplicaPtr, creplicaFileSize, replicaRecord); - Uint32 keepGci; - Uint32 oldestRestorableGci; - findMinGci(ckgReplicaPtr, keepGci, oldestRestorableGci); - if (keepGci < c_lcpState.keepGci) { - jam(); - /* ------------------------------------------------------------------- */ - /* WE MUST KEEP LOG RECORDS SO THAT WE CAN USE ALL LOCAL CHECKPOINTS */ - /* THAT ARE AVAILABLE. THUS WE NEED TO CALCULATE THE MINIMUM OVER ALL */ - /* FRAGMENTS. */ - /* ------------------------------------------------------------------- */ - c_lcpState.keepGci = keepGci; - }//if - if (oldestRestorableGci > c_lcpState.oldestRestorableGci) { - jam(); - c_lcpState.oldestRestorableGci = oldestRestorableGci; - }//if - ckgReplicaPtr.i = ckgReplicaPtr.p->nextReplica; - }//while -}//Dbdih::checkKeepGci() - -void Dbdih::closeFile(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZCLOSE_NO_DELETE; - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA); -}//Dbdih::closeFile() - -void Dbdih::closeFileDelete(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZCLOSE_DELETE; - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA); -}//Dbdih::closeFileDelete() - -void Dbdih::createFileRw(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = reference(); - signal->theData[1] = filePtr.i; - signal->theData[2] = filePtr.p->fileName[0]; - signal->theData[3] = filePtr.p->fileName[1]; - signal->theData[4] = filePtr.p->fileName[2]; - signal->theData[5] = filePtr.p->fileName[3]; - signal->theData[6] = ZCREATE_READ_WRITE; - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA); -}//Dbdih::createFileRw() - -void Dbdih::emptyverificbuffer(Signal* signal, bool aContinueB) -{ - if(cfirstVerifyQueue == RNIL){ - jam(); - return; - }//if - ApiConnectRecordPtr localApiConnectptr; - if(getBlockCommit() == false){ - jam(); - ndbrequire(cverifyQueueCounter > 0); - cverifyQueueCounter--; - localApiConnectptr.i = cfirstVerifyQueue; - ptrCheckGuard(localApiConnectptr, capiConnectFileSize, apiConnectRecord); - ndbrequire(localApiConnectptr.p->apiGci <= currentgcp); - cfirstVerifyQueue = localApiConnectptr.p->nextApi; - if (cfirstVerifyQueue == RNIL) { - jam(); - ndbrequire(cverifyQueueCounter == 0); - clastVerifyQueue = RNIL; - }//if - signal->theData[0] = localApiConnectptr.i; - signal->theData[1] = currentgcp; - sendSignal(clocaltcblockref, GSN_DIVERIFYCONF, signal, 2, JBB); - if (aContinueB == true) { - jam(); - //----------------------------------------------------------------------- - // This emptying happened as part of a take-out process by continueb signals. - // This ensures that we will empty the queue eventually. We will also empty - // one item every time we insert one item to ensure that the list doesn't - // grow when it is not blocked. 
- //-----------------------------------------------------------------------
- signal->theData[0] = DihContinueB::ZEMPTY_VERIFY_QUEUE;
- sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
- }//if
- } else {
- jam();
- //-----------------------------------------------------------------------
- // We are blocked so it is no use in continuing the emptying of the
- // verify buffer. Whenever the block is removed the emptying will
- // restart.
- //-----------------------------------------------------------------------
- }
- return;
-}//Dbdih::emptyverificbuffer()
-
-/*----------------------------------------------------------------*/
-/* FIND A FREE HOT SPARE IF AVAILABLE AND ALIVE. */
-/*----------------------------------------------------------------*/
-Uint32 Dbdih::findHotSpare()
-{
- NodeRecordPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
- jam();
- ptrAss(nodePtr, nodeRecord);
- if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) {
- if (nodePtr.p->activeStatus == Sysfile::NS_HotSpare) {
- jam();
- return nodePtr.i;
- }//if
- }//if
- }//for
- return RNIL;
-}//Dbdih::findHotSpare()
-
-/*************************************************************************/
-/* FIND THE NODES FROM WHICH WE CAN EXECUTE THE LOG TO RESTORE THE */
-/* DATA NODE IN A SYSTEM RESTART. */
-/*************************************************************************/
-bool Dbdih::findLogNodes(CreateReplicaRecord* createReplica,
- FragmentstorePtr fragPtr,
- Uint32 startGci,
- Uint32 stopGci)
-{
- ConstPtr<ReplicaRecord> flnReplicaPtr;
- flnReplicaPtr.i = createReplica->replicaRec;
- ptrCheckGuard(flnReplicaPtr, creplicaFileSize, replicaRecord);
- /* --------------------------------------------------------------------- */
- /* WE START BY CHECKING IF THE DATA NODE CAN HANDLE THE LOG ALL BY */
- /* ITSELF. THIS IS THE DESIRED BEHAVIOUR. IF THIS IS NOT POSSIBLE */
- /* THEN WE SEARCH FOR THE BEST POSSIBLE NODES AMONG THE NODES THAT */
- /* ARE PART OF THIS SYSTEM RESTART. */
- /* THIS CAN ONLY BE HANDLED BY THE LAST CRASHED REPLICA. */
- /* The condition is that the replica was created before or at the */
- /* time of the starting gci, in addition it must have been alive */
- /* at the time of the stopping gci. This is checked by two */
- /* conditions, the first checks replicaLastGci and the second */
- /* checks that it is also smaller than the last gci the node was */
- /* involved in. This is necessary to check since createGci is set */
- /* Last + 1 and sometimes startGci = stopGci + 1 and in that case */
- /* it could happen that replicaLastGci is set to -1 with CreateGci */
- /* set to LastGci + 1. */
- /* --------------------------------------------------------------------- */
- arrGuard(flnReplicaPtr.p->noCrashedReplicas, 8);
- const Uint32 noCrashed = flnReplicaPtr.p->noCrashedReplicas;
-
- if (!(ERROR_INSERTED(7073) || ERROR_INSERTED(7074))&&
- (startGci >= flnReplicaPtr.p->createGci[noCrashed]) &&
- (stopGci <= flnReplicaPtr.p->replicaLastGci[noCrashed]) &&
- (stopGci <= SYSFILE->lastCompletedGCI[flnReplicaPtr.p->procNode])) {
- jam();
- /* --------------------------------------------------------------------- */
- /* WE FOUND ALL THE LOG RECORDS NEEDED IN THE DATA NODE. WE WILL */
- /* USE THOSE. */
- /* --------------------------------------------------------------------- */
- createReplica->noLogNodes = 1;
- createReplica->logStartGci[0] = startGci;
- createReplica->logStopGci[0] = stopGci;
- createReplica->logNodeId[0] = flnReplicaPtr.p->procNode;
- return true;
- }//if
- Uint32 logNode = 0;
- do {
- Uint32 fblStopGci;
- jam();
- if(!findBestLogNode(createReplica,
- fragPtr,
- startGci,
- stopGci,
- logNode,
- fblStopGci)){
- jam();
- return false;
- }
-
- logNode++;
- if (fblStopGci >= stopGci) {
- jam();
- createReplica->noLogNodes = logNode;
- return true;
- }//if
- startGci = fblStopGci + 1;
- if (logNode >= 4) { // Why??
- jam();
- break;
- }//if
- } while (1);
- /* --------------------------------------------------------------------- */
- /* IT WAS NOT POSSIBLE TO RESTORE THE REPLICA. THIS CAN EITHER BE */
- /* BECAUSE OF LACKING NODES OR BECAUSE OF A REALLY SERIOUS PROBLEM.*/
- /* --------------------------------------------------------------------- */
- return false;
-}//Dbdih::findLogNodes()
-
-/*************************************************************************/
-/* FIND THE BEST POSSIBLE LOG NODE TO EXECUTE THE LOG AS SPECIFIED */
-/* BY THE INPUT PARAMETERS. WE SCAN THROUGH ALL ALIVE REPLICAS. */
-/* THIS MEANS STORED, OLD_STORED */
-/*************************************************************************/
-bool
-Dbdih::findBestLogNode(CreateReplicaRecord* createReplica,
- FragmentstorePtr fragPtr,
- Uint32 startGci,
- Uint32 stopGci,
- Uint32 logNode,
- Uint32& fblStopGci)
-{
- ConstPtr<ReplicaRecord> fblFoundReplicaPtr;
- ConstPtr<ReplicaRecord> fblReplicaPtr;
- LINT_INIT(fblFoundReplicaPtr.p);
-
- /* --------------------------------------------------------------------- */
- /* WE START WITH ZERO AS FOUND TO ENSURE THAT FIRST HIT WILL BE */
- /* BETTER. */
- /* --------------------------------------------------------------------- */
- fblStopGci = 0;
- fblReplicaPtr.i = fragPtr.p->storedReplicas;
- while (fblReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
- if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
- jam();
- Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
- if (fliStopGci > fblStopGci) {
- jam();
- fblStopGci = fliStopGci;
- fblFoundReplicaPtr = fblReplicaPtr;
- }//if
- }//if
- fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
- }//while
- fblReplicaPtr.i = fragPtr.p->oldStoredReplicas;
- while (fblReplicaPtr.i != RNIL) {
- jam();
- ptrCheckGuard(fblReplicaPtr, creplicaFileSize, replicaRecord);
- if (checkNodeAlive(fblReplicaPtr.p->procNode)) {
- jam();
- Uint32 fliStopGci = findLogInterval(fblReplicaPtr, startGci);
- if (fliStopGci > fblStopGci) {
- jam();
- fblStopGci = fliStopGci;
- fblFoundReplicaPtr = fblReplicaPtr;
- }//if
- }//if
- fblReplicaPtr.i = fblReplicaPtr.p->nextReplica;
- }//while
- if (fblStopGci != 0) {
- jam();
- ndbrequire(logNode < MAX_LOG_EXEC);
- createReplica->logNodeId[logNode] = fblFoundReplicaPtr.p->procNode;
- createReplica->logStartGci[logNode] = startGci;
- if (fblStopGci >= stopGci) {
- jam();
- createReplica->logStopGci[logNode] = stopGci;
- } else {
- jam();
- createReplica->logStopGci[logNode] = fblStopGci;
- }//if
- }//if
-
- return fblStopGci != 0;
-}//Dbdih::findBestLogNode()
-
-Uint32 Dbdih::findLogInterval(ConstPtr<ReplicaRecord> replicaPtr,
- Uint32 startGci)
-{
- ndbrequire(replicaPtr.p->noCrashedReplicas <= 8);
- Uint32 loopLimit = replicaPtr.p->noCrashedReplicas + 1;
- for (Uint32 i = 0; i < loopLimit; i++) {
- jam();
- if (replicaPtr.p->createGci[i] <= startGci) {
- if (replicaPtr.p->replicaLastGci[i] >= startGci) {
- jam();
- return replicaPtr.p->replicaLastGci[i];
- }//if
- }//if
- }//for
- return 0;
-}//Dbdih::findLogInterval()
-
-/*************************************************************************/
-/* */
-/* MODULE: FIND THE MINIMUM GCI THAT THIS NODE HAS LOG RECORDS FOR.*/
-/*************************************************************************/
-void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
- Uint32& keepGci,
- Uint32& oldestRestorableGci)
-{
- Uint32 nextLcpNo;
- Uint32 lcpNo;
- for (Uint32 i = 0; i < MAX_LCP_STORED; i++) {
- jam();
- if ((fmgReplicaPtr.p->lcpStatus[i] == ZVALID) &&
- ((fmgReplicaPtr.p->lcpId[i] + MAX_LCP_STORED) <= (SYSFILE->latestLCP_ID + 1))) {
- jam();
- /*--------------------------------------------------------------------*/
- // We invalidate the checkpoint we are preparing to overwrite.
- // The LCP id is still the old lcp id,
- // this is the reason of comparing with lcpId + 1.
- /*---------------------------------------------------------------------*/
- fmgReplicaPtr.p->lcpStatus[i] = ZINVALID;
- }//if
- }//for
- keepGci = (Uint32)-1;
- oldestRestorableGci = 0;
- nextLcpNo = fmgReplicaPtr.p->nextLcp;
- lcpNo = fmgReplicaPtr.p->nextLcp;
- do {
- ndbrequire(lcpNo < MAX_LCP_STORED);
- if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID)
- {
- jam();
- keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
- oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
- ndbassert(fmgReplicaPtr.p->maxGciStarted[lcpNo] <= c_newest_restorable_gci);
- return;
- } else {
- jam();
- if (fmgReplicaPtr.p->createGci[0] == fmgReplicaPtr.p->initialGci) {
- jam();
- /*-------------------------------------------------------------------
- * WE CAN STILL RESTORE THIS REPLICA WITHOUT ANY LOCAL CHECKPOINTS BY
- * ONLY USING THE LOG. IF THIS IS NOT POSSIBLE THEN WE REPORT THE LAST
- * VALID LOCAL CHECKPOINT AS THE MINIMUM GCI RECOVERABLE.
- *-----------------------------------------------------------------*/
- keepGci = fmgReplicaPtr.p->createGci[0];
- }//if
- }//if
- lcpNo = prevLcpNo(lcpNo);
- } while (lcpNo != nextLcpNo);
- return;
-}//Dbdih::findMinGci()
-
-bool Dbdih::findStartGci(ConstPtr<ReplicaRecord> replicaPtr,
- Uint32 stopGci,
- Uint32& startGci,
- Uint32& lcpNo)
-{
- lcpNo = replicaPtr.p->nextLcp;
- const Uint32 startLcpNo = lcpNo;
- do {
- lcpNo = prevLcpNo(lcpNo);
- ndbrequire(lcpNo < MAX_LCP_STORED);
- if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID) {
- if (replicaPtr.p->maxGciStarted[lcpNo] < stopGci) {
- jam();
- /* ----------------------------------------------------------------- */
- /* WE HAVE FOUND A USEFUL LOCAL CHECKPOINT THAT CAN BE USED FOR */
- /* RESTARTING THIS FRAGMENT REPLICA. */
- /* ----------------------------------------------------------------- */
- startGci = replicaPtr.p->maxGciCompleted[lcpNo] + 1;
- return true;
- }
- }
- } while (lcpNo != startLcpNo);
- /* --------------------------------------------------------------------- */
- /* NO VALID LOCAL CHECKPOINT WAS AVAILABLE. WE WILL ADD THE */
- /* FRAGMENT. THUS THE NEXT LCP MUST BE SET TO ZERO. */
- /* WE MUST EXECUTE THE LOG FROM THE INITIAL GLOBAL CHECKPOINT WHEN */
- /* THE TABLE WAS CREATED. */
- /* --------------------------------------------------------------------- */
- startGci = replicaPtr.p->initialGci;
- ndbrequire(replicaPtr.p->nextLcp == 0);
- return false;
-}//Dbdih::findStartGci()
-
-/**************************************************************************/
-/* ---------------------------------------------------------------------- */
-/* FIND A TAKE OVER REPLICA WHICH IS TO BE STARTED OR COMMITTED WHEN*/
-/* TAKING OVER A FAILED NODE. */
-/* ---------------------------------------------------------------------- */
-/*************************************************************************/
-void Dbdih::findToReplica(TakeOverRecord* regTakeOver,
- Uint32 replicaType,
- FragmentstorePtr fragPtr,
- ReplicaRecordPtr& ftrReplicaPtr)
-{
- switch (replicaType) {
- case CreateFragReq::STORED:
- case CreateFragReq::COMMIT_STORED:
- /* ----------------------------------------------------------------------*/
- /* HERE WE SEARCH FOR STORED REPLICAS. THE REPLICA MUST BE STORED IN THE */
- /* SECTION FOR OLD STORED REPLICAS SINCE WE HAVE NOT TAKEN OVER YET.
*/ - /* ----------------------------------------------------------------------*/ - ftrReplicaPtr.i = fragPtr.p->oldStoredReplicas; - while (ftrReplicaPtr.i != RNIL) { - ptrCheckGuard(ftrReplicaPtr, creplicaFileSize, replicaRecord); - if (ftrReplicaPtr.p->procNode == regTakeOver->toStartingNode) { - jam(); - return; - } else { - if (ftrReplicaPtr.p->procNode == regTakeOver->toFailedNode) { - jam(); - return; - } else { - jam(); - ftrReplicaPtr.i = ftrReplicaPtr.p->nextReplica; - }//if - }//if - }//while - break; - default: - ndbrequire(false); - break; - }//switch -}//Dbdih::findToReplica() - -void Dbdih::initCommonData() -{ - c_blockCommit = false; - c_blockCommitNo = 0; - c_createFragmentLock = RNIL; - c_endToLock = RNIL; - cfailurenr = 1; - cfirstAliveNode = RNIL; - cfirstDeadNode = RNIL; - cfirstVerifyQueue = RNIL; - cgckptflag = false; - cgcpDelay = 0; - cgcpMasterTakeOverState = GMTOS_IDLE; - cgcpOrderBlocked = 0; - cgcpParticipantState = GCP_PARTICIPANT_READY; - cgcpSameCounter = 0; - cgcpStartCounter = 0; - cgcpStatus = GCP_READY; - - clastVerifyQueue = RNIL; - c_lcpMasterTakeOverState.set(LMTOS_IDLE, __LINE__); - - c_lcpState.clcpDelay = 0; - c_lcpState.lcpStart = ZIDLE; - c_lcpState.lcpStopGcp = 0; - c_lcpState.setLcpStatus(LCP_STATUS_IDLE, __LINE__); - c_lcpState.currentFragment.tableId = 0; - c_lcpState.currentFragment.fragmentId = 0; - c_lcpState.noOfLcpFragRepOutstanding = 0; - c_lcpState.keepGci = 0; - c_lcpState.oldestRestorableGci = 0; - c_lcpState.ctcCounter = 0; - c_lcpState.ctimer = 0; - c_lcpState.immediateLcpStart = false; - c_lcpState.m_MASTER_LCPREQ_Received = false; - - cmasterdihref = 0; - cmasterNodeId = 0; - cmasterState = MASTER_IDLE; - cmasterTakeOverNode = 0; - cnewgcp = 0; - cnoHotSpare = 0; - cnoOfActiveTables = 0; - cnoOfNodeGroups = 0; - c_nextNodeGroup = 0; - cnoReplicas = 0; - coldgcp = 0; - coldGcpId = 0; - coldGcpStatus = cgcpStatus; - con_lineNodes = 0; - creceivedfrag = 0; - crestartGci = 0; - crestartInfoFile[0] = RNIL; - crestartInfoFile[1] = RNIL; - cstartGcpNow = false; - cstartPhase = 0; - c_startToLock = RNIL; - cstarttype = (Uint32)-1; - csystemnodes = 0; - c_updateToLock = RNIL; - currentgcp = 0; - c_newest_restorable_gci = 0; - cverifyQueueCounter = 0; - cwaitLcpSr = false; - c_nextLogPart = 0; - - nodeResetStart(); - c_nodeStartMaster.wait = ZFALSE; - - memset(&sysfileData[0], 0, sizeof(sysfileData)); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - c_lcpState.clcpDelay = 20; - ndb_mgm_get_int_parameter(p, CFG_DB_LCP_INTERVAL, &c_lcpState.clcpDelay); - c_lcpState.clcpDelay = c_lcpState.clcpDelay > 31 ? 31 : c_lcpState.clcpDelay; - - cminHotSpareNodes = 0; - //ndb_mgm_get_int_parameter(p, CFG_DB_MIN_HOT_SPARES, &cminHotSpareNodes); - cminHotSpareNodes = cminHotSpareNodes > 2 ? 2 : cminHotSpareNodes; - - cnoReplicas = 1; - ndb_mgm_get_int_parameter(p, CFG_DB_NO_REPLICAS, &cnoReplicas); - if (cnoReplicas > 4) - { - progError(__LINE__, NDBD_EXIT_INVALID_CONFIG, - "Only up to four replicas are supported. Check NoOfReplicas."); - } - - cgcpDelay = 2000; - ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay); - cgcpDelay = cgcpDelay > 60000 ? 60000 : (cgcpDelay < 10 ? 
10 : cgcpDelay); -}//Dbdih::initCommonData() - -void Dbdih::initFragstore(FragmentstorePtr fragPtr) -{ - fragPtr.p->storedReplicas = RNIL; - fragPtr.p->oldStoredReplicas = RNIL; - - fragPtr.p->noStoredReplicas = 0; - fragPtr.p->noOldStoredReplicas = 0; - fragPtr.p->fragReplicas = 0; - fragPtr.p->preferredPrimary = 0; - - for (Uint32 i = 0; i < MAX_REPLICAS; i++) - fragPtr.p->activeNodes[i] = 0; - - fragPtr.p->noLcpReplicas = 0; - fragPtr.p->distributionKey = 0; -}//Dbdih::initFragstore() - -/*************************************************************************/ -/* */ -/* MODULE: INIT_RESTART_INFO */ -/* DESCRIPTION: INITIATE RESTART INFO VARIABLE AND VARIABLES FOR */ -/* GLOBAL CHECKPOINTS. */ -/*************************************************************************/ -void Dbdih::initRestartInfo() -{ - Uint32 i; - for (i = 0; i < MAX_NDB_NODES; i++) { - SYSFILE->lastCompletedGCI[i] = 0; - }//for - NodeRecordPtr nodePtr; - nodePtr.i = cfirstAliveNode; - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - SYSFILE->lastCompletedGCI[nodePtr.i] = 1; - /* FIRST GCP = 1 ALREADY SET BY LQH */ - nodePtr.i = nodePtr.p->nextNode; - } while (nodePtr.i != RNIL); - coldgcp = 1; - currentgcp = 2; - cnewgcp = 2; - crestartGci = 1; - c_newest_restorable_gci = 1; - - SYSFILE->keepGCI = 1; - SYSFILE->oldestRestorableGCI = 1; - SYSFILE->newestRestorableGCI = 1; - SYSFILE->systemRestartBits = 0; - for (i = 0; i < NodeBitmask::Size; i++) { - SYSFILE->lcpActive[0] = 0; - }//for - for (i = 0; i < Sysfile::TAKE_OVER_SIZE; i++) { - SYSFILE->takeOver[i] = 0; - }//for - Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits); - srand(time(0)); - globalData.m_restart_seq = SYSFILE->m_restart_seq = 0; -}//Dbdih::initRestartInfo() - -/*--------------------------------------------------------------------*/ -/* NODE GROUP BITS ARE INITIALISED BEFORE THIS. */ -/* NODE ACTIVE BITS ARE INITIALISED BEFORE THIS. 
*/ -/*--------------------------------------------------------------------*/ -/*************************************************************************/ -/* */ -/* MODULE: INIT_RESTORABLE_GCI_FILES */ -/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFERS TO THE*/ -/* FILES THAT KEEP THE VARIABLE CRESTART_INFO */ -/*************************************************************************/ -void Dbdih::initRestorableGciFiles() -{ - Uint32 tirgTmp; - FileRecordPtr filePtr; - seizeFile(filePtr); - filePtr.p->tabRef = RNIL; - filePtr.p->fileType = FileRecord::GCP_FILE; - filePtr.p->reqStatus = FileRecord::IDLE; - filePtr.p->fileStatus = FileRecord::CLOSED; - crestartInfoFile[0] = filePtr.i; - filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */ - filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */ - filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */ - tirgTmp = 1; /* FILE NAME VERSION 1 */ - tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */ - tirgTmp = (tirgTmp << 8) + 1; /* D1 DIRECTORY */ - tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */ - filePtr.p->fileName[3] = tirgTmp; - /* --------------------------------------------------------------------- */ - /* THE NAME BECOMES /D1/DBDICT/S0.SYSFILE */ - /* --------------------------------------------------------------------- */ - seizeFile(filePtr); - filePtr.p->tabRef = RNIL; - filePtr.p->fileType = FileRecord::GCP_FILE; - filePtr.p->reqStatus = FileRecord::IDLE; - filePtr.p->fileStatus = FileRecord::CLOSED; - crestartInfoFile[1] = filePtr.i; - filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */ - filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */ - filePtr.p->fileName[2] = (Uint32)-1; /* S PART IGNORED */ - tirgTmp = 1; /* FILE NAME VERSION 1 */ - tirgTmp = (tirgTmp << 8) + 6; /* .SYSFILE */ - tirgTmp = (tirgTmp << 8) + 2; /* D1 DIRECTORY */ - tirgTmp = (tirgTmp << 8) + 0; /* P0 FILE NAME */ - filePtr.p->fileName[3] = tirgTmp; - /* --------------------------------------------------------------------- */ - /* THE NAME BECOMES /D2/DBDICT/P0.SYSFILE */ - /* --------------------------------------------------------------------- */ -}//Dbdih::initRestorableGciFiles() - -void Dbdih::initTable(TabRecordPtr tabPtr) -{ - tabPtr.p->noOfFragChunks = 0; - tabPtr.p->method = TabRecord::NOTDEFINED; - tabPtr.p->tabStatus = TabRecord::TS_IDLE; - tabPtr.p->noOfWords = 0; - tabPtr.p->noPages = 0; - tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - tabPtr.p->tabCopyStatus = TabRecord::CS_IDLE; - tabPtr.p->tabUpdateState = TabRecord::US_IDLE; - tabPtr.p->noOfBackups = 0; - tabPtr.p->kvalue = 0; - tabPtr.p->hashpointer = (Uint32)-1; - tabPtr.p->mask = 0; - tabPtr.p->tabStorage = TabRecord::ST_NORMAL; - tabPtr.p->tabErrorCode = 0; - tabPtr.p->schemaVersion = (Uint32)-1; - tabPtr.p->tabRemoveNode = RNIL; - tabPtr.p->totalfragments = (Uint32)-1; - tabPtr.p->connectrec = RNIL; - tabPtr.p->tabFile[0] = RNIL; - tabPtr.p->tabFile[1] = RNIL; - tabPtr.p->m_dropTab.tabUserRef = 0; - tabPtr.p->m_dropTab.tabUserPtr = RNIL; - Uint32 i; - for (i = 0; i < MAX_NDB_NODES; i++) { - tabPtr.p->startFid[i] = RNIL; - }//for - for (i = 0; i < 8; i++) { - tabPtr.p->pageRef[i] = RNIL; - }//for - tabPtr.p->tableType = DictTabInfo::UndefTableType; -}//Dbdih::initTable() - -/*************************************************************************/ -/* */ -/* MODULE: INIT_TABLE_FILES */ -/* DESCRIPTION: THE SUBROUTINE SETS UP THE FILES THAT REFERS TO THE*/ -/* FILES THAT KEEP THE TABLE FRAGMENTATION DESCRIPTION. 
*/ -/*************************************************************************/ -void Dbdih::initTableFile(TabRecordPtr tabPtr) -{ - Uint32 titfTmp; - FileRecordPtr filePtr; - seizeFile(filePtr); - filePtr.p->tabRef = tabPtr.i; - filePtr.p->fileType = FileRecord::TABLE_FILE; - filePtr.p->reqStatus = FileRecord::IDLE; - filePtr.p->fileStatus = FileRecord::CLOSED; - tabPtr.p->tabFile[0] = filePtr.i; - filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */ - filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */ - filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */ - titfTmp = 1; /* FILE NAME VERSION 1 */ - titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */ - titfTmp = (titfTmp << 8) + 1; /* D1 DIRECTORY */ - titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */ - filePtr.p->fileName[3] = titfTmp; - /* --------------------------------------------------------------------- */ - /* THE NAME BECOMES /D1/DBDICT/Stid.FRAGLIST */ - /* --------------------------------------------------------------------- */ - seizeFile(filePtr); - filePtr.p->tabRef = tabPtr.i; - filePtr.p->fileType = FileRecord::TABLE_FILE; - filePtr.p->reqStatus = FileRecord::IDLE; - filePtr.p->fileStatus = FileRecord::CLOSED; - tabPtr.p->tabFile[1] = filePtr.i; - filePtr.p->fileName[0] = (Uint32)-1; /* T DIRECTORY NOT USED */ - filePtr.p->fileName[1] = (Uint32)-1; /* F DIRECTORY NOT USED */ - filePtr.p->fileName[2] = tabPtr.i; /* Stid FILE NAME */ - titfTmp = 1; /* FILE NAME VERSION 1 */ - titfTmp = (titfTmp << 8) + 3; /* .FRAGLIST */ - titfTmp = (titfTmp << 8) + 2; /* D2 DIRECTORY */ - titfTmp = (titfTmp << 8) + 255; /* P PART IGNORED */ - filePtr.p->fileName[3] = titfTmp; - /* --------------------------------------------------------------------- */ - /* THE NAME BECOMES /D2/DBDICT/Stid.FRAGLIST */ - /* --------------------------------------------------------------------- */ -}//Dbdih::initTableFile() - -void Dbdih::initialiseRecordsLab(Signal* signal, - Uint32 stepNo, Uint32 retRef, Uint32 retData) -{ - switch (stepNo) { - case 0: - jam(); - initCommonData(); - break; - case 1:{ - ApiConnectRecordPtr apiConnectptr; - jam(); - /******** INTIALIZING API CONNECT RECORDS ********/ - for (apiConnectptr.i = 0; apiConnectptr.i < capiConnectFileSize; apiConnectptr.i++) { - refresh_watch_dog(); - ptrAss(apiConnectptr, apiConnectRecord); - apiConnectptr.p->nextApi = RNIL; - }//for - jam(); - break; - } - case 2:{ - ConnectRecordPtr connectPtr; - jam(); - /****** CONNECT ******/ - for (connectPtr.i = 0; connectPtr.i < cconnectFileSize; connectPtr.i++) { - refresh_watch_dog(); - ptrAss(connectPtr, connectRecord); - connectPtr.p->userpointer = RNIL; - connectPtr.p->userblockref = ZNIL; - connectPtr.p->connectState = ConnectRecord::FREE; - connectPtr.p->table = RNIL; - connectPtr.p->nfConnect = connectPtr.i + 1; - }//for - connectPtr.i = cconnectFileSize - 1; - ptrAss(connectPtr, connectRecord); - connectPtr.p->nfConnect = RNIL; - cfirstconnect = 0; - break; - } - case 3: - { - FileRecordPtr filePtr; - jam(); - /******** INTIALIZING FILE RECORDS ********/ - for (filePtr.i = 0; filePtr.i < cfileFileSize; filePtr.i++) { - ptrAss(filePtr, fileRecord); - filePtr.p->nextFile = filePtr.i + 1; - filePtr.p->fileStatus = FileRecord::CLOSED; - filePtr.p->reqStatus = FileRecord::IDLE; - }//for - filePtr.i = cfileFileSize - 1; - ptrAss(filePtr, fileRecord); - filePtr.p->nextFile = RNIL; - cfirstfreeFile = 0; - initRestorableGciFiles(); - break; - } - case 4: - jam(); - initialiseFragstore(); - break; - case 5: - { - jam(); - /******* NODE 
GROUP RECORD ******/ - /******* NODE RECORD ******/ - NodeGroupRecordPtr loopNGPtr; - for (loopNGPtr.i = 0; loopNGPtr.i < MAX_NDB_NODES; loopNGPtr.i++) { - ptrAss(loopNGPtr, nodeGroupRecord); - loopNGPtr.p->nodesInGroup[0] = RNIL; - loopNGPtr.p->nodesInGroup[1] = RNIL; - loopNGPtr.p->nodesInGroup[2] = RNIL; - loopNGPtr.p->nodesInGroup[3] = RNIL; - loopNGPtr.p->nextReplicaNode = 0; - loopNGPtr.p->nodeCount = 0; - loopNGPtr.p->activeTakeOver = false; - }//for - NodeRecordPtr nodePtr; - for (nodePtr.i = 0; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRecord); - new (nodePtr.p) NodeRecord(); - }//for - break; - } - case 6: - { - PageRecordPtr pagePtr; - jam(); - /******* PAGE RECORD ******/ - for (pagePtr.i = 0; pagePtr.i < cpageFileSize; pagePtr.i++) { - refresh_watch_dog(); - ptrAss(pagePtr, pageRecord); - pagePtr.p->nextfreepage = pagePtr.i + 1; - }//for - pagePtr.i = cpageFileSize - 1; - ptrAss(pagePtr, pageRecord); - pagePtr.p->nextfreepage = RNIL; - cfirstfreepage = 0; - break; - } - case 7: - { - ReplicaRecordPtr initReplicaPtr; - jam(); - /******* REPLICA RECORD ******/ - for (initReplicaPtr.i = 0; initReplicaPtr.i < creplicaFileSize; - initReplicaPtr.i++) { - refresh_watch_dog(); - ptrAss(initReplicaPtr, replicaRecord); - initReplicaPtr.p->lcpIdStarted = 0; - initReplicaPtr.p->lcpOngoingFlag = false; - initReplicaPtr.p->nextReplica = initReplicaPtr.i + 1; - }//for - initReplicaPtr.i = creplicaFileSize - 1; - ptrAss(initReplicaPtr, replicaRecord); - initReplicaPtr.p->nextReplica = RNIL; - cnoFreeReplicaRec = creplicaFileSize; - cfirstfreeReplica = 0; - break; - } - case 8: - { - TabRecordPtr loopTabptr; - jam(); - /********* TAB-DESCRIPTOR ********/ - for (loopTabptr.i = 0; loopTabptr.i < ctabFileSize; loopTabptr.i++) { - ptrAss(loopTabptr, tabRecord); - refresh_watch_dog(); - initTable(loopTabptr); - }//for - break; - } - case 9: - { - TakeOverRecordPtr takeOverPtr; - jam(); - cfirstfreeTakeOver = RNIL; - for (takeOverPtr.i = 0; takeOverPtr.i < MAX_NDB_NODES; takeOverPtr.i++) { - ptrAss(takeOverPtr, takeOverRecord); - initTakeOver(takeOverPtr); - releaseTakeOver(takeOverPtr.i); - }//for - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = retData; - sendSignal(retRef, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - return; - break; - } - default: - ndbrequire(false); - break; - }//switch - jam(); - /* ---------------------------------------------------------------------- */ - /* SEND REAL-TIME BREAK DURING INIT OF VARIABLES DURING SYSTEM RESTART. */ - /* ---------------------------------------------------------------------- */ - signal->theData[0] = DihContinueB::ZINITIALISE_RECORDS; - signal->theData[1] = stepNo + 1; - signal->theData[2] = retRef; - signal->theData[3] = retData; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); -}//Dbdih::initialiseRecordsLab() - -/*************************************************************************/ -/* INSERT THE NODE INTO THE LINKED LIST OF NODES INVOLVED ALL */ -/* DISTRIBUTED PROTOCOLS (EXCEPT GCP PROTOCOL THAT USES THE DIH */ -/* LINKED LIST INSTEAD). 
*/ -/*************************************************************************/ -void Dbdih::insertAlive(NodeRecordPtr newNodePtr) -{ - NodeRecordPtr nodePtr; - - nodePtr.i = cfirstAliveNode; - if (nodePtr.i == RNIL) { - jam(); - cfirstAliveNode = newNodePtr.i; - } else { - do { - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nextNode == RNIL) { - jam(); - nodePtr.p->nextNode = newNodePtr.i; - break; - } else { - jam(); - nodePtr.i = nodePtr.p->nextNode; - }//if - } while (1); - }//if - newNodePtr.p->nextNode = RNIL; -}//Dbdih::insertAlive() - -void Dbdih::insertBackup(FragmentstorePtr fragPtr, Uint32 nodeId) -{ - for (Uint32 i = fragPtr.p->fragReplicas; i > 1; i--) { - jam(); - ndbrequire(i < MAX_REPLICAS && i > 0); - fragPtr.p->activeNodes[i] = fragPtr.p->activeNodes[i - 1]; - }//for - fragPtr.p->activeNodes[1] = nodeId; - fragPtr.p->fragReplicas++; -}//Dbdih::insertBackup() - -void Dbdih::insertDeadNode(NodeRecordPtr newNodePtr) -{ - NodeRecordPtr nodePtr; - - nodePtr.i = cfirstDeadNode; - if (nodePtr.i == RNIL) { - jam(); - cfirstDeadNode = newNodePtr.i; - } else { - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nextNode == RNIL) { - jam(); - nodePtr.p->nextNode = newNodePtr.i; - break; - } else { - jam(); - nodePtr.i = nodePtr.p->nextNode; - }//if - } while (1); - }//if - newNodePtr.p->nextNode = RNIL; -}//Dbdih::insertDeadNode() - -void Dbdih::linkOldStoredReplica(FragmentstorePtr fragPtr, - ReplicaRecordPtr replicatePtr) -{ - ReplicaRecordPtr losReplicaPtr; - - replicatePtr.p->nextReplica = RNIL; - fragPtr.p->noOldStoredReplicas++; - losReplicaPtr.i = fragPtr.p->oldStoredReplicas; - if (losReplicaPtr.i == RNIL) { - jam(); - fragPtr.p->oldStoredReplicas = replicatePtr.i; - return; - }//if - ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord); - while (losReplicaPtr.p->nextReplica != RNIL) { - jam(); - losReplicaPtr.i = losReplicaPtr.p->nextReplica; - ptrCheckGuard(losReplicaPtr, creplicaFileSize, replicaRecord); - }//if - losReplicaPtr.p->nextReplica = replicatePtr.i; -}//Dbdih::linkOldStoredReplica() - -void Dbdih::linkStoredReplica(FragmentstorePtr fragPtr, - ReplicaRecordPtr replicatePtr) -{ - ReplicaRecordPtr lsrReplicaPtr; - - fragPtr.p->noStoredReplicas++; - replicatePtr.p->nextReplica = RNIL; - lsrReplicaPtr.i = fragPtr.p->storedReplicas; - if (fragPtr.p->storedReplicas == RNIL) { - jam(); - fragPtr.p->storedReplicas = replicatePtr.i; - return; - }//if - ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord); - while (lsrReplicaPtr.p->nextReplica != RNIL) { - jam(); - lsrReplicaPtr.i = lsrReplicaPtr.p->nextReplica; - ptrCheckGuard(lsrReplicaPtr, creplicaFileSize, replicaRecord); - }//if - lsrReplicaPtr.p->nextReplica = replicatePtr.i; -}//Dbdih::linkStoredReplica() - -/*************************************************************************/ -/* MAKE NODE GROUPS BASED ON THE LIST OF NODES RECEIVED FROM CNTR */ -/*************************************************************************/ -void Dbdih::makeNodeGroups(Uint32 nodeArray[]) -{ - NodeRecordPtr mngNodeptr; - Uint32 tmngNode; - Uint32 tmngNodeGroup; - Uint32 tmngLimit; - Uint32 i, j; - - /**----------------------------------------------------------------------- - * ASSIGN ALL ACTIVE NODES INTO NODE GROUPS. 
HOT SPARE NODES ARE ASSIGNED - * TO NODE GROUP ZNIL - *-----------------------------------------------------------------------*/ - tmngNodeGroup = 0; - tmngLimit = csystemnodes - cnoHotSpare; - ndbrequire(tmngLimit < MAX_NDB_NODES); - for (i = 0; i < tmngLimit; i++) { - NodeGroupRecordPtr NGPtr; - jam(); - tmngNode = nodeArray[i]; - mngNodeptr.i = tmngNode; - ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord); - mngNodeptr.p->nodeGroup = tmngNodeGroup; - NGPtr.i = tmngNodeGroup; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - arrGuard(NGPtr.p->nodeCount, MAX_REPLICAS); - NGPtr.p->nodesInGroup[NGPtr.p->nodeCount++] = mngNodeptr.i; - if (NGPtr.p->nodeCount == cnoReplicas) { - jam(); - tmngNodeGroup++; - }//if - }//for - cnoOfNodeGroups = tmngNodeGroup; - ndbrequire(csystemnodes < MAX_NDB_NODES); - for (i = tmngLimit + 1; i < csystemnodes; i++) { - jam(); - tmngNode = nodeArray[i]; - mngNodeptr.i = tmngNode; - ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord); - mngNodeptr.p->nodeGroup = ZNIL; - }//for - for(i = 0; i < MAX_NDB_NODES; i++){ - jam(); - Sysfile::setNodeGroup(i, SYSFILE->nodeGroups, NO_NODE_GROUP_ID); - }//for - for (mngNodeptr.i = 1; mngNodeptr.i < MAX_NDB_NODES; mngNodeptr.i++) { - jam(); - ptrAss(mngNodeptr, nodeRecord); - if (mngNodeptr.p->nodeGroup != ZNIL) { - jam(); - Sysfile::setNodeGroup(mngNodeptr.i, SYSFILE->nodeGroups, mngNodeptr.p->nodeGroup); - }//if - }//for - - for (i = 0; i < cnoOfNodeGroups; i++) - { - jam(); - NodeGroupRecordPtr NGPtr; - NGPtr.i = i; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - bool alive = false; - for (j = 0; j < NGPtr.p->nodeCount; j++) - { - jam(); - mngNodeptr.i = NGPtr.p->nodesInGroup[j]; - ptrCheckGuard(mngNodeptr, MAX_NDB_NODES, nodeRecord); - if (checkNodeAlive(NGPtr.p->nodesInGroup[j])) - { - alive = true; - break; - } - } - - if (!alive) - { - char buf[255]; - BaseString::snprintf - (buf, sizeof(buf), - "Illegal initial start, no alive node in nodegroup %u", i); - progError(__LINE__, - NDBD_EXIT_INSUFFICENT_NODES, - buf); - - } - } -}//Dbdih::makeNodeGroups() - -/** - * On node failure QMGR asks DIH about node groups. This is - * a direct signal (function call in same process). Input is - * bitmask of surviving nodes. The routine is not concerned - * about node count.
Reply is one of: - * 1) win - we can survive, and nobody else can - * 2) lose - we cannot survive - * 3) partition - we can survive but there could be others - */ -void Dbdih::execCHECKNODEGROUPSREQ(Signal* signal) -{ - jamEntry(); - CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0]; - - bool direct = (sd->requestType & CheckNodeGroups::Direct); - bool ok = false; - switch(sd->requestType & ~CheckNodeGroups::Direct){ - case CheckNodeGroups::ArbitCheck:{ - ok = true; - jam(); - unsigned missall = 0; - unsigned haveall = 0; - for (Uint32 i = 0; i < cnoOfNodeGroups; i++) { - jam(); - NodeGroupRecordPtr ngPtr; - ngPtr.i = i; - ptrAss(ngPtr, nodeGroupRecord); - Uint32 count = 0; - for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) { - jam(); - Uint32 nodeId = ngPtr.p->nodesInGroup[j]; - if (sd->mask.get(nodeId)) { - jam(); - count++; - }//if - }//for - if (count == 0) { - jam(); - missall++; - }//if - if (count == ngPtr.p->nodeCount) { - haveall++; - }//if - }//for - - if (missall) { - jam(); - sd->output = CheckNodeGroups::Lose; - } else if (haveall) { - jam(); - sd->output = CheckNodeGroups::Win; - } else { - jam(); - sd->output = CheckNodeGroups::Partitioning; - }//if - } - break; - case CheckNodeGroups::GetNodeGroup: - ok = true; - sd->output = Sysfile::getNodeGroup(getOwnNodeId(), SYSFILE->nodeGroups); - break; - case CheckNodeGroups::GetNodeGroupMembers: { - ok = true; - Uint32 ownNodeGroup = - Sysfile::getNodeGroup(sd->nodeId, SYSFILE->nodeGroups); - - sd->output = ownNodeGroup; - sd->mask.clear(); - - NodeGroupRecordPtr ngPtr; - ngPtr.i = ownNodeGroup; - ptrAss(ngPtr, nodeGroupRecord); - for (Uint32 j = 0; j < ngPtr.p->nodeCount; j++) { - jam(); - sd->mask.set(ngPtr.p->nodesInGroup[j]); - } -#if 0 - for (int i = 0; i < MAX_NDB_NODES; i++) { - if (ownNodeGroup == - Sysfile::getNodeGroup(i, SYSFILE->nodeGroups)) { - sd->mask.set(i); - } - } -#endif - } - break; - } - ndbrequire(ok); - - if (!direct) - sendSignal(sd->blockRef, GSN_CHECKNODEGROUPSCONF, signal, - CheckNodeGroups::SignalLength, JBB); -}//Dbdih::execCHECKNODEGROUPSREQ() - -void Dbdih::makePrnList(ReadNodesConf * readNodes, Uint32 nodeArray[]) -{ - cfirstAliveNode = RNIL; - ndbrequire(con_lineNodes > 0); - ndbrequire(csystemnodes < MAX_NDB_NODES); - for (Uint32 i = 0; i < csystemnodes; i++) { - NodeRecordPtr nodePtr; - jam(); - nodePtr.i = nodeArray[i]; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - new (nodePtr.p) NodeRecord(); - if (NodeBitmask::get(readNodes->inactiveNodes, nodePtr.i) == false){ - jam(); - nodePtr.p->nodeStatus = NodeRecord::ALIVE; - nodePtr.p->useInTransactions = true; - nodePtr.p->copyCompleted = true; - nodePtr.p->m_inclDihLcp = true; - insertAlive(nodePtr); - } else { - jam(); - nodePtr.p->nodeStatus = NodeRecord::DEAD; - insertDeadNode(nodePtr); - }//if - }//for -}//Dbdih::makePrnList() - -/*************************************************************************/ -/* A NEW CRASHED REPLICA IS ADDED BY A NODE FAILURE. */ -/*************************************************************************/ -void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr) -{ - /*----------------------------------------------------------------------*/ - /* SET THE REPLICA_LAST_GCI OF THE CRASHED REPLICA TO LAST GCI */ - /* EXECUTED BY THE FAILED NODE. */ - /*----------------------------------------------------------------------*/ - /* WE HAVE A NEW CRASHED REPLICA. 
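[Editorial sketch, not part of the removed patch text] The ArbitCheck branch of the removed execCHECKNODEGROUPSREQ() above decides survival purely from how many members of each node group are in the surviving set: losing any complete group means losing data, while holding any group completely means the other side cannot survive. A self-contained illustration of that rule, using hypothetical container types rather than the block's own structures:

  #include <set>
  #include <vector>

  enum ArbitOutcome { Win, Lose, Partitioning };

  // nodeGroups: node ids per node group, as DIH laid them out.
  // survivors:  node ids still reachable after the failure.
  ArbitOutcome arbitCheck(const std::vector< std::vector<unsigned> >& nodeGroups,
                          const std::set<unsigned>& survivors)
  {
    unsigned missall = 0, haveall = 0;
    for (size_t g = 0; g < nodeGroups.size(); g++) {
      unsigned count = 0;
      for (size_t j = 0; j < nodeGroups[g].size(); j++)
        count += (unsigned)survivors.count(nodeGroups[g][j]);
      if (count == 0)                    missall++;  // we hold nothing of this group
      if (count == nodeGroups[g].size()) haveall++;  // we hold all of this group
    }
    if (missall) return Lose;          // some data lives only on the other side
    if (haveall) return Win;           // the other side lost a whole group
    return Partitioning;               // both sides could in principle continue
  }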
INITIATE CREATE GCI TO INDICATE */ - /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/ - /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */ - /*----------------------------------------------------------------------*/ - Uint32 lastGCI = SYSFILE->lastCompletedGCI[nodeId]; - arrGuardErr(ncrReplicaPtr.p->noCrashedReplicas + 1, 8, - NDBD_EXIT_MAX_CRASHED_REPLICAS); - ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = - lastGCI; - ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1; - ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0; - ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = - (Uint32)-1; - - if (ncrReplicaPtr.p->noCrashedReplicas == 7 && lastGCI) - { - jam(); - SYSFILE->lastCompletedGCI[nodeId] = 0; - warningEvent("Making filesystem for node %d unusable (need --initial)", - nodeId); - } -}//Dbdih::newCrashedReplica() - -/*************************************************************************/ -/* AT NODE FAILURE DURING START OF A NEW NODE WE NEED TO RESET A */ -/* SET OF VARIABLES CONTROLLING THE START AND INDICATING ONGOING */ -/* START OF A NEW NODE. */ -/*************************************************************************/ -void Dbdih::nodeResetStart() -{ - jam(); - c_nodeStartSlave.nodeId = 0; - c_nodeStartMaster.startNode = RNIL; - c_nodeStartMaster.failNr = cfailurenr; - c_nodeStartMaster.activeState = false; - c_nodeStartMaster.blockGcp = false; - c_nodeStartMaster.blockLcp = false; - c_nodeStartMaster.m_outstandingGsn = 0; -}//Dbdih::nodeResetStart() - -void Dbdih::openFileRw(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = reference(); - signal->theData[1] = filePtr.i; - signal->theData[2] = filePtr.p->fileName[0]; - signal->theData[3] = filePtr.p->fileName[1]; - signal->theData[4] = filePtr.p->fileName[2]; - signal->theData[5] = filePtr.p->fileName[3]; - signal->theData[6] = FsOpenReq::OM_READWRITE; - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA); -}//Dbdih::openFileRw() - -void Dbdih::openFileRo(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = reference(); - signal->theData[1] = filePtr.i; - signal->theData[2] = filePtr.p->fileName[0]; - signal->theData[3] = filePtr.p->fileName[1]; - signal->theData[4] = filePtr.p->fileName[2]; - signal->theData[5] = filePtr.p->fileName[3]; - signal->theData[6] = FsOpenReq::OM_READONLY; - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, 7, JBA); -}//Dbdih::openFileRw() - -/*************************************************************************/ -/* REMOVE A CRASHED REPLICA BY PACKING THE ARRAY OF CREATED GCI AND*/ -/* THE LAST GCI OF THE CRASHED REPLICA. 
*/ -/*************************************************************************/ -void Dbdih::packCrashedReplicas(ReplicaRecordPtr replicaPtr) -{ - ndbrequire(replicaPtr.p->noCrashedReplicas > 0); - ndbrequire(replicaPtr.p->noCrashedReplicas <= 8); - for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) { - jam(); - replicaPtr.p->createGci[i] = replicaPtr.p->createGci[i + 1]; - replicaPtr.p->replicaLastGci[i] = replicaPtr.p->replicaLastGci[i + 1]; - }//for - replicaPtr.p->noCrashedReplicas--; - -#ifdef VM_TRACE - for (Uint32 i = 0; i < replicaPtr.p->noCrashedReplicas; i++) { - jam(); - ndbrequire(replicaPtr.p->createGci[i] != 0xF1F1F1F1); - ndbrequire(replicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1); - }//for -#endif -}//Dbdih::packCrashedReplicas() - -void Dbdih::prepareReplicas(FragmentstorePtr fragPtr) -{ - ReplicaRecordPtr prReplicaPtr; - Uint32 prevReplica = RNIL; - - /* --------------------------------------------------------------------- */ - /* BEGIN BY LINKING ALL REPLICA RECORDS ONTO THE OLD STORED REPLICA*/ - /* LIST. */ - /* AT A SYSTEM RESTART OBVIOUSLY ALL NODES ARE OLD. */ - /* --------------------------------------------------------------------- */ - prReplicaPtr.i = fragPtr.p->storedReplicas; - while (prReplicaPtr.i != RNIL) { - jam(); - prevReplica = prReplicaPtr.i; - ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord); - prReplicaPtr.i = prReplicaPtr.p->nextReplica; - }//while - /* --------------------------------------------------------------------- */ - /* LIST OF STORED REPLICAS WILL BE EMPTY NOW. */ - /* --------------------------------------------------------------------- */ - if (prevReplica != RNIL) { - prReplicaPtr.i = prevReplica; - ptrCheckGuard(prReplicaPtr, creplicaFileSize, replicaRecord); - prReplicaPtr.p->nextReplica = fragPtr.p->oldStoredReplicas; - fragPtr.p->oldStoredReplicas = fragPtr.p->storedReplicas; - fragPtr.p->storedReplicas = RNIL; - fragPtr.p->noOldStoredReplicas += fragPtr.p->noStoredReplicas; - fragPtr.p->noStoredReplicas = 0; - }//if -}//Dbdih::prepareReplicas() - -void Dbdih::readFragment(RWFragment* rf, FragmentstorePtr fragPtr) -{ - Uint32 TreadFid = readPageWord(rf); - fragPtr.p->preferredPrimary = readPageWord(rf); - fragPtr.p->noStoredReplicas = readPageWord(rf); - fragPtr.p->noOldStoredReplicas = readPageWord(rf); - Uint32 TdistKey = readPageWord(rf); - - ndbrequire(fragPtr.p->noStoredReplicas > 0); - ndbrequire(TreadFid == rf->fragId); - ndbrequire(TdistKey < 256); - if ((cstarttype == NodeState::ST_NODE_RESTART) || - (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - fragPtr.p->distributionKey = TdistKey; - }//if - - fragPtr.p->m_log_part_id = readPageWord(rf); -}//Dbdih::readFragment() - -Uint32 Dbdih::readPageWord(RWFragment* rf) -{ - if (rf->wordIndex >= 2048) { - jam(); - ndbrequire(rf->wordIndex == 2048); - rf->pageIndex++; - ndbrequire(rf->pageIndex < 8); - rf->rwfPageptr.i = rf->rwfTabPtr.p->pageRef[rf->pageIndex]; - ptrCheckGuard(rf->rwfPageptr, cpageFileSize, pageRecord); - rf->wordIndex = 32; - }//if - Uint32 dataWord = rf->rwfPageptr.p->word[rf->wordIndex]; - rf->wordIndex++; - return dataWord; -}//Dbdih::readPageWord() - -void Dbdih::readReplica(RWFragment* rf, ReplicaRecordPtr readReplicaPtr) -{ - Uint32 i; - readReplicaPtr.p->procNode = readPageWord(rf); - readReplicaPtr.p->initialGci = readPageWord(rf); - readReplicaPtr.p->noCrashedReplicas = readPageWord(rf); - readReplicaPtr.p->nextLcp = readPageWord(rf); - - for (i = 0; i < MAX_LCP_STORED; i++) { - readReplicaPtr.p->maxGciCompleted[i] = 
readPageWord(rf); - readReplicaPtr.p->maxGciStarted[i] = readPageWord(rf); - readReplicaPtr.p->lcpId[i] = readPageWord(rf); - readReplicaPtr.p->lcpStatus[i] = readPageWord(rf); - }//for - const Uint32 noCrashedReplicas = readReplicaPtr.p->noCrashedReplicas; - ndbrequire(noCrashedReplicas < 8); - for (i = 0; i < noCrashedReplicas; i++) { - readReplicaPtr.p->createGci[i] = readPageWord(rf); - readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf); - ndbrequire(readReplicaPtr.p->createGci[i] != 0xF1F1F1F1); - ndbrequire(readReplicaPtr.p->replicaLastGci[i] != 0xF1F1F1F1); - }//for - for(i = noCrashedReplicas; i<8; i++){ - readReplicaPtr.p->createGci[i] = readPageWord(rf); - readReplicaPtr.p->replicaLastGci[i] = readPageWord(rf); - // They are not initialized... - readReplicaPtr.p->createGci[i] = 0; - readReplicaPtr.p->replicaLastGci[i] = ~0; - } - /* ---------------------------------------------------------------------- */ - /* IF THE LAST COMPLETED LOCAL CHECKPOINT IS VALID AND LARGER THAN */ - /* THE LAST COMPLETED CHECKPOINT THEN WE WILL INVALIDATE THIS LOCAL */ - /* CHECKPOINT FOR THIS REPLICA. */ - /* ---------------------------------------------------------------------- */ - Uint32 trraLcp = prevLcpNo(readReplicaPtr.p->nextLcp); - ndbrequire(trraLcp < MAX_LCP_STORED); - if ((readReplicaPtr.p->lcpStatus[trraLcp] == ZVALID) && - (readReplicaPtr.p->lcpId[trraLcp] > SYSFILE->latestLCP_ID)) { - jam(); - readReplicaPtr.p->lcpStatus[trraLcp] = ZINVALID; - }//if - /* ---------------------------------------------------------------------- */ - /* WE ALSO HAVE TO INVALIDATE ANY LOCAL CHECKPOINTS THAT HAVE BEEN */ - /* INVALIDATED BY MOVING BACK THE RESTART GCI. */ - /* ---------------------------------------------------------------------- */ - for (i = 0; i < MAX_LCP_STORED; i++) { - jam(); - if ((readReplicaPtr.p->lcpStatus[i] == ZVALID) && - (readReplicaPtr.p->maxGciStarted[i] > SYSFILE->newestRestorableGCI)) { - jam(); - readReplicaPtr.p->lcpStatus[i] = ZINVALID; - }//if - }//for - /* ---------------------------------------------------------------------- */ - /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */ - /* THAT ARE NO LONGER VALID DUE TO MOVING RESTART GCI BACKWARDS. */ - /* ---------------------------------------------------------------------- */ - removeTooNewCrashedReplicas(readReplicaPtr); - /* ---------------------------------------------------------------------- */ - /* WE WILL REMOVE ANY OCCURRENCES OF REPLICAS THAT HAVE CRASHED */ - /* THAT ARE NO LONGER VALID SINCE THEY ARE NO LONGER RESTORABLE. */ - /* ---------------------------------------------------------------------- */ - removeOldCrashedReplicas(readReplicaPtr); - /* --------------------------------------------------------------------- */ - // We set the last GCI of the replica that was alive before the node - // crashed last time. We set it to the last GCI which the node participated in. 
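[Editorial sketch, not part of the removed patch text] The removed readReplica() above distrusts two kinds of on-disk checkpoints when a replica is re-read at restart: the most recently written LCP slot if its id is newer than the id recorded in the sysfile, and any slot whose maxGciStarted lies beyond the newest restorable GCI. A small illustration of those two conditions, with hypothetical names:

  #include <cstdint>

  struct StoredLcp {
    bool     valid;          // ZVALID / ZINVALID in the removed code
    uint32_t lcpId;
    uint32_t maxGciStarted;
  };

  // The first rule applies to the most recently written slot only, the
  // second rule to every slot, as in the code above.
  void invalidateIfUnusable(StoredLcp& lcp, bool isNewestSlot,
                            uint32_t latestLcpIdInSysfile,
                            uint32_t newestRestorableGci)
  {
    if (!lcp.valid)
      return;
    if (isNewestSlot && lcp.lcpId > latestLcpIdInSysfile)
      lcp.valid = false;               // sysfile never heard of this checkpoint
    if (lcp.maxGciStarted > newestRestorableGci)
      lcp.valid = false;               // checkpoint reaches past the restorable GCI
  }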
- /* --------------------------------------------------------------------- */ - ndbrequire(readReplicaPtr.p->noCrashedReplicas < 8); - readReplicaPtr.p->replicaLastGci[readReplicaPtr.p->noCrashedReplicas] = - SYSFILE->lastCompletedGCI[readReplicaPtr.p->procNode]; - /* ---------------------------------------------------------------------- */ - /* FIND PROCESSOR RECORD */ - /* ---------------------------------------------------------------------- */ -}//Dbdih::readReplica() - -void Dbdih::readReplicas(RWFragment* rf, FragmentstorePtr fragPtr) -{ - Uint32 i; - ReplicaRecordPtr newReplicaPtr; - Uint32 noStoredReplicas = fragPtr.p->noStoredReplicas; - Uint32 noOldStoredReplicas = fragPtr.p->noOldStoredReplicas; - /* ----------------------------------------------------------------------- */ - /* WE CLEAR THE NUMBER OF STORED REPLICAS SINCE IT WILL BE CALCULATED */ - /* BY THE LINKING SUBROUTINES. */ - /* ----------------------------------------------------------------------- */ - fragPtr.p->noStoredReplicas = 0; - fragPtr.p->noOldStoredReplicas = 0; - Uint32 replicaIndex = 0; - ndbrequire(noStoredReplicas + noOldStoredReplicas <= MAX_REPLICAS); - for (i = 0; i < noStoredReplicas; i++) { - seizeReplicaRec(newReplicaPtr); - readReplica(rf, newReplicaPtr); - if (checkNodeAlive(newReplicaPtr.p->procNode)) { - jam(); - ndbrequire(replicaIndex < MAX_REPLICAS); - fragPtr.p->activeNodes[replicaIndex] = newReplicaPtr.p->procNode; - replicaIndex++; - linkStoredReplica(fragPtr, newReplicaPtr); - } else { - jam(); - linkOldStoredReplica(fragPtr, newReplicaPtr); - }//if - }//for - fragPtr.p->fragReplicas = noStoredReplicas; - for (i = 0; i < noOldStoredReplicas; i++) { - jam(); - seizeReplicaRec(newReplicaPtr); - readReplica(rf, newReplicaPtr); - linkOldStoredReplica(fragPtr, newReplicaPtr); - }//for -}//Dbdih::readReplicas() - -void Dbdih::readRestorableGci(Signal* signal, FileRecordPtr filePtr) -{ - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZLIST_OF_PAIRS; - signal->theData[4] = ZVAR_NO_CRESTART_INFO; - signal->theData[5] = 1; - signal->theData[6] = 0; - signal->theData[7] = 0; - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA); -}//Dbdih::readRestorableGci() - -void Dbdih::readTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr) -{ - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZLIST_OF_PAIRS; - signal->theData[4] = ZVAR_NO_WORD; - signal->theData[5] = tab->noPages; - for (Uint32 i = 0; i < tab->noPages; i++) { - signal->theData[6 + (2 * i)] = tab->pageRef[i]; - signal->theData[7 + (2 * i)] = i; - }//for - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 22, JBA); -}//Dbdih::readTabfile() - -void Dbdih::releasePage(Uint32 pageIndex) -{ - PageRecordPtr pagePtr; - pagePtr.i = pageIndex; - ptrCheckGuard(pagePtr, cpageFileSize, pageRecord); - pagePtr.p->nextfreepage = cfirstfreepage; - cfirstfreepage = pagePtr.i; -}//Dbdih::releasePage() - -void Dbdih::releaseTabPages(Uint32 tableId) -{ - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - ndbrequire(tabPtr.p->noPages <= 8); - for (Uint32 i = 0; i < tabPtr.p->noPages; i++) { - jam(); - releasePage(tabPtr.p->pageRef[i]); - }//for - tabPtr.p->noPages = 0; -}//Dbdih::releaseTabPages() - -/*************************************************************************/ -/* REMOVE NODE FROM SET OF ALIVE NODES. 
*/ -/*************************************************************************/ -void Dbdih::removeAlive(NodeRecordPtr removeNodePtr) -{ - NodeRecordPtr nodePtr; - - nodePtr.i = cfirstAliveNode; - if (nodePtr.i == removeNodePtr.i) { - jam(); - cfirstAliveNode = removeNodePtr.p->nextNode; - return; - }//if - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nextNode == removeNodePtr.i) { - jam(); - nodePtr.p->nextNode = removeNodePtr.p->nextNode; - break; - } else { - jam(); - nodePtr.i = nodePtr.p->nextNode; - }//if - } while (1); -}//Dbdih::removeAlive() - -/*************************************************************************/ -/* REMOVE NODE FROM SET OF DEAD NODES. */ -/*************************************************************************/ -void Dbdih::removeDeadNode(NodeRecordPtr removeNodePtr) -{ - NodeRecordPtr nodePtr; - - nodePtr.i = cfirstDeadNode; - if (nodePtr.i == removeNodePtr.i) { - jam(); - cfirstDeadNode = removeNodePtr.p->nextNode; - return; - }//if - do { - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nextNode == removeNodePtr.i) { - jam(); - nodePtr.p->nextNode = removeNodePtr.p->nextNode; - break; - } else { - jam(); - nodePtr.i = nodePtr.p->nextNode; - }//if - } while (1); -}//Dbdih::removeDeadNode() - -/*---------------------------------------------------------------*/ -/* REMOVE REPLICAS OF A FAILED NODE FROM LIST OF STORED */ -/* REPLICAS AND MOVE IT TO THE LIST OF OLD STORED REPLICAS.*/ -/* ALSO UPDATE THE CRASHED REPLICA INFORMATION. */ -/*---------------------------------------------------------------*/ -void Dbdih::removeNodeFromStored(Uint32 nodeId, - FragmentstorePtr fragPtr, - ReplicaRecordPtr replicatePtr, - bool temporary) -{ - if (!temporary) - { - jam(); - newCrashedReplica(nodeId, replicatePtr); - } - else - { - jam(); - } - removeStoredReplica(fragPtr, replicatePtr); - linkOldStoredReplica(fragPtr, replicatePtr); - ndbrequire(fragPtr.p->storedReplicas != RNIL); -}//Dbdih::removeNodeFromStored() - -/*************************************************************************/ -/* REMOVE ANY OLD CRASHED REPLICAS THAT ARE NOT RESTORABLE ANY MORE*/ -/*************************************************************************/ -void Dbdih::removeOldCrashedReplicas(ReplicaRecordPtr rocReplicaPtr) -{ - while (rocReplicaPtr.p->noCrashedReplicas > 0) { - jam(); - /* --------------------------------------------------------------------- */ - /* ONLY IF THERE IS AT LEAST ONE REPLICA THEN CAN WE REMOVE ANY. */ - /* --------------------------------------------------------------------- */ - if (rocReplicaPtr.p->replicaLastGci[0] < SYSFILE->oldestRestorableGCI){ - jam(); - /* ------------------------------------------------------------------- */ - /* THIS CRASHED REPLICA HAS BECOME EXTINCT AND MUST BE REMOVED TO */ - /* GIVE SPACE FOR NEW CRASHED REPLICAS. */ - /* ------------------------------------------------------------------- */ - packCrashedReplicas(rocReplicaPtr); - } else { - break; - }//if - }//while - if (rocReplicaPtr.p->createGci[0] < SYSFILE->keepGCI){ - jam(); - /* --------------------------------------------------------------------- */ - /* MOVE FORWARD THE CREATE GCI TO A GCI THAT CAN BE USED. WE HAVE */ - /* NO CERTAINTY IN FINDING ANY LOG RECORDS FROM OLDER GCI'S. 
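[Editorial sketch, not part of the removed patch text] removeOldCrashedReplicas() above, together with packCrashedReplicas() further up, treats each crashed-replica entry as a GCI interval [createGci, replicaLastGci] and drops intervals that ended before the oldest restorable GCI, since no redo log can reach them any more. A compact illustration of that pruning, with hypothetical types:

  #include <cstdint>
  #include <deque>
  #include <utility>

  // first = createGci, second = replicaLastGci of one crashed incarnation
  typedef std::pair<uint32_t, uint32_t> GciInterval;

  void pruneExtinctIncarnations(std::deque<GciInterval>& crashed,
                                uint32_t oldestRestorableGci)
  {
    // Oldest incarnations sit at the front; drop them while they are extinct.
    while (!crashed.empty() && crashed.front().second < oldestRestorableGci)
      crashed.pop_front();
  }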
*/ - /* --------------------------------------------------------------------- */ - rocReplicaPtr.p->createGci[0] = SYSFILE->keepGCI; - ndbrequire(SYSFILE->keepGCI != 0xF1F1F1F1); - }//if -}//Dbdih::removeOldCrashedReplicas() - -void Dbdih::removeOldStoredReplica(FragmentstorePtr fragPtr, - ReplicaRecordPtr replicatePtr) -{ - ReplicaRecordPtr rosTmpReplicaPtr; - ReplicaRecordPtr rosPrevReplicaPtr; - - fragPtr.p->noOldStoredReplicas--; - if (fragPtr.p->oldStoredReplicas == replicatePtr.i) { - jam(); - fragPtr.p->oldStoredReplicas = replicatePtr.p->nextReplica; - } else { - rosPrevReplicaPtr.i = fragPtr.p->oldStoredReplicas; - ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord); - rosTmpReplicaPtr.i = rosPrevReplicaPtr.p->nextReplica; - while (rosTmpReplicaPtr.i != replicatePtr.i) { - jam(); - rosPrevReplicaPtr.i = rosTmpReplicaPtr.i; - ptrCheckGuard(rosPrevReplicaPtr, creplicaFileSize, replicaRecord); - ptrCheckGuard(rosTmpReplicaPtr, creplicaFileSize, replicaRecord); - rosTmpReplicaPtr.i = rosTmpReplicaPtr.p->nextReplica; - }//if - rosPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica; - }//if -}//Dbdih::removeOldStoredReplica() - -void Dbdih::removeStoredReplica(FragmentstorePtr fragPtr, - ReplicaRecordPtr replicatePtr) -{ - ReplicaRecordPtr rsrTmpReplicaPtr; - ReplicaRecordPtr rsrPrevReplicaPtr; - - fragPtr.p->noStoredReplicas--; - if (fragPtr.p->storedReplicas == replicatePtr.i) { - jam(); - fragPtr.p->storedReplicas = replicatePtr.p->nextReplica; - } else { - jam(); - rsrPrevReplicaPtr.i = fragPtr.p->storedReplicas; - rsrTmpReplicaPtr.i = fragPtr.p->storedReplicas; - ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord); - rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica; - while (rsrTmpReplicaPtr.i != replicatePtr.i) { - jam(); - rsrPrevReplicaPtr.i = rsrTmpReplicaPtr.i; - ptrCheckGuard(rsrTmpReplicaPtr, creplicaFileSize, replicaRecord); - rsrTmpReplicaPtr.i = rsrTmpReplicaPtr.p->nextReplica; - }//while - ptrCheckGuard(rsrPrevReplicaPtr, creplicaFileSize, replicaRecord); - rsrPrevReplicaPtr.p->nextReplica = replicatePtr.p->nextReplica; - }//if -}//Dbdih::removeStoredReplica() - -/*************************************************************************/ -/* REMOVE ALL TOO NEW CRASHED REPLICAS THAT IS IN THIS REPLICA. */ -/*************************************************************************/ -void Dbdih::removeTooNewCrashedReplicas(ReplicaRecordPtr rtnReplicaPtr) -{ - while (rtnReplicaPtr.p->noCrashedReplicas > 0) { - jam(); - /* --------------------------------------------------------------------- */ - /* REMOVE ALL REPLICAS THAT ONLY LIVED IN A PERIOD THAT HAVE BEEN */ - /* REMOVED FROM THE RESTART INFORMATION SINCE THE RESTART FAILED */ - /* TOO MANY TIMES. 
*/ - /* --------------------------------------------------------------------- */ - arrGuard(rtnReplicaPtr.p->noCrashedReplicas - 1, 8); - if (rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] > - SYSFILE->newestRestorableGCI){ - jam(); - rtnReplicaPtr.p->createGci[rtnReplicaPtr.p->noCrashedReplicas - 1] = - (Uint32)-1; - rtnReplicaPtr.p->replicaLastGci[rtnReplicaPtr.p->noCrashedReplicas - 1] = - (Uint32)-1; - rtnReplicaPtr.p->noCrashedReplicas--; - } else { - break; - }//if - }//while -}//Dbdih::removeTooNewCrashedReplicas() - -/*************************************************************************/ -/* */ -/* MODULE: SEARCH FOR POSSIBLE REPLICAS THAT CAN HANDLE THE GLOBAL */ -/* CHECKPOINT WITHOUT NEEDING ANY EXTRA LOGGING FACILITIES.*/ -/* A MAXIMUM OF FOUR NODES IS RETRIEVED. */ -/*************************************************************************/ -bool -Dbdih::setup_create_replica(FragmentstorePtr fragPtr, - CreateReplicaRecord* createReplicaPtrP, - ConstPtr replicaPtr) -{ - createReplicaPtrP->dataNodeId = replicaPtr.p->procNode; - createReplicaPtrP->replicaRec = replicaPtr.i; - - /* ----------------------------------------------------------------- */ - /* WE NEED TO SEARCH FOR A PROPER LOCAL CHECKPOINT TO USE FOR THE */ - /* SYSTEM RESTART. */ - /* ----------------------------------------------------------------- */ - Uint32 startGci; - Uint32 startLcpNo; - Uint32 stopGci = SYSFILE->newestRestorableGCI; - bool result = findStartGci(replicaPtr, - stopGci, - startGci, - startLcpNo); - if (!result) - { - jam(); - /* --------------------------------------------------------------- */ - /* WE COULD NOT FIND ANY LOCAL CHECKPOINT. THE FRAGMENT THUS DO NOT*/ - /* CONTAIN ANY VALID LOCAL CHECKPOINT. IT DOES HOWEVER CONTAIN A */ - /* VALID FRAGMENT LOG. THUS BY FIRST CREATING THE FRAGMENT AND THEN*/ - /* EXECUTING THE FRAGMENT LOG WE CAN CREATE THE FRAGMENT AS */ - /* DESIRED. THIS SHOULD ONLY OCCUR AFTER CREATING A FRAGMENT. */ - /* */ - /* TO INDICATE THAT NO LOCAL CHECKPOINT IS TO BE USED WE SET THE */ - /* LOCAL CHECKPOINT TO ZNIL. */ - /* --------------------------------------------------------------- */ - createReplicaPtrP->lcpNo = ZNIL; - } - else - { - jam(); - /* --------------------------------------------------------------- */ - /* WE FOUND A PROPER LOCAL CHECKPOINT TO RESTART FROM. */ - /* SET LOCAL CHECKPOINT ID AND LOCAL CHECKPOINT NUMBER. */ - /* --------------------------------------------------------------- */ - createReplicaPtrP->lcpNo = startLcpNo; - arrGuard(startLcpNo, MAX_LCP_STORED); - createReplicaPtrP->createLcpId = replicaPtr.p->lcpId[startLcpNo]; - }//if - - - /* ----------------------------------------------------------------- */ - /* WE HAVE EITHER FOUND A LOCAL CHECKPOINT OR WE ARE PLANNING TO */ - /* EXECUTE THE LOG FROM THE INITIAL CREATION OF THE TABLE. IN BOTH */ - /* CASES WE NEED TO FIND A SET OF LOGS THAT CAN EXECUTE SUCH THAT */ - /* WE RECOVER TO THE SYSTEM RESTART GLOBAL CHECKPOINT. 
*/ - /* -_--------------------------------------------------------------- */ - return findLogNodes(createReplicaPtrP, fragPtr, startGci, stopGci); -} - -void Dbdih::searchStoredReplicas(FragmentstorePtr fragPtr) -{ - Uint32 nextReplicaPtrI; - Ptr replicaPtr; - - replicaPtr.i = fragPtr.p->storedReplicas; - while (replicaPtr.i != RNIL) { - jam(); - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - nextReplicaPtrI = replicaPtr.p->nextReplica; - ConstPtr constReplicaPtr; - constReplicaPtr.i = replicaPtr.i; - constReplicaPtr.p = replicaPtr.p; - NodeRecordPtr nodePtr; - nodePtr.i = replicaPtr.p->procNode; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nodeStatus == NodeRecord::ALIVE) { - jam(); - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - case Sysfile::NS_ActiveMissed_2:{ - /* ----------------------------------------------------------------- */ - /* INITIALISE THE CREATE REPLICA STRUCTURE THAT IS USED FOR SENDING*/ - /* TO LQH START_FRAGREQ. */ - /* SET THE DATA NODE WHERE THE LOCAL CHECKPOINT IS FOUND. ALSO */ - /* SET A REFERENCE TO THE REPLICA POINTER OF THAT. */ - /* ----------------------------------------------------------------- */ - CreateReplicaRecordPtr createReplicaPtr; - createReplicaPtr.i = cnoOfCreateReplicas; - ptrCheckGuard(createReplicaPtr, 4, createReplicaRecord); - cnoOfCreateReplicas++; - - /** - * Should have been checked in resetReplicaSr - */ - ndbrequire(setup_create_replica(fragPtr, - createReplicaPtr.p, - constReplicaPtr)); - break; - } - default: - jam(); - /*empty*/; - break; - }//switch - } - replicaPtr.i = nextReplicaPtrI; - }//while -}//Dbdih::searchStoredReplicas() - -/*************************************************************************/ -/* */ -/* MODULE: SEIZE_FILE */ -/* DESCRIPTION: THE SUBROUTINE SEIZES A FILE RECORD FROM THE */ -/* FREE LIST. */ -/*************************************************************************/ -void Dbdih::seizeFile(FileRecordPtr& filePtr) -{ - filePtr.i = cfirstfreeFile; - ptrCheckGuard(filePtr, cfileFileSize, fileRecord); - cfirstfreeFile = filePtr.p->nextFile; - filePtr.p->nextFile = RNIL; -}//Dbdih::seizeFile() - -/*************************************************************************/ -/* SEND CREATE_FRAGREQ TO ALL NODES IN THE NDB CLUSTER. */ -/*************************************************************************/ -/*************************************************************************/ -/* */ -/* MODULE: FIND THE START GCI AND LOCAL CHECKPOINT TO USE. 
*/ -/*************************************************************************/ -void Dbdih::sendStartFragreq(Signal* signal, - TabRecordPtr tabPtr, Uint32 fragId) -{ - CreateReplicaRecordPtr replicaPtr; - for (replicaPtr.i = 0; replicaPtr.i < cnoOfCreateReplicas; replicaPtr.i++) { - jam(); - ptrAss(replicaPtr, createReplicaRecord); - BlockReference ref = calcLqhBlockRef(replicaPtr.p->dataNodeId); - StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0]; - startFragReq->userPtr = replicaPtr.p->replicaRec; - startFragReq->userRef = reference(); - startFragReq->lcpNo = replicaPtr.p->lcpNo; - startFragReq->lcpId = replicaPtr.p->createLcpId; - startFragReq->tableId = tabPtr.i; - startFragReq->fragId = fragId; - - if(ERROR_INSERTED(7072) || ERROR_INSERTED(7074)){ - jam(); - const Uint32 noNodes = replicaPtr.p->noLogNodes; - Uint32 start = replicaPtr.p->logStartGci[noNodes - 1]; - const Uint32 stop = replicaPtr.p->logStopGci[noNodes - 1]; - - for(Uint32 i = noNodes; i < 4 && (stop - start) > 0; i++){ - replicaPtr.p->noLogNodes++; - replicaPtr.p->logStopGci[i - 1] = start; - - replicaPtr.p->logNodeId[i] = replicaPtr.p->logNodeId[i-1]; - replicaPtr.p->logStartGci[i] = start + 1; - replicaPtr.p->logStopGci[i] = stop; - start += 1; - } - } - - startFragReq->noOfLogNodes = replicaPtr.p->noLogNodes; - - for (Uint32 i = 0; i < 4 ; i++) { - startFragReq->lqhLogNode[i] = replicaPtr.p->logNodeId[i]; - startFragReq->startGci[i] = replicaPtr.p->logStartGci[i]; - startFragReq->lastGci[i] = replicaPtr.p->logStopGci[i]; - }//for - - sendSignal(ref, GSN_START_FRAGREQ, signal, - StartFragReq::SignalLength, JBB); - }//for -}//Dbdih::sendStartFragreq() - -/*************************************************************************/ -/* SET THE INITIAL ACTIVE STATUS ON ALL NODES AND PUT INTO LISTS. */ -/*************************************************************************/ -void Dbdih::setInitialActiveStatus() -{ - NodeRecordPtr siaNodeptr; - Uint32 tsiaNoActiveNodes; - - tsiaNoActiveNodes = csystemnodes - cnoHotSpare; - for(Uint32 i = 0; i < Sysfile::NODE_STATUS_SIZE; i++) - SYSFILE->nodeStatus[i] = 0; - for (siaNodeptr.i = 1; siaNodeptr.i < MAX_NDB_NODES; siaNodeptr.i++) { - ptrAss(siaNodeptr, nodeRecord); - switch(siaNodeptr.p->nodeStatus){ - case NodeRecord::ALIVE: - case NodeRecord::DEAD: - if (tsiaNoActiveNodes == 0) { - jam(); - siaNodeptr.p->activeStatus = Sysfile::NS_HotSpare; - } else { - jam(); - tsiaNoActiveNodes = tsiaNoActiveNodes - 1; - if (siaNodeptr.p->nodeStatus == NodeRecord::ALIVE) - { - jam(); - siaNodeptr.p->activeStatus = Sysfile::NS_Active; - } - else - { - siaNodeptr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - } - } - break; - default: - jam(); - siaNodeptr.p->activeStatus = Sysfile::NS_NotDefined; - break; - }//if - Sysfile::setNodeStatus(siaNodeptr.i, - SYSFILE->nodeStatus, - siaNodeptr.p->activeStatus); - }//for -}//Dbdih::setInitialActiveStatus() - -/*************************************************************************/ -/* SET LCP ACTIVE STATUS AT THE END OF A LOCAL CHECKPOINT.
*/ -/*************************************************************************/ -void Dbdih::setLcpActiveStatusEnd() -{ - NodeRecordPtr nodePtr; - - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (c_lcpState.m_participatingLQH.get(nodePtr.i)){ - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - case Sysfile::NS_ActiveMissed_2: - jam(); - /*-------------------------------------------------------------------*/ - /* THE NODE PARTICIPATED IN THIS CHECKPOINT. - * WE CAN SET ITS STATUS TO ACTIVE */ - /*-------------------------------------------------------------------*/ - nodePtr.p->activeStatus = Sysfile::NS_Active; - takeOverCompleted(nodePtr.i); - break; - case Sysfile::NS_TakeOver: - jam(); - /*-------------------------------------------------------------------*/ - /* THE NODE HAS COMPLETED A CHECKPOINT AFTER TAKE OVER. WE CAN NOW */ - /* SET ITS STATUS TO ACTIVE. WE CAN ALSO COMPLETE THE TAKE OVER */ - /* AND ALSO WE CLEAR THE TAKE OVER NODE IN THE RESTART INFO. */ - /*-------------------------------------------------------------------*/ - nodePtr.p->activeStatus = Sysfile::NS_Active; - takeOverCompleted(nodePtr.i); - break; - default: - ndbrequire(false); - return; - break; - }//switch - }//if - }//for - - if(getNodeState().getNodeRestartInProgress()){ - jam(); - if(c_lcpState.m_participatingLQH.get(getOwnNodeId())){ - nodePtr.i = getOwnNodeId(); - ptrAss(nodePtr, nodeRecord); - ndbrequire(nodePtr.p->activeStatus == Sysfile::NS_Active); - g_eventLogger.info("NR: setLcpActiveStatusEnd - m_participatingLQH"); - } else { - g_eventLogger.info("NR: setLcpActiveStatusEnd - !m_participatingLQH"); - } - } - - c_lcpState.m_participatingDIH.clear(); - c_lcpState.m_participatingLQH.clear(); - if (isMaster()) { - jam(); - setNodeRestartInfoBits(); - }//if -}//Dbdih::setLcpActiveStatusEnd() - -void Dbdih::takeOverCompleted(Uint32 aNodeId) -{ - TakeOverRecordPtr takeOverPtr; - takeOverPtr.i = findTakeOver(aNodeId); - if (takeOverPtr.i != RNIL) { - jam(); - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - if (takeOverPtr.p->toMasterStatus != TakeOverRecord::WAIT_LCP) { - jam(); - ndbrequire(!isMaster()); - return; - }//if - ndbrequire(isMaster()); - Sysfile::setTakeOverNode(aNodeId, SYSFILE->takeOver, 0); - takeOverPtr.p->toMasterStatus = TakeOverRecord::TO_END_COPY; - cstartGcpNow = true; - }//if -}//Dbdih::takeOverCompleted() - -/*************************************************************************/ -/* SET LCP ACTIVE STATUS BEFORE STARTING A LOCAL CHECKPOINT. 
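[Editorial sketch, not part of the removed patch text] setLcpActiveStatusStart() below degrades the Sysfile active status of any node that cannot take part in the checkpoint being started; after a third miss the node's own checkpoints are no longer usable and it has to come back through the copy-fragment take-over path. A minimal illustration of that progression, using a hypothetical enum rather than the block's own types:

  enum ActiveStatus { Active, ActiveMissed_1, ActiveMissed_2,
                      TakeOver, NotActive_NotTakenOver };

  // One step of the missed-LCP progression; aliveButStillCopying mirrors the
  // "node is alive but has not completed the copy phase" test in the code below.
  ActiveStatus onLcpMissed(ActiveStatus current, bool aliveButStillCopying)
  {
    switch (current) {
    case Active:          return ActiveMissed_1;
    case ActiveMissed_1:  return ActiveMissed_2;
    case ActiveMissed_2:  return aliveButStillCopying ? TakeOver
                                                      : NotActive_NotTakenOver;
    default:              return current;   // TakeOver and others stay as they are
    }
  }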
*/ -/*************************************************************************/ -void Dbdih::setLcpActiveStatusStart(Signal* signal) -{ - NodeRecordPtr nodePtr; - - c_lcpState.m_participatingLQH.clear(); - c_lcpState.m_participatingDIH.clear(); - - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRecord); -#if 0 - if(nodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER){ - infoEvent("Node %d nodeStatus=%d activeStatus=%d copyCompleted=%d lcp=%d", - nodePtr.i, - nodePtr.p->nodeStatus, - nodePtr.p->activeStatus, - nodePtr.p->copyCompleted, - nodePtr.p->m_inclDihLcp); - } -#endif - if(nodePtr.p->nodeStatus == NodeRecord::ALIVE && nodePtr.p->m_inclDihLcp){ - jam(); - c_lcpState.m_participatingDIH.set(nodePtr.i); - } - - if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) && - (nodePtr.p->copyCompleted)) { - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - jam(); - /*-------------------------------------------------------------------*/ - // The normal case. Starting a LCP for a started node which hasn't - // missed the previous LCP. - /*-------------------------------------------------------------------*/ - c_lcpState.m_participatingLQH.set(nodePtr.i); - break; - case Sysfile::NS_ActiveMissed_1: - jam(); - /*-------------------------------------------------------------------*/ - // The node is starting up and is participating in a local checkpoint - // as the final phase of the start-up. We can still use the checkpoints - // on the node after a system restart. - /*-------------------------------------------------------------------*/ - c_lcpState.m_participatingLQH.set(nodePtr.i); - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - /*-------------------------------------------------------------------*/ - // The node is starting up and is participating in a local checkpoint - // as the final phase of the start-up. We have missed so - // many checkpoints that we no longer can use this node to - // recreate fragments from disk. - // It must be taken over with the copy fragment process after a system - // crash. We indicate this by setting the active status to TAKE_OVER. - /*-------------------------------------------------------------------*/ - c_lcpState.m_participatingLQH.set(nodePtr.i); - nodePtr.p->activeStatus = Sysfile::NS_TakeOver; - //break; // Fall through - case Sysfile::NS_TakeOver:{ - TakeOverRecordPtr takeOverPtr; - jam(); - /*-------------------------------------------------------------------*/ - /* THIS NODE IS CURRENTLY TAKING OVER A FAILED NODE. */ - /*-------------------------------------------------------------------*/ - takeOverPtr.i = findTakeOver(nodePtr.i); - if (takeOverPtr.i != RNIL) { - jam(); - ptrCheckGuard(takeOverPtr, MAX_NDB_NODES, takeOverRecord); - if (takeOverPtr.p->toMasterStatus == TakeOverRecord::WAIT_LCP) { - jam(); - /*--------------------------------------------------------------- - * ALL THE INFORMATION HAVE BEEN REPLICATED TO THE NEW - * NODE AND WE ARE ONLY WAITING FOR A LOCAL CHECKPOINT TO BE - * PERFORMED ON THE NODE TO SET ITS STATUS TO ACTIVE. 
- */ - infoEvent("Node %d is WAIT_LCP including in LCP", nodePtr.i); - c_lcpState.m_participatingLQH.set(nodePtr.i); - }//if - }//if - break; - } - default: - jam(); - /*empty*/; - break; - }//switch - } else { - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - jam(); - nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_1; - break; - case Sysfile::NS_ActiveMissed_1: - jam(); - nodePtr.p->activeStatus = Sysfile::NS_ActiveMissed_2; - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - CRASH_INSERTION(7192); - if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) && - (!nodePtr.p->copyCompleted)) { - jam(); - /*-----------------------------------------------------------------*/ - // The node is currently starting up and has not completed the - // copy phase. - // It will thus be in the TAKE_OVER state. - /*-----------------------------------------------------------------*/ - ndbrequire(findTakeOver(nodePtr.i) != RNIL); - nodePtr.p->activeStatus = Sysfile::NS_TakeOver; - } else { - jam(); - /*-----------------------------------------------------------------*/ - /* THE NODE IS ACTIVE AND HAS NOT COMPLETED ANY OF THE LAST 3 - * CHECKPOINTS */ - /* WE MUST TAKE IT OUT OF ACTION AND START A NEW NODE TO TAKE OVER.*/ - /*-----------------------------------------------------------------*/ - nodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - }//if - break; - case Sysfile::NS_TakeOver: - jam(); - break; - default: - jam(); - /*empty*/; - break; - }//switch - }//if - }//for - if (isMaster()) { - jam(); - checkStartTakeOver(signal); - setNodeRestartInfoBits(); - }//if -}//Dbdih::setLcpActiveStatusStart() - -/*************************************************************************/ -/* SET NODE ACTIVE STATUS AT SYSTEM RESTART AND WHEN UPDATED BY MASTER */ -/*************************************************************************/ -void Dbdih::setNodeActiveStatus() -{ - NodeRecordPtr snaNodeptr; - - for (snaNodeptr.i = 1; snaNodeptr.i < MAX_NDB_NODES; snaNodeptr.i++) { - ptrAss(snaNodeptr, nodeRecord); - const Uint32 tsnaNodeBits = Sysfile::getNodeStatus(snaNodeptr.i, - SYSFILE->nodeStatus); - switch (tsnaNodeBits) { - case Sysfile::NS_Active: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_Active; - break; - case Sysfile::NS_ActiveMissed_1: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_1; - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_ActiveMissed_2; - break; - case Sysfile::NS_TakeOver: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_TakeOver; - break; - case Sysfile::NS_HotSpare: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_HotSpare; - break; - case Sysfile::NS_NotActive_NotTakenOver: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver; - break; - case Sysfile::NS_NotDefined: - jam(); - snaNodeptr.p->activeStatus = Sysfile::NS_NotDefined; - break; - default: - ndbrequire(false); - break; - }//switch - }//for -}//Dbdih::setNodeActiveStatus() - -/***************************************************************************/ -/* SET THE NODE GROUP BASED ON THE RESTART INFORMATION OR AS SET BY MASTER */ -/***************************************************************************/ -void Dbdih::setNodeGroups() -{ - NodeGroupRecordPtr NGPtr; - NodeRecordPtr sngNodeptr; - Uint32 Ti; - - for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) { - NGPtr.i = Ti; - ptrAss(NGPtr, nodeGroupRecord); - NGPtr.p->nodeCount = 0; - }//for - for (sngNodeptr.i = 1; sngNodeptr.i < MAX_NDB_NODES; sngNodeptr.i++) { 
- ptrAss(sngNodeptr, nodeRecord); - Sysfile::ActiveStatus s = - (Sysfile::ActiveStatus)Sysfile::getNodeStatus(sngNodeptr.i, - SYSFILE->nodeStatus); - switch (s){ - case Sysfile::NS_Active: - case Sysfile::NS_ActiveMissed_1: - case Sysfile::NS_ActiveMissed_2: - case Sysfile::NS_NotActive_NotTakenOver: - case Sysfile::NS_TakeOver: - jam(); - sngNodeptr.p->nodeGroup = Sysfile::getNodeGroup(sngNodeptr.i, - SYSFILE->nodeGroups); - NGPtr.i = sngNodeptr.p->nodeGroup; - ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord); - NGPtr.p->nodesInGroup[NGPtr.p->nodeCount] = sngNodeptr.i; - NGPtr.p->nodeCount++; - break; - case Sysfile::NS_HotSpare: - case Sysfile::NS_NotDefined: - jam(); - sngNodeptr.p->nodeGroup = ZNIL; - break; - default: - ndbrequire(false); - return; - break; - }//switch - }//for - cnoOfNodeGroups = 0; - for (Ti = 0; Ti < MAX_NDB_NODES; Ti++) { - jam(); - NGPtr.i = Ti; - ptrAss(NGPtr, nodeGroupRecord); - if (NGPtr.p->nodeCount != 0) { - jam(); - cnoOfNodeGroups++; - }//if - }//for - cnoHotSpare = csystemnodes - (cnoOfNodeGroups * cnoReplicas); -}//Dbdih::setNodeGroups() - -/*************************************************************************/ -/* SET NODE INFORMATION AFTER RECEIVING RESTART INFORMATION FROM MASTER. */ -/* WE TAKE THE OPPORTUNITY TO SYNCHRONISE OUR DATA WITH THE MASTER. IT */ -/* IS ONLY THE MASTER THAT WILL ACT ON THIS DATA. WE WILL KEEP THEM */ -/* UPDATED FOR THE CASE WHEN WE HAVE TO BECOME MASTER. */ -/*************************************************************************/ -void Dbdih::setNodeInfo(Signal* signal) -{ - setNodeActiveStatus(); - setNodeGroups(); - sendHOT_SPAREREP(signal); -}//Dbdih::setNodeInfo() - -/*************************************************************************/ -// Keep also DBDICT informed about the Hot Spare situation in the cluster. -/*************************************************************************/ -void Dbdih::sendHOT_SPAREREP(Signal* signal) -{ - NodeRecordPtr locNodeptr; - Uint32 Ti = 0; - HotSpareRep * const hotSpare = (HotSpareRep*)&signal->theData[0]; - NodeBitmask::clear(hotSpare->theHotSpareNodes); - for (locNodeptr.i = 1; locNodeptr.i < MAX_NDB_NODES; locNodeptr.i++) { - ptrAss(locNodeptr, nodeRecord); - switch (locNodeptr.p->activeStatus) { - case Sysfile::NS_HotSpare: - jam(); - NodeBitmask::set(hotSpare->theHotSpareNodes, locNodeptr.i); - Ti++; - break; - default: - jam(); - break; - }//switch - }//for - hotSpare->noHotSpareNodes = Ti; - sendSignal(DBDICT_REF, GSN_HOT_SPAREREP, - signal, HotSpareRep::SignalLength, JBB); -}//Dbdih::sendHOT_SPAREREP() - -/*************************************************************************/ -/* SET LCP ACTIVE STATUS FOR ALL NODES BASED ON THE INFORMATION IN */ -/* THE RESTART INFORMATION. */ -/*************************************************************************/ -#if 0 -void Dbdih::setNodeLcpActiveStatus() -{ - c_lcpState.m_lcpActiveStatus.clear(); - for (Uint32 i = 1; i < MAX_NDB_NODES; i++) { - if (NodeBitmask::get(SYSFILE->lcpActive, i)) { - jam(); - c_lcpState.m_lcpActiveStatus.set(i); - }//if - }//for -}//Dbdih::setNodeLcpActiveStatus() -#endif - -/*************************************************************************/ -/* SET THE RESTART INFO BITS BASED ON THE NODES ACTIVE STATUS. 
*/ -/*************************************************************************/ -void Dbdih::setNodeRestartInfoBits() -{ - NodeRecordPtr nodePtr; - Uint32 tsnrNodeGroup; - Uint32 tsnrNodeActiveStatus; - Uint32 i; - for(i = 1; i < MAX_NDB_NODES; i++){ - Sysfile::setNodeStatus(i, SYSFILE->nodeStatus, Sysfile::NS_Active); - }//for - for(i = 1; i < Sysfile::NODE_GROUPS_SIZE; i++){ - SYSFILE->nodeGroups[i] = 0; - }//for - NdbNodeBitmask::clear(SYSFILE->lcpActive); - - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - ptrAss(nodePtr, nodeRecord); - switch (nodePtr.p->activeStatus) { - case Sysfile::NS_Active: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_Active; - break; - case Sysfile::NS_ActiveMissed_1: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_1; - break; - case Sysfile::NS_ActiveMissed_2: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_ActiveMissed_2; - break; - case Sysfile::NS_HotSpare: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_HotSpare; - break; - case Sysfile::NS_TakeOver: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_TakeOver; - break; - case Sysfile::NS_NotActive_NotTakenOver: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_NotActive_NotTakenOver; - break; - case Sysfile::NS_NotDefined: - jam(); - tsnrNodeActiveStatus = Sysfile::NS_NotDefined; - break; - default: - ndbrequire(false); - tsnrNodeActiveStatus = Sysfile::NS_NotDefined; // remove warning - break; - }//switch - Sysfile::setNodeStatus(nodePtr.i, SYSFILE->nodeStatus, - tsnrNodeActiveStatus); - if (nodePtr.p->nodeGroup == ZNIL) { - jam(); - tsnrNodeGroup = NO_NODE_GROUP_ID; - } else { - jam(); - tsnrNodeGroup = nodePtr.p->nodeGroup; - }//if - Sysfile::setNodeGroup(nodePtr.i, SYSFILE->nodeGroups, tsnrNodeGroup); - if (c_lcpState.m_participatingLQH.get(nodePtr.i)){ - jam(); - NodeBitmask::set(SYSFILE->lcpActive, nodePtr.i); - }//if - }//for -}//Dbdih::setNodeRestartInfoBits() - -/*************************************************************************/ -/* START THE GLOBAL CHECKPOINT PROTOCOL IN MASTER AT START-UP */ -/*************************************************************************/ -void Dbdih::startGcp(Signal* signal) -{ - cgcpStatus = GCP_READY; - coldGcpStatus = cgcpStatus; - coldGcpId = cnewgcp; - cgcpSameCounter = 0; - signal->theData[0] = DihContinueB::ZSTART_GCP; - signal->theData[1] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - signal->theData[0] = DihContinueB::ZCHECK_GCP_STOP; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); -}//Dbdih::startGcp() - -void Dbdih::updateNodeInfo(FragmentstorePtr fragPtr) -{ - ReplicaRecordPtr replicatePtr; - Uint32 index = 0; - replicatePtr.i = fragPtr.p->storedReplicas; - do { - jam(); - ptrCheckGuard(replicatePtr, creplicaFileSize, replicaRecord); - ndbrequire(index < MAX_REPLICAS); - fragPtr.p->activeNodes[index] = replicatePtr.p->procNode; - index++; - replicatePtr.i = replicatePtr.p->nextReplica; - } while (replicatePtr.i != RNIL); - fragPtr.p->fragReplicas = index; - - /* ----------------------------------------------------------------------- */ - // We switch primary to the preferred primary if the preferred primary is - // in the list. 
- /* ----------------------------------------------------------------------- */ - const Uint32 prefPrim = fragPtr.p->preferredPrimary; - for (Uint32 i = 1; i < index; i++) { - jam(); - ndbrequire(i < MAX_REPLICAS); - if (fragPtr.p->activeNodes[i] == prefPrim){ - jam(); - Uint32 switchNode = fragPtr.p->activeNodes[0]; - fragPtr.p->activeNodes[0] = prefPrim; - fragPtr.p->activeNodes[i] = switchNode; - break; - }//if - }//for -}//Dbdih::updateNodeInfo() - -void Dbdih::writeFragment(RWFragment* wf, FragmentstorePtr fragPtr) -{ - writePageWord(wf, wf->fragId); - writePageWord(wf, fragPtr.p->preferredPrimary); - writePageWord(wf, fragPtr.p->noStoredReplicas); - writePageWord(wf, fragPtr.p->noOldStoredReplicas); - writePageWord(wf, fragPtr.p->distributionKey); - writePageWord(wf, fragPtr.p->m_log_part_id); -}//Dbdih::writeFragment() - -void Dbdih::writePageWord(RWFragment* wf, Uint32 dataWord) -{ - if (wf->wordIndex >= 2048) { - jam(); - ndbrequire(wf->wordIndex == 2048); - allocpage(wf->rwfPageptr); - wf->wordIndex = 32; - wf->pageIndex++; - ndbrequire(wf->pageIndex < 8); - wf->rwfTabPtr.p->pageRef[wf->pageIndex] = wf->rwfPageptr.i; - wf->rwfTabPtr.p->noPages++; - }//if - wf->rwfPageptr.p->word[wf->wordIndex] = dataWord; - wf->wordIndex++; -}//Dbdih::writePageWord() - -void Dbdih::writeReplicas(RWFragment* wf, Uint32 replicaStartIndex) -{ - ReplicaRecordPtr wfReplicaPtr; - wfReplicaPtr.i = replicaStartIndex; - while (wfReplicaPtr.i != RNIL) { - jam(); - ptrCheckGuard(wfReplicaPtr, creplicaFileSize, replicaRecord); - writePageWord(wf, wfReplicaPtr.p->procNode); - writePageWord(wf, wfReplicaPtr.p->initialGci); - writePageWord(wf, wfReplicaPtr.p->noCrashedReplicas); - writePageWord(wf, wfReplicaPtr.p->nextLcp); - Uint32 i; - for (i = 0; i < MAX_LCP_STORED; i++) { - writePageWord(wf, wfReplicaPtr.p->maxGciCompleted[i]); - writePageWord(wf, wfReplicaPtr.p->maxGciStarted[i]); - writePageWord(wf, wfReplicaPtr.p->lcpId[i]); - writePageWord(wf, wfReplicaPtr.p->lcpStatus[i]); - }//if - for (i = 0; i < 8; i++) { - writePageWord(wf, wfReplicaPtr.p->createGci[i]); - writePageWord(wf, wfReplicaPtr.p->replicaLastGci[i]); - }//if - - wfReplicaPtr.i = wfReplicaPtr.p->nextReplica; - }//while -}//Dbdih::writeReplicas() - -void Dbdih::writeRestorableGci(Signal* signal, FileRecordPtr filePtr) -{ - for (Uint32 i = 0; i < Sysfile::SYSFILE_SIZE32; i++) { - sysfileDataToFile[i] = sysfileData[i]; - }//for - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZLIST_OF_PAIRS_SYNCH; - signal->theData[4] = ZVAR_NO_CRESTART_INFO_TO_FILE; - signal->theData[5] = 1; /* AMOUNT OF PAGES */ - signal->theData[6] = 0; /* MEMORY PAGE = 0 SINCE COMMON STORED VARIABLE */ - signal->theData[7] = 0; - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA); -}//Dbdih::writeRestorableGci() - -void Dbdih::writeTabfile(Signal* signal, TabRecord* tab, FileRecordPtr filePtr) -{ - signal->theData[0] = filePtr.p->fileRef; - signal->theData[1] = reference(); - signal->theData[2] = filePtr.i; - signal->theData[3] = ZLIST_OF_PAIRS_SYNCH; - signal->theData[4] = ZVAR_NO_WORD; - signal->theData[5] = tab->noPages; - for (Uint32 i = 0; i < tab->noPages; i++) { - jam(); - signal->theData[6 + (2 * i)] = tab->pageRef[i]; - signal->theData[7 + (2 * i)] = i; - }//for - Uint32 length = 6 + (2 * tab->noPages); - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, length, JBA); -}//Dbdih::writeTabfile() - -void Dbdih::execDEBUG_SIG(Signal* signal) -{ - signal = signal; //Avoid compiler 
warnings -}//Dbdih::execDEBUG_SIG() - -void -Dbdih::execDUMP_STATE_ORD(Signal* signal) -{ - DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0]; - Uint32 arg = dumpState->args[0]; - if (arg == DumpStateOrd::DihDumpNodeRestartInfo) { - infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d, c_nodeStartMaster.wait = %d", - c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp, c_nodeStartMaster.wait); - infoEvent("cstartGcpNow = %d, cgcpStatus = %d", - cstartGcpNow, cgcpStatus); - infoEvent("cfirstVerifyQueue = %d, cverifyQueueCounter = %d", - cfirstVerifyQueue, cverifyQueueCounter); - infoEvent("cgcpOrderBlocked = %d, cgcpStartCounter = %d", - cgcpOrderBlocked, cgcpStartCounter); - }//if - if (arg == DumpStateOrd::DihDumpNodeStatusInfo) { - NodeRecordPtr localNodePtr; - infoEvent("Printing nodeStatus of all nodes"); - for (localNodePtr.i = 1; localNodePtr.i < MAX_NDB_NODES; localNodePtr.i++) { - ptrAss(localNodePtr, nodeRecord); - if (localNodePtr.p->nodeStatus != NodeRecord::NOT_IN_CLUSTER) { - infoEvent("Node = %d has status = %d", - localNodePtr.i, localNodePtr.p->nodeStatus); - }//if - }//for - }//if - - if (arg == DumpStateOrd::DihPrintFragmentation){ - infoEvent("Printing fragmentation of all tables --"); - for(Uint32 i = 0; itabStatus != TabRecord::TS_ACTIVE) - continue; - - for(Uint32 j = 0; j < tabPtr.p->totalfragments; j++){ - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, j, fragPtr); - - Uint32 nodeOrder[MAX_REPLICAS]; - const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, nodeOrder); - char buf[100]; - BaseString::snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j); - for(Uint32 k = 0; k < noOfReplicas; k++){ - char tmp[100]; - BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]); - strcat(buf, tmp); - } - infoEvent(buf); - } - } - } - - if (signal->theData[0] == 7000) { - infoEvent("ctimer = %d, cgcpParticipantState = %d, cgcpStatus = %d", - c_lcpState.ctimer, cgcpParticipantState, cgcpStatus); - infoEvent("coldGcpStatus = %d, coldGcpId = %d, cmasterState = %d", - coldGcpStatus, coldGcpId, cmasterState); - infoEvent("cmasterTakeOverNode = %d, ctcCounter = %d", - cmasterTakeOverNode, c_lcpState.ctcCounter); - }//if - if (signal->theData[0] == 7001) { - infoEvent("c_lcpState.keepGci = %d", - c_lcpState.keepGci); - infoEvent("c_lcpState.lcpStatus = %d, clcpStopGcp = %d", - c_lcpState.lcpStatus, - c_lcpState.lcpStopGcp); - infoEvent("cgcpStartCounter = %d, cimmediateLcpStart = %d", - cgcpStartCounter, c_lcpState.immediateLcpStart); - }//if - if (signal->theData[0] == 7002) { - infoEvent("cnoOfActiveTables = %d, cgcpDelay = %d", - cnoOfActiveTables, cgcpDelay); - infoEvent("cdictblockref = %d, cfailurenr = %d", - cdictblockref, cfailurenr); - infoEvent("con_lineNodes = %d, reference() = %d, creceivedfrag = %d", - con_lineNodes, reference(), creceivedfrag); - }//if - if (signal->theData[0] == 7003) { - infoEvent("cfirstAliveNode = %d, cgckptflag = %d", - cfirstAliveNode, cgckptflag); - infoEvent("clocallqhblockref = %d, clocaltcblockref = %d, cgcpOrderBlocked = %d", - clocallqhblockref, clocaltcblockref, cgcpOrderBlocked); - infoEvent("cstarttype = %d, csystemnodes = %d, currentgcp = %d", - cstarttype, csystemnodes, currentgcp); - }//if - if (signal->theData[0] == 7004) { - infoEvent("cmasterdihref = %d, cownNodeId = %d, cnewgcp = %d", - cmasterdihref, cownNodeId, cnewgcp); - infoEvent("cndbStartReqBlockref = %d, cremainingfrags = %d", - cndbStartReqBlockref, cremainingfrags); - infoEvent("cntrlblockref = %d, 
cgcpSameCounter = %d, coldgcp = %d", - cntrlblockref, cgcpSameCounter, coldgcp); - }//if - if (signal->theData[0] == 7005) { - infoEvent("crestartGci = %d", - crestartGci); - }//if - if (signal->theData[0] == 7006) { - infoEvent("clcpDelay = %d, cgcpMasterTakeOverState = %d", - c_lcpState.clcpDelay, cgcpMasterTakeOverState); - infoEvent("cmasterNodeId = %d", cmasterNodeId); - infoEvent("cnoHotSpare = %d, c_nodeStartMaster.startNode = %d, c_nodeStartMaster.wait = %d", - cnoHotSpare, c_nodeStartMaster.startNode, c_nodeStartMaster.wait); - }//if - if (signal->theData[0] == 7007) { - infoEvent("c_nodeStartMaster.failNr = %d", c_nodeStartMaster.failNr); - infoEvent("c_nodeStartMaster.startInfoErrorCode = %d", - c_nodeStartMaster.startInfoErrorCode); - infoEvent("c_nodeStartMaster.blockLcp = %d, c_nodeStartMaster.blockGcp = %d", - c_nodeStartMaster.blockLcp, c_nodeStartMaster.blockGcp); - }//if - if (signal->theData[0] == 7008) { - infoEvent("cfirstDeadNode = %d, cstartPhase = %d, cnoReplicas = %d", - cfirstDeadNode, cstartPhase, cnoReplicas); - infoEvent("cwaitLcpSr = %d",cwaitLcpSr); - }//if - if (signal->theData[0] == 7009) { - infoEvent("ccalcOldestRestorableGci = %d, cnoOfNodeGroups = %d", - c_lcpState.oldestRestorableGci, cnoOfNodeGroups); - infoEvent("cstartGcpNow = %d", - cstartGcpNow); - infoEvent("crestartGci = %d", - crestartGci); - }//if - if (signal->theData[0] == 7010) { - infoEvent("cminHotSpareNodes = %d, c_lcpState.lcpStatusUpdatedPlace = %d, cLcpStart = %d", - cminHotSpareNodes, c_lcpState.lcpStatusUpdatedPlace, c_lcpState.lcpStart); - infoEvent("c_blockCommit = %d, c_blockCommitNo = %d", - c_blockCommit, c_blockCommitNo); - }//if - if (signal->theData[0] == 7011){ - infoEvent("c_COPY_GCIREQ_Counter = %s", - c_COPY_GCIREQ_Counter.getText()); - infoEvent("c_COPY_TABREQ_Counter = %s", - c_COPY_TABREQ_Counter.getText()); - infoEvent("c_CREATE_FRAGREQ_Counter = %s", - c_CREATE_FRAGREQ_Counter.getText()); - infoEvent("c_DIH_SWITCH_REPLICA_REQ_Counter = %s", - c_DIH_SWITCH_REPLICA_REQ_Counter.getText()); - infoEvent("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText()); - infoEvent("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText()); - infoEvent("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText()); - infoEvent("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText()); - infoEvent("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText()); - infoEvent("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText()); - infoEvent("c_MASTER_GCPREQ_Counter = %s", - c_MASTER_GCPREQ_Counter.getText()); - infoEvent("c_MASTER_LCPREQ_Counter = %s", - c_MASTER_LCPREQ_Counter.getText()); - infoEvent("c_START_INFOREQ_Counter = %s", - c_START_INFOREQ_Counter.getText()); - infoEvent("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText()); - infoEvent("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText()); - infoEvent("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText()); - infoEvent("c_TC_CLOPSIZEREQ_Counter = %s", - c_TC_CLOPSIZEREQ_Counter.getText()); - infoEvent("c_TCGETOPSIZEREQ_Counter = %s", - c_TCGETOPSIZEREQ_Counter.getText()); - infoEvent("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText()); - } - - if(signal->theData[0] == 7012){ - char buf[8*_NDB_NODE_BITMASK_SIZE+1]; - infoEvent("ParticipatingDIH = %s", c_lcpState.m_participatingDIH.getText(buf)); - infoEvent("ParticipatingLQH = %s", c_lcpState.m_participatingLQH.getText(buf)); - infoEvent("m_LCP_COMPLETE_REP_Counter_DIH = %s", - 
c_lcpState.m_LCP_COMPLETE_REP_Counter_DIH.getText()); - infoEvent("m_LCP_COMPLETE_REP_Counter_LQH = %s", - c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.getText()); - infoEvent("m_LAST_LCP_FRAG_ORD = %s", - c_lcpState.m_LAST_LCP_FRAG_ORD.getText()); - infoEvent("m_LCP_COMPLETE_REP_From_Master_Received = %d", - c_lcpState.m_LCP_COMPLETE_REP_From_Master_Received); - - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if(nodePtr.p->nodeStatus == NodeRecord::ALIVE){ - Uint32 i; - for(i = 0; inoOfStartedChkpt; i++){ - infoEvent("Node %d: started: table=%d fragment=%d replica=%d", - nodePtr.i, - nodePtr.p->startedChkpt[i].tableId, - nodePtr.p->startedChkpt[i].fragId, - nodePtr.p->startedChkpt[i].replicaPtr); - } - - for(i = 0; inoOfQueuedChkpt; i++){ - infoEvent("Node %d: queued: table=%d fragment=%d replica=%d", - nodePtr.i, - nodePtr.p->queuedChkpt[i].tableId, - nodePtr.p->queuedChkpt[i].fragId, - nodePtr.p->queuedChkpt[i].replicaPtr); - } - } - } - } - - if(arg == 7019 && signal->getLength() == 2) - { - char buf2[8+1]; - NodeRecordPtr nodePtr; - nodePtr.i = signal->theData[1]; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - infoEvent("NF Node %d tc: %d lqh: %d dih: %d dict: %d recNODE_FAILREP: %d", - nodePtr.i, - nodePtr.p->dbtcFailCompleted, - nodePtr.p->dblqhFailCompleted, - nodePtr.p->dbdihFailCompleted, - nodePtr.p->dbdictFailCompleted, - nodePtr.p->recNODE_FAILREP); - infoEvent(" m_NF_COMPLETE_REP: %s m_nodefailSteps: %s", - nodePtr.p->m_NF_COMPLETE_REP.getText(), - nodePtr.p->m_nodefailSteps.getText(buf2)); - } - - if(arg == 7020 && signal->getLength() > 3) - { - Uint32 gsn= signal->theData[1]; - Uint32 block= signal->theData[2]; - Uint32 length= signal->length() - 3; - memmove(signal->theData, signal->theData+3, 4*length); - sendSignal(numberToRef(block, getOwnNodeId()), gsn, signal, length, JBB); - - warningEvent("-- SENDING CUSTOM SIGNAL --"); - char buf[100], buf2[100]; - buf2[0]= 0; - for(Uint32 i = 0; itheData[i]); - snprintf(buf2, 100, "%s", buf); - } - warningEvent("gsn: %d block: %s, length: %d theData: %s", - gsn, getBlockName(block, "UNKNOWN"), length, buf); - - g_eventLogger.warning("-- SENDING CUSTOM SIGNAL --"); - g_eventLogger.warning("gsn: %d block: %s, length: %d theData: %s", - gsn, getBlockName(block, "UNKNOWN"), length, buf); - } - - if(arg == DumpStateOrd::DihDumpLCPState){ - infoEvent("-- Node %d LCP STATE --", getOwnNodeId()); - infoEvent("lcpStatus = %d (update place = %d) ", - c_lcpState.lcpStatus, c_lcpState.lcpStatusUpdatedPlace); - infoEvent - ("lcpStart = %d lcpStopGcp = %d keepGci = %d oldestRestorable = %d", - c_lcpState.lcpStart, c_lcpState.lcpStopGcp, - c_lcpState.keepGci, c_lcpState.oldestRestorableGci); - - infoEvent - ("immediateLcpStart = %d masterLcpNodeId = %d", - c_lcpState.immediateLcpStart, - refToNode(c_lcpState.m_masterLcpDihRef)); - - for (Uint32 i = 0; i<10; i++) - { - infoEvent("%u : status: %u place: %u", i, - c_lcpState.m_saveState[i].m_status, - c_lcpState.m_saveState[i].m_place); - } - - infoEvent("-- Node %d LCP STATE --", getOwnNodeId()); - } - - if(arg == DumpStateOrd::DihDumpLCPMasterTakeOver){ - infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId()); - infoEvent - ("c_lcpMasterTakeOverState.state = %d updatePlace = %d failedNodeId = %d", - c_lcpMasterTakeOverState.state, - c_lcpMasterTakeOverState.updatePlace, - c_lcpMasterTakeOverState.failedNodeId); - - infoEvent("c_lcpMasterTakeOverState.minTableId = %u minFragId = %u", - 
c_lcpMasterTakeOverState.minTableId, - c_lcpMasterTakeOverState.minFragId); - - infoEvent("-- Node %d LCP MASTER TAKE OVER STATE --", getOwnNodeId()); - } - - if (signal->theData[0] == 7015) - { - if (signal->getLength() == 1) - { - signal->theData[1] = 0; - } - - Uint32 tableId = signal->theData[1]; - if (tableId < ctabFileSize) - { - signal->theData[0] = 7021; - execDUMP_STATE_ORD(signal); - signal->theData[0] = 7015; - signal->theData[1] = tableId + 1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - } - - if(arg == DumpStateOrd::EnableUndoDelayDataWrite){ - g_eventLogger.info("Dbdih:: delay write of datapages for table = %d", - dumpState->args[1]); - // Send this dump to ACC and TUP - EXECUTE_DIRECT(DBACC, GSN_DUMP_STATE_ORD, signal, 2); - EXECUTE_DIRECT(DBTUP, GSN_DUMP_STATE_ORD, signal, 2); - - // Start immediate LCP - c_lcpState.ctimer += (1 << c_lcpState.clcpDelay); - return; - } - - if (signal->theData[0] == DumpStateOrd::DihAllAllowNodeStart) { - for (Uint32 i = 1; i < MAX_NDB_NODES; i++) - setAllowNodeStart(i, true); - return; - }//if - if (signal->theData[0] == DumpStateOrd::DihMinTimeBetweenLCP) { - // Set time between LCP to min value - g_eventLogger.info("Set time between LCP to min value"); - c_lcpState.clcpDelay = 0; // TimeBetweenLocalCheckpoints.min - return; - } - if (signal->theData[0] == DumpStateOrd::DihMaxTimeBetweenLCP) { - // Set time between LCP to max value - g_eventLogger.info("Set time between LCP to max value"); - c_lcpState.clcpDelay = 31; // TimeBetweenLocalCheckpoints.max - return; - } - - if(arg == 7098){ - if(signal->length() == 3){ - jam(); - infoEvent("startLcpRoundLoopLab(tabel=%d, fragment=%d)", - signal->theData[1], signal->theData[2]); - startLcpRoundLoopLab(signal, signal->theData[1], signal->theData[2]); - return; - } else { - infoEvent("Invalid no of arguments to 7098 - startLcpRoundLoopLab -" - " expected 2 (tableId, fragmentId)"); - } - } - - if(arg == DumpStateOrd::DihStartLcpImmediately){ - c_lcpState.ctimer += (1 << c_lcpState.clcpDelay); - return; - } - - if (arg == DumpStateOrd::DihSetTimeBetweenGcp) - { - if (signal->getLength() == 1) - { - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - ndb_mgm_get_int_parameter(p, CFG_DB_GCP_INTERVAL, &cgcpDelay); - } - else - { - cgcpDelay = signal->theData[1]; - } - g_eventLogger.info("Setting time between gcp : %d", cgcpDelay); - } - - if (arg == 7021 && signal->getLength() == 2) - { - TabRecordPtr tabPtr; - tabPtr.i = signal->theData[1]; - if (tabPtr.i >= ctabFileSize) - return; - - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - if(tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) - return; - - infoEvent - ("Table %d: TabCopyStatus: %d TabUpdateStatus: %d TabLcpStatus: %d", - tabPtr.i, - tabPtr.p->tabCopyStatus, - tabPtr.p->tabUpdateState, - tabPtr.p->tabLcpStatus); - - FragmentstorePtr fragPtr; - for (Uint32 fid = 0; fid < tabPtr.p->totalfragments; fid++) { - jam(); - getFragstore(tabPtr.p, fid, fragPtr); - - char buf[100], buf2[100]; - BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ", - fid, fragPtr.p->noLcpReplicas); - - Uint32 num=0; - ReplicaRecordPtr replicaPtr; - replicaPtr.i = fragPtr.p->storedReplicas; - do { - ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord); - BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)", - buf, num, - replicaPtr.p->procNode, - replicaPtr.p->lcpIdStarted, - replicaPtr.p->lcpOngoingFlag ? 
"Ongoing" : "Idle"); - BaseString::snprintf(buf, sizeof(buf), "%s", buf2); - - num++; - replicaPtr.i = replicaPtr.p->nextReplica; - } while (replicaPtr.i != RNIL); - infoEvent(buf); - } - } - - if (arg == 7022) - { - jam(); - crashSystemAtGcpStop(signal, true); - } -}//Dbdih::execDUMP_STATE_ORD() - -void -Dbdih::execPREP_DROP_TAB_REQ(Signal* signal){ - jamEntry(); - - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - - TabRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - - PrepDropTabRef::ErrorCode err = PrepDropTabRef::OK; - { /** - * Check table state - */ - bool ok = false; - switch(tabPtr.p->tabStatus){ - case TabRecord::TS_IDLE: - ok = true; - jam(); - err = PrepDropTabRef::NoSuchTable; - break; - case TabRecord::TS_DROPPING: - ok = true; - jam(); - err = PrepDropTabRef::PrepDropInProgress; - break; - case TabRecord::TS_CREATING: - jam(); - ok = true; - break; - case TabRecord::TS_ACTIVE: - ok = true; - jam(); - break; - } - ndbrequire(ok); - } - - if(err != PrepDropTabRef::OK){ - jam(); - PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = err; - sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal, - PrepDropTabRef::SignalLength, JBB); - return; - } - - tabPtr.p->tabStatus = TabRecord::TS_DROPPING; - tabPtr.p->m_prepDropTab.senderRef = senderRef; - tabPtr.p->m_prepDropTab.senderData = senderData; - - if(isMaster()){ - /** - * Remove from queue - */ - NodeRecordPtr nodePtr; - for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) { - jam(); - ptrAss(nodePtr, nodeRecord); - if (c_lcpState.m_participatingLQH.get(nodePtr.i)){ - - Uint32 index = 0; - Uint32 count = nodePtr.p->noOfQueuedChkpt; - while(index < count){ - if(nodePtr.p->queuedChkpt[index].tableId == tabPtr.i){ - jam(); - // g_eventLogger.info("Unqueuing %d", index); - - count--; - for(Uint32 i = index; iqueuedChkpt[i] = nodePtr.p->queuedChkpt[i + 1]; - } - } else { - index++; - } - } - nodePtr.p->noOfQueuedChkpt = count; - } - } - } - - { /** - * Check table lcp state - */ - - bool ok = false; - switch(tabPtr.p->tabLcpStatus){ - case TabRecord::TLS_COMPLETED: - case TabRecord::TLS_WRITING_TO_FILE: - ok = true; - jam(); - break; - return; - case TabRecord::TLS_ACTIVE: - ok = true; - jam(); - - tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; - - /** - * First check if all fragments are done - */ - if(checkLcpAllTablesDoneInLqh()){ - jam(); - - g_eventLogger.info("This is the last table"); - - /** - * Then check if saving of tab info is done for all tables - */ - LcpStatus a = c_lcpState.lcpStatus; - checkLcpCompletedLab(signal); - - if(a != c_lcpState.lcpStatus){ - g_eventLogger.info("And all tables are written to already written disk"); - } - } - break; - } - ndbrequire(ok); - } - - { /** - * Send WaitDropTabReq to all LQH - */ - WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend(); - req->tableId = tabPtr.i; - req->senderRef = reference(); - - NodeRecordPtr nodePtr; - nodePtr.i = cfirstAliveNode; - tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(); - while(nodePtr.i != RNIL){ - jam(); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - - tabPtr.p->m_prepDropTab.waitDropTabCount.setWaitingFor(nodePtr.i); - sendSignal(calcLqhBlockRef(nodePtr.i), GSN_WAIT_DROP_TAB_REQ, - signal, WaitDropTabReq::SignalLength, JBB); - - nodePtr.i = 
nodePtr.p->nextNode; - } - } - - waitDropTabWritingToFile(signal, tabPtr); -} - -void -Dbdih::waitDropTabWritingToFile(Signal* signal, TabRecordPtr tabPtr){ - - if(tabPtr.p->tabLcpStatus == TabRecord::TLS_WRITING_TO_FILE){ - jam(); - signal->theData[0] = DihContinueB::WAIT_DROP_TAB_WRITING_TO_FILE; - signal->theData[1] = tabPtr.i; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - return; - } - - ndbrequire(tabPtr.p->tabLcpStatus == TabRecord::TLS_COMPLETED); - checkPrepDropTabComplete(signal, tabPtr); -} - -void -Dbdih::checkPrepDropTabComplete(Signal* signal, TabRecordPtr tabPtr){ - - if(tabPtr.p->tabLcpStatus != TabRecord::TLS_COMPLETED){ - jam(); - return; - } - - if(!tabPtr.p->m_prepDropTab.waitDropTabCount.done()){ - jam(); - return; - } - - const Uint32 ref = tabPtr.p->m_prepDropTab.senderRef; - if(ref != 0){ - PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - conf->senderData = tabPtr.p->m_prepDropTab.senderData; - sendSignal(tabPtr.p->m_prepDropTab.senderRef, GSN_PREP_DROP_TAB_CONF, - signal, PrepDropTabConf::SignalLength, JBB); - tabPtr.p->m_prepDropTab.senderRef = 0; - } -} - -void -Dbdih::execWAIT_DROP_TAB_REF(Signal* signal){ - jamEntry(); - WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr(); - - TabRecordPtr tabPtr; - tabPtr.i = ref->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING); - Uint32 nodeId = refToNode(ref->senderRef); - - ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable || - ref->errorCode == WaitDropTabRef::NF_FakeErrorREF); - - tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId); - checkPrepDropTabComplete(signal, tabPtr); -} - -void -Dbdih::execWAIT_DROP_TAB_CONF(Signal* signal){ - jamEntry(); - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr(); - - TabRecordPtr tabPtr; - tabPtr.i = conf->tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_DROPPING); - Uint32 nodeId = refToNode(conf->senderRef); - tabPtr.p->m_prepDropTab.waitDropTabCount.clearWaitingFor(nodeId); - checkPrepDropTabComplete(signal, tabPtr); -} - -void -Dbdih::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId){ - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr(); - conf->tableId = tableId; - - const Uint32 RT_BREAK = 16; - for(Uint32 i = 0; itabStatus == TabRecord::TS_DROPPING){ - if(tabPtr.p->m_prepDropTab.waitDropTabCount.isWaitingFor(nodeId)){ - conf->senderRef = calcLqhBlockRef(nodeId); - execWAIT_DROP_TAB_CONF(signal); - tabPtr.i++; - break; - } - } - } - - if(tabPtr.i == ctabFileSize){ - /** - * Finished - */ - jam(); - return; - } - - signal->theData[0] = DihContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -} - - -void -Dbdih::execNDB_TAMPER(Signal* signal) -{ - if ((ERROR_INSERTED(7011)) && - (signal->theData[0] == 7012)) { - CLEAR_ERROR_INSERT_VALUE; - calculateKeepGciLab(signal, 0, 0); - return; - }//if - SET_ERROR_INSERT_VALUE(signal->theData[0]); - return; -}//Dbdih::execNDB_TAMPER() - -void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){ - BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0]; - - jamEntry(); -#if 0 - ndbrequire(c_blockCommit == false || - c_blockCommitNo == block->failNo); -#else - 
if(!(c_blockCommit == false || c_blockCommitNo == block->failNo)){ - infoEvent("Possible bug in Dbdih::execBLOCK_COMMIT_ORD c_blockCommit = %d c_blockCommitNo = %d" - " sig->failNo = %d", c_blockCommit, c_blockCommitNo, block->failNo); - } -#endif - c_blockCommit = true; - c_blockCommitNo = block->failNo; -} - -void Dbdih::execUNBLOCK_COMMIT_ORD(Signal* signal){ - UnblockCommitOrd* const unblock = (UnblockCommitOrd *)&signal->theData[0]; - (void)unblock; - - jamEntry(); - - if(c_blockCommit == true){ - jam(); - // ndbrequire(c_blockCommitNo == unblock->failNo); - - c_blockCommit = false; - emptyverificbuffer(signal, true); - } -} - -void Dbdih::execSTOP_PERM_REQ(Signal* signal){ - - jamEntry(); - - StopPermReq* const req = (StopPermReq*)&signal->theData[0]; - StopPermRef* const ref = (StopPermRef*)&signal->theData[0]; - - const Uint32 senderData = req->senderData; - const BlockReference senderRef = req->senderRef; - const NodeId nodeId = refToNode(senderRef); - - if (isMaster()) { - /** - * Master - */ - jam(); - CRASH_INSERTION(7065); - if (c_stopPermMaster.clientRef != 0) { - jam(); - - ref->senderData = senderData; - ref->errorCode = StopPermRef::NodeShutdownInProgress; - sendSignal(senderRef, GSN_STOP_PERM_REF, signal, - StopPermRef::SignalLength, JBB); - return; - }//if - - if (c_nodeStartMaster.activeState) { - jam(); - ref->senderData = senderData; - ref->errorCode = StopPermRef::NodeStartInProgress; - sendSignal(senderRef, GSN_STOP_PERM_REF, signal, - StopPermRef::SignalLength, JBB); - return; - }//if - - /** - * Lock - */ - c_nodeStartMaster.activeState = true; - c_stopPermMaster.clientRef = senderRef; - - c_stopPermMaster.clientData = senderData; - c_stopPermMaster.returnValue = 0; - c_switchReplicas.clear(); - - Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle); - Callback c = { safe_cast(&Dbdih::switch_primary_stop_node), nodeId }; - ndbrequire(mutex.lock(c)); - } else { - /** - * Proxy part - */ - jam(); - CRASH_INSERTION(7066); - if(c_stopPermProxy.clientRef != 0){ - jam(); - ref->senderData = senderData; - ref->errorCode = StopPermRef::NodeShutdownInProgress; - sendSignal(senderRef, GSN_STOP_PERM_REF, signal, 2, JBB); - return; - }//if - - c_stopPermProxy.clientRef = senderRef; - c_stopPermProxy.masterRef = cmasterdihref; - c_stopPermProxy.clientData = senderData; - - req->senderRef = reference(); - req->senderData = senderData; - sendSignal(cmasterdihref, GSN_STOP_PERM_REQ, signal, - StopPermReq::SignalLength, JBB); - }//if -}//Dbdih::execSTOP_PERM_REQ() - -void -Dbdih::switch_primary_stop_node(Signal* signal, Uint32 node_id, Uint32 ret_val) -{ - ndbrequire(ret_val == 0); - signal->theData[0] = DihContinueB::SwitchReplica; - signal->theData[1] = node_id; - signal->theData[2] = 0; // table id - signal->theData[3] = 0; // fragment id - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); -} - -void Dbdih::execSTOP_PERM_REF(Signal* signal) -{ - jamEntry(); - ndbrequire(c_stopPermProxy.clientRef != 0); - ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef()); - sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_REF, signal, 2, JBB); - c_stopPermProxy.clientRef = 0; -}//Dbdih::execSTOP_PERM_REF() - -void Dbdih::execSTOP_PERM_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(c_stopPermProxy.clientRef != 0); - ndbrequire(c_stopPermProxy.masterRef == signal->senderBlockRef()); - sendSignal(c_stopPermProxy.clientRef, GSN_STOP_PERM_CONF, signal, 1, JBB); - c_stopPermProxy.clientRef = 0; -}//Dbdih::execSTOP_PERM_CONF() - -void 
Dbdih::execDIH_SWITCH_REPLICA_REQ(Signal* signal) -{ - jamEntry(); - DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0]; - const Uint32 tableId = req->tableId; - const Uint32 fragNo = req->fragNo; - const BlockReference senderRef = req->senderRef; - - CRASH_INSERTION(7067); - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE); - if (tabPtr.p->tabCopyStatus != TabRecord::CS_IDLE) { - jam(); - sendSignal(reference(), GSN_DIH_SWITCH_REPLICA_REQ, signal, - DihSwitchReplicaReq::SignalLength, JBB); - return; - }//if - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragNo, fragPtr); - - /** - * Do funky stuff - */ - Uint32 oldOrder[MAX_REPLICAS]; - const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder); - - if (noOfReplicas < req->noOfReplicas) { - jam(); - //--------------------------------------------------------------------- - // A crash occurred in the middle of our switch handling. - //--------------------------------------------------------------------- - DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0]; - ref->senderNode = cownNodeId; - ref->errorCode = StopPermRef::NF_CausedAbortOfStopProcedure; - sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_REF, signal, - DihSwitchReplicaRef::SignalLength, JBB); - }//if - for (Uint32 i = 0; i < noOfReplicas; i++) { - jam(); - ndbrequire(i < MAX_REPLICAS); - fragPtr.p->activeNodes[i] = req->newNodeOrder[i]; - }//for - /** - * Reply - */ - DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0]; - conf->senderNode = cownNodeId; - sendSignal(senderRef, GSN_DIH_SWITCH_REPLICA_CONF, signal, - DihSwitchReplicaConf::SignalLength, JBB); -}//Dbdih::execDIH_SWITCH_REPLICA_REQ() - -void Dbdih::execDIH_SWITCH_REPLICA_CONF(Signal* signal) -{ - jamEntry(); - /** - * Response to master - */ - CRASH_INSERTION(7068); - DihSwitchReplicaConf* const conf = (DihSwitchReplicaConf*)&signal->theData[0]; - switchReplicaReply(signal, conf->senderNode); -}//Dbdih::execDIH_SWITCH_REPLICA_CONF() - -void Dbdih::execDIH_SWITCH_REPLICA_REF(Signal* signal) -{ - jamEntry(); - DihSwitchReplicaRef* const ref = (DihSwitchReplicaRef*)&signal->theData[0]; - if(c_stopPermMaster.returnValue == 0){ - jam(); - c_stopPermMaster.returnValue = ref->errorCode; - }//if - switchReplicaReply(signal, ref->senderNode); -}//Dbdih::execDIH_SWITCH_REPLICA_REF() - -void Dbdih::switchReplicaReply(Signal* signal, - NodeId nodeId){ - jam(); - receiveLoopMacro(DIH_SWITCH_REPLICA_REQ, nodeId); - //------------------------------------------------------ - // We have received all responses from the nodes. Thus - // we have completed switching replica roles. Continue - // with the next fragment. 
- //------------------------------------------------------ - if(c_stopPermMaster.returnValue != 0){ - jam(); - c_switchReplicas.tableId = ctabFileSize + 1; - }//if - c_switchReplicas.fragNo++; - - signal->theData[0] = DihContinueB::SwitchReplica; - signal->theData[1] = c_switchReplicas.nodeId; - signal->theData[2] = c_switchReplicas.tableId; - signal->theData[3] = c_switchReplicas.fragNo; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); -}//Dbdih::switchReplicaReply() - -void -Dbdih::switchReplica(Signal* signal, - Uint32 nodeId, - Uint32 tableId, - Uint32 fragNo){ - jam(); - DihSwitchReplicaReq* const req = (DihSwitchReplicaReq*)&signal->theData[0]; - - const Uint32 RT_BREAK = 64; - - for (Uint32 i = 0; i < RT_BREAK; i++) { - jam(); - if (tableId >= ctabFileSize) { - jam(); - StopPermConf* const conf = (StopPermConf*)&signal->theData[0]; - StopPermRef* const ref = (StopPermRef*)&signal->theData[0]; - /** - * Finished with all tables - */ - if(c_stopPermMaster.returnValue == 0) { - jam(); - conf->senderData = c_stopPermMaster.clientData; - sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_CONF, - signal, 1, JBB); - } else { - jam(); - ref->senderData = c_stopPermMaster.clientData; - ref->errorCode = c_stopPermMaster.returnValue; - sendSignal(c_stopPermMaster.clientRef, GSN_STOP_PERM_REF, signal, 2,JBB); - }//if - - /** - * UnLock - */ - c_nodeStartMaster.activeState = false; - c_stopPermMaster.clientRef = 0; - c_stopPermMaster.clientData = 0; - c_stopPermMaster.returnValue = 0; - Mutex mutex(signal, c_mutexMgr, c_switchPrimaryMutexHandle); - mutex.unlock(); // ignore result - return; - }//if - - TabRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); - - if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE) { - jam(); - tableId++; - fragNo = 0; - continue; - }//if - if (fragNo >= tabPtr.p->totalfragments) { - jam(); - tableId++; - fragNo = 0; - continue; - }//if - FragmentstorePtr fragPtr; - getFragstore(tabPtr.p, fragNo, fragPtr); - - Uint32 oldOrder[MAX_REPLICAS]; - const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, oldOrder); - - if(oldOrder[0] != nodeId) { - jam(); - fragNo++; - continue; - }//if - req->tableId = tableId; - req->fragNo = fragNo; - req->noOfReplicas = noOfReplicas; - for (Uint32 i = 0; i < (noOfReplicas - 1); i++) { - req->newNodeOrder[i] = oldOrder[i+1]; - }//for - req->newNodeOrder[noOfReplicas-1] = nodeId; - req->senderRef = reference(); - - /** - * Initialize struct - */ - c_switchReplicas.tableId = tableId; - c_switchReplicas.fragNo = fragNo; - c_switchReplicas.nodeId = nodeId; - - sendLoopMacro(DIH_SWITCH_REPLICA_REQ, sendDIH_SWITCH_REPLICA_REQ); - return; - }//for - - signal->theData[0] = DihContinueB::SwitchReplica; - signal->theData[1] = nodeId; - signal->theData[2] = tableId; - signal->theData[3] = fragNo; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); -}//Dbdih::switchReplica() - -void Dbdih::execSTOP_ME_REQ(Signal* signal) -{ - jamEntry(); - StopMeReq* const req = (StopMeReq*)&signal->theData[0]; - const BlockReference senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 nodeId = refToNode(senderRef); - { - /** - * Set node dead (remove from operations) - */ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->useInTransactions = false; - } - if (nodeId != getOwnNodeId()) { - jam(); - StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0]; - stopMeConf->senderData = senderData; - 
stopMeConf->senderRef = reference(); - sendSignal(senderRef, GSN_STOP_ME_CONF, signal, - StopMeConf::SignalLength, JBB); - return; - }//if - - /** - * Local signal - */ - jam(); - ndbrequire(c_stopMe.clientRef == 0); - - c_stopMe.clientData = senderData; - c_stopMe.clientRef = senderRef; - - req->senderData = senderData; - req->senderRef = reference(); - - sendLoopMacro(STOP_ME_REQ, sendSTOP_ME_REQ); - - /** - * Send conf to self - */ - StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0]; - stopMeConf->senderData = senderData; - stopMeConf->senderRef = reference(); - sendSignal(reference(), GSN_STOP_ME_CONF, signal, - StopMeConf::SignalLength, JBB); -}//Dbdih::execSTOP_ME_REQ() - -void Dbdih::execSTOP_ME_REF(Signal* signal) -{ - ndbrequire(false); -} - -void Dbdih::execSTOP_ME_CONF(Signal* signal) -{ - jamEntry(); - StopMeConf * const stopMeConf = (StopMeConf *)&signal->theData[0]; - - const Uint32 senderRef = stopMeConf->senderRef; - const Uint32 senderData = stopMeConf->senderData; - const Uint32 nodeId = refToNode(senderRef); - - ndbrequire(c_stopMe.clientRef != 0); - ndbrequire(c_stopMe.clientData == senderData); - - receiveLoopMacro(STOP_ME_REQ, nodeId); - //--------------------------------------------------------- - // All STOP_ME_REQ have been received. We will send the - // confirmation back to the requesting block. - //--------------------------------------------------------- - - stopMeConf->senderRef = reference(); - stopMeConf->senderData = c_stopMe.clientData; - sendSignal(c_stopMe.clientRef, GSN_STOP_ME_CONF, signal, - StopMeConf::SignalLength, JBB); - c_stopMe.clientRef = 0; -}//Dbdih::execSTOP_ME_CONF() - -void Dbdih::execWAIT_GCP_REQ(Signal* signal) -{ - jamEntry(); - WaitGCPReq* const req = (WaitGCPReq*)&signal->theData[0]; - WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0]; - WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0]; - const Uint32 senderData = req->senderData; - const BlockReference senderRef = req->senderRef; - const Uint32 requestType = req->requestType; - - if(requestType == WaitGCPReq::CurrentGCI) { - jam(); - conf->senderData = senderData; - conf->gcp = cnewgcp; - conf->blockStatus = cgcpOrderBlocked; - sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - return; - }//if - - if (requestType == WaitGCPReq::BlockStartGcp) - { - jam(); - conf->senderData = senderData; - conf->gcp = cnewgcp; - conf->blockStatus = cgcpOrderBlocked; - sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - cgcpOrderBlocked = 1; - return; - } - - if (requestType == WaitGCPReq::UnblockStartGcp) - { - jam(); - conf->senderData = senderData; - conf->gcp = cnewgcp; - conf->blockStatus = cgcpOrderBlocked; - sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - cgcpOrderBlocked = 0; - return; - } - - if(isMaster()) { - /** - * Master - */ - jam(); - - if((requestType == WaitGCPReq::CompleteIfRunning) && - (cgcpStatus == GCP_READY)) { - jam(); - conf->senderData = senderData; - conf->gcp = coldgcp; - conf->blockStatus = cgcpOrderBlocked; - sendSignal(senderRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - return; - }//if - - WaitGCPMasterPtr ptr; - if(c_waitGCPMasterList.seize(ptr) == false){ - jam(); - ref->senderData = senderData; - ref->errorCode = WaitGCPRef::NoWaitGCPRecords; - sendSignal(senderRef, GSN_WAIT_GCP_REF, signal, - WaitGCPRef::SignalLength, JBB); - return; - }//if - ptr.p->clientRef = senderRef; - ptr.p->clientData = 
senderData; - - if((requestType == WaitGCPReq::CompleteForceStart) && - (cgcpStatus == GCP_READY)) { - jam(); - cstartGcpNow = true; - }//if - return; - } else { - /** - * Proxy part - */ - jam(); - WaitGCPProxyPtr ptr; - if (c_waitGCPProxyList.seize(ptr) == false) { - jam(); - ref->senderData = senderData; - ref->errorCode = WaitGCPRef::NoWaitGCPRecords; - sendSignal(senderRef, GSN_WAIT_GCP_REF, signal, - WaitGCPRef::SignalLength, JBB); - return; - }//if - ptr.p->clientRef = senderRef; - ptr.p->clientData = senderData; - ptr.p->masterRef = cmasterdihref; - - req->senderData = ptr.i; - req->senderRef = reference(); - req->requestType = requestType; - - sendSignal(cmasterdihref, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); - return; - }//if -}//Dbdih::execWAIT_GCP_REQ() - -void Dbdih::execWAIT_GCP_REF(Signal* signal) -{ - jamEntry(); - ndbrequire(!isMaster()); - WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0]; - - const Uint32 proxyPtr = ref->senderData; - const Uint32 errorCode = ref->errorCode; - - WaitGCPProxyPtr ptr; - ptr.i = proxyPtr; - c_waitGCPProxyList.getPtr(ptr); - - ref->senderData = ptr.p->clientData; - ref->errorCode = errorCode; - sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_REF, signal, - WaitGCPRef::SignalLength, JBB); - - c_waitGCPProxyList.release(ptr); -}//Dbdih::execWAIT_GCP_REF() - -void Dbdih::execWAIT_GCP_CONF(Signal* signal) -{ - jamEntry(); - ndbrequire(!isMaster()); - WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0]; - const Uint32 proxyPtr = conf->senderData; - const Uint32 gcp = conf->gcp; - WaitGCPProxyPtr ptr; - - ptr.i = proxyPtr; - c_waitGCPProxyList.getPtr(ptr); - - conf->senderData = ptr.p->clientData; - conf->gcp = gcp; - conf->blockStatus = cgcpOrderBlocked; - sendSignal(ptr.p->clientRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - - c_waitGCPProxyList.release(ptr); -}//Dbdih::execWAIT_GCP_CONF() - -void Dbdih::checkWaitGCPProxy(Signal* signal, NodeId failedNodeId) -{ - jam(); - WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0]; - ref->errorCode = WaitGCPRef::NF_CausedAbortOfProcedure; - - WaitGCPProxyPtr ptr; - c_waitGCPProxyList.first(ptr); - while(ptr.i != RNIL) { - jam(); - const Uint32 i = ptr.i; - const Uint32 clientData = ptr.p->clientData; - const BlockReference clientRef = ptr.p->clientRef; - const BlockReference masterRef = ptr.p->masterRef; - - c_waitGCPProxyList.next(ptr); - if(refToNode(masterRef) == failedNodeId) { - jam(); - c_waitGCPProxyList.release(i); - ref->senderData = clientData; - sendSignal(clientRef, GSN_WAIT_GCP_REF, signal, - WaitGCPRef::SignalLength, JBB); - }//if - }//while -}//Dbdih::checkWaitGCPProxy() - -void Dbdih::checkWaitGCPMaster(Signal* signal, NodeId failedNodeId) -{ - jam(); - WaitGCPMasterPtr ptr; - c_waitGCPMasterList.first(ptr); - - while (ptr.i != RNIL) { - jam(); - const Uint32 i = ptr.i; - const NodeId nodeId = refToNode(ptr.p->clientRef); - - c_waitGCPMasterList.next(ptr); - if (nodeId == failedNodeId) { - jam() - c_waitGCPMasterList.release(i); - }//if - }//while -}//Dbdih::checkWaitGCPMaster() - -void Dbdih::emptyWaitGCPMasterQueue(Signal* signal) -{ - jam(); - WaitGCPConf* const conf = (WaitGCPConf*)&signal->theData[0]; - conf->gcp = coldgcp; - - WaitGCPMasterPtr ptr; - c_waitGCPMasterList.first(ptr); - while(ptr.i != RNIL) { - jam(); - const Uint32 i = ptr.i; - const Uint32 clientData = ptr.p->clientData; - const BlockReference clientRef = ptr.p->clientRef; - - c_waitGCPMasterList.next(ptr); - conf->senderData = clientData; - conf->blockStatus = 
cgcpOrderBlocked; - sendSignal(clientRef, GSN_WAIT_GCP_CONF, signal, - WaitGCPConf::SignalLength, JBB); - - c_waitGCPMasterList.release(i); - }//while -}//Dbdih::emptyWaitGCPMasterQueue() - -void Dbdih::setNodeStatus(Uint32 nodeId, NodeRecord::NodeStatus newStatus) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->nodeStatus = newStatus; -}//Dbdih::setNodeStatus() - -Dbdih::NodeRecord::NodeStatus Dbdih::getNodeStatus(Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - return nodePtr.p->nodeStatus; -}//Dbdih::getNodeStatus() - -Sysfile::ActiveStatus -Dbdih::getNodeActiveStatus(Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - return nodePtr.p->activeStatus; -}//Dbdih::getNodeActiveStatus() - - -void -Dbdih::setNodeActiveStatus(Uint32 nodeId, Sysfile::ActiveStatus newStatus) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->activeStatus = newStatus; -}//Dbdih::setNodeActiveStatus() - -void Dbdih::setAllowNodeStart(Uint32 nodeId, bool newState) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->allowNodeStart = newState; -}//Dbdih::setAllowNodeStart() - -void Dbdih::setNodeCopyCompleted(Uint32 nodeId, bool newState) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - nodePtr.p->copyCompleted = newState; -}//Dbdih::setNodeCopyCompleted() - -bool Dbdih::getAllowNodeStart(Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - return nodePtr.p->allowNodeStart; -}//Dbdih::getAllowNodeStart() - -bool Dbdih::getNodeCopyCompleted(Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - return nodePtr.p->copyCompleted; -}//Dbdih::getNodeCopyCompleted() - -bool Dbdih::checkNodeAlive(Uint32 nodeId) -{ - NodeRecordPtr nodePtr; - nodePtr.i = nodeId; - ndbrequire(nodeId > 0); - ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord); - if (nodePtr.p->nodeStatus != NodeRecord::ALIVE) { - return false; - } else { - return true; - }//if -}//Dbdih::checkNodeAlive() - -bool Dbdih::isMaster() -{ - return (reference() == cmasterdihref); -}//Dbdih::isMaster() - -bool Dbdih::isActiveMaster() -{ - return ((reference() == cmasterdihref) && (cmasterState == MASTER_ACTIVE)); -}//Dbdih::isActiveMaster() - -Dbdih::NodeRecord::NodeRecord(){ - m_nodefailSteps.clear(); - gcpstate = NodeRecord::READY; - - activeStatus = Sysfile::NS_NotDefined; - recNODE_FAILREP = ZFALSE; - nodeGroup = ZNIL; - dbtcFailCompleted = ZTRUE; - dbdictFailCompleted = ZTRUE; - dbdihFailCompleted = ZTRUE; - dblqhFailCompleted = ZTRUE; - noOfStartedChkpt = 0; - noOfQueuedChkpt = 0; - lcpStateAtTakeOver = (MasterLCPConf::State)255; - - activeTabptr = RNIL; - nodeStatus = NodeRecord::NOT_IN_CLUSTER; - useInTransactions = false; - copyCompleted = false; - allowNodeStart = true; -} - -// DICT lock slave - -void -Dbdih::sendDictLockReq(Signal* signal, Uint32 lockType, Callback c) -{ - DictLockReq* req = (DictLockReq*)&signal->theData[0]; - DictLockSlavePtr lockPtr; - - c_dictLockSlavePool.seize(lockPtr); - ndbrequire(lockPtr.i != RNIL); - - req->userPtr = lockPtr.i; - req->lockType = lockType; - req->userRef = reference(); - - lockPtr.p->lockPtr = RNIL; - 
lockPtr.p->lockType = lockType; - lockPtr.p->locked = false; - lockPtr.p->callback = c; - - // handle rolling upgrade - { - Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version; - - const unsigned int get_major = getMajor(masterVersion); - const unsigned int get_minor = getMinor(masterVersion); - const unsigned int get_build = getBuild(masterVersion); - ndbrequire(get_major >= 4); - - if (masterVersion < NDBD_DICT_LOCK_VERSION_5 || - masterVersion < NDBD_DICT_LOCK_VERSION_5_1 && - get_major == 5 && get_minor == 1 || - ERROR_INSERTED(7176)) { - jam(); - - infoEvent("DIH: detect upgrade: master node %u old version %u.%u.%u", - (unsigned int)cmasterNodeId, get_major, get_minor, get_build); - - DictLockConf* conf = (DictLockConf*)&signal->theData[0]; - conf->userPtr = lockPtr.i; - conf->lockType = lockType; - conf->lockPtr = ZNIL; - - sendSignal(reference(), GSN_DICT_LOCK_CONF, signal, - DictLockConf::SignalLength, JBB); - return; - } - } - - BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); - sendSignal(dictMasterRef, GSN_DICT_LOCK_REQ, signal, - DictLockReq::SignalLength, JBB); -} - -void -Dbdih::execDICT_LOCK_CONF(Signal* signal) -{ - jamEntry(); - recvDictLockConf(signal); -} - -void -Dbdih::execDICT_LOCK_REF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -} - -void -Dbdih::recvDictLockConf(Signal* signal) -{ - const DictLockConf* conf = (const DictLockConf*)&signal->theData[0]; - - DictLockSlavePtr lockPtr; - c_dictLockSlavePool.getPtr(lockPtr, conf->userPtr); - - lockPtr.p->lockPtr = conf->lockPtr; - ndbrequire(lockPtr.p->lockType == conf->lockType); - ndbrequire(lockPtr.p->locked == false); - lockPtr.p->locked = true; - - lockPtr.p->callback.m_callbackData = lockPtr.i; - execute(signal, lockPtr.p->callback, 0); -} - -void -Dbdih::sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI) -{ - DictUnlockOrd* ord = (DictUnlockOrd*)&signal->theData[0]; - - DictLockSlavePtr lockPtr; - c_dictLockSlavePool.getPtr(lockPtr, lockSlavePtrI); - - ord->lockPtr = lockPtr.p->lockPtr; - ord->lockType = lockPtr.p->lockType; - - c_dictLockSlavePool.release(lockPtr); - - // handle rolling upgrade - { - Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version; - - const unsigned int get_major = getMajor(masterVersion); - const unsigned int get_minor = getMinor(masterVersion); - ndbrequire(get_major >= 4); - - if (masterVersion < NDBD_DICT_LOCK_VERSION_5 || - masterVersion < NDBD_DICT_LOCK_VERSION_5_1 && - get_major == 5 && get_minor == 1 || - ERROR_INSERTED(7176)) { - return; - } - } - - BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); - sendSignal(dictMasterRef, GSN_DICT_UNLOCK_ORD, signal, - DictUnlockOrd::SignalLength, JBB); -} diff --git a/storage/ndb/src/kernel/blocks/dbdih/LCP.txt b/storage/ndb/src/kernel/blocks/dbdih/LCP.txt deleted file mode 100644 index 500c82f6baf..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/LCP.txt +++ /dev/null @@ -1,35 +0,0 @@ - -Master DIH LQH -========== ========== - -1) TCGETOPSIZEREQ -> all TC - -2) If sum(operation size) < Threshold - Goto 1 - -3) For each table - Calc Keep GCI (local using CONTINUEB) - -4) COPY_GCIREQ -> all DIH - -5) TC_CLOPSIZEREQ -> all TC - -6) For each fragment - LCP_FRAG_ORD -> LQH - - Do LCP... 
- 1) LCP_FRAG_REP -> all DIH - 2) If last fragment - LCP_COMPLETE_REP -> all DIH - -7) When receiving LCP_COMPLETE_REP from DIH - 1) If all DIHs have completed - Goto 1 - -All DIHs -========== -1) When receiving LCP_FRAG_REP - If all fragments & replicas done in table - 1) Save Table descriptor - 2) If all tables done + LCP_COMPLETE_REP(from lqh) has arrived - LCP_COMPLETE_REP -> master DIH diff --git a/storage/ndb/src/kernel/blocks/dbdih/Makefile.am b/storage/ndb/src/kernel/blocks/dbdih/Makefile.am deleted file mode 100644 index 7e667878d83..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/Makefile.am +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -ndbtools_PROGRAMS = ndb_print_sys_file -ndb_print_sys_file_SOURCES = printSysfile.cpp -ndb_print_sys_file_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a - - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am diff --git a/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp b/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp deleted file mode 100644 index ae489bafa90..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/Sysfile.hpp +++ /dev/null @@ -1,275 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
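The LCP.txt removed above outlines the master-driven local-checkpoint hand-shake: the master DIH polls TC for operation size, orders LCP_FRAG_ORD per fragment to LQH, each LQH answers with LCP_FRAG_REP and, after its last fragment, LCP_COMPLETE_REP; the round ends once every participating DIH has reported completion. Below is a minimal single-process sketch of that flow; the Fragment/Lqh/MasterDih structs and the participant count are invented stand-ins for illustration only and are not NDB kernel code.

#include <cstdio>
#include <vector>

struct Fragment { int tableId; int fragId; };

// Stand-in for DBLQH: checkpoint one fragment, answer with LCP_FRAG_REP.
struct Lqh {
  void execLCP_FRAG_ORD(const Fragment& f) {
    std::printf("LCP_FRAG_REP  table=%d fragment=%d\n", f.tableId, f.fragId);
  }
};

// Stand-in for the master DBDIH.
struct MasterDih {
  std::vector<Fragment> fragments;
  unsigned pendingLcpCompleteRep = 0;   // one report expected per participating DIH

  // Step 6 of LCP.txt: order a checkpoint for every fragment.
  void runLcp(Lqh& lqh, unsigned participatingDih) {
    pendingLcpCompleteRep = participatingDih;
    for (const Fragment& f : fragments)
      lqh.execLCP_FRAG_ORD(f);
    // After its last fragment each participant sends LCP_COMPLETE_REP to the master.
    for (unsigned i = 0; i < participatingDih; i++)
      execLCP_COMPLETE_REP();
  }

  // Step 7 of LCP.txt: the round is done when every participant has reported.
  void execLCP_COMPLETE_REP() {
    if (--pendingLcpCompleteRep == 0)
      std::printf("LCP complete, back to step 1 (TCGETOPSIZEREQ)\n");
  }
};

int main() {
  Lqh lqh;
  MasterDih master;
  master.fragments = {{1, 0}, {1, 1}, {2, 0}};
  master.runLcp(lqh, 2);   // pretend two DIH nodes participate
}

Running it prints one LCP_FRAG_REP per fragment, then the completion line once both simulated DIH reports have arrived.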
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SYSFILE_HPP -#define SYSFILE_HPP - -#include -#include -#include - -/** - * No bits in Sysfile to represent nodeid - */ -#define NODEID_BITS 8 - -/** - * Constant representing that node do not belong to - * any node group - */ -#define NO_NODE_GROUP_ID ((1 << NODEID_BITS) - 1) - -/** - * No of 32 bits word in sysfile - * - * 6 + // was 5 in < version 5.1 - * MAX_NDB_NODES + // lastCompletedGCI - * NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) + // nodeStatus - * NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + // nodeGroups - * NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + // takeOver - * NodeBitmask::NDB_NODE_BITMASK_SIZE // Lcp Active - */ -#define _SYSFILE_SIZE32 (6 + \ - MAX_NDB_NODES + \ - NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) + \ - NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + \ - NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS) + \ - _NDB_NODE_BITMASK_SIZE) - -/** - * This struct defines the format of P.sysfile - */ -struct Sysfile { -public: - - /** - * No of 32 bits words in the sysfile - */ - STATIC_CONST( SYSFILE_SIZE32 = _SYSFILE_SIZE32 ); - - Uint32 systemRestartBits; - - /** - * Restart seq for _this_ node... - */ - Uint32 m_restart_seq; - - static bool getInitialStartOngoing(const Uint32 & systemRestartBits); - static void setInitialStartOngoing(Uint32 & systemRestartBits); - static void clearInitialStartOngoing(Uint32 & systemRestartBits); - - static bool getRestartOngoing(const Uint32 & systemRestartBits); - static void setRestartOngoing(Uint32 & systemRestartBits); - static void clearRestartOngoing(Uint32 & systemRestartBits); - - static bool getLCPOngoing(const Uint32 & systemRestartBits); - static void setLCPOngoing(Uint32 & systemRestartBits); - static void clearLCPOngoing(Uint32 & systemRestartBits); - - Uint32 keepGCI; - Uint32 oldestRestorableGCI; - Uint32 newestRestorableGCI; - Uint32 latestLCP_ID; - - /** - * Last completed GCI for each node - */ - Uint32 lastCompletedGCI[MAX_NDB_NODES]; - - /** - * Active status bits - * - * It takes 4 bits to represent it - */ - enum ActiveStatus { - NS_Active = 0 - ,NS_ActiveMissed_1 = 1 - ,NS_ActiveMissed_2 = 2 - ,NS_ActiveMissed_3 = 3 - ,NS_HotSpare = 4 - ,NS_NotActive_NotTakenOver = 5 - ,NS_TakeOver = 6 - ,NS_NotActive_TakenOver = 7 - ,NS_NotDefined = 8 - ,NS_Standby = 9 - }; - STATIC_CONST( NODE_STATUS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) ); - Uint32 nodeStatus[NODE_STATUS_SIZE]; - - static Uint32 getNodeStatus(NodeId, const Uint32 nodeStatus[]); - static void setNodeStatus(NodeId, Uint32 nodeStatus[], Uint32 status); - - /** - * The node group of each node - * Sizeof(NodeGroup) = 8 Bit - */ - STATIC_CONST( NODE_GROUPS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, - NODEID_BITS) ); - Uint32 nodeGroups[NODE_GROUPS_SIZE]; - - static Uint16 getNodeGroup(NodeId, const Uint32 nodeGroups[]); - static void setNodeGroup(NodeId, Uint32 nodeGroups[], Uint16 group); - - /** - * Any node can take over for any node - */ - STATIC_CONST( TAKE_OVER_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, - NODEID_BITS) ); - Uint32 takeOver[TAKE_OVER_SIZE]; - - static NodeId getTakeOverNode(NodeId, const Uint32 takeOver[]); - static void setTakeOverNode(NodeId, Uint32 takeOver[], NodeId toNode); - - /** - * Is a node running a LCP - */ - Uint32 lcpActive[NdbNodeBitmask::Size]; -}; - -#if (MAX_NDB_NODES > (1<> 3; - const int shift = (nodeId & 7) << 2; - - return 
(nodeStatus[word] >> shift) & 15; -} - -inline -void -Sysfile::setNodeStatus(NodeId nodeId, Uint32 nodeStatus[], Uint32 status){ - const int word = nodeId >> 3; - const int shift = (nodeId & 7) << 2; - - const Uint32 mask = ~(((Uint32)15) << shift); - const Uint32 tmp = nodeStatus[word]; - - nodeStatus[word] = (tmp & mask) | ((status & 15) << shift); -} - -inline -Uint16 -Sysfile::getNodeGroup(NodeId nodeId, const Uint32 nodeGroups[]){ - const int word = nodeId >> 2; - const int shift = (nodeId & 3) << 3; - - return (nodeGroups[word] >> shift) & 255; -} - -inline -void -Sysfile::setNodeGroup(NodeId nodeId, Uint32 nodeGroups[], Uint16 group){ - const int word = nodeId >> 2; - const int shift = (nodeId & 3) << 3; - - const Uint32 mask = ~(((Uint32)255) << shift); - const Uint32 tmp = nodeGroups[word]; - - nodeGroups[word] = (tmp & mask) | ((group & 255) << shift); -} - -inline -NodeId -Sysfile::getTakeOverNode(NodeId nodeId, const Uint32 takeOver[]){ - const int word = nodeId >> 2; - const int shift = (nodeId & 3) << 3; - - return (takeOver[word] >> shift) & 255; -} - -inline -void -Sysfile::setTakeOverNode(NodeId nodeId, Uint32 takeOver[], NodeId toNode){ - const int word = nodeId >> 2; - const int shift = (nodeId & 3) << 3; - - const Uint32 mask = ~(((Uint32)255) << shift); - const Uint32 tmp = takeOver[word]; - - takeOver[word] = (tmp & mask) | ((toNode & 255) << shift); -} - - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp b/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp deleted file mode 100644 index 13aeac81f81..00000000000 --- a/storage/ndb/src/kernel/blocks/dbdih/printSysfile.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
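The Sysfile accessors above pack one 4-bit ActiveStatus per node, eight nodes to a 32-bit word (word = nodeId >> 3, shift = (nodeId & 7) << 2), and one 8-bit node group or take-over node per node, four to a word; that is also why NO_NODE_GROUP_ID is (1 << NODEID_BITS) - 1 = 255 and why the arrays are dimensioned with NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) and NODE_ARRAY_SIZE(MAX_NDB_NODES, NODEID_BITS). A self-contained round-trip of the 4-bit scheme, assuming NODE_ARRAY_SIZE(nodes, bits) simply rounds nodes*bits up to whole 32-bit words; the node count used here is illustrative:

    #include <cassert>
    #include <cstdio>

    typedef unsigned int Uint32;

    /* Assumption for the sketch: round nodes*bits up to whole 32-bit words. */
    #define NODE_ARRAY_SIZE(nodes, bits) (((nodes) * (bits) + 31) / 32)

    enum { MAX_NODES = 49 };                   /* illustrative node count */
    static Uint32 nodeStatus[NODE_ARRAY_SIZE(MAX_NODES, 4)];

    /* Same bit arithmetic as Sysfile::getNodeStatus / setNodeStatus above. */
    static Uint32 getNodeStatus(Uint32 nodeId)
    {
      const int word  = nodeId >> 3;           /* 8 nodes per 32-bit word */
      const int shift = (nodeId & 7) << 2;     /* 4 bits per node         */
      return (nodeStatus[word] >> shift) & 15;
    }

    static void setNodeStatus(Uint32 nodeId, Uint32 status)
    {
      const int word  = nodeId >> 3;
      const int shift = (nodeId & 7) << 2;
      const Uint32 mask = ~(((Uint32)15) << shift);
      nodeStatus[word] = (nodeStatus[word] & mask) | ((status & 15) << shift);
    }

    int main()
    {
      setNodeStatus(9, 5);                     /* NS_NotActive_NotTakenOver */
      setNodeStatus(10, 8);                    /* NS_NotDefined             */
      assert(getNodeStatus(9) == 5 && getNodeStatus(10) == 8);
      printf("words needed for %d nodes at 4 bits each: %d\n",
             (int)MAX_NODES, (int)NODE_ARRAY_SIZE(MAX_NODES, 4));
      return 0;
    }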
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include - -#include -#include -#include - -void -usage(const char * prg){ - ndbout << "Usage " << prg - << " P[0-1].sysfile" << endl; -} - -struct NSString { - Sysfile::ActiveStatus NodeStatus; - const char * desc; -}; - -static const -NSString NodeStatusStrings[] = { - { Sysfile::NS_Active, "Active " }, - { Sysfile::NS_ActiveMissed_1, "Active missed 1" }, - { Sysfile::NS_ActiveMissed_2, "Active missed 2" }, - { Sysfile::NS_ActiveMissed_3, "Active missed 3" }, - { Sysfile::NS_HotSpare, "Hot spare " }, - { Sysfile::NS_NotActive_NotTakenOver, "Not active " }, - { Sysfile::NS_TakeOver, "Take over " }, - { Sysfile::NS_NotActive_TakenOver, "Taken over " }, - { Sysfile::NS_NotDefined, "Not defined " }, - { Sysfile::NS_Standby, "Stand by " } -}; - -const -char * getNSString(Uint32 ns){ - for(Uint32 i = 0; i<(sizeof(NodeStatusStrings)/sizeof(NSString)); i++) - if((Uint32)NodeStatusStrings[i].NodeStatus == ns) - return NodeStatusStrings[i].desc; - return ""; -} - -void -fill(const char * buf, int mod){ - int len = strlen(buf)+1; - ndbout << buf << " "; - while((len % mod) != 0){ - ndbout << " "; - len++; - } -} - -void -print(const char * filename, const Sysfile * sysfile){ - char buf[255]; - ndbout << "----- Sysfile: " << filename - << " seq: " << hex << sysfile->m_restart_seq - << " -----" << endl; - ndbout << "Initial start ongoing: " - << Sysfile::getInitialStartOngoing(sysfile->systemRestartBits) - << ", "; - - ndbout << "Restart Ongoing: " - << Sysfile::getRestartOngoing(sysfile->systemRestartBits) - << ", "; - - ndbout << "LCP Ongoing: " - << Sysfile::getLCPOngoing(sysfile->systemRestartBits) - << endl; - - - ndbout << "-- Global Checkpoint Identities: --" << endl; - sprintf(buf, "keepGCI = %u", sysfile->keepGCI); - fill(buf, 40); - ndbout << " -- Tail of REDO log" << endl; - - sprintf(buf, "oldestRestorableGCI = %u", sysfile->oldestRestorableGCI); - fill(buf, 40); - ndbout << " -- " << endl; - - sprintf(buf, "newestRestorableGCI = %u", sysfile->newestRestorableGCI); - fill(buf, 40); - ndbout << " -- " << endl; - - sprintf(buf, "latestLCP = %u", sysfile->latestLCP_ID); - fill(buf, 40); - ndbout << " -- " << endl; - - ndbout << "-- Node status: --" << endl; - for(int i = 1; i < MAX_NDB_NODES; i++){ - if(Sysfile::getNodeStatus(i, sysfile->nodeStatus) !=Sysfile::NS_NotDefined){ - sprintf(buf, - "Node %.2d -- %s GCP: %d, NodeGroup: %d, TakeOverNode: %d, " - "LCP Ongoing: %s", - i, - getNSString(Sysfile::getNodeStatus(i,sysfile->nodeStatus)), - sysfile->lastCompletedGCI[i], - Sysfile::getNodeGroup(i, sysfile->nodeGroups), - Sysfile::getTakeOverNode(i, sysfile->takeOver), - BitmaskImpl::get(NdbNodeBitmask::Size, - sysfile->lcpActive, i) != 0 ? 
"yes" : "no"); - ndbout << buf << endl; - } - } -} - -NDB_COMMAND(printSysfile, - "printSysfile", "printSysfile", "Prints a sysfile", 16384){ - if(argc < 2){ - usage(argv[0]); - return 0; - } - - for(int i = 1; i - -#include -#include -#include - -void -usage(const char * prg){ - ndbout << "Usage " << prg - << " P[0-1].sysfile" << endl; -} - -struct NSString { - Sysfile::ActiveStatus NodeStatus; - const char * desc; -}; - -static const -NSString NodeStatusStrings[] = { - { Sysfile::NS_Active, "Active " }, - { Sysfile::NS_ActiveMissed_1, "Active missed 1" }, - { Sysfile::NS_ActiveMissed_2, "Active missed 2" }, - { Sysfile::NS_ActiveMissed_3, "Active missed 3" }, - { Sysfile::NS_HotSpare, "Hot spare " }, - { Sysfile::NS_NotActive_NotTakenOver, "Not active " }, - { Sysfile::NS_TakeOver, "Take over " }, - { Sysfile::NS_NotActive_TakenOver, "Taken over " }, - { Sysfile::NS_NotDefined, "Not defined " }, - { Sysfile::NS_Standby, "Stand by " } -}; - -const -char * getNSString(Uint32 ns){ - for(Uint32 i = 0; i<(sizeof(NodeStatusStrings)/sizeof(NSString)); i++) - if((Uint32)NodeStatusStrings[i].NodeStatus == ns) - return NodeStatusStrings[i].desc; - return ""; -} - -void -fill(const char * buf, int mod){ - int len = strlen(buf)+1; - ndbout << buf << " "; - while((len % mod) != 0){ - ndbout << " "; - len++; - } -} - -void -print(const char * filename, const Sysfile * sysfile){ - char buf[255]; - ndbout << "----- Sysfile: " << filename << " -----" << endl; - ndbout << "Initial start ongoing: " - << Sysfile::getInitialStartOngoing(sysfile->systemRestartBits) - << ", "; - - ndbout << "Restart Ongoing: " - << Sysfile::getRestartOngoing(sysfile->systemRestartBits) - << ", "; - - ndbout << "LCP Ongoing: " - << Sysfile::getLCPOngoing(sysfile->systemRestartBits) - << endl; - - - ndbout << "-- Global Checkpoint Identities: --" << endl; - sprintf(buf, "keepGCI = %u", sysfile->keepGCI); - fill(buf, 40); - ndbout << " -- Tail of REDO log" << endl; - - sprintf(buf, "oldestRestorableGCI = %u", sysfile->oldestRestorableGCI); - fill(buf, 40); - ndbout << " -- " << endl; - - sprintf(buf, "newestRestorableGCI = %u", sysfile->newestRestorableGCI); - fill(buf, 40); - ndbout << " -- " << endl; - - sprintf(buf, "latestLCP = %u", sysfile->latestLCP_ID); - fill(buf, 40); - ndbout << " -- " << endl; - - ndbout << "-- Node status: --" << endl; - for(int i = 1; i < MAX_NDB_NODES; i++){ - if(Sysfile::getNodeStatus(i, sysfile->nodeStatus) !=Sysfile::NS_NotDefined){ - sprintf(buf, - "Node %.2d -- %s GCP: %d, NodeGroup: %d, TakeOverNode: %d, " - "LCP Ongoing: %s", - i, - getNSString(Sysfile::getNodeStatus(i,sysfile->nodeStatus)), - sysfile->lastCompletedGCI[i], - Sysfile::getNodeGroup(i, sysfile->nodeGroups), - Sysfile::getTakeOverNode(i, sysfile->takeOver), - BitmaskImpl::get(NdbNodeBitmask::Size, - sysfile->lcpActive, i) != 0 ? "yes" : "no"); - ndbout << buf << endl; - } - } -} - -NDB_COMMAND(printSysfile, - "printSysfile", "printSysfile", "Prints a sysfile", 16384){ - if(argc < 2){ - usage(argv[0]); - return 0; - } - - for(int i = 1; i -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -// primary key is stored in TUP -#include "../dbtup/Dbtup.hpp" - -class Dbacc; -class Dbtup; - -#ifdef DBLQH_C -// Constants -/* ------------------------------------------------------------------------- */ -/* CONSTANTS USED WHEN MASTER REQUESTS STATE OF COPY FRAGMENTS. 
*/ -/* ------------------------------------------------------------------------- */ -#define ZCOPY_CLOSING 0 -#define ZCOPY_ONGOING 1 -#define ZCOPY_ACTIVATION 2 -/* ------------------------------------------------------------------------- */ -/* STATES FOR THE VARIABLE GCP_LOG_PART_STATE */ -/* ------------------------------------------------------------------------- */ -#define ZIDLE 0 -#define ZWAIT_DISK 1 -#define ZON_DISK 2 -#define ZACTIVE 1 -/* ------------------------------------------------------------------------- */ -/* STATES FOR THE VARIABLE CSR_PHASES_STARTED */ -/* ------------------------------------------------------------------------- */ -#define ZSR_NO_PHASE_STARTED 0 -#define ZSR_PHASE1_COMPLETED 1 -#define ZSR_PHASE2_COMPLETED 2 -#define ZSR_BOTH_PHASES_STARTED 3 -/* ------------------------------------------------------------------------- */ -/* THE NUMBER OF PAGES IN A MBYTE, THE TWO LOGARITHM OF THIS. */ -/* THE NUMBER OF MBYTES IN A LOG FILE. */ -/* THE MAX NUMBER OF PAGES READ/WRITTEN FROM/TO DISK DURING */ -/* A WRITE OR READ. */ -/* ------------------------------------------------------------------------- */ -#define ZNOT_DIRTY 0 -#define ZDIRTY 1 -#define ZREAD_AHEAD_SIZE 8 -/* ------------------------------------------------------------------------- */ -/* CONSTANTS OF THE LOG PAGES */ -/* ------------------------------------------------------------------------- */ -#define ZPAGE_HEADER_SIZE 32 -#define ZPAGE_SIZE 8192 -#define ZPAGES_IN_MBYTE 32 -#define ZTWOLOG_NO_PAGES_IN_MBYTE 5 -#define ZTWOLOG_PAGE_SIZE 13 -#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution - -#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config) -#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log -#define ZMIN_LOG_PAGES_OPERATION 10 // Minimum no of pages before stopping - -#define ZPOS_CHECKSUM 0 -#define ZPOS_LOG_LAP 1 -#define ZPOS_MAX_GCI_COMPLETED 2 -#define ZPOS_MAX_GCI_STARTED 3 -#define ZNEXT_PAGE 4 -#define ZPREV_PAGE 5 -#define ZPOS_VERSION 6 -#define ZPOS_NO_LOG_FILES 7 -#define ZCURR_PAGE_INDEX 8 -#define ZLAST_LOG_PREP_REF 10 -#define ZPOS_DIRTY 11 -/* A number of debug items written in the page header of all log files */ -#define ZPOS_LOG_TIMER 12 -#define ZPOS_PAGE_I 13 -#define ZPOS_PLACE_WRITTEN_FROM 14 -#define ZPOS_PAGE_NO 15 -#define ZPOS_PAGE_FILE_NO 16 -#define ZPOS_WORD_WRITTEN 17 -#define ZPOS_IN_WRITING 18 -#define ZPOS_PREV_PAGE_NO 19 -#define ZPOS_IN_FREE_LIST 20 - -/* ------------------------------------------------------------------------- */ -/* CONSTANTS FOR THE VARIOUS REPLICA AND NODE TYPES. */ -/* ------------------------------------------------------------------------- */ -#define ZPRIMARY_NODE 0 -#define ZBACKUP_NODE 1 -#define ZSTANDBY_NODE 2 -#define ZTC_NODE 3 -#define ZLOG_NODE 3 -/* ------------------------------------------------------------------------- */ -/* VARIOUS CONSTANTS USED AS FLAGS TO THE FILE MANAGER. 
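The page and file geometry defined above is internally consistent if ZPAGE_SIZE is counted in 32-bit words, which matches how log data is handled elsewhere in this header (UintR words): 2^ZTWOLOG_PAGE_SIZE = 8192 words per page, 2^ZTWOLOG_NO_PAGES_IN_MBYTE = 32 pages per mbyte, and 32 pages of 8192 four-byte words is exactly one mbyte. A few compile-time checks of that arithmetic, with the values copied from the constants above (the word-size reading is an assumption, not stated explicitly in the header):

    #include <cstdio>

    /* Values copied from the constants above. */
    enum {
      ZPAGE_SIZE                = 8192,   /* words per log page */
      ZPAGES_IN_MBYTE           = 32,
      ZTWOLOG_NO_PAGES_IN_MBYTE = 5,
      ZTWOLOG_PAGE_SIZE         = 13
    };

    /* C++11 static_assert used for the consistency checks. */
    static_assert(ZPAGE_SIZE == (1 << ZTWOLOG_PAGE_SIZE),
                  "page size must match its two-logarithm");
    static_assert(ZPAGES_IN_MBYTE == (1 << ZTWOLOG_NO_PAGES_IN_MBYTE),
                  "pages per mbyte must match its two-logarithm");
    static_assert(ZPAGES_IN_MBYTE * ZPAGE_SIZE * 4 == 1024 * 1024,
                  "32 pages of 8192 32-bit words is exactly one mbyte");

    int main()
    {
      printf("log page: %d words (%d bytes), %d pages per mbyte\n",
             (int)ZPAGE_SIZE, (int)ZPAGE_SIZE * 4, (int)ZPAGES_IN_MBYTE);
      return 0;
    }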
*/ -/* ------------------------------------------------------------------------- */ -#define ZVAR_NO_LOG_PAGE_WORD 1 -#define ZLIST_OF_PAIRS 0 -#define ZLIST_OF_PAIRS_SYNCH 16 -#define ZARRAY_OF_PAGES 1 -#define ZLIST_OF_MEM_PAGES 2 -#define ZLIST_OF_MEM_PAGES_SYNCH 18 -#define ZCLOSE_NO_DELETE 0 -#define ZCLOSE_DELETE 1 -#define ZPAGE_ZERO 0 -/* ------------------------------------------------------------------------- */ -/* THE FOLLOWING CONSTANTS ARE USED TO DESCRIBE THE TYPES OF */ -/* LOG RECORDS, THE SIZE OF THE VARIOUS LOG RECORD TYPES AND */ -/* THE POSITIONS WITHIN THOSE LOG RECORDS. */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* THESE CONSTANTS DESCRIBE THE SIZES OF VARIOUS TYPES OF LOG REORDS. */ -/* NEXT_LOG_SIZE IS ACTUALLY ONE. THE REASON WE SET IT TO 2 IS TO */ -/* SIMPLIFY THE CODE SINCE OTHERWISE HAVE TO USE A SPECIAL VERSION */ -/* OF READ_LOGWORD WHEN READING LOG RECORD TYPE */ -/* SINCE NEXT MBYTE TYPE COULD BE THE VERY LAST WORD IN THE MBYTE. */ -/* BY SETTING IT TO 2 WE ENSURE IT IS NEVER THE VERY LAST WORD */ -/* IN THE MBYTE. */ -/* ------------------------------------------------------------------------- */ -#define ZFD_HEADER_SIZE 3 -#define ZFD_MBYTE_SIZE 3 -#define ZLOG_HEAD_SIZE 8 -#define ZNEXT_LOG_SIZE 2 -#define ZABORT_LOG_SIZE 3 -#define ZCOMMIT_LOG_SIZE 9 -#define ZCOMPLETED_GCI_LOG_SIZE 2 -/* ------------------------------------------------------------------------- */ -/* THESE CONSTANTS DESCRIBE THE TYPE OF A LOG RECORD. */ -/* THIS IS THE FIRST WORD OF A LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZNEW_PREP_OP_TYPE 0 -#define ZPREP_OP_TYPE 1 -#define ZCOMMIT_TYPE 2 -#define ZABORT_TYPE 3 -#define ZFD_TYPE 4 -#define ZFRAG_SPLIT_TYPE 5 -#define ZNEXT_LOG_RECORD_TYPE 6 -#define ZNEXT_MBYTE_TYPE 7 -#define ZCOMPLETED_GCI_TYPE 8 -#define ZINVALID_COMMIT_TYPE 9 -/* ------------------------------------------------------------------------- */ -/* THE POSITIONS OF LOGGED DATA IN A FILE DESCRIPTOR LOG RECORD HEADER.*/ -/* ALSO THE MAXIMUM NUMBER OF FILE DESCRIPTORS IN A LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZPOS_LOG_TYPE 0 -#define ZPOS_NO_FD 1 -#define ZPOS_FILE_NO 2 -/* ------------------------------------------------------------------------- */ -/* THE POSITIONS WITHIN A PREPARE LOG RECORD AND A NEW PREPARE */ -/* LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZPOS_HASH_VALUE 2 -#define ZPOS_SCHEMA_VERSION 3 -#define ZPOS_TRANS_TICKET 4 -#define ZPOS_OP_TYPE 5 -#define ZPOS_NO_ATTRINFO 6 -#define ZPOS_NO_KEYINFO 7 -/* ------------------------------------------------------------------------- */ -/* THE POSITIONS WITHIN A COMMIT LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZPOS_COMMIT_TRANSID1 1 -#define ZPOS_COMMIT_TRANSID2 2 -#define ZPOS_COMMIT_GCI 3 -#define ZPOS_COMMIT_TABLE_REF 4 -#define ZPOS_COMMIT_FRAGID 5 -#define ZPOS_COMMIT_FILE_NO 6 -#define ZPOS_COMMIT_START_PAGE_NO 7 -#define ZPOS_COMMIT_START_PAGE_INDEX 8 -#define ZPOS_COMMIT_STOP_PAGE_NO 9 -/* ------------------------------------------------------------------------- */ -/* THE POSITIONS WITHIN A ABORT LOG RECORD. 
*/ -/* ------------------------------------------------------------------------- */ -#define ZPOS_ABORT_TRANSID1 1 -#define ZPOS_ABORT_TRANSID2 2 -/* ------------------------------------------------------------------------- */ -/* THE POSITION WITHIN A COMPLETED GCI LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZPOS_COMPLETED_GCI 1 -/* ------------------------------------------------------------------------- */ -/* THE POSITIONS WITHIN A NEW PREPARE LOG RECORD. */ -/* ------------------------------------------------------------------------- */ -#define ZPOS_NEW_PREP_FILE_NO 8 -#define ZPOS_NEW_PREP_PAGE_REF 9 - -#define ZLAST_WRITE_IN_FILE 1 -#define ZENFORCE_WRITE 2 -/* ------------------------------------------------------------------------- */ -/* CONSTANTS USED AS INPUT TO SUBROUTINE WRITE_LOG_PAGES AMONG OTHERS. */ -/* ------------------------------------------------------------------------- */ -#define ZNORMAL 0 -#define ZINIT 1 -/* ------------------------------------------------------------------------- */ -/* CONSTANTS USED BY CONTINUEB TO DEDUCE WHICH CONTINUE SIGNAL IS TO */ -/* BE EXECUTED AS A RESULT OF THIS CONTINUEB SIGNAL. */ -/* ------------------------------------------------------------------------- */ -#define ZLOG_LQHKEYREQ 0 -#define ZPACK_LQHKEYREQ 1 -#define ZSEND_ATTRINFO 2 -#define ZSR_GCI_LIMITS 3 -#define ZSR_LOG_LIMITS 4 -#define ZSEND_EXEC_CONF 5 -#define ZEXEC_SR 6 -#define ZSR_FOURTH_COMP 7 -#define ZINIT_FOURTH 8 -#define ZTIME_SUPERVISION 9 -#define ZSR_PHASE3_START 10 -#define ZLQH_TRANS_NEXT 11 -#define ZLQH_RELEASE_AT_NODE_FAILURE 12 -#define ZSCAN_TC_CONNECT 13 -#define ZINITIALISE_RECORDS 14 -#define ZINIT_GCP_REC 15 -#define ZCHECK_LCP_STOP_BLOCKED 17 -#define ZSCAN_MARKERS 18 -#define ZOPERATION_EVENT_REP 19 -#define ZPREP_DROP_TABLE 20 -#define ZENABLE_EXPAND_CHECK 21 -#define ZRETRY_TCKEYREF 22 - -/* ------------------------------------------------------------------------- */ -/* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */ -/* AND CNODES_EXEC_SR_STATE. */ -/* ------------------------------------------------------------------------- */ -#define ZSTART_SR 1 -#define ZEXEC_SR_COMPLETED 2 -/* ------------------------------------------------------------------------- */ -/* CONSTANTS USED BY NODE STATUS TO DEDUCE THE STATUS OF A NODE. 
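Several of the record-size constants above follow directly from the position constants: an abort record is the type word plus the two transaction-id words at ZPOS_ABORT_TRANSID1/2, giving ZABORT_LOG_SIZE = 3; a completed-GCI record is the type word plus the GCI at ZPOS_COMPLETED_GCI, giving ZCOMPLETED_GCI_LOG_SIZE = 2; and a file-descriptor header is the type, descriptor count and file number at ZPOS_LOG_TYPE / ZPOS_NO_FD / ZPOS_FILE_NO, giving ZFD_HEADER_SIZE = 3. A few checks of that correspondence, with the values copied from the definitions above:

    #include <cstdio>

    /* Values copied from the constants above. */
    enum {
      ZFD_HEADER_SIZE         = 3,
      ZABORT_LOG_SIZE         = 3,
      ZCOMPLETED_GCI_LOG_SIZE = 2,

      ZPOS_LOG_TYPE           = 0,
      ZPOS_NO_FD              = 1,
      ZPOS_FILE_NO            = 2,
      ZPOS_ABORT_TRANSID1     = 1,
      ZPOS_ABORT_TRANSID2     = 2,
      ZPOS_COMPLETED_GCI      = 1
    };

    static_assert(ZABORT_LOG_SIZE == ZPOS_ABORT_TRANSID2 + 1,
                  "abort record: type word + two transid words");
    static_assert(ZCOMPLETED_GCI_LOG_SIZE == ZPOS_COMPLETED_GCI + 1,
                  "completed-GCI record: type word + GCI word");
    static_assert(ZFD_HEADER_SIZE == ZPOS_FILE_NO + 1,
                  "FD header: type, descriptor count, file number");

    int main()
    {
      printf("abort=%d, completed-GCI=%d, FD header=%d words\n",
             (int)ZABORT_LOG_SIZE, (int)ZCOMPLETED_GCI_LOG_SIZE,
             (int)ZFD_HEADER_SIZE);
      return 0;
    }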
*/ -/* ------------------------------------------------------------------------- */ -#define ZNODE_UP 0 -#define ZNODE_DOWN 1 -/* ------------------------------------------------------------------------- */ -/* START PHASES */ -/* ------------------------------------------------------------------------- */ -#define ZLAST_START_PHASE 255 -#define ZSTART_PHASE1 1 -#define ZSTART_PHASE2 2 -#define ZSTART_PHASE3 3 -#define ZSTART_PHASE4 4 -#define ZSTART_PHASE6 6 -/* ------------------------------------------------------------------------- */ -/* CONSTANTS USED BY SCAN AND COPY FRAGMENT PROCEDURES */ -/* ------------------------------------------------------------------------- */ -#define ZSTORED_PROC_SCAN 0 -#define ZSTORED_PROC_COPY 2 -#define ZDELETE_STORED_PROC_ID 3 -#define ZWRITE_LOCK 1 -#define ZSCAN_FRAG_CLOSED 2 -/* ------------------------------------------------------------------------- */ -/* ERROR CODES ADDED IN VERSION 0.1 AND 0.2 */ -/* ------------------------------------------------------------------------- */ -#define ZNOT_FOUND 1 // Not an error code, a return value -#define ZNO_FREE_LQH_CONNECTION 414 -#define ZGET_DATAREC_ERROR 418 -#define ZGET_ATTRINBUF_ERROR 419 -#define ZNO_FREE_FRAGMENTREC 460 // Insert new fragment error code -#define ZTAB_FILE_SIZE 464 // Insert new fragment error code + Start kernel -#define ZNO_ADD_FRAGREC 465 // Insert new fragment error code -/* ------------------------------------------------------------------------- */ -/* ERROR CODES ADDED IN VERSION 0.3 */ -/* ------------------------------------------------------------------------- */ -#define ZTAIL_PROBLEM_IN_LOG_ERROR 410 -#define ZGCI_TOO_LOW_ERROR 429 // GCP_SAVEREF error code -#define ZTAB_STATE_ERROR 474 // Insert new fragment error code -#define ZTOO_NEW_GCI_ERROR 479 // LCP Start error -/* ------------------------------------------------------------------------- */ -/* ERROR CODES ADDED IN VERSION 0.4 */ -/* ------------------------------------------------------------------------- */ - -#define ZNO_FREE_FRAG_SCAN_REC_ERROR 490 // SCAN_FRAGREF error code -#define ZCOPY_NO_FRAGMENT_ERROR 491 // COPY_FRAGREF error code -#define ZTAKE_OVER_ERROR 499 -#define ZCOPY_NODE_ERROR 1204 -#define ZTOO_MANY_COPY_ACTIVE_ERROR 1208 // COPY_FRAG and COPY_ACTIVEREF code -#define ZCOPY_ACTIVE_ERROR 1210 // COPY_ACTIVEREF error code -#define ZNO_TC_CONNECT_ERROR 1217 // Simple Read + SCAN -/* ------------------------------------------------------------------------- */ -/* ERROR CODES ADDED IN VERSION 1.X */ -/* ------------------------------------------------------------------------- */ -//#define ZSCAN_BOOK_ACC_OP_ERROR 1219 // SCAN_FRAGREF error code -#define ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR 1220 -#define ZTEMPORARY_REDO_LOG_FAILURE 1221 -#define ZNO_FREE_MARKER_RECORDS_ERROR 1222 -#define ZNODE_SHUTDOWN_IN_PROGESS 1223 -#define ZTOO_MANY_FRAGMENTS 1224 -#define ZTABLE_NOT_DEFINED 1225 -#define ZDROP_TABLE_IN_PROGRESS 1226 -#define ZINVALID_SCHEMA_VERSION 1227 - -/* ------------------------------------------------------------------------- */ -/* ERROR CODES ADDED IN VERSION 2.X */ -/* ------------------------------------------------------------------------- */ -#define ZNODE_FAILURE_ERROR 400 -/* ------------------------------------------------------------------------- */ -/* ERROR CODES FROM ACC */ -/* ------------------------------------------------------------------------- */ -#define ZNO_TUPLE_FOUND 626 -#define ZTUPLE_ALREADY_EXIST 630 -/* 
------------------------------------------------------------------------- */ -/* ERROR CODES FROM TUP */ -/* ------------------------------------------------------------------------- */ -#define ZSEARCH_CONDITION_FALSE 899 -#define ZUSER_ERROR_CODE_LIMIT 6000 -#endif - -/** - * @class dblqh - * - * @section secIntro Introduction - * - * Dblqh is the coordinator of the LDM. Dblqh is responsible for - * performing operations on tuples. It does this job with help of - * Dbacc block (that manages the index structures) and Dbtup - * (that manages the tuples). - * - * Dblqh also keeps track of the participants and acts as a coordinator of - * 2-phase commits. Logical redo logging is also handled by the Dblqh - * block. - * - * @section secModules Modules - * - * The code is partitioned into the following modules: - * - START / RESTART - * - Start phase 1: Load our block reference and our processor id - * - Start phase 2: Initiate all records within the block - * Connect LQH with ACC and TUP. - * - Start phase 4: Connect LQH with LQH. Connect every LQH with - * every LQH in the database system. - * If initial start, then create the fragment log files. - * If system restart or node restart, - * then open the fragment log files and - * find the end of the log files. - * - ADD / DELETE FRAGMENT
- * Used by dictionary to create new fragments and delete old fragments. - * - EXECUTION
- * handles the reception of lqhkeyreq and all processing - * of operations on behalf of this request. - * This does also involve reception of various types of attrinfo - * and keyinfo. - * It also involves communication with ACC and TUP. - * - LOG
- * The log module handles the reading and writing of the log. - * It is also responsible for handling system restart. - * It controls the system restart in TUP and ACC as well. - * - TRANSACTION
- * This module handles the commit and the complete phases. - * - MODULE TO HANDLE TC FAILURE
- * - SCAN
- * This module contains the code that handles a scan of a particular - * fragment. - * It operates under the control of TC and orders ACC to - * perform a scan of all tuples in the fragment. - * TUP performs the necessary search conditions - * to ensure that only valid tuples are returned to the application. - * - NODE RECOVERY
- * Used when a node has failed. - * It performs a copy of a fragment to a new replica of the fragment. - * It does also shut down all connections to the failed node. - * - LOCAL CHECKPOINT
- * Handles execution and control of LCPs - * It controls the LCPs in TUP and ACC. - * It also interacts with DIH to control which GCPs are recoverable. - * - GLOBAL CHECKPOINT
- * Helps DIH in discovering when GCPs are recoverable. - * It handles the request gcp_savereq that requests LQH to - * save a particular GCP to disk and respond when completed. - * - FILE HANDLING
- * With submodules: - * - SIGNAL RECEPTION - * - NORMAL OPERATION - * - FILE CHANGE - * - INITIAL START - * - SYSTEM RESTART PHASE ONE - * - SYSTEM RESTART PHASE TWO, - * - SYSTEM RESTART PHASE THREE - * - SYSTEM RESTART PHASE FOUR - * - ERROR - * - TEST - * - LOG - */ -class Dblqh: public SimulatedBlock { -public: - enum LcpCloseState { - LCP_IDLE = 0, - LCP_RUNNING = 1, // LCP is running - LCP_CLOSE_STARTED = 2, // Completion(closing of files) has started - ACC_LCP_CLOSE_COMPLETED = 3, - TUP_LCP_CLOSE_COMPLETED = 4 - }; - - enum ExecUndoLogState { - EULS_IDLE = 0, - EULS_STARTED = 1, - EULS_COMPLETED = 2 - }; - - struct AddFragRecord { - enum AddFragStatus { - FREE = 0, - ACC_ADDFRAG = 1, - WAIT_TUP = 3, - WAIT_TUX = 5, - WAIT_ADD_ATTR = 6, - TUP_ATTR_WAIT = 7, - TUX_ATTR_WAIT = 9 - }; - LqhAddAttrReq::Entry attributes[LqhAddAttrReq::MAX_ATTRIBUTES]; - UintR accConnectptr; - AddFragStatus addfragStatus; - UintR dictConnectptr; - UintR fragmentPtr; - UintR nextAddfragrec; - UintR schemaVer; - UintR tupConnectptr; - UintR tuxConnectptr; - UintR checksumIndicator; - UintR GCPIndicator; - BlockReference dictBlockref; - Uint32 m_senderAttrPtr; - Uint16 addfragErrorCode; - Uint16 attrSentToTup; - Uint16 attrReceived; - Uint16 addFragid; - Uint16 noOfAttr; - Uint16 noOfNull; - Uint16 tabId; - Uint16 totalAttrReceived; - Uint16 fragCopyCreation; - Uint16 noOfKeyAttr; - Uint16 noOfCharsets; - Uint16 lh3DistrBits; - Uint16 tableType; - Uint16 primaryTableId; - Uint32 tablespace_id; - Uint32 maxRowsLow; - Uint32 maxRowsHigh; - Uint32 minRowsLow; - Uint32 minRowsHigh; - Uint32 forceVarPartFlag; - }; - typedef Ptr AddFragRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ ATTRIBUTE INFORMATION RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * Can contain one (1) attrinfo signal. - * One signal contains 24 attr. info words. - * But 32 elements are used to make plex happy. - * Some of the elements are used to the following things: - * - Data length in this record is stored in the - * element indexed by ZINBUF_DATA_LEN. - * - Next attrinbuf is pointed out by the element - * indexed by ZINBUF_NEXT. - */ - struct Attrbuf { - UintR attrbuf[32]; - }; // Size 128 bytes - typedef Ptr AttrbufPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ DATA BUFFER $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This buffer is used as a general data storage. 
- */ - struct Databuf { - UintR data[4]; - UintR nextDatabuf; - }; // size 20 bytes - typedef Ptr DatabufPtr; - - struct ScanRecord { - ScanRecord() {} - enum ScanState { - SCAN_FREE = 0, - WAIT_STORED_PROC_COPY = 1, - WAIT_STORED_PROC_SCAN = 2, - WAIT_NEXT_SCAN_COPY = 3, - WAIT_NEXT_SCAN = 4, - WAIT_DELETE_STORED_PROC_ID_SCAN = 5, - WAIT_DELETE_STORED_PROC_ID_COPY = 6, - WAIT_ACC_COPY = 7, - WAIT_ACC_SCAN = 8, - WAIT_SCAN_NEXTREQ = 10, - WAIT_CLOSE_SCAN = 12, - WAIT_CLOSE_COPY = 13, - WAIT_RELEASE_LOCK = 14, - WAIT_TUPKEY_COPY = 15, - WAIT_LQHKEY_COPY = 16, - IN_QUEUE = 17 - }; - enum ScanType { - ST_IDLE = 0, - SCAN = 1, - COPY = 2 - }; - - UintR scan_acc_op_ptr[32]; - Uint32 scan_acc_index; - Uint32 scan_acc_attr_recs; - UintR scanApiOpPtr; - Local_key m_row_id; - - Uint32 m_max_batch_size_rows; - Uint32 m_max_batch_size_bytes; - - Uint32 m_curr_batch_size_rows; - Uint32 m_curr_batch_size_bytes; - - bool check_scan_batch_completed() const; - - UintR copyPtr; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - Uint32 nextHash; - Uint32 prevHash; - bool equal(const ScanRecord & key) const { - return scanNumber == key.scanNumber && fragPtrI == key.fragPtrI; - } - Uint32 hashValue() const { - return fragPtrI ^ scanNumber; - } - - UintR scanAccPtr; - UintR scanAiLength; - UintR scanErrorCounter; - UintR scanSchemaVersion; - - /** - * This is _always_ main table, even in range scan - * in which case scanTcrec->fragmentptr is different - */ - Uint32 fragPtrI; - UintR scanStoredProcId; - ScanState scanState; - UintR scanTcrec; - ScanType scanType; - BlockReference scanApiBlockref; - NodeId scanNodeId; - Uint16 scanReleaseCounter; - Uint16 scanNumber; - - // scan source block ACC TUX TUP - BlockReference scanBlockref; - - Uint8 scanCompletedStatus; - Uint8 scanFlag; - Uint8 scanLockHold; - Uint8 scanLockMode; - Uint8 readCommitted; - Uint8 rangeScan; - Uint8 descending; - Uint8 tupScan; - Uint8 lcpScan; - Uint8 scanTcWaiting; - Uint8 scanKeyinfoFlag; - Uint8 m_last_row; - }; // Size 272 bytes - typedef Ptr ScanRecordPtr; - - struct Fragrecord { - Fragrecord() {} - - enum ExecSrStatus { - IDLE = 0, - ACTIVE = 2 - }; - /** - * Possible state transitions are: - * - FREE -> DEFINED Fragment record is allocated - * - DEFINED -> ACTIVE Add fragment is completed and - * fragment is ready to - * receive operations. - * - DEFINED -> ACTIVE_CREATION Add fragment is completed and - * fragment is ready to - * receive operations in parallel - * with a copy fragment - * which is performed from the - * primary replica - * - DEFINED -> CRASH_RECOVERING A fragment is ready to be - * recovered from a local - * checkpoint on disk - * - ACTIVE -> BLOCKED A local checkpoint is to be - * started. No more operations - * are allowed to be started until - * the local checkpoint - * has been started. - * - ACTIVE -> REMOVING A fragment is removed from the node - * - BLOCKED -> ACTIVE Operations are allowed again in - * the fragment. - * - CRASH_RECOVERING -> ACTIVE A fragment has been recovered and - * are now ready for - * operations again. - * - CRASH_RECOVERING -> REMOVING Fragment recovery failed or - * was cancelled. - * - ACTIVE_CREATION -> ACTIVE A fragment is now copied and now - * is a normal fragment - * - ACTIVE_CREATION -> REMOVING Copying of the fragment failed - * - REMOVING -> FREE Removing of the fragment is - * completed and the fragment - * is now free again. 
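The Fragrecord transition list above is a small state machine; encoded as a table it reads as follows. This is a reading aid using the names from the comment, with illustrative enumerator values; the actual enum below uses FSACTIVE for the active state and different numbering:

    #include <cstdio>

    /* States as named in the transition comment above. */
    enum FragState { FREE, DEFINED, ACTIVE, ACTIVE_CREATION,
                     CRASH_RECOVERING, BLOCKED, REMOVING };

    /* One entry per documented transition. */
    struct Transition { FragState from, to; };
    static const Transition allowed[] = {
      { FREE,             DEFINED          },  /* fragment record allocated      */
      { DEFINED,          ACTIVE           },  /* add fragment completed         */
      { DEFINED,          ACTIVE_CREATION  },  /* ready, copy from primary runs  */
      { DEFINED,          CRASH_RECOVERING },  /* to be restored from an LCP     */
      { ACTIVE,           BLOCKED          },  /* local checkpoint starting      */
      { ACTIVE,           REMOVING         },  /* fragment removed from the node */
      { BLOCKED,          ACTIVE           },  /* operations allowed again       */
      { CRASH_RECOVERING, ACTIVE           },  /* recovery finished              */
      { CRASH_RECOVERING, REMOVING         },  /* recovery failed or cancelled   */
      { ACTIVE_CREATION,  ACTIVE           },  /* copy completed                 */
      { ACTIVE_CREATION,  REMOVING         },  /* copy failed                    */
      { REMOVING,         FREE             },  /* record reusable again          */
    };

    static bool transitionAllowed(FragState from, FragState to)
    {
      for (const Transition& t : allowed)
        if (t.from == from && t.to == to) return true;
      return false;
    }

    int main()
    {
      printf("ACTIVE -> BLOCKED allowed: %d\n", (int)transitionAllowed(ACTIVE, BLOCKED));
      printf("FREE   -> ACTIVE  allowed: %d\n", (int)transitionAllowed(FREE, ACTIVE));
      return 0;
    }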
- */ - enum FragStatus { - FREE = 0, ///< Fragment record is currently not in use - FSACTIVE = 1, ///< Fragment is defined and usable for operations - DEFINED = 2, ///< Fragment is defined but not yet usable by - ///< operations - BLOCKED = 3, ///< LQH is waiting for all active operations to - ///< complete the current phase so that the - ///< local checkpoint can be started. - ACTIVE_CREATION = 4, ///< Fragment is defined and active but is under - ///< creation by the primary LQH. - CRASH_RECOVERING = 5, ///< Fragment is recovering after a crash by - ///< executing the fragment log and so forth. - ///< Will need further breakdown. - REMOVING = 6 ///< The fragment is currently removed. - ///< Operations are not allowed. - }; - enum LogFlag { - STATE_TRUE = 0, - STATE_FALSE = 1 - }; - enum SrStatus { - SS_IDLE = 0, - SS_STARTED = 1, - SS_COMPLETED = 2 - }; - enum LcpFlag { - LCP_STATE_TRUE = 0, - LCP_STATE_FALSE = 1 - }; - /** - * Last GCI for executing the fragment log in this phase. - */ - UintR execSrLastGci[4]; - /** - * Start GCI for executing the fragment log in this phase. - */ - UintR execSrStartGci[4]; - /** - * Requesting user pointer for executing the fragment log in - * this phase - */ - UintR execSrUserptr[4]; - /** - * The LCP identifier of the LCP's. - * =0 means that the LCP number has not been stored. - * The LCP identifier is supplied by DIH when starting the LCP. - */ - UintR lcpId[MAX_LCP_STORED]; - UintR maxGciInLcp; - /** - * This variable contains the maximum global checkpoint - * identifier that exists in a certain local checkpoint. - * Maximum 4 local checkpoints is possible in this release. - */ - UintR maxGciCompletedInLcp; - UintR srLastGci[4]; - UintR srStartGci[4]; - /** - * The fragment pointers in ACC - */ - UintR accFragptr; - /** - * The EXEC_SR variables are used to keep track of which fragments - * that are interested in being executed as part of executing the - * fragment loop. - * It is initialised for every phase of executing the - * fragment log (the fragment log can be executed upto four times). - * - * Each execution is capable of executing the log records on four - * fragment replicas. - */ - /** - * Requesting block reference for executing the fragment log - * in this phase. - */ - BlockReference execSrBlockref[4]; - /** - * This variable contains references to active scan and copy - * fragment operations on the fragment. - * A maximum of four concurrently active is allowed. - */ - typedef Bitmask<4> ScanNumberMask; - ScanNumberMask m_scanNumberMask; - DLList::Head m_activeScans; - DLFifoList::Head m_queuedScans; - - Uint16 srLqhLognode[4]; - /** - * The fragment pointers in TUP and TUX - */ - UintR tupFragptr; - UintR tuxFragptr; - - /** - * This variable keeps track of how many operations that are - * active that have skipped writing the log but not yet committed - * or aborted. This is used during start of fragment. - */ - UintR activeTcCounter; - - /** - * This status specifies whether this fragment is actively - * engaged in executing the fragment log. - */ - ExecSrStatus execSrStatus; - - /** - * The fragment id of this fragment. 
- */ - UintR fragId; - - /** - * Status of fragment - */ - FragStatus fragStatus; - - /** - * 0 = undefined i.e fragStatus != ACTIVE_CREATION - * 1 = yes - * 2 = no - */ - enum ActiveCreat { - AC_NORMAL = 0, // fragStatus != ACTIVE_CREATION - AC_IGNORED = 1, // Operation that got ignored during NR - AC_NR_COPY = 2 // Operation that got performed during NR - }; - Uint8 m_copy_started_state; - - /** - * This flag indicates whether logging is currently activated at - * the fragment. - * During a system restart it is temporarily shut off. - * Some fragments have it permanently shut off. - */ - LogFlag logFlag; - UintR masterPtr; - /** - * This variable contains the maximum global checkpoint identifier - * which was completed when the local checkpoint was started. - */ - /** - * Reference to the next fragment record in a free list of fragment - * records. - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - - /** - * The newest GCI that has been committed on fragment - */ - UintR newestGci; - SrStatus srStatus; - UintR srUserptr; - /** - * The starting global checkpoint of this fragment. - */ - UintR startGci; - /** - * A reference to the table owning this fragment. - */ - UintR tabRef; - - /** - * This is the queue to put operations that have been blocked - * during start of a local chkp. - */ - UintR firstWaitQueue; - UintR lastWaitQueue; - - /** - * The block reference to ACC on the fragment makes it - * possible to have different ACC blocks for different - * fragments in the future. - */ - BlockReference accBlockref; - - /** - * Ordered index block. - */ - BlockReference tuxBlockref; - /** - * The master block reference as sent in COPY_ACTIVEREQ. - */ - BlockReference masterBlockref; - /** - * These variables are used during system restart to recall - * from which node to execute the fragment log and which GCI's - * this node should start and stop from. Also to remember who - * to send the response to when system restart is completed. - */ - BlockReference srBlockref; - /** - * The block reference to TUP on the fragment makes it - * possible to have different TUP blocks for different - * fragments in the future. - */ - BlockReference tupBlockref; - /** - * This state indicates if the fragment will participate in a - * checkpoint. - * Temporary tables with Fragrecord::logFlag permanently off - * will also have Fragrecord::lcpFlag off. - */ - LcpFlag lcpFlag; - /** - * Used to ensure that updates started with old - * configuration do not arrive here after the copy fragment - * has started. - * If they are allowed to arrive after they - * could update a record that has already been replicated to - * the new node. This type of arrival should be extremely - * rare but we must anyway ensure that no harm is done. - */ - Uint16 copyNode; - /** - * This variable ensures that only one copy fragment is - * active at a time on the fragment. - */ - Uint8 copyFragState; - /** - * The number of fragment replicas that will execute the log - * records in this round of executing the fragment - * log. Maximum four is possible. - */ - Uint8 execSrNoReplicas; - /** - * This variable contains what type of replica this fragment - * is. Two types are possible: - * - Primary/Backup replica = 0 - * - Stand-by replica = 1 - * - * It is not possible to distinguish between primary and - * backup on a fragment. - * This can only be done per transaction. - * DIH can change from primary to backup without informing - * the various replicas about this change. 
- */ - Uint8 fragCopy; - /** - * This is the last fragment distribution key that we have - * heard of. - */ - Uint8 fragDistributionKey; - /** - * How many local checkpoints does the fragment contain - */ - Uint8 srChkpnr; - Uint8 srNoLognodes; - /** - * Table type. - */ - Uint8 tableType; - /** - * For ordered index fragment, i-value of corresponding - * fragment in primary table. - */ - UintR tableFragptr; - - /** - * Log part - */ - Uint32 m_log_part_ptr_i; - }; - typedef Ptr FragrecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ GLOBAL CHECKPOINT RECORD $$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record describes a global checkpoint that is - * completed. It waits for all log records belonging to this - * global checkpoint to be saved on disk. - */ - struct GcpRecord { - /** - * The file number within each log part where the log was - * located when gcp_savereq was received. The last record - * belonging to this global checkpoint is certainly before - * this place in the log. We could come even closer but it - * would cost performance and doesn't seem like a good - * idea. This is simple and it works. - */ - Uint16 gcpFilePtr[4]; - /** - * The page number within the file for each log part. - */ - Uint16 gcpPageNo[4]; - /** - * The word number within the last page that was written for - * each log part. - */ - Uint16 gcpWordNo[4]; - /** - * The identity of this global checkpoint. - */ - UintR gcpId; - /** - * The state of this global checkpoint, one for each log part. - */ - Uint8 gcpLogPartState[4]; - /** - * The sync state of this global checkpoint, one for each - * log part. - */ - Uint8 gcpSyncReady[4]; - /** - * User pointer of the sender of gcp_savereq (= master DIH). - */ - UintR gcpUserptr; - /** - * Block reference of the sender of gcp_savereq - * (= master DIH). - */ - BlockReference gcpBlockref; - }; // Size 44 bytes - typedef Ptr GcpRecordPtr; - - struct HostRecord { - bool inPackedList; - UintR noOfPackedWordsLqh; - UintR packedWordsLqh[30]; - UintR noOfPackedWordsTc; - UintR packedWordsTc[29]; - BlockReference hostLqhBlockRef; - BlockReference hostTcBlockRef; - };// Size 128 bytes - typedef Ptr HostRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$ LOCAL CHECKPOINT SUPPORT RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record contains the information about an outstanding - * request to TUP or ACC. Used for both local checkpoints and - * system restart. - */ - struct LcpLocRecord { - enum LcpLocstate { - IDLE = 0, - WAIT_TUP_PREPLCP = 1, - WAIT_LCPHOLDOP = 2, - HOLDOP_READY = 3, - ACC_WAIT_STARTED = 4, - ACC_STARTED = 5, - ACC_COMPLETED = 6, - TUP_WAIT_STARTED = 7, - TUP_STARTED = 8, - TUP_COMPLETED = 9, - SR_ACC_STARTED = 10, - SR_TUP_STARTED = 11, - SR_ACC_COMPLETED = 12, - SR_TUP_COMPLETED = 13 - }; - LcpLocstate lcpLocstate; - Uint32 lcpRef; - }; // 28 bytes - typedef Ptr LcpLocRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ LOCAL CHECKPOINT RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record contains the information about a local - * checkpoint that is ongoing. This record is also used as a - * system restart record. 
- */ - struct LcpRecord { - LcpRecord() { m_EMPTY_LCP_REQ.clear(); } - - enum LcpState { - LCP_IDLE = 0, - LCP_COMPLETED = 2, - LCP_WAIT_FRAGID = 3, - LCP_WAIT_TUP_PREPLCP = 4, - LCP_WAIT_HOLDOPS = 5, - LCP_START_CHKP = 7, - LCP_BLOCKED_COMP = 8, - LCP_SR_WAIT_FRAGID = 9, - LCP_SR_STARTED = 10, - LCP_SR_COMPLETED = 11 - }; - - LcpState lcpState; - bool firstFragmentFlag; - bool lastFragmentFlag; - - struct FragOrd { - Uint32 fragPtrI; - LcpFragOrd lcpFragOrd; - }; - FragOrd currentFragment; - - bool lcpQueued; - FragOrd queuedFragment; - - bool reportEmpty; - NdbNodeBitmask m_EMPTY_LCP_REQ; - - Uint32 m_error; - Uint32 m_outstanding; - }; // Size 76 bytes - typedef Ptr LcpRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* */ - /* THE RECORDS THAT START BY LOG_ ARE A PART OF THE LOG MANAGER. */ - /* THESE RECORDS ARE USED TO HANDLE THE FRAGMENT LOG. */ - /* */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ LOG RECORD $$$$$$$ */ - /* */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* THIS RECORD IS ALIGNED TO BE 256 BYTES. */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record describes the current state of a log. - * A log consists of a number of log files. - * These log files are described by the log file record. - * - * There will be 4 sets of log files. - * Different tables will use different log files dependent - * on the table id. - * This ensures that more than one outstanding request can - * be sent to the file system. - * The log file to use is found by performing a very simple hash - * function. - */ - struct LogPartRecord { - enum LogPartState { - IDLE = 0, ///< Nothing happens at the moment - ACTIVE = 1, ///< An operation is active logging - SR_FIRST_PHASE = 2, ///< Finding the end of the log and - ///< the information about global - ///< checkpoints in the log is ongoing. - SR_FIRST_PHASE_COMPLETED = 3, ///< First phase completed - SR_THIRD_PHASE_STARTED = 4, ///< Executing fragment log is in 3rd ph - SR_THIRD_PHASE_COMPLETED = 5, - SR_FOURTH_PHASE_STARTED = 6, ///< Finding the log tail and head - ///< is the fourth phase. - SR_FOURTH_PHASE_COMPLETED = 7, - FILE_CHANGE_PROBLEM = 8, ///< For some reason the write to - ///< page zero in file zero have not - ///< finished after 15 mbyte of - ///< log data have been written - TAIL_PROBLEM = 9 ///< Only 1 mbyte of log left. - ///< No operations allowed to enter the - ///< log. Only special log records - ///< are allowed - }; - enum WaitWriteGciLog { - WWGL_TRUE = 0, - WWGL_FALSE = 1 - }; - enum LogExecState { - LES_IDLE = 0, - LES_SEARCH_STOP = 1, - LES_SEARCH_START = 2, - LES_EXEC_LOG = 3, - LES_EXEC_LOG_NEW_MBYTE = 4, - LES_EXEC_LOG_NEW_FILE = 5, - LES_EXEC_LOGREC_FROM_FILE = 6, - LES_EXEC_LOG_COMPLETED = 7, - LES_WAIT_READ_EXEC_SR_NEW_MBYTE = 8, - LES_WAIT_READ_EXEC_SR = 9, - LES_EXEC_LOG_INVALIDATE = 10 - }; - - /** - * Is a CONTINUEB(ZLOG_LQHKEYREQ) signal sent and - * outstanding. We do not want several instances of this - * signal out in the air since that would create multiple - * writers of the list. - */ - UintR LogLqhKeyReqSent; - /** - * Contains the current log file where log records are - * written. 
During system restart it is used to indicate the - * last log file. - */ - UintR currentLogfile; - /** - * The log file used to execute log records from far behind. - */ - UintR execSrExecLogFile; - /** - * The currently executing prepare record starts in this log - * page. This variable is used to enable that a log record is - * executed multiple times in execution of the log. - */ - UintR execSrLogPage; - /** - * This variable keeps track of the lfo record where the - * pages that were read from disk when an operations log - * record were not found in the main memory buffer for log - * pages. - */ - UintR execSrLfoRec; - /** - * The starting page number when reading log from far behind. - */ - UintR execSrStartPageNo; - /** - * The last page number when reading log from far behind. - */ - UintR execSrStopPageNo; - /** - * Contains a reference to the first log file, file number 0. - */ - UintR firstLogfile; - /** - * The head of the operations queued for logging. - */ - UintR firstLogQueue; - /** - * This variable contains the oldest operation in this log - * part which have not been committed yet. - */ - UintR firstLogTcrec; - /** - * The first reference to a set of 8 pages. These are used - * during execution of the log to keep track of which pages - * are in memory and which are not. - */ - UintR firstPageRef; - /** - * This variable contains the global checkpoint record - * waiting for disk writes to complete. - */ - UintR gcprec; - /** - * The last reference to a set of 8 pages. These are used - * during execution of the log to keep track of which pages - * are in memory and which are not. - */ - UintR lastPageRef; - /** - * The tail of the operations queued for logging. - */ - UintR lastLogQueue; - /** - * This variable contains the newest operation in this log - * part which have not been committed yet. - */ - UintR lastLogTcrec; - /** - * This variable indicates which was the last mbyte that was - * written before the system crashed. Discovered during - * system restart. - */ - UintR lastLogfile; - /** - * This variable is used to keep track of the state during - * the third phase of the system restart, i.e. when - * LogPartRecord::logPartState == - * LogPartRecord::SR_THIRD_PHASE_STARTED. - */ - LogExecState logExecState; - /** - * This variable contains the lap number of this log part. - */ - UintR logLap; - /** - * This variable contains the place to stop executing the log - * in this phase. - */ - UintR logLastGci; - /** - * This variable contains the place to start executing the - * log in this phase. - */ - UintR logStartGci; - /** - * The latest GCI completed in this log part. - */ - UintR logPartNewestCompletedGCI; - /** - * The current state of this log part. - */ - LogPartState logPartState; - /** - * A timer that is set every time a log page is sent to disk. - * Ensures that log pages are not kept in main memory for - * more than a certain time. - */ - UintR logPartTimer; - /** - * The current timer which is set by the periodic signal - * received by LQH - */ - UintR logTimer; - /** - * Contains the number of the log tail file and the mbyte - * reference within that file. This information ensures that - * the tail is not overwritten when writing new log records. - */ - UintR logTailFileNo; - /** - * The TcConnectionrec used during execution of this log part. - */ - UintR logTcConrec; - /** - * The number of pages that currently resides in the main - * memory buffer. It does not refer pages that are currently - * read from the log files. 
Only to pages already read - * from the log file. - */ - UintR mmBufferSize; - /** - * Contains the current number of log files in this log part. - */ - UintR noLogFiles; - /** - * This variable is used only during execution of a log - * record. It keeps track of in which page record a log - * record was started. It is used then to deduce which - * pages that are dirty after that the log records on the - * page have been executed. - * - * It is also used to find out where to write the invalidate - * command when that is needed. - */ - UintR prevLogpage; - /** - * The number of files remaining to gather GCI information - * for during system restart. Only used if number of files - * is larger than 60. - */ - UintR srRemainingFiles; - /** - * The log file where to start executing the log during - * system restart. - */ - UintR startLogfile; - /** - * The last log file in which to execute the log during system - * restart. - */ - UintR stopLogfile; - /** - * This variable keeps track of when we want to write a complete - * gci log record but have been blocked by an ongoing log operation. - */ - WaitWriteGciLog waitWriteGciLog; - /** - * The currently executing prepare record starts in this index - * in the log page. - */ - Uint16 execSrLogPageIndex; - /** - * Which of the four exec_sr's in the fragment is currently executing - */ - Uint16 execSrExecuteIndex; - /** - * The number of pages executed in the current mbyte. - */ - Uint16 execSrPagesExecuted; - /** - * The number of pages read from disk that have arrived and are - * currently awaiting execution of the log. - */ - Uint16 execSrPagesRead; - /** - * The number of pages read from disk and currently not arrived - * to the block. - */ - Uint16 execSrPagesReading; - /** - * This variable refers to the new header file where we will - * start writing the log after a system restart have been completed. - */ - Uint16 headFileNo; - /** - * This variable refers to the page number within the header file. - */ - Uint16 headPageNo; - /** - * This variable refers to the index within the new header - * page. - */ - Uint16 headPageIndex; - /** - * This variables indicates which was the last mbyte in the last - * logfile before a system crash. Discovered during system restart. - */ - Uint16 lastMbyte; - /** - * This variable is used only during execution of a log - * record. It keeps track of in which file page a log - * record was started. It is used if it is needed to write a - * dirty page to disk during log execution (this happens when - * commit records are invalidated). - */ - Uint16 prevFilepage; - /** - * This is used to save where we were in the execution of log - * records when we find a commit record that needs to be - * executed. - * - * This variable is also used to remember the index where the - * log type was in the log record. It is only used in this - * role when finding a commit record that needs to be - * invalidated. - */ - Uint16 savePageIndex; - Uint8 logTailMbyte; - /** - * The mbyte within the starting log file where to start - * executing the log. - */ - Uint8 startMbyte; - /** - * The last mbyte in which to execute the log during system - * restart. - */ - Uint8 stopMbyte; - /** - * This variable refers to the file where invalidation is - * occuring during system/node restart. - */ - Uint16 invalidateFileNo; - /** - * This variable refers to the page where invalidation is - * occuring during system/node restart. 
- */ - Uint16 invalidatePageNo; - }; // Size 164 Bytes - typedef Ptr LogPartRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ LOG FILE RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* THIS RECORD IS ALIGNED TO BE 288 (256 + 32) BYTES. */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record contains information about a log file. - * A log file contains log records from several tables and - * fragments of a table. LQH can contain more than - * one log file to ensure faster log processing. - * - * The number of pages to write to disk at a time is - * configurable. - */ - struct LogFileRecord { - enum FileChangeState { - NOT_ONGOING = 0, - BOTH_WRITES_ONGOING = 1, - LAST_WRITE_ONGOING = 2, - FIRST_WRITE_ONGOING = 3, - WRITE_PAGE_ZERO_ONGOING = 4 - }; - enum LogFileStatus { - LFS_IDLE = 0, ///< Log file record not in use - CLOSED = 1, ///< Log file closed - OPENING_INIT = 2, - OPEN_SR_FRONTPAGE = 3, ///< Log file opened as part of system - ///< restart. Open file 0 to find - ///< the front page of the log part. - OPEN_SR_LAST_FILE = 4, ///< Open last log file that was written - ///< before the system restart. - OPEN_SR_NEXT_FILE = 5, ///< Open a log file which is 16 files - ///< backwards to find the next - ///< information about GCPs. - OPEN_EXEC_SR_START = 6, ///< Log file opened as part of - ///< executing - ///< log during system restart. - OPEN_EXEC_SR_NEW_MBYTE = 7, - OPEN_SR_FOURTH_PHASE = 8, - OPEN_SR_FOURTH_NEXT = 9, - OPEN_SR_FOURTH_ZERO = 10, - OPENING_WRITE_LOG = 11, ///< Log file opened as part of writing - ///< log during normal operation. - OPEN_EXEC_LOG = 12, - CLOSING_INIT = 13, - CLOSING_SR = 14, ///< Log file closed as part of system - ///< restart. Currently trying to - ///< find where to start executing the - ///< log - CLOSING_EXEC_SR = 15, ///< Log file closed as part of - ///< executing log during system restart - CLOSING_EXEC_SR_COMPLETED = 16, - CLOSING_WRITE_LOG = 17, ///< Log file closed as part of writing - ///< log during normal operation. - CLOSING_EXEC_LOG = 18, - OPEN_INIT = 19, - OPEN = 20, ///< Log file open - OPEN_SR_INVALIDATE_PAGES = 21, - CLOSE_SR_INVALIDATE_PAGES = 22 - }; - - /** - * When a new mbyte is started in the log we have to find out - * how far back in the log we still have prepared operations - * which have been neither committed or aborted. This variable - * keeps track of this value for each of the mbytes in this - * log file. This is used in writing down these values in the - * header of each log file. That information is used during - * system restart to find the tail of the log. - */ - UintR *logLastPrepRef; - /** - * The max global checkpoint completed before the mbyte in the - * log file was started. One variable per mbyte. - */ - UintR *logMaxGciCompleted; - /** - * The max global checkpoint started before the mbyte in the log - * file was started. One variable per mbyte. - */ - UintR *logMaxGciStarted; - /** - * This variable contains the file name as needed by the file - * system when opening the file. - */ - UintR fileName[4]; - /** - * This variable has a reference to the log page which is - * currently in use by the log. - */ - UintR currentLogpage; - /** - * The number of the current mbyte in the log file. - */ - UintR currentMbyte; - /** - * This variable is used when changing files. 
It is to find - * out when both the last write in the previous file and the - * first write in this file has been completed. After these - * writes have completed the variable keeps track of when the - * write to page zero in file zero is completed. - */ - FileChangeState fileChangeState; - /** - * The number of the file within this log part. - */ - UintR fileNo; - /** - * This variable shows where to read/write the next pages into - * the log. Used when writing the log during normal operation - * and when reading the log during system restart. It - * specifies the page position where each page is 8 kbyte. - */ - UintR filePosition; - /** - * This contains the file pointer needed by the file system - * when reading/writing/closing and synching. - */ - UintR fileRef; - /** - * The head of the pages waiting for shipment to disk. - * They are filled with log info. - */ - UintR firstFilledPage; - /** - * A list of active read/write operations on the log file. - * Operations are always put in last and the first should - * always complete first. - */ - UintR firstLfo; - UintR lastLfo; - /** - * The tail of the pages waiting for shipment to disk. - * They are filled with log info. - */ - UintR lastFilledPage; - /** - * This variable keeps track of the last written page in the - * file while writing page zero in file zero when changing log - * file. - */ - UintR lastPageWritten; - /** - * This variable keeps track of the last written word in the - * last page written in the file while writing page zero in - * file zero when changing log file. - */ - UintR lastWordWritten; - /** - * This variable contains the last word written in the last page. - */ - LogFileStatus logFileStatus; - /** - * A reference to page zero in this file. - * This page is written before the file is closed. - */ - UintR logPageZero; - /** - * This variable contains a reference to the record describing - * this log part. One of four records (0,1,2 or 3). - */ - UintR logPartRec; - /** - * Next free log file record or next log file in this log. - */ - UintR nextLogFile; - /** - * The previous log file. - */ - UintR prevLogFile; - /** - * The number of remaining words in this mbyte of the log file. - */ - UintR remainingWordsInMbyte; - /** - * The current file page within the current log file. This is - * a reference within the file and not a reference to a log - * page record. It is used to deduce where log records are - * written. Particularly completed gcp records and prepare log - * records. - */ - Uint16 currentFilepage; - /** - * The number of pages in the list referenced by - * LOG_PAGE_BUFFER. - */ - Uint16 noLogpagesInBuffer; - }; // Size 288 bytes - typedef Ptr LogFileRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ LOG OPERATION RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * This record contains a currently active file operation - * that has started by the log module. 
- */ - struct LogFileOperationRecord { - enum LfoState { - IDLE = 0, ///< Operation is not used at the moment - INIT_WRITE_AT_END = 1, ///< Write in file so that it grows to - ///< 16 Mbyte - INIT_FIRST_PAGE = 2, ///< Initialise the first page in a file - WRITE_GCI_ZERO = 3, - WRITE_INIT_MBYTE = 4, - WRITE_DIRTY = 5, - READ_SR_FRONTPAGE = 6, ///< Read page zero in file zero during - ///< system restart - READ_SR_LAST_FILE = 7, ///< Read page zero in last file open - ///< before system crash - READ_SR_NEXT_FILE = 8, ///< Read 60 files backwards to find - ///< further information GCPs in page - ///< zero - READ_SR_LAST_MBYTE = 9, - READ_EXEC_SR = 10, - READ_EXEC_LOG = 11, - READ_SR_FOURTH_PHASE = 12, - READ_SR_FOURTH_ZERO = 13, - FIRST_PAGE_WRITE_IN_LOGFILE = 14, - LAST_WRITE_IN_FILE = 15, - WRITE_PAGE_ZERO = 16, - ACTIVE_WRITE_LOG = 17, ///< A write operation during - ///< writing of log - READ_SR_INVALIDATE_PAGES = 18, - WRITE_SR_INVALIDATE_PAGES = 19, - WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0 = 20 - }; - /** - * We have to remember the log pages read. - * Otherwise we cannot build the linked list after the pages have - * arrived to main memory. - */ - UintR logPageArray[16]; - /** - * A list of the pages that are part of this active operation. - */ - UintR firstLfoPage; - /** - * A timer to ensure that records are not lost. - */ - UintR lfoTimer; - /** - * The word number of the last written word in the last during - * a file write. - */ - UintR lfoWordWritten; - /** - * This variable contains the state of the log file operation. - */ - LfoState lfoState; - /** - * The log file that the file operation affects. - */ - UintR logFileRec; - /** - * The log file operations on a file are kept in a linked list. - */ - UintR nextLfo; - /** - * The page number of the first read/written page during a file - * read/write. - */ - Uint16 lfoPageNo; - /** - * The number of pages written or read during an operation to - * the log file. - */ - Uint16 noPagesRw; - }; // 92 bytes - typedef Ptr LogFileOperationRecordPtr; - - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /* $$$$$$$ LOG PAGE RECORD $$$$$$$ */ - /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ - /** - * These are the 8 k pages used to store log records before storing - * them in the file system. - * Since 64 kbyte is sent to disk at a time it is necessary to have - * at least 4*64 kbytes of log pages. - * To handle multiple outstanding requests we need some additional pages. - * Thus we allocate 1 mbyte to ensure that we do not get problems with - * insufficient number of pages. - */ - struct LogPageRecord { - /** - * This variable contains the pages that are sent to disk. - * - * All pages contain a header of 12 words: - * - WORD 0: CHECKSUM Calculated before storing on disk and - * checked when read from disk. - * - WORD 1: LAP How many wraparounds have the log - * experienced since initial start of the - * system. - * - WORD 2: MAX_GCI_COMPLETED Which is the maximum gci which have - * completed before this page. This - * gci will not be found in this - * page and hereafter in the log. - * - WORD 3: MAX_GCI_STARTED The maximum gci which have started - * before this page. - * - WORD 4: NEXT_PAGE Pointer to the next page. - * Only used in main memory - * - WORD 5: PREVIOUS_PAGE Pointer to the previous page. - * Currently not used. - * - WORD 6: VERSION NDB version that wrote the page. - * - WORD 7: NO_LOG_FILES Number of log files in this log part. 
- * - WORD 8: CURRENT PAGE INDEX This keeps track of where we are in the - * page. - * This is only used when pages is in - * memory. - * - WORD 9: OLD PREPARE FILE NO This keeps track of the oldest prepare - * operation still alive (not committed - * or aborted) when this mbyte started. - * - WORD 10: OLD PREPARE PAGE REF File page reference within this file - * number. - * Page no + Page index. - * If no prepare was alive then these - * values points this mbyte. - * - WORD 11: DIRTY FLAG = 0 means not dirty and - * = 1 means the page is dirty. - * Is used when executing log when - * a need to write invalid commit - * records arise. - * - * The remaining 2036 words are used for log information, i.e. - * log records. - * - * A log record on this page has the following layout: - * - WORD 0: LOG RECORD TYPE - * The following types are supported: - * - PREPARE OPERATION An operation not yet committed. - * - NEW PREPARE OPERATION A prepared operation already - * logged is inserted - * into the log again so that the - * log tail can be advanced. - * This can happen when a transaction is - * committed for a long time. - * - ABORT TRANSACTION A previously prepared transaction - * was aborted. - * - COMMIT TRANSACTION A previously prepared transaction - * was committed. - * - INVALID COMMIT A previous commit record was - * invalidated by a - * subsequent system restart. - * A log record must be invalidated - * in a system restart if it belongs - * to a global checkpoint id which - * is not included in the system - * restart. - * Otherwise it will be included in - * a subsequent system restart since - * it will then most likely belong - * to a global checkpoint id which - * is part of that system - * restart. - * This is not a correct behaviour - * since this operation is lost in a - * system restart and should not - * reappear at a later system - * restart. - * - COMPLETED GCI A GCI has now been completed. - * - FRAGMENT SPLIT A fragment has been split - * (not implemented yet) - * - FILE DESCRIPTOR This is always the first log record - * in a file. - * It is always placed on page 0 after - * the header. - * It is written when the file is - * opened and when the file is closed. - * - NEXT LOG RECORD This log record only records where - * the next log record starts. - * - NEXT MBYTE RECORD This log record specifies that there - * are no more log records in this mbyte. - * - * - * A FILE DESCRIPTOR log record continues as follows: - * - WORD 1: NO_LOG_DESCRIPTORS This defines the number of - * descriptors of log files that - * will follow hereafter (max 32). - * the log descriptor will describe - * information about - * max_gci_completed, - * max_gci_started and log_lap at - * every 1 mbyte of the log file - * since a log file is 16 mbyte - * always, i need 16 entries in the - * array with max_gci_completed, - * max_gci_started and log_lap. thus - * 32 entries per log file - * descriptor (max 32*48 = 1536, - * always fits in page 0). - * - WORD 2: LAST LOG FILE The number of the log file currently - * open. This is only valid in file 0. - * - WORD 3 - WORD 18: MAX_GCI_COMPLETED for every 1 mbyte - * in this log file. - * - WORD 19 - WORD 34: MAX_GCI_STARTED for every 1 mbyte - * in this log file. - * - * Then it continues for NO_LOG_DESCRIPTORS until all subsequent - * log files (max 32) have been properly described. 
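[Reading aid for the page layout documented above: the 12-word header restated as index constants, plus the size check implied by the FILE DESCRIPTOR description. The identifiers are invented for this sketch and are not the ones used by the block.]

  #include <cstdint>

  enum LogPageHeaderWord : uint32_t {
    HDR_CHECKSUM             = 0,   // computed before write, verified on read
    HDR_LAP                  = 1,   // how many times the log has wrapped
    HDR_MAX_GCI_COMPLETED    = 2,
    HDR_MAX_GCI_STARTED      = 3,
    HDR_NEXT_PAGE            = 4,   // main-memory list link only
    HDR_PREVIOUS_PAGE        = 5,   // currently unused
    HDR_VERSION              = 6,
    HDR_NO_LOG_FILES         = 7,
    HDR_CURRENT_PAGE_INDEX   = 8,   // in-memory write position
    HDR_OLD_PREPARE_FILE_NO  = 9,
    HDR_OLD_PREPARE_PAGE_REF = 10,
    HDR_DIRTY_FLAG           = 11,  // 1 = page must be rewritten when invalidating
    HDR_SIZE                 = 12   // log records start at this word
  };

  // The GCI bounds of a page can be read straight from its header words.
  inline uint32_t maxGciCompleted(const uint32_t* page) { return page[HDR_MAX_GCI_COMPLETED]; }
  inline uint32_t maxGciStarted (const uint32_t* page)  { return page[HDR_MAX_GCI_STARTED]; }

  // Size check from the FILE DESCRIPTOR description: 16 mbytes x 3 values
  // = 48 words per described log file, at most 32 files -> 32 * 48 = 1536
  // words, which fits in the 2036 data words of an 8 kbyte (2048-word) page.
  static_assert(32u * 48u <= 2048u - HDR_SIZE, "file descriptors fit in page zero");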
- * - * - * A PREPARE OPERATION log record continues as follows: - * - WORD 1: LOG RECORD SIZE - * - WORD 2: HASH VALUE - * - WORD 3: SCHEMA VERSION - * - WORD 4: OPERATION TYPE - * = 0 READ, - * = 1 UPDATE, - * = 2 INSERT, - * = 3 DELETE - * - WORD 5: NUMBER OF WORDS IN ATTRINFO PART - * - WORD 6: KEY LENGTH IN WORDS - * - WORD 7 - (WORD 7 + KEY_LENGTH - 1) The tuple key - * - (WORD 7 + KEY_LENGTH) - - * (WORD 7 + KEY_LENGTH + ATTRINFO_LENGTH - 1) The attrinfo - * - * A log record can be spread in several pages in some cases. - * The next log record always starts immediately after this log record. - * A log record does however never traverse a 1 mbyte boundary. - * This is used to ensure that we can always come back if something - * strange occurs in the log file. - * To ensure this we also have log records which only records - * the next log record. - * - * - * A COMMIT TRANSACTION log record continues as follows: - * - WORD 1: TRANSACTION ID PART 1 - * - WORD 2: TRANSACTION ID PART 2 - * - WORD 3: FRAGMENT ID OF THE OPERATION - * - WORD 4: TABLE ID OF THE OPERATION - * - WORD 5: THE FILE NUMBER OF THE PREPARE RECORD - * - WORD 6: THE STARTING PAGE NUMBER OF THE PREPARE RECORD - * - WORD 7: THE STARTING PAGE INDEX OF THE PREPARE RECORD - * - WORD 8: THE STOP PAGE NUMBER OF THE PREPARE RECORD - * - WORD 9: GLOBAL CHECKPOINT OF THE TRANSACTION - * - * - * An ABORT TRANSACTION log record continues as follows: - * - WORD 1: TRANSACTION ID PART 1 - * - WORD 2: TRANSACTION ID PART 2 - * - * - * A COMPLETED CGI log record continues as follows: - * - WORD 1: THE COMPLETED GCI - * - * - * A NEXT LOG RECORD log record continues as follows: - * - There is no more information needed. - * The next log record will always refer to the start of the next page. - * - * A NEXT MBYTE RECORD log record continues as follows: - * - There is no more information needed. - * The next mbyte will always refer to the start of the next mbyte. - */ - UintR logPageWord[8192]; // Size 32 kbytes - }; - typedef Ptr LogPageRecordPtr; - - struct PageRefRecord { - UintR pageRef[8]; - UintR prNext; - UintR prPrev; - Uint16 prFileNo; - Uint16 prPageNo; - }; // size 44 bytes - typedef Ptr PageRefRecordPtr; - - struct Tablerec { - enum TableStatus { - TABLE_DEFINED = 0, - NOT_DEFINED = 1, - ADD_TABLE_ONGOING = 2, - PREP_DROP_TABLE_ONGOING = 3, - PREP_DROP_TABLE_DONE = 4 - }; - - UintR fragrec[MAX_FRAG_PER_NODE]; - Uint16 fragid[MAX_FRAG_PER_NODE]; - /** - * Status of the table - */ - TableStatus tableStatus; - /** - * Table type and target table of index. 
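[The COMMIT TRANSACTION layout above is what allows log execution to locate the matching PREPARE record again. A minimal decoding sketch follows; the struct and function names are invented here, although the file/page fields mirror the CommitLogRecord helper declared further down in this header.]

  #include <cstdint>

  // Word offsets follow the COMMIT TRANSACTION layout documented above;
  // word 0 (the log record type) is assumed to have been consumed already.
  struct DecodedCommitRecord {
    uint32_t transid1, transid2;     // words 1-2
    uint32_t fragId;                 // word 3
    uint32_t tableId;                // word 4
    uint32_t prepareFileNo;          // word 5: file holding the PREPARE record
    uint32_t prepareStartPageNo;     // word 6
    uint32_t prepareStartPageIndex;  // word 7
    uint32_t prepareStopPageNo;      // word 8
    uint32_t gci;                    // word 9
  };

  inline DecodedCommitRecord decodeCommitRecord(const uint32_t* w)
  {
    return { w[1], w[2], w[3], w[4], w[5], w[6], w[7], w[8], w[9] };
  }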
- */ - Uint16 tableType; - Uint16 primaryTableId; - Uint32 schemaVersion; - Uint8 m_disk_table; - - Uint32 usageCount; - NdbNodeBitmask waitingTC; - NdbNodeBitmask waitingDIH; - }; // Size 100 bytes - typedef Ptr TablerecPtr; - - struct TcConnectionrec { - enum ListState { - NOT_IN_LIST = 0, - WAIT_QUEUE_LIST = 3 - }; - enum LogWriteState { - NOT_STARTED = 0, - NOT_WRITTEN = 1, - NOT_WRITTEN_WAIT = 2, - WRITTEN = 3 - }; - enum AbortState { - ABORT_IDLE = 0, - ABORT_ACTIVE = 1, - NEW_FROM_TC = 2, - REQ_FROM_TC = 3, - ABORT_FROM_TC = 4, - ABORT_FROM_LQH = 5 - }; - enum TransactionState { - IDLE = 0, - - /* -------------------------------------------------------------------- */ - // Transaction in progress states - /* -------------------------------------------------------------------- */ - WAIT_ACC = 1, - WAIT_TUPKEYINFO = 2, - WAIT_ATTR = 3, - WAIT_TUP = 4, - STOPPED = 5, - LOG_QUEUED = 6, - PREPARED = 7, - LOG_COMMIT_WRITTEN_WAIT_SIGNAL = 8, - LOG_COMMIT_QUEUED_WAIT_SIGNAL = 9, - - /* -------------------------------------------------------------------- */ - // Commit in progress states - /* -------------------------------------------------------------------- */ - COMMIT_STOPPED = 10, - LOG_COMMIT_QUEUED = 11, - COMMIT_QUEUED = 12, - COMMITTED = 13, - WAIT_TUP_COMMIT= 35, - - /* -------------------------------------------------------------------- */ - // Abort in progress states - /* -------------------------------------------------------------------- */ - WAIT_ACC_ABORT = 14, - ABORT_QUEUED = 15, - ABORT_STOPPED = 16, - WAIT_AI_AFTER_ABORT = 17, - LOG_ABORT_QUEUED = 18, - WAIT_TUP_TO_ABORT = 19, - - /* -------------------------------------------------------------------- */ - // Scan in progress states - /* -------------------------------------------------------------------- */ - WAIT_SCAN_AI = 20, - SCAN_STATE_USED = 21, - SCAN_FIRST_STOPPED = 22, - SCAN_CHECK_STOPPED = 23, - SCAN_STOPPED = 24, - SCAN_RELEASE_STOPPED = 25, - SCAN_CLOSE_STOPPED = 26, - COPY_CLOSE_STOPPED = 27, - COPY_FIRST_STOPPED = 28, - COPY_STOPPED = 29, - SCAN_TUPKEY = 30, - COPY_TUPKEY = 31, - - TC_NOT_CONNECTED = 32, - PREPARED_RECEIVED_COMMIT = 33, // Temporary state in write commit log - LOG_COMMIT_WRITTEN = 34 // Temporary state in write commit log - }; - enum ConnectState { - DISCONNECTED = 0, - CONNECTED = 1, - COPY_CONNECTED = 2, - LOG_CONNECTED = 3 - }; - ConnectState connectState; - UintR copyCountWords; - UintR firstAttrinfo[5]; - UintR tupkeyData[4]; - UintR transid[2]; - AbortState abortState; - UintR accConnectrec; - UintR applOprec; - UintR clientConnectrec; - UintR tcTimer; - UintR currReclenAi; - UintR currTupAiLen; - UintR firstAttrinbuf; - UintR firstTupkeybuf; - UintR fragmentid; - UintR fragmentptr; - UintR gci; - UintR hashValue; - UintR lastTupkeybuf; - UintR lastAttrinbuf; - /** - * Each operation (TcConnectrec) can be stored in max one out of many - * lists. - * This variable keeps track of which list it is in. 
- */ - ListState listState; - - UintR logStartFileNo; - LogWriteState logWriteState; - UintR nextHashRec; - UintR nextLogTcrec; - UintR nextTcLogQueue; - UintR nextTc; - UintR nextTcConnectrec; - UintR prevHashRec; - UintR prevLogTcrec; - UintR prevTc; - UintR readlenAi; - UintR reqRef; - UintR reqinfo; - UintR schemaVersion; - UintR storedProcId; - UintR simpleTcConnect; - UintR tableref; - UintR tcOprec; - UintR tcScanInfo; - UintR tcScanRec; - UintR totReclenAi; - UintR totSendlenAi; - UintR tupConnectrec; - UintR savePointId; - TransactionState transactionState; - BlockReference applRef; - BlockReference clientBlockref; - - BlockReference reqBlockref; - BlockReference tcBlockref; - BlockReference tcAccBlockref; - BlockReference tcTuxBlockref; - BlockReference tcTupBlockref; - Uint32 commitAckMarker; - union { - Uint32 m_scan_curr_range_no; - UintR noFiredTriggers; - }; - Uint16 errorCode; - Uint16 logStartPageIndex; - Uint16 logStartPageNo; - Uint16 logStopPageNo; - Uint16 nextReplica; - Uint16 primKeyLen; - Uint16 save1; - Uint16 nodeAfterNext[3]; - - Uint8 activeCreat; - Uint8 apiVersionNo; - Uint8 dirtyOp; - Uint8 indTakeOver; - Uint8 lastReplicaNo; - Uint8 lockType; - Uint8 nextSeqNoReplica; - Uint8 opSimple; - Uint8 opExec; - Uint8 operation; - Uint8 reclenAiLqhkey; - Uint8 m_offset_current_keybuf; - Uint8 replicaType; - Uint8 seqNoReplica; - Uint8 tcNodeFailrec; - Uint8 m_disk_table; - Uint8 m_use_rowid; - Uint8 m_dealloc; - Uint32 m_log_part_ptr_i; - Local_key m_row_id; - - struct { - Uint32 m_cnt; - Uint32 m_page_id[2]; - Local_key m_disk_ref[2]; - } m_nr_delete; - }; /* p2c: size = 280 bytes */ - - typedef Ptr TcConnectionrecPtr; - - struct TcNodeFailRecord { - enum TcFailStatus { - TC_STATE_TRUE = 0, - TC_STATE_FALSE = 1, - TC_STATE_BREAK = 2 - }; - UintR lastNewTcRef; - UintR newTcRef; - TcFailStatus tcFailStatus; - UintR tcRecNow; - BlockReference lastNewTcBlockref; - BlockReference newTcBlockref; - Uint16 oldNodeId; - }; // Size 28 bytes - typedef Ptr TcNodeFailRecordPtr; - - struct CommitLogRecord { - Uint32 startPageNo; - Uint32 startPageIndex; - Uint32 stopPageNo; - Uint32 fileNo; - }; - -public: - Dblqh(Block_context& ctx); - virtual ~Dblqh(); - - void receive_keyinfo(Signal*, Uint32 * data, Uint32 len); - void receive_attrinfo(Signal*, Uint32 * data, Uint32 len); - -private: - BLOCK_DEFINES(Dblqh); - - void execPACKED_SIGNAL(Signal* signal); - void execDEBUG_SIG(Signal* signal); - void execATTRINFO(Signal* signal); - void execKEYINFO(Signal* signal); - void execLQHKEYREQ(Signal* signal); - void execLQHKEYREF(Signal* signal); - void execCOMMIT(Signal* signal); - void execCOMPLETE(Signal* signal); - void execLQHKEYCONF(Signal* signal); - void execTESTSIG(Signal* signal); - void execLQH_RESTART_OP(Signal* signal); - void execCONTINUEB(Signal* signal); - void execSTART_RECREQ(Signal* signal); - void execSTART_RECCONF(Signal* signal); - void execEXEC_FRAGREQ(Signal* signal); - void execEXEC_FRAGCONF(Signal* signal); - void execEXEC_FRAGREF(Signal* signal); - void execSTART_EXEC_SR(Signal* signal); - void execEXEC_SRREQ(Signal* signal); - void execEXEC_SRCONF(Signal* signal); - void execREAD_PSEUDO_REQ(Signal* signal); - - void execDUMP_STATE_ORD(Signal* signal); - void execACC_ABORTCONF(Signal* signal); - void execNODE_FAILREP(Signal* signal); - void execCHECK_LCP_STOP(Signal* signal); - void execSEND_PACKED(Signal* signal); - void execTUP_ATTRINFO(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execLQHFRAGREQ(Signal* signal); - void 
execLQHADDATTREQ(Signal* signal); - void execTUP_ADD_ATTCONF(Signal* signal); - void execTUP_ADD_ATTRREF(Signal* signal); - void execACCFRAGCONF(Signal* signal); - void execACCFRAGREF(Signal* signal); - void execTUPFRAGCONF(Signal* signal); - void execTUPFRAGREF(Signal* signal); - void execTAB_COMMITREQ(Signal* signal); - void execACCSEIZECONF(Signal* signal); - void execACCSEIZEREF(Signal* signal); - void execREAD_NODESCONF(Signal* signal); - void execREAD_NODESREF(Signal* signal); - void execSTTOR(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execTUPSEIZECONF(Signal* signal); - void execTUPSEIZEREF(Signal* signal); - void execACCKEYCONF(Signal* signal); - void execACCKEYREF(Signal* signal); - void execTUPKEYCONF(Signal* signal); - void execTUPKEYREF(Signal* signal); - void execABORT(Signal* signal); - void execABORTREQ(Signal* signal); - void execCOMMITREQ(Signal* signal); - void execCOMPLETEREQ(Signal* signal); - void execMEMCHECKREQ(Signal* signal); - void execSCAN_FRAGREQ(Signal* signal); - void execSCAN_NEXTREQ(Signal* signal); - void execACC_SCANCONF(Signal* signal); - void execACC_SCANREF(Signal* signal); - void execNEXT_SCANCONF(Signal* signal); - void execNEXT_SCANREF(Signal* signal); - void execACC_TO_REF(Signal* signal); - void execSTORED_PROCCONF(Signal* signal); - void execSTORED_PROCREF(Signal* signal); - void execCOPY_FRAGREQ(Signal* signal); - void execPREPARE_COPY_FRAG_REQ(Signal* signal); - void execUPDATE_FRAG_DIST_KEY_ORD(Signal*); - void execCOPY_ACTIVEREQ(Signal* signal); - void execCOPY_STATEREQ(Signal* signal); - void execLQH_TRANSREQ(Signal* signal); - void execTRANSID_AI(Signal* signal); - void execINCL_NODEREQ(Signal* signal); - - void execLCP_FRAG_ORD(Signal* signal); - void execEMPTY_LCP_REQ(Signal* signal); - - void execSTART_FRAGREQ(Signal* signal); - void execSTART_RECREF(Signal* signal); - - void execGCP_SAVEREQ(Signal* signal); - void execFSOPENREF(Signal* signal); - void execFSOPENCONF(Signal* signal); - void execFSCLOSECONF(Signal* signal); - void execFSWRITECONF(Signal* signal); - void execFSWRITEREF(Signal* signal); - void execFSREADCONF(Signal* signal); - void execFSREADREF(Signal* signal); - void execSCAN_HBREP(Signal* signal); - void execTIME_SIGNAL(Signal* signal); - void execFSSYNCCONF(Signal* signal); - - void execALTER_TAB_REQ(Signal* signal); - void execALTER_TAB_CONF(Signal* signal); - - void execCREATE_TRIG_CONF(Signal* signal); - void execCREATE_TRIG_REF(Signal* signal); - void execCREATE_TRIG_REQ(Signal* signal); - - void execDROP_TRIG_CONF(Signal* signal); - void execDROP_TRIG_REF(Signal* signal); - void execDROP_TRIG_REQ(Signal* signal); - - void execPREP_DROP_TAB_REQ(Signal* signal); - void execWAIT_DROP_TAB_REQ(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - - void execLQH_ALLOCREQ(Signal* signal); - void execTUP_DEALLOCREQ(Signal* signal); - void execLQH_WRITELOG_REQ(Signal* signal); - - void execTUXFRAGCONF(Signal* signal); - void execTUXFRAGREF(Signal* signal); - void execTUX_ADD_ATTRCONF(Signal* signal); - void execTUX_ADD_ATTRREF(Signal* signal); - - // Statement blocks - - void init_acc_ptr_list(ScanRecord*); - bool seize_acc_ptr_list(ScanRecord*, Uint32); - void release_acc_ptr_list(ScanRecord*); - Uint32 get_acc_ptr_from_scan_record(ScanRecord*, Uint32, bool); - void set_acc_ptr_in_scan_record(ScanRecord*, Uint32, Uint32); - void i_get_acc_ptr(ScanRecord*, Uint32*&, Uint32); - - void removeTable(Uint32 tableId); - void sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId); - void 
sendEMPTY_LCP_CONF(Signal* signal, bool idle); - void sendLCP_FRAGIDREQ(Signal* signal); - void sendLCP_FRAG_REP(Signal * signal, const LcpRecord::FragOrd &) const; - - void updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId); - void LQHKEY_abort(Signal* signal, int errortype); - void LQHKEY_error(Signal* signal, int errortype); - void nextRecordCopy(Signal* signal); - Uint32 calculateHash(Uint32 tableId, const Uint32* src); - void continueAfterCheckLcpStopBlocked(Signal* signal); - void checkLcpStopBlockedLab(Signal* signal); - void sendCommittedTc(Signal* signal, BlockReference atcBlockref); - void sendCompletedTc(Signal* signal, BlockReference atcBlockref); - void sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref); - void sendCommitLqh(Signal* signal, BlockReference alqhBlockref); - void sendCompleteLqh(Signal* signal, BlockReference alqhBlockref); - void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr); - void sendPackedSignalTc(Signal* signal, HostRecord * ahostptr); - void cleanUp(Signal* signal); - void sendAttrinfoLoop(Signal* signal); - void sendAttrinfoSignal(Signal* signal); - void sendLqhAttrinfoSignal(Signal* signal); - void sendKeyinfoAcc(Signal* signal, Uint32 pos); - Uint32 initScanrec(const class ScanFragReq *); - void initScanTc(const class ScanFragReq *, - Uint32 transid1, - Uint32 transid2, - Uint32 fragId, - Uint32 nodeId); - void finishScanrec(Signal* signal); - void releaseScanrec(Signal* signal); - void seizeScanrec(Signal* signal); - Uint32 sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *); - void sendTCKEYREF(Signal*, Uint32 dst, Uint32 route, Uint32 cnt); - void sendScanFragConf(Signal* signal, Uint32 scanCompleted); - void initCopyrec(Signal* signal); - void initCopyTc(Signal* signal, Operation_t); - void sendCopyActiveConf(Signal* signal,Uint32 tableId); - void checkLcpCompleted(Signal* signal); - void checkLcpHoldop(Signal* signal); - bool checkLcpStarted(Signal* signal); - void checkLcpTupprep(Signal* signal); - void getNextFragForLcp(Signal* signal); - void sendAccContOp(Signal* signal); - void sendStartLcp(Signal* signal); - void setLogTail(Signal* signal, Uint32 keepGci); - Uint32 remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr, - const LogPartRecordPtr &sltLogPartPtr); - void checkGcpCompleted(Signal* signal, Uint32 pageWritten, Uint32 wordWritten); - void initFsopenconf(Signal* signal); - void initFsrwconf(Signal* signal, bool write); - void initLfo(Signal* signal); - void initLogfile(Signal* signal, Uint32 fileNo); - void initLogpage(Signal* signal); - void openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr); - void openLogfileInit(Signal* signal); - void openNextLogfile(Signal* signal); - void releaseLfo(Signal* signal); - void releaseLfoPages(Signal* signal); - void releaseLogpage(Signal* signal); - void seizeLfo(Signal* signal); - void seizeLogfile(Signal* signal); - void seizeLogpage(Signal* signal); - void writeFileDescriptor(Signal* signal); - void writeFileHeaderOpen(Signal* signal, Uint32 type); - void writeInitMbyte(Signal* signal); - void writeSinglePage(Signal* signal, Uint32 pageNo, - Uint32 wordWritten, Uint32 place); - void buildLinkedLogPageList(Signal* signal); - void changeMbyte(Signal* signal); - Uint32 checkIfExecLog(Signal* signal); - void checkNewMbyte(Signal* signal); - void checkReadExecSr(Signal* signal); - void checkScanTcCompleted(Signal* signal); - void closeFile(Signal* signal, LogFileRecordPtr logFilePtr, Uint32 place); - void completedLogPage(Signal* 
signal, Uint32 clpType, Uint32 place); - void deleteFragrec(Uint32 fragId); - void deleteTransidHash(Signal* signal); - void findLogfile(Signal* signal, - Uint32 fileNo, - LogPartRecordPtr flfLogPartPtr, - LogFileRecordPtr* parLogFilePtr); - void findPageRef(Signal* signal, CommitLogRecord* commitLogRecord); - int findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec); - void getFirstInLogQueue(Signal* signal); - bool getFragmentrec(Signal* signal, Uint32 fragId); - void initialiseAddfragrec(Signal* signal); - void initialiseAttrbuf(Signal* signal); - void initialiseDatabuf(Signal* signal); - void initialiseFragrec(Signal* signal); - void initialiseGcprec(Signal* signal); - void initialiseLcpRec(Signal* signal); - void initialiseLfo(Signal* signal); - void initialiseLogFile(Signal* signal); - void initialiseLogPage(Signal* signal); - void initialiseLogPart(Signal* signal); - void initialisePageRef(Signal* signal); - void initialiseScanrec(Signal* signal); - void initialiseTabrec(Signal* signal); - void initialiseTcrec(Signal* signal); - void initialiseTcNodeFailRec(Signal* signal); - void initFragrec(Signal* signal, - Uint32 tableId, - Uint32 fragId, - Uint32 copyType); - void initFragrecSr(Signal* signal); - void initGciInLogFileRec(Signal* signal, Uint32 noFdDesc); - void initLcpSr(Signal* signal, - Uint32 lcpNo, - Uint32 lcpId, - Uint32 tableId, - Uint32 fragId, - Uint32 fragPtr); - void initLogpart(Signal* signal); - void initLogPointers(Signal* signal); - void initReqinfoExecSr(Signal* signal); - bool insertFragrec(Signal* signal, Uint32 fragId); - void linkFragQueue(Signal* signal); - void linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr); - void logNextStart(Signal* signal); - void moveToPageRef(Signal* signal); - void readAttrinfo(Signal* signal); - void readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord); - void readExecLog(Signal* signal); - void readExecSrNewMbyte(Signal* signal); - void readExecSr(Signal* signal); - void readKey(Signal* signal); - void readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr); - void readLogHeader(Signal* signal); - Uint32 readLogword(Signal* signal); - Uint32 readLogwordExec(Signal* signal); - void readSinglePage(Signal* signal, Uint32 pageNo); - void releaseActiveCopy(Signal* signal); - void releaseAddfragrec(Signal* signal); - void releaseFragrec(); - void releaseOprec(Signal* signal); - void releasePageRef(Signal* signal); - void releaseMmPages(Signal* signal); - void releasePrPages(Signal* signal); - void releaseTcrec(Signal* signal, TcConnectionrecPtr tcConnectptr); - void releaseTcrecLog(Signal* signal, TcConnectionrecPtr tcConnectptr); - void releaseWaitQueue(Signal* signal); - void removeLogTcrec(Signal* signal); - void removePageRef(Signal* signal); - Uint32 returnExecLog(Signal* signal); - int saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length); - void seizeAddfragrec(Signal* signal); - void seizeAttrinbuf(Signal* signal); - Uint32 seize_attrinbuf(); - Uint32 release_attrinbuf(Uint32); - Uint32 copy_bounds(Uint32 * dst, TcConnectionrec*); - - void seizeFragmentrec(Signal* signal); - void seizePageRef(Signal* signal); - void seizeTcrec(); - void seizeTupkeybuf(Signal* signal); - void sendAborted(Signal* signal); - void sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus); - void sendTupkey(Signal* signal); - void startExecSr(Signal* signal); - void startNextExecSr(Signal* signal); - void startTimeSupervision(Signal* signal); - void stepAhead(Signal* signal, Uint32 
stepAheadWords); - void systemError(Signal* signal, int line); - void writeAbortLog(Signal* signal); - void writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr); - void writeCompletedGciLog(Signal* signal); - void writeDbgInfoPageHeader(LogPageRecordPtr logPagePtr, Uint32 place, - Uint32 pageNo, Uint32 wordWritten); - void writeDirty(Signal* signal, Uint32 place); - void writeKey(Signal* signal); - void writeLogHeader(Signal* signal); - void writeLogWord(Signal* signal, Uint32 data); - void writeNextLog(Signal* signal); - void errorReport(Signal* signal, int place); - void warningReport(Signal* signal, int place); - void invalidateLogAfterLastGCI(Signal *signal); - void readFileInInvalidate(Signal *signal, bool stepNext); - void exitFromInvalidate(Signal* signal); - Uint32 calcPageCheckSum(LogPageRecordPtr logP); - Uint32 handleLongTupKey(Signal* signal, Uint32* dataPtr, Uint32 len); - - // Generated statement blocks - void systemErrorLab(Signal* signal, int line); - void initFourth(Signal* signal); - void packLqhkeyreqLab(Signal* signal); - void sendNdbSttorryLab(Signal* signal); - void execSrCompletedLab(Signal* signal); - void execLogRecord(Signal* signal); - void srPhase3Comp(Signal* signal); - void srLogLimits(Signal* signal); - void srGciLimits(Signal* signal); - void srPhase3Start(Signal* signal); - void checkStartCompletedLab(Signal* signal); - void continueAbortLab(Signal* signal); - void abortContinueAfterBlockedLab(Signal* signal, bool canBlock); - void abortCommonLab(Signal* signal); - void localCommitLab(Signal* signal); - void abortErrorLab(Signal* signal); - void continueAfterReceivingAllAiLab(Signal* signal); - void abortStateHandlerLab(Signal* signal); - void writeAttrinfoLab(Signal* signal); - void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length); - void abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode); - void localAbortStateHandlerLab(Signal* signal); - void logLqhkeyreqLab(Signal* signal); - void lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length); - void rwConcludedAiLab(Signal* signal); - void aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length); - void takeOverErrorLab(Signal* signal); - void endgettupkeyLab(Signal* signal); - void noFreeRecordLab(Signal* signal, - const class LqhKeyReq * lqhKeyReq, - Uint32 errorCode); - void logLqhkeyrefLab(Signal* signal); - void closeCopyLab(Signal* signal); - void commitReplyLab(Signal* signal); - void completeUnusualLab(Signal* signal); - void completeTransNotLastLab(Signal* signal); - void completedLab(Signal* signal); - void copyCompletedLab(Signal* signal); - void completeLcpRoundLab(Signal* signal, Uint32 lcpId); - void continueAfterLogAbortWriteLab(Signal* signal); - void sendAttrinfoLab(Signal* signal); - void sendExecConf(Signal* signal); - void execSr(Signal* signal); - void srFourthComp(Signal* signal); - void timeSup(Signal* signal); - void closeCopyRequestLab(Signal* signal); - void closeScanRequestLab(Signal* signal); - void scanTcConnectLab(Signal* signal, Uint32 startTcCon, Uint32 fragId); - void initGcpRecLab(Signal* signal); - void prepareContinueAfterBlockedLab(Signal* signal); - void commitContinueAfterBlockedLab(Signal* signal); - void continueCopyAfterBlockedLab(Signal* signal); - void continueFirstCopyAfterBlockedLab(Signal* signal); - void continueFirstScanAfterBlockedLab(Signal* signal); - void continueScanAfterBlockedLab(Signal* signal); - void continueScanReleaseAfterBlockedLab(Signal* signal); - void 
continueCloseScanAfterBlockedLab(Signal* signal); - void continueCloseCopyAfterBlockedLab(Signal* signal); - void sendExecFragRefLab(Signal* signal); - void fragrefLab(Signal* signal, BlockReference retRef, - Uint32 retPtr, Uint32 errorCode); - void abortAddFragOps(Signal* signal); - void rwConcludedLab(Signal* signal); - void sendsttorryLab(Signal* signal); - void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32); - void startphase2Lab(Signal* signal, Uint32 config); - void startphase3Lab(Signal* signal); - void startphase4Lab(Signal* signal); - void startphase6Lab(Signal* signal); - void moreconnectionsLab(Signal* signal); - void scanReleaseLocksLab(Signal* signal); - void closeScanLab(Signal* signal); - void nextScanConfLoopLab(Signal* signal); - void scanNextLoopLab(Signal* signal); - void commitReqLab(Signal* signal, Uint32 gci); - void completeTransLastLab(Signal* signal); - void tupScanCloseConfLab(Signal* signal); - void tupCopyCloseConfLab(Signal* signal); - void accScanCloseConfLab(Signal* signal); - void accCopyCloseConfLab(Signal* signal); - void nextScanConfScanLab(Signal* signal); - void nextScanConfCopyLab(Signal* signal); - void continueScanNextReqLab(Signal* signal); - void keyinfoLab(const Uint32 * src, const Uint32 * end); - void copySendTupkeyReqLab(Signal* signal); - void storedProcConfScanLab(Signal* signal); - void storedProcConfCopyLab(Signal* signal); - void copyStateFinishedLab(Signal* signal); - void lcpCompletedLab(Signal* signal); - void lcpStartedLab(Signal* signal); - void contChkpNextFragLab(Signal* signal); - void startLcpRoundLab(Signal* signal); - void startFragRefLab(Signal* signal); - void srCompletedLab(Signal* signal); - void openFileInitLab(Signal* signal); - void openSrFrontpageLab(Signal* signal); - void openSrLastFileLab(Signal* signal); - void openSrNextFileLab(Signal* signal); - void openExecSrStartLab(Signal* signal); - void openExecSrNewMbyteLab(Signal* signal); - void openSrFourthPhaseLab(Signal* signal); - void openSrFourthZeroSkipInitLab(Signal* signal); - void openSrFourthZeroLab(Signal* signal); - void openExecLogLab(Signal* signal); - void checkInitCompletedLab(Signal* signal); - void closingSrLab(Signal* signal); - void closeExecSrLab(Signal* signal); - void execLogComp(Signal* signal); - void closeWriteLogLab(Signal* signal); - void closeExecLogLab(Signal* signal); - void writePageZeroLab(Signal* signal); - void lastWriteInFileLab(Signal* signal); - void initWriteEndLab(Signal* signal); - void initFirstPageLab(Signal* signal); - void writeGciZeroLab(Signal* signal); - void writeDirtyLab(Signal* signal); - void writeInitMbyteLab(Signal* signal); - void writeLogfileLab(Signal* signal); - void firstPageWriteLab(Signal* signal); - void readSrLastMbyteLab(Signal* signal); - void readSrLastFileLab(Signal* signal); - void readSrNextFileLab(Signal* signal); - void readExecSrLab(Signal* signal); - void readExecLogLab(Signal* signal); - void readSrFourthPhaseLab(Signal* signal); - void readSrFourthZeroLab(Signal* signal); - void copyLqhKeyRefLab(Signal* signal); - void restartOperationsLab(Signal* signal); - void lqhTransNextLab(Signal* signal); - void restartOperationsAfterStopLab(Signal* signal); - void startphase1Lab(Signal* signal, Uint32 config, Uint32 nodeId); - void tupkeyConfLab(Signal* signal); - void copyTupkeyConfLab(Signal* signal); - void scanTupkeyConfLab(Signal* signal); - void scanTupkeyRefLab(Signal* signal); - void accScanConfScanLab(Signal* signal); - void accScanConfCopyLab(Signal* signal); - void 
scanLockReleasedLab(Signal* signal); - void openSrFourthNextLab(Signal* signal); - void closingInitLab(Signal* signal); - void closeExecSrCompletedLab(Signal* signal); - void readSrFrontpageLab(Signal* signal); - - void sendAddFragReq(Signal* signal); - void sendAddAttrReq(Signal* signal); - void checkDropTab(Signal*); - Uint32 checkDropTabState(Tablerec::TableStatus, Uint32) const; - - // Initialisation - void initData(); - void initRecords(); - - void define_backup(Signal*); - void execDEFINE_BACKUP_REF(Signal*); - void execDEFINE_BACKUP_CONF(Signal*); - void execBACKUP_FRAGMENT_REF(Signal* signal); - void execBACKUP_FRAGMENT_CONF(Signal* signal); - void execLCP_PREPARE_REF(Signal* signal); - void execLCP_PREPARE_CONF(Signal* signal); - void execEND_LCPREF(Signal* signal); - void execEND_LCPCONF(Signal* signal); - Uint32 m_backup_ptr; - - void send_restore_lcp(Signal * signal); - void execRESTORE_LCP_REF(Signal* signal); - void execRESTORE_LCP_CONF(Signal* signal); - - Dbtup* c_tup; - Dbacc* c_acc; - - /** - * Read primary key from tup - */ - Uint32 readPrimaryKeys(ScanRecord*, TcConnectionrec*, Uint32 * dst); - - /** - * Read primary key from operation - */ -public: - Uint32 readPrimaryKeys(Uint32 opPtrI, Uint32 * dst, bool xfrm); -private: - - void acckeyconf_tupkeyreq(Signal*, TcConnectionrec*, Fragrecord*, Uint32, Uint32); - void acckeyconf_load_diskpage(Signal*,TcConnectionrecPtr,Fragrecord*,Uint32); - - void handle_nr_copy(Signal*, Ptr); - void exec_acckeyreq(Signal*, Ptr); - int compare_key(const TcConnectionrec*, const Uint32 * ptr, Uint32 len); - void nr_copy_delete_row(Signal*, Ptr, Local_key*, Uint32); -public: - struct Nr_op_info - { - Uint32 m_ptr_i; - Uint32 m_tup_frag_ptr_i; - Uint32 m_gci; - Uint32 m_page_id; - Local_key m_disk_ref; - }; - void get_nr_op_info(Nr_op_info*, Uint32 page_id = RNIL); - void nr_delete_complete(Signal*, Nr_op_info*); - -public: - void acckeyconf_load_diskpage_callback(Signal*, Uint32, Uint32); - -private: - void next_scanconf_load_diskpage(Signal* signal, - ScanRecordPtr scanPtr, - Ptr regTcPtr, - Fragrecord* fragPtrP); - - void next_scanconf_tupkeyreq(Signal* signal, ScanRecordPtr, - TcConnectionrec * regTcPtr, - Fragrecord* fragPtrP, - Uint32 disk_page); - -public: - void next_scanconf_load_diskpage_callback(Signal* signal, Uint32, Uint32); - - void tupcommit_conf_callback(Signal* signal, Uint32 tcPtrI); -private: - void tupcommit_conf(Signal* signal, TcConnectionrec *,Fragrecord *); - -// ---------------------------------------------------------------- -// These are variables handling the records. For most records one -// pointer to the array of structs, one pointer-struct, a file size -// and a first free record variable. The pointer struct are temporary -// variables that are kept on the class object since there are often a -// great deal of those variables that exist simultaneously and -// thus no perfect solution of handling them is currently available. -// ---------------------------------------------------------------- -/* ------------------------------------------------------------------------- */ -/* POSITIONS WITHIN THE ATTRINBUF AND THE MAX SIZE OF DATA WITHIN AN */ -/* ATTRINBUF. 
*/ -/* ------------------------------------------------------------------------- */ - - -#define ZADDFRAGREC_FILE_SIZE 1 - AddFragRecord *addFragRecord; - AddFragRecordPtr addfragptr; - UintR cfirstfreeAddfragrec; - UintR caddfragrecFileSize; - -#define ZATTRINBUF_FILE_SIZE 12288 // 1.5 MByte -#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGHT'-VARIABLE. */ -#define ZINBUF_NEXT 25 /* POSITION OF 'NEXT'-VARIABLE. */ - Attrbuf *attrbuf; - AttrbufPtr attrinbufptr; - UintR cfirstfreeAttrinbuf; - UintR cattrinbufFileSize; - Uint32 c_no_attrinbuf_recs; - -#define ZDATABUF_FILE_SIZE 10000 // 200 kByte - Databuf *databuf; - DatabufPtr databufptr; - UintR cfirstfreeDatabuf; - UintR cdatabufFileSize; - -// Configurable - FragrecordPtr fragptr; - ArrayPool c_fragment_pool; - -#define ZGCPREC_FILE_SIZE 1 - GcpRecord *gcpRecord; - GcpRecordPtr gcpPtr; - UintR cgcprecFileSize; - -// MAX_NDB_NODES is the size of this array - HostRecord *hostRecord; - UintR chostFileSize; - -#define ZNO_CONCURRENT_LCP 1 - LcpRecord *lcpRecord; - LcpRecordPtr lcpPtr; - UintR cfirstfreeLcpLoc; - UintR clcpFileSize; - -#define ZLOG_PART_FILE_SIZE 4 - LogPartRecord *logPartRecord; - LogPartRecordPtr logPartPtr; - UintR clogPartFileSize; - Uint32 clogFileSize; // In MBYTE - Uint32 cmaxLogFilesInPageZero; // - -// Configurable - LogFileRecord *logFileRecord; - LogFileRecordPtr logFilePtr; - UintR cfirstfreeLogFile; - UintR clogFileFileSize; - -#define ZLFO_MIN_FILE_SIZE 256 -// RedoBuffer/32K minimum ZLFO_MIN_FILE_SIZE - LogFileOperationRecord *logFileOperationRecord; - LogFileOperationRecordPtr lfoPtr; - UintR cfirstfreeLfo; - UintR clfoFileSize; - - LogPageRecord *logPageRecord; - void *logPageRecordUnaligned; - LogPageRecordPtr logPagePtr; - UintR cfirstfreeLogPage; - UintR clogPageFileSize; - -#define ZPAGE_REF_FILE_SIZE 20 - PageRefRecord *pageRefRecord; - PageRefRecordPtr pageRefPtr; - UintR cfirstfreePageRef; - UintR cpageRefFileSize; - -// Configurable - ArrayPool c_scanRecordPool; - ScanRecordPtr scanptr; - UintR cscanNoFreeRec; - Uint32 cscanrecFileSize; - -// Configurable - Tablerec *tablerec; - TablerecPtr tabptr; - UintR ctabrecFileSize; - -// Configurable - TcConnectionrec *tcConnectionrec; - TcConnectionrecPtr tcConnectptr; - UintR cfirstfreeTcConrec; - UintR ctcConnectrecFileSize; - -// MAX_NDB_NODES is the size of this array - TcNodeFailRecord *tcNodeFailRecord; - TcNodeFailRecordPtr tcNodeFailptr; - UintR ctcNodeFailrecFileSize; - - Uint16 terrorCode; - - Uint32 c_firstInNodeGroup; - -// ------------------------------------------------------------------------ -// These variables are used to store block state which do not need arrays -// of struct's. -// ------------------------------------------------------------------------ - Uint32 c_lcpId; - Uint32 cnoOfFragsCheckpointed; - -/* ------------------------------------------------------------------------- */ -// cmaxWordsAtNodeRec keeps track of how many words that currently are -// outstanding in a node recovery situation. -// cbookedAccOps keeps track of how many operation records that have been -// booked in ACC for the scan processes. -// cmaxAccOps contains the maximum number of operation records which can be -// allocated for scan purposes in ACC. -/* ------------------------------------------------------------------------- */ - UintR cmaxWordsAtNodeRec; - UintR cbookedAccOps; - UintR cmaxAccOps; -/* ------------------------------------------------------------------------- */ -/*THIS STATE VARIABLE IS ZTRUE IF AN ADD NODE IS ONGOING. 
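[Stepping back from the declarations above: the repeated convention of a record array, a Z..._FILE_SIZE constant and a cfirstfree... head, together with the seize*/release* routines declared earlier, is a plain intrusive free list. A generic sketch of that convention, with invented names; the real records thread their free lists through record-specific link members, and RNIL_LIKE only mimics the block's RNIL sentinel.]

  #include <cstdint>
  #include <vector>

  struct PoolRecord {
    uint32_t nextFree;   // index of the next free record, or RNIL_LIKE if none
    // ... payload ...
  };

  static const uint32_t RNIL_LIKE = 0xffffffff;

  struct Pool {
    std::vector<PoolRecord> records;
    uint32_t firstFree = RNIL_LIKE;

    explicit Pool(uint32_t fileSize) : records(fileSize) {
      // Link all records into one free list at initialisation time.
      for (uint32_t i = 0; i < fileSize; i++)
        records[i].nextFree = (i + 1 < fileSize) ? i + 1 : RNIL_LIKE;
      firstFree = fileSize ? 0 : RNIL_LIKE;
    }

    uint32_t seize() {                 // cf. seizeTcrec(), seizeLogpage()
      uint32_t i = firstFree;
      if (i != RNIL_LIKE) firstFree = records[i].nextFree;
      return i;
    }

    void release(uint32_t i) {         // cf. releaseTcrec(), releaseLogpage()
      records[i].nextFree = firstFree;
      firstFree = i;
    }
  };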
ADD NODE MEANS */ -/*THAT CONNECTIONS ARE SET-UP TO THE NEW NODE. */ -/* ------------------------------------------------------------------------- */ - Uint8 caddNodeState; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE SPECIFIES WHICH TYPE OF RESTART THAT IS ONGOING */ -/* ------------------------------------------------------------------------- */ - Uint16 cstartType; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE INDICATES WHETHER AN INITIAL RESTART IS ONGOING OR NOT. */ -/* ------------------------------------------------------------------------- */ - Uint8 cinitialStartOngoing; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE COMPLETED EXECUTING */ -/*THEIR UNDO LOG. */ -/* ------------------------------------------------------------------------- */ - ExecUndoLogState csrExecUndoLogState; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE KEEPS TRACK OF WHEN TUP AND ACC HAVE CONFIRMED COMPLETION */ -/*OF A LOCAL CHECKPOINT ROUND. */ -/* ------------------------------------------------------------------------- */ - LcpCloseState clcpCompletedState; -/* ------------------------------------------------------------------------- */ -/*DURING CONNECTION PROCESSES IN SYSTEM RESTART THESE VARIABLES KEEP TRACK */ -/*OF HOW MANY CONNECTIONS AND RELEASES THAT ARE TO BE PERFORMED. */ -/* ------------------------------------------------------------------------- */ -/***************************************************************************>*/ -/*THESE VARIABLES CONTAIN INFORMATION USED DURING SYSTEM RESTART. */ -/***************************************************************************>*/ -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE IS ZTRUE IF THE SIGNAL START_REC_REQ HAVE BEEN RECEIVED. */ -/*RECEPTION OF THIS SIGNAL INDICATES THAT ALL FRAGMENTS THAT THIS NODE */ -/*SHOULD START HAVE BEEN RECEIVED. */ -/* ------------------------------------------------------------------------- */ - Uint8 cstartRecReq; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE KEEPS TRACK OF HOW MANY FRAGMENTS THAT PARTICIPATE IN */ -/*EXECUTING THE LOG. IF ZERO WE DON'T NEED TO EXECUTE THE LOG AT ALL. */ -/* ------------------------------------------------------------------------- */ - Uint32 cnoFragmentsExecSr; - - /** - * This is no of sent GSN_EXEC_FRAGREQ during this log phase - */ - Uint32 cnoOutstandingExecFragReq; - -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE KEEPS TRACK OF WHICH OF THE FIRST TWO RESTART PHASES THAT */ -/*HAVE COMPLETED. */ -/* ------------------------------------------------------------------------- */ - Uint8 csrPhaseStarted; -/* ------------------------------------------------------------------------- */ -/*NUMBER OF PHASES COMPLETED OF EXECUTING THE FRAGMENT LOG. */ -/* ------------------------------------------------------------------------- */ - Uint8 csrPhasesCompleted; -/* ------------------------------------------------------------------------- */ -/*THE BLOCK REFERENCE OF THE MASTER DIH DURING SYSTEM RESTART. 
*/ -/* ------------------------------------------------------------------------- */ - BlockReference cmasterDihBlockref; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE IS THE HEAD OF A LINKED LIST OF FRAGMENTS WAITING TO BE */ -/*RESTORED FROM DISK. */ -/* ------------------------------------------------------------------------- */ - DLFifoList c_lcp_waiting_fragments; // StartFragReq'ed - DLFifoList c_lcp_restoring_fragments; // Restoring as we speek - DLFifoList c_lcp_complete_fragments; // Restored - -/* ------------------------------------------------------------------------- */ -/*USED DURING SYSTEM RESTART, INDICATES THE OLDEST GCI THAT CAN BE RESTARTED */ -/*FROM AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG TAIL. */ -/* ------------------------------------------------------------------------- */ - UintR crestartOldestGci; -/* ------------------------------------------------------------------------- */ -/*USED DURING SYSTEM RESTART, INDICATES THE NEWEST GCI THAT CAN BE RESTARTED */ -/*AFTER THIS SYSTEM RESTART. USED TO FIND THE LOG HEAD. */ -/* ------------------------------------------------------------------------- */ - UintR crestartNewestGci; -/* ------------------------------------------------------------------------- */ -/*THE NUMBER OF LOG FILES. SET AS A PARAMETER WHEN NDB IS STARTED. */ -/* ------------------------------------------------------------------------- */ - UintR cnoLogFiles; -/* ------------------------------------------------------------------------- */ -/*THESE TWO VARIABLES CONTAIN THE NEWEST GCI RECEIVED IN THE BLOCK AND THE */ -/*NEWEST COMPLETED GCI IN THE BLOCK. */ -/* ------------------------------------------------------------------------- */ - UintR cnewestGci; - UintR cnewestCompletedGci; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE ONLY PASSES INFORMATION FROM STTOR TO STTORRY = TEMPORARY */ -/* ------------------------------------------------------------------------- */ - Uint16 csignalKey; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE CONTAINS THE CURRENT START PHASE IN THE BLOCK. IS ZNIL IF */ -/*NO SYSTEM RESTART IS ONGOING. */ -/* ------------------------------------------------------------------------- */ - Uint16 cstartPhase; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE CONTAIN THE CURRENT GLOBAL CHECKPOINT RECORD. IT'S RNIL IF */ -/*NOT A GCP SAVE IS ONGOING. */ -/* ------------------------------------------------------------------------- */ - UintR ccurrentGcprec; -/* ------------------------------------------------------------------------- */ -/*THESE VARIABLES ARE USED TO KEEP TRACK OF ALL ACTIVE COPY FRAGMENTS IN LQH.*/ -/* ------------------------------------------------------------------------- */ - Uint8 cnoActiveCopy; - UintR cactiveCopy[4]; - -/* ------------------------------------------------------------------------- */ -/*THESE VARIABLES CONTAIN THE BLOCK REFERENCES OF THE OTHER NDB BLOCKS. 
*/ -/*ALSO THE BLOCK REFERENCE OF MY OWN BLOCK = LQH */ -/* ------------------------------------------------------------------------- */ - BlockReference caccBlockref; - BlockReference ctupBlockref; - BlockReference ctuxBlockref; - BlockReference cownref; - UintR cLqhTimeOutCount; - UintR cLqhTimeOutCheckCount; - UintR cnoOfLogPages; -/* ------------------------------------------------------------------------- */ -/*THIS VARIABLE CONTAINS MY OWN PROCESSOR ID. */ -/* ------------------------------------------------------------------------- */ - NodeId cownNodeid; - -/* ------------------------------------------------------------------------- */ -/*THESE VARIABLES CONTAIN INFORMATION ABOUT THE OTHER NODES IN THE SYSTEM */ -/*THESE VARIABLES ARE MOSTLY USED AT SYSTEM RESTART AND ADD NODE TO SET-UP */ -/*AND RELEASE CONNECTIONS TO OTHER NODES IN THE CLUSTER. */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/*THIS ARRAY CONTAINS THE PROCESSOR ID'S OF THE NODES THAT ARE ALIVE. */ -/*CNO_OF_NODES SPECIFIES HOW MANY NODES THAT ARE CURRENTLY ALIVE. */ -/*CNODE_VERSION SPECIFIES THE NDB VERSION EXECUTING ON THE NODE. */ -/* ------------------------------------------------------------------------- */ - UintR cpackedListIndex; - Uint16 cpackedList[MAX_NDB_NODES]; - UintR cnodeData[MAX_NDB_NODES]; - UintR cnodeStatus[MAX_NDB_NODES]; - UintR cnoOfNodes; - - NdbNodeBitmask m_sr_nodes; - NdbNodeBitmask m_sr_exec_sr_req; - NdbNodeBitmask m_sr_exec_sr_conf; - -/* ------------------------------------------------------------------------- */ -/* THIS VARIABLE CONTAINS THE DIRECTORY OF A HASH TABLE OF ALL ACTIVE */ -/* OPERATION IN THE BLOCK. IT IS USED TO BE ABLE TO QUICKLY ABORT AN */ -/* OPERATION WHERE THE CONNECTION WAS LOST DUE TO NODE FAILURES. IT IS */ -/* ACTUALLY USED FOR ALL ABORTS COMMANDED BY TC. 
*/ -/* ------------------------------------------------------------------------- */ - UintR preComputedRequestInfoMask; - UintR ctransidHash[1024]; - - Uint32 c_diskless; - Uint32 c_o_direct; - Uint32 c_error_insert_table_id; - -public: - bool is_same_trans(Uint32 opId, Uint32 trid1, Uint32 trid2); - void get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci); - void accminupdate(Signal*, Uint32 opPtrI, const Local_key*); - - /** - * - */ - struct CommitAckMarker { - CommitAckMarker() {} - Uint32 transid1; - Uint32 transid2; - - Uint32 apiRef; // Api block ref - Uint32 apiOprec; // Connection Object in NDB API - Uint32 tcNodeId; - union { Uint32 nextPool; Uint32 nextHash; }; - Uint32 prevHash; - - inline bool equal(const CommitAckMarker & p) const { - return ((p.transid1 == transid1) && (p.transid2 == transid2)); - } - - inline Uint32 hashValue() const { - return transid1; - } - }; - - typedef Ptr CommitAckMarkerPtr; - ArrayPool m_commitAckMarkerPool; - DLHashTable m_commitAckMarkerHash; - typedef DLHashTable::Iterator CommitAckMarkerIterator; - void execREMOVE_MARKER_ORD(Signal* signal); - void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i); - - struct Counters { - Counters() {} - Uint32 operations; - - inline void clear(){ - operations = 0; - } - }; - - Counters c_Counters; - - inline bool getAllowRead() const { - return getNodeState().startLevel < NodeState::SL_STOPPING_3; - } - - DLHashTable c_scanTakeOverHash; - - inline bool TRACE_OP_CHECK(const TcConnectionrec* regTcPtr); -#ifdef ERROR_INSERT - void TRACE_OP_DUMP(const TcConnectionrec* regTcPtr, const char * pos); -#endif -}; - -inline -bool -Dblqh::ScanRecord::check_scan_batch_completed() const -{ - Uint32 max_rows = m_max_batch_size_rows; - Uint32 max_bytes = m_max_batch_size_bytes; - - return (max_rows > 0 && (m_curr_batch_size_rows >= max_rows)) || - (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes)); -} - -inline -void -Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index) -{ - if (index == 0) { - acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0]; - } else { - Uint32 attr_buf_index, attr_buf_rec; - - AttrbufPtr regAttrPtr; - jam(); - attr_buf_rec= (index + 31) / 32; - attr_buf_index= (index - 1) & 31; - regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec]; - ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf); - acc_ptr= (Uint32*)®AttrPtr.p->attrbuf[attr_buf_index]; - } -} - -inline -bool -Dblqh::is_same_trans(Uint32 opId, Uint32 trid1, Uint32 trid2) -{ - TcConnectionrecPtr regTcPtr; - regTcPtr.i= opId; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - return ((regTcPtr.p->transid[0] == trid1) && - (regTcPtr.p->transid[1] == trid2)); -} - -inline -void -Dblqh::get_op_info(Uint32 opId, Uint32 *hash, Uint32* gci) -{ - TcConnectionrecPtr regTcPtr; - regTcPtr.i= opId; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - *hash= regTcPtr.p->hashValue; - *gci= regTcPtr.p->gci; -} - -#include "../dbacc/Dbacc.hpp" - -inline -void -Dblqh::accminupdate(Signal* signal, Uint32 opId, const Local_key* key) -{ - TcConnectionrecPtr regTcPtr; - regTcPtr.i= opId; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - signal->theData[0] = regTcPtr.p->accConnectrec; - signal->theData[1] = key->m_page_no << MAX_TUPLES_BITS | key->m_page_idx; - c_acc->execACCMINUPDATE(signal); - - if (ERROR_INSERTED(5712) || ERROR_INSERTED(5713)) - ndbout << " LK: " << *key; - regTcPtr.p->m_row_id = *key; -} - -inline -bool -Dblqh::TRACE_OP_CHECK(const TcConnectionrec* 
regTcPtr) -{ - return (ERROR_INSERTED(5712) && - (regTcPtr->operation == ZINSERT || - regTcPtr->operation == ZDELETE)) || - ERROR_INSERTED(5713); -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp deleted file mode 100644 index 79b3c6ce8d8..00000000000 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp +++ /dev/null @@ -1,415 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#include -#define DBLQH_C -#include "Dblqh.hpp" -#include - -#define DEBUG(x) { ndbout << "LQH::" << x << endl; } - -void Dblqh::initData() -{ - caddfragrecFileSize = ZADDFRAGREC_FILE_SIZE; - cattrinbufFileSize = ZATTRINBUF_FILE_SIZE; - c_no_attrinbuf_recs= ZATTRINBUF_FILE_SIZE; - cdatabufFileSize = ZDATABUF_FILE_SIZE; - cgcprecFileSize = ZGCPREC_FILE_SIZE; - chostFileSize = MAX_NDB_NODES; - clcpFileSize = ZNO_CONCURRENT_LCP; - clfoFileSize = 0; - clogFileFileSize = 0; - clogPartFileSize = ZLOG_PART_FILE_SIZE; - cpageRefFileSize = ZPAGE_REF_FILE_SIZE; - cscanrecFileSize = 0; - ctabrecFileSize = 0; - ctcConnectrecFileSize = 0; - ctcNodeFailrecFileSize = MAX_NDB_NODES; - - addFragRecord = 0; - attrbuf = 0; - databuf = 0; - gcpRecord = 0; - hostRecord = 0; - lcpRecord = 0; - logPartRecord = 0; - logFileRecord = 0; - logFileOperationRecord = 0; - logPageRecord = 0; - logPageRecordUnaligned= 0; - pageRefRecord = 0; - tablerec = 0; - tcConnectionrec = 0; - tcNodeFailRecord = 0; - - // Records with constant sizes - - cLqhTimeOutCount = 0; - cLqhTimeOutCheckCount = 0; - cbookedAccOps = 0; - m_backup_ptr = RNIL; - clogFileSize = 16; - cmaxLogFilesInPageZero = 40; -}//Dblqh::initData() - -void Dblqh::initRecords() -{ - // Records with dynamic sizes - addFragRecord = (AddFragRecord*)allocRecord("AddFragRecord", - sizeof(AddFragRecord), - caddfragrecFileSize); - attrbuf = (Attrbuf*)allocRecord("Attrbuf", - sizeof(Attrbuf), - cattrinbufFileSize); - - databuf = (Databuf*)allocRecord("Databuf", - sizeof(Databuf), - cdatabufFileSize); - - gcpRecord = (GcpRecord*)allocRecord("GcpRecord", - sizeof(GcpRecord), - cgcprecFileSize); - - hostRecord = (HostRecord*)allocRecord("HostRecord", - sizeof(HostRecord), - chostFileSize); - - lcpRecord = (LcpRecord*)allocRecord("LcpRecord", - sizeof(LcpRecord), - clcpFileSize); - - for(Uint32 i = 0; i -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// Use DEBUG to print messages that should be -// seen only when we debug the product -#ifdef VM_TRACE 
-#define DEBUG(x) ndbout << "DBLQH: "<< x << endl; -static -NdbOut & -operator<<(NdbOut& out, Dblqh::TcConnectionrec::TransactionState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::TcConnectionrec::LogWriteState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::TcConnectionrec::ListState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::TcConnectionrec::AbortState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::ScanRecord::ScanState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::LogFileOperationRecord::LfoState state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){ - out << (int)state; - return out; -} - -static -NdbOut & -operator<<(NdbOut& out, Operation_t op) -{ - switch(op){ - case ZREAD: out << "READ"; break; - case ZREAD_EX: out << "READ-EX"; break; - case ZINSERT: out << "INSERT"; break; - case ZUPDATE: out << "UPDATE"; break; - case ZDELETE: out << "DELETE"; break; - case ZWRITE: out << "WRITE"; break; - } - return out; -} - -#else -#define DEBUG(x) -#endif - -//#define MARKER_TRACE 1 -//#define TRACE_SCAN_TAKEOVER 1 - -#ifndef DEBUG_REDO -#define DEBUG_REDO 0 -#endif - -const Uint32 NR_ScanNo = 0; - -#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR -#include -static NdbOut * tracenrout = 0; -static int TRACENR_FLAG = 0; -#define TRACENR(x) (* tracenrout) << x -#define SET_TRACENR_FLAG TRACENR_FLAG = 1 -#define CLEAR_TRACENR_FLAG TRACENR_FLAG = 0 -#else -#define TRACENR_FLAG 0 -#define TRACENR(x) -#define SET_TRACENR_FLAG -#define CLEAR_TRACENR_FLAG -#endif - -#ifdef ERROR_INSERT -static NdbOut * traceopout = 0; -#define TRACE_OP(regTcPtr, place) do { if (TRACE_OP_CHECK(regTcPtr)) TRACE_OP_DUMP(regTcPtr, place); } while(0) -#else -#define TRACE_OP(x, y) {} -#endif - -/* ------------------------------------------------------------------------- */ -/* ------- SEND SYSTEM ERROR ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::systemError(Signal* signal, int line) -{ - signal->theData[0] = 2304; - execDUMP_STATE_ORD(signal); - progError(line, NDBD_EXIT_NDBREQUIRE); -}//Dblqh::systemError() - -/* *************** */ -/* ACCSEIZEREF > */ -/* *************** */ -void Dblqh::execACCSEIZEREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dblqh::execACCSEIZEREF() - -/* ******************************************************>> */ -/* THIS SIGNAL IS USED TO HANDLE REAL-TIME */ -/* BREAKS THAT ARE NECESSARY TO ENSURE REAL-TIME */ -/* OPERATION OF LQH. */ -/* This signal is also used for signal loops, for example */ -/* the timeout handling for writing logs every second. 
*/ -/* ******************************************************>> */ -void Dblqh::execCONTINUEB(Signal* signal) -{ - jamEntry(); - Uint32 tcase = signal->theData[0]; - Uint32 data0 = signal->theData[1]; - Uint32 data1 = signal->theData[2]; - Uint32 data2 = signal->theData[3]; -#if 0 - if (tcase == RNIL) { - tcConnectptr.i = data0; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - ndbout << "State = " << tcConnectptr.p->transactionState; - ndbout << " seqNoReplica = " << tcConnectptr.p->seqNoReplica; - ndbout << " tcNodeFailrec = " << tcConnectptr.p->tcNodeFailrec; - ndbout << " activeCreat = " << tcConnectptr.p->activeCreat; - ndbout << endl; - ndbout << "tupkeyData0 = " << tcConnectptr.p->tupkeyData[0]; - ndbout << "tupkeyData1 = " << tcConnectptr.p->tupkeyData[1]; - ndbout << "tupkeyData2 = " << tcConnectptr.p->tupkeyData[2]; - ndbout << "tupkeyData3 = " << tcConnectptr.p->tupkeyData[3]; - ndbout << endl; - ndbout << "abortState = " << tcConnectptr.p->abortState; - ndbout << "listState = " << tcConnectptr.p->listState; - ndbout << endl; - return; - }//if -#endif - switch (tcase) { - case ZLOG_LQHKEYREQ: - if (cnoOfLogPages == 0) { - jam(); - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2); - return; - }//if - logPartPtr.i = data0; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - - tcConnectptr.i = logPartPtr.p->firstLogQueue; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - logPartPtr.p->LogLqhKeyReqSent = ZFALSE; - getFirstInLogQueue(signal); - - switch (tcConnectptr.p->transactionState) { - case TcConnectionrec::LOG_QUEUED: - if (tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE) { - jam(); - logNextStart(signal); - abortCommonLab(signal); - return; - } else { - jam(); -/*------------------------------------------------------------*/ -/* WE MUST SET THE STATE OF THE LOG PART TO IDLE TO */ -/* ENSURE THAT WE ARE NOT QUEUED AGAIN ON THE LOG PART */ -/* WE WILL SET THE LOG PART STATE TO ACTIVE IMMEDIATELY */ -/* SO NO OTHER PROCESS WILL SEE THIS STATE. IT IS MERELY*/ -/* USED TO ENABLE REUSE OF CODE. 
*/ -/*------------------------------------------------------------*/ - if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) { - jam(); - logPartPtr.p->logPartState = LogPartRecord::IDLE; - }//if - logLqhkeyreqLab(signal); - return; - }//if - break; - case TcConnectionrec::LOG_ABORT_QUEUED: - jam(); - writeAbortLog(signal); - removeLogTcrec(signal); - logNextStart(signal); - continueAfterLogAbortWriteLab(signal); - return; - break; - case TcConnectionrec::LOG_COMMIT_QUEUED: - case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL: - jam(); - writeCommitLog(signal, logPartPtr); - logNextStart(signal); - if (tcConnectptr.p->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) { - if (tcConnectptr.p->seqNoReplica == 0 || - tcConnectptr.p->activeCreat == Fragrecord::AC_NR_COPY) { - jam(); - localCommitLab(signal); - } else { - jam(); - commitReplyLab(signal); - }//if - return; - } else { - jam(); - tcConnectptr.p->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL; - return; - }//if - break; - case TcConnectionrec::COMMIT_QUEUED: - jam(); - logNextStart(signal); - localCommitLab(signal); - break; - case TcConnectionrec::ABORT_QUEUED: - jam(); - logNextStart(signal); - abortCommonLab(signal); - break; - default: - ndbrequire(false); - break; - }//switch - return; - break; - case ZSR_GCI_LIMITS: - jam(); - signal->theData[0] = data0; - srGciLimits(signal); - return; - break; - case ZSR_LOG_LIMITS: - jam(); - signal->theData[0] = data0; - signal->theData[1] = data1; - signal->theData[2] = data2; - srLogLimits(signal); - return; - break; - case ZSEND_EXEC_CONF: - jam(); - signal->theData[0] = data0; - sendExecConf(signal); - return; - break; - case ZEXEC_SR: - jam(); - signal->theData[0] = data0; - execSr(signal); - return; - break; - case ZSR_FOURTH_COMP: - jam(); - signal->theData[0] = data0; - srFourthComp(signal); - return; - break; - case ZINIT_FOURTH: - jam(); - signal->theData[0] = data0; - initFourth(signal); - return; - break; - case ZTIME_SUPERVISION: - jam(); - signal->theData[0] = data0; - timeSup(signal); - return; - break; - case ZSR_PHASE3_START: - jam(); - srPhase3Start(signal); - return; - break; - case ZLQH_TRANS_NEXT: - jam(); - tcNodeFailptr.i = data0; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - lqhTransNextLab(signal); - return; - break; - case ZSCAN_TC_CONNECT: - jam(); - tabptr.i = data1; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - scanTcConnectLab(signal, data0, data2); - return; - break; - case ZINITIALISE_RECORDS: - jam(); - initialiseRecordsLab(signal, data0, data2, signal->theData[4]); - return; - break; - case ZINIT_GCP_REC: - jam(); - gcpPtr.i = 0; - ptrAss(gcpPtr, gcpRecord); - initGcpRecLab(signal); - return; - break; - case ZCHECK_LCP_STOP_BLOCKED: - jam(); - c_scanRecordPool.getPtr(scanptr, data0); - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - checkLcpStopBlockedLab(signal); - return; - case ZSCAN_MARKERS: - jam(); - scanMarkers(signal, data0, data1, data2); - return; - break; - - case ZOPERATION_EVENT_REP: - jam(); - /* --------------------------------------------------------------------- */ - // Report information about transaction activity once per second. 
- /* --------------------------------------------------------------------- */ - if (signal->theData[1] == 0) { - signal->theData[0] = NDB_LE_OperationReportCounters; - signal->theData[1] = c_Counters.operations; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - }//if - c_Counters.clear(); - signal->theData[0] = ZOPERATION_EVENT_REP; - signal->theData[1] = 0; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 2); - break; - case ZPREP_DROP_TABLE: - jam(); - checkDropTab(signal); - return; - break; - case ZENABLE_EXPAND_CHECK: - { - jam(); - fragptr.i = signal->theData[1]; - if (fragptr.i != RNIL) - { - jam(); - c_lcp_complete_fragments.getPtr(fragptr); - signal->theData[0] = fragptr.p->tabRef; - signal->theData[1] = fragptr.p->fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - Ptr save = fragptr; - - c_lcp_complete_fragments.next(fragptr); - signal->theData[0] = ZENABLE_EXPAND_CHECK; - signal->theData[1] = fragptr.i; - sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); - - c_lcp_complete_fragments.remove(save); - return; - } - else - { - jam(); - cstartRecReq = 2; - ndbrequire(c_lcp_complete_fragments.isEmpty()); - StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); - conf->startingNodeId = getOwnNodeId(); - sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - return; - } - } - case ZRETRY_TCKEYREF: - { - jam(); - Uint32 cnt = signal->theData[1]; - Uint32 ref = signal->theData[2]; - if (cnt < (10 * 60 * 5)) - { - jam(); - /** - * Only retry for 5 minutes...then hope that API has handled it..somehow - */ - memmove(signal->theData, signal->theData+3, 4*TcKeyRef::SignalLength); - sendTCKEYREF(signal, ref, 0, cnt); - } - return; - } - default: - ndbrequire(false); - break; - }//switch -}//Dblqh::execCONTINUEB() - -/* *********************************************************> */ -/* Request from DBDIH to include a new node in the node list */ -/* and so forth. */ -/* *********************************************************> */ -void Dblqh::execINCL_NODEREQ(Signal* signal) -{ - jamEntry(); - BlockReference retRef = signal->theData[0]; - Uint32 nodeId = signal->theData[1]; - cnewestGci = signal->theData[2]; - cnewestCompletedGci = signal->theData[2] - 1; - ndbrequire(cnoOfNodes < MAX_NDB_NODES); - for (Uint32 i = 0; i < cnoOfNodes; i++) { - jam(); - if (cnodeData[i] == nodeId) { - jam(); - cnodeStatus[i] = ZNODE_UP; - }//if - }//for - signal->theData[0] = nodeId; - signal->theData[1] = cownref; - sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB); - return; -}//Dblqh::execINCL_NODEREQ() - -void Dblqh::execTUPSEIZEREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dblqh::execTUPSEIZEREF() - -/* ########################################################################## */ -/* ####### START / RESTART MODULE ####### */ -/* ########################################################################## */ -/* ************************************************************************>> */ -/* This is first signal that arrives in a start / restart. Sender is NDBCNTR_REF. 
*/ -/* ************************************************************************>> */ -void Dblqh::execSTTOR(Signal* signal) -{ - UintR tstartPhase; - - jamEntry(); - /* START CASE */ - tstartPhase = signal->theData[1]; - /* SYSTEM RESTART RANK */ - csignalKey = signal->theData[6]; -#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR - char *name; - FILE *out = 0; -#endif - switch (tstartPhase) { - case ZSTART_PHASE1: - jam(); - cstartPhase = tstartPhase; - c_tup = (Dbtup*)globalData.getBlock(DBTUP); - c_acc = (Dbacc*)globalData.getBlock(DBACC); - ndbrequire(c_tup != 0 && c_acc != 0); - sendsttorryLab(signal); - -#if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR -#ifdef VM_TRACE - out = globalSignalLoggers.getOutputStream(); -#endif - if (out == 0) { - name = NdbConfig_SignalLogFileName(getOwnNodeId()); - out = fopen(name, "a"); - } - tracenrout = new NdbOut(* new FileOutputStream(out)); -#endif - -#ifdef ERROR_INSERT - traceopout = &ndbout; -#endif - - return; - break; - case 4: - jam(); - define_backup(signal); - break; - default: - jam(); - /*empty*/; - sendsttorryLab(signal); - return; - break; - }//switch -}//Dblqh::execSTTOR() - -void -Dblqh::define_backup(Signal* signal) -{ - DefineBackupReq * req = (DefineBackupReq*)signal->getDataPtrSend(); - req->backupId = 0; - req->clientRef = 0; - req->clientData = 0; - req->senderRef = reference(); - req->backupPtr = 0; - req->backupKey[0] = 0; - req->backupKey[1] = 0; - req->nodes.clear(); - req->nodes.set(getOwnNodeId()); - req->backupDataLen = ~0; - - sendSignal(BACKUP_REF, GSN_DEFINE_BACKUP_REQ, signal, - DefineBackupReq::SignalLength, JBB); -} - -void -Dblqh::execDEFINE_BACKUP_REF(Signal* signal) -{ - jamEntry(); - m_backup_ptr = RNIL; - DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); - int err_code = 0; - char * extra_msg = NULL; - - switch(ref->errorCode){ - case DefineBackupRef::Undefined: - case DefineBackupRef::FailedToSetupFsBuffers: - case DefineBackupRef::FailedToAllocateBuffers: - case DefineBackupRef::FailedToAllocateTables: - case DefineBackupRef::FailedAllocateTableMem: - case DefineBackupRef::FailedToAllocateFileRecord: - case DefineBackupRef::FailedToAllocateAttributeRecord: - case DefineBackupRef::FailedInsertFileHeader: - case DefineBackupRef::FailedInsertTableList: - jam(); - err_code = NDBD_EXIT_INVALID_CONFIG; - extra_msg = (char*) "Probably Backup parameters configuration error, Please consult the manual"; - progError(__LINE__, err_code, extra_msg); - } - - sendsttorryLab(signal); -} - -void -Dblqh::execDEFINE_BACKUP_CONF(Signal* signal) -{ - jamEntry(); - DefineBackupConf * conf = (DefineBackupConf*)signal->getDataPtrSend(); - m_backup_ptr = conf->backupPtr; - sendsttorryLab(signal); -} - -/* ***************************************> */ -/* Restart phases 1 - 6, sender is Ndbcntr */ -/* ***************************************> */ -void Dblqh::execNDB_STTOR(Signal* signal) -{ - jamEntry(); - Uint32 ownNodeId = signal->theData[1]; /* START PHASE*/ - cstartPhase = signal->theData[2]; /* MY NODE ID */ - cstartType = signal->theData[3]; /* START TYPE */ - - switch (cstartPhase) { - case ZSTART_PHASE1: - jam(); - preComputedRequestInfoMask = 0; - LqhKeyReq::setKeyLen(preComputedRequestInfoMask, RI_KEYLEN_MASK); - LqhKeyReq::setLastReplicaNo(preComputedRequestInfoMask, RI_LAST_REPL_MASK); - // Dont LqhKeyReq::setApplicationAddressFlag - LqhKeyReq::setDirtyFlag(preComputedRequestInfoMask, 1); - // Dont LqhKeyReq::setInterpretedFlag - 
LqhKeyReq::setSimpleFlag(preComputedRequestInfoMask, 1); - LqhKeyReq::setOperation(preComputedRequestInfoMask, RI_OPERATION_MASK); - LqhKeyReq::setGCIFlag(preComputedRequestInfoMask, 1); - LqhKeyReq::setNrCopyFlag(preComputedRequestInfoMask, 1); - // Dont setAIInLqhKeyReq - // Dont setSeqNoReplica - // Dont setSameClientAndTcFlag - // Dont setReturnedReadLenAIFlag - // Dont setAPIVersion - LqhKeyReq::setMarkerFlag(preComputedRequestInfoMask, 1); - //preComputedRequestInfoMask = 0x003d7fff; - startphase1Lab(signal, /* dummy */ ~0, ownNodeId); - - signal->theData[0] = ZOPERATION_EVENT_REP; - signal->theData[1] = 1; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2); - return; - break; - case ZSTART_PHASE2: - jam(); - startphase2Lab(signal, /* dummy */ ~0); - return; - break; - case ZSTART_PHASE3: - jam(); - startphase3Lab(signal); - return; - break; - case ZSTART_PHASE4: - jam(); - startphase4Lab(signal); - return; - break; - case ZSTART_PHASE6: - jam(); - startphase6Lab(signal); - return; - break; - default: - jam(); - /*empty*/; - sendNdbSttorryLab(signal); - return; - break; - }//switch -}//Dblqh::execNDB_STTOR() - -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* +++++++ START PHASE 2 +++++++ */ -/* */ -/* INITIATE ALL RECORDS WITHIN THE BLOCK */ -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -void Dblqh::startphase1Lab(Signal* signal, Uint32 _dummy, Uint32 ownNodeId) -{ - UintR Ti; - HostRecordPtr ThostPtr; - -/* ------- INITIATE ALL RECORDS ------- */ - cownNodeid = ownNodeId; - caccBlockref = calcAccBlockRef (cownNodeid); - ctupBlockref = calcTupBlockRef (cownNodeid); - ctuxBlockref = calcTuxBlockRef (cownNodeid); - cownref = calcLqhBlockRef (cownNodeid); - for (Ti = 0; Ti < chostFileSize; Ti++) { - ThostPtr.i = Ti; - ptrCheckGuard(ThostPtr, chostFileSize, hostRecord); - ThostPtr.p->hostLqhBlockRef = calcLqhBlockRef(ThostPtr.i); - ThostPtr.p->hostTcBlockRef = calcTcBlockRef(ThostPtr.i); - ThostPtr.p->inPackedList = false; - ThostPtr.p->noOfPackedWordsLqh = 0; - ThostPtr.p->noOfPackedWordsTc = 0; - }//for - cpackedListIndex = 0; - sendNdbSttorryLab(signal); - return; -}//Dblqh::startphase1Lab() - -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* +++++++ START PHASE 2 +++++++ */ -/* */ -/* CONNECT LQH WITH ACC AND TUP. */ -/* EVERY CONNECTION RECORD IN LQH IS ASSIGNED TO ONE ACC CONNECTION RECORD */ -/* AND ONE TUP CONNECTION RECORD. 
*/ -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -void Dblqh::startphase2Lab(Signal* signal, Uint32 _dummy) -{ - cmaxWordsAtNodeRec = MAX_NO_WORDS_OUTSTANDING_COPY_FRAGMENT; -/* -- ACC AND TUP CONNECTION PROCESS -- */ - tcConnectptr.i = 0; - ptrAss(tcConnectptr, tcConnectionrec); - moreconnectionsLab(signal); - return; -}//Dblqh::startphase2Lab() - -void Dblqh::moreconnectionsLab(Signal* signal) -{ - tcConnectptr.p->tcAccBlockref = caccBlockref; - // set TUX block here (no operation is seized in TUX) - tcConnectptr.p->tcTuxBlockref = ctuxBlockref; -/* NO STATE CHECKING IS PERFORMED, ASSUMED TO WORK */ -/* *************** */ -/* ACCSEIZEREQ < */ -/* *************** */ - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - sendSignal(caccBlockref, GSN_ACCSEIZEREQ, signal, 2, JBB); - return; -}//Dblqh::moreconnectionsLab() - -/* ***************> */ -/* ACCSEIZECONF > */ -/* ***************> */ -void Dblqh::execACCSEIZECONF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - tcConnectptr.p->accConnectrec = signal->theData[1]; -/* *************** */ -/* TUPSEIZEREQ < */ -/* *************** */ - tcConnectptr.p->tcTupBlockref = ctupBlockref; - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - sendSignal(ctupBlockref, GSN_TUPSEIZEREQ, signal, 2, JBB); - return; -}//Dblqh::execACCSEIZECONF() - -/* ***************> */ -/* TUPSEIZECONF > */ -/* ***************> */ -void Dblqh::execTUPSEIZECONF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - tcConnectptr.p->tupConnectrec = signal->theData[1]; -/* ------- CHECK IF THERE ARE MORE CONNECTIONS TO BE CONNECTED ------- */ - tcConnectptr.i = tcConnectptr.p->nextTcConnectrec; - if (tcConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - moreconnectionsLab(signal); - return; - }//if -/* ALL LQH_CONNECT RECORDS ARE CONNECTED TO ACC AND TUP ---- */ - sendNdbSttorryLab(signal); - return; -}//Dblqh::execTUPSEIZECONF() - -/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* +++++++ START PHASE 4 +++++++ */ -/* */ -/* CONNECT LQH WITH LQH. */ -/* CONNECT EACH LQH WITH EVERY LQH IN THE DATABASE SYSTEM. */ -/* IF INITIAL START THEN CREATE THE FRAGMENT LOG FILES */ -/*IF SYSTEM RESTART OR NODE RESTART THEN OPEN THE FRAGMENT LOG FILES AND */ -/*FIND THE END OF THE LOG FILES. */ -/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* WAIT UNTIL ADD NODE PROCESSES ARE COMPLETED */ -/* IF INITIAL START ALSO WAIT FOR LOG FILES TO INITIALISED */ -/*START TIME SUPERVISION OF LOG FILES. WE HAVE TO WRITE LOG PAGES TO DISK */ -/*EVEN IF THE PAGES ARE NOT FULL TO ENSURE THAT THEY COME TO DISK ASAP. 
*/ -/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -void Dblqh::startphase3Lab(Signal* signal) -{ - LogFileRecordPtr prevLogFilePtr; - LogFileRecordPtr zeroLogFilePtr; - - caddNodeState = ZTRUE; -/* ***************<< */ -/* READ_NODESREQ < */ -/* ***************<< */ - cinitialStartOngoing = ZTRUE; - ndbrequire(cnoLogFiles != 0); - - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - initLogpart(signal); - for (Uint32 fileNo = 0; fileNo < cnoLogFiles; fileNo++) { - seizeLogfile(signal); - if (fileNo != 0) { - jam(); - prevLogFilePtr.p->nextLogFile = logFilePtr.i; - logFilePtr.p->prevLogFile = prevLogFilePtr.i; - } else { - jam(); - logPartPtr.p->firstLogfile = logFilePtr.i; - logPartPtr.p->currentLogfile = logFilePtr.i; - zeroLogFilePtr.i = logFilePtr.i; - zeroLogFilePtr.p = logFilePtr.p; - }//if - prevLogFilePtr.i = logFilePtr.i; - prevLogFilePtr.p = logFilePtr.p; - initLogfile(signal, fileNo); - if ((cstartType == NodeState::ST_INITIAL_START) || - (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) { - if (logFilePtr.i == zeroLogFilePtr.i) { - jam(); -/* ------------------------------------------------------------------------- */ -/*IN AN INITIAL START WE START BY CREATING ALL LOG FILES AND SETTING THEIR */ -/*PROPER SIZE AND INITIALISING PAGE ZERO IN ALL FILES. */ -/*WE START BY CREATING FILE ZERO IN EACH LOG PART AND THEN PROCEED */ -/*SEQUENTIALLY THROUGH ALL LOG FILES IN THE LOG PART. */ -/* ------------------------------------------------------------------------- */ - openLogfileInit(signal); - }//if - }//if - }//for - zeroLogFilePtr.p->prevLogFile = logFilePtr.i; - logFilePtr.p->nextLogFile = zeroLogFilePtr.i; - }//for - if (cstartType != NodeState::ST_INITIAL_START && - cstartType != NodeState::ST_INITIAL_NODE_RESTART) { - jam(); - ndbrequire(cstartType == NodeState::ST_NODE_RESTART || - cstartType == NodeState::ST_SYSTEM_RESTART); - /** -------------------------------------------------------------------- - * THIS CODE KICKS OFF THE SYSTEM RESTART AND NODE RESTART. IT STARTS UP - * THE RESTART BY FINDING THE END OF THE LOG AND FROM THERE FINDING THE - * INFO ABOUT THE GLOBAL CHECKPOINTS IN THE FRAGMENT LOG. 
- --------------------------------------------------------------------- */ - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - jam(); - LogFileRecordPtr locLogFilePtr; - ptrAss(logPartPtr, logPartRecord); - locLogFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FRONTPAGE; - openFileRw(signal, locLogFilePtr); - }//for - }//if - - signal->theData[0] = cownref; - sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB); - return; -}//Dblqh::startphase3Lab() - -/* ****************** */ -/* READ_NODESCONF > */ -/* ****************** */ -void Dblqh::execREAD_NODESCONF(Signal* signal) -{ - jamEntry(); - - ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; - cnoOfNodes = readNodes->noOfNodes; - - unsigned ind = 0; - unsigned i = 0; - for (i = 1; i < MAX_NDB_NODES; i++) { - jam(); - if (NodeBitmask::get(readNodes->allNodes, i)) { - jam(); - cnodeData[ind] = i; - cnodeStatus[ind] = NodeBitmask::get(readNodes->inactiveNodes, i); - //readNodes->getVersionId(i, readNodes->theVersionIds) not used - if (!NodeBitmask::get(readNodes->inactiveNodes, i)) - { - jam(); - m_sr_nodes.set(i); - } - ind++; - }//if - }//for - ndbrequire(ind == cnoOfNodes); - ndbrequire(cnoOfNodes >= 1 && cnoOfNodes < MAX_NDB_NODES); - ndbrequire(!(cnoOfNodes == 1 && cstartType == NodeState::ST_NODE_RESTART)); - - caddNodeState = ZFALSE; - if (cstartType == NodeState::ST_SYSTEM_RESTART) - { - jam(); - sendNdbSttorryLab(signal); - return; - } - else if (cstartType == NodeState::ST_NODE_RESTART) - { - jam(); - SET_TRACENR_FLAG; - m_sr_nodes.clear(); - m_sr_nodes.set(getOwnNodeId()); - sendNdbSttorryLab(signal); - return; - } - SET_TRACENR_FLAG; - - checkStartCompletedLab(signal); - return; -}//Dblqh::execREAD_NODESCONF() - -void Dblqh::checkStartCompletedLab(Signal* signal) -{ - if (caddNodeState == ZFALSE) { - if (cinitialStartOngoing == ZFALSE) { - jam(); - sendNdbSttorryLab(signal); - return; - }//if - }//if - return; -}//Dblqh::checkStartCompletedLab() - -void Dblqh::startphase4Lab(Signal* signal) -{ - sendNdbSttorryLab(signal); - return; -}//Dblqh::startphase4Lab() - -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* SET CONCURRENCY OF LOCAL CHECKPOINTS TO BE USED AFTER SYSTEM RESTART. 
*/ -/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -void Dblqh::startphase6Lab(Signal* signal) -{ - cstartPhase = ZNIL; - cstartType = ZNIL; - CLEAR_TRACENR_FLAG; - sendNdbSttorryLab(signal); - return; -}//Dblqh::startphase6Lab() - -void Dblqh::sendNdbSttorryLab(Signal* signal) -{ - signal->theData[0] = cownref; - sendSignal(NDBCNTR_REF, GSN_NDB_STTORRY, signal, 1, JBB); - return; -}//Dblqh::sendNdbSttorryLab() - -void Dblqh::sendsttorryLab(Signal* signal) -{ -/* *********<< */ -/* STTORRY < */ -/* *********<< */ - signal->theData[0] = csignalKey; /* SIGNAL KEY */ - signal->theData[1] = 3; /* BLOCK CATEGORY */ - signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */ - signal->theData[3] = ZSTART_PHASE1; - signal->theData[4] = 4; - signal->theData[5] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB); - return; -}//Dblqh::sendsttorryLab() - -/* ***************>> */ -/* READ_NODESREF > */ -/* ***************>> */ -void Dblqh::execREAD_NODESREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dblqh::execREAD_NODESREF() - -/* *************** */ -/* SIZEALT_REP > */ -/* *************** */ -void Dblqh::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - cnoLogFiles = 8; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_REDOLOG_FILES, - &cnoLogFiles)); - ndbrequire(cnoLogFiles > 0); - - Uint32 log_page_size= 0; - ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER, - &log_page_size); - - /** - * Always set page size in half MBytes - */ - clogPageFileSize= (log_page_size / sizeof(LogPageRecord)); - Uint32 mega_byte_part= clogPageFileSize & 15; - if (mega_byte_part != 0) { - jam(); - clogPageFileSize+= (16 - mega_byte_part); - } - - /* maximum number of log file operations */ - clfoFileSize = clogPageFileSize; - if (clfoFileSize < ZLFO_MIN_FILE_SIZE) - clfoFileSize = ZLFO_MIN_FILE_SIZE; - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TABLE, &ctabrecFileSize)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_TC_CONNECT, - &ctcConnectrecFileSize)); - clogFileFileSize = 4 * cnoLogFiles; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize)); - cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN; - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless)); - c_o_direct = true; - ndb_mgm_get_int_parameter(p, CFG_DB_O_DIRECT, &c_o_direct); - - Uint32 tmp= 0; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_FRAG, &tmp)); - c_fragment_pool.setSize(tmp); - - if (!ndb_mgm_get_int_parameter(p, CFG_DB_REDOLOG_FILE_SIZE, - &clogFileSize)) - { - // convert to mbyte - clogFileSize = (clogFileSize + 1024*1024 - 1) / (1024 * 1024); - ndbrequire(clogFileSize >= 4 && clogFileSize <= 1024); - } - - cmaxLogFilesInPageZero = (ZPAGE_SIZE - ZPAGE_HEADER_SIZE - 128) / - (ZFD_MBYTE_SIZE * clogFileSize); - - /** - * "Old" cmaxLogFilesInPageZero was 40 - * Each FD need 3 words per mb, require that they can fit into 1 page - * (atleast 1 FD) - * Is also checked in ConfigInfo.cpp (max FragmentLogFileSize = 1Gb) - * 1Gb = 1024Mb => 3(ZFD_MBYTE_SIZE) * 1024 < 8192 (ZPAGE_SIZE) - */ - if (cmaxLogFilesInPageZero > 40) - { - jam(); - cmaxLogFilesInPageZero = 40; - } - else - { - ndbrequire(cmaxLogFilesInPageZero); - } - - initRecords(); - 
initialiseRecordsLab(signal, 0, ref, senderData); - - return; -}//Dblqh::execSIZEALT_REP() - -/* ########################################################################## */ -/* ####### ADD/DELETE FRAGMENT MODULE ####### */ -/* THIS MODULE IS USED BY DICTIONARY TO CREATE NEW FRAGMENTS AND DELETE */ -/* OLD FRAGMENTS. */ -/* */ -/* ########################################################################## */ -/* -------------------------------------------------------------- */ -/* FRAG REQ */ -/* -------------------------------------------------------------- */ -/* *********************************************************> */ -/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */ -/* *********************************************************> */ - -// this unbelievable mess could be replaced by one signal to LQH -// and execute direct to local DICT to get everything at once - -void Dblqh::execLQHFRAGREQ(Signal* signal) -{ - jamEntry(); - LqhFragReq * req = (LqhFragReq*)signal->getDataPtr(); - - Uint32 retPtr = req->senderData; - BlockReference retRef = req->senderRef; - Uint32 fragId = req->fragmentId; - Uint32 reqinfo = req->requestInfo; - tabptr.i = req->tableId; - Uint16 tlocalKeylen = req->localKeyLength; - Uint32 tmaxLoadFactor = req->maxLoadFactor; - Uint32 tminLoadFactor = req->minLoadFactor; - Uint8 tk = req->kValue; - Uint8 tlhstar = req->lh3DistrBits; - Uint8 tlh = req->lh3PageBits; - Uint32 tnoOfAttr = req->noOfAttributes; - Uint32 tnoOfNull = req->noOfNullAttributes; - Uint32 maxRowsLow = req->maxRowsLow; - Uint32 maxRowsHigh = req->maxRowsHigh; - Uint32 minRowsLow = req->minRowsLow; - Uint32 minRowsHigh = req->minRowsHigh; - Uint32 tschemaVersion = req->schemaVersion; - Uint32 ttupKeyLength = req->keyLength; - Uint32 noOfKeyAttr = req->noOfKeyAttr; - Uint32 noOfCharsets = req->noOfCharsets; - Uint32 checksumIndicator = req->checksumIndicator; - Uint32 gcpIndicator = req->GCPIndicator; - Uint32 startGci = req->startGci; - Uint32 tableType = req->tableType; - Uint32 primaryTableId = req->primaryTableId; - Uint32 tablespace= req->tablespace_id; - Uint32 logPart = req->logPartId; - Uint32 forceVarPartFlag = req->forceVarPartFlag; - - if (signal->getLength() < 20) - { - logPart = (fragId & 1) + 2 * (tabptr.i & 1); - } - logPart &= 3; - - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - bool tempTable = ((reqinfo & LqhFragReq::TemporaryTable) != 0); - - /* Temporary tables set to defined in system restart */ - if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED){ - tabptr.p->tableStatus = Tablerec::ADD_TABLE_ONGOING; - tabptr.p->tableType = tableType; - tabptr.p->primaryTableId = - (primaryTableId == RNIL ? tabptr.i : primaryTableId); - tabptr.p->schemaVersion = tschemaVersion; - tabptr.p->m_disk_table= 0; - }//if - - if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING){ - jam(); - fragrefLab(signal, retRef, retPtr, ZTAB_STATE_ERROR); - return; - }//if - //-------------------------------------------------------------------- - // We could arrive here if we create the fragment as part of a take - // over by a hot spare node. The table is then is already created - // and bit 31 is set, thus indicating that we are creating a fragment - // by copy creation. Also since the node has already been started we - // know that it is not a node restart ongoing. 
- //-------------------------------------------------------------------- - - if (getFragmentrec(signal, fragId)) { - jam(); - fragrefLab(signal, retRef, retPtr, terrorCode); - return; - }//if - if (!insertFragrec(signal, fragId)) { - jam(); - fragrefLab(signal, retRef, retPtr, terrorCode); - return; - }//if - Uint32 copyType = reqinfo & 3; - initFragrec(signal, tabptr.i, fragId, copyType); - fragptr.p->startGci = startGci; - fragptr.p->newestGci = startGci; - fragptr.p->tableType = tableType; - fragptr.p->m_log_part_ptr_i = logPart; // assumes array - - if (DictTabInfo::isOrderedIndex(tableType)) { - jam(); - // find corresponding primary table fragment - TablerecPtr tTablePtr; - tTablePtr.i = primaryTableId; - ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec); - FragrecordPtr tFragPtr; - tFragPtr.i = RNIL; - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - if (tTablePtr.p->fragid[i] == fragptr.p->fragId) { - jam(); - tFragPtr.i = tTablePtr.p->fragrec[i]; - break; - } - } - ndbrequire(tFragPtr.i != RNIL); - // store it - fragptr.p->tableFragptr = tFragPtr.i; - } else { - fragptr.p->tableFragptr = fragptr.i; - } - - if (tempTable) { -//-------------------------------------------- -// reqinfo bit 3-4 = 2 means temporary table -// without logging or checkpointing. -//-------------------------------------------- - jam(); - fragptr.p->logFlag = Fragrecord::STATE_FALSE; - fragptr.p->lcpFlag = Fragrecord::LCP_STATE_FALSE; - }//if - -//---------------------------------------------- -// For node restarts it is not necessarily zero -//---------------------------------------------- - if (cfirstfreeAddfragrec == RNIL) { - jam(); - deleteFragrec(fragId); - fragrefLab(signal, retRef, retPtr, ZNO_ADD_FRAGREC); - return; - }//if - seizeAddfragrec(signal); - addfragptr.p->addFragid = fragId; - addfragptr.p->fragmentPtr = fragptr.i; - addfragptr.p->dictBlockref = retRef; - addfragptr.p->dictConnectptr = retPtr; - addfragptr.p->m_senderAttrPtr = RNIL; - addfragptr.p->noOfAttr = tnoOfAttr; - addfragptr.p->noOfNull = tnoOfNull; - addfragptr.p->maxRowsLow = maxRowsLow; - addfragptr.p->maxRowsHigh = maxRowsHigh; - addfragptr.p->minRowsLow = minRowsLow; - addfragptr.p->minRowsHigh = minRowsHigh; - addfragptr.p->tabId = tabptr.i; - addfragptr.p->totalAttrReceived = 0; - addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */ - addfragptr.p->schemaVer = tschemaVersion; - Uint32 tmp = (reqinfo & LqhFragReq::CreateInRunning); - addfragptr.p->fragCopyCreation = (tmp == 0 ? 
0 : 1); - addfragptr.p->addfragErrorCode = 0; - addfragptr.p->noOfKeyAttr = noOfKeyAttr; - addfragptr.p->noOfCharsets = noOfCharsets; - addfragptr.p->checksumIndicator = checksumIndicator; - addfragptr.p->GCPIndicator = gcpIndicator; - addfragptr.p->lh3DistrBits = tlhstar; - addfragptr.p->tableType = tableType; - addfragptr.p->primaryTableId = primaryTableId; - addfragptr.p->tablespace_id= tablespace; - addfragptr.p->forceVarPartFlag = forceVarPartFlag; - // - addfragptr.p->tupConnectptr = RNIL; - addfragptr.p->tuxConnectptr = RNIL; - - if (DictTabInfo::isTable(tableType) || - DictTabInfo::isHashIndex(tableType)) { - jam(); - AccFragReq* const accreq = (AccFragReq*)signal->getDataPtrSend(); - accreq->userPtr = addfragptr.i; - accreq->userRef = cownref; - accreq->tableId = tabptr.i; - accreq->reqInfo = copyType << 4; - accreq->fragId = fragId; - accreq->localKeyLen = tlocalKeylen; - accreq->maxLoadFactor = tmaxLoadFactor; - accreq->minLoadFactor = tminLoadFactor; - accreq->kValue = tk; - accreq->lhFragBits = tlhstar; - accreq->lhDirBits = tlh; - accreq->keyLength = ttupKeyLength; - /* --------------------------------------------------------------------- */ - /* Send ACCFRAGREQ, when confirmation is received send 2 * TUPFRAGREQ to */ - /* create 2 tuple fragments on this node. */ - /* --------------------------------------------------------------------- */ - addfragptr.p->addfragStatus = AddFragRecord::ACC_ADDFRAG; - sendSignal(fragptr.p->accBlockref, GSN_ACCFRAGREQ, - signal, AccFragReq::SignalLength, JBB); - return; - } - if (DictTabInfo::isOrderedIndex(tableType)) { - jam(); - addfragptr.p->addfragStatus = AddFragRecord::WAIT_TUP; - sendAddFragReq(signal); - return; - } - ndbrequire(false); -}//Dblqh::execLQHFRAGREQ() - -/* *************** */ -/* ACCFRAGCONF > */ -/* *************** */ -void Dblqh::execACCFRAGCONF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - Uint32 taccConnectptr = signal->theData[1]; - //Uint32 fragId1 = signal->theData[2]; - Uint32 accFragPtr1 = signal->theData[4]; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG); - - addfragptr.p->accConnectptr = taccConnectptr; - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - fragptr.p->accFragptr = accFragPtr1; - - addfragptr.p->addfragStatus = AddFragRecord::WAIT_TUP; - sendAddFragReq(signal); -}//Dblqh::execACCFRAGCONF() - -/* *************** */ -/* TUPFRAGCONF > */ -/* *************** */ -void Dblqh::execTUPFRAGCONF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - Uint32 tupConnectptr = signal->theData[1]; - Uint32 tupFragPtr = signal->theData[2]; /* TUP FRAGMENT POINTER */ - //Uint32 localFragId = signal->theData[3]; /* LOCAL FRAGMENT ID */ - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - fragptr.p->tupFragptr = tupFragPtr; - switch (addfragptr.p->addfragStatus) { - case AddFragRecord::WAIT_TUP: - jam(); - fragptr.p->tupFragptr = tupFragPtr; - addfragptr.p->tupConnectptr = tupConnectptr; - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { - addfragptr.p->addfragStatus = AddFragRecord::WAIT_TUX; - sendAddFragReq(signal); - break; - } - goto done_with_frag; - break; - case AddFragRecord::WAIT_TUX: - jam(); - fragptr.p->tuxFragptr = tupFragPtr; - addfragptr.p->tuxConnectptr = tupConnectptr; - goto done_with_frag; - break; - done_with_frag: - /* 
---------------------------------------------------------------- */ - /* Finished create of fragments. Now ready for creating attributes. */ - /* ---------------------------------------------------------------- */ - addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR; - { - LqhFragConf* conf = (LqhFragConf*)signal->getDataPtrSend(); - conf->senderData = addfragptr.p->dictConnectptr; - conf->lqhFragPtr = addfragptr.i; - sendSignal(addfragptr.p->dictBlockref, GSN_LQHFRAGCONF, - signal, LqhFragConf::SignalLength, JBB); - } - break; - default: - ndbrequire(false); - break; - } -}//Dblqh::execTUPFRAGCONF() - -/* *************** */ -/* TUXFRAGCONF > */ -/* *************** */ -void Dblqh::execTUXFRAGCONF(Signal* signal) -{ - jamEntry(); - execTUPFRAGCONF(signal); -}//Dblqh::execTUXFRAGCONF - -/* - * Add fragment in TUP or TUX. Called up to 4 times. - */ -void -Dblqh::sendAddFragReq(Signal* signal) -{ - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TUP){ - TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); - if (DictTabInfo::isTable(addfragptr.p->tableType) || - DictTabInfo::isHashIndex(addfragptr.p->tableType)) { - jam(); - tupFragReq->userPtr = addfragptr.i; - tupFragReq->userRef = cownref; - tupFragReq->reqInfo = 0; /* ADD TABLE */ - tupFragReq->tableId = addfragptr.p->tabId; - tupFragReq->noOfAttr = addfragptr.p->noOfAttr; - tupFragReq->fragId = addfragptr.p->addFragid; - tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; - tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; - tupFragReq->minRowsLow = addfragptr.p->minRowsLow; - tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; - tupFragReq->noOfNullAttr = addfragptr.p->noOfNull; - tupFragReq->schemaVersion = addfragptr.p->schemaVer; - tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr; - tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; - tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; - tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; - tupFragReq->tablespaceid = addfragptr.p->tablespace_id; - tupFragReq->forceVarPartFlag = addfragptr.p->forceVarPartFlag; - sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, - signal, TupFragReq::SignalLength, JBB); - return; - } - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { - jam(); - tupFragReq->userPtr = addfragptr.i; - tupFragReq->userRef = cownref; - tupFragReq->reqInfo = 0; /* ADD TABLE */ - tupFragReq->tableId = addfragptr.p->tabId; - tupFragReq->noOfAttr = 1; /* ordered index: one array attr */ - tupFragReq->fragId = addfragptr.p->addFragid; - tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; - tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; - tupFragReq->minRowsLow = addfragptr.p->minRowsLow; - tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; - tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */ - tupFragReq->schemaVersion = addfragptr.p->schemaVer; - tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */ - tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; - tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; - tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; - tupFragReq->forceVarPartFlag = addfragptr.p->forceVarPartFlag; - sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, - signal, TupFragReq::SignalLength, JBB); - return; - } - } - if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TUX) { - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { - jam(); 
- TuxFragReq* const tuxreq = (TuxFragReq*)signal->getDataPtrSend(); - tuxreq->userPtr = addfragptr.i; - tuxreq->userRef = cownref; - tuxreq->reqInfo = 0; /* ADD TABLE */ - tuxreq->tableId = addfragptr.p->tabId; - ndbrequire(addfragptr.p->noOfAttr >= 2); - tuxreq->noOfAttr = addfragptr.p->noOfAttr - 1; /* skip NDB$TNODE */ - tuxreq->fragId = addfragptr.p->addFragid; - tuxreq->fragOff = addfragptr.p->lh3DistrBits; - tuxreq->tableType = addfragptr.p->tableType; - tuxreq->primaryTableId = addfragptr.p->primaryTableId; - // pointer to index fragment in TUP - tuxreq->tupIndexFragPtrI = fragptr.p->tupFragptr; - // pointers to table fragments in TUP and ACC - FragrecordPtr tFragPtr; - tFragPtr.i = fragptr.p->tableFragptr; - c_fragment_pool.getPtr(tFragPtr); - tuxreq->tupTableFragPtrI[0] = tFragPtr.p->tupFragptr; - tuxreq->tupTableFragPtrI[1] = RNIL; - tuxreq->accTableFragPtrI[0] = tFragPtr.p->accFragptr; - tuxreq->accTableFragPtrI[1] = RNIL; - sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, - signal, TuxFragReq::SignalLength, JBB); - return; - } - } - ndbrequire(false); -}//Dblqh::sendAddFragReq - -/* ************************************************************************> */ -/* LQHADDATTRREQ: Request from DICT to create attributes for the new table. */ -/* ************************************************************************> */ -void Dblqh::execLQHADDATTREQ(Signal* signal) -{ - jamEntry(); - LqhAddAttrReq * const req = (LqhAddAttrReq*)signal->getDataPtr(); - - addfragptr.i = req->lqhFragPtr; - const Uint32 tnoOfAttr = req->noOfAttributes; - const Uint32 senderData = req->senderData; - const Uint32 senderAttrPtr = req->senderAttrPtr; - - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::WAIT_ADD_ATTR); - ndbrequire((tnoOfAttr != 0) && (tnoOfAttr <= LqhAddAttrReq::MAX_ATTRIBUTES)); - addfragptr.p->totalAttrReceived += tnoOfAttr; - ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr); - - addfragptr.p->attrReceived = tnoOfAttr; - for (Uint32 i = 0; i < tnoOfAttr; i++) { - addfragptr.p->attributes[i] = req->attributes[i]; - if(AttributeDescriptor::getDiskBased(req->attributes[i].attrDescriptor)) - { - TablerecPtr tabPtr; - tabPtr.i = addfragptr.p->tabId; - ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec); - tabPtr.p->m_disk_table = 1; - } - }//for - addfragptr.p->attrSentToTup = 0; - ndbrequire(addfragptr.p->dictConnectptr == senderData); - addfragptr.p->m_senderAttrPtr = senderAttrPtr; - addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT; - sendAddAttrReq(signal); -}//Dblqh::execLQHADDATTREQ() - -/* *********************>> */ -/* TUP_ADD_ATTCONF > */ -/* *********************>> */ -void Dblqh::execTUP_ADD_ATTCONF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - // implies that operation was released on the other side - const bool lastAttr = signal->theData[1]; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - switch (addfragptr.p->addfragStatus) { - case AddFragRecord::TUP_ATTR_WAIT: - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { - addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT; - sendAddAttrReq(signal); - break; - } - goto done_with_attr; - break; - case AddFragRecord::TUX_ATTR_WAIT: - jam(); - if (lastAttr) - addfragptr.p->tuxConnectptr = RNIL; - goto done_with_attr; - break; - done_with_attr: - addfragptr.p->attrSentToTup = addfragptr.p->attrSentToTup + 1; - ndbrequire(addfragptr.p->attrSentToTup <= 
addfragptr.p->attrReceived); - ndbrequire(addfragptr.p->totalAttrReceived <= addfragptr.p->noOfAttr); - if (addfragptr.p->attrSentToTup < addfragptr.p->attrReceived) { - // more in this batch - jam(); - addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT; - sendAddAttrReq(signal); - } else if (addfragptr.p->totalAttrReceived < addfragptr.p->noOfAttr) { - // more batches to receive - jam(); - addfragptr.p->addfragStatus = AddFragRecord::WAIT_ADD_ATTR; - LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend(); - conf->senderData = addfragptr.p->dictConnectptr; - conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr; - conf->fragId = addfragptr.p->addFragid; - sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF, - signal, LqhAddAttrConf::SignalLength, JBB); - } else { - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - /* ------------------------------------------------------------------ - * WE HAVE NOW COMPLETED ADDING THIS FRAGMENT. WE NOW NEED TO SET THE - * PROPER STATE IN FRAG_STATUS DEPENDENT ON IF WE ARE CREATING A NEW - * REPLICA OR IF WE ARE CREATING A TABLE. FOR FRAGMENTS IN COPY - * PROCESS WE DO NOT WANT LOGGING ACTIVATED. - * ----------------------------------------------------------------- */ - if (addfragptr.p->fragCopyCreation == 1) { - jam(); - if (! DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) - { - fragptr.p->m_copy_started_state = Fragrecord::AC_IGNORED; - //fragptr.p->m_copy_started_state = Fragrecord::AC_NR_COPY; - fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION; - } - else - { - fragptr.p->fragStatus = Fragrecord::FSACTIVE; - } - fragptr.p->logFlag = Fragrecord::STATE_FALSE; - } else { - jam(); - fragptr.p->fragStatus = Fragrecord::FSACTIVE; - }//if - LqhAddAttrConf *const conf = (LqhAddAttrConf*)signal->getDataPtrSend(); - conf->senderData = addfragptr.p->dictConnectptr; - conf->senderAttrPtr = addfragptr.p->m_senderAttrPtr; - conf->fragId = addfragptr.p->addFragid; - sendSignal(addfragptr.p->dictBlockref, GSN_LQHADDATTCONF, signal, - LqhAddAttrConf::SignalLength, JBB); - releaseAddfragrec(signal); - }//if - break; - default: - ndbrequire(false); - break; - } -} - -/* **********************>> */ -/* TUX_ADD_ATTRCONF > */ -/* **********************>> */ -void Dblqh::execTUX_ADD_ATTRCONF(Signal* signal) -{ - jamEntry(); - execTUP_ADD_ATTCONF(signal); -}//Dblqh::execTUX_ADD_ATTRCONF - -/* - * Add attribute in TUP or TUX. Called up to 4 times. 
- */ -void -Dblqh::sendAddAttrReq(Signal* signal) -{ - arrGuard(addfragptr.p->attrSentToTup, LqhAddAttrReq::MAX_ATTRIBUTES); - LqhAddAttrReq::Entry& entry = - addfragptr.p->attributes[addfragptr.p->attrSentToTup]; - const Uint32 attrId = entry.attrId & 0xffff; - const Uint32 primaryAttrId = entry.attrId >> 16; - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - if (addfragptr.p->addfragStatus == AddFragRecord::TUP_ATTR_WAIT) { - if (DictTabInfo::isTable(addfragptr.p->tableType) || - DictTabInfo::isHashIndex(addfragptr.p->tableType) || - (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) && - primaryAttrId == ZNIL)) { - jam(); - TupAddAttrReq* const tupreq = (TupAddAttrReq*)signal->getDataPtrSend(); - tupreq->tupConnectPtr = addfragptr.p->tupConnectptr; - tupreq->notused1 = 0; - tupreq->attrId = attrId; - tupreq->attrDescriptor = entry.attrDescriptor; - tupreq->extTypeInfo = entry.extTypeInfo; - sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ, - signal, TupAddAttrReq::SignalLength, JBB); - return; - } - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) && - primaryAttrId != ZNIL) { - // this attribute is not for TUP - jam(); - TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend(); - tupconf->userPtr = addfragptr.i; - tupconf->lastAttr = false; - sendSignal(reference(), GSN_TUP_ADD_ATTCONF, - signal, TupAddAttrConf::SignalLength, JBB); - return; - } - } - if (addfragptr.p->addfragStatus == AddFragRecord::TUX_ATTR_WAIT) { - jam(); - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) && - primaryAttrId != ZNIL) { - jam(); - TuxAddAttrReq* const tuxreq = (TuxAddAttrReq*)signal->getDataPtrSend(); - tuxreq->tuxConnectPtr = addfragptr.p->tuxConnectptr; - tuxreq->notused1 = 0; - tuxreq->attrId = attrId; - tuxreq->attrDescriptor = entry.attrDescriptor; - tuxreq->extTypeInfo = entry.extTypeInfo; - tuxreq->primaryAttrId = primaryAttrId; - sendSignal(fragptr.p->tuxBlockref, GSN_TUX_ADD_ATTRREQ, - signal, TuxAddAttrReq::SignalLength, JBB); - return; - } - if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType) && - primaryAttrId == ZNIL) { - // this attribute is not for TUX - jam(); - TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend(); - tuxconf->userPtr = addfragptr.i; - tuxconf->lastAttr = false; - sendSignal(reference(), GSN_TUX_ADD_ATTRCONF, - signal, TuxAddAttrConf::SignalLength, JBB); - return; - } - } - ndbrequire(false); -}//Dblqh::sendAddAttrReq - -/* ************************************************************************>> */ -/* TAB_COMMITREQ: Commit the new table for use in transactions. Sender DICT. 
*/ -/* ************************************************************************>> */ -void Dblqh::execTAB_COMMITREQ(Signal* signal) -{ - jamEntry(); - Uint32 dihPtr = signal->theData[0]; - BlockReference dihBlockref = signal->theData[1]; - tabptr.i = signal->theData[2]; - - if (tabptr.i >= ctabrecFileSize) { - jam(); - terrorCode = ZTAB_FILE_SIZE; - signal->theData[0] = dihPtr; - signal->theData[1] = cownNodeid; - signal->theData[2] = tabptr.i; - signal->theData[3] = terrorCode; - sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 4, JBB); - return; - }//if - ptrAss(tabptr, tablerec); - if (tabptr.p->tableStatus != Tablerec::ADD_TABLE_ONGOING) { - jam(); - terrorCode = ZTAB_STATE_ERROR; - signal->theData[0] = dihPtr; - signal->theData[1] = cownNodeid; - signal->theData[2] = tabptr.i; - signal->theData[3] = terrorCode; - signal->theData[4] = tabptr.p->tableStatus; - sendSignal(dihBlockref, GSN_TAB_COMMITREF, signal, 5, JBB); - ndbrequire(false); - return; - }//if - tabptr.p->usageCount = 0; - tabptr.p->tableStatus = Tablerec::TABLE_DEFINED; - signal->theData[0] = dihPtr; - signal->theData[1] = cownNodeid; - signal->theData[2] = tabptr.i; - sendSignal(dihBlockref, GSN_TAB_COMMITCONF, signal, 3, JBB); - return; -}//Dblqh::execTAB_COMMITREQ() - - -void Dblqh::fragrefLab(Signal* signal, - BlockReference fragBlockRef, - Uint32 fragConPtr, - Uint32 errorCode) -{ - LqhFragRef * ref = (LqhFragRef*)signal->getDataPtrSend(); - ref->senderData = fragConPtr; - ref->errorCode = errorCode; - sendSignal(fragBlockRef, GSN_LQHFRAGREF, signal, - LqhFragRef::SignalLength, JBB); - return; -}//Dblqh::fragrefLab() - -/* - * Abort on-going ops. - */ -void Dblqh::abortAddFragOps(Signal* signal) -{ - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - if (addfragptr.p->tupConnectptr != RNIL) { - jam(); - TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); - tupFragReq->userPtr = (Uint32)-1; - tupFragReq->userRef = addfragptr.p->tupConnectptr; - sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); - addfragptr.p->tupConnectptr = RNIL; - } - if (addfragptr.p->tuxConnectptr != RNIL) { - jam(); - TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); - tuxFragReq->userPtr = (Uint32)-1; - tuxFragReq->userRef = addfragptr.p->tuxConnectptr; - sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); - addfragptr.p->tuxConnectptr = RNIL; - } -} - -/* ************>> */ -/* ACCFRAGREF > */ -/* ************>> */ -void Dblqh::execACCFRAGREF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - terrorCode = signal->theData[1]; - ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG); - addfragptr.p->addfragErrorCode = terrorCode; - - const Uint32 ref = addfragptr.p->dictBlockref; - const Uint32 senderData = addfragptr.p->dictConnectptr; - const Uint32 errorCode = addfragptr.p->addfragErrorCode; - releaseAddfragrec(signal); - fragrefLab(signal, ref, senderData, errorCode); - - return; -}//Dblqh::execACCFRAGREF() - -/* ************>> */ -/* TUPFRAGREF > */ -/* ************>> */ -void Dblqh::execTUPFRAGREF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - terrorCode = signal->theData[1]; - fragptr.i = addfragptr.p->fragmentPtr; - c_fragment_pool.getPtr(fragptr); - addfragptr.p->addfragErrorCode = terrorCode; - - // no operation to release, just add some jams - switch 
(addfragptr.p->addfragStatus) { - case AddFragRecord::WAIT_TUP: - jam(); - break; - case AddFragRecord::WAIT_TUX: - jam(); - break; - default: - ndbrequire(false); - break; - } - abortAddFragOps(signal); - - const Uint32 ref = addfragptr.p->dictBlockref; - const Uint32 senderData = addfragptr.p->dictConnectptr; - const Uint32 errorCode = addfragptr.p->addfragErrorCode; - releaseAddfragrec(signal); - fragrefLab(signal, ref, senderData, errorCode); - -}//Dblqh::execTUPFRAGREF() - -/* ************>> */ -/* TUXFRAGREF > */ -/* ************>> */ -void Dblqh::execTUXFRAGREF(Signal* signal) -{ - jamEntry(); - execTUPFRAGREF(signal); -}//Dblqh::execTUXFRAGREF - -/* *********************> */ -/* TUP_ADD_ATTREF > */ -/* *********************> */ -void Dblqh::execTUP_ADD_ATTRREF(Signal* signal) -{ - jamEntry(); - addfragptr.i = signal->theData[0]; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - terrorCode = signal->theData[1]; - addfragptr.p->addfragErrorCode = terrorCode; - - // operation was released on the other side - switch (addfragptr.p->addfragStatus) { - case AddFragRecord::TUP_ATTR_WAIT: - jam(); - ndbrequire(addfragptr.p->tupConnectptr != RNIL); - addfragptr.p->tupConnectptr = RNIL; - break; - case AddFragRecord::TUX_ATTR_WAIT: - jam(); - ndbrequire(addfragptr.p->tuxConnectptr != RNIL); - addfragptr.p->tuxConnectptr = RNIL; - break; - default: - ndbrequire(false); - break; - } - abortAddFragOps(signal); - - const Uint32 Ref = addfragptr.p->dictBlockref; - const Uint32 senderData = addfragptr.p->dictConnectptr; - const Uint32 errorCode = addfragptr.p->addfragErrorCode; - releaseAddfragrec(signal); - - LqhAddAttrRef *const ref = (LqhAddAttrRef*)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->errorCode = errorCode; - sendSignal(Ref, GSN_LQHADDATTREF, signal, - LqhAddAttrRef::SignalLength, JBB); - -}//Dblqh::execTUP_ADD_ATTRREF() - -/* **********************> */ -/* TUX_ADD_ATTRREF > */ -/* **********************> */ -void Dblqh::execTUX_ADD_ATTRREF(Signal* signal) -{ - jamEntry(); - execTUP_ADD_ATTRREF(signal); -}//Dblqh::execTUX_ADD_ATTRREF - -void -Dblqh::execPREP_DROP_TAB_REQ(Signal* signal){ - jamEntry(); - - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - - TablerecPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec); - - Uint32 errCode = 0; - errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_PREP_DROP_TAB_REQ); - if(errCode != 0){ - jam(); - - PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = errCode; - sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal, - PrepDropTabRef::SignalLength, JBB); - return; - } - - tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_ONGOING; - tabPtr.p->waitingTC.clear(); - tabPtr.p->waitingDIH.clear(); - - PrepDropTabConf * conf = (PrepDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(senderRef, GSN_PREP_DROP_TAB_CONF, signal, - PrepDropTabConf::SignalLength, JBB); - - signal->theData[0] = ZPREP_DROP_TABLE; - signal->theData[1] = tabPtr.i; - signal->theData[2] = senderRef; - signal->theData[3] = senderData; - checkDropTab(signal); -} - -void -Dblqh::checkDropTab(Signal* signal){ - - TablerecPtr tabPtr; - tabPtr.i = signal->theData[1]; - ptrCheckGuard(tabPtr, ctabrecFileSize, 
tablerec); - - ndbrequire(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING); - - if(tabPtr.p->usageCount > 0){ - jam(); - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4); - return; - } - - bool lcpDone = true; - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - if(lcpPtr.p->lcpState != LcpRecord::LCP_IDLE){ - jam(); - - if(lcpPtr.p->currentFragment.lcpFragOrd.tableId == tabPtr.i){ - jam(); - lcpDone = false; - } - - if(lcpPtr.p->lcpQueued && - lcpPtr.p->queuedFragment.lcpFragOrd.tableId == tabPtr.i){ - jam(); - lcpDone = false; - } - } - - if(!lcpDone){ - jam(); - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 4); - return; - } - - tabPtr.p->tableStatus = Tablerec::PREP_DROP_TABLE_DONE; - - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - for(Uint32 i = 1; i<MAX_NDB_NODES; i++){ - if(tabPtr.p->waitingTC.get(i)){ - tabPtr.p->waitingTC.clear(i); - sendSignal(calcTcBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal, - WaitDropTabConf::SignalLength, JBB); - } - if(tabPtr.p->waitingDIH.get(i)){ - tabPtr.p->waitingDIH.clear(i); - sendSignal(calcDihBlockRef(i), GSN_WAIT_DROP_TAB_CONF, signal, - WaitDropTabConf::SignalLength, JBB); - } - } -} - -void -Dblqh::execWAIT_DROP_TAB_REQ(Signal* signal){ - jamEntry(); - WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtr(); - - TablerecPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec); - - Uint32 senderRef = req->senderRef; - Uint32 nodeId = refToNode(senderRef); - Uint32 blockNo = refToBlock(senderRef); - - if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING){ - jam(); - switch(blockNo){ - case DBTC: - tabPtr.p->waitingTC.set(nodeId); - break; - case DBDIH: - tabPtr.p->waitingDIH.set(nodeId); - break; - default: - ndbrequire(false); - } - return; - } - - if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){ - jam(); - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_WAIT_DROP_TAB_CONF, signal, - WaitDropTabConf::SignalLength, JBB); - return; - } - - WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtrSend(); - ref->tableId = tabPtr.i; - ref->senderRef = reference(); - - bool ok = false; - switch(tabPtr.p->tableStatus){ - case Tablerec::TABLE_DEFINED: - ok = true; - ref->errorCode = WaitDropTabRef::IllegalTableState; - break; - case Tablerec::NOT_DEFINED: - ok = true; - ref->errorCode = WaitDropTabRef::NoSuchTable; - break; - case Tablerec::ADD_TABLE_ONGOING: - ok = true; - ref->errorCode = WaitDropTabRef::IllegalTableState; - break; - case Tablerec::PREP_DROP_TABLE_ONGOING: - case Tablerec::PREP_DROP_TABLE_DONE: - // Should have been take care of above - ndbrequire(false); - } - ndbrequire(ok); - ref->tableStatus = tabPtr.p->tableStatus; - sendSignal(senderRef, GSN_WAIT_DROP_TAB_REF, signal, - WaitDropTabRef::SignalLength, JBB); - return; -} - -void -Dblqh::execDROP_TAB_REQ(Signal* signal){ - jamEntry(); - - DropTabReq* req = (DropTabReq*)signal->getDataPtr(); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - - TablerecPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec); - - do { - if(req->requestType == DropTabReq::RestartDropTab){ - jam(); - break; - } - - if(req->requestType == DropTabReq::OnlineDropTab){ - jam(); - Uint32 errCode = 0; - errCode = checkDropTabState(tabPtr.p->tableStatus, GSN_DROP_TAB_REQ); - if(errCode != 0){ - jam(); - 
- DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = errCode; - sendSignal(senderRef, GSN_DROP_TAB_REF, signal, - DropTabRef::SignalLength, JBB); - return; - } - } - - removeTable(tabPtr.i); - - } while(false); - - ndbrequire(tabPtr.p->usageCount == 0); - tabPtr.p->tableStatus = Tablerec::NOT_DEFINED; - - DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend(); - dropConf->senderRef = reference(); - dropConf->senderData = senderData; - dropConf->tableId = tabPtr.i; - sendSignal(senderRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); -} - -Uint32 -Dblqh::checkDropTabState(Tablerec::TableStatus status, Uint32 gsn) const{ - - if(gsn == GSN_PREP_DROP_TAB_REQ){ - switch(status){ - case Tablerec::NOT_DEFINED: - jam(); - // Fall through - case Tablerec::ADD_TABLE_ONGOING: - jam(); - return PrepDropTabRef::NoSuchTable; - break; - case Tablerec::PREP_DROP_TABLE_ONGOING: - jam(); - return PrepDropTabRef::PrepDropInProgress; - break; - case Tablerec::PREP_DROP_TABLE_DONE: - jam(); - return PrepDropTabRef::DropInProgress; - break; - case Tablerec::TABLE_DEFINED: - jam(); - return 0; - break; - } - ndbrequire(0); - } - - if(gsn == GSN_DROP_TAB_REQ){ - switch(status){ - case Tablerec::NOT_DEFINED: - jam(); - // Fall through - case Tablerec::ADD_TABLE_ONGOING: - jam(); - return DropTabRef::NoSuchTable; - break; - case Tablerec::PREP_DROP_TABLE_ONGOING: - jam(); - return DropTabRef::PrepDropInProgress; - break; - case Tablerec::PREP_DROP_TABLE_DONE: - jam(); - return 0; - break; - case Tablerec::TABLE_DEFINED: - jam(); - return DropTabRef::DropWoPrep; - } - ndbrequire(0); - } - ndbrequire(0); - return RNIL; -} - -void Dblqh::removeTable(Uint32 tableId) -{ - tabptr.i = tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragid[i] != ZNIL) { - jam(); - deleteFragrec(tabptr.p->fragid[i]); - }//if - }//for -}//Dblqh::removeTable() - -void -Dblqh::execALTER_TAB_REQ(Signal* signal) -{ - jamEntry(); - AlterTabReq* const req = (AlterTabReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 changeMask = req->changeMask; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - const Uint32 gci = req->gci; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) req->requestType; - - TablerecPtr tablePtr; - tablePtr.i = tableId; - ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec); - tablePtr.p->schemaVersion = tableVersion; - - // Request handled successfully - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->changeMask = changeMask; - conf->tableId = tableId; - conf->tableVersion = tableVersion; - conf->gci = gci; - conf->requestType = requestType; - sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal, - AlterTabConf::SignalLength, JBB); -} - -/* ************************************************************************>> - * TIME_SIGNAL: Handles time-out of local operations. This is a clean-up - * handler. If no other measure has succeeded in cleaning up after time-outs - * or else then this routine will remove the transaction after 120 seconds of - * inactivity. The check is performed once per 10 second. Sender is QMGR. 
- * ************************************************************************>> */ -void Dblqh::execTIME_SIGNAL(Signal* signal) -{ - jamEntry(); - cLqhTimeOutCount++; - cLqhTimeOutCheckCount++; - if (cLqhTimeOutCheckCount < 10) { - jam(); - return; - }//if - cLqhTimeOutCheckCount = 0; -#ifdef VM_TRACE - TcConnectionrecPtr tTcConptr; - - for (tTcConptr.i = 0; tTcConptr.i < ctcConnectrecFileSize; - tTcConptr.i++) { - jam(); - ptrAss(tTcConptr, tcConnectionrec); - if ((tTcConptr.p->tcTimer != 0) && - ((tTcConptr.p->tcTimer + 120) < cLqhTimeOutCount)) { - ndbout << "Dblqh::execTIME_SIGNAL"<noOfPackedWordsLqh > 0) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - }//if - if (Thostptr.p->noOfPackedWordsTc > 0) { - jam(); - sendPackedSignalTc(signal, Thostptr.p); - }//if - Thostptr.p->inPackedList = false; - }//for - cpackedListIndex = 0; - return; -}//Dblqh::execSEND_PACKED() - -void -Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId) -{ - Uint32 TpackedListIndex = cpackedListIndex; - if (ahostptr->inPackedList == false) { - jam(); - ahostptr->inPackedList = true; - cpackedList[TpackedListIndex] = hostId; - cpackedListIndex = TpackedListIndex + 1; - }//if -}//Dblqh::updatePackedList() - -void -Dblqh::execREAD_PSEUDO_REQ(Signal* signal){ - jamEntry(); - TcConnectionrecPtr regTcPtr; - regTcPtr.i = signal->theData[0]; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - - if (signal->theData[1] == AttributeHeader::RANGE_NO) { - signal->theData[0] = regTcPtr.p->m_scan_curr_range_no; - } - else if (signal->theData[1] != AttributeHeader::RECORDS_IN_RANGE) - { - jam(); - FragrecordPtr regFragptr; - regFragptr.i = regTcPtr.p->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - - signal->theData[0] = regFragptr.p->accFragptr; - EXECUTE_DIRECT(DBACC, GSN_READ_PSEUDO_REQ, signal, 2); - } - else - { - jam(); - // scanptr gets reset somewhere within the timeslice - ScanRecordPtr tmp; - tmp.i = regTcPtr.p->tcScanRec; - c_scanRecordPool.getPtr(tmp); - signal->theData[0] = tmp.p->scanAccPtr; - EXECUTE_DIRECT(DBTUX, GSN_READ_PSEUDO_REQ, signal, 2); - } -} - -/* ************>> */ -/* TUPKEYCONF > */ -/* ************>> */ -void Dblqh::execTUPKEYCONF(Signal* signal) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr(); - Uint32 tcIndex = tupKeyConf->userPtr; - jamEntry(); - tcConnectptr.i = tcIndex; - ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec); - TcConnectionrec * regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; - - FragrecordPtr regFragptr; - regFragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - fragptr = regFragptr; - - switch (tcConnectptr.p->transactionState) { - case TcConnectionrec::WAIT_TUP: - jam(); - if (tcConnectptr.p->seqNoReplica == 0) // Primary replica - tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers; - tupkeyConfLab(signal); - break; - case TcConnectionrec::COPY_TUPKEY: - jam(); - copyTupkeyConfLab(signal); - break; - case TcConnectionrec::SCAN_TUPKEY: - jam(); - scanTupkeyConfLab(signal); - break; - case TcConnectionrec::WAIT_TUP_TO_ABORT: - jam(); -/* ------------------------------------------------------------------------- */ -// Abort was not ready to start until this signal came back. Now we are ready -// to start the abort. 
-/* ------------------------------------------------------------------------- */ - if (unlikely(activeCreat == Fragrecord::AC_NR_COPY)) - { - jam(); - ndbrequire(regTcPtr->m_nr_delete.m_cnt); - regTcPtr->m_nr_delete.m_cnt--; - if (regTcPtr->m_nr_delete.m_cnt) - { - jam(); - /** - * Let operation wait for pending NR operations - * even for before writing log...(as it's simpler) - */ - -#ifdef VM_TRACE - /** - * Only disk table can have pending ops... - */ - TablerecPtr tablePtr; - tablePtr.i = regTcPtr->tableref; - ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec); - ndbrequire(tablePtr.p->m_disk_table); -#endif - return; - } - } - - abortCommonLab(signal); - break; - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::ABORT_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */ -/* ------------------------------------------------------------------------- */ - break; - default: - ndbrequire(false); - break; - }//switch - -}//Dblqh::execTUPKEYCONF() - -/* ************> */ -/* TUPKEYREF > */ -/* ************> */ -void Dblqh::execTUPKEYREF(Signal* signal) -{ - const TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtr(); - - jamEntry(); - tcConnectptr.i = tupKeyRef->userRef; - terrorCode = tupKeyRef->errorCode; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec* regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; - - FragrecordPtr regFragptr; - regFragptr.i = regTcPtr->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - fragptr = regFragptr; - - TRACE_OP(regTcPtr, "TUPKEYREF"); - - if (unlikely(activeCreat == Fragrecord::AC_NR_COPY)) - { - jam(); - ndbrequire(regTcPtr->m_nr_delete.m_cnt); - regTcPtr->m_nr_delete.m_cnt--; - ndbassert(regTcPtr->transactionState == TcConnectionrec::WAIT_TUP || - regTcPtr->transactionState ==TcConnectionrec::WAIT_TUP_TO_ABORT); - } - - switch (tcConnectptr.p->transactionState) { - case TcConnectionrec::WAIT_TUP: - jam(); - abortErrorLab(signal); - break; - case TcConnectionrec::COPY_TUPKEY: - ndbrequire(false); - break; - case TcConnectionrec::SCAN_TUPKEY: - jam(); - scanTupkeyRefLab(signal); - break; - case TcConnectionrec::WAIT_TUP_TO_ABORT: - jam(); -/* ------------------------------------------------------------------------- */ -// Abort was not ready to start until this signal came back. Now we are ready -// to start the abort. -/* ------------------------------------------------------------------------- */ - abortCommonLab(signal); - break; - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::ABORT_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/* IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. 
*/ -/* ------------------------------------------------------------------------- */ - break; - default: - ndbrequire(false); - break; - }//switch -}//Dblqh::execTUPKEYREF() - -void Dblqh::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr) -{ - Uint32 noOfWords = ahostptr->noOfPackedWordsLqh; - BlockReference hostRef = ahostptr->hostLqhBlockRef; - MEMCOPY_NO_WORDS(&signal->theData[0], - &ahostptr->packedWordsLqh[0], - noOfWords); - sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB); - ahostptr->noOfPackedWordsLqh = 0; -}//Dblqh::sendPackedSignalLqh() - -void Dblqh::sendPackedSignalTc(Signal* signal, HostRecord * ahostptr) -{ - Uint32 noOfWords = ahostptr->noOfPackedWordsTc; - BlockReference hostRef = ahostptr->hostTcBlockRef; - MEMCOPY_NO_WORDS(&signal->theData[0], - &ahostptr->packedWordsTc[0], - noOfWords); - sendSignal(hostRef, GSN_PACKED_SIGNAL, signal, noOfWords, JBB); - ahostptr->noOfPackedWordsTc = 0; -}//Dblqh::sendPackedSignalTc() - -void Dblqh::sendCommitLqh(Signal* signal, BlockReference alqhBlockref) -{ - HostRecordPtr Thostptr; - Thostptr.i = refToNode(alqhBlockref); - ptrCheckGuard(Thostptr, chostFileSize, hostRecord); - if (Thostptr.p->noOfPackedWordsLqh > 21) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - Uint32 pos = Thostptr.p->noOfPackedWordsLqh; - Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMIT << 28); - Uint32 gci = tcConnectptr.p->gci; - Uint32 transid1 = tcConnectptr.p->transid[0]; - Uint32 transid2 = tcConnectptr.p->transid[1]; - Thostptr.p->packedWordsLqh[pos] = ptrAndType; - Thostptr.p->packedWordsLqh[pos + 1] = gci; - Thostptr.p->packedWordsLqh[pos + 2] = transid1; - Thostptr.p->packedWordsLqh[pos + 3] = transid2; - Thostptr.p->noOfPackedWordsLqh = pos + 4; -}//Dblqh::sendCommitLqh() - -void Dblqh::sendCompleteLqh(Signal* signal, BlockReference alqhBlockref) -{ - HostRecordPtr Thostptr; - Thostptr.i = refToNode(alqhBlockref); - ptrCheckGuard(Thostptr, chostFileSize, hostRecord); - if (Thostptr.p->noOfPackedWordsLqh > 22) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - Uint32 pos = Thostptr.p->noOfPackedWordsLqh; - Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETE << 28); - Uint32 transid1 = tcConnectptr.p->transid[0]; - Uint32 transid2 = tcConnectptr.p->transid[1]; - Thostptr.p->packedWordsLqh[pos] = ptrAndType; - Thostptr.p->packedWordsLqh[pos + 1] = transid1; - Thostptr.p->packedWordsLqh[pos + 2] = transid2; - Thostptr.p->noOfPackedWordsLqh = pos + 3; -}//Dblqh::sendCompleteLqh() - -void Dblqh::sendCommittedTc(Signal* signal, BlockReference atcBlockref) -{ - HostRecordPtr Thostptr; - Thostptr.i = refToNode(atcBlockref); - ptrCheckGuard(Thostptr, chostFileSize, hostRecord); - if (Thostptr.p->noOfPackedWordsTc > 22) { - jam(); - sendPackedSignalTc(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - Uint32 pos = Thostptr.p->noOfPackedWordsTc; - Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMMITTED << 28); - Uint32 transid1 = tcConnectptr.p->transid[0]; - Uint32 transid2 = tcConnectptr.p->transid[1]; - Thostptr.p->packedWordsTc[pos] = ptrAndType; - Thostptr.p->packedWordsTc[pos + 1] = transid1; - Thostptr.p->packedWordsTc[pos + 2] = transid2; - Thostptr.p->noOfPackedWordsTc = pos + 3; -}//Dblqh::sendCommittedTc() - -void Dblqh::sendCompletedTc(Signal* signal, BlockReference 
atcBlockref) -{ - HostRecordPtr Thostptr; - Thostptr.i = refToNode(atcBlockref); - ptrCheckGuard(Thostptr, chostFileSize, hostRecord); - if (Thostptr.p->noOfPackedWordsTc > 22) { - jam(); - sendPackedSignalTc(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - Uint32 pos = Thostptr.p->noOfPackedWordsTc; - Uint32 ptrAndType = tcConnectptr.p->clientConnectrec | (ZCOMPLETED << 28); - Uint32 transid1 = tcConnectptr.p->transid[0]; - Uint32 transid2 = tcConnectptr.p->transid[1]; - Thostptr.p->packedWordsTc[pos] = ptrAndType; - Thostptr.p->packedWordsTc[pos + 1] = transid1; - Thostptr.p->packedWordsTc[pos + 2] = transid2; - Thostptr.p->noOfPackedWordsTc = pos + 3; -}//Dblqh::sendCompletedTc() - -void Dblqh::sendLqhkeyconfTc(Signal* signal, BlockReference atcBlockref) -{ - LqhKeyConf* lqhKeyConf; - HostRecordPtr Thostptr; - - bool packed= true; - Thostptr.i = refToNode(atcBlockref); - ptrCheckGuard(Thostptr, chostFileSize, hostRecord); - if (refToBlock(atcBlockref) == DBTC) { - jam(); -/******************************************************************* -// This signal was intended for DBTC as part of the normal transaction -// execution. -********************************************************************/ - if (Thostptr.p->noOfPackedWordsTc > (25 - LqhKeyConf::SignalLength)) { - jam(); - sendPackedSignalTc(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - lqhKeyConf = (LqhKeyConf *) - &Thostptr.p->packedWordsTc[Thostptr.p->noOfPackedWordsTc]; - Thostptr.p->noOfPackedWordsTc += LqhKeyConf::SignalLength; - } else if(refToBlock(atcBlockref) == DBLQH){ - jam(); -/******************************************************************* -// This signal was intended for DBLQH as part of log execution or -// node recovery. -********************************************************************/ - if (Thostptr.p->noOfPackedWordsLqh > (25 - LqhKeyConf::SignalLength)) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - lqhKeyConf = (LqhKeyConf *) - &Thostptr.p->packedWordsLqh[Thostptr.p->noOfPackedWordsLqh]; - Thostptr.p->noOfPackedWordsLqh += LqhKeyConf::SignalLength; - } else { - packed= false; - lqhKeyConf = (LqhKeyConf *)signal->getDataPtrSend(); - } - Uint32 ptrAndType = tcConnectptr.i | (ZLQHKEYCONF << 28); - Uint32 tcOprec = tcConnectptr.p->tcOprec; - Uint32 ownRef = cownref; - Uint32 readlenAi = tcConnectptr.p->readlenAi; - Uint32 transid1 = tcConnectptr.p->transid[0]; - Uint32 transid2 = tcConnectptr.p->transid[1]; - Uint32 noFiredTriggers = tcConnectptr.p->noFiredTriggers; - lqhKeyConf->connectPtr = ptrAndType; - lqhKeyConf->opPtr = tcOprec; - lqhKeyConf->userRef = ownRef; - lqhKeyConf->readLen = readlenAi; - lqhKeyConf->transId1 = transid1; - lqhKeyConf->transId2 = transid2; - lqhKeyConf->noFiredTriggers = noFiredTriggers; - - if(!packed) - { - lqhKeyConf->connectPtr = tcConnectptr.i; - if(Thostptr.i == 0 || Thostptr.i == getOwnNodeId()) - { - EXECUTE_DIRECT(refToBlock(atcBlockref), GSN_LQHKEYCONF, - signal, LqhKeyConf::SignalLength); - } - else - { - sendSignal(atcBlockref, GSN_LQHKEYCONF, - signal, LqhKeyConf::SignalLength, JBB); - } - } -}//Dblqh::sendLqhkeyconfTc() - -/* ************************************************************************>> - * KEYINFO: Get tuple request from DBTC. Next step is to contact DBACC to get - * key to tuple if all key/attrinfo has been received, else for more attrinfo - * signals. 
- * ************************************************************************>> */ -void Dblqh::execKEYINFO(Signal* signal) -{ - Uint32 tcOprec = signal->theData[0]; - Uint32 transid1 = signal->theData[1]; - Uint32 transid2 = signal->theData[2]; - jamEntry(); - if (findTransaction(transid1, transid2, tcOprec) != ZOK) { - jam(); - return; - }//if - - receive_keyinfo(signal, - signal->theData+KeyInfo::HeaderLength, - signal->getLength()-KeyInfo::HeaderLength); -} - -void -Dblqh::receive_keyinfo(Signal* signal, - Uint32 * data, Uint32 len) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TcConnectionrec::TransactionState state = regTcPtr->transactionState; - if (state != TcConnectionrec::WAIT_TUPKEYINFO && - state != TcConnectionrec::WAIT_SCAN_AI) - { - jam(); -/*****************************************************************************/ -/* TRANSACTION WAS ABORTED, THIS IS MOST LIKELY A SIGNAL BELONGING TO THE */ -/* ABORTED TRANSACTION. THUS IGNORE THE SIGNAL. */ -/*****************************************************************************/ - return; - }//if - - Uint32 errorCode = - handleLongTupKey(signal, data, len); - - if (errorCode != 0) { - if (errorCode == 1) { - jam(); - return; - }//if - jam(); - terrorCode = errorCode; - if(state == TcConnectionrec::WAIT_TUPKEYINFO) - abortErrorLab(signal); - else - abort_scan(signal, regTcPtr->tcScanRec, errorCode); - return; - }//if - if(state == TcConnectionrec::WAIT_TUPKEYINFO) - { - FragrecordPtr regFragptr; - regFragptr.i = regTcPtr->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - fragptr = regFragptr; - endgettupkeyLab(signal); - } - return; -}//Dblqh::execKEYINFO() - -/* ------------------------------------------------------------------------- */ -/* FILL IN KEY DATA INTO DATA BUFFERS. */ -/* ------------------------------------------------------------------------- */ -Uint32 Dblqh::handleLongTupKey(Signal* signal, - Uint32* dataPtr, - Uint32 len) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 dataPos = 0; - Uint32 total = regTcPtr->save1 + len; - Uint32 primKeyLen = regTcPtr->primKeyLen; - while (dataPos < len) { - if (cfirstfreeDatabuf == RNIL) { - jam(); - return ZGET_DATAREC_ERROR; - }//if - seizeTupkeybuf(signal); - Databuf * const regDataPtr = databufptr.p; - Uint32 data0 = dataPtr[dataPos]; - Uint32 data1 = dataPtr[dataPos + 1]; - Uint32 data2 = dataPtr[dataPos + 2]; - Uint32 data3 = dataPtr[dataPos + 3]; - regDataPtr->data[0] = data0; - regDataPtr->data[1] = data1; - regDataPtr->data[2] = data2; - regDataPtr->data[3] = data3; - dataPos += 4; - } - - regTcPtr->save1 = total; - return (total >= primKeyLen ? 0 : 1); -}//Dblqh::handleLongTupKey() - -/* ------------------------------------------------------------------------- */ -/* ------- HANDLE ATTRINFO SIGNALS ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ************************************************************************>> */ -/* ATTRINFO: Continuation of KEYINFO signal (except for scans that do not use*/ -/* any KEYINFO). When all key and attribute info is received we contact DBACC*/ -/* for index handling. 
*/ -/* ************************************************************************>> */ -void Dblqh::execATTRINFO(Signal* signal) -{ - Uint32 tcOprec = signal->theData[0]; - Uint32 transid1 = signal->theData[1]; - Uint32 transid2 = signal->theData[2]; - jamEntry(); - if (findTransaction(transid1, - transid2, - tcOprec) != ZOK) { - jam(); - return; - }//if - - receive_attrinfo(signal, - signal->getDataPtrSend()+AttrInfo::HeaderLength, - signal->getLength()-AttrInfo::HeaderLength); -}//Dblqh::execATTRINFO() - -void -Dblqh::receive_attrinfo(Signal* signal, Uint32 * dataPtr, Uint32 length) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 totReclenAi = regTcPtr->totReclenAi; - Uint32 currReclenAi = regTcPtr->currReclenAi + length; - regTcPtr->currReclenAi = currReclenAi; - if (totReclenAi == currReclenAi) { - switch (regTcPtr->transactionState) { - case TcConnectionrec::WAIT_ATTR: - { - jam(); - fragptr.i = regTcPtr->fragmentptr; - c_fragment_pool.getPtr(fragptr); - lqhAttrinfoLab(signal, dataPtr, length); - endgettupkeyLab(signal); - return; - break; - } - case TcConnectionrec::WAIT_SCAN_AI: - jam(); - scanAttrinfoLab(signal, dataPtr, length); - return; - break; - case TcConnectionrec::WAIT_TUP_TO_ABORT: - case TcConnectionrec::LOG_ABORT_QUEUED: - case TcConnectionrec::ABORT_QUEUED: - case TcConnectionrec::ABORT_STOPPED: - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::WAIT_AI_AFTER_ABORT: - jam(); - aiStateErrorCheckLab(signal, dataPtr,length); - return; - break; - default: - jam(); - ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE); - break; - }//switch - } else if (currReclenAi < totReclenAi) { - jam(); - switch (regTcPtr->transactionState) { - case TcConnectionrec::WAIT_ATTR: - jam(); - lqhAttrinfoLab(signal, dataPtr, length); - return; - break; - case TcConnectionrec::WAIT_SCAN_AI: - jam(); - scanAttrinfoLab(signal, dataPtr, length); - return; - break; - case TcConnectionrec::WAIT_TUP_TO_ABORT: - case TcConnectionrec::LOG_ABORT_QUEUED: - case TcConnectionrec::ABORT_QUEUED: - case TcConnectionrec::ABORT_STOPPED: - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::WAIT_AI_AFTER_ABORT: - jam(); - aiStateErrorCheckLab(signal, dataPtr, length); - return; - break; - default: - jam(); - ndbrequire(regTcPtr->abortState != TcConnectionrec::ABORT_IDLE); - break; - }//switch - } else { - switch (regTcPtr->transactionState) { - case TcConnectionrec::WAIT_SCAN_AI: - jam(); - scanAttrinfoLab(signal, dataPtr, length); - return; - break; - default: - ndbout_c("%d", regTcPtr->transactionState); - ndbrequire(false); - break; - }//switch - }//if - return; -} - -/* ************************************************************************>> */ -/* TUP_ATTRINFO: Interpreted execution in DBTUP generates redo-log info */ -/* which is sent back to DBLQH for logging. This is because the decision */ -/* to execute or not is made in DBTUP and thus we cannot start logging until */ -/* DBTUP part has been run. 
*/ -/* ************************************************************************>> */ -void Dblqh::execTUP_ATTRINFO(Signal* signal) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 length = signal->length() - 3; - Uint32 tcIndex = signal->theData[0]; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - jamEntry(); - tcConnectptr.i = tcIndex; - ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec); - ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_TUP); - if (saveTupattrbuf(signal, &signal->theData[3], length) == ZOK) { - return; - } else { - jam(); -/* ------------------------------------------------------------------------- */ -/* WE ARE WAITING FOR RESPONSE FROM TUP HERE. THUS WE NEED TO */ -/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */ -/* ------------------------------------------------------------------------- */ - localAbortStateHandlerLab(signal); - }//if -}//Dblqh::execTUP_ATTRINFO() - -/* ------------------------------------------------------------------------- */ -/* ------- HANDLE ATTRINFO FROM LQH ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->operation != ZREAD) { - if (regTcPtr->operation != ZDELETE) - { - if (regTcPtr->opExec != 1) { - if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { - ; - } else { - jam(); -/* ------------------------------------------------------------------------- */ -/* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */ -/* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */ -/* ------------------------------------------------------------------------- */ - localAbortStateHandlerLab(signal); - return; - }//if - }//if - }//if - } - c_tup->receive_attrinfo(signal, regTcPtr->tupConnectrec, dataPtr, length); -}//Dblqh::lqhAttrinfoLab() - -/* ------------------------------------------------------------------------- */ -/* ------ FIND TRANSACTION BY USING HASH TABLE ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -int Dblqh::findTransaction(UintR Transid1, UintR Transid2, UintR TcOprec) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - TcConnectionrecPtr locTcConnectptr; - - Uint32 ThashIndex = (Transid1 ^ TcOprec) & 1023; - locTcConnectptr.i = ctransidHash[ThashIndex]; - while (locTcConnectptr.i != RNIL) { - ptrCheckGuard(locTcConnectptr, ttcConnectrecFileSize, regTcConnectionrec); - if ((locTcConnectptr.p->transid[0] == Transid1) && - (locTcConnectptr.p->transid[1] == Transid2) && - (locTcConnectptr.p->tcOprec == TcOprec)) { -/* FIRST PART OF TRANSACTION CORRECT */ -/* SECOND PART ALSO CORRECT */ -/* THE OPERATION RECORD POINTER IN TC WAS ALSO CORRECT */ - jam(); - tcConnectptr.i = locTcConnectptr.i; - tcConnectptr.p = locTcConnectptr.p; - return (int)ZOK; - }//if - jam(); -/* THIS WAS NOT THE TRANSACTION WHICH WAS SOUGHT */ - locTcConnectptr.i = locTcConnectptr.p->nextHashRec; - }//while -/* WE DID NOT FIND THE TRANSACTION, REPORT NOT FOUND */ - return (int)ZNOT_FOUND; -}//Dblqh::findTransaction() - -/* ------------------------------------------------------------------------- */ -/* ------- SAVE ATTRINFO FROM TUP IN ATTRINBUF ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -int 
Dblqh::saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 len) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - while(len) - { - Uint32 length = len > AttrInfo::DataLength ? AttrInfo::DataLength : len; - Uint32 tfirstfreeAttrinbuf = cfirstfreeAttrinbuf; - Uint32 currTupAiLen = regTcPtr->currTupAiLen; - if (tfirstfreeAttrinbuf == RNIL) { - jam(); - terrorCode = ZGET_ATTRINBUF_ERROR; - return ZGET_ATTRINBUF_ERROR; - }//if - seizeAttrinbuf(signal); - Attrbuf * const regAttrPtr = attrinbufptr.p; - MEMCOPY_NO_WORDS(&regAttrPtr->attrbuf[0], dataPtr, length); - regTcPtr->currTupAiLen = currTupAiLen + length; - regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = length; - - len -= length; - dataPtr += length; - } - return ZOK; -}//Dblqh::saveTupattrbuf() - -/* ========================================================================== - * ======= SEIZE ATTRIBUTE IN BUFFER ======= - * - * GET A NEW ATTRINBUF AND SETS ATTRINBUFPTR. - * ========================================================================= */ -void Dblqh::seizeAttrinbuf(Signal* signal) -{ - AttrbufPtr tmpAttrinbufptr; - AttrbufPtr regAttrinbufptr; - Attrbuf *regAttrbuf = attrbuf; - Uint32 tattrinbufFileSize = cattrinbufFileSize; - - regAttrinbufptr.i = seize_attrinbuf(); - tmpAttrinbufptr.i = tcConnectptr.p->lastAttrinbuf; - ptrCheckGuard(regAttrinbufptr, tattrinbufFileSize, regAttrbuf); - tcConnectptr.p->lastAttrinbuf = regAttrinbufptr.i; - regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = 0; - if (tmpAttrinbufptr.i == RNIL) { - jam(); - tcConnectptr.p->firstAttrinbuf = regAttrinbufptr.i; - } else { - jam(); - ptrCheckGuard(tmpAttrinbufptr, tattrinbufFileSize, regAttrbuf); - tmpAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = regAttrinbufptr.i; - }//if - regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; - attrinbufptr = regAttrinbufptr; -}//Dblqh::seizeAttrinbuf() - -/* ========================================================================== - * ======= SEIZE TC CONNECT RECORD ======= - * - * GETS A NEW TC CONNECT RECORD FROM FREELIST. - * ========================================================================= */ -void Dblqh::seizeTcrec() -{ - TcConnectionrecPtr locTcConnectptr; - - locTcConnectptr.i = cfirstfreeTcConrec; - ptrCheckGuard(locTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - Uint32 nextTc = locTcConnectptr.p->nextTcConnectrec; - locTcConnectptr.p->nextTcConnectrec = RNIL; - locTcConnectptr.p->clientConnectrec = RNIL; - locTcConnectptr.p->clientBlockref = RNIL; - locTcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE; - locTcConnectptr.p->tcTimer = cLqhTimeOutCount; - locTcConnectptr.p->tableref = RNIL; - locTcConnectptr.p->savePointId = 0; - locTcConnectptr.p->gci = 0; - cfirstfreeTcConrec = nextTc; - tcConnectptr = locTcConnectptr; - locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED; -}//Dblqh::seizeTcrec() - -/* ========================================================================== - * ======= SEIZE DATA BUFFER ======= - * ========================================================================= */ -void Dblqh::seizeTupkeybuf(Signal* signal) -{ - Databuf *regDatabuf = databuf; - DatabufPtr tmpDatabufptr; - DatabufPtr regDatabufptr; - Uint32 tdatabufFileSize = cdatabufFileSize; - -/* ------- GET A DATABUF. 
------- */ - regDatabufptr.i = cfirstfreeDatabuf; - tmpDatabufptr.i = tcConnectptr.p->lastTupkeybuf; - ptrCheckGuard(regDatabufptr, tdatabufFileSize, regDatabuf); - Uint32 nextFirst = regDatabufptr.p->nextDatabuf; - tcConnectptr.p->lastTupkeybuf = regDatabufptr.i; - if (tmpDatabufptr.i == RNIL) { - jam(); - tcConnectptr.p->firstTupkeybuf = regDatabufptr.i; - } else { - jam(); - ptrCheckGuard(tmpDatabufptr, tdatabufFileSize, regDatabuf); - tmpDatabufptr.p->nextDatabuf = regDatabufptr.i; - }//if - cfirstfreeDatabuf = nextFirst; - regDatabufptr.p->nextDatabuf = RNIL; - databufptr = regDatabufptr; -}//Dblqh::seizeTupkeybuf() - -/* ------------------------------------------------------------------------- */ -/* ------- TAKE CARE OF LQHKEYREQ ------- */ -/* LQHKEYREQ IS THE SIGNAL THAT STARTS ALL OPERATIONS IN THE LQH BLOCK */ -/* THIS SIGNAL CONTAINS A LOT OF INFORMATION ABOUT WHAT TYPE OF OPERATION, */ -/* KEY INFORMATION, ATTRIBUTE INFORMATION, NODE INFORMATION AND A LOT MORE */ -/* ------------------------------------------------------------------------- */ -void Dblqh::execLQHKEYREQ(Signal* signal) -{ - UintR sig0, sig1, sig2, sig3, sig4, sig5; - Uint8 tfragDistKey; - - const LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtr(); - - sig0 = lqhKeyReq->clientConnectPtr; - if (cfirstfreeTcConrec != RNIL && !ERROR_INSERTED(5031)) { - jamEntry(); - seizeTcrec(); - } else { -/* ------------------------------------------------------------------------- */ -/* NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST. */ -/* ------------------------------------------------------------------------- */ - if (ERROR_INSERTED(5031)) { - CLEAR_ERROR_INSERT_VALUE; - } - noFreeRecordLab(signal, lqhKeyReq, ZNO_TC_CONNECT_ERROR); - return; - }//if - - if(ERROR_INSERTED(5038) && - refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){ - jam(); - SET_ERROR_INSERT_VALUE(5039); - return; - } - - c_Counters.operations++; - - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 senderRef = regTcPtr->clientBlockref = signal->senderBlockRef(); - regTcPtr->clientConnectrec = sig0; - regTcPtr->tcOprec = sig0; - regTcPtr->storedProcId = ZNIL; - - UintR TtotReclenAi = lqhKeyReq->attrLen; - sig1 = lqhKeyReq->savePointId; - sig2 = lqhKeyReq->hashValue; - UintR Treqinfo = lqhKeyReq->requestInfo; - sig4 = lqhKeyReq->tableSchemaVersion; - sig5 = lqhKeyReq->tcBlockref; - - regTcPtr->savePointId = sig1; - regTcPtr->hashValue = sig2; - const Uint32 schemaVersion = regTcPtr->schemaVersion = LqhKeyReq::getSchemaVersion(sig4); - tabptr.i = LqhKeyReq::getTableId(sig4); - regTcPtr->tcBlockref = sig5; - - const Uint8 op = LqhKeyReq::getOperation(Treqinfo); - if ((op == ZREAD || op == ZREAD_EX) && !getAllowRead()){ - noFreeRecordLab(signal, lqhKeyReq, ZNODE_SHUTDOWN_IN_PROGESS); - return; - } - - Uint32 senderVersion = getNodeInfo(refToNode(senderRef)).m_version; - - regTcPtr->totReclenAi = LqhKeyReq::getAttrLen(TtotReclenAi); - regTcPtr->tcScanInfo = lqhKeyReq->scanInfo; - regTcPtr->indTakeOver = LqhKeyReq::getScanTakeOverFlag(TtotReclenAi); - - regTcPtr->readlenAi = 0; - regTcPtr->currTupAiLen = 0; - regTcPtr->listState = TcConnectionrec::NOT_IN_LIST; - regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED; - regTcPtr->fragmentptr = RNIL; - - sig0 = lqhKeyReq->fragmentData; - sig1 = lqhKeyReq->transId1; - sig2 = lqhKeyReq->transId2; - sig3 = lqhKeyReq->variableData[0]; - sig4 = lqhKeyReq->variableData[1]; - - regTcPtr->fragmentid = LqhKeyReq::getFragmentId(sig0); - regTcPtr->nextReplica = 
LqhKeyReq::getNextReplicaNodeId(sig0); - regTcPtr->transid[0] = sig1; - regTcPtr->transid[1] = sig2; - regTcPtr->applRef = sig3; - regTcPtr->applOprec = sig4; - - regTcPtr->commitAckMarker = RNIL; - if(LqhKeyReq::getMarkerFlag(Treqinfo)){ - jam(); - - CommitAckMarkerPtr markerPtr; - m_commitAckMarkerHash.seize(markerPtr); - if(markerPtr.i == RNIL){ - noFreeRecordLab(signal, lqhKeyReq, ZNO_FREE_MARKER_RECORDS_ERROR); - return; - } - markerPtr.p->transid1 = sig1; - markerPtr.p->transid2 = sig2; - markerPtr.p->apiRef = sig3; - markerPtr.p->apiOprec = sig4; - const NodeId tcNodeId = refToNode(sig5); - markerPtr.p->tcNodeId = tcNodeId; - - CommitAckMarkerPtr tmp; -#if defined VM_TRACE || defined ERROR_INSERT -#ifdef MARKER_TRACE - ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2); -#endif - ndbrequire(!m_commitAckMarkerHash.find(tmp, * markerPtr.p)); -#endif - m_commitAckMarkerHash.add(markerPtr); - regTcPtr->commitAckMarker = markerPtr.i; - } - - regTcPtr->reqinfo = Treqinfo; - regTcPtr->lastReplicaNo = LqhKeyReq::getLastReplicaNo(Treqinfo); - regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo); - regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo); - regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo); - regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo); - UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo); - regTcPtr->apiVersionNo = 0; - regTcPtr->m_use_rowid = LqhKeyReq::getRowidFlag(Treqinfo); - regTcPtr->m_dealloc = 0; - if (unlikely(senderVersion < NDBD_ROWID_VERSION)) - { - regTcPtr->operation = op; - regTcPtr->lockType = LqhKeyReq::getLockType(Treqinfo); - } - else - { - regTcPtr->operation = (Operation_t) op == ZREAD_EX ? ZREAD : (Operation_t) op; - regTcPtr->lockType = - op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? 
ZINSERT : (Operation_t) op; - } - - if (regTcPtr->dirtyOp) - { - ndbrequire(regTcPtr->opSimple); - } - - CRASH_INSERTION2(5041, (op == ZREAD && - (regTcPtr->opSimple || regTcPtr->dirtyOp) && - refToNode(signal->senderBlockRef()) != cownNodeid)); - - regTcPtr->reclenAiLqhkey = TreclenAiLqhkey; - regTcPtr->currReclenAi = TreclenAiLqhkey; - UintR TitcKeyLen = LqhKeyReq::getKeyLen(Treqinfo); - regTcPtr->primKeyLen = TitcKeyLen; - regTcPtr->noFiredTriggers = lqhKeyReq->noFiredTriggers; - - UintR TapplAddressInd = LqhKeyReq::getApplicationAddressFlag(Treqinfo); - UintR nextPos = (TapplAddressInd << 1); - UintR TsameClientAndTcOprec = LqhKeyReq::getSameClientAndTcFlag(Treqinfo); - if (TsameClientAndTcOprec == 1) { - regTcPtr->tcOprec = lqhKeyReq->variableData[nextPos]; - nextPos++; - }//if - UintR TnextReplicasIndicator = regTcPtr->lastReplicaNo - - regTcPtr->seqNoReplica; - if (TnextReplicasIndicator > 1) { - regTcPtr->nodeAfterNext[0] = lqhKeyReq->variableData[nextPos] & 0xFFFF; - regTcPtr->nodeAfterNext[1] = lqhKeyReq->variableData[nextPos] >> 16; - nextPos++; - }//if - UintR TstoredProcIndicator = LqhKeyReq::getStoredProcFlag(TtotReclenAi); - if (TstoredProcIndicator == 1) { - regTcPtr->storedProcId = lqhKeyReq->variableData[nextPos] & ZNIL; - nextPos++; - }//if - UintR TreadLenAiIndicator = LqhKeyReq::getReturnedReadLenAIFlag(Treqinfo); - if (TreadLenAiIndicator == 1) { - regTcPtr->readlenAi = lqhKeyReq->variableData[nextPos] & ZNIL; - nextPos++; - }//if - sig0 = lqhKeyReq->variableData[nextPos + 0]; - sig1 = lqhKeyReq->variableData[nextPos + 1]; - sig2 = lqhKeyReq->variableData[nextPos + 2]; - sig3 = lqhKeyReq->variableData[nextPos + 3]; - - regTcPtr->tupkeyData[0] = sig0; - regTcPtr->tupkeyData[1] = sig1; - regTcPtr->tupkeyData[2] = sig2; - regTcPtr->tupkeyData[3] = sig3; - - if (TitcKeyLen > 0) { - if (TitcKeyLen < 4) { - nextPos += TitcKeyLen; - } else { - nextPos += 4; - }//if - } - else if (! (LqhKeyReq::getNrCopyFlag(Treqinfo))) - { - LQHKEY_error(signal, 3); - return; - }//if - - sig0 = lqhKeyReq->variableData[nextPos + 0]; - sig1 = lqhKeyReq->variableData[nextPos + 1]; - regTcPtr->m_row_id.m_page_no = sig0; - regTcPtr->m_row_id.m_page_idx = sig1; - nextPos += 2 * LqhKeyReq::getRowidFlag(Treqinfo); - - sig2 = lqhKeyReq->variableData[nextPos + 0]; - sig3 = cnewestGci; - regTcPtr->gci = LqhKeyReq::getGCIFlag(Treqinfo) ? 
sig2 : sig3; - nextPos += LqhKeyReq::getGCIFlag(Treqinfo); - - if (LqhKeyReq::getRowidFlag(Treqinfo)) - { - ndbassert(refToBlock(senderRef) != DBTC); - } - else if(op == ZINSERT) - { - ndbassert(refToBlock(senderRef) == DBTC); - } - - if ((LqhKeyReq::FixedSignalLength + nextPos + TreclenAiLqhkey) != - signal->length()) { - LQHKEY_error(signal, 2); - return; - }//if - UintR TseqNoReplica = regTcPtr->seqNoReplica; - UintR TlastReplicaNo = regTcPtr->lastReplicaNo; - if (TseqNoReplica == TlastReplicaNo) { - jam(); - regTcPtr->nextReplica = ZNIL; - } else { - if (TseqNoReplica < TlastReplicaNo) { - jam(); - regTcPtr->nextSeqNoReplica = TseqNoReplica + 1; - if ((regTcPtr->nextReplica == 0) || - (regTcPtr->nextReplica == cownNodeid)) { - LQHKEY_error(signal, 0); - }//if - } else { - LQHKEY_error(signal, 4); - return; - }//if - }//if - TcConnectionrecPtr localNextTcConnectptr; - Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023; - localNextTcConnectptr.i = ctransidHash[hashIndex]; - ctransidHash[hashIndex] = tcConnectptr.i; - regTcPtr->prevHashRec = RNIL; - regTcPtr->nextHashRec = localNextTcConnectptr.i; - if (localNextTcConnectptr.i != RNIL) { -/* -------------------------------------------------------------------------- */ -/* ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD IF IT EXISTS */ -/* -------------------------------------------------------------------------- */ - ptrCheckGuard(localNextTcConnectptr, - ctcConnectrecFileSize, tcConnectionrec); - jam(); - localNextTcConnectptr.p->prevHashRec = tcConnectptr.i; - }//if - if (tabptr.i >= ctabrecFileSize) { - LQHKEY_error(signal, 5); - return; - }//if - ptrAss(tabptr, tablerec); - if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){ - LQHKEY_abort(signal, 4); - return; - } - if(table_version_major(tabptr.p->schemaVersion) != - table_version_major(schemaVersion)){ - LQHKEY_abort(signal, 5); - return; - } - - regTcPtr->tableref = tabptr.i; - regTcPtr->m_disk_table = tabptr.p->m_disk_table; - if(refToBlock(signal->senderBlockRef()) == RESTORE) - regTcPtr->m_disk_table &= !LqhKeyReq::getNoDiskFlag(Treqinfo); - else if(op == ZREAD || op == ZREAD_EX || op == ZUPDATE) - regTcPtr->m_disk_table &= !LqhKeyReq::getNoDiskFlag(Treqinfo); - - tabptr.p->usageCount++; - - if (!getFragmentrec(signal, regTcPtr->fragmentid)) { - LQHKEY_error(signal, 6); - return; - }//if - - if (LqhKeyReq::getNrCopyFlag(Treqinfo)) - { - ndbassert(refToBlock(senderRef) == DBLQH); - ndbassert(LqhKeyReq::getRowidFlag(Treqinfo)); - if (! 
(fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION)) - { - ndbout_c("fragptr.p->fragStatus: %d", - fragptr.p->fragStatus); - CRASH_INSERTION(5046); - } - ndbassert(fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION); - fragptr.p->m_copy_started_state = Fragrecord::AC_NR_COPY; - } - - Uint8 TcopyType = fragptr.p->fragCopy; - Uint32 logPart = fragptr.p->m_log_part_ptr_i; - tfragDistKey = fragptr.p->fragDistributionKey; - if (fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION) { - jam(); - regTcPtr->activeCreat = fragptr.p->m_copy_started_state; - CRASH_INSERTION(5002); - CRASH_INSERTION2(5042, tabptr.i == c_error_insert_table_id); - } else { - regTcPtr->activeCreat = Fragrecord::AC_NORMAL; - }//if - regTcPtr->replicaType = TcopyType; - regTcPtr->fragmentptr = fragptr.i; - regTcPtr->m_log_part_ptr_i = logPart; - Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi); - if ((tfragDistKey != TdistKey) && - (regTcPtr->seqNoReplica == 0) && - (regTcPtr->dirtyOp == ZFALSE)) - { - /* ---------------------------------------------------------------------- - * WE HAVE DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION. - * THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER - * VALID TO USE. THIS MUST BE CHECKED. - * ONE IS ADDED TO THE DISTRIBUTION KEY EVERY TIME WE ADD A NEW REPLICA. - * FAILED REPLICAS DO NOT AFFECT THE DISTRIBUTION KEY. THIS MEANS THAT THE - * MAXIMUM DEVIATION CAN BE ONE BETWEEN THOSE TWO VALUES. - * --------------------------------------------------------------------- */ - Int32 tmp = TdistKey - tfragDistKey; - tmp = (tmp < 0 ? - tmp : tmp); - if ((tmp <= 1) || (tfragDistKey == 0)) { - LQHKEY_abort(signal, 0); - return; - }//if - LQHKEY_error(signal, 1); - }//if - if (TreclenAiLqhkey != 0) { - if (regTcPtr->operation != ZREAD) { - if (regTcPtr->operation != ZDELETE) { - if (regTcPtr->opExec != 1) { - jam(); -/*---------------------------------------------------------------------------*/ -/* */ -/* UPDATES, WRITES AND INSERTS THAT ARE NOT INTERPRETED WILL USE THE */ -/* SAME ATTRINFO IN ALL REPLICAS. THUS WE SAVE THE ATTRINFO ALREADY */ -/* TO SAVE A SIGNAL FROM TUP TO LQH. INTERPRETED EXECUTION IN TUP */ -/* WILL CREATE NEW ATTRINFO FOR THE OTHER REPLICAS AND IT IS THUS NOT */ -/* A GOOD IDEA TO SAVE THE INFORMATION HERE. READS WILL ALSO BE */ -/* UNNECESSARY TO SAVE SINCE THAT ATTRINFO WILL NEVER BE SENT TO ANY */ -/* MORE REPLICAS. */ -/*---------------------------------------------------------------------------*/ -/* READS AND DELETES CAN ONLY HAVE INFORMATION ABOUT WHAT IS TO BE READ. */ -/* NO INFORMATION THAT NEEDS LOGGING. 
*/ -/*---------------------------------------------------------------------------*/ - sig0 = lqhKeyReq->variableData[nextPos + 0]; - sig1 = lqhKeyReq->variableData[nextPos + 1]; - sig2 = lqhKeyReq->variableData[nextPos + 2]; - sig3 = lqhKeyReq->variableData[nextPos + 3]; - sig4 = lqhKeyReq->variableData[nextPos + 4]; - - regTcPtr->firstAttrinfo[0] = sig0; - regTcPtr->firstAttrinfo[1] = sig1; - regTcPtr->firstAttrinfo[2] = sig2; - regTcPtr->firstAttrinfo[3] = sig3; - regTcPtr->firstAttrinfo[4] = sig4; - regTcPtr->currTupAiLen = TreclenAiLqhkey; - } else { - jam(); - regTcPtr->reclenAiLqhkey = 0; - }//if - } else { - jam(); - regTcPtr->reclenAiLqhkey = 0; - }//if - }//if - sig0 = lqhKeyReq->variableData[nextPos + 0]; - sig1 = lqhKeyReq->variableData[nextPos + 1]; - sig2 = lqhKeyReq->variableData[nextPos + 2]; - sig3 = lqhKeyReq->variableData[nextPos + 3]; - sig4 = lqhKeyReq->variableData[nextPos + 4]; - - c_tup->receive_attrinfo(signal, regTcPtr->tupConnectrec, - lqhKeyReq->variableData+nextPos, TreclenAiLqhkey); - - if (signal->theData[0] == (UintR)-1) { - LQHKEY_abort(signal, 2); - return; - }//if - }//if -/* ------- TAKE CARE OF PRIM KEY DATA ------- */ - if (regTcPtr->primKeyLen <= 4) { - endgettupkeyLab(signal); - return; - } else { - jam(); -/*--------------------------------------------------------------------*/ -/* KEY LENGTH WAS MORE THAN 4 WORDS (WORD = 4 BYTE). THUS WE */ -/* HAVE TO ALLOCATE A DATA BUFFER TO STORE THE KEY DATA AND */ -/* WAIT FOR THE KEYINFO SIGNAL. */ -/*--------------------------------------------------------------------*/ - regTcPtr->save1 = 4; - regTcPtr->transactionState = TcConnectionrec::WAIT_TUPKEYINFO; - return; - }//if - return; -}//Dblqh::execLQHKEYREQ() - -void Dblqh::endgettupkeyLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->totReclenAi == regTcPtr->currReclenAi) { - ; - } else { - jam(); - ndbrequire(regTcPtr->currReclenAi < regTcPtr->totReclenAi); - regTcPtr->transactionState = TcConnectionrec::WAIT_ATTR; - return; - }//if - -/* ---------------------------------------------------------------------- */ -/* NOW RECEPTION OF LQHKEYREQ IS COMPLETED THE NEXT STEP IS TO START*/ -/* PROCESSING THE MESSAGE. IF THE MESSAGE IS TO A STAND-BY NODE */ -/* WITHOUT NETWORK REDUNDANCY OR PREPARE-TO-COMMIT ACTIVATED THE */ -/* PREPARATION TO SEND TO THE NEXT NODE WILL START IMMEDIATELY. */ -/* */ -/* OTHERWISE THE PROCESSING WILL START AFTER SETTING THE PROPER */ -/* STATE. HOWEVER BEFORE PROCESSING THE MESSAGE */ -/* IT IS NECESSARY TO CHECK THAT THE FRAGMENT IS NOT PERFORMING */ -/* A CHECKPOINT. THE OPERATION SHALL ALSO BE LINKED INTO THE */ -/* FRAGMENT QUEUE OR LIST OF ACTIVE OPERATIONS. */ -/* */ -/* THE FIRST STEP IN PROCESSING THE MESSAGE IS TO CONTACT DBACC. 
*/ -/*------------------------------------------------------------------------*/ - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - case Fragrecord::CRASH_RECOVERING: - case Fragrecord::ACTIVE_CREATION: - prepareContinueAfterBlockedLab(signal); - return; - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - regTcPtr->transactionState = TcConnectionrec::STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - break; - }//switch - return; -}//Dblqh::endgettupkeyLab() - -void Dblqh::prepareContinueAfterBlockedLab(Signal* signal) -{ - UintR ttcScanOp; - -/* -------------------------------------------------------------------------- */ -/* INPUT: TC_CONNECTPTR ACTIVE CONNECTION RECORD */ -/* FRAGPTR FRAGMENT RECORD */ -/* -------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- */ -/* CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */ -/* -------------------------------------------------------------------------- */ -/* ALSO AFTER NORMAL PROCEDURE WE CONTINUE HERE */ -/* -------------------------------------------------------------------------- */ - Uint32 tc_ptr_i = tcConnectptr.i; - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; - if (regTcPtr->indTakeOver == ZTRUE) { - jam(); - ttcScanOp = KeyInfo20::getScanOp(regTcPtr->tcScanInfo); - scanptr.i = RNIL; - { - ScanRecord key; - key.scanNumber = KeyInfo20::getScanNo(regTcPtr->tcScanInfo); - key.fragPtrI = fragptr.i; - c_scanTakeOverHash.find(scanptr, key); -#ifdef TRACE_SCAN_TAKEOVER - if(scanptr.i == RNIL) - ndbout_c("not finding (%d %d)", key.scanNumber, key.fragPtrI); -#endif - } - if (scanptr.i == RNIL) { - jam(); - takeOverErrorLab(signal); - return; - }//if - Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p, - ttcScanOp, - true); - if (accOpPtr == RNIL) { - jam(); - takeOverErrorLab(signal); - return; - }//if - signal->theData[1] = accOpPtr; - signal->theData[2] = regTcPtr->transid[0]; - signal->theData[3] = regTcPtr->transid[1]; - EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACC_TO_REQ, - signal, 4); - if (signal->theData[0] == (UintR)-1) { - execACC_TO_REF(signal); - return; - }//if - jamEntry(); - }//if -/*-------------------------------------------------------------------*/ -/* IT IS NOW TIME TO CONTACT ACC. THE TUPLE KEY WILL BE SENT */ -/* AND THIS WILL BE TRANSLATED INTO A LOCAL KEY BY USING THE */ -/* LOCAL PART OF THE LH3-ALGORITHM. ALSO PROPER LOCKS ON THE */ -/* TUPLE WILL BE SET. FOR INSERTS AND DELETES THE MESSAGE WILL */ -/* START AN INSERT/DELETE INTO THE HASH TABLE. */ -/* */ -/* BEFORE SENDING THE MESSAGE THE REQUEST INFORMATION IS SET */ -/* PROPERLY. 
*/ -/* ----------------------------------------------------------------- */ - if (TRACENR_FLAG) - { - TRACE_OP(regTcPtr, "RECEIVED"); - switch (regTcPtr->operation) { - case ZREAD: TRACENR("READ"); break; - case ZUPDATE: TRACENR("UPDATE"); break; - case ZWRITE: TRACENR("WRITE"); break; - case ZINSERT: TRACENR("INSERT"); break; - case ZDELETE: TRACENR("DELETE"); break; - default: TRACENR("<Unknown: " << regTcPtr->operation << ">"); break; - } - - TRACENR(" tab: " << regTcPtr->tableref - << " frag: " << regTcPtr->fragmentid - << " activeCreat: " << (Uint32)activeCreat); - if (LqhKeyReq::getNrCopyFlag(regTcPtr->reqinfo)) - TRACENR(" NrCopy"); - if (LqhKeyReq::getRowidFlag(regTcPtr->reqinfo)) - TRACENR(" rowid: " << regTcPtr->m_row_id); - TRACENR(" key: " << regTcPtr->tupkeyData[0]); - } - - if (likely(activeCreat == Fragrecord::AC_NORMAL)) - { - if (TRACENR_FLAG) - TRACENR(endl); - ndbassert(!LqhKeyReq::getNrCopyFlag(regTcPtr->reqinfo)); - exec_acckeyreq(signal, tcConnectptr); - } - else if (activeCreat == Fragrecord::AC_NR_COPY) - { - regTcPtr->totSendlenAi = regTcPtr->totReclenAi; - handle_nr_copy(signal, tcConnectptr); - } - else - { - ndbassert(activeCreat == Fragrecord::AC_IGNORED); - if (TRACENR_FLAG) - TRACENR(" IGNORING (activeCreat == 2)" << endl); - - signal->theData[0] = tc_ptr_i; - regTcPtr->transactionState = TcConnectionrec::WAIT_ACC_ABORT; - - signal->theData[0] = regTcPtr->tupConnectrec; - EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1); - jamEntry(); - - regTcPtr->totSendlenAi = regTcPtr->totReclenAi; - packLqhkeyreqLab(signal); - } -} - -void -Dblqh::exec_acckeyreq(Signal* signal, TcConnectionrecPtr regTcPtr) -{ - Uint32 taccreq; - regTcPtr.p->transactionState = TcConnectionrec::WAIT_ACC; - taccreq = regTcPtr.p->operation; - taccreq = taccreq + (regTcPtr.p->opSimple << 3); - taccreq = taccreq + (regTcPtr.p->lockType << 4); - taccreq = taccreq + (regTcPtr.p->dirtyOp << 6); - taccreq = taccreq + (regTcPtr.p->replicaType << 7); - taccreq = taccreq + (regTcPtr.p->apiVersionNo << 9); -/* ************ */ -/* ACCKEYREQ < */ -/* ************ */ - Uint32 sig0, sig1, sig2, sig3, sig4; - sig0 = regTcPtr.p->accConnectrec; - sig1 = fragptr.p->accFragptr; - sig2 = regTcPtr.p->hashValue; - sig3 = regTcPtr.p->primKeyLen; - sig4 = regTcPtr.p->transid[0]; - signal->theData[0] = sig0; - signal->theData[1] = sig1; - signal->theData[2] = taccreq; - signal->theData[3] = sig2; - signal->theData[4] = sig3; - signal->theData[5] = sig4; - - sig0 = regTcPtr.p->transid[1]; - sig1 = regTcPtr.p->tupkeyData[0]; - sig2 = regTcPtr.p->tupkeyData[1]; - sig3 = regTcPtr.p->tupkeyData[2]; - sig4 = regTcPtr.p->tupkeyData[3]; - signal->theData[6] = sig0; - signal->theData[7] = sig1; - signal->theData[8] = sig2; - signal->theData[9] = sig3; - signal->theData[10] = sig4; - - TRACE_OP(regTcPtr.p, "ACC"); - - if (regTcPtr.p->primKeyLen > 4) { - sendKeyinfoAcc(signal, 11); - }//if - EXECUTE_DIRECT(refToBlock(regTcPtr.p->tcAccBlockref), GSN_ACCKEYREQ, - signal, 7 + regTcPtr.p->primKeyLen); - if (signal->theData[0] < RNIL) { - signal->theData[0] = regTcPtr.i; - execACCKEYCONF(signal); - return; - } else if (signal->theData[0] == RNIL) { - ; - } else { - ndbrequire(signal->theData[0] == (UintR)-1); - signal->theData[0] = regTcPtr.i; - execACCKEYREF(signal); - }//if - return; -}//Dblqh::prepareContinueAfterBlockedLab() - -void -Dblqh::handle_nr_copy(Signal* signal, Ptr<TcConnectionrec> regTcPtr) -{ - jam(); - Uint32 fragPtr = fragptr.p->tupFragptr; - Uint32 op = regTcPtr.p->operation; - - const bool copy = LqhKeyReq::getNrCopyFlag(regTcPtr.p->reqinfo); - - if 
(!LqhKeyReq::getRowidFlag(regTcPtr.p->reqinfo)) - { - /** - * Rowid not set, that mean that primary has finished copying... - */ - jam(); - if (TRACENR_FLAG) - TRACENR(" Waiting for COPY_ACTIVEREQ" << endl); - ndbassert(!LqhKeyReq::getNrCopyFlag(regTcPtr.p->reqinfo)); - regTcPtr.p->activeCreat = Fragrecord::AC_NORMAL; - exec_acckeyreq(signal, regTcPtr); - return; - } - - regTcPtr.p->m_nr_delete.m_cnt = 1; // Wait for real op aswell - Uint32* dst = signal->theData+24; - bool uncommitted; - const int len = c_tup->nr_read_pk(fragPtr, &regTcPtr.p->m_row_id, dst, - uncommitted); - const bool match = (len>0) ? compare_key(regTcPtr.p, dst, len) == 0 : false; - - if (TRACENR_FLAG) - TRACENR(" len: " << len << " match: " << match - << " uncommitted: " << uncommitted); - - if (copy) - { - ndbassert(LqhKeyReq::getGCIFlag(regTcPtr.p->reqinfo)); - if (match) - { - /** - * Case 1 - */ - jam(); - ndbassert(op == ZINSERT); - if (TRACENR_FLAG) - TRACENR(" Changing from INSERT to ZUPDATE" << endl); - regTcPtr.p->operation = ZUPDATE; - goto run; - } - else if (len > 0 && op == ZDELETE) - { - /** - * Case 4 - * Perform delete using rowid - * primKeyLen == 0 - * tupkeyData[0] == rowid - */ - jam(); - ndbassert(regTcPtr.p->primKeyLen == 0); - if (TRACENR_FLAG) - TRACENR(" performing DELETE key: " - << dst[0] << endl); - - nr_copy_delete_row(signal, regTcPtr, &regTcPtr.p->m_row_id, len); - ndbassert(regTcPtr.p->m_nr_delete.m_cnt); - regTcPtr.p->m_nr_delete.m_cnt--; // No real op is run - if (regTcPtr.p->m_nr_delete.m_cnt) - { - jam(); - return; - } - packLqhkeyreqLab(signal); - return; - } - else if (len == 0 && op == ZDELETE) - { - /** - * Case 7 - */ - jam(); - if (TRACENR_FLAG) - TRACENR(" UPDATE_GCI" << endl); - c_tup->nr_update_gci(fragPtr, &regTcPtr.p->m_row_id, regTcPtr.p->gci); - goto update_gci_ignore; - } - - /** - * 1) Delete row at specified rowid (if len > 0) - * 2) Delete specified row at different rowid (if exists) - * 3) Run insert - */ - if (len > 0) - { - /** - * 1) Delete row at specified rowid (if len > 0) - */ - jam(); - nr_copy_delete_row(signal, regTcPtr, &regTcPtr.p->m_row_id, len); - } - /** - * 2) Delete specified row at different rowid (if exists) - */ - jam(); - nr_copy_delete_row(signal, regTcPtr, 0, 0); - if (TRACENR_FLAG) - TRACENR(" RUN INSERT" << endl); - goto run; - } - else - { - if (!match && op != ZINSERT) - { - jam(); - if (TRACENR_FLAG) - TRACENR(" IGNORE " << endl); - goto ignore; - } - if (match) - { - jam(); - if (op != ZDELETE) - { - if (TRACENR_FLAG) - TRACENR(" Changing from to ZWRITE" << endl); - regTcPtr.p->operation = ZWRITE; - } - goto run; - } - - /** - * 1) Delete row at specified rowid (if len > 0) - * 2) Delete specified row at different rowid (if exists) - * 3) Run insert - */ - if (len > 0) - { - /** - * 1) Delete row at specified rowid (if len > 0) - */ - jam(); - nr_copy_delete_row(signal, regTcPtr, &regTcPtr.p->m_row_id, len); - } - - /** - * 2) Delete specified row at different rowid (if exists) - */ - jam(); - nr_copy_delete_row(signal, regTcPtr, 0, 0); - if (TRACENR_FLAG) - TRACENR(" RUN op: " << op << endl); - goto run; - } - -run: - jam(); - exec_acckeyreq(signal, regTcPtr); - return; - -ignore: - jam(); - ndbassert(!LqhKeyReq::getNrCopyFlag(regTcPtr.p->reqinfo)); -update_gci_ignore: - regTcPtr.p->activeCreat = Fragrecord::AC_IGNORED; - signal->theData[0] = regTcPtr.p->tupConnectrec; - EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1); - - packLqhkeyreqLab(signal); -} - -int -Dblqh::compare_key(const TcConnectionrec* regTcPtr, - const Uint32 * ptr, Uint32 len) -{ 
- if (regTcPtr->primKeyLen != len) - return 1; - - if (len <= 4) - return memcmp(ptr, regTcPtr->tupkeyData, 4*len); - - if (memcmp(ptr, regTcPtr->tupkeyData, sizeof(regTcPtr->tupkeyData))) - return 1; - - len -= (sizeof(regTcPtr->tupkeyData) >> 2); - ptr += (sizeof(regTcPtr->tupkeyData) >> 2); - - DatabufPtr regDatabufptr; - regDatabufptr.i = tcConnectptr.p->firstTupkeybuf; - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - while(len > 4) - { - if (memcmp(ptr, regDatabufptr.p, 4*4)) - return 1; - - ptr += 4; - len -= 4; - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - } - - if (memcmp(ptr, regDatabufptr.p, 4*len)) - return 1; - - return 0; -} - -void -Dblqh::nr_copy_delete_row(Signal* signal, - Ptr regTcPtr, - Local_key* rowid, Uint32 len) -{ - Ptr fragPtr = fragptr; - - Uint32 keylen; - Uint32 tableId = regTcPtr.p->tableref; - Uint32 accPtr = regTcPtr.p->accConnectrec; - - signal->theData[0] = accPtr; - signal->theData[1] = fragptr.p->accFragptr; - signal->theData[2] = ZDELETE + (ZDELETE << 4); - signal->theData[5] = regTcPtr.p->transid[0]; - signal->theData[6] = regTcPtr.p->transid[1]; - - if (rowid) - { - jam(); - keylen = 1; - if (g_key_descriptor_pool.getPtr(tableId)->hasCharAttr) - { - signal->theData[3] = calculateHash(tableId, signal->theData+24); - } - else - { - signal->theData[3] = md5_hash((Uint64*)(signal->theData+24), len); - } - signal->theData[4] = 0; // seach by local key - signal->theData[7] = rowid->ref(); - } - else - { - jam(); - keylen = regTcPtr.p->primKeyLen; - signal->theData[3] = regTcPtr.p->hashValue; - signal->theData[4] = keylen; - signal->theData[7] = regTcPtr.p->tupkeyData[0]; - signal->theData[8] = regTcPtr.p->tupkeyData[1]; - signal->theData[9] = regTcPtr.p->tupkeyData[2]; - signal->theData[10] = regTcPtr.p->tupkeyData[3]; - if (keylen > 4) - sendKeyinfoAcc(signal, 11); - } - const Uint32 ref = refToBlock(regTcPtr.p->tcAccBlockref); - EXECUTE_DIRECT(ref, GSN_ACCKEYREQ, signal, 7 + keylen); - jamEntry(); - - Uint32 retValue = signal->theData[0]; - ndbrequire(retValue != RNIL); // This should never block... 
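
A minimal standalone sketch of the comparison pattern used by compare_key() above: the stored primary key lives as four inline words (tupkeyData) followed by a chain of fixed-size key buffers, and is checked against a flat word image. The types below are hypothetical stand-ins for the Databuf records and ptrCheckGuard bookkeeping; this sketch is illustrative only and is not part of the removed code.

#include <cstdint>
#include <cstring>

struct KeySegment {
  uint32_t data[4];            // four key words per buffer, as in Databuf
  const KeySegment* next;      // null-terminated here; RNIL-linked originally
};

struct StoredKey {
  uint32_t inlineWords[4];     // corresponds to tupkeyData[0..3]
  uint32_t lengthInWords;      // corresponds to primKeyLen
  const KeySegment* firstSeg;  // corresponds to firstTupkeybuf
};

// Returns 0 when the flat image ptr[0..len-1] equals the stored key.
// Assumes the segment chain holds at least lengthInWords - 4 words,
// as the original guarantees for primKeyLen.
int compare_stored_key(const StoredKey& key, const uint32_t* ptr, uint32_t len)
{
  if (key.lengthInWords != len)
    return 1;

  if (len <= 4)
    return std::memcmp(ptr, key.inlineWords, 4 * len) ? 1 : 0;

  if (std::memcmp(ptr, key.inlineWords, sizeof(key.inlineWords)))
    return 1;

  ptr += 4;
  len -= 4;

  const KeySegment* seg = key.firstSeg;
  while (len > 4)
  {
    if (std::memcmp(ptr, seg->data, 4 * 4))
      return 1;
    ptr += 4;
    len -= 4;
    seg = seg->next;           // guarded with ptrCheckGuard in the original
  }
  return std::memcmp(ptr, seg->data, 4 * len) ? 1 : 0;
}
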
- ndbrequire(retValue != (Uint32)-1 || rowid == 0); // rowid should never fail - - if (retValue == (Uint32)-1) - { - /** - * Only delete by pk, may fail - */ - jam(); - ndbrequire(rowid == 0); - signal->theData[0] = accPtr; - signal->theData[1] = 0; - EXECUTE_DIRECT(ref, GSN_ACC_ABORTREQ, signal, 2); - jamEntry(); - return; - } - - /** - * We found row (and have it locked in ACC) - */ - ndbrequire(regTcPtr.p->m_dealloc == 0); - Local_key save = regTcPtr.p->m_row_id; - - c_acc->execACCKEY_ORD(signal, accPtr); - signal->theData[0] = accPtr; - EXECUTE_DIRECT(ref, GSN_ACC_COMMITREQ, signal, 1); - jamEntry(); - - ndbrequire(regTcPtr.p->m_dealloc == 1); - int ret = c_tup->nr_delete(signal, regTcPtr.i, - fragPtr.p->tupFragptr, ®TcPtr.p->m_row_id, - regTcPtr.p->gci); - jamEntry(); - - if (ret) - { - ndbassert(ret == 1); - Uint32 pos = regTcPtr.p->m_nr_delete.m_cnt - 1; - memcpy(regTcPtr.p->m_nr_delete.m_disk_ref + pos, - signal->theData, sizeof(Local_key)); - regTcPtr.p->m_nr_delete.m_page_id[pos] = RNIL; - regTcPtr.p->m_nr_delete.m_cnt = pos + 2; - if (0) ndbout << "PENDING DISK DELETE: " << - regTcPtr.p->m_nr_delete.m_disk_ref[pos] << endl; - } - - TRACENR("DELETED: " << regTcPtr.p->m_row_id << endl); - - regTcPtr.p->m_dealloc = 0; - regTcPtr.p->m_row_id = save; - fragptr = fragPtr; - tcConnectptr = regTcPtr; -} - -void -Dblqh::get_nr_op_info(Nr_op_info* op, Uint32 page_id) -{ - Ptr tcPtr; - tcPtr.i = op->m_ptr_i; - ptrCheckGuard(tcPtr, ctcConnectrecFileSize, tcConnectionrec); - - Ptr fragPtr; - c_fragment_pool.getPtr(fragPtr, tcPtr.p->fragmentptr); - - op->m_gci = tcPtr.p->gci; - op->m_tup_frag_ptr_i = fragPtr.p->tupFragptr; - - ndbrequire(tcPtr.p->activeCreat == Fragrecord::AC_NR_COPY); - ndbrequire(tcPtr.p->m_nr_delete.m_cnt); - - - if (page_id == RNIL) - { - // get log buffer callback - for (Uint32 i = 0; i<2; i++) - { - if (tcPtr.p->m_nr_delete.m_page_id[i] != RNIL) - { - op->m_page_id = tcPtr.p->m_nr_delete.m_page_id[i]; - op->m_disk_ref = tcPtr.p->m_nr_delete.m_disk_ref[i]; - return; - } - } - } - else - { - // get page callback - for (Uint32 i = 0; i<2; i++) - { - Local_key key = tcPtr.p->m_nr_delete.m_disk_ref[i]; - if (op->m_disk_ref.m_page_no == key.m_page_no && - op->m_disk_ref.m_file_no == key.m_file_no && - tcPtr.p->m_nr_delete.m_page_id[i] == RNIL) - { - op->m_disk_ref = key; - tcPtr.p->m_nr_delete.m_page_id[i] = page_id; - return; - } - } - } - ndbrequire(false); -} - -void -Dblqh::nr_delete_complete(Signal* signal, Nr_op_info* op) -{ - jamEntry(); - Ptr tcPtr; - tcPtr.i = op->m_ptr_i; - ptrCheckGuard(tcPtr, ctcConnectrecFileSize, tcConnectionrec); - - ndbrequire(tcPtr.p->activeCreat == Fragrecord::AC_NR_COPY); - ndbrequire(tcPtr.p->m_nr_delete.m_cnt); - - tcPtr.p->m_nr_delete.m_cnt--; - if (tcPtr.p->m_nr_delete.m_cnt == 0) - { - jam(); - tcConnectptr = tcPtr; - c_fragment_pool.getPtr(fragptr, tcPtr.p->fragmentptr); - - if (tcPtr.p->abortState != TcConnectionrec::ABORT_IDLE) - { - jam(); - tcPtr.p->activeCreat = Fragrecord::AC_NORMAL; - abortCommonLab(signal); - } - else if (tcPtr.p->operation == ZDELETE && - LqhKeyReq::getNrCopyFlag(tcPtr.p->reqinfo)) - { - /** - * This is run directly in handle_nr_copy - */ - jam(); - packLqhkeyreqLab(signal); - } - else - { - jam(); - rwConcludedLab(signal); - } - return; - } - - if (memcmp(&tcPtr.p->m_nr_delete.m_disk_ref[0], - &op->m_disk_ref, sizeof(Local_key)) == 0) - { - jam(); - ndbassert(tcPtr.p->m_nr_delete.m_page_id[0] != RNIL); - tcPtr.p->m_nr_delete.m_page_id[0] = tcPtr.p->m_nr_delete.m_page_id[1]; - 
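
The m_nr_delete bookkeeping above follows a small fan-in pattern: nr_delete() may leave a disk part of the delete outstanding, so the operation keeps a counter of pending pieces and only resumes (abort, pack the response, or conclude) once every callback has fired, as nr_delete_complete() below shows. A rough sketch of that pattern with hypothetical names, illustrative only:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct CopyOp {
  uint32_t outstanding = 0;    // mirrors m_nr_delete.m_cnt
  bool abortRequested = false; // mirrors abortState != ABORT_IDLE
};

// Called once per completed disk piece (page fetch or log-buffer callback).
// The key operation only continues after the last piece reports back.
void on_disk_piece_done(CopyOp& op)
{
  assert(op.outstanding > 0);
  if (--op.outstanding != 0)
    return;                                 // more pieces still in flight
  if (op.abortRequested)
    std::puts("abort path");                // abortCommonLab() in the original
  else
    std::puts("resume: pack response");     // packLqhkeyreqLab()/rwConcludedLab()
}
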
tcPtr.p->m_nr_delete.m_disk_ref[0] = tcPtr.p->m_nr_delete.m_disk_ref[1]; - } -} - -Uint32 -Dblqh::readPrimaryKeys(Uint32 opPtrI, Uint32 * dst, bool xfrm) -{ - TcConnectionrecPtr regTcPtr; - DatabufPtr regDatabufptr; - Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS >> 1]; - - jamEntry(); - regTcPtr.i = opPtrI; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - - Uint32 tableId = regTcPtr.p->tableref; - Uint32 keyLen = regTcPtr.p->primKeyLen; - regDatabufptr.i = regTcPtr.p->firstTupkeybuf; - Uint32 * tmp = xfrm ? (Uint32*)Tmp : dst; - - memcpy(tmp, regTcPtr.p->tupkeyData, sizeof(regTcPtr.p->tupkeyData)); - if (keyLen > 4) - { - tmp += 4; - Uint32 pos = 4; - do { - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - memcpy(tmp, regDatabufptr.p->data, sizeof(regDatabufptr.p->data)); - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - tmp += sizeof(regDatabufptr.p->data) >> 2; - pos += sizeof(regDatabufptr.p->data) >> 2; - } while(pos < keyLen); - } - - if (xfrm) - { - jam(); - Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]; - return xfrm_key(tableId, (Uint32*)Tmp, dst, ~0, keyPartLen); - } - - return keyLen; -} - -/* =*======================================================================= */ -/* ======= SEND KEYINFO TO ACC ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::sendKeyinfoAcc(Signal* signal, Uint32 Ti) -{ - DatabufPtr regDatabufptr; - regDatabufptr.i = tcConnectptr.p->firstTupkeybuf; - - do { - jam(); - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - Uint32 sig0 = regDatabufptr.p->data[0]; - Uint32 sig1 = regDatabufptr.p->data[1]; - Uint32 sig2 = regDatabufptr.p->data[2]; - Uint32 sig3 = regDatabufptr.p->data[3]; - signal->theData[Ti] = sig0; - signal->theData[Ti + 1] = sig1; - signal->theData[Ti + 2] = sig2; - signal->theData[Ti + 3] = sig3; - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - Ti += 4; - } while (regDatabufptr.i != RNIL); -}//Dblqh::sendKeyinfoAcc() - -void Dblqh::execLQH_ALLOCREQ(Signal* signal) -{ - TcConnectionrecPtr regTcPtr; - FragrecordPtr regFragptr; - - jamEntry(); - regTcPtr.i = signal->theData[0]; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - - regFragptr.i = regTcPtr.p->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - - signal->theData[0] = regTcPtr.p->tupConnectrec; - signal->theData[1] = regFragptr.p->tupFragptr; - signal->theData[2] = regTcPtr.p->tableref; - Uint32 tup = refToBlock(regTcPtr.p->tcTupBlockref); - EXECUTE_DIRECT(tup, GSN_TUP_ALLOCREQ, signal, 3); -}//Dblqh::execTUP_ALLOCREQ() - -void Dblqh::execTUP_DEALLOCREQ(Signal* signal) -{ - TcConnectionrecPtr regTcPtr; - - jamEntry(); - regTcPtr.i = signal->theData[4]; - - if (TRACENR_FLAG) - { - Local_key tmp; - tmp.m_page_no = signal->theData[2]; - tmp.m_page_idx = signal->theData[3]; - TRACENR("TUP_DEALLOC: " << tmp << - (signal->theData[5] ? 
" DIRECT " : " DELAYED") << endl); - } - - if (signal->theData[5]) - { - jam(); - Local_key tmp; - tmp.m_page_no = signal->theData[2]; - tmp.m_page_idx = signal->theData[3]; - EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, signal->getLength()); - return; - } - else - { - jam(); - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - regTcPtr.p->m_row_id.m_page_no = signal->theData[2]; - regTcPtr.p->m_row_id.m_page_idx = signal->theData[3]; - - ndbrequire(regTcPtr.p->m_dealloc == 0); - regTcPtr.p->m_dealloc = 1; - } -}//Dblqh::execTUP_ALLOCREQ() - -/* ************>> */ -/* ACCKEYCONF > */ -/* ************>> */ -void Dblqh::execACCKEYCONF(Signal* signal) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - Uint32 tcIndex = signal->theData[0]; - Uint32 localKey1 = signal->theData[3]; - //Uint32 localKey2 = signal->theData[4]; - Uint32 localKeyFlag = signal->theData[5]; - jamEntry(); - tcConnectptr.i = tcIndex; - ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec); - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->transactionState != TcConnectionrec::WAIT_ACC) { - LQHKEY_abort(signal, 3); - return; - }//if - - /* ------------------------------------------------------------------------ - * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE - * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO - * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA - * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION - * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A - * TABLE. - * ----------------------------------------------------------------------- */ - if (regTcPtr->operation == ZWRITE) - { - ndbassert(regTcPtr->seqNoReplica == 0 || - regTcPtr->activeCreat == Fragrecord::AC_NR_COPY); - Uint32 op= signal->theData[1]; - Uint32 requestInfo = regTcPtr->reqinfo; - if(likely(op == ZINSERT || op == ZUPDATE)) - { - jam(); - regTcPtr->operation = op; - } - else - { - jam(); - warningEvent("Convering %d to ZUPDATE", op); - op = regTcPtr->operation = ZUPDATE; - } - if (regTcPtr->seqNoReplica == 0) - { - jam(); - requestInfo &= ~(RI_OPERATION_MASK << RI_OPERATION_SHIFT); - LqhKeyReq::setOperation(requestInfo, op); - regTcPtr->reqinfo = requestInfo; - } - }//if - - /* ------------------------------------------------------------------------ - * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE - * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO - * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA - * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION - * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A - * TABLE. 
- * ----------------------------------------------------------------------- */ - FragrecordPtr regFragptr; - regFragptr.i = regTcPtr->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - - ndbrequire(localKeyFlag == 1); - if(!regTcPtr->m_disk_table) - acckeyconf_tupkeyreq(signal, regTcPtr, regFragptr.p, localKey1, RNIL); - else - acckeyconf_load_diskpage(signal, tcConnectptr, regFragptr.p, localKey1); -} - -void -Dblqh::acckeyconf_tupkeyreq(Signal* signal, TcConnectionrec* regTcPtr, - Fragrecord* regFragptrP, - Uint32 local_key, - Uint32 disk_page) -{ - Uint32 op = regTcPtr->operation; - regTcPtr->transactionState = TcConnectionrec::WAIT_TUP; - /* ------------------------------------------------------------------------ - * IT IS NOW TIME TO CONTACT THE TUPLE MANAGER. THE TUPLE MANAGER NEEDS THE - * INFORMATION ON WHICH TABLE AND FRAGMENT, THE LOCAL KEY AND IT NEEDS TO - * KNOW THE TYPE OF OPERATION TO PERFORM. TUP CAN SEND THE ATTRINFO DATA - * EITHER TO THE TC BLOCK OR DIRECTLY TO THE APPLICATION. THE SCHEMA VERSION - * IS NEEDED SINCE TWO SCHEMA VERSIONS CAN BE ACTIVE SIMULTANEOUSLY ON A - * TABLE. - * ----------------------------------------------------------------------- */ - Uint32 page_idx = local_key & MAX_TUPLES_PER_PAGE; - Uint32 page_no = local_key >> MAX_TUPLES_BITS; - Uint32 Ttupreq = regTcPtr->dirtyOp; - Ttupreq = Ttupreq + (regTcPtr->opSimple << 1); - Ttupreq = Ttupreq + (op << 6); - Ttupreq = Ttupreq + (regTcPtr->opExec << 10); - Ttupreq = Ttupreq + (regTcPtr->apiVersionNo << 11); - Ttupreq = Ttupreq + (regTcPtr->m_use_rowid << 11); - - /* --------------------------------------------------------------------- - * Clear interpreted mode bit since we do not want the next replica to - * use interpreted mode. The next replica will receive a normal write. 
- * --------------------------------------------------------------------- */ - regTcPtr->opExec = 0; - /* ************< */ - /* TUPKEYREQ < */ - /* ************< */ - Uint32 sig0, sig1, sig2, sig3; - sig0 = regTcPtr->tupConnectrec; - - TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend(); - tupKeyReq->connectPtr = sig0; - tupKeyReq->request = Ttupreq; - tupKeyReq->keyRef1 = page_no; - tupKeyReq->keyRef2 = page_idx; - - sig0 = regTcPtr->totReclenAi; - sig1 = regTcPtr->applOprec; - sig2 = regTcPtr->applRef; - - tupKeyReq->attrBufLen = sig0; - tupKeyReq->opRef = sig1; - tupKeyReq->applRef = sig2; - - sig0 = regTcPtr->storedProcId; - sig1 = regTcPtr->transid[0]; - sig2 = regTcPtr->transid[1]; - sig3 = regFragptrP->tupFragptr; - Uint32 tup = refToBlock(regTcPtr->tcTupBlockref); - - tupKeyReq->storedProcedure = sig0; - tupKeyReq->transId1 = sig1; - tupKeyReq->transId2 = sig2; - tupKeyReq->fragPtr = sig3; - - sig0 = regTcPtr->m_row_id.m_page_no; - sig1 = regTcPtr->m_row_id.m_page_idx; - - tupKeyReq->primaryReplica = (tcConnectptr.p->seqNoReplica == 0)?true:false; - tupKeyReq->coordinatorTC = tcConnectptr.p->tcBlockref; - tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec; - tupKeyReq->savePointId = tcConnectptr.p->savePointId; - tupKeyReq->disk_page= disk_page; - - tupKeyReq->m_row_id_page_no = sig0; - tupKeyReq->m_row_id_page_idx = sig1; - - TRACE_OP(regTcPtr, "TUPKEYREQ"); - - regTcPtr->m_use_rowid |= (op == ZINSERT); - regTcPtr->m_row_id.m_page_no = page_no; - regTcPtr->m_row_id.m_page_idx = page_idx; - - EXECUTE_DIRECT(tup, GSN_TUPKEYREQ, signal, TupKeyReq::SignalLength); -}//Dblqh::execACCKEYCONF() - -void -Dblqh::acckeyconf_load_diskpage(Signal* signal, TcConnectionrecPtr regTcPtr, - Fragrecord* regFragptrP, Uint32 local_key) -{ - int res; - if((res= c_tup->load_diskpage(signal, - regTcPtr.p->tupConnectrec, - regFragptrP->tupFragptr, - local_key, - regTcPtr.p->operation)) > 0) - { - acckeyconf_tupkeyreq(signal, regTcPtr.p, regFragptrP, local_key, res); - } - else if(res == 0) - { - regTcPtr.p->transactionState = TcConnectionrec::WAIT_TUP; - regTcPtr.p->m_row_id.assref(local_key); - } - else - { - regTcPtr.p->transactionState = TcConnectionrec::WAIT_TUP; - TupKeyRef * ref = (TupKeyRef *)signal->getDataPtr(); - ref->userRef= regTcPtr.i; - ref->errorCode= ~0; - execTUPKEYREF(signal); - } -} - -void -Dblqh::acckeyconf_load_diskpage_callback(Signal* signal, - Uint32 callbackData, - Uint32 disk_page) -{ - jamEntry(); - tcConnectptr.i = callbackData; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * const regTcPtr = tcConnectptr.p; - - TcConnectionrec::TransactionState state = regTcPtr->transactionState; - if (likely(disk_page > 0 && state == TcConnectionrec::WAIT_TUP)) - { - FragrecordPtr fragPtr; - c_fragment_pool.getPtr(fragPtr, regTcPtr->fragmentptr); - - acckeyconf_tupkeyreq(signal, regTcPtr, fragPtr.p, - regTcPtr->m_row_id.ref(), - disk_page); - } - else if (state != TcConnectionrec::WAIT_TUP) - { - ndbrequire(state == TcConnectionrec::WAIT_TUP_TO_ABORT); - abortCommonLab(signal); - return; - } - else - { - regTcPtr->transactionState = TcConnectionrec::WAIT_TUP; - TupKeyRef * ref = (TupKeyRef *)signal->getDataPtr(); - ref->userRef= callbackData; - ref->errorCode= disk_page; - execTUPKEYREF(signal); - } -} - -/* -------------------------------------------------------------------------- - * ------- ENTER TUP... 
------- - * ENTER TUPKEYCONF WITH - * TC_CONNECTPTR, - * TDATA2, LOCAL KEY REFERENCE 1, ONLY INTERESTING AFTER INSERT - * TDATA3, LOCAL KEY REFERENCE 1, ONLY INTERESTING AFTER INSERT - * TDATA4, TOTAL LENGTH OF READ DATA SENT TO TC/APPLICATION - * TDATA5 TOTAL LENGTH OF UPDATE DATA SENT TO/FROM TUP - * GOTO TUPKEY_CONF - * - * TAKE CARE OF RESPONSES FROM TUPLE MANAGER. - * -------------------------------------------------------------------------- */ -void Dblqh::tupkeyConfLab(Signal* signal) -{ -/* ---- GET OPERATION TYPE AND CHECK WHAT KIND OF OPERATION IS REQUESTED --- */ - const TupKeyConf * const tupKeyConf = (TupKeyConf *)&signal->theData[0]; - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 activeCreat = regTcPtr->activeCreat; - Uint32 readLen = tupKeyConf->readLength; - Uint32 writeLen = tupKeyConf->writeLength; - - Uint32 accOp = regTcPtr->accConnectrec; - c_acc->execACCKEY_ORD(signal, accOp); - - TRACE_OP(regTcPtr, "TUPKEYCONF"); - - if (readLen != 0) - { - jam(); - - /* SET BIT 15 IN REQINFO */ - LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1); - regTcPtr->readlenAi = readLen; - }//if - - if (regTcPtr->operation == ZREAD && - (regTcPtr->opSimple || regTcPtr->dirtyOp)) - { - jam(); - /* ---------------------------------------------------------------------- - * THE OPERATION IS A SIMPLE READ. - * WE WILL IMMEDIATELY COMMIT THE OPERATION. - * SINCE WE HAVE NOT RELEASED THE FRAGMENT LOCK - * (FOR LOCAL CHECKPOINTS) YET - * WE CAN GO IMMEDIATELY TO COMMIT_CONTINUE_AFTER_BLOCKED. - * WE HAVE ALREADY SENT THE RESPONSE SO WE ARE NOT INTERESTED IN - * READ LENGTH - * --------------------------------------------------------------------- */ - commitContinueAfterBlockedLab(signal); - return; - }//if - regTcPtr->totSendlenAi = writeLen; - ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen); - - if (unlikely(activeCreat == Fragrecord::AC_NR_COPY)) - { - jam(); - ndbrequire(regTcPtr->m_nr_delete.m_cnt); - regTcPtr->m_nr_delete.m_cnt--; - if (regTcPtr->m_nr_delete.m_cnt) - { - jam(); - /** - * Let operation wait for pending NR operations - * even for before writing log...(as it's simpler) - */ - -#ifdef VM_TRACE - /** - * Only disk table can have pending ops... - */ - TablerecPtr tablePtr; - tablePtr.i = regTcPtr->tableref; - ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec); - ndbrequire(tablePtr.p->m_disk_table); -#endif - - return; - } - } - - rwConcludedLab(signal); - return; -}//Dblqh::tupkeyConfLab() - -/* -------------------------------------------------------------------------- - * THE CODE IS FOUND IN THE SIGNAL RECEPTION PART OF LQH - * -------------------------------------------------------------------------- */ -void Dblqh::rwConcludedLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - /* ------------------------------------------------------------------------ - * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION. - * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND - * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION. - * ------------------------------------------------------------------------ */ - if (regTcPtr->operation == ZREAD) { - jam(); - /* ---------------------------------------------------------------------- - * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL THE - * COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE. 
- * ---------------------------------------------------------------------- */ - packLqhkeyreqLab(signal); - return; - } else { - FragrecordPtr regFragptr = fragptr; - if (regFragptr.p->logFlag == Fragrecord::STATE_FALSE){ - if (regTcPtr->dirtyOp == ZTRUE) { - jam(); - /* ------------------------------------------------------------------ - * THIS OPERATION WAS A WRITE OPERATION THAT DO NOT NEED LOGGING AND - * THAT CAN CAN BE COMMITTED IMMEDIATELY. - * ----------------------------------------------------------------- */ - commitContinueAfterBlockedLab(signal); - return; - } else { - jam(); - /* ------------------------------------------------------------------ - * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DO NOT NEED LOGGING. - * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC. - * ------------------------------------------------------------------ */ - regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN; - packLqhkeyreqLab(signal); - return; - }//if - } else { - jam(); - /* -------------------------------------------------------------------- - * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE - * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST. - * -------------------------------------------------------------------- - * A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE - * PREMATURELY COMMITTED. - * -------------------------------------------------------------------- */ - logLqhkeyreqLab(signal); - return; - }//if - }//if -}//Dblqh::rwConcludedLab() - -void Dblqh::rwConcludedAiLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - fragptr.i = regTcPtr->fragmentptr; - /* ------------------------------------------------------------------------ - * WE HAVE NOW CONCLUDED READING/WRITING IN ACC AND TUP FOR THIS OPERATION. - * IT IS NOW TIME TO LOG THE OPERATION, SEND REQUEST TO NEXT NODE OR TC AND - * FOR SOME TYPES OF OPERATIONS IT IS EVEN TIME TO COMMIT THE OPERATION. - * IN THIS CASE WE HAVE ALREADY RELEASED THE FRAGMENT LOCK. - * ERROR CASES AT FRAGMENT CREATION AND STAND-BY NODES ARE THE REASONS FOR - * COMING HERE. - * ------------------------------------------------------------------------ */ - if (regTcPtr->operation == ZREAD) { - if (regTcPtr->opSimple == 1) { - jam(); - /* -------------------------------------------------------------------- - * THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE - * OPERATION. - * -------------------------------------------------------------------- */ - localCommitLab(signal); - return; - } else { - jam(); - /* -------------------------------------------------------------------- - * A NORMAL READ OPERATION IS NOT LOGGED BUT IS NOT COMMITTED UNTIL - * THE COMMIT SIGNAL ARRIVES. THUS WE CONTINUE PACKING THE RESPONSE. - * -------------------------------------------------------------------- */ - c_fragment_pool.getPtr(fragptr); - packLqhkeyreqLab(signal); - return; - }//if - } else { - jam(); - c_fragment_pool.getPtr(fragptr); - if (fragptr.p->logFlag == Fragrecord::STATE_FALSE) { - if (regTcPtr->dirtyOp == ZTRUE) { - /* ------------------------------------------------------------------ - * THIS OPERATION WAS A WRITE OPERATION THAT DO NOT NEED LOGGING AND - * THAT CAN CAN BE COMMITTED IMMEDIATELY. - * ----------------------------------------------------------------- */ - jam(); - /* ---------------------------------------------------------------- - * IT MUST BE ACTIVE CREATION OF A FRAGMENT. 
- * ---------------------------------------------------------------- */ - localCommitLab(signal); - return; - } else { - /* ------------------------------------------------------------------ - * A NORMAL WRITE OPERATION ON A FRAGMENT WHICH DO NOT NEED LOGGING. - * WE WILL PACK THE REQUEST/RESPONSE TO THE NEXT NODE/TO TC. - * ------------------------------------------------------------------ */ - jam(); - /* --------------------------------------------------------------- - * IT MUST BE ACTIVE CREATION OF A FRAGMENT. - * NOT A DIRTY OPERATION THUS PACK REQUEST/RESPONSE. - * ---------------------------------------------------------------- */ - regTcPtr->logWriteState = TcConnectionrec::NOT_WRITTEN; - packLqhkeyreqLab(signal); - return; - }//if - } else { - jam(); - /* -------------------------------------------------------------------- - * A DIRTY OPERATION WHICH NEEDS LOGGING. WE START BY LOGGING THE - * REQUEST. IN THIS CASE WE WILL RELEASE THE FRAGMENT LOCK FIRST. - * -------------------------------------------------------------------- */ - /* A NORMAL WRITE OPERATION THAT NEEDS LOGGING AND WILL NOT BE - * PREMATURELY COMMITTED. - * -------------------------------------------------------------------- */ - logLqhkeyreqLab(signal); - return; - }//if - }//if -}//Dblqh::rwConcludedAiLab() - -/* ########################################################################## - * ####### LOG MODULE ####### - * - * ########################################################################## - * -------------------------------------------------------------------------- - * THE LOG MODULE HANDLES THE READING AND WRITING OF THE LOG - * IT IS ALSO RESPONSIBLE FOR HANDLING THE SYSTEM RESTART. - * IT CONTROLS THE SYSTEM RESTART IN TUP AND ACC AS WELL. - * -------------------------------------------------------------------------- */ -void Dblqh::logLqhkeyreqLab(Signal* signal) -{ - UintR tcurrentFilepage; - TcConnectionrecPtr tmpTcConnectptr; - - if (cnoOfLogPages < ZMIN_LOG_PAGES_OPERATION || ERROR_INSERTED(5032)) { - jam(); - if(ERROR_INSERTED(5032)){ - CLEAR_ERROR_INSERT_VALUE; - } -/*---------------------------------------------------------------------------*/ -// The log disk is having problems in catching up with the speed of execution. -// We must wait with writing the log of this operation to ensure we do not -// overload the log. -/*---------------------------------------------------------------------------*/ - terrorCode = ZTEMPORARY_REDO_LOG_FAILURE; - abortErrorLab(signal); - return; - }//if - TcConnectionrec * const regTcPtr = tcConnectptr.p; - logPartPtr.i = regTcPtr->m_log_part_ptr_i; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); -/* -------------------------------------------------- */ -/* THIS PART IS USED TO WRITE THE LOG */ -/* -------------------------------------------------- */ -/* -------------------------------------------------- */ -/* CHECK IF A LOG OPERATION IS ONGOING ALREADY. */ -/* IF SO THEN QUEUE THE OPERATION FOR LATER */ -/* RESTART WHEN THE LOG PART IS FREE AGAIN. 
*/ -/* -------------------------------------------------- */ - LogPartRecord * const regLogPartPtr = logPartPtr.p; - - if(ERROR_INSERTED(5033)){ - jam(); - CLEAR_ERROR_INSERT_VALUE; - - if ((regLogPartPtr->firstLogQueue != RNIL) && - (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) { - /* -------------------------------------------------- */ - /* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */ - /* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/ - /* -------------------------------------------------- */ - /* -------------------------------------------------- */ - /* WE MUST STILL RESTART QUEUED OPERATIONS SO */ - /* THEY ALSO CAN BE ABORTED. */ - /* -------------------------------------------------- */ - regLogPartPtr->LogLqhKeyReqSent = ZTRUE; - signal->theData[0] = ZLOG_LQHKEYREQ; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//if - - terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR; - abortErrorLab(signal); - return; - } - - if (regLogPartPtr->logPartState == LogPartRecord::IDLE) { - ; - } else if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) { - jam(); - linkWaitLog(signal, logPartPtr); - regTcPtr->transactionState = TcConnectionrec::LOG_QUEUED; - return; - } else { - if ((regLogPartPtr->firstLogQueue != RNIL) && - (regLogPartPtr->LogLqhKeyReqSent == ZFALSE)) { -/* -------------------------------------------------- */ -/* WE HAVE A PROBLEM IN THAT THE LOG HAS NO */ -/* ROOM FOR ADDITIONAL OPERATIONS AT THE MOMENT.*/ -/* -------------------------------------------------- */ -/* -------------------------------------------------- */ -/* WE MUST STILL RESTART QUEUED OPERATIONS SO */ -/* THEY ALSO CAN BE ABORTED. */ -/* -------------------------------------------------- */ - regLogPartPtr->LogLqhKeyReqSent = ZTRUE; - signal->theData[0] = ZLOG_LQHKEYREQ; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//if - if (regLogPartPtr->logPartState == LogPartRecord::TAIL_PROBLEM) { - jam(); - terrorCode = ZTAIL_PROBLEM_IN_LOG_ERROR; - } else { - ndbrequire(regLogPartPtr->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM); - jam(); - terrorCode = ZFILE_CHANGE_PROBLEM_IN_LOG_ERROR; - }//if - abortErrorLab(signal); - return; - }//if - regLogPartPtr->logPartState = LogPartRecord::ACTIVE; - logFilePtr.i = regLogPartPtr->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); -/* -------------------------------------------------- */ -/* CHECK IF A NEW MBYTE IS TO BE STARTED. IF */ -/* SO INSERT A NEXT LOG RECORD, WRITE THE LOG */ -/* AND PLACE THE LOG POINTER ON THE NEW POSITION*/ -/* IF A NEW FILE IS TO BE USED, CHANGE FILE AND */ -/* ALSO START OPENING THE NEXT LOG FILE. IF A */ -/* LAP HAS BEEN COMPLETED THEN ADD ONE TO LAP */ -/* COUNTER. */ -/* -------------------------------------------------- */ - checkNewMbyte(signal); -/* -------------------------------------------------- */ -/* INSERT THE OPERATION RECORD LAST IN THE LIST */ -/* OF NOT COMPLETED OPERATIONS. ALSO RECORD THE */ -/* FILE NO, PAGE NO AND PAGE INDEX OF THE START */ -/* OF THIS LOG RECORD. */ -/* IT IS NOT ALLOWED TO INSERT IT INTO THE LIST */ -/* BEFORE CHECKING THE NEW MBYTE SINCE THAT WILL*/ -/* CAUSE THE OLD VALUES OF TC_CONNECTPTR TO BE */ -/* USED IN WRITE_FILE_DESCRIPTOR. 
*/ -/* -------------------------------------------------- */ - Uint32 tcIndex = tcConnectptr.i; - tmpTcConnectptr.i = regLogPartPtr->lastLogTcrec; - regLogPartPtr->lastLogTcrec = tcIndex; - if (tmpTcConnectptr.i == RNIL) { - jam(); - regLogPartPtr->firstLogTcrec = tcIndex; - } else { - ptrCheckGuard(tmpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - tmpTcConnectptr.p->nextLogTcrec = tcIndex; - }//if - Uint32 fileNo = logFilePtr.p->fileNo; - tcurrentFilepage = logFilePtr.p->currentFilepage; - logPagePtr.i = logFilePtr.p->currentLogpage; - regTcPtr->nextLogTcrec = RNIL; - regTcPtr->prevLogTcrec = tmpTcConnectptr.i; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - Uint32 pageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - regTcPtr->logStartFileNo = fileNo; - regTcPtr->logStartPageNo = tcurrentFilepage; - regTcPtr->logStartPageIndex = pageIndex; -/* -------------------------------------------------- */ -/* WRITE THE LOG HEADER OF THIS OPERATION. */ -/* -------------------------------------------------- */ - writeLogHeader(signal); -/* -------------------------------------------------- */ -/* WRITE THE TUPLE KEY OF THIS OPERATION. */ -/* -------------------------------------------------- */ - writeKey(signal); -/* -------------------------------------------------- */ -/* WRITE THE ATTRIBUTE INFO OF THIS OPERATION. */ -/* -------------------------------------------------- */ - writeAttrinfoLab(signal); - - logNextStart(signal); -/* -------------------------------------------------- */ -/* RESET THE STATE OF THE LOG PART. IF ANY */ -/* OPERATIONS HAVE QUEUED THEN START THE FIRST */ -/* OF THESE. */ -/* -------------------------------------------------- */ -/* -------------------------------------------------- */ -/* CONTINUE WITH PACKING OF LQHKEYREQ */ -/* -------------------------------------------------- */ - tcurrentFilepage = logFilePtr.p->currentFilepage; - if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) { - jam(); - tcurrentFilepage--; - }//if - regTcPtr->logStopPageNo = tcurrentFilepage; - regTcPtr->logWriteState = TcConnectionrec::WRITTEN; - if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) { -/* -------------------------------------------------- */ -/* AN ABORT HAVE BEEN ORDERED. THE ABORT WAITED */ -/* FOR THE LOG WRITE TO BE COMPLETED. NOW WE */ -/* CAN PROCEED WITH THE NORMAL ABORT HANDLING. */ -/* -------------------------------------------------- */ - abortCommonLab(signal); - return; - }//if - if (regTcPtr->dirtyOp != ZTRUE) { - packLqhkeyreqLab(signal); - } else { - /* ---------------------------------------------------------------------- - * I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS - * TRANSACTION. SINCE WE RELEASED THE LOG LOCK JUST NOW NO ONE ELSE CAN BE - * ACTIVE IN WRITING THE LOG. WE THUS WRITE THE LOG WITHOUT GETTING A LOCK - * SINCE WE ARE ONLY WRITING A COMMIT LOG RECORD. - * ---------------------------------------------------------------------- */ - writeCommitLog(signal, logPartPtr); - /* ---------------------------------------------------------------------- - * DIRTY OPERATIONS SHOULD COMMIT BEFORE THEY PACK THE REQUEST/RESPONSE. - * ---------------------------------------------------------------------- */ - localCommitLab(signal); - }//if -}//Dblqh::logLqhkeyreqLab() - -/* ------------------------------------------------------------------------- */ -/* ------- SEND LQHKEYREQ */ -/* */ -/* NO STATE CHECKING SINCE THE SIGNAL IS A LOCAL SIGNAL. 
THE EXECUTION OF */ -/* THE OPERATION IS COMPLETED. IT IS NOW TIME TO SEND THE OPERATION TO THE */ -/* NEXT REPLICA OR TO TC. */ -/* ------------------------------------------------------------------------- */ -void Dblqh::packLqhkeyreqLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->nextReplica == ZNIL) { -/* ------------------------------------------------------------------------- */ -/* ------- SEND LQHKEYCONF ------- */ -/* */ -/* ------------------------------------------------------------------------- */ - sendLqhkeyconfTc(signal, regTcPtr->tcBlockref); - if (! (regTcPtr->dirtyOp || - (regTcPtr->operation == ZREAD && regTcPtr->opSimple))) - { - jam(); - regTcPtr->transactionState = TcConnectionrec::PREPARED; - releaseOprec(signal); - } else { - jam(); - -/*************************************************************>*/ -/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */ -/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/ -/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */ -/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */ -/* SENT AS PART OF A COPY FRAGMENT PROCESS. */ -/* */ -/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */ -/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */ -/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */ -/* OPERATION. THE EXECUTION OF THE LAST COPY TUPLE DOES */ -/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */ -/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */ -/* THOSE OPERATIONS ARE NOT INTERESTING. */ -/* */ -/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */ -/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */ -/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */ -/* THIS NODE. */ -/*************************************************************>*/ - cleanUp(signal); - }//if - return; - }//if -/* ------------------------------------------------------------------------- */ -/* ------- SEND LQHKEYREQ ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* THERE ARE MORE REPLICAS TO SEND THE OPERATION TO. A NEW LQHKEYREQ WILL BE */ -/* PREPARED FOR THE NEXT REPLICA. */ -/* ------------------------------------------------------------------------- */ -/* CLEAR REPLICA TYPE, ATTRINFO INDICATOR (IN LQHKEYREQ), */ -/* INTERPRETED EXECUTION, SEQUENTIAL NUMBER OF REPLICA. */ -// Set bit indicating Client and TC record not the same. -// Set readlenAi indicator if readlenAi != 0 -// Stored Procedure Indicator not set. -/* ------------------------------------------------------------------------- */ - LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)&signal->theData[0]; - - UintR Treqinfo; - UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6; - Treqinfo = preComputedRequestInfoMask & regTcPtr->reqinfo; - - Uint32 nextNodeId = regTcPtr->nextReplica; - Uint32 nextVersion = getNodeInfo(nextNodeId).m_version; - UintR TAiLen = regTcPtr->reclenAiLqhkey; - - UintR TapplAddressIndicator = (regTcPtr->nextSeqNoReplica == 0 ? 
0 : 1); - LqhKeyReq::setApplicationAddressFlag(Treqinfo, TapplAddressIndicator); - LqhKeyReq::setInterpretedFlag(Treqinfo, regTcPtr->opExec); - LqhKeyReq::setSeqNoReplica(Treqinfo, regTcPtr->nextSeqNoReplica); - LqhKeyReq::setAIInLqhKeyReq(Treqinfo, TAiLen); - - if (unlikely(nextVersion < NDBD_ROWID_VERSION)) - { - LqhKeyReq::setLockType(Treqinfo, regTcPtr->lockType); - } - else - { - regTcPtr->m_use_rowid |= - fragptr.p->m_copy_started_state == Fragrecord::AC_NR_COPY; - LqhKeyReq::setRowidFlag(Treqinfo, regTcPtr->m_use_rowid); - } - - if (LqhKeyReq::getRowidFlag(Treqinfo)) - { - //ndbassert(LqhKeyReq::getOperation(Treqinfo) == ZINSERT); - } - else - { - ndbassert(LqhKeyReq::getOperation(Treqinfo) != ZINSERT); - } - - UintR TreadLenAiInd = (regTcPtr->readlenAi == 0 ? 0 : 1); - UintR TsameLqhAndClient = (tcConnectptr.i == - regTcPtr->tcOprec ? 0 : 1); - LqhKeyReq::setSameClientAndTcFlag(Treqinfo, TsameLqhAndClient); - LqhKeyReq::setReturnedReadLenAIFlag(Treqinfo, TreadLenAiInd); - - UintR TotReclenAi = regTcPtr->totSendlenAi; -/* ------------------------------------------------------------------------- */ -/* WE ARE NOW PREPARED TO SEND THE LQHKEYREQ. WE HAVE TO DECIDE IF ATTRINFO */ -/* IS INCLUDED IN THE LQHKEYREQ SIGNAL AND THEN SEND IT. */ -/* TAKE OVER SCAN OPERATION IS NEVER USED ON BACKUPS, LOG RECORDS AND START-UP*/ -/* OF NEW REPLICA AND THUS ONLY TOT_SENDLEN_AI IS USED THE UPPER 16 BITS ARE */ -/* ZERO. */ -/* ------------------------------------------------------------------------- */ - sig0 = tcConnectptr.i; - sig1 = regTcPtr->savePointId; - sig2 = regTcPtr->hashValue; - sig4 = regTcPtr->tcBlockref; - - lqhKeyReq->clientConnectPtr = sig0; - lqhKeyReq->attrLen = TotReclenAi; - lqhKeyReq->savePointId = sig1; - lqhKeyReq->hashValue = sig2; - lqhKeyReq->requestInfo = Treqinfo; - lqhKeyReq->tcBlockref = sig4; - - sig0 = regTcPtr->tableref + ((regTcPtr->schemaVersion << 16) & 0xFFFF0000); - sig1 = regTcPtr->fragmentid + (regTcPtr->nodeAfterNext[0] << 16); - sig2 = regTcPtr->transid[0]; - sig3 = regTcPtr->transid[1]; - sig4 = regTcPtr->applRef; - sig5 = regTcPtr->applOprec; - sig6 = regTcPtr->tcOprec; - UintR nextPos = (TapplAddressIndicator << 1); - - lqhKeyReq->tableSchemaVersion = sig0; - lqhKeyReq->fragmentData = sig1; - lqhKeyReq->transId1 = sig2; - lqhKeyReq->transId2 = sig3; - lqhKeyReq->noFiredTriggers = regTcPtr->noFiredTriggers; - lqhKeyReq->variableData[0] = sig4; - lqhKeyReq->variableData[1] = sig5; - lqhKeyReq->variableData[2] = sig6; - - nextPos += TsameLqhAndClient; - - if ((regTcPtr->lastReplicaNo - regTcPtr->nextSeqNoReplica) > 1) { - sig0 = (UintR)regTcPtr->nodeAfterNext[1] + - (UintR)(regTcPtr->nodeAfterNext[2] << 16); - lqhKeyReq->variableData[nextPos] = sig0; - nextPos++; - }//if - sig0 = regTcPtr->readlenAi; - sig1 = regTcPtr->tupkeyData[0]; - sig2 = regTcPtr->tupkeyData[1]; - sig3 = regTcPtr->tupkeyData[2]; - sig4 = regTcPtr->tupkeyData[3]; - - lqhKeyReq->variableData[nextPos] = sig0; - nextPos += TreadLenAiInd; - lqhKeyReq->variableData[nextPos] = sig1; - lqhKeyReq->variableData[nextPos + 1] = sig2; - lqhKeyReq->variableData[nextPos + 2] = sig3; - lqhKeyReq->variableData[nextPos + 3] = sig4; - UintR TkeyLen = LqhKeyReq::getKeyLen(Treqinfo); - if (TkeyLen < 4) { - nextPos += TkeyLen; - } else { - nextPos += 4; - }//if - - sig0 = regTcPtr->gci; - Local_key tmp = regTcPtr->m_row_id; - - lqhKeyReq->variableData[nextPos + 0] = tmp.m_page_no; - lqhKeyReq->variableData[nextPos + 1] = tmp.m_page_idx; - nextPos += 2*LqhKeyReq::getRowidFlag(Treqinfo); - - 
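
The code above appends the optional parts of the outgoing LQHKEYREQ (extra replica node ids, read length, key words, rowid, GCI) by always writing at the current cursor and then advancing the cursor by the corresponding flag value, so absent sections are simply overwritten by the next one. A small sketch of that packing idiom, assuming the destination buffer is large enough for the speculative writes (the 25-word signal in the original); names and field choices are hypothetical:

#include <cstdint>

// Write 'n' words at 'pos'; advance the cursor only if the section is present.
uint32_t pack_optional_section(uint32_t* out, uint32_t pos,
                               const uint32_t* words, uint32_t n, bool present)
{
  for (uint32_t i = 0; i < n; i++)
    out[pos + i] = words[i];
  return pos + (present ? n : 0);
}

// Usage in the style of packLqhkeyreqLab(), with made-up field values.
uint32_t build_variable_tail(uint32_t* out,
                             uint32_t readLenAi, bool hasReadLen,
                             const uint32_t rowid[2], bool hasRowid,
                             uint32_t gci, bool hasGci)
{
  uint32_t pos = 0;
  pos = pack_optional_section(out, pos, &readLenAi, 1, hasReadLen);
  pos = pack_optional_section(out, pos, rowid, 2, hasRowid);
  pos = pack_optional_section(out, pos, &gci, 1, hasGci);
  return pos;                  // number of variable words actually used
}
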
lqhKeyReq->variableData[nextPos + 0] = sig0; - nextPos += LqhKeyReq::getGCIFlag(Treqinfo); - - BlockReference lqhRef = calcLqhBlockRef(regTcPtr->nextReplica); - - if (likely(nextPos + TAiLen + LqhKeyReq::FixedSignalLength <= 25)) - { - jam(); - sig0 = regTcPtr->firstAttrinfo[0]; - sig1 = regTcPtr->firstAttrinfo[1]; - sig2 = regTcPtr->firstAttrinfo[2]; - sig3 = regTcPtr->firstAttrinfo[3]; - sig4 = regTcPtr->firstAttrinfo[4]; - - lqhKeyReq->variableData[nextPos] = sig0; - lqhKeyReq->variableData[nextPos + 1] = sig1; - lqhKeyReq->variableData[nextPos + 2] = sig2; - lqhKeyReq->variableData[nextPos + 3] = sig3; - lqhKeyReq->variableData[nextPos + 4] = sig4; - - nextPos += TAiLen; - TAiLen = 0; - } - else - { - Treqinfo &= ~(Uint32)(RI_AI_IN_THIS_MASK << RI_AI_IN_THIS_SHIFT); - lqhKeyReq->requestInfo = Treqinfo; - } - - sendSignal(lqhRef, GSN_LQHKEYREQ, signal, - nextPos + LqhKeyReq::FixedSignalLength, JBB); - if (regTcPtr->primKeyLen > 4) { - jam(); -/* ------------------------------------------------------------------------- */ -/* MORE THAN 4 WORDS OF KEY DATA IS IN THE OPERATION. THEREFORE WE NEED TO */ -/* PREPARE A KEYINFO SIGNAL. MORE THAN ONE KEYINFO SIGNAL CAN BE SENT. */ -/* ------------------------------------------------------------------------- */ - sendTupkey(signal); - }//if -/* ------------------------------------------------------------------------- */ -/* NOW I AM PREPARED TO SEND ALL THE ATTRINFO SIGNALS. AT THE MOMENT A LOOP */ -/* SENDS ALL AT ONCE. LATER WE HAVE TO ADDRESS THE PROBLEM THAT THESE COULD */ -/* LEAD TO BUFFER EXPLOSION => NODE CRASH. */ -/* ------------------------------------------------------------------------- */ -/* NEW CODE TO SEND ATTRINFO IN PACK_LQHKEYREQ */ -/* THIS CODE USES A REAL-TIME BREAK AFTER */ -/* SENDING 16 SIGNALS. */ -/* -------------------------------------------------- */ - sig0 = regTcPtr->tcOprec; - sig1 = regTcPtr->transid[0]; - sig2 = regTcPtr->transid[1]; - signal->theData[0] = sig0; - signal->theData[1] = sig1; - signal->theData[2] = sig2; - - if (unlikely(nextPos + TAiLen + LqhKeyReq::FixedSignalLength > 25)) - { - jam(); - /** - * 4 replicas... - */ - memcpy(signal->theData+3, regTcPtr->firstAttrinfo, TAiLen << 2); - sendSignal(lqhRef, GSN_ATTRINFO, signal, 3 + TAiLen, JBB); - } - - AttrbufPtr regAttrinbufptr; - regAttrinbufptr.i = regTcPtr->firstAttrinbuf; - while (regAttrinbufptr.i != RNIL) { - ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf); - jam(); - Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN]; - ndbrequire(dataLen != 0); - MEMCOPY_NO_WORDS(&signal->theData[3], ®Attrinbufptr.p->attrbuf[0], dataLen); - regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT]; - sendSignal(lqhRef, GSN_ATTRINFO, signal, dataLen + 3, JBB); - }//while - regTcPtr->transactionState = TcConnectionrec::PREPARED; - if (regTcPtr->dirtyOp == ZTRUE) { - jam(); -/*************************************************************>*/ -/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */ -/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/ -/* OTHER ATTRIBUTES WHICH ARE NOT SENSITIVE TO CONSISTE- */ -/* NCY. THE SECOND SITUATION IS BY OPERATIONS THAT ARE */ -/* SENT AS PART OF A COPY FRAGMENT PROCESS. */ -/* */ -/* DURING A COPY FRAGMENT PROCESS THERE IS NO LOGGING */ -/* ONGOING SINCE THE FRAGMENT IS NOT COMPLETE YET. THE */ -/* LOGGING STARTS AFTER COMPLETING THE LAST COPY TUPLE */ -/* OPERATION. 
THE EXECUTION OF THE LAST COPY TUPLE DOES */ -/* ALSO START A LOCAL CHECKPOINT SO THAT THE FRAGMENT */ -/* REPLICA IS RECOVERABLE. THUS GLOBAL CHECKPOINT ID FOR */ -/* THOSE OPERATIONS ARE NOT INTERESTING. */ -/* */ -/* A DIRTY WRITE IS BY DEFINITION NOT CONSISTENT. THUS */ -/* IT CAN USE ANY GLOBAL CHECKPOINT. THE IDEA HERE IS TO */ -/* ALWAYS USE THE LATEST DEFINED GLOBAL CHECKPOINT ID IN */ -/* THIS NODE. */ -/*************************************************************>*/ - cleanUp(signal); - return; - }//if - /* ------------------------------------------------------------------------ - * ALL INFORMATION NEEDED BY THE COMMIT PHASE AND COMPLETE PHASE IS - * KEPT IN THE TC_CONNECT RECORD. TO ENSURE PROPER USE OF MEMORY - * RESOURCES WE DEALLOCATE THE ATTRINFO RECORD AND KEY RECORDS - * AS SOON AS POSSIBLE. - * ------------------------------------------------------------------------ */ - releaseOprec(signal); -}//Dblqh::packLqhkeyreqLab() - -/* ========================================================================= */ -/* ==== CHECK IF THE LOG RECORD FITS INTO THE CURRENT MBYTE, ======= */ -/* OTHERWISE SWITCH TO NEXT MBYTE. */ -/* */ -/* ========================================================================= */ -void Dblqh::checkNewMbyte(Signal* signal) -{ - UintR tcnmTmp; - UintR ttotalLogSize; - -/* -------------------------------------------------- */ -/* CHECK IF A NEW MBYTE OF LOG RECORD IS TO BE */ -/* OPENED BEFORE WRITING THE LOG RECORD. NO LOG */ -/* RECORDS ARE ALLOWED TO SPAN A MBYTE BOUNDARY */ -/* */ -/* INPUT: TC_CONNECTPTR THE OPERATION */ -/* LOG_FILE_PTR THE LOG FILE */ -/* OUTPUT: LOG_FILE_PTR THE NEW LOG FILE */ -/* -------------------------------------------------- */ - ttotalLogSize = ZLOG_HEAD_SIZE + tcConnectptr.p->currTupAiLen; - ttotalLogSize = ttotalLogSize + tcConnectptr.p->primKeyLen; - tcnmTmp = logFilePtr.p->remainingWordsInMbyte; - if ((ttotalLogSize + ZNEXT_LOG_SIZE) <= tcnmTmp) { - ndbrequire(tcnmTmp >= ttotalLogSize); - logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize; - return; - } else { - jam(); -/* -------------------------------------------------- */ -/* IT WAS NOT ENOUGH SPACE IN THIS MBYTE FOR */ -/* THIS LOG RECORD. 
MOVE TO NEXT MBYTE */ -/* THIS MIGHT INCLUDE CHANGING LOG FILE */ -/* -------------------------------------------------- */ -/* WE HAVE TO INSERT A NEXT LOG RECORD FIRST */ -/* -------------------------------------------------- */ -/* THEN CONTINUE BY WRITING THE FILE DESCRIPTORS*/ -/* -------------------------------------------------- */ - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - changeMbyte(signal); - tcnmTmp = logFilePtr.p->remainingWordsInMbyte; - }//if - ndbrequire(tcnmTmp >= ttotalLogSize); - logFilePtr.p->remainingWordsInMbyte = tcnmTmp - ttotalLogSize; -}//Dblqh::checkNewMbyte() - -/* -------------------------------------------------------------------------- - * ------- WRITE OPERATION HEADER TO LOG ------- - * - * SUBROUTINE SHORT NAME: WLH - * ------------------------------------------------------------------------- */ -void Dblqh::writeLogHeader(Signal* signal) -{ - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - Uint32 hashValue = tcConnectptr.p->hashValue; - Uint32 operation = tcConnectptr.p->operation; - Uint32 keyLen = tcConnectptr.p->primKeyLen; - Uint32 aiLen = tcConnectptr.p->currTupAiLen; - Local_key rowid = tcConnectptr.p->m_row_id; - Uint32 totLogLen = ZLOG_HEAD_SIZE + aiLen + keyLen; - - if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) { - Uint32* dataPtr = &logPagePtr.p->logPageWord[logPos]; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE; - dataPtr[0] = ZPREP_OP_TYPE; - dataPtr[1] = totLogLen; - dataPtr[2] = hashValue; - dataPtr[3] = operation; - dataPtr[4] = aiLen; - dataPtr[5] = keyLen; - dataPtr[6] = rowid.m_page_no; - dataPtr[7] = rowid.m_page_idx; - } else { - writeLogWord(signal, ZPREP_OP_TYPE); - writeLogWord(signal, totLogLen); - writeLogWord(signal, hashValue); - writeLogWord(signal, operation); - writeLogWord(signal, aiLen); - writeLogWord(signal, keyLen); - writeLogWord(signal, rowid.m_page_no); - writeLogWord(signal, rowid.m_page_idx); - }//if -}//Dblqh::writeLogHeader() - -/* -------------------------------------------------------------------------- - * ------- WRITE TUPLE KEY TO LOG ------- - * - * SUBROUTINE SHORT NAME: WK - * ------------------------------------------------------------------------- */ -void Dblqh::writeKey(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 logPos, endPos, dataLen; - Int32 remainingLen; - logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - remainingLen = regTcPtr->primKeyLen; - dataLen = remainingLen; - if (remainingLen > 4) - dataLen = 4; - remainingLen -= dataLen; - endPos = logPos + dataLen; - if (endPos < ZPAGE_SIZE) { - MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos], - ®TcPtr->tupkeyData[0], - dataLen); - } else { - jam(); - for (Uint32 i = 0; i < dataLen; i++) - writeLogWord(signal, regTcPtr->tupkeyData[i]); - endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - }//if - DatabufPtr regDatabufptr; - regDatabufptr.i = regTcPtr->firstTupkeybuf; - while (remainingLen > 0) { - logPos = endPos; - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - dataLen = remainingLen; - if (remainingLen > 4) - dataLen = 4; - remainingLen -= dataLen; - endPos += dataLen; - if (endPos < ZPAGE_SIZE) { - MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos], - ®Databufptr.p->data[0], - dataLen); - } else { - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos; - for (Uint32 i = 0; i < dataLen; i++) - writeLogWord(signal, regDatabufptr.p->data[i]); - endPos = 
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - }//if - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - }//while - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos; - ndbrequire(regDatabufptr.i == RNIL); -}//Dblqh::writeKey() - -/* -------------------------------------------------------------------------- - * ------- WRITE ATTRINFO TO LOG ------- - * - * SUBROUTINE SHORT NAME: WA - * ------------------------------------------------------------------------- */ -void Dblqh::writeAttrinfoLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 totLen = regTcPtr->currTupAiLen; - if (totLen == 0) - return; - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - Uint32 lqhLen = regTcPtr->reclenAiLqhkey; - ndbrequire(totLen >= lqhLen); - Uint32 endPos = logPos + lqhLen; - totLen -= lqhLen; - if (endPos < ZPAGE_SIZE) { - MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos], - ®TcPtr->firstAttrinfo[0], - lqhLen); - } else { - for (Uint32 i = 0; i < lqhLen; i++) - writeLogWord(signal, regTcPtr->firstAttrinfo[i]); - endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - }//if - AttrbufPtr regAttrinbufptr; - regAttrinbufptr.i = regTcPtr->firstAttrinbuf; - while (totLen > 0) { - logPos = endPos; - ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf); - Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN]; - ndbrequire(totLen >= dataLen); - ndbrequire(dataLen > 0); - totLen -= dataLen; - endPos += dataLen; - if (endPos < ZPAGE_SIZE) { - MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[logPos], - ®Attrinbufptr.p->attrbuf[0], - dataLen); - } else { - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos; - for (Uint32 i = 0; i < dataLen; i++) - writeLogWord(signal, regAttrinbufptr.p->attrbuf[i]); - endPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - }//if - regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT]; - }//while - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = endPos; - ndbrequire(regAttrinbufptr.i == RNIL); -}//Dblqh::writeAttrinfoLab() - -/* ------------------------------------------------------------------------- */ -/* ------- SEND TUPLE KEY IN KEYINFO SIGNAL(S) ------- */ -/* */ -/* SUBROUTINE SHORT NAME: STU */ -/* ------------------------------------------------------------------------- */ -void Dblqh::sendTupkey(Signal* signal) -{ - UintR TdataPos = 3; - BlockReference lqhRef = calcLqhBlockRef(tcConnectptr.p->nextReplica); - signal->theData[0] = tcConnectptr.p->tcOprec; - signal->theData[1] = tcConnectptr.p->transid[0]; - signal->theData[2] = tcConnectptr.p->transid[1]; - databufptr.i = tcConnectptr.p->firstTupkeybuf; - do { - ptrCheckGuard(databufptr, cdatabufFileSize, databuf); - signal->theData[TdataPos] = databufptr.p->data[0]; - signal->theData[TdataPos + 1] = databufptr.p->data[1]; - signal->theData[TdataPos + 2] = databufptr.p->data[2]; - signal->theData[TdataPos + 3] = databufptr.p->data[3]; - - databufptr.i = databufptr.p->nextDatabuf; - TdataPos += 4; - if (databufptr.i == RNIL) { - jam(); - sendSignal(lqhRef, GSN_KEYINFO, signal, TdataPos, JBB); - return; - } else if (TdataPos == 23) { - jam(); - sendSignal(lqhRef, GSN_KEYINFO, signal, 23, JBB); - TdataPos = 3; - } - } while (1); -}//Dblqh::sendTupkey() - -void Dblqh::cleanUp(Signal* signal) -{ - releaseOprec(signal); - deleteTransidHash(signal); - releaseTcrec(signal, tcConnectptr); -}//Dblqh::cleanUp() - -/* -------------------------------------------------------------------------- - * ---- RELEASE ALL RECORDS CONNECTED TO THE OPERATION RECORD AND THE ---- - * 
OPERATION RECORD ITSELF - * ------------------------------------------------------------------------- */ -void Dblqh::releaseOprec(Signal* signal) -{ - UintR Tmpbuf; - TcConnectionrec * const regTcPtr = tcConnectptr.p; -/* ---- RELEASE DATA BUFFERS ------------------- */ - DatabufPtr regDatabufptr; - regDatabufptr.i = regTcPtr->firstTupkeybuf; -/* -------------------------------------------------------------------------- - * ------- RELEASE DATA BUFFERS ------- - * - * ------------------------------------------------------------------------- */ - - while (regDatabufptr.i != RNIL) { - jam(); - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - Tmpbuf = regDatabufptr.p->nextDatabuf; - regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf; - cfirstfreeDatabuf = regDatabufptr.i; - regDatabufptr.i = Tmpbuf; - }//while -/* ---- RELEASE ATTRINFO BUFFERS ------------------- */ - AttrbufPtr regAttrinbufptr; - regAttrinbufptr.i = regTcPtr->firstAttrinbuf; - /* ######################################################################## - * ####### RELEASE_ATTRINBUF ####### - * - * ####################################################################### */ - while (regAttrinbufptr.i != RNIL) { - jam(); - regAttrinbufptr.i= release_attrinbuf(regAttrinbufptr.i); - }//while - regTcPtr->firstAttrinbuf = RNIL; - regTcPtr->lastAttrinbuf = RNIL; - regTcPtr->firstTupkeybuf = RNIL; - regTcPtr->lastTupkeybuf = RNIL; - - if (regTcPtr->m_dealloc) - { - jam(); - regTcPtr->m_dealloc = 0; - - if (TRACENR_FLAG) - TRACENR("DELETED: " << regTcPtr->m_row_id << endl); - - TRACE_OP(regTcPtr, "DEALLOC"); - - signal->theData[0] = regTcPtr->fragmentid; - signal->theData[1] = regTcPtr->tableref; - signal->theData[2] = regTcPtr->m_row_id.m_page_no; - signal->theData[3] = regTcPtr->m_row_id.m_page_idx; - signal->theData[4] = RNIL; - EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, 5); - } -}//Dblqh::releaseOprec() - -/* ------------------------------------------------------------------------- */ -/* ------ DELETE TRANSACTION ID FROM HASH TABLE ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::deleteTransidHash(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TcConnectionrecPtr prevHashptr; - TcConnectionrecPtr nextHashptr; - - prevHashptr.i = regTcPtr->prevHashRec; - nextHashptr.i = regTcPtr->nextHashRec; - if (prevHashptr.i != RNIL) { - jam(); - ptrCheckGuard(prevHashptr, ctcConnectrecFileSize, tcConnectionrec); - prevHashptr.p->nextHashRec = nextHashptr.i; - } else { - jam(); -/* ------------------------------------------------------------------------- */ -/* THE OPERATION WAS PLACED FIRST IN THE LIST OF THE HASH TABLE. NEED TO SET */ -/* A NEW LEADER OF THE LIST. 
*/ -/* ------------------------------------------------------------------------- */ - Uint32 hashIndex = (regTcPtr->transid[0] ^ regTcPtr->tcOprec) & 1023; - ctransidHash[hashIndex] = nextHashptr.i; - }//if - if (nextHashptr.i != RNIL) { - jam(); - ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec); - nextHashptr.p->prevHashRec = prevHashptr.i; - }//if -}//Dblqh::deleteTransidHash() - -/* ------------------------------------------------------------------------- - * ------- RELEASE OPERATION FROM ACTIVE LIST ON FRAGMENT ------- - * - * SUBROUTINE SHORT NAME = RAF - * ------------------------------------------------------------------------- */ -/* ######################################################################### */ -/* ####### TRANSACTION MODULE ####### */ -/* THIS MODULE HANDLES THE COMMIT AND THE COMPLETE PHASE. */ -/* ######################################################################### */ -void Dblqh::warningReport(Signal* signal, int place) -{ - switch (place) { - case 0: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMMIT in wrong state in Dblqh" << endl; -#endif - break; - case 1: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMMIT with wrong transid in Dblqh" << endl; -#endif - break; - case 2: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMPLETE in wrong state in Dblqh" << endl; -#endif - break; - case 3: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMPLETE with wrong transid in Dblqh" << endl; -#endif - break; - case 4: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMMITREQ in wrong state in Dblqh" << endl; -#endif - break; - case 5: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMMITREQ with wrong transid in Dblqh" << endl; -#endif - break; - case 6: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMPLETEREQ in wrong state in Dblqh" << endl; -#endif - break; - case 7: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMPLETEREQ with wrong transid in Dblqh" << endl; -#endif - break; - case 8: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received ABORT with non-existing transid in Dblqh" << endl; -#endif - break; - case 9: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received ABORTREQ with non-existing transid in Dblqh" << endl; -#endif - break; - case 10: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received ABORTREQ in wrong state in Dblqh" << endl; -#endif - break; - case 11: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMMIT when tc-rec released in Dblqh" << endl; -#endif - break; - case 12: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received COMPLETE when tc-rec released in Dblqh" << endl; -#endif - break; - case 13: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received LQHKEYREF when tc-rec released in Dblqh" << endl; -#endif - break; - case 14: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received LQHKEYREF with wrong transid in Dblqh" << endl; -#endif - break; - case 15: - jam(); -#ifdef ABORT_TRACE - ndbout << "W: Received LQHKEYREF when already aborting in Dblqh" << endl; -#endif - break; - case 16: - jam(); - ndbrequire(cstartPhase == ZNIL); -#ifdef ABORT_TRACE - ndbout << "W: Received LQHKEYREF in wrong state in Dblqh" << endl; -#endif - break; - default: - jam(); - break; - }//switch - return; -}//Dblqh::warningReport() - -void Dblqh::errorReport(Signal* signal, int place) -{ - switch (place) { - case 0: - jam(); - break; - case 1: - jam(); - break; - case 2: - jam(); - break; - case 3: - jam(); - break; - default: - jam(); - break; - }//switch - 
systemErrorLab(signal, __LINE__); - return; -}//Dblqh::errorReport() - -/* ************************************************************************>> - * COMMIT: Start commit request from TC. This signal is originally sent as a - * packed signal and this function is called from execPACKED_SIGNAL. - * This is the normal commit protocol where TC first send this signal to the - * backup node which then will send COMMIT to the primary node. If - * everything is ok the primary node send COMMITTED back to TC. - * ************************************************************************>> */ -void Dblqh::execCOMMIT(Signal* signal) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - Uint32 tcIndex = signal->theData[0]; - Uint32 gci = signal->theData[1]; - Uint32 transid1 = signal->theData[2]; - Uint32 transid2 = signal->theData[3]; - jamEntry(); - if (tcIndex >= ttcConnectrecFileSize) { - errorReport(signal, 0); - return; - }//if - if (ERROR_INSERTED(5011)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4); - return; - }//if - if (ERROR_INSERTED(5012)) { - SET_ERROR_INSERT_VALUE(5017); - sendSignalWithDelay(cownref, GSN_COMMIT, signal, 2000, 4); - return; - }//if - tcConnectptr.i = tcIndex; - ptrAss(tcConnectptr, regTcConnectionrec); - if ((tcConnectptr.p->transid[0] == transid1) && - (tcConnectptr.p->transid[1] == transid2)) { - - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TRACE_OP(regTcPtr, "COMMIT"); - - CRASH_INSERTION(5048); - if (ERROR_INSERTED(5049)) - { - SET_ERROR_INSERT_VALUE(5048); - } - - commitReqLab(signal, gci); - return; - }//if - warningReport(signal, 1); - return; -}//Dblqh::execCOMMIT() - -/* ************************************************************************>> - * COMMITREQ: Commit request from TC. This is the commit protocol used if - * one of the nodes is not behaving correctly. TC explicitly sends COMMITREQ - * to both the backup and primary node and gets a COMMITCONF back if the - * COMMIT was ok. - * ************************************************************************>> */ -void Dblqh::execCOMMITREQ(Signal* signal) -{ - jamEntry(); - Uint32 reqPtr = signal->theData[0]; - BlockReference reqBlockref = signal->theData[1]; - Uint32 gci = signal->theData[2]; - Uint32 transid1 = signal->theData[3]; - Uint32 transid2 = signal->theData[4]; - Uint32 tcOprec = signal->theData[6]; - if (ERROR_INSERTED(5004)) { - systemErrorLab(signal, __LINE__); - } - if (ERROR_INSERTED(5017)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMMITREQ, signal, 2000, 7); - return; - }//if - if (findTransaction(transid1, - transid2, - tcOprec) != ZOK) { - warningReport(signal, 5); - return; - }//if - TcConnectionrec * const regTcPtr = tcConnectptr.p; - switch (regTcPtr->transactionState) { - case TcConnectionrec::PREPARED: - case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL: - case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL: - jam(); -/*-------------------------------------------------------*/ -/* THE NORMAL CASE. */ -/*-------------------------------------------------------*/ - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - commitReqLab(signal, gci); - return; - break; - case TcConnectionrec::COMMITTED: - jam(); -/*---------------------------------------------------------*/ -/* FOR SOME REASON THE COMMIT PHASE HAVE BEEN */ -/* FINISHED AFTER A TIME OUT. 
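For reference, execCOMMIT() above consumes a packed four-word signal, while execCOMMITREQ() (the recovery variant) reads a longer layout that also carries the requester; a hedged sketch of the word layouts as read by these handlers (struct names are illustrative, not the real signal classes) is:

#include <cstdint>

// Word layout of COMMIT as consumed by execCOMMIT():
//   theData[0] = tcIndex, theData[1] = gci,
//   theData[2] = transid1, theData[3] = transid2
struct CommitWords {
  uint32_t tcIndex, gci, transid1, transid2;
};

// Word layout of COMMITREQ as consumed by execCOMMITREQ():
//   theData[0] = reqPtr, theData[1] = reqBlockref, theData[2] = gci,
//   theData[3] = transid1, theData[4] = transid2, theData[6] = tcOprec
//   (word 5 is not read by this handler)
struct CommitReqWords {
  uint32_t reqPtr, reqBlockref, gci, transid1, transid2, word5, tcOprec;
};

// Illustrative decode from a raw signal buffer.
inline CommitWords decodeCommit(const uint32_t* theData) {
  return { theData[0], theData[1], theData[2], theData[3] };
}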
WE NEED ONLY SEND A */ -/* COMMITCONF SIGNAL. */ -/*---------------------------------------------------------*/ - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - signal->theData[0] = regTcPtr->reqRef; - signal->theData[1] = cownNodeid; - signal->theData[2] = regTcPtr->transid[0]; - signal->theData[3] = regTcPtr->transid[1]; - sendSignal(regTcPtr->reqBlockref, GSN_COMMITCONF, signal, 4, JBB); - break; - case TcConnectionrec::COMMIT_STOPPED: - case TcConnectionrec::WAIT_TUP_COMMIT: - jam(); - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - /*empty*/; - break; - default: - jam(); - warningReport(signal, 4); - return; - break; - }//switch - return; -}//Dblqh::execCOMMITREQ() - -/* ************************************************************************>> - * COMPLETE : Complete the transaction. Sent as a packed signal from TC. - * Works the same way as COMMIT protocol. This is the normal case with both - * primary and backup working (See COMMIT). - * ************************************************************************>> */ -void Dblqh::execCOMPLETE(Signal* signal) -{ - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - Uint32 tcIndex = signal->theData[0]; - Uint32 transid1 = signal->theData[1]; - Uint32 transid2 = signal->theData[2]; - jamEntry(); - if (tcIndex >= ttcConnectrecFileSize) { - errorReport(signal, 1); - return; - }//if - if (ERROR_INSERTED(5042)) { - ndbrequire(false); - } - if (ERROR_INSERTED(5013)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3); - return; - }//if - if (ERROR_INSERTED(5014)) { - SET_ERROR_INSERT_VALUE(5018); - sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3); - return; - }//if - tcConnectptr.i = tcIndex; - ptrAss(tcConnectptr, regTcConnectionrec); - if ((tcConnectptr.p->transactionState == TcConnectionrec::COMMITTED) && - (tcConnectptr.p->transid[0] == transid1) && - (tcConnectptr.p->transid[1] == transid2)) { - - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TRACE_OP(regTcPtr, "COMPLETE"); - - if (tcConnectptr.p->seqNoReplica != 0 && - tcConnectptr.p->activeCreat == Fragrecord::AC_NORMAL) { - jam(); - localCommitLab(signal); - return; - } - else if (tcConnectptr.p->seqNoReplica == 0) - { - jam(); - completeTransLastLab(signal); - return; - } - else - { - jam(); - completeTransNotLastLab(signal); - return; - } - }//if - if (tcConnectptr.p->transactionState != TcConnectionrec::COMMITTED) { - warningReport(signal, 2); - } else { - warningReport(signal, 3); - }//if -}//Dblqh::execCOMPLETE() - -/* ************************************************************************>> - * COMPLETEREQ: Complete request from TC. Same as COMPLETE but used if one - * node is not working ok (See COMMIT). 
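The COMPLETE handler above ends in a three-way dispatch on seqNoReplica and activeCreat; a condensed sketch of just that decision (the enum and helper names are made up for illustration) is:

#include <cstdint>

// Mirrors the branches at the end of execCOMPLETE(); the real code jumps to
// localCommitLab(), completeTransLastLab() or completeTransNotLastLab().
enum class CompleteAction { LocalCommit, CompleteLast, CompleteNotLast };

inline CompleteAction onComplete(uint32_t seqNoReplica, bool normalCreate) {
  if (seqNoReplica != 0 && normalCreate)
    return CompleteAction::LocalCommit;      // localCommitLab()
  if (seqNoReplica == 0)
    return CompleteAction::CompleteLast;     // completeTransLastLab()
  return CompleteAction::CompleteNotLast;    // completeTransNotLastLab()
}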
- * ************************************************************************>> */ -void Dblqh::execCOMPLETEREQ(Signal* signal) -{ - jamEntry(); - Uint32 reqPtr = signal->theData[0]; - BlockReference reqBlockref = signal->theData[1]; - Uint32 transid1 = signal->theData[2]; - Uint32 transid2 = signal->theData[3]; - Uint32 tcOprec = signal->theData[5]; - if (ERROR_INSERTED(5005)) { - systemErrorLab(signal, __LINE__); - } - if (ERROR_INSERTED(5018)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMPLETEREQ, signal, 2000, 6); - return; - }//if - if (findTransaction(transid1, - transid2, - tcOprec) != ZOK) { - jam(); -/*---------------------------------------------------------*/ -/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */ -/* A TIME OUT. THE TRANSACTION IS GONE. WE NEED TO */ -/* REPORT COMPLETION ANYWAY. */ -/*---------------------------------------------------------*/ - signal->theData[0] = reqPtr; - signal->theData[1] = cownNodeid; - signal->theData[2] = transid1; - signal->theData[3] = transid2; - sendSignal(reqBlockref, GSN_COMPLETECONF, signal, 4, JBB); - warningReport(signal, 7); - return; - }//if - TcConnectionrec * const regTcPtr = tcConnectptr.p; - switch (regTcPtr->transactionState) { - case TcConnectionrec::COMMITTED: - jam(); - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - /*empty*/; - break; -/*---------------------------------------------------------*/ -/* THE NORMAL CASE. */ -/*---------------------------------------------------------*/ - case TcConnectionrec::COMMIT_STOPPED: - case TcConnectionrec::WAIT_TUP_COMMIT: - jam(); -/*---------------------------------------------------------*/ -/* FOR SOME REASON THE COMPLETE PHASE STARTED AFTER */ -/* A TIME OUT. WE HAVE SET THE PROPER VARIABLES SUCH */ -/* THAT A COMPLETECONF WILL BE SENT WHEN COMPLETE IS */ -/* FINISHED. 
*/ -/*---------------------------------------------------------*/ - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - return; - break; - default: - jam(); - warningReport(signal, 6); - return; - break; - }//switch - if (regTcPtr->seqNoReplica != 0 && - regTcPtr->activeCreat != Fragrecord::AC_NR_COPY) { - jam(); - localCommitLab(signal); - } - else if (regTcPtr->seqNoReplica == 0) - { - jam(); - completeTransLastLab(signal); - } - else - { - jam(); - completeTransNotLastLab(signal); - } -}//Dblqh::execCOMPLETEREQ() - -/* ************> */ -/* COMPLETED > */ -/* ************> */ -void Dblqh::execLQHKEYCONF(Signal* signal) -{ - LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - Uint32 tcIndex = lqhKeyConf->opPtr; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - jamEntry(); - if (tcIndex >= ttcConnectrecFileSize) { - errorReport(signal, 2); - return; - }//if - tcConnectptr.i = tcIndex; - ptrAss(tcConnectptr, regTcConnectionrec); - switch (tcConnectptr.p->connectState) { - case TcConnectionrec::LOG_CONNECTED: - jam(); - completedLab(signal); - return; - break; - case TcConnectionrec::COPY_CONNECTED: - jam(); - copyCompletedLab(signal); - return; - break; - default: - jam(); - ndbrequire(false); - break; - }//switch - return; -}//Dblqh::execLQHKEYCONF() - -/* ------------------------------------------------------------------------- */ -/* ------- COMMIT PHASE ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::commitReqLab(Signal* signal, Uint32 gci) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState; - TcConnectionrec::TransactionState transState = regTcPtr->transactionState; - regTcPtr->gci = gci; - if (transState == TcConnectionrec::PREPARED) { - if (logWriteState == TcConnectionrec::WRITTEN) { - jam(); - regTcPtr->transactionState = TcConnectionrec::PREPARED_RECEIVED_COMMIT; - TcConnectionrecPtr saveTcPtr = tcConnectptr; - Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref); - signal->theData[0] = regTcPtr->tupConnectrec; - signal->theData[1] = gci; - EXECUTE_DIRECT(blockNo, GSN_TUP_WRITELOG_REQ, signal, 2); - jamEntry(); - if (regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_QUEUED) { - jam(); - return; - }//if - ndbrequire(regTcPtr->transactionState == TcConnectionrec::LOG_COMMIT_WRITTEN); - tcConnectptr = saveTcPtr; - } else if (logWriteState == TcConnectionrec::NOT_STARTED) { - jam(); - } else if (logWriteState == TcConnectionrec::NOT_WRITTEN) { - jam(); -/*---------------------------------------------------------------------------*/ -/* IT IS A READ OPERATION OR OTHER OPERATION THAT DO NOT USE THE LOG. */ -/*---------------------------------------------------------------------------*/ -/*---------------------------------------------------------------------------*/ -/* THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE. THIS CAN OCCUR */ -/* WHEN WE ARE STARTING A NEW FRAGMENT. 
*/ -/*---------------------------------------------------------------------------*/ - regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED; - } else { - ndbrequire(logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT); - jam(); -/*---------------------------------------------------------------------------*/ -/* THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER A SCAN OF ALL */ -/* OPERATION RECORD CHANGED IT INTO NOT_WRITTEN_WAIT. THIS INDICATES THAT WE */ -/* ARE WAITING FOR THIS OPERATION TO COMMIT OR ABORT SO THAT WE CAN FIND THE */ -/* STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT. */ -/*---------------------------------------------------------------------------*/ - checkScanTcCompleted(signal); - }//if - } else if (transState == TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL) { - jam(); - regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED; - return; - } else if (transState == TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL) { - jam(); - } else { - warningReport(signal, 0); - return; - }//if - if (regTcPtr->seqNoReplica == 0 || - regTcPtr->activeCreat == Fragrecord::AC_NR_COPY) { - jam(); - localCommitLab(signal); - return; - }//if - commitReplyLab(signal); - return; -}//Dblqh::commitReqLab() - -void Dblqh::execLQH_WRITELOG_REQ(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Uint32 gci = signal->theData[1]; - Uint32 newestGci = cnewestGci; - TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState; - TcConnectionrec::TransactionState transState = regTcPtr->transactionState; - regTcPtr->gci = gci; - if (gci > newestGci) { - jam(); -/* ------------------------------------------------------------------------- */ -/* KEEP TRACK OF NEWEST GLOBAL CHECKPOINT THAT LQH HAS HEARD OF. */ -/* ------------------------------------------------------------------------- */ - cnewestGci = gci; - }//if - if (logWriteState == TcConnectionrec::WRITTEN) { -/*---------------------------------------------------------------------------*/ -/* I NEED TO INSERT A COMMIT LOG RECORD SINCE WE ARE WRITING LOG IN THIS */ -/* TRANSACTION. */ -/*---------------------------------------------------------------------------*/ - jam(); - LogPartRecordPtr regLogPartPtr; - Uint32 noOfLogPages = cnoOfLogPages; - jam(); - regLogPartPtr.i = regTcPtr->m_log_part_ptr_i; - ptrCheckGuard(regLogPartPtr, clogPartFileSize, logPartRecord); - if ((regLogPartPtr.p->logPartState == LogPartRecord::ACTIVE) || - (noOfLogPages == 0)) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS LOG PART WAS CURRENTLY ACTIVE WRITING ANOTHER LOG RECORD. WE MUST */ -/* WAIT UNTIL THIS PART HAS COMPLETED ITS OPERATION. */ -/*---------------------------------------------------------------------------*/ -// We must delay the write of commit info to the log to safe-guard against -// a crash due to lack of log pages. We temporary stop all log writes to this -// log part to ensure that we don't get a buffer explosion in the delayed -// signal buffer instead. 
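commitReqLab() and execLQH_WRITELOG_REQ() above branch on the operation's log-write state; a condensed sketch of the four cases, with the real signalling reduced to comments, is:

// Condensed view of the logWriteState handling at commit time; the enum values
// mirror the states referenced above and the actions are paraphrased.
enum class LogWriteState { WRITTEN, NOT_STARTED, NOT_WRITTEN, NOT_WRITTEN_WAIT };

inline void onCommitForLogState(LogWriteState s) {
  switch (s) {
  case LogWriteState::WRITTEN:
    // A prepare record was logged: append a commit log record
    // (TUP_WRITELOG_REQ / LQH_WRITELOG_REQ), queueing on a busy log part.
    break;
  case LogWriteState::NOT_STARTED:
    // Nothing was logged for this operation: nothing to do.
    break;
  case LogWriteState::NOT_WRITTEN:
    // Read or other non-logged operation on a starting fragment: reset the
    // state to NOT_STARTED.
    break;
  case LogWriteState::NOT_WRITTEN_WAIT:
    // A fragment-start scan waits on this operation: report completion so the
    // starting global checkpoint of the new fragment can be determined.
    break;
  }
}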
-/*---------------------------------------------------------------------------*/ - linkWaitLog(signal, regLogPartPtr); - if (transState == TcConnectionrec::PREPARED) { - jam(); - regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL; - } else { - jam(); - ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT); - regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_QUEUED; - }//if - if (regLogPartPtr.p->logPartState == LogPartRecord::IDLE) { - jam(); - regLogPartPtr.p->logPartState = LogPartRecord::ACTIVE; - }//if - return; - }//if - writeCommitLog(signal, regLogPartPtr); - if (transState == TcConnectionrec::PREPARED) { - jam(); - regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL; - } else { - jam(); - ndbrequire(transState == TcConnectionrec::PREPARED_RECEIVED_COMMIT); - regTcPtr->transactionState = TcConnectionrec::LOG_COMMIT_WRITTEN; - }//if - }//if -}//Dblqh::execLQH_WRITELOG_REQ() - -void Dblqh::localCommitLab(Signal* signal) -{ - FragrecordPtr regFragptr; - regFragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - Fragrecord::FragStatus status = regFragptr.p->fragStatus; - fragptr = regFragptr; - switch (status) { - case Fragrecord::FSACTIVE: - case Fragrecord::CRASH_RECOVERING: - case Fragrecord::ACTIVE_CREATION: - jam(); - commitContinueAfterBlockedLab(signal); - return; - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::COMMIT_STOPPED; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - break; - }//switch -}//Dblqh::localCommitLab() - -void Dblqh::commitContinueAfterBlockedLab(Signal* signal) -{ -/* ------------------------------------------------------------------------- */ -/*INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/*CONTINUE HERE AFTER BEING BLOCKED FOR A WHILE DURING LOCAL CHECKPOINT. */ -/*The operation is already removed from the active list since there is no */ -/*chance for any real-time breaks before we need to release it. */ -/* ------------------------------------------------------------------------- */ -/*ALSO AFTER NORMAL PROCEDURE WE CONTINUE */ -/*WE MUST COMMIT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN AND SEES A */ -/*DIRTY STATE IN TUP. 
*/ -/* ------------------------------------------------------------------------- */ - Ptr regTcPtr = tcConnectptr; - Ptr regFragptr = fragptr; - Uint32 operation = regTcPtr.p->operation; - Uint32 dirtyOp = regTcPtr.p->dirtyOp; - Uint32 opSimple = regTcPtr.p->opSimple; - if (regTcPtr.p->activeCreat != Fragrecord::AC_IGNORED) { - if (operation != ZREAD) { - TupCommitReq * const tupCommitReq = - (TupCommitReq *)signal->getDataPtrSend(); - Uint32 sig0 = regTcPtr.p->tupConnectrec; - Uint32 tup = refToBlock(regTcPtr.p->tcTupBlockref); - jam(); - tupCommitReq->opPtr = sig0; - tupCommitReq->gci = regTcPtr.p->gci; - tupCommitReq->hashValue = regTcPtr.p->hashValue; - tupCommitReq->diskpage = RNIL; - EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal, - TupCommitReq::SignalLength); - - if(signal->theData[0] != 0) - { - regTcPtr.p->transactionState = TcConnectionrec::WAIT_TUP_COMMIT; - return; // TUP_COMMIT was timesliced - } - - if (TRACENR_FLAG) - { - TRACENR("COMMIT: "); - switch (regTcPtr.p->operation) { - case ZREAD: TRACENR("READ"); break; - case ZUPDATE: TRACENR("UPDATE"); break; - case ZWRITE: TRACENR("WRITE"); break; - case ZINSERT: TRACENR("INSERT"); break; - case ZDELETE: TRACENR("DELETE"); break; - } - - TRACENR(" tab: " << regTcPtr.p->tableref - << " frag: " << regTcPtr.p->fragmentid - << " activeCreat: " << (Uint32)regTcPtr.p->activeCreat); - if (LqhKeyReq::getNrCopyFlag(regTcPtr.p->reqinfo)) - TRACENR(" NrCopy"); - if (LqhKeyReq::getRowidFlag(regTcPtr.p->reqinfo)) - TRACENR(" rowid: " << regTcPtr.p->m_row_id); - TRACENR(" key: " << regTcPtr.p->tupkeyData[0]); - TRACENR(endl); - } - - TRACE_OP(regTcPtr.p, "ACC_COMMITREQ"); - - Uint32 acc = refToBlock(regTcPtr.p->tcAccBlockref); - signal->theData[0] = regTcPtr.p->accConnectrec; - EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1); - - } else { - if(!dirtyOp){ - TRACE_OP(regTcPtr.p, "ACC_COMMITREQ"); - - Uint32 acc = refToBlock(regTcPtr.p->tcAccBlockref); - signal->theData[0] = regTcPtr.p->accConnectrec; - EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1); - } - - if (dirtyOp) - { - jam(); - /** - * The dirtyRead does not send anything but TRANSID_AI from LDM - */ - fragptr = regFragptr; - tcConnectptr = regTcPtr; - cleanUp(signal); - return; - } - - /** - * The simpleRead will send a LQHKEYCONF - * but have already released the locks - */ - if (opSimple) - { - fragptr = regFragptr; - tcConnectptr = regTcPtr; - packLqhkeyreqLab(signal); - return; - } - } - }//if - jamEntry(); - fragptr = regFragptr; - tcConnectptr = regTcPtr; - tupcommit_conf(signal, regTcPtr.p, regFragptr.p); -} - -void -Dblqh::tupcommit_conf_callback(Signal* signal, Uint32 tcPtrI) -{ - jamEntry(); - - tcConnectptr.i = tcPtrI; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * tcPtr = tcConnectptr.p; - - ndbrequire(tcPtr->transactionState == TcConnectionrec::WAIT_TUP_COMMIT); - - FragrecordPtr regFragptr; - regFragptr.i = tcPtr->fragmentptr; - c_fragment_pool.getPtr(regFragptr); - fragptr = regFragptr; - - TRACE_OP(tcPtr, "ACC_COMMITREQ"); - - Uint32 acc = refToBlock(tcPtr->tcAccBlockref); - signal->theData[0] = tcPtr->accConnectrec; - EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1); - jamEntry(); - - tcConnectptr.i = tcPtrI; - tcConnectptr.p = tcPtr; - tupcommit_conf(signal, tcPtr, regFragptr.p); -} - -void -Dblqh::tupcommit_conf(Signal* signal, - TcConnectionrec * tcPtrP, - Fragrecord * regFragptr) -{ - Uint32 dirtyOp = tcPtrP->dirtyOp; - Uint32 seqNoReplica = tcPtrP->seqNoReplica; - Uint32 activeCreat = tcPtrP->activeCreat; - if 
(tcPtrP->gci > regFragptr->newestGci) { - jam(); -/* ------------------------------------------------------------------------- */ -/*IT IS THE FIRST TIME THIS GLOBAL CHECKPOINT IS INVOLVED IN UPDATING THIS */ -/*FRAGMENT. UPDATE THE VARIABLE THAT KEEPS TRACK OF NEWEST GCI IN FRAGMENT */ -/* ------------------------------------------------------------------------- */ - regFragptr->newestGci = tcPtrP->gci; - }//if - if (dirtyOp != ZTRUE) - { - if (seqNoReplica == 0 || activeCreat == Fragrecord::AC_NR_COPY) - { - jam(); - commitReplyLab(signal); - return; - }//if - if (seqNoReplica == 0) - { - jam(); - completeTransLastLab(signal); - } - else - { - jam(); - completeTransNotLastLab(signal); - } - return; - } else { -/* ------------------------------------------------------------------------- */ -/*WE MUST HANDLE DIRTY WRITES IN A SPECIAL WAY. THESE OPERATIONS WILL NOT */ -/*SEND ANY COMMIT OR COMPLETE MESSAGES TO OTHER NODES. THEY WILL MERELY SEND */ -/*THOSE SIGNALS INTERNALLY. */ -/* ------------------------------------------------------------------------- */ - if (tcPtrP->abortState == TcConnectionrec::ABORT_IDLE) - { - jam(); - if (activeCreat == Fragrecord::AC_NR_COPY) - { - jam(); - ndbrequire(LqhKeyReq::getNrCopyFlag(tcPtrP->reqinfo)); - ndbrequire(tcPtrP->m_nr_delete.m_cnt == 0); - } - packLqhkeyreqLab(signal); - } - else - { - ndbrequire(tcPtrP->abortState != TcConnectionrec::NEW_FROM_TC); - jam(); - sendLqhTransconf(signal, LqhTransConf::Committed); - cleanUp(signal); - }//if - }//if -}//Dblqh::commitContinueAfterBlockedLab() - -void Dblqh::commitReplyLab(Signal* signal) -{ -/* -------------------------------------------------------------- */ -/* BACKUP AND STAND-BY REPLICAS ONLY UPDATE THE TRANSACTION STATE */ -/* -------------------------------------------------------------- */ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - TcConnectionrec::AbortState abortState = regTcPtr->abortState; - regTcPtr->transactionState = TcConnectionrec::COMMITTED; - if (abortState == TcConnectionrec::ABORT_IDLE) { - Uint32 clientBlockref = regTcPtr->clientBlockref; - if (regTcPtr->seqNoReplica == 0) { - jam(); - sendCommittedTc(signal, clientBlockref); - return; - } else { - jam(); - sendCommitLqh(signal, clientBlockref); - return; - }//if - } else if (regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC) { - jam(); - signal->theData[0] = regTcPtr->reqRef; - signal->theData[1] = cownNodeid; - signal->theData[2] = regTcPtr->transid[0]; - signal->theData[3] = regTcPtr->transid[1]; - sendSignal(tcConnectptr.p->reqBlockref, GSN_COMMITCONF, signal, 4, JBB); - } else { - ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC); - jam(); - sendLqhTransconf(signal, LqhTransConf::Committed); - }//if - return; -}//Dblqh::commitReplyLab() - -/* ------------------------------------------------------------------------- */ -/* ------- COMPLETE PHASE ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::completeTransNotLastLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) { - Uint32 clientBlockref = regTcPtr->clientBlockref; - jam(); - sendCompleteLqh(signal, clientBlockref); - cleanUp(signal); - return; - } else { - jam(); - completeUnusualLab(signal); - return; - }//if -}//Dblqh::completeTransNotLastLab() - -void Dblqh::completeTransLastLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->abortState == 
TcConnectionrec::ABORT_IDLE) { - Uint32 clientBlockref = regTcPtr->clientBlockref; - jam(); -/* ------------------------------------------------------------------------- */ -/*DIRTY WRITES WHICH ARE LAST IN THE CHAIN OF REPLICAS WILL SEND COMPLETED */ -/*INSTEAD OF SENDING PREPARED TO THE TC (OR OTHER INITIATOR OF OPERATION). */ -/* ------------------------------------------------------------------------- */ - sendCompletedTc(signal, clientBlockref); - cleanUp(signal); - return; - } else { - jam(); - completeUnusualLab(signal); - return; - }//if -}//Dblqh::completeTransLastLab() - -void Dblqh::completeUnusualLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) { - jam(); - sendAborted(signal); - } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - sendLqhTransconf(signal, LqhTransConf::Committed); - } else { - ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC); - jam(); - signal->theData[0] = regTcPtr->reqRef; - signal->theData[1] = cownNodeid; - signal->theData[2] = regTcPtr->transid[0]; - signal->theData[3] = regTcPtr->transid[1]; - sendSignal(regTcPtr->reqBlockref, - GSN_COMPLETECONF, signal, 4, JBB); - }//if - cleanUp(signal); - return; -}//Dblqh::completeUnusualLab() - -/* ========================================================================= */ -/* ======= RELEASE TC CONNECT RECORD ======= */ -/* */ -/* RELEASE A TC CONNECT RECORD TO THE FREELIST. */ -/* ========================================================================= */ -void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr) -{ - jam(); - locTcConnectptr.p->tcTimer = 0; - locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED; - locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec; - cfirstfreeTcConrec = locTcConnectptr.i; - - TablerecPtr tabPtr; - tabPtr.i = locTcConnectptr.p->tableref; - if(tabPtr.i == RNIL) - return; - - ptrCheckGuard(tabPtr, ctabrecFileSize, tablerec); - - /** - * Normal case - */ - ndbrequire(tabPtr.p->usageCount > 0); - tabPtr.p->usageCount--; -}//Dblqh::releaseTcrec() - -void Dblqh::releaseTcrecLog(Signal* signal, TcConnectionrecPtr locTcConnectptr) -{ - jam(); - locTcConnectptr.p->tcTimer = 0; - locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED; - locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec; - cfirstfreeTcConrec = locTcConnectptr.i; - - TablerecPtr tabPtr; - tabPtr.i = locTcConnectptr.p->tableref; - if(tabPtr.i == RNIL) - return; - -}//Dblqh::releaseTcrecLog() - -/* ------------------------------------------------------------------------- */ -/* ------- ABORT PHASE ------- */ -/* */ -/*THIS PART IS USED AT ERRORS THAT CAUSE ABORT OF TRANSACTION. */ -/* ------------------------------------------------------------------------- */ -/* ***************************************************>> */ -/* ABORT: Abort transaction in connection. Sender TC. 
*/ -/* This is the normal protocol (See COMMIT) */ -/* ***************************************************>> */ -void Dblqh::execABORT(Signal* signal) -{ - jamEntry(); - Uint32 tcOprec = signal->theData[0]; - BlockReference tcBlockref = signal->theData[1]; - Uint32 transid1 = signal->theData[2]; - Uint32 transid2 = signal->theData[3]; - CRASH_INSERTION(5003); - if (ERROR_INSERTED(5015)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_ABORT, signal, 2000, 4); - return; - }//if - if (findTransaction(transid1, - transid2, - tcOprec) != ZOK) { - jam(); - - if(ERROR_INSERTED(5039) && - refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){ - jam(); - SET_ERROR_INSERT_VALUE(5040); - return; - } - - if(ERROR_INSERTED(5040) && - refToNode(signal->getSendersBlockRef()) != getOwnNodeId()){ - jam(); - SET_ERROR_INSERT_VALUE(5003); - return; - } - -/* ------------------------------------------------------------------------- */ -// SEND ABORTED EVEN IF NOT FOUND. -//THE TRANSACTION MIGHT NEVER HAVE ARRIVED HERE. -/* ------------------------------------------------------------------------- */ - signal->theData[0] = tcOprec; - signal->theData[1] = transid1; - signal->theData[2] = transid2; - signal->theData[3] = cownNodeid; - signal->theData[4] = ZTRUE; - sendSignal(tcBlockref, GSN_ABORTED, signal, 5, JBB); - warningReport(signal, 8); - return; - }//if - - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (ERROR_INSERTED(5100)) - { - SET_ERROR_INSERT_VALUE(5101); - return; - } - CRASH_INSERTION2(5101, regTcPtr->nextReplica != ZNIL); - -/* ------------------------------------------------------------------------- */ -/*A GUIDING DESIGN PRINCIPLE IN HANDLING THESE ERROR SITUATIONS HAVE BEEN */ -/*KEEP IT SIMPLE. THUS WE RATHER INSERT A WAIT AND SET THE ABORT_STATE TO */ -/*ACTIVE RATHER THAN WRITE NEW CODE TO HANDLE EVERY SPECIAL SITUATION. */ -/* ------------------------------------------------------------------------- */ - if (regTcPtr->nextReplica != ZNIL) { -/* ------------------------------------------------------------------------- */ -// We will immediately send the ABORT message also to the next LQH node in line. -/* ------------------------------------------------------------------------- */ - BlockReference TLqhRef = calcLqhBlockRef(regTcPtr->nextReplica); - signal->theData[0] = regTcPtr->tcOprec; - signal->theData[1] = regTcPtr->tcBlockref; - signal->theData[2] = regTcPtr->transid[0]; - signal->theData[3] = regTcPtr->transid[1]; - sendSignal(TLqhRef, GSN_ABORT, signal, 4, JBB); - }//if - regTcPtr->abortState = TcConnectionrec::ABORT_FROM_TC; - - const Uint32 commitAckMarker = regTcPtr->commitAckMarker; - if(commitAckMarker != RNIL) - { - jam(); -#ifdef MARKER_TRACE - { - CommitAckMarkerPtr tmp; - m_commitAckMarkerHash.getPtr(tmp, commitAckMarker); - ndbout_c("Ab2 marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2); - } -#endif - m_commitAckMarkerHash.release(commitAckMarker); - regTcPtr->commitAckMarker = RNIL; - } - - TRACE_OP(regTcPtr, "ABORT"); - - abortStateHandlerLab(signal); - - return; -}//Dblqh::execABORT() - -/* ************************************************************************>> - * ABORTREQ: Same as ABORT but used in case one node isn't working ok. 
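Note that execABORT() above acknowledges even when the transaction cannot be found, so TC is never left waiting for an operation that never reached this node; a sketch of that five-word ABORTED reply (a simplified stand-in for the real signal construction) is:

#include <cstdint>

// The five data words sent as ABORTED by execABORT() when findTransaction()
// fails; the last word is set to ZTRUE (assumed to be 1 here) on this path.
struct AbortedWords {
  uint32_t tcOprec, transid1, transid2, ownNodeid, lastWord;
};

inline AbortedWords makeNotFoundAborted(uint32_t tcOprec, uint32_t transid1,
                                        uint32_t transid2, uint32_t ownNodeid) {
  return { tcOprec, transid1, transid2, ownNodeid, 1u /* ZTRUE */ };
}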
- * (See COMMITREQ) - * ************************************************************************>> */ -void Dblqh::execABORTREQ(Signal* signal) -{ - jamEntry(); - Uint32 reqPtr = signal->theData[0]; - BlockReference reqBlockref = signal->theData[1]; - Uint32 transid1 = signal->theData[2]; - Uint32 transid2 = signal->theData[3]; - Uint32 tcOprec = signal->theData[5]; - if (ERROR_INSERTED(5006)) { - systemErrorLab(signal, __LINE__); - } - if (ERROR_INSERTED(5016)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_ABORTREQ, signal, 2000, 6); - return; - }//if - if (findTransaction(transid1, - transid2, - tcOprec) != ZOK) { - signal->theData[0] = reqPtr; - signal->theData[2] = cownNodeid; - signal->theData[3] = transid1; - signal->theData[4] = transid2; - sendSignal(reqBlockref, GSN_ABORTCONF, signal, 5, JBB); - warningReport(signal, 9); - return; - }//if - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->transactionState != TcConnectionrec::PREPARED) { - warningReport(signal, 10); - return; - }//if - regTcPtr->reqBlockref = reqBlockref; - regTcPtr->reqRef = reqPtr; - regTcPtr->abortState = TcConnectionrec::REQ_FROM_TC; - - abortCommonLab(signal); - return; -}//Dblqh::execABORTREQ() - -/* ************>> */ -/* ACC_TO_REF > */ -/* ************>> */ -void Dblqh::execACC_TO_REF(Signal* signal) -{ - jamEntry(); - terrorCode = signal->theData[1]; - abortErrorLab(signal); - return; -}//Dblqh::execACC_TO_REF() - -/* ************> */ -/* ACCKEYREF > */ -/* ************> */ -void Dblqh::execACCKEYREF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - terrorCode = signal->theData[1]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * const tcPtr = tcConnectptr.p; - switch (tcPtr->transactionState) { - case TcConnectionrec::WAIT_ACC: - jam(); - break; - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::ABORT_STOPPED: - case TcConnectionrec::ABORT_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/*IGNORE SINCE ABORT OF THIS OPERATION IS ONGOING ALREADY. */ -/* ------------------------------------------------------------------------- */ - return; - break; - default: - ndbrequire(false); - break; - }//switch - const Uint32 errCode = terrorCode; - tcPtr->errorCode = errCode; - - if (TRACENR_FLAG) - { - TRACENR("ACCKEYREF: " << errCode << " "); - switch (tcPtr->operation) { - case ZREAD: TRACENR("READ"); break; - case ZUPDATE: TRACENR("UPDATE"); break; - case ZWRITE: TRACENR("WRITE"); break; - case ZINSERT: TRACENR("INSERT"); break; - case ZDELETE: TRACENR("DELETE"); break; - default: TRACENR("operation << ">"); break; - } - - TRACENR(" tab: " << tcPtr->tableref - << " frag: " << tcPtr->fragmentid - << " activeCreat: " << (Uint32)tcPtr->activeCreat); - if (LqhKeyReq::getNrCopyFlag(tcPtr->reqinfo)) - TRACENR(" NrCopy"); - if (LqhKeyReq::getRowidFlag(tcPtr->reqinfo)) - TRACENR(" rowid: " << tcPtr->m_row_id); - TRACENR(" key: " << tcPtr->tupkeyData[0]); - TRACENR(endl); - - } - - ndbrequire(tcPtr->activeCreat == Fragrecord::AC_NORMAL); - ndbrequire(!LqhKeyReq::getNrCopyFlag(tcPtr->reqinfo)); - - /** - * Only primary replica can get ZTUPLE_ALREADY_EXIST || ZNO_TUPLE_FOUND - * - * Unless it's a simple or dirty read - * - * NOT TRUE! 
- * 1) op1 - primary insert ok - * 2) op1 - backup insert fail (log full or what ever) - * 3) op1 - delete ok @ primary - * 4) op1 - delete fail @ backup - * - * -> ZNO_TUPLE_FOUND is possible - */ - ndbrequire - (tcPtr->seqNoReplica == 0 || - errCode != ZTUPLE_ALREADY_EXIST || - (tcPtr->operation == ZREAD && (tcPtr->dirtyOp || tcPtr->opSimple))); - - tcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH; - abortCommonLab(signal); - return; -}//Dblqh::execACCKEYREF() - -void Dblqh::localAbortStateHandlerLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->abortState != TcConnectionrec::ABORT_IDLE) { - jam(); - return; - }//if - regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH; - regTcPtr->errorCode = terrorCode; - abortStateHandlerLab(signal); - return; -}//Dblqh::localAbortStateHandlerLab() - -void Dblqh::abortStateHandlerLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - switch (regTcPtr->transactionState) { - case TcConnectionrec::PREPARED: - jam(); -/* ------------------------------------------------------------------------- */ -/*THE OPERATION IS ALREADY PREPARED AND SENT TO THE NEXT LQH OR BACK TO TC. */ -/*WE CAN SIMPLY CONTINUE WITH THE ABORT PROCESS. */ -/*IF IT WAS A CHECK FOR TRANSACTION STATUS THEN WE REPORT THE STATUS TO THE */ -/*NEW TC AND CONTINUE WITH THE NEXT OPERATION IN LQH. */ -/* ------------------------------------------------------------------------- */ - if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - sendLqhTransconf(signal, LqhTransConf::Prepared); - return; - }//if - break; - case TcConnectionrec::LOG_COMMIT_WRITTEN_WAIT_SIGNAL: - case TcConnectionrec::LOG_COMMIT_QUEUED_WAIT_SIGNAL: - jam(); -/* ------------------------------------------------------------------------- */ -// We can only reach these states for multi-updates on a record in a transaction. -// We know that at least one of those has received the COMMIT signal, thus we -// declare us only prepared since we then receive the expected COMMIT signal. -/* ------------------------------------------------------------------------- */ - ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC); - sendLqhTransconf(signal, LqhTransConf::Prepared); - break; - case TcConnectionrec::WAIT_TUPKEYINFO: - case TcConnectionrec::WAIT_ATTR: - jam(); -/* ------------------------------------------------------------------------- */ -/* WE ARE CURRENTLY WAITING FOR MORE INFORMATION. WE CAN START THE ABORT */ -/* PROCESS IMMEDIATELY. THE KEYINFO AND ATTRINFO SIGNALS WILL BE DROPPED */ -/* SINCE THE ABORT STATE WILL BE SET. */ -/* ------------------------------------------------------------------------- */ - break; - case TcConnectionrec::WAIT_TUP: - jam(); -/* ------------------------------------------------------------------------- */ -// TUP is currently active. We have to wait for the TUPKEYREF or TUPKEYCONF -// to arrive since we might otherwise jeopardise the local checkpoint -// consistency in overload situations. -/* ------------------------------------------------------------------------- */ - regTcPtr->transactionState = TcConnectionrec::WAIT_TUP_TO_ABORT; - return; - case TcConnectionrec::WAIT_ACC: - jam(); -/* ------------------------------------------------------------------------- */ -// We start the abort immediately since the operation is still in the active -// list and the fragment cannot have been frozen yet. 
By sending LCP_HOLDOPCONF -// as direct signals we avoid the problem that we might find the operation -// in an unexpected list in ACC. -// We cannot accept being blocked before aborting ACC here since that would -// lead to seriously complex issues. -/* ------------------------------------------------------------------------- */ - abortContinueAfterBlockedLab(signal, false); - return; - break; - case TcConnectionrec::LOG_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/*CURRENTLY QUEUED FOR LOGGING. WAIT UNTIL THE LOG RECORD HAVE BEEN INSERTED */ -/*AND THEN CONTINUE THE ABORT PROCESS. */ -//Could also be waiting for an overloaded log disk. In this case it is easy -//to abort when CONTINUEB arrives. -/* ------------------------------------------------------------------------- */ - return; - break; - case TcConnectionrec::STOPPED: - jam(); - /* --------------------------------------------------------------------- - * WE ARE CURRENTLY QUEUED FOR ACCESS TO THE FRAGMENT BY A LCP - * Since nothing has been done, just release operation - * i.e. no prepare log record has been written - * so no abort log records needs to be written - */ - releaseWaitQueue(signal); - continueAfterLogAbortWriteLab(signal); - return; - break; - case TcConnectionrec::WAIT_AI_AFTER_ABORT: - jam(); -/* ------------------------------------------------------------------------- */ -/* ABORT OF ACC AND TUP ALREADY COMPLETED. THIS STATE IS ONLY USED WHEN */ -/* CREATING A NEW FRAGMENT. */ -/* ------------------------------------------------------------------------- */ - continueAbortLab(signal); - return; - break; - case TcConnectionrec::WAIT_TUP_TO_ABORT: - case TcConnectionrec::ABORT_STOPPED: - case TcConnectionrec::LOG_ABORT_QUEUED: - case TcConnectionrec::WAIT_ACC_ABORT: - case TcConnectionrec::ABORT_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/*ABORT IS ALREADY ONGOING DUE TO SOME ERROR. WE HAVE ALREADY SET THE STATE */ -/*OF THE ABORT SO THAT WE KNOW THAT TC EXPECTS A REPORT. WE CAN THUS SIMPLY */ -/*EXIT. */ -/* ------------------------------------------------------------------------- */ - return; - break; - case TcConnectionrec::WAIT_TUP_COMMIT: - case TcConnectionrec::COMMIT_STOPPED: - case TcConnectionrec::LOG_COMMIT_QUEUED: - case TcConnectionrec::COMMIT_QUEUED: - jam(); -/* ------------------------------------------------------------------------- */ -/*THIS IS ONLY AN ALLOWED STATE IF A DIRTY WRITE OR SIMPLE READ IS PERFORMED.*/ -/*IF WE ARE MERELY CHECKING THE TRANSACTION STATE IT IS ALSO AN ALLOWED STATE*/ -/* ------------------------------------------------------------------------- */ - if (regTcPtr->dirtyOp == ZTRUE) { - jam(); -/* ------------------------------------------------------------------------- */ -/*COMPLETE THE DIRTY WRITE AND THEN REPORT COMPLETED BACK TO TC. SINCE IT IS */ -/*A DIRTY WRITE IT IS ALLOWED TO COMMIT EVEN IF THE TRANSACTION ABORTS. */ -/* ------------------------------------------------------------------------- */ - return; - }//if - if (regTcPtr->opSimple) { - jam(); -/* ------------------------------------------------------------------------- */ -/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */ -/*ACC TO CLEAR THE LOCKS. COMPLETE THIS PROCESS AND THEN RETURN AS NORMAL. */ -/*NO DATA HAS CHANGED DUE TO THIS SIMPLE READ ANYWAY. 
*/ -/* ------------------------------------------------------------------------- */ - return; - }//if - ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC); - jam(); -/* ------------------------------------------------------------------------- */ -/*WE ARE ONLY CHECKING THE STATUS OF THE TRANSACTION. IT IS COMMITTING. */ -/*COMPLETE THE COMMIT LOCALLY AND THEN SEND REPORT OF COMMITTED TO THE NEW TC*/ -/* ------------------------------------------------------------------------- */ - return; - break; - case TcConnectionrec::COMMITTED: - jam(); - ndbrequire(regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC); -/* ------------------------------------------------------------------------- */ -/*WE ARE CHECKING TRANSACTION STATUS. REPORT COMMITTED AND CONTINUE WITH THE */ -/*NEXT OPERATION. */ -/* ------------------------------------------------------------------------- */ - sendLqhTransconf(signal, LqhTransConf::Committed); - return; - break; - default: - ndbrequire(false); -/* ------------------------------------------------------------------------- */ -/*THE STATE WAS NOT AN ALLOWED STATE ON A NORMAL OPERATION. SCANS AND COPY */ -/*FRAGMENT OPERATIONS SHOULD HAVE EXECUTED IN ANOTHER PATH. */ -/* ------------------------------------------------------------------------- */ - break; - }//switch - abortCommonLab(signal); - return; -}//Dblqh::abortStateHandlerLab() - -void Dblqh::abortErrorLab(Signal* signal) -{ - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->abortState == TcConnectionrec::ABORT_IDLE) { - jam(); - regTcPtr->abortState = TcConnectionrec::ABORT_FROM_LQH; - regTcPtr->errorCode = terrorCode; - }//if - abortCommonLab(signal); - return; -}//Dblqh::abortErrorLab() - -void Dblqh::abortCommonLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - const Uint32 commitAckMarker = regTcPtr->commitAckMarker; - const Uint32 activeCreat = regTcPtr->activeCreat; - if (commitAckMarker != RNIL) - { - /** - * There is no NR ongoing and we have a marker - */ - jam(); -#ifdef MARKER_TRACE - { - CommitAckMarkerPtr tmp; - m_commitAckMarkerHash.getPtr(tmp, commitAckMarker); - ndbout_c("Abo marker[%.8x %.8x]", tmp.p->transid1, tmp.p->transid2); - } -#endif - m_commitAckMarkerHash.release(commitAckMarker); - regTcPtr->commitAckMarker = RNIL; - } - - if (unlikely(activeCreat == Fragrecord::AC_NR_COPY)) - { - jam(); - if (regTcPtr->m_nr_delete.m_cnt) - { - jam(); - /** - * Let operation wait for pending NR operations - */ - -#ifdef VM_TRACE - /** - * Only disk table can have pending ops... 
- */ - TablerecPtr tablePtr; - tablePtr.i = regTcPtr->tableref; - ptrCheckGuard(tablePtr, ctabrecFileSize, tablerec); - ndbrequire(tablePtr.p->m_disk_table); -#endif - return; - } - } - - fragptr.i = regTcPtr->fragmentptr; - if (fragptr.i != RNIL) { - jam(); - c_fragment_pool.getPtr(fragptr); - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - case Fragrecord::CRASH_RECOVERING: - case Fragrecord::ACTIVE_CREATION: - jam(); - abortContinueAfterBlockedLab(signal, true); - return; - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - regTcPtr->transactionState = TcConnectionrec::ABORT_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - break; - }//switch - } else { - jam(); - continueAbortLab(signal); - }//if -}//Dblqh::abortCommonLab() - -void Dblqh::abortContinueAfterBlockedLab(Signal* signal, bool canBlock) -{ - /* ------------------------------------------------------------------------ - * INPUT: TC_CONNECTPTR ACTIVE OPERATION RECORD - * ------------------------------------------------------------------------ - * ------------------------------------------------------------------------ - * CAN COME HERE AS RESTART AFTER BEING BLOCKED BY A LOCAL CHECKPOINT. - * ------------------------------------------------------------------------ - * ALSO AS PART OF A NORMAL ABORT WITHOUT BLOCKING. - * WE MUST ABORT TUP BEFORE ACC TO ENSURE THAT NO ONE RACES IN - * AND SEES A STATE IN TUP. - * ----------------------------------------------------------------------- */ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - - TRACE_OP(regTcPtr, "ACC ABORT"); - - regTcPtr->transactionState = TcConnectionrec::WAIT_ACC_ABORT; - signal->theData[0] = regTcPtr->accConnectrec; - signal->theData[1] = 2; // JOB BUFFER IF NEEDED - EXECUTE_DIRECT(DBACC, GSN_ACC_ABORTREQ, signal, 2); - - if (signal->theData[1] == RNIL) - { - jam(); - /* ------------------------------------------------------------------------ - * We need to insert a real-time break by sending ACC_ABORTCONF through the - * job buffer to ensure that we catch any ACCKEYCONF or TUPKEYCONF or - * TUPKEYREF that are in the job buffer but not yet processed. Doing - * everything without that would race and create a state error when they - * are executed. - * --------------------------------------------------------------------- */ - return; - } - - execACC_ABORTCONF(signal); - return; -}//Dblqh::abortContinueAfterBlockedLab() - -/* ******************>> */ -/* ACC_ABORTCONF > */ -/* ******************>> */ -void Dblqh::execACC_ABORTCONF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - TcConnectionrec * const regTcPtr = tcConnectptr.p; - ndbrequire(regTcPtr->transactionState == TcConnectionrec::WAIT_ACC_ABORT); - - TRACE_OP(regTcPtr, "ACC_ABORTCONF"); - signal->theData[0] = regTcPtr->tupConnectrec; - EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1); - - jamEntry(); - continueAbortLab(signal); - return; -}//Dblqh::execACC_ABORTCONF() - -void Dblqh::continueAbortLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - /* ------------------------------------------------------------------------ - * AN ERROR OCCURED IN THE ACTIVE CREATION AFTER THE ABORT PHASE. - * WE NEED TO CONTINUE WITH A NORMAL ABORT. 
- * ------------------------------------------------------------------------ - * ALSO USED FOR NORMAL CLEAN UP AFTER A NORMAL ABORT. - * ------------------------------------------------------------------------ - * ALSO USED WHEN NO FRAGMENT WAS SET UP ON OPERATION. - * ------------------------------------------------------------------------ */ - if (regTcPtr->logWriteState == TcConnectionrec::WRITTEN) { - jam(); - /* ---------------------------------------------------------------------- - * I NEED TO INSERT A ABORT LOG RECORD SINCE WE ARE WRITING LOG IN THIS - * TRANSACTION. - * ---------------------------------------------------------------------- */ - initLogPointers(signal); - if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) { - jam(); - /* -------------------------------------------------------------------- - * A PREPARE OPERATION IS CURRENTLY WRITING IN THE LOG. - * WE MUST WAIT ON OUR TURN TO WRITE THE LOG. - * IT IS NECESSARY TO WRITE ONE LOG RECORD COMPLETELY - * AT A TIME OTHERWISE WE WILL SCRAMBLE THE LOG. - * -------------------------------------------------------------------- */ - linkWaitLog(signal, logPartPtr); - regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED; - return; - }//if - if (cnoOfLogPages == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -// We must delay the write of commit info to the log to safe-guard against -// a crash due to lack of log pages. We temporary stop all log writes to this -// log part to ensure that we don't get a buffer explosion in the delayed -// signal buffer instead. -/*---------------------------------------------------------------------------*/ - linkWaitLog(signal, logPartPtr); - regTcPtr->transactionState = TcConnectionrec::LOG_ABORT_QUEUED; - if (logPartPtr.p->logPartState == LogPartRecord::IDLE) { - jam(); - logPartPtr.p->logPartState = LogPartRecord::ACTIVE; - }//if - return; - }//if - writeAbortLog(signal); - removeLogTcrec(signal); - } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_STARTED) { - jam(); - } else if (regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN) { - jam(); - /* ------------------------------------------------------------------ - * IT IS A READ OPERATION OR OTHER OPERATION THAT DO NOT USE THE LOG. - * ------------------------------------------------------------------ */ - /* ------------------------------------------------------------------ - * THE LOG HAS NOT BEEN WRITTEN SINCE THE LOG FLAG WAS FALSE. - * THIS CAN OCCUR WHEN WE ARE STARTING A NEW FRAGMENT. - * ------------------------------------------------------------------ */ - regTcPtr->logWriteState = TcConnectionrec::NOT_STARTED; - } else { - ndbrequire(regTcPtr->logWriteState == TcConnectionrec::NOT_WRITTEN_WAIT); - jam(); - /* ---------------------------------------------------------------- - * THE STATE WAS SET TO NOT_WRITTEN BY THE OPERATION BUT LATER - * A SCAN OF ALL OPERATION RECORD CHANGED IT INTO NOT_WRITTEN_WAIT. - * THIS INDICATES THAT WE ARE WAITING FOR THIS OPERATION TO COMMIT - * OR ABORT SO THAT WE CAN FIND THE - * STARTING GLOBAL CHECKPOINT OF THIS NEW FRAGMENT. 
- * ---------------------------------------------------------------- */ - checkScanTcCompleted(signal); - }//if - continueAfterLogAbortWriteLab(signal); - return; -}//Dblqh::continueAbortLab() - -void Dblqh::continueAfterLogAbortWriteLab(Signal* signal) -{ - TcConnectionrec * const regTcPtr = tcConnectptr.p; - if (regTcPtr->operation == ZREAD && regTcPtr->dirtyOp) - { - jam(); - TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend(); - - tcKeyRef->connectPtr = regTcPtr->applOprec; - tcKeyRef->transId[0] = regTcPtr->transid[0]; - tcKeyRef->transId[1] = regTcPtr->transid[1]; - tcKeyRef->errorCode = regTcPtr->errorCode; - sendTCKEYREF(signal, regTcPtr->applRef, regTcPtr->clientBlockref, 0); - cleanUp(signal); - return; - }//if - if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_LQH) { - LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtrSend(); - - jam(); - lqhKeyRef->userRef = regTcPtr->clientConnectrec; - lqhKeyRef->connectPtr = regTcPtr->tcOprec; - lqhKeyRef->errorCode = regTcPtr->errorCode; - lqhKeyRef->transId1 = regTcPtr->transid[0]; - lqhKeyRef->transId2 = regTcPtr->transid[1]; - sendSignal(regTcPtr->clientBlockref, GSN_LQHKEYREF, signal, - LqhKeyRef::SignalLength, JBB); - } else if (regTcPtr->abortState == TcConnectionrec::ABORT_FROM_TC) { - jam(); - sendAborted(signal); - } else if (regTcPtr->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - sendLqhTransconf(signal, LqhTransConf::Aborted); - } else { - ndbrequire(regTcPtr->abortState == TcConnectionrec::REQ_FROM_TC); - jam(); - signal->theData[0] = regTcPtr->reqRef; - signal->theData[1] = tcConnectptr.i; - signal->theData[2] = cownNodeid; - signal->theData[3] = regTcPtr->transid[0]; - signal->theData[4] = regTcPtr->transid[1]; - sendSignal(regTcPtr->reqBlockref, GSN_ABORTCONF, - signal, 5, JBB); - }//if - cleanUp(signal); -}//Dblqh::continueAfterLogAbortWriteLab() - -void -Dblqh::sendTCKEYREF(Signal* signal, Uint32 ref, Uint32 routeRef, Uint32 cnt) -{ - const Uint32 nodeId = refToNode(ref); - const bool connectedToNode = getNodeInfo(nodeId).m_connected; - - if (likely(connectedToNode)) - { - jam(); - sendSignal(ref, GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB); - } - else - { - if (routeRef && - getNodeInfo(refToNode(routeRef)).m_version >= MAKE_VERSION(5,1,14)) - { - jam(); - memmove(signal->theData+25, signal->theData, 4*TcKeyRef::SignalLength); - RouteOrd* ord = (RouteOrd*)signal->getDataPtrSend(); - ord->dstRef = ref; - ord->srcRef = reference(); - ord->gsn = GSN_TCKEYREF; - ord->cnt = 0; - LinearSectionPtr ptr[3]; - ptr[0].p = signal->theData+25; - ptr[0].sz = TcKeyRef::SignalLength; - sendSignal(routeRef, GSN_ROUTE_ORD, signal, RouteOrd::SignalLength, JBB, - ptr, 1); - } - else - { - jam(); - memmove(signal->theData + 3, signal->theData, 4*TcKeyRef::SignalLength); - signal->theData[0] = ZRETRY_TCKEYREF; - signal->theData[1] = cnt + 1; - signal->theData[2] = ref; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, - TcKeyRef::SignalLength + 3); - } - } -} - -/* ########################################################################## - * ####### MODULE TO HANDLE TC FAILURE ####### - * - * ########################################################################## */ - -/* ************************************************************************>> - * NODE_FAILREP: Node failure report. Sender Ndbcntr. Set status of failed - * node to down and reply with NF_COMPLETEREP to DIH which will report that - * LQH has completed failure handling. 
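sendTCKEYREF() above picks one of three delivery paths: straight to the API node when it is connected, wrapped in ROUTE_ORD through a new-enough routing node otherwise, or a delayed CONTINUEB retry to itself as the fallback. A hedged sketch of only that decision (the boolean parameters are placeholders for the real checks) is:

// Decision structure of sendTCKEYREF(), with the actual signal construction
// elided.
enum class TcKeyRefDelivery { Direct, ViaRouteOrd, RetryLater };

inline TcKeyRefDelivery chooseDelivery(bool destConnected,
                                       bool haveRouteRef,
                                       bool routerSupportsRouteOrd) {
  if (destConnected)
    return TcKeyRefDelivery::Direct;        // sendSignal(ref, GSN_TCKEYREF, ...)
  if (haveRouteRef && routerSupportsRouteOrd)
    return TcKeyRefDelivery::ViaRouteOrd;   // wrap in ROUTE_ORD to routeRef
  return TcKeyRefDelivery::RetryLater;      // delayed CONTINUEB(ZRETRY_TCKEYREF)
}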
- * ************************************************************************>> */ -void Dblqh::execNODE_FAILREP(Signal* signal) -{ - UintR TfoundNodes = 0; - UintR TnoOfNodes; - UintR Tdata[MAX_NDB_NODES]; - Uint32 i; - - NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; - - TnoOfNodes = nodeFail->noOfNodes; - UintR index = 0; - for (i = 1; i < MAX_NDB_NODES; i++) { - jam(); - if(NodeBitmask::get(nodeFail->theNodes, i)){ - jam(); - Tdata[index] = i; - index++; - }//if - }//for - - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - - ndbrequire(index == TnoOfNodes); - ndbrequire(cnoOfNodes - 1 < MAX_NDB_NODES); - for (i = 0; i < TnoOfNodes; i++) { - const Uint32 nodeId = Tdata[i]; - lcpPtr.p->m_EMPTY_LCP_REQ.clear(nodeId); - - for (Uint32 j = 0; j < cnoOfNodes; j++) { - jam(); - if (cnodeData[j] == nodeId){ - jam(); - cnodeStatus[j] = ZNODE_DOWN; - - TfoundNodes++; - }//if - }//for - NFCompleteRep * const nfCompRep = (NFCompleteRep *)&signal->theData[0]; - nfCompRep->blockNo = DBLQH; - nfCompRep->nodeId = cownNodeid; - nfCompRep->failedNodeId = Tdata[i]; - sendSignal(DBDIH_REF, GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - }//for - ndbrequire(TnoOfNodes == TfoundNodes); -}//Dblqh::execNODE_FAILREP() - -/* ************************************************************************>> - * LQH_TRANSREQ: Report status of all transactions where TC was coordinated - * by a crashed TC - * ************************************************************************>> */ -/* ************************************************************************>> - * THIS SIGNAL IS RECEIVED AFTER A NODE CRASH. - * THE NODE HAD A TC AND COORDINATED A NUMBER OF TRANSACTIONS. - * NOW THE MASTER NODE IS PICKING UP THOSE TRANSACTIONS - * TO COMPLETE THEM. EITHER ABORT THEM OR COMMIT THEM. - * ************************************************************************>> */ -void Dblqh::execLQH_TRANSREQ(Signal* signal) -{ - jamEntry(); - Uint32 newTcPtr = signal->theData[0]; - BlockReference newTcBlockref = signal->theData[1]; - Uint32 oldNodeId = signal->theData[2]; - tcNodeFailptr.i = oldNodeId; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - if ((tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_TRUE) || - (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK)) { - jam(); - tcNodeFailptr.p->lastNewTcBlockref = newTcBlockref; - /* ------------------------------------------------------------------------ - * WE HAVE RECEIVED A SIGNAL SPECIFYING THAT WE NEED TO HANDLE THE FAILURE - * OF A TC. NOW WE RECEIVE ANOTHER SIGNAL WITH THE SAME ORDER. THIS CAN - * OCCUR IF THE NEW TC FAILS. WE MUST BE CAREFUL IN THIS CASE SO THAT WE DO - * NOT START PARALLEL ACTIVITIES TRYING TO DO THE SAME THING. WE SAVE THE - * NEW BLOCK REFERENCE TO THE LAST NEW TC IN A VARIABLE AND ASSIGN TO IT TO - * NEW_TC_BLOCKREF WHEN THE OLD PROCESS RETURNS TO LQH_TRANS_NEXT. IT IS - * CERTAIN TO COME THERE SINCE THIS IS THE ONLY PATH TO TAKE CARE OF THE - * NEXT TC CONNECT RECORD. WE SET THE STATUS TO BREAK TO INDICATE TO THE OLD - * PROCESS WHAT IS HAPPENING. 
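execLQH_TRANSREQ() above protects against a second take-over request arriving while an earlier one is still running: it only records the newest TC reference and flags TC_STATE_BREAK, and the ongoing scan later restarts from record zero for that newer TC. A compact sketch of the guard, with hypothetical simplified state and no signalling, is:

#include <cstdint>

// Hypothetical condensed per-failed-TC state, loosely mirroring how
// TcNodeFailRecord is used by execLQH_TRANSREQ() / lqhTransNextLab().
struct TakeoverState {
  enum Status { STATE_FALSE, STATE_TRUE, STATE_BREAK } status = STATE_FALSE;
  uint32_t newTcRef = 0, lastNewTcRef = 0, tcRecNow = 0;
};

inline void onLqhTransReq(TakeoverState& s, uint32_t newTcRef) {
  if (s.status == TakeoverState::STATE_TRUE ||
      s.status == TakeoverState::STATE_BREAK) {
    s.lastNewTcRef = newTcRef;               // remember the latest requester
    s.status = TakeoverState::STATE_BREAK;   // running scan restarts from 0
    return;
  }
  s.newTcRef = newTcRef;
  s.tcRecNow = 0;                            // scan from the first record
  s.status = TakeoverState::STATE_TRUE;      // real code continues via CONTINUEB
}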
- * ------------------------------------------------------------------------ */ - tcNodeFailptr.p->lastNewTcRef = newTcPtr; - tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_BREAK; - return; - }//if - tcNodeFailptr.p->oldNodeId = oldNodeId; - tcNodeFailptr.p->newTcBlockref = newTcBlockref; - tcNodeFailptr.p->newTcRef = newTcPtr; - tcNodeFailptr.p->tcRecNow = 0; - tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::execLQH_TRANSREQ() - -void Dblqh::lqhTransNextLab(Signal* signal) -{ - UintR tend; - UintR tstart; - UintR guard0; - - if (tcNodeFailptr.p->tcFailStatus == TcNodeFailRecord::TC_STATE_BREAK) { - jam(); - /* ---------------------------------------------------------------------- - * AN INTERRUPTION TO THIS NODE FAIL HANDLING WAS RECEIVED AND A NEW - * TC HAVE BEEN ASSIGNED TO TAKE OVER THE FAILED TC. PROBABLY THE OLD - * NEW TC HAVE FAILED. - * ---------------------------------------------------------------------- */ - tcNodeFailptr.p->newTcBlockref = tcNodeFailptr.p->lastNewTcBlockref; - tcNodeFailptr.p->newTcRef = tcNodeFailptr.p->lastNewTcRef; - tcNodeFailptr.p->tcRecNow = 0; - tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_TRUE; - }//if - tstart = tcNodeFailptr.p->tcRecNow; - tend = tstart + 200; - guard0 = tend; - for (tcConnectptr.i = tstart; tcConnectptr.i <= guard0; tcConnectptr.i++) { - jam(); - if (tcConnectptr.i >= ctcConnectrecFileSize) { - jam(); - /** - * Finished with scanning operation record - * - * now scan markers - */ - scanMarkers(signal, tcNodeFailptr.i, 0, RNIL); - return; - }//if - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) { - if (tcConnectptr.p->transactionState != TcConnectionrec::TC_NOT_CONNECTED) { - if (tcConnectptr.p->tcScanRec == RNIL) { - if (refToNode(tcConnectptr.p->tcBlockref) == tcNodeFailptr.p->oldNodeId) { - if (tcConnectptr.p->operation != ZREAD) { - jam(); - tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i; - tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC; - abortStateHandlerLab(signal); - return; - } else { - jam(); - if (tcConnectptr.p->opSimple != ZTRUE) { - jam(); - tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i; - tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC; - abortStateHandlerLab(signal); - return; - }//if - }//if - }//if - } else { - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - switch(scanptr.p->scanType){ - case ScanRecord::COPY: - { - jam(); - if (scanptr.p->scanNodeId == tcNodeFailptr.p->oldNodeId) { - jam(); - /* ------------------------------------------------------------ - * THE RECEIVER OF THE COPY HAVE FAILED. - * WE HAVE TO CLOSE THE COPY PROCESS. 
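lqhTransNextLab() above walks the tc connect records in slices of 200 and reschedules itself with CONTINUEB(ZLQH_TRANS_NEXT), so a large take-over never monopolises the block; the slicing skeleton, with the per-record checks and the self-signal reduced to comments, looks roughly like this:

#include <cstdint>

// Skeleton of the 200-record slicing in lqhTransNextLab(); the per-record
// abort/close handling and the CONTINUEB self-signal are only sketched.
inline void scanSlice(uint32_t& tcRecNow, uint32_t fileSize) {
  const uint32_t start = tcRecNow;
  const uint32_t end = start + 200;
  for (uint32_t i = start; i <= end; i++) {
    if (i >= fileSize) {
      // finished with the operation records; continue with the marker scan
      return;
    }
    // handleRecord(i);  // abort / close scan / close copy as in the original
  }
  tcRecNow = end + 1;
  // scheduleContinueB();  // resume with the next slice on a later signal
}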
- * ----------------------------------------------------------- */ - if (0) ndbout_c("close copy"); - tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i; - tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC; - closeCopyRequestLab(signal); - return; - } - break; - } - case ScanRecord::SCAN: - { - jam(); - if (refToNode(tcConnectptr.p->tcBlockref) == - tcNodeFailptr.p->oldNodeId) { - jam(); - tcConnectptr.p->tcNodeFailrec = tcNodeFailptr.i; - tcConnectptr.p->abortState = TcConnectionrec::NEW_FROM_TC; - closeScanRequestLab(signal); - return; - }//if - break; - } - default: - ndbrequire(false); - } - }//if - }//if - }//if - }//for - tcNodeFailptr.p->tcRecNow = tend + 1; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::lqhTransNextLab() - -void -Dblqh::scanMarkers(Signal* signal, - Uint32 tcNodeFail, - Uint32 startBucket, - Uint32 i){ - - jam(); - - TcNodeFailRecordPtr tcNodeFailPtr; - tcNodeFailPtr.i = tcNodeFail; - ptrCheckGuard(tcNodeFailPtr, ctcNodeFailrecFileSize, tcNodeFailRecord); - const Uint32 crashedTcNodeId = tcNodeFailPtr.p->oldNodeId; - - CommitAckMarkerIterator iter; - if(i == RNIL){ - m_commitAckMarkerHash.next(startBucket, iter); - } else { - jam(); - iter.curr.i = i; - iter.bucket = startBucket; - m_commitAckMarkerHash.getPtr(iter.curr); - m_commitAckMarkerHash.next(iter); - } - - const Uint32 RT_BREAK = 256; - for(i = 0; itcFailStatus = TcNodeFailRecord::TC_STATE_FALSE; - signal->theData[0] = tcNodeFailPtr.p->newTcRef; - signal->theData[1] = cownNodeid; - signal->theData[2] = LqhTransConf::LastTransConf; - sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF, - signal, 3, JBB); - return; - } - - if(iter.curr.p->tcNodeId == crashedTcNodeId){ - jam(); - - /** - * Found marker belonging to crashed node - */ - LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0]; - lqhTransConf->tcRef = tcNodeFailPtr.p->newTcRef; - lqhTransConf->lqhNodeId = cownNodeid; - lqhTransConf->operationStatus = LqhTransConf::Marker; - lqhTransConf->transId1 = iter.curr.p->transid1; - lqhTransConf->transId2 = iter.curr.p->transid2; - lqhTransConf->apiRef = iter.curr.p->apiRef; - lqhTransConf->apiOpRec = iter.curr.p->apiOprec; - sendSignal(tcNodeFailPtr.p->newTcBlockref, GSN_LQH_TRANSCONF, - signal, 7, JBB); - - signal->theData[0] = ZSCAN_MARKERS; - signal->theData[1] = tcNodeFailPtr.i; - signal->theData[2] = iter.bucket; - signal->theData[3] = iter.curr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; - } - - m_commitAckMarkerHash.next(iter); - } - - signal->theData[0] = ZSCAN_MARKERS; - signal->theData[1] = tcNodeFailPtr.i; - signal->theData[2] = iter.bucket; - signal->theData[3] = RNIL; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); -} - -/* ######################################################################### - * ####### SCAN MODULE ####### - * - * ######################################################################### - * ------------------------------------------------------------------------- - * THIS MODULE CONTAINS THE CODE THAT HANDLES A SCAN OF A PARTICULAR FRAGMENT - * IT OPERATES UNDER THE CONTROL OF TC AND ORDERS ACC TO PERFORM A SCAN OF - * ALL TUPLES IN THE FRAGMENT. TUP PERFORMS THE NECESSARY SEARCH CONDITIONS - * TO ENSURE THAT ONLY VALID TUPLES ARE RETURNED TO THE APPLICATION. 
- * ------------------------------------------------------------------------- */ -/* *************** */ -/* ACC_SCANCONF > */ -/* *************** */ -void Dblqh::execACC_SCANCONF(Signal* signal) -{ - AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0]; - jamEntry(); - scanptr.i = accScanConf->scanPtr; - c_scanRecordPool.getPtr(scanptr); - if (scanptr.p->scanState == ScanRecord::WAIT_ACC_SCAN) { - accScanConfScanLab(signal); - } else { - ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_ACC_COPY); - accScanConfCopyLab(signal); - }//if -}//Dblqh::execACC_SCANCONF() - -/* ************>> */ -/* ACC_SCANREF > */ -/* ************>> */ -void Dblqh::execACC_SCANREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dblqh::execACC_SCANREF() - -/* ***************>> */ -/* NEXT_SCANCONF > */ -/* ***************>> */ -void Dblqh::execNEXT_SCANCONF(Signal* signal) -{ - NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0]; - jamEntry(); - scanptr.i = nextScanConf->scanPtr; - c_scanRecordPool.getPtr(scanptr); - if (likely(nextScanConf->localKeyLength == 1)) - { - jam(); - scanptr.p->m_row_id.assref(nextScanConf->localKey[0]); - } - else - { - jam(); - scanptr.p->m_row_id.m_page_no = nextScanConf->localKey[0]; - scanptr.p->m_row_id.m_page_idx = nextScanConf->localKey[1]; - } - -#ifdef VM_TRACE - if (signal->getLength() > 2 && nextScanConf->accOperationPtr != RNIL) - { - Ptr regTcPtr; - regTcPtr.i = scanptr.p->scanTcrec; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - ndbassert(regTcPtr.p->fragmentid == nextScanConf->fragId); - } -#endif - - fragptr.i = scanptr.p->fragPtrI; - c_fragment_pool.getPtr(fragptr); - switch (scanptr.p->scanState) { - case ScanRecord::WAIT_CLOSE_SCAN: - jam(); - accScanCloseConfLab(signal); - break; - case ScanRecord::WAIT_CLOSE_COPY: - jam(); - accCopyCloseConfLab(signal); - break; - case ScanRecord::WAIT_NEXT_SCAN: - jam(); - nextScanConfScanLab(signal); - break; - case ScanRecord::WAIT_NEXT_SCAN_COPY: - jam(); - nextScanConfCopyLab(signal); - break; - case ScanRecord::WAIT_RELEASE_LOCK: - jam(); - ndbrequire(signal->length() == 1); - scanLockReleasedLab(signal); - break; - default: - ndbout_c("%d", scanptr.p->scanState); - ndbrequire(false); - }//switch -}//Dblqh::execNEXT_SCANCONF() - -/* ***************> */ -/* NEXT_SCANREF > */ -/* ***************> */ -void Dblqh::execNEXT_SCANREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Dblqh::execNEXT_SCANREF() - -/* ******************> */ -/* STORED_PROCCONF > */ -/* ******************> */ -void Dblqh::execSTORED_PROCCONF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - Uint32 storedProcId = signal->theData[1]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - switch (scanptr.p->scanState) { - case ScanRecord::WAIT_STORED_PROC_SCAN: - jam(); - scanptr.p->scanStoredProcId = storedProcId; - storedProcConfScanLab(signal); - break; - case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN: - jam(); - tupScanCloseConfLab(signal); - break; - case ScanRecord::WAIT_STORED_PROC_COPY: - jam(); - scanptr.p->scanStoredProcId = storedProcId; - storedProcConfCopyLab(signal); - break; - case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY: - jam(); - tupCopyCloseConfLab(signal); - break; - default: - ndbrequire(false); - }//switch -}//Dblqh::execSTORED_PROCCONF() - -/* ****************** */ -/* STORED_PROCREF > */ -/* 
****************** */ -void Dblqh::execSTORED_PROCREF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - Uint32 errorCode = signal->theData[1]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - switch (scanptr.p->scanState) { - case ScanRecord::WAIT_STORED_PROC_SCAN: - jam(); - scanptr.p->scanCompletedStatus = ZTRUE; - scanptr.p->scanStoredProcId = signal->theData[2]; - tcConnectptr.p->errorCode = errorCode; - closeScanLab(signal); - break; - default: - ndbrequire(false); - }//switch -}//Dblqh::execSTORED_PROCREF() - -/* -------------------------------------------------------------------------- - * ENTER SCAN_NEXTREQ - * -------------------------------------------------------------------------- - * PRECONDITION: - * TRANSACTION_STATE = SCAN_STATE - * SCAN_STATE = WAIT_SCAN_NEXTREQ - * - * Case scanLockHold: ZTRUE = Unlock previous round of - * scanned row(s) and fetch next set of rows. - * ZFALSE = Fetch new set of rows. - * Number of rows to read depends on parallelism and how many rows - * left to scan in the fragment. SCAN_NEXTREQ can also be sent with - * closeFlag == ZTRUE to close the scan. - * ------------------------------------------------------------------------- */ -void Dblqh::execSCAN_NEXTREQ(Signal* signal) -{ - jamEntry(); - const ScanFragNextReq * const nextReq = - (ScanFragNextReq*)&signal->theData[0]; - const Uint32 transid1 = nextReq->transId1; - const Uint32 transid2 = nextReq->transId2; - const Uint32 senderData = nextReq->senderData; - - if (findTransaction(transid1, transid2, senderData) != ZOK){ - jam(); - DEBUG(senderData << - " Received SCAN_NEXTREQ in LQH with close flag when closed"); - ndbrequire(nextReq->closeFlag == ZTRUE); - return; - } - - // Crash node if signal sender is same node - CRASH_INSERTION2(5021, refToNode(signal->senderBlockRef()) == cownNodeid); - // Crash node if signal sender is NOT same node - CRASH_INSERTION2(5022, refToNode(signal->senderBlockRef()) != cownNodeid); - - if (ERROR_INSERTED(5023)){ - // Drop signal if sender is same node - if (refToNode(signal->senderBlockRef()) == cownNodeid) { - CLEAR_ERROR_INSERT_VALUE; - return; - } - }//if - if (ERROR_INSERTED(5024)){ - // Drop signal if sender is NOT same node - if (refToNode(signal->senderBlockRef()) != cownNodeid) { - CLEAR_ERROR_INSERT_VALUE; - return; - } - }//if - if (ERROR_INSERTED(5025)){ - // Delay signal if sender is NOT same node - if (refToNode(signal->senderBlockRef()) != cownNodeid) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_SCAN_NEXTREQ, signal, 1000, - signal->length()); - return; - } - }//if - if (ERROR_INSERTED(5030)){ - ndbout << "ERROR 5030" << endl; - CLEAR_ERROR_INSERT_VALUE; - // Drop signal - return; - }//if - - if(ERROR_INSERTED(5036)){ - return; - } - - scanptr.i = tcConnectptr.p->tcScanRec; - ndbrequire(scanptr.i != RNIL); - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanTcWaiting = ZTRUE; - - /* ------------------------------------------------------------------ - * If close flag is set this scan should be closed - * If we are waiting for SCAN_NEXTREQ set flag to stop scanning and - * continue execution else set flags and wait until the scan - * completes itself - * ------------------------------------------------------------------ */ - if (nextReq->closeFlag == ZTRUE){ - jam(); - if(ERROR_INSERTED(5034)){ - CLEAR_ERROR_INSERT_VALUE; - } - if(ERROR_INSERTED(5036)){ - CLEAR_ERROR_INSERT_VALUE; - return; - } - 
closeScanRequestLab(signal); - return; - }//if - - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - - /** - * Change parameters while running - * (is currently not supported) - */ - const Uint32 max_rows = nextReq->batch_size_rows; - const Uint32 max_bytes = nextReq->batch_size_bytes; - ndbrequire(scanptr.p->m_max_batch_size_rows == max_rows); - ndbrequire(scanptr.p->m_max_batch_size_bytes == max_bytes); - - /* -------------------------------------------------------------------- - * If scanLockHold = TRUE we need to unlock previous round of - * scanned records. - * scanReleaseLocks will set states for this and send a NEXT_SCANREQ. - * When confirm signal NEXT_SCANCONF arrives we call - * continueScanNextReqLab to continue scanning new rows and - * acquiring new locks. - * -------------------------------------------------------------------- */ - if ((scanptr.p->scanLockHold == ZTRUE) && - (scanptr.p->m_curr_batch_size_rows > 0)) { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - - /* ----------------------------------------------------------------------- - * We end up here when scanLockHold = FALSE or no rows was locked from - * previous round. - * Simply continue scanning. - * ----------------------------------------------------------------------- */ - continueScanNextReqLab(signal); -}//Dblqh::execSCAN_NEXTREQ() - -void Dblqh::continueScanNextReqLab(Signal* signal) -{ - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); - closeScanLab(signal); - return; - }//if - - if(scanptr.p->m_last_row){ - jam(); - scanptr.p->scanCompletedStatus = ZTRUE; - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - return; - } - - // Update timer on tcConnectRecord - tcConnectptr.p->tcTimer = cLqhTimeOutCount; - init_acc_ptr_list(scanptr.p); - scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT; - scanNextLoopLab(signal); -}//Dblqh::continueScanNextReqLab() - -/* ------------------------------------------------------------------------- - * WE NEED TO RELEASE LOCKS BEFORE CONTINUING - * ------------------------------------------------------------------------- */ -void Dblqh::scanReleaseLocksLab(Signal* signal) -{ - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_RELEASE_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - }//switch - continueScanReleaseAfterBlockedLab(signal); -}//Dblqh::scanReleaseLocksLab() - -void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanState = ScanRecord::WAIT_RELEASE_LOCK; - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1]= - get_acc_ptr_from_scan_record(scanptr.p, - scanptr.p->scanReleaseCounter -1, - false); - signal->theData[2] = NextScanReq::ZSCAN_COMMIT; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); -}//Dblqh::continueScanReleaseAfterBlockedLab() - -/* ------------------------------------------------------------------------- - * ENTER SCAN_NEXTREQ - * ------------------------------------------------------------------------- - * SCAN_NEXT_REQ SIGNAL ARRIVED IN 
THE MIDDLE OF EXECUTION OF THE SCAN. - * IT WAS A REQUEST TO CLOSE THE SCAN. WE WILL CLOSE THE SCAN IN A - * CAREFUL MANNER TO ENSURE THAT NO ERROR OCCURS. - * ------------------------------------------------------------------------- - * PRECONDITION: - * TRANSACTION_STATE = SCAN_STATE_USED - * TSCAN_COMPLETED = ZTRUE - * ------------------------------------------------------------------------- - * WE CAN ALSO ARRIVE AT THIS LABEL AFTER A NODE CRASH OF THE SCAN - * COORDINATOR. - * ------------------------------------------------------------------------- */ -void Dblqh::closeScanRequestLab(Signal* signal) -{ - DEBUG("transactionState = " << tcConnectptr.p->transactionState); - switch (tcConnectptr.p->transactionState) { - case TcConnectionrec::SCAN_STATE_USED: - DEBUG("scanState = " << scanptr.p->scanState); - switch (scanptr.p->scanState) { - case ScanRecord::IN_QUEUE: - jam(); - tupScanCloseConfLab(signal); - break; - case ScanRecord::WAIT_NEXT_SCAN: - jam(); - /* ------------------------------------------------------------------- - * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. - * ------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case ScanRecord::WAIT_ACC_SCAN: - case ScanRecord::WAIT_STORED_PROC_SCAN: - jam(); - /* ------------------------------------------------------------------- - * WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS - * AND WAIT FOR COMPLETION OF STARTUP. - * ------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case ScanRecord::WAIT_CLOSE_SCAN: - case ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN: - jam(); - /*empty*/; - break; - /* ------------------------------------------------------------------- - * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. - * ------------------------------------------------------------------- */ - case ScanRecord::WAIT_RELEASE_LOCK: - jam(); - /* ------------------------------------------------------------------- - * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING THIS - * WE WILL START TO CLOSE THE SCAN. - * ------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case ScanRecord::WAIT_SCAN_NEXTREQ: - jam(); - /* ------------------------------------------------------------------- - * WE ARE WAITING FOR A SCAN_NEXTREQ FROM SCAN COORDINATOR(TC) - * WICH HAVE CRASHED. CLOSE THE SCAN - * ------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - - if (scanptr.p->scanLockHold == ZTRUE) { - if (scanptr.p->m_curr_batch_size_rows > 0) { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - }//if - closeScanLab(signal); - break; - default: - ndbrequire(false); - }//switch - break; - case TcConnectionrec::WAIT_SCAN_AI: - jam(); - /* --------------------------------------------------------------------- - * WE ARE STILL WAITING FOR THE ATTRIBUTE INFORMATION THAT - * OBVIOUSLY WILL NOT ARRIVE. WE CAN QUIT IMMEDIATELY HERE. - * --------------------------------------------------------------------- */ - //XXX jonas this have to be wrong... 
- releaseOprec(signal); - if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; - }//if - tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE; - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; - sendScanFragConf(signal, ZTRUE); - abort_scan(signal, scanptr.i, 0); - return; - break; - case TcConnectionrec::SCAN_TUPKEY: - case TcConnectionrec::SCAN_FIRST_STOPPED: - case TcConnectionrec::SCAN_CHECK_STOPPED: - case TcConnectionrec::SCAN_STOPPED: - jam(); - /* --------------------------------------------------------------------- - * SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. - * --------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case TcConnectionrec::SCAN_RELEASE_STOPPED: - jam(); - /* --------------------------------------------------------------------- - * WE ARE CURRENTLY RELEASING RECORD LOCKS. AFTER COMPLETING - * THIS WE WILL START TO CLOSE THE SCAN. - * --------------------------------------------------------------------- */ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case TcConnectionrec::SCAN_CLOSE_STOPPED: - jam(); - /* --------------------------------------------------------------------- - * CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. - * --------------------------------------------------------------------- */ - /*empty*/; - break; - default: - ndbrequire(false); - }//switch -}//Dblqh::closeScanRequestLab() - -/* ------------------------------------------------------------------------- - * ENTER NEXT_SCANCONF - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_RELEASE_LOCK - * ------------------------------------------------------------------------- */ -void Dblqh::scanLockReleasedLab(Signal* signal) -{ - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - - if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) { - if ((scanptr.p->scanErrorCounter > 0) || - (scanptr.p->scanCompletedStatus == ZTRUE)) { - jam(); - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes = 0; - closeScanLab(signal); - } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) { - jam(); - closeScanLab(signal); - return; - } else if (scanptr.p->check_scan_batch_completed() && - scanptr.p->scanLockHold != ZTRUE) { - jam(); - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - } else { - jam(); - /* - * We came here after releasing locks after - * receiving SCAN_NEXTREQ from TC. We only come here - * when scanHoldLock == ZTRUE - */ - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes = 0; - continueScanNextReqLab(signal); - }//if - } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) { - jam(); - scanptr.p->scanReleaseCounter++; - scanReleaseLocksLab(signal); - } else { - jam(); - /* - We come here when we have been scanning for a long time and not been able - to find m_max_batch_size_rows records to return. 
We needed to release - the record we didn't want, but now we are returning all found records to - the API. - */ - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - }//if -}//Dblqh::scanLockReleasedLab() - -bool -Dblqh::seize_acc_ptr_list(ScanRecord* scanP, Uint32 batch_size) -{ - Uint32 i; - Uint32 attr_buf_recs= (batch_size + 30) / 32; - - if (batch_size > 1) { - if (c_no_attrinbuf_recs < attr_buf_recs) { - jam(); - return false; - } - for (i= 1; i <= attr_buf_recs; i++) { - scanP->scan_acc_op_ptr[i]= seize_attrinbuf(); - } - } - scanP->scan_acc_attr_recs= attr_buf_recs; - scanP->scan_acc_index = 0; - return true; -} - -void -Dblqh::release_acc_ptr_list(ScanRecord* scanP) -{ - Uint32 i, attr_buf_recs; - attr_buf_recs= scanP->scan_acc_attr_recs; - - for (i= 1; i <= attr_buf_recs; i++) { - release_attrinbuf(scanP->scan_acc_op_ptr[i]); - } - scanP->scan_acc_attr_recs= 0; - scanP->scan_acc_index = 0; -} - -Uint32 -Dblqh::seize_attrinbuf() -{ - AttrbufPtr regAttrPtr; - Uint32 ret_attr_buf; - ndbrequire(c_no_attrinbuf_recs > 0); - c_no_attrinbuf_recs--; - ret_attr_buf= cfirstfreeAttrinbuf; - regAttrPtr.i= ret_attr_buf; - ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf); - cfirstfreeAttrinbuf= regAttrPtr.p->attrbuf[ZINBUF_NEXT]; - return ret_attr_buf; -} - -Uint32 -Dblqh::release_attrinbuf(Uint32 attr_buf_i) -{ - Uint32 next_buf; - AttrbufPtr regAttrPtr; - c_no_attrinbuf_recs++; - regAttrPtr.i= attr_buf_i; - ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf); - next_buf= regAttrPtr.p->attrbuf[ZINBUF_NEXT]; - regAttrPtr.p->attrbuf[ZINBUF_NEXT]= cfirstfreeAttrinbuf; - cfirstfreeAttrinbuf= regAttrPtr.i; - return next_buf; -} - -void -Dblqh::init_acc_ptr_list(ScanRecord* scanP) -{ - scanP->scan_acc_index = 0; -} - -Uint32 -Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP, - Uint32 index, - bool crash_flag) -{ - Uint32* acc_ptr; - if (!((index < MAX_PARALLEL_OP_PER_SCAN) && - index < scanP->scan_acc_index)) { - ndbrequire(crash_flag); - return RNIL; - } - i_get_acc_ptr(scanP, acc_ptr, index); - return *acc_ptr; -} - -void -Dblqh::set_acc_ptr_in_scan_record(ScanRecord* scanP, - Uint32 index, Uint32 acc) -{ - Uint32 *acc_ptr; - ndbrequire((index == 0 || scanP->scan_acc_index == index) && - (index < MAX_PARALLEL_OP_PER_SCAN)); - scanP->scan_acc_index= index + 1; - i_get_acc_ptr(scanP, acc_ptr, index); - *acc_ptr= acc; -} - -/* ------------------------------------------------------------------------- - * SCAN_FRAGREQ: Request to start scanning the specified fragment of a table. 
- * ------------------------------------------------------------------------- */ -void Dblqh::execSCAN_FRAGREQ(Signal* signal) -{ - ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0]; - ScanFragRef * ref; - const Uint32 transid1 = scanFragReq->transId1; - const Uint32 transid2 = scanFragReq->transId2; - Uint32 errorCode= 0; - Uint32 senderData; - Uint32 hashIndex; - TcConnectionrecPtr nextHashptr; - - jamEntry(); - const Uint32 reqinfo = scanFragReq->requestInfo; - const Uint32 fragId = (scanFragReq->fragmentNoKeyLen & 0xFFFF); - const Uint32 keyLen = (scanFragReq->fragmentNoKeyLen >> 16); - tabptr.i = scanFragReq->tableId; - const Uint32 max_rows = scanFragReq->batch_size_rows; - const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo); - const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo); - const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo); - - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){ - senderData = scanFragReq->senderData; - goto error_handler_early_1; - } - - if (cfirstfreeTcConrec != RNIL) { - seizeTcrec(); - tcConnectptr.p->clientConnectrec = scanFragReq->senderData; - tcConnectptr.p->clientBlockref = signal->senderBlockRef(); - tcConnectptr.p->savePointId = scanFragReq->savePointId; - } else { - jam(); - /* -------------------------------------------------------------------- - * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST. - * -------------------------------------------------------------------- */ - errorCode = ZNO_TC_CONNECT_ERROR; - senderData = scanFragReq->senderData; - goto error_handler_early; - }//if - /** - * A write allways have to get keyinfo - */ - ndbrequire(scanLockMode == 0 || keyinfo); - - ndbrequire(max_rows > 0 && max_rows <= MAX_PARALLEL_OP_PER_SCAN); - if (!getFragmentrec(signal, fragId)) { - errorCode = 1231; - goto error_handler; - }//if - - // Verify scan type vs table type (both sides are boolean) - if (rangeScan != DictTabInfo::isOrderedIndex(fragptr.p->tableType)) { - errorCode = 1232; - goto error_handler; - }//if - - // 1 scan record is reserved for node recovery - if (cscanNoFreeRec < 2) { - jam(); - errorCode = ScanFragRef::ZNO_FREE_SCANREC_ERROR; - goto error_handler; - } - - // XXX adjust cmaxAccOps for range scans and remove this comment - if ((cbookedAccOps + max_rows) > cmaxAccOps) { - jam(); - errorCode = ScanFragRef::ZSCAN_BOOK_ACC_OP_ERROR; - goto error_handler; - }//if - - ndbrequire(c_scanRecordPool.seize(scanptr)); - initScanTc(scanFragReq, - transid1, - transid2, - fragId, - ZNIL); - tcConnectptr.p->save1 = 4; - tcConnectptr.p->primKeyLen = keyLen + 4; // hard coded in execKEYINFO - errorCode = initScanrec(scanFragReq); - if (errorCode != ZOK) { - jam(); - goto error_handler2; - }//if - cscanNoFreeRec--; - cbookedAccOps += max_rows; - - hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023; - nextHashptr.i = ctransidHash[hashIndex]; - ctransidHash[hashIndex] = tcConnectptr.i; - tcConnectptr.p->prevHashRec = RNIL; - tcConnectptr.p->nextHashRec = nextHashptr.i; - if (nextHashptr.i != RNIL) { - jam(); - /* --------------------------------------------------------------------- - * ENSURE THAT THE NEXT RECORD HAS SET PREVIOUS TO OUR RECORD - * IF IT EXISTS - * --------------------------------------------------------------------- */ - ptrCheckGuard(nextHashptr, ctcConnectrecFileSize, tcConnectionrec); - nextHashptr.p->prevHashRec = tcConnectptr.i; - }//if - if (scanptr.p->scanAiLength > 0) { - jam(); - 
tcConnectptr.p->transactionState = TcConnectionrec::WAIT_SCAN_AI; - return; - }//if - continueAfterReceivingAllAiLab(signal); - return; - -error_handler2: - // no scan number allocated - c_scanRecordPool.release(scanptr); -error_handler: - ref = (ScanFragRef*)&signal->theData[0]; - tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE; - ref->senderData = tcConnectptr.p->clientConnectrec; - ref->transId1 = transid1; - ref->transId2 = transid2; - ref->errorCode = errorCode; - sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal, - ScanFragRef::SignalLength, JBB); - releaseOprec(signal); - releaseTcrec(signal, tcConnectptr); - return; - - error_handler_early_1: - if(tabptr.p->tableStatus == Tablerec::NOT_DEFINED){ - jam(); - errorCode = ZTABLE_NOT_DEFINED; - } else if (tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING || - tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){ - jam(); - errorCode = ZDROP_TABLE_IN_PROGRESS; - } else { - ndbrequire(0); - } - error_handler_early: - ref = (ScanFragRef*)&signal->theData[0]; - ref->senderData = senderData; - ref->transId1 = transid1; - ref->transId2 = transid2; - ref->errorCode = errorCode; - sendSignal(signal->senderBlockRef(), GSN_SCAN_FRAGREF, signal, - ScanFragRef::SignalLength, JBB); -}//Dblqh::execSCAN_FRAGREQ() - -void Dblqh::continueAfterReceivingAllAiLab(Signal* signal) -{ - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; - - if(scanptr.p->scanState == ScanRecord::IN_QUEUE){ - jam(); - return; - } - - scanptr.p->scanState = ScanRecord::WAIT_ACC_SCAN; - AccScanReq * req = (AccScanReq*)&signal->theData[0]; - req->senderData = scanptr.i; - req->senderRef = cownref; - req->tableId = tcConnectptr.p->tableref; - req->fragmentNo = tcConnectptr.p->fragmentid; - req->requestInfo = 0; - AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode); - AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted); - AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending); - - if (refToBlock(tcConnectptr.p->clientBlockref) == BACKUP) - { - if (scanptr.p->lcpScan) - { - AccScanReq::setNoDiskScanFlag(req->requestInfo, 1); - AccScanReq::setLcpScanFlag(req->requestInfo, 1); - } - else - { - /* If backup scan disktables in disk order */ - AccScanReq::setNoDiskScanFlag(req->requestInfo, - !tcConnectptr.p->m_disk_table); - AccScanReq::setLcpScanFlag(req->requestInfo, 0); - } - } - else - { -#if BUG_27776_FIXED - AccScanReq::setNoDiskScanFlag(req->requestInfo, - !tcConnectptr.p->m_disk_table); -#else - AccScanReq::setNoDiskScanFlag(req->requestInfo, 1); -#endif - AccScanReq::setLcpScanFlag(req->requestInfo, 0); - } - - req->transId1 = tcConnectptr.p->transid[0]; - req->transId2 = tcConnectptr.p->transid[1]; - req->savePointId = tcConnectptr.p->savePointId; - sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal, - AccScanReq::SignalLength, JBB); -}//Dblqh::continueAfterReceivingAllAiLab() - -void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { - if (tcConnectptr.p->currTupAiLen < scanptr.p->scanAiLength) { - jam(); - } else { - jam(); - ndbrequire(tcConnectptr.p->currTupAiLen == scanptr.p->scanAiLength); - continueAfterReceivingAllAiLab(signal); - }//if - return; - }//if - abort_scan(signal, scanptr.i, ZGET_ATTRINBUF_ERROR); -} - -void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){ - 
jam(); - scanptr.i = scan_ptr_i; - c_scanRecordPool.getPtr(scanptr); - - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - finishScanrec(signal); - releaseScanrec(signal); - tcConnectptr.p->transactionState = TcConnectionrec::IDLE; - tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE; - - if(errcode) - { - jam(); - ScanFragRef * ref = (ScanFragRef*)&signal->theData[0]; - ref->senderData = tcConnectptr.p->clientConnectrec; - ref->transId1 = tcConnectptr.p->transid[0]; - ref->transId2 = tcConnectptr.p->transid[1]; - ref->errorCode = errcode; - sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal, - ScanFragRef::SignalLength, JBB); - } - deleteTransidHash(signal); - releaseOprec(signal); - releaseTcrec(signal, tcConnectptr); -} - -/*---------------------------------------------------------------------*/ -/* Send this 'I am alive' signal to TC when it is received from ACC */ -/* We include the scanPtr.i that comes from ACC in signalData[1], this */ -/* tells TC which fragment record to check for a timeout. */ -/*---------------------------------------------------------------------*/ -void Dblqh::execSCAN_HBREP(Signal* signal) -{ - jamEntry(); - scanptr.i = signal->theData[0]; - c_scanRecordPool.getPtr(scanptr); - switch(scanptr.p->scanType){ - case ScanRecord::SCAN: - if (scanptr.p->scanTcWaiting == ZTRUE) { - jam(); - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - const Uint32 transid1 = signal->theData[1]; - const Uint32 transid2 = signal->theData[2]; - ndbrequire(transid1 == tcConnectptr.p->transid[0] && - transid2 == tcConnectptr.p->transid[1]); - - // Update counter on tcConnectPtr - if (tcConnectptr.p->tcTimer != 0){ - tcConnectptr.p->tcTimer = cLqhTimeOutCount; - } else { - jam(); - //ndbout << "SCAN_HBREP when tcTimer was off" << endl; - } - - signal->theData[0] = tcConnectptr.p->clientConnectrec; - signal->theData[1] = tcConnectptr.p->transid[0]; - signal->theData[2] = tcConnectptr.p->transid[1]; - sendSignal(tcConnectptr.p->clientBlockref, - GSN_SCAN_HBREP, signal, 3, JBB); - }//if - break; - case ScanRecord::COPY: - // ndbout << "Dblqh::execSCAN_HBREP Dropping SCAN_HBREP" << endl; - break; - default: - ndbrequire(false); - } -} - -void Dblqh::accScanConfScanLab(Signal* signal) -{ - AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0]; - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - /* ----------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_ACC_SCAN - * ----------------------------------------------------------------------- */ - if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) { - jam(); - /* --------------------------------------------------------------------- - * THE FRAGMENT WAS EMPTY. - * REPORT SUCCESSFUL COPYING. 
- * --------------------------------------------------------------------- */ - tupScanCloseConfLab(signal); - return; - }//if - scanptr.p->scanAccPtr = accScanConf->accPtr; - if (scanptr.p->rangeScan) { - jam(); - TuxBoundInfo* req = (TuxBoundInfo*)signal->getDataPtrSend(); - req->errorCode = RNIL; - req->tuxScanPtrI = scanptr.p->scanAccPtr; - Uint32 len = req->boundAiLength = copy_bounds(req->data, tcConnectptr.p); - EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal, - TuxBoundInfo::SignalLength + len); - - jamEntry(); - if (req->errorCode != 0) { - jam(); - /* - * Cannot use STORED_PROCREF to abort since even the REF - * returns a stored proc id. So record error and continue. - * The scan is already Invalid in TUX and returns empty set. - */ - tcConnectptr.p->errorCode = req->errorCode; - } - } - - scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN; - if(scanptr.p->scanStoredProcId == RNIL) - { - jam(); - signal->theData[0] = tcConnectptr.p->tupConnectrec; - signal->theData[1] = tcConnectptr.p->tableref; - signal->theData[2] = scanptr.p->scanSchemaVersion; - signal->theData[3] = ZSTORED_PROC_SCAN; - - signal->theData[4] = scanptr.p->scanAiLength; - sendSignal(tcConnectptr.p->tcTupBlockref, - GSN_STORED_PROCREQ, signal, 5, JBB); - - signal->theData[0] = tcConnectptr.p->tupConnectrec; - AttrbufPtr regAttrinbufptr; - Uint32 firstAttr = regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf; - while (regAttrinbufptr.i != RNIL) { - ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf); - jam(); - Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN]; - ndbrequire(dataLen != 0); - // first 3 words already set in STORED_PROCREQ - MEMCOPY_NO_WORDS(&signal->theData[3], - ®Attrinbufptr.p->attrbuf[0], - dataLen); - sendSignal(tcConnectptr.p->tcTupBlockref, - GSN_ATTRINFO, signal, dataLen + 3, JBB); - regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT]; - c_no_attrinbuf_recs++; - }//while - - /** - * Release attr info - */ - if(firstAttr != RNIL) - { - regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf; - cfirstfreeAttrinbuf = firstAttr; - tcConnectptr.p->firstAttrinbuf = tcConnectptr.p->lastAttrinbuf = RNIL; - } - } - else - { - jam(); - storedProcConfScanLab(signal); - } -}//Dblqh::accScanConfScanLab() - -#define print_buf(s,idx,len) {\ - printf(s); Uint32 t2=len; DatabufPtr t3; t3.i = idx; \ - while(t3.i != RNIL && t2-- > 0){\ - ptrCheckGuard(t3, cdatabufFileSize, databuf);\ - printf("%d ", t3.i); t3.i= t3.p->nextDatabuf;\ - } printf("\n"); } - -Uint32 -Dblqh::copy_bounds(Uint32 * dst, TcConnectionrec* tcPtrP) -{ - /** - * copy_bounds handles multiple bounds by - * in the 16 upper bits of the first words (used to specify bound type) - * setting the length of this specific bound - * - */ - - DatabufPtr regDatabufptr; - Uint32 left = 4 - tcPtrP->m_offset_current_keybuf; // left in buf - Uint32 totalLen = tcPtrP->primKeyLen - 4; - regDatabufptr.i = tcPtrP->firstTupkeybuf; - - ndbassert(tcPtrP->primKeyLen >= 4); - ndbassert(tcPtrP->m_offset_current_keybuf < 4); - ndbassert(!(totalLen == 0 && regDatabufptr.i != RNIL)); - ndbassert(!(totalLen != 0 && regDatabufptr.i == RNIL)); - - if(totalLen) - { - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - Uint32 sig0 = regDatabufptr.p->data[0]; - Uint32 sig1 = regDatabufptr.p->data[1]; - Uint32 sig2 = regDatabufptr.p->data[2]; - Uint32 sig3 = regDatabufptr.p->data[3]; - - switch(left){ - case 4: - * dst++ = sig0; - case 3: - * dst++ = sig1; - case 2: - * dst++ = sig2; - case 1: - * dst++ = sig3; - } - - Uint32 first = 
(* (dst - left)); // First word in range - - // Length of this range - Uint8 offset; - const Uint32 len = (first >> 16) ? (first >> 16) : totalLen; - tcPtrP->m_scan_curr_range_no = (first & 0xFFF0) >> 4; - (* (dst - left)) = (first & 0xF); // Remove length & range no - - if(len < left) - { - offset = len; - } - else - { - Databuf * lastP; - left = (len - left); - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - - while(left >= 4) - { - left -= 4; - lastP = regDatabufptr.p; - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - sig0 = regDatabufptr.p->data[0]; - sig1 = regDatabufptr.p->data[1]; - sig2 = regDatabufptr.p->data[2]; - sig3 = regDatabufptr.p->data[3]; - regDatabufptr.i = regDatabufptr.p->nextDatabuf; - - * dst++ = sig0; - * dst++ = sig1; - * dst++ = sig2; - * dst++ = sig3; - } - - if(left > 0) - { - lastP = regDatabufptr.p; - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - sig0 = regDatabufptr.p->data[0]; - sig1 = regDatabufptr.p->data[1]; - sig2 = regDatabufptr.p->data[2]; - sig3 = regDatabufptr.p->data[3]; - * dst++ = sig0; - * dst++ = sig1; - * dst++ = sig2; - * dst++ = sig3; - } - else - { - lastP = regDatabufptr.p; - } - offset = left & 3; - lastP->nextDatabuf = cfirstfreeDatabuf; - cfirstfreeDatabuf = tcPtrP->firstTupkeybuf; - ndbassert(cfirstfreeDatabuf != RNIL); - } - - if(len == totalLen && regDatabufptr.i != RNIL) - { - regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf; - cfirstfreeDatabuf = regDatabufptr.i; - tcPtrP->lastTupkeybuf = regDatabufptr.i = RNIL; - ndbassert(cfirstfreeDatabuf != RNIL); - } - - tcPtrP->m_offset_current_keybuf = offset; - tcPtrP->firstTupkeybuf = regDatabufptr.i; - tcPtrP->primKeyLen = 4 + totalLen - len; - - return len; - } - return totalLen; -} - -/* ------------------------------------------------------------------------- - * ENTER STORED_PROCCONF WITH - * TC_CONNECTPTR, - * TSTORED_PROC_ID - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_SCAN - * ------------------------------------------------------------------------- */ -void Dblqh::storedProcConfScanLab(Signal* signal) -{ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); - // STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. 
- closeScanLab(signal); - return; - }//if - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_FIRST_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - jamLine(fragptr.p->fragStatus); - ndbout_c("fragptr.p->fragStatus: %u", - fragptr.p->fragStatus); - ndbrequire(false); - break; - }//switch - continueFirstScanAfterBlockedLab(signal); -}//Dblqh::storedProcConfScanLab() - -void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN; - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = RNIL; - signal->theData[2] = NextScanReq::ZSCAN_NEXT; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); - return; -}//Dblqh::continueFirstScanAfterBlockedLab() - -/* ------------------------------------------------------------------------- - * When executing a scan we must come up to the surface at times to make - * sure we can quickly start local checkpoints. - * ------------------------------------------------------------------------- */ -void Dblqh::execCHECK_LCP_STOP(Signal* signal) -{ - jamEntry(); - scanptr.i = signal->theData[0]; - c_scanRecordPool.getPtr(scanptr); - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - if (signal->theData[1] == ZTRUE) { - jam(); - signal->theData[0] = ZCHECK_LCP_STOP_BLOCKED; - signal->theData[1] = scanptr.i; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2); - signal->theData[0] = RNIL; - return; - }//if - if (fragptr.p->fragStatus != Fragrecord::FSACTIVE) { - ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED; - signal->theData[0] = RNIL; - }//if -}//Dblqh::execCHECK_LCP_STOP() - -void Dblqh::checkLcpStopBlockedLab(Signal* signal) -{ - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - continueAfterCheckLcpStopBlocked(signal); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CHECK_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - }//switch -}//Dblqh::checkLcpStopBlockedLab() - -void Dblqh::continueAfterCheckLcpStopBlocked(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP; - EXECUTE_DIRECT(refToBlock(scanptr.p->scanBlockref), GSN_ACC_CHECK_SCAN, - signal, 2); -}//Dblqh::continueAfterCheckLcpStopBlocked() - -/* ------------------------------------------------------------------------- - * ENTER NEXT_SCANCONF - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN - * 
------------------------------------------------------------------------- */ -void Dblqh::nextScanConfScanLab(Signal* signal) -{ - NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0]; - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - if (nextScanConf->fragId == RNIL) { - jam(); - /* --------------------------------------------------------------------- - * THERE ARE NO MORE TUPLES TO FETCH. IF WE HAVE ANY - * OPERATIONS STILL NEEDING A LOCK WE REPORT TO THE - * APPLICATION AND CLOSE THE SCAN WHEN THE NEXT SCAN - * REQUEST IS RECEIVED. IF WE DO NOT HAVE ANY NEED FOR - * LOCKS WE CAN CLOSE THE SCAN IMMEDIATELY. - * --------------------------------------------------------------------- */ - /************************************************************* - * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. - ************************************************************ */ - if (!scanptr.p->scanLockHold) - { - jam(); - closeScanLab(signal); - return; - } - - if (scanptr.p->scanCompletedStatus == ZTRUE) { - if ((scanptr.p->scanLockHold == ZTRUE) && - (scanptr.p->m_curr_batch_size_rows > 0)) { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - jam(); - closeScanLab(signal); - return; - }//if - - if (scanptr.p->m_curr_batch_size_rows > 0) { - jam(); - - if((tcConnectptr.p->primKeyLen - 4) == 0) - scanptr.p->scanCompletedStatus = ZTRUE; - - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - return; - }//if - closeScanLab(signal); - return; - }//if - - // If accOperationPtr == RNIL no record was returned by ACC - Uint32 accOpPtr = nextScanConf->accOperationPtr; - if (accOpPtr == RNIL) - { - jam(); - /************************************************************* - * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. - ************************************************************ */ - if (scanptr.p->scanCompletedStatus == ZTRUE) { - if ((scanptr.p->scanLockHold == ZTRUE) && - (scanptr.p->m_curr_batch_size_rows > 0)) { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - jam(); - closeScanLab(signal); - return; - }//if - - if (scanptr.p->m_curr_batch_size_rows > 0) { - jam(); - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - return; - }//if - - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(scanptr.p->scanBlockref, - GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - }//if - jam(); - set_acc_ptr_in_scan_record(scanptr.p, - scanptr.p->m_curr_batch_size_rows, - accOpPtr); - - jam(); - nextScanConfLoopLab(signal); -}//Dblqh::nextScanConfScanLab() - -void Dblqh::nextScanConfLoopLab(Signal* signal) -{ - /* ---------------------------------------------------------------------- - * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. 
- * ---------------------------------------------------------------------- */ - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); - if ((scanptr.p->scanLockHold == ZTRUE) && - (scanptr.p->m_curr_batch_size_rows > 0)) { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - closeScanLab(signal); - return; - }//if - - Fragrecord* fragPtrP= fragptr.p; - if (scanptr.p->rangeScan) { - jam(); - // for ordered index use primary table - fragPtrP= c_fragment_pool.getPtr(fragPtrP->tableFragptr); - } - - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_TUPKEY; - if(tcConnectptr.p->m_disk_table) - { - next_scanconf_load_diskpage(signal, scanptr, tcConnectptr,fragPtrP); - } - else - { - next_scanconf_tupkeyreq(signal, scanptr, tcConnectptr.p, fragPtrP, RNIL); - } -} - -void -Dblqh::next_scanconf_load_diskpage(Signal* signal, - ScanRecordPtr scanPtr, - Ptr regTcPtr, - Fragrecord* fragPtrP) -{ - jam(); - - int res; - Uint32 local_key = scanPtr.p->m_row_id.ref(); - - if((res= c_tup->load_diskpage_scan(signal, - regTcPtr.p->tupConnectrec, - fragPtrP->tupFragptr, - local_key, - 0)) > 0) - { - next_scanconf_tupkeyreq(signal, scanptr, regTcPtr.p, fragPtrP, res); - } - else if(unlikely(res != 0)) - { - jam(); - TupKeyRef * ref = (TupKeyRef *)signal->getDataPtr(); - ref->userRef= regTcPtr.i; - ref->errorCode= ~0; - execTUPKEYREF(signal); - } -} - -void -Dblqh::next_scanconf_load_diskpage_callback(Signal* signal, - Uint32 callbackData, - Uint32 disk_page) -{ - jamEntry(); - - Ptr regTcPtr; - regTcPtr.i= callbackData; - ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec); - - ScanRecordPtr scanPtr; - c_scanRecordPool.getPtr(scanPtr, regTcPtr.p->tcScanRec); - - if(disk_page > 0) - { - FragrecordPtr fragPtr; - c_fragment_pool.getPtr(fragPtr, regTcPtr.p->fragmentptr); - - if (scanPtr.p->rangeScan) { - jam(); - // for ordered index use primary table - fragPtr.p = c_fragment_pool.getPtr(fragPtr.p->tableFragptr); - } - - next_scanconf_tupkeyreq(signal, scanPtr, regTcPtr.p, fragPtr.p, disk_page); - } - else - { - TupKeyRef * ref = (TupKeyRef *)signal->getDataPtr(); - ref->userRef= callbackData; - ref->errorCode= disk_page; - execTUPKEYREF(signal); - } -} - -void -Dblqh::next_scanconf_tupkeyreq(Signal* signal, - Ptr scanPtr, - TcConnectionrec * regTcPtr, - Fragrecord* fragPtrP, - Uint32 disk_page) -{ - jam(); - Uint32 reqinfo = (scanPtr.p->scanLockHold == ZFALSE); - reqinfo = reqinfo + (regTcPtr->operation << 6); - reqinfo = reqinfo + (regTcPtr->opExec << 10); - - TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend(); - - tupKeyReq->connectPtr = regTcPtr->tupConnectrec; - tupKeyReq->request = reqinfo; - tupKeyReq->keyRef1 = scanPtr.p->m_row_id.m_page_no; - tupKeyReq->keyRef2 = scanPtr.p->m_row_id.m_page_idx; - tupKeyReq->attrBufLen = 0; - tupKeyReq->opRef = scanPtr.p->scanApiOpPtr; - tupKeyReq->applRef = scanPtr.p->scanApiBlockref; - tupKeyReq->storedProcedure = scanPtr.p->scanStoredProcId; - tupKeyReq->transId1 = regTcPtr->transid[0]; - tupKeyReq->transId2 = regTcPtr->transid[1]; - tupKeyReq->fragPtr = fragPtrP->tupFragptr; - tupKeyReq->primaryReplica = (regTcPtr->seqNoReplica == 0)?true:false; - tupKeyReq->coordinatorTC = regTcPtr->tcBlockref; - tupKeyReq->tcOpIndex = regTcPtr->tcOprec; - tupKeyReq->savePointId = regTcPtr->savePointId; - tupKeyReq->disk_page= disk_page; - Uint32 blockNo = refToBlock(regTcPtr->tcTupBlockref); - EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal, - TupKeyReq::SignalLength); -} - -/* 
------------------------------------------------------------------------- - * RECEPTION OF FURTHER KEY INFORMATION WHEN KEY SIZE > 16 BYTES. - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_SCAN_KEYINFO - * ------------------------------------------------------------------------- */ -void -Dblqh::keyinfoLab(const Uint32 * src, const Uint32 * end) -{ - do { - jam(); - seizeTupkeybuf(0); - databufptr.p->data[0] = * src ++; - databufptr.p->data[1] = * src ++; - databufptr.p->data[2] = * src ++; - databufptr.p->data[3] = * src ++; - } while (src < end); -}//Dblqh::keyinfoLab() - -Uint32 -Dblqh::readPrimaryKeys(ScanRecord *scanP, TcConnectionrec *tcConP, Uint32 *dst) -{ - Uint32 tableId = tcConP->tableref; - Uint32 fragId = tcConP->fragmentid; - Uint32 fragPageId = scanP->m_row_id.m_page_no; - Uint32 pageIndex = scanP->m_row_id.m_page_idx; - - if(scanP->rangeScan) - { - jam(); - // for ordered index use primary table - FragrecordPtr tFragPtr; - tFragPtr.i = fragptr.p->tableFragptr; - c_fragment_pool.getPtr(tFragPtr); - tableId = tFragPtr.p->tabRef; - } - - int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, dst, false); - jamEntry(); - if(0) - ndbout_c("readPrimaryKeys(table: %d fragment: %d [ %d %d ] -> %d", - tableId, fragId, fragPageId, pageIndex, ret); - ndbassert(ret > 0); - - return ret; -} - -/* ------------------------------------------------------------------------- - * ENTER TUPKEYCONF - * ------------------------------------------------------------------------- - * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY - * ------------------------------------------------------------------------- */ -void Dblqh::scanTupkeyConfLab(Signal* signal) -{ - const TupKeyConf * conf = (TupKeyConf *)signal->getDataPtr(); - UintR tdata4 = conf->readLength; - UintR tdata5 = conf->lastRow; - - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - - Uint32 rows = scanptr.p->m_curr_batch_size_rows; - Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p, rows, false); - if (accOpPtr != (Uint32)-1) - { - c_acc->execACCKEY_ORD(signal, accOpPtr); - jamEntry(); - } - else - { - ndbassert(refToBlock(scanptr.p->scanBlockref) != DBACC); - } - - if (scanptr.p->scanCompletedStatus == ZTRUE) { - /* --------------------------------------------------------------------- - * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. 
- * --------------------------------------------------------------------- */ - if ((scanptr.p->scanLockHold == ZTRUE) && rows) - { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - jam(); - closeScanLab(signal); - return; - }//if - if (scanptr.p->scanKeyinfoFlag) { - jam(); - // Inform API about keyinfo len aswell - tdata4 += sendKeyinfo20(signal, scanptr.p, tcConnectptr.p); - }//if - ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN); - scanptr.p->m_curr_batch_size_bytes+= tdata4; - scanptr.p->m_curr_batch_size_rows = rows + 1; - scanptr.p->m_last_row = tdata5; - if (scanptr.p->check_scan_batch_completed() | tdata5){ - if (scanptr.p->scanLockHold == ZTRUE) { - jam(); - scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ; - sendScanFragConf(signal, ZFALSE); - return; - } else { - jam(); - scanptr.p->scanReleaseCounter = rows + 1; - scanReleaseLocksLab(signal); - return; - } - } else { - if (scanptr.p->scanLockHold == ZTRUE) { - jam(); - scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT; - } else { - jam(); - scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT; - } - } - scanNextLoopLab(signal); -}//Dblqh::scanTupkeyConfLab() - -void Dblqh::scanNextLoopLab(Signal* signal) -{ - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - }//switch - continueScanAfterBlockedLab(signal); -}//Dblqh::scanNextLoopLab() - -void Dblqh::continueScanAfterBlockedLab(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - Uint32 accOpPtr; - if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_ABORT) { - jam(); - scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT; - accOpPtr= get_acc_ptr_from_scan_record(scanptr.p, - scanptr.p->m_curr_batch_size_rows, - false); - scanptr.p->scan_acc_index--; - } else if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_COMMIT) { - jam(); - accOpPtr= get_acc_ptr_from_scan_record(scanptr.p, - scanptr.p->m_curr_batch_size_rows-1, - false); - } else { - jam(); - accOpPtr = RNIL; // The value is not used in ACC - }//if - scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN; - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = accOpPtr; - signal->theData[2] = scanptr.p->scanFlag; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); -}//Dblqh::continueScanAfterBlockedLab() - -/* ------------------------------------------------------------------------- - * ENTER TUPKEYREF WITH - * TC_CONNECTPTR, - * TERROR_CODE - * ------------------------------------------------------------------------- - * PRECONDITION: TRANSACTION_STATE = SCAN_TUPKEY - * ------------------------------------------------------------------------- */ -void Dblqh::scanTupkeyRefLab(Signal* signal) -{ - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - - Uint32 rows = scanptr.p->m_curr_batch_size_rows; - Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p, rows, false); - if (accOpPtr != (Uint32)-1) - { - c_acc->execACCKEY_ORD(signal, accOpPtr); - } - else - { - 
ndbassert(refToBlock(scanptr.p->scanBlockref) != DBACC); - } - - if (scanptr.p->scanCompletedStatus == ZTRUE) { - /* --------------------------------------------------------------------- - * STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED. - * --------------------------------------------------------------------- */ - if ((scanptr.p->scanLockHold == ZTRUE) && rows) - { - jam(); - scanptr.p->scanReleaseCounter = 1; - scanReleaseLocksLab(signal); - return; - }//if - jam(); - closeScanLab(signal); - return; - }//if - if ((terrorCode != ZSEARCH_CONDITION_FALSE) && - (terrorCode != ZNO_TUPLE_FOUND) && - (terrorCode >= ZUSER_ERROR_CODE_LIMIT)) { - scanptr.p->scanErrorCounter++; - tcConnectptr.p->errorCode = terrorCode; - - if (scanptr.p->scanLockHold == ZTRUE) { - jam(); - scanptr.p->scanReleaseCounter = 1; - } else { - jam(); - scanptr.p->m_curr_batch_size_rows = rows + 1; - scanptr.p->scanReleaseCounter = rows + 1; - }//if - /* -------------------------------------------------------------------- - * WE NEED TO RELEASE ALL LOCKS CURRENTLY - * HELD BY THIS SCAN. - * -------------------------------------------------------------------- */ - scanReleaseLocksLab(signal); - return; - }//if - Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount; - if (rows) { - if (time_passed > 1) { - /* ----------------------------------------------------------------------- - * WE NEED TO ENSURE THAT WE DO NOT SEARCH FOR THE NEXT TUPLE FOR A - * LONG TIME WHILE WE KEEP A LOCK ON A FOUND TUPLE. WE RATHER REPORT - * THE FOUND TUPLE IF FOUND TUPLES ARE RARE. If more than 10 ms passed we - * send the found tuples to the API. - * ----------------------------------------------------------------------- */ - scanptr.p->scanReleaseCounter = rows + 1; - scanReleaseLocksLab(signal); - return; - } - } else { - if (time_passed > 10) { - jam(); - signal->theData[0]= scanptr.i; - signal->theData[1]= tcConnectptr.p->transid[0]; - signal->theData[2]= tcConnectptr.p->transid[1]; - execSCAN_HBREP(signal); - } - } - scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_ABORT; - scanNextLoopLab(signal); -}//Dblqh::scanTupkeyRefLab() - -/* ------------------------------------------------------------------------- - * THE SCAN HAS BEEN COMPLETED. EITHER BY REACHING THE END OR BY COMMAND - * FROM THE APPLICATION OR BY SOME SORT OF ERROR CONDITION. 
- * ------------------------------------------------------------------------- */ -void Dblqh::closeScanLab(Signal* signal) -{ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_CLOSE_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - ndbrequire(false); - }//switch - continueCloseScanAfterBlockedLab(signal); -}//Dblqh::closeScanLab() - -void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal) -{ - tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED; - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanState = ScanRecord::WAIT_CLOSE_SCAN; - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = RNIL; - signal->theData[2] = NextScanReq::ZSCAN_CLOSE; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); -}//Dblqh::continueCloseScanAfterBlockedLab() - -/* ------------------------------------------------------------------------- - * ENTER NEXT_SCANCONF - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_CLOSE_SCAN - * ------------------------------------------------------------------------- */ -void Dblqh::accScanCloseConfLab(Signal* signal) -{ - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - - if((tcConnectptr.p->primKeyLen - 4) > 0 && - scanptr.p->scanCompletedStatus != ZTRUE) - { - jam(); - continueAfterReceivingAllAiLab(signal); - return; - } - - scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN; - signal->theData[0] = tcConnectptr.p->tupConnectrec; - signal->theData[1] = tcConnectptr.p->tableref; - signal->theData[2] = scanptr.p->scanSchemaVersion; - signal->theData[3] = ZDELETE_STORED_PROC_ID; - signal->theData[4] = scanptr.p->scanStoredProcId; - sendSignal(tcConnectptr.p->tcTupBlockref, - GSN_STORED_PROCREQ, signal, 5, JBB); -}//Dblqh::accScanCloseConfLab() - -/* ------------------------------------------------------------------------- - * ENTER STORED_PROCCONF WITH - * ------------------------------------------------------------------------- - * PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_SCAN - * ------------------------------------------------------------------------- */ -void Dblqh::tupScanCloseConfLab(Signal* signal) -{ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - } else if (tcConnectptr.p->errorCode != 0) { - jam(); - ScanFragRef * ref = (ScanFragRef*)&signal->theData[0]; - ref->senderData = tcConnectptr.p->clientConnectrec; - ref->transId1 = tcConnectptr.p->transid[0]; - ref->transId2 = tcConnectptr.p->transid[1]; - ref->errorCode = tcConnectptr.p->errorCode; - sendSignal(tcConnectptr.p->clientBlockref, 
GSN_SCAN_FRAGREF, signal, - ScanFragRef::SignalLength, JBB); - } else { - jam(); - sendScanFragConf(signal, ZSCAN_FRAG_CLOSED); - }//if - finishScanrec(signal); - releaseScanrec(signal); - tcConnectptr.p->tcScanRec = RNIL; - deleteTransidHash(signal); - releaseOprec(signal); - releaseTcrec(signal, tcConnectptr); -}//Dblqh::tupScanCloseConfLab() - -/* ========================================================================= - * ======= INITIATE SCAN RECORD ======= - * - * SUBROUTINE SHORT NAME = ISC - * ========================================================================= */ -Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) -{ - const Uint32 reqinfo = scanFragReq->requestInfo; - const Uint32 max_rows = scanFragReq->batch_size_rows; - const Uint32 max_bytes = scanFragReq->batch_size_bytes; - const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo); - const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo); - const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo); - const Uint32 readCommitted = ScanFragReq::getReadCommittedFlag(reqinfo); - const Uint32 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo); - const Uint32 descending = ScanFragReq::getDescendingFlag(reqinfo); - Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo); - const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo); - const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo); - - scanptr.p->scanKeyinfoFlag = keyinfo; - scanptr.p->scanLockHold = scanLockHold; - scanptr.p->scanCompletedStatus = ZFALSE; - scanptr.p->scanType = ScanRecord::SCAN; - scanptr.p->scanApiBlockref = scanFragReq->resultRef; - scanptr.p->scanAiLength = attrLen; - scanptr.p->scanTcrec = tcConnectptr.i; - scanptr.p->scanSchemaVersion = scanFragReq->schemaVersion; - - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; - scanptr.p->m_max_batch_size_rows = max_rows; - scanptr.p->m_max_batch_size_bytes = max_bytes; - -#if 0 - if (! rangeScan) - tupScan = 1; -#endif - - if (! rangeScan && ! tupScan) - scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref; - else if (! tupScan) - scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref; - else - scanptr.p->scanBlockref = tcConnectptr.p->tcTupBlockref; - - scanptr.p->scanErrorCounter = 0; - scanptr.p->scanLockMode = scanLockMode; - scanptr.p->readCommitted = readCommitted; - scanptr.p->rangeScan = rangeScan; - scanptr.p->descending = descending; - scanptr.p->tupScan = tupScan; - scanptr.p->lcpScan = ScanFragReq::getLcpScanFlag(reqinfo); - scanptr.p->scanState = ScanRecord::SCAN_FREE; - scanptr.p->scanFlag = ZFALSE; - scanptr.p->m_row_id.setNull(); - scanptr.p->scanTcWaiting = ZTRUE; - scanptr.p->scanNumber = ~0; - scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr; - scanptr.p->m_last_row = 0; - scanptr.p->scanStoredProcId = RNIL; - scanptr.p->copyPtr = RNIL; - if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){ - jam(); - return ScanFragRef::ZWRONG_BATCH_SIZE; - } - if (!seize_acc_ptr_list(scanptr.p, max_rows)){ - jam(); - return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR; - } - /** - * Used for scan take over - */ - FragrecordPtr tFragPtr; - tFragPtr.i = fragptr.p->tableFragptr; - c_fragment_pool.getPtr(tFragPtr); - scanptr.p->fragPtrI = fragptr.p->tableFragptr; - - /** - * !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11 - * idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42) - */ - tupScan = 0; // Make sure that close tup scan does not start acc scan incorrectly - Uint32 start = (rangeScan || tupScan) ? 
MAX_PARALLEL_SCANS_PER_FRAG : 1 ; - Uint32 stop = (rangeScan || tupScan) ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : - MAX_PARALLEL_SCANS_PER_FRAG - 1; - stop += start; - Uint32 free = tFragPtr.p->m_scanNumberMask.find(start); - - if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){ - jam(); - - if(scanPrio == 0){ - jam(); - return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR; - } - - /** - * Put on queue - */ - scanptr.p->scanState = ScanRecord::IN_QUEUE; - LocalDLFifoList queue(c_scanRecordPool, - fragptr.p->m_queuedScans); - queue.add(scanptr); - return ZOK; - } - - scanptr.p->scanNumber = free; - tFragPtr.p->m_scanNumberMask.clear(free);// Update mask - - LocalDLList active(c_scanRecordPool, fragptr.p->m_activeScans); - active.add(scanptr); - if(scanptr.p->scanKeyinfoFlag){ - jam(); -#if defined VM_TRACE || defined ERROR_INSERT - ScanRecordPtr tmp; - ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p)); -#endif -#ifdef TRACE_SCAN_TAKEOVER - ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d", - scanptr.p->scanNumber, scanptr.p->fragPtrI, - tabptr.i, scanFragReq->fragmentNoKeyLen & 0xFFFF, - fragptr.i, fragptr.p->tableFragptr); -#endif - c_scanTakeOverHash.add(scanptr); - } - init_acc_ptr_list(scanptr.p); - return ZOK; -} - -/* ========================================================================= - * ======= INITIATE TC RECORD AT SCAN ======= - * - * SUBROUTINE SHORT NAME = IST - * ========================================================================= */ -void Dblqh::initScanTc(const ScanFragReq* req, - Uint32 transid1, - Uint32 transid2, - Uint32 fragId, - Uint32 nodeId) -{ - tcConnectptr.p->transid[0] = transid1; - tcConnectptr.p->transid[1] = transid2; - tcConnectptr.p->tcScanRec = scanptr.i; - tcConnectptr.p->tableref = tabptr.i; - tcConnectptr.p->fragmentid = fragId; - tcConnectptr.p->fragmentptr = fragptr.i; - tcConnectptr.p->tcOprec = tcConnectptr.p->clientConnectrec; - tcConnectptr.p->tcBlockref = tcConnectptr.p->clientBlockref; - tcConnectptr.p->errorCode = 0; - tcConnectptr.p->reclenAiLqhkey = 0; - tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE; - tcConnectptr.p->nextReplica = nodeId; - tcConnectptr.p->currTupAiLen = 0; - tcConnectptr.p->opExec = 1; - tcConnectptr.p->operation = ZREAD; - tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST; - tcConnectptr.p->commitAckMarker = RNIL; - tcConnectptr.p->m_offset_current_keybuf = 0; - tcConnectptr.p->m_scan_curr_range_no = 0; - tcConnectptr.p->m_dealloc = 0; - tcConnectptr.p->activeCreat = Fragrecord::AC_NORMAL; - TablerecPtr tTablePtr; - tTablePtr.i = tabptr.p->primaryTableId; - ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec); - tcConnectptr.p->m_disk_table = tTablePtr.p->m_disk_table && - (!req || !ScanFragReq::getNoDiskFlag(req->requestInfo)); - - tabptr.p->usageCount++; -}//Dblqh::initScanTc() - -/* ========================================================================= - * ======= FINISH SCAN RECORD ======= - * - * REMOVE SCAN RECORD FROM PER FRAGMENT LIST. 
- * ========================================================================= */ -void Dblqh::finishScanrec(Signal* signal) -{ - release_acc_ptr_list(scanptr.p); - - LocalDLFifoList queue(c_scanRecordPool, - fragptr.p->m_queuedScans); - - if(scanptr.p->scanState == ScanRecord::IN_QUEUE){ - jam(); - queue.release(scanptr); - return; - } - - if(scanptr.p->scanKeyinfoFlag){ - jam(); - ScanRecordPtr tmp; -#ifdef TRACE_SCAN_TAKEOVER - ndbout_c("removing (%d %d)", scanptr.p->scanNumber, scanptr.p->fragPtrI); -#endif - c_scanTakeOverHash.remove(tmp, * scanptr.p); - ndbrequire(tmp.p == scanptr.p); - } - - LocalDLList scans(c_scanRecordPool, fragptr.p->m_activeScans); - scans.release(scanptr); - - FragrecordPtr tFragPtr; - tFragPtr.i = scanptr.p->fragPtrI; - c_fragment_pool.getPtr(tFragPtr); - - const Uint32 scanNumber = scanptr.p->scanNumber; - ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber)); - ScanRecordPtr restart; - - /** - * Start on of queued scans - */ - if(scanNumber == NR_ScanNo || !queue.first(restart)){ - jam(); - tFragPtr.p->m_scanNumberMask.set(scanNumber); - return; - } - - if(ERROR_INSERTED(5034)){ - jam(); - tFragPtr.p->m_scanNumberMask.set(scanNumber); - return; - } - - ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE); - - ScanRecordPtr tmpScan = scanptr; - TcConnectionrecPtr tmpTc = tcConnectptr; - - tcConnectptr.i = restart.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - restart.p->scanNumber = scanNumber; - - queue.remove(restart); - scans.add(restart); - if(restart.p->scanKeyinfoFlag){ - jam(); -#if defined VM_TRACE || defined ERROR_INSERT - ScanRecordPtr tmp; - ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p)); -#endif - c_scanTakeOverHash.add(restart); -#ifdef TRACE_SCAN_TAKEOVER - ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI); -#endif - } - - /** - * This state is a bit weird, but that what set in initScanRec - */ - restart.p->scanState = ScanRecord::SCAN_FREE; - if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED) - { - scanptr = restart; - continueAfterReceivingAllAiLab(signal); - } - else - { - ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI); - } - - scanptr = tmpScan; - tcConnectptr = tmpTc; -}//Dblqh::finishScanrec() - -/* ========================================================================= - * ======= RELEASE SCAN RECORD ======= - * - * RELEASE A SCAN RECORD TO THE FREELIST. 
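// Illustrative sketch (not from the removed NDB sources): the per-fragment
// scan-slot bookkeeping that initScanrec()/finishScanrec() above implement
// with m_scanNumberMask.  ACC scans draw from slots 1-11 and index/tup scans
// from 12-42 (numbers taken from the comment in initScanrec); when the range
// is full the request is queued (or rejected at priority 0), and a finishing
// scan hands its slot straight to the first queued scan instead of returning
// it to the mask.  Container choices below are placeholders.
#include <bitset>
#include <cstdint>
#include <deque>

struct FragScanSlots {
  static const uint32_t ACC_FIRST = 1, IDX_FIRST = 12, SLOT_MAX = 42;
  std::bitset<SLOT_MAX + 1> free_mask;     // bit set == slot free
  std::deque<uint32_t> queued;             // scans waiting for a slot

  FragScanSlots() {
    for (uint32_t s = ACC_FIRST; s <= SLOT_MAX; s++) free_mask.set(s);
  }

  // Returns a slot number, or -1 when the caller must queue (or reject).
  int seize(bool indexOrTupScan) {
    uint32_t first = indexOrTupScan ? IDX_FIRST : ACC_FIRST;
    uint32_t last  = indexOrTupScan ? SLOT_MAX  : IDX_FIRST - 1;
    for (uint32_t s = first; s <= last; s++)
      if (free_mask.test(s)) { free_mask.reset(s); return (int)s; }
    return -1;
  }

  // On completion the slot either goes back to the mask or is taken over by
  // the first queued scan, as finishScanrec() does.
  void release(uint32_t slot) {
    if (queued.empty()) { free_mask.set(slot); return; }
    queued.pop_front();                    // that scan now owns 'slot'
  }
};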
- * ========================================================================= */ -void Dblqh::releaseScanrec(Signal* signal) -{ - scanptr.p->scanState = ScanRecord::SCAN_FREE; - scanptr.p->scanType = ScanRecord::ST_IDLE; - scanptr.p->scanTcWaiting = ZFALSE; - cbookedAccOps -= scanptr.p->m_max_batch_size_rows; - cscanNoFreeRec++; -}//Dblqh::releaseScanrec() - -/* ------------------------------------------------------------------------ - * ------- SEND KEYINFO20 TO API ------- - * - * ------------------------------------------------------------------------ */ -Uint32 Dblqh::sendKeyinfo20(Signal* signal, - ScanRecord * scanP, - TcConnectionrec * tcConP) -{ - ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN); - KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0]; - - /** - * Note that this code requires signal->theData to be big enough for - * a entire key - */ - const BlockReference ref = scanP->scanApiBlockref; - const Uint32 scanOp = scanP->m_curr_batch_size_rows; - const Uint32 nodeId = refToNode(ref); - const bool connectedToNode = getNodeInfo(nodeId).m_connected; -#ifdef NOT_USED - const Uint32 type = getNodeInfo(nodeId).m_type; - const bool is_api= (type >= NodeInfo::API && type <= NodeInfo::REP); - const bool old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); -#endif - const bool longable = true; // TODO is_api && !old_dest; - - Uint32 * dst = keyInfo->keyData; - dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength; - - Uint32 keyLen = readPrimaryKeys(scanP, tcConP, dst); - Uint32 fragId = tcConP->fragmentid; - keyInfo->clientOpPtr = scanP->scanApiOpPtr; - keyInfo->keyLen = keyLen; - keyInfo->scanInfo_Node = - KeyInfo20::setScanInfo(scanOp, scanP->scanNumber) + (fragId << 20); - keyInfo->transId1 = tcConP->transid[0]; - keyInfo->transId2 = tcConP->transid[1]; - - Uint32 * src = signal->theData+25; - if(connectedToNode){ - jam(); - - if(nodeId != getOwnNodeId()){ - jam(); - - if(keyLen <= KeyInfo20::DataLength || !longable) { - while(keyLen > KeyInfo20::DataLength){ - jam(); - MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength); - sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB); - src += KeyInfo20::DataLength;; - keyLen -= KeyInfo20::DataLength; - } - - MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen); - sendSignal(ref, GSN_KEYINFO20, signal, - KeyInfo20::HeaderLength+keyLen, JBB); - return keyLen; - } - - LinearSectionPtr ptr[3]; - ptr[0].p = src; - ptr[0].sz = keyLen; - sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength, - JBB, ptr, 1); - return keyLen; - } - - EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal, - KeyInfo20::HeaderLength + keyLen); - jamEntry(); - return keyLen; - } - - /** - * If this node does not have a direct connection - * to the receiving node we want to send the signals - * routed via the node that controls this read - */ - Uint32 routeBlockref = tcConP->clientBlockref; - - if(keyLen < KeyInfo20::DataLength || !longable){ - jam(); - - while (keyLen > (KeyInfo20::DataLength - 1)) { - jam(); - MEMCOPY_NO_WORDS(keyInfo->keyData, src, KeyInfo20::DataLength - 1); - keyInfo->keyData[KeyInfo20::DataLength-1] = ref; - sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, 25, JBB); - src += KeyInfo20::DataLength - 1; - keyLen -= KeyInfo20::DataLength - 1; - } - - MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen); - keyInfo->keyData[keyLen] = ref; - sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, - KeyInfo20::HeaderLength+keyLen+1, JBB); - return keyLen; - } - - keyInfo->keyData[0] = ref; - 
LinearSectionPtr ptr[3]; - ptr[0].p = src; - ptr[0].sz = keyLen; - sendSignal(routeBlockref, GSN_KEYINFO20_R, signal, - KeyInfo20::HeaderLength+1, JBB, ptr, 1); - return keyLen; -} - -/* ------------------------------------------------------------------------ - * ------- SEND SCAN_FRAGCONF TO TC THAT CONTROLS THE SCAN ------- - * - * ------------------------------------------------------------------------ */ -void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted) -{ - Uint32 completed_ops= scanptr.p->m_curr_batch_size_rows; - Uint32 total_len= scanptr.p->m_curr_batch_size_bytes; - scanptr.p->scanTcWaiting = ZFALSE; - - if(ERROR_INSERTED(5037)){ - CLEAR_ERROR_INSERT_VALUE; - return; - } - ScanFragConf * conf = (ScanFragConf*)&signal->theData[0]; -#ifdef NOT_USED - NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref); -#endif - Uint32 trans_id1= tcConnectptr.p->transid[0]; - Uint32 trans_id2= tcConnectptr.p->transid[1]; - - conf->senderData = tcConnectptr.p->clientConnectrec; - conf->completedOps = completed_ops; - conf->fragmentCompleted = scanCompleted; - conf->transId1 = trans_id1; - conf->transId2 = trans_id2; - conf->total_len= total_len; - sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF, - signal, ScanFragConf::SignalLength, JBB); - - if(!scanptr.p->scanLockHold) - { - jam(); - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; - } -}//Dblqh::sendScanFragConf() - -/* ######################################################################### */ -/* ####### NODE RECOVERY MODULE ####### */ -/* */ -/* ######################################################################### */ -/*---------------------------------------------------------------------------*/ -/* */ -/* THIS MODULE IS USED WHEN A NODE HAS FAILED. IT PERFORMS A COPY OF A */ -/* FRAGMENT TO A NEW REPLICA OF THE FRAGMENT. IT DOES ALSO SHUT DOWN ALL */ -/* CONNECTIONS TO THE FAILED NODE. */ -/*---------------------------------------------------------------------------*/ -Uint32 -Dblqh::calculateHash(Uint32 tableId, const Uint32* src) -{ - jam(); - Uint64 Tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1]; - Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]; - Uint32 keyLen = xfrm_key(tableId, src, (Uint32*)Tmp, sizeof(Tmp) >> 2, - keyPartLen); - ndbrequire(keyLen); - - return md5_hash(Tmp, keyLen); -}//Dblqh::calculateHash() - -/** - * PREPARE COPY FRAG REQ - */ -void -Dblqh::execPREPARE_COPY_FRAG_REQ(Signal* signal) -{ - jamEntry(); - PrepareCopyFragReq req = *(PrepareCopyFragReq*)signal->getDataPtr(); - - CRASH_INSERTION(5045); - - tabptr.i = req.tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - Uint32 max_page = RNIL; - - if (getOwnNodeId() != req.startingNodeId) - { - jam(); - /** - * This is currently dead code... - * but is provided so we can impl. a better scan+delete on - * starting node wo/ having to change running node - */ - ndbrequire(getOwnNodeId() == req.copyNodeId); - c_tup->get_frag_info(req.tableId, req.fragId, &max_page); - - PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend(); - conf->senderData = req.senderData; - conf->senderRef = reference(); - conf->tableId = req.tableId; - conf->fragId = req.fragId; - conf->copyNodeId = req.copyNodeId; - conf->startingNodeId = req.startingNodeId; - conf->maxPageNo = max_page; - sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF, - signal, PrepareCopyFragConf::SignalLength, JBB); - - return; - } - - if (! 
DictTabInfo::isOrderedIndex(tabptr.p->tableType)) - { - jam(); - ndbrequire(getFragmentrec(signal, req.fragId)); - - /** - * - */ - if (cstartType == NodeState::ST_SYSTEM_RESTART) - { - jam(); - signal->theData[0] = fragptr.p->tabRef; - signal->theData[1] = fragptr.p->fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - } - - - /** - * - */ - fragptr.p->m_copy_started_state = Fragrecord::AC_IGNORED; - fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION; - fragptr.p->logFlag = Fragrecord::STATE_FALSE; - - c_tup->get_frag_info(req.tableId, req.fragId, &max_page); - } - - PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend(); - conf->senderData = req.senderData; - conf->senderRef = reference(); - conf->tableId = req.tableId; - conf->fragId = req.fragId; - conf->copyNodeId = req.copyNodeId; - conf->startingNodeId = req.startingNodeId; - conf->maxPageNo = max_page; - sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF, - signal, PrepareCopyFragConf::SignalLength, JBB); -} - -/* *************************************** */ -/* COPY_FRAGREQ: Start copying a fragment */ -/* *************************************** */ -void Dblqh::execCOPY_FRAGREQ(Signal* signal) -{ - jamEntry(); - const CopyFragReq * const copyFragReq = (CopyFragReq *)&signal->theData[0]; - tabptr.i = copyFragReq->tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - Uint32 i; - const Uint32 fragId = copyFragReq->fragId; - const Uint32 copyPtr = copyFragReq->userPtr; - const Uint32 userRef = copyFragReq->userRef; - const Uint32 nodeId = copyFragReq->nodeId; - const Uint32 gci = copyFragReq->gci; - - ndbrequire(cnoActiveCopy < 3); - ndbrequire(getFragmentrec(signal, fragId)); - ndbrequire(fragptr.p->copyFragState == ZIDLE); - ndbrequire(cfirstfreeTcConrec != RNIL); - ndbrequire(fragptr.p->m_scanNumberMask.get(NR_ScanNo)); - - Uint32 key = fragptr.p->fragDistributionKey = copyFragReq->distributionKey; - - Uint32 checkversion = NDB_VERSION >= MAKE_VERSION(5,1,0) ? - NDBD_UPDATE_FRAG_DIST_KEY_51 : NDBD_UPDATE_FRAG_DIST_KEY_50; - - Uint32 nodeCount = copyFragReq->nodeCount; - NdbNodeBitmask nodemask; - if (getNodeInfo(refToNode(userRef)).m_version >= checkversion) - { - ndbrequire(nodeCount <= MAX_REPLICAS); - for (i = 0; inodeList[i]); - } - Uint32 maxPage = copyFragReq->nodeList[nodeCount]; - Uint32 version = getNodeInfo(refToNode(userRef)).m_version; - if (ndb_check_prep_copy_frag_version(version) < 2) - { - jam(); - maxPage = RNIL; - } - - if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) { - jam(); - /** - * Ordered index doesn't need to be copied - */ - CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0]; - conf->userPtr = copyPtr; - conf->sendingNodeId = cownNodeid; - conf->startingNodeId = nodeId; - conf->tableId = tabptr.i; - conf->fragId = fragId; - sendSignal(userRef, GSN_COPY_FRAGCONF, signal, - CopyFragConf::SignalLength, JBB); - return; - }//if - - LocalDLList scans(c_scanRecordPool, fragptr.p->m_activeScans); - ndbrequire(scans.seize(scanptr)); -/* ------------------------------------------------------------------------- */ -// We keep track of how many operation records in ACC that has been booked. -// Copy fragment has records always booked and thus need not book any. The -// most operations in parallel use is the m_max_batch_size_rows. -// This variable has to be set-up here since it is used by releaseScanrec -// to unbook operation records in ACC. 
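// Illustrative sketch (not from the removed NDB sources): the ACC
// operation-record booking invariant the comment above describes.  A normal
// scan reserves m_max_batch_size_rows operation records up front and
// releaseScanrec() later unbooks the same amount; a copy-fragment scan runs
// on permanently reserved records, so it sets m_max_batch_size_rows = 0 and
// the shared release path stays balanced.  Names below are placeholders.
#include <cstdint>

struct AccOpBooking {
  uint32_t booked = 0;                               // cbookedAccOps in Dblqh
  void scanStarted(uint32_t maxBatchRows)  { booked += maxBatchRows; }
  void scanReleased(uint32_t maxBatchRows) { booked -= maxBatchRows; } // 0 for copy scans
};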
-/* ------------------------------------------------------------------------- */ - scanptr.p->m_max_batch_size_rows = 0; - scanptr.p->rangeScan = 0; - scanptr.p->tupScan = 0; - seizeTcrec(); - tcConnectptr.p->clientBlockref = userRef; - - /** - * Remove implicit cast/usage of CopyFragReq - */ - //initCopyrec(signal); - scanptr.p->copyPtr = copyPtr; - scanptr.p->scanType = ScanRecord::COPY; - scanptr.p->scanNodeId = nodeId; - scanptr.p->scanTcrec = tcConnectptr.i; - scanptr.p->scanSchemaVersion = copyFragReq->schemaVersion; - scanptr.p->scanCompletedStatus = ZFALSE; - scanptr.p->scanErrorCounter = 0; - scanptr.p->scanNumber = NR_ScanNo; - scanptr.p->scanKeyinfoFlag = 0; // Don't put into hash - scanptr.p->fragPtrI = fragptr.i; - scanptr.p->scanApiOpPtr = tcConnectptr.i; - scanptr.p->scanApiBlockref = reference(); - fragptr.p->m_scanNumberMask.clear(NR_ScanNo); - scanptr.p->scanBlockref = DBTUP_REF; - scanptr.p->scanLockHold = ZFALSE; - scanptr.p->m_curr_batch_size_rows = 0; - scanptr.p->m_curr_batch_size_bytes= 0; - - initScanTc(0, - 0, - (DBLQH << 20) + (cownNodeid << 8), - fragId, - copyFragReq->nodeId); - cactiveCopy[cnoActiveCopy] = fragptr.i; - cnoActiveCopy++; - - tcConnectptr.p->copyCountWords = 0; - tcConnectptr.p->tcOprec = tcConnectptr.i; - tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion; - tcConnectptr.p->savePointId = gci; - scanptr.p->scanState = ScanRecord::WAIT_ACC_COPY; - AccScanReq * req = (AccScanReq*)&signal->theData[0]; - req->senderData = scanptr.i; - req->senderRef = cownref; - req->tableId = tabptr.i; - req->fragmentNo = fragId; - req->requestInfo = 0; - AccScanReq::setLockMode(req->requestInfo, 0); - AccScanReq::setReadCommittedFlag(req->requestInfo, 0); - AccScanReq::setNRScanFlag(req->requestInfo, 1); - AccScanReq::setNoDiskScanFlag(req->requestInfo, 1); - - req->transId1 = tcConnectptr.p->transid[0]; - req->transId2 = tcConnectptr.p->transid[1]; - req->savePointId = tcConnectptr.p->savePointId; - req->maxPage = maxPage; - sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal, - AccScanReq::SignalLength + 1, JBB); - - if (! 
nodemask.isclear()) - { - ndbrequire(nodemask.get(getOwnNodeId())); - ndbrequire(nodemask.get(nodeId)); // cpy dest - nodemask.clear(getOwnNodeId()); - nodemask.clear(nodeId); - - UpdateFragDistKeyOrd* - ord = (UpdateFragDistKeyOrd*)signal->getDataPtrSend(); - ord->tableId = tabptr.i; - ord->fragId = fragId; - ord->fragDistributionKey = key; - i = 0; - while ((i = nodemask.find(i+1)) != NdbNodeBitmask::NotFound) - { - if (getNodeInfo(i).m_version >= checkversion) - sendSignal(calcLqhBlockRef(i), GSN_UPDATE_FRAG_DIST_KEY_ORD, - signal, UpdateFragDistKeyOrd::SignalLength, JBB); - } - } - return; -}//Dblqh::execCOPY_FRAGREQ() - -void -Dblqh::execUPDATE_FRAG_DIST_KEY_ORD(Signal * signal) -{ - jamEntry(); - UpdateFragDistKeyOrd* ord =(UpdateFragDistKeyOrd*)signal->getDataPtr(); - - tabptr.i = ord->tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - ndbrequire(getFragmentrec(signal, ord->fragId)); - fragptr.p->fragDistributionKey = ord->fragDistributionKey; -} - -void Dblqh::accScanConfCopyLab(Signal* signal) -{ - AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0]; - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); -/*--------------------------------------------------------------------------*/ -/* PRECONDITION: SCAN_STATE = WAIT_ACC_COPY */ -/*--------------------------------------------------------------------------*/ - if (accScanConf->flag == AccScanConf::ZEMPTY_FRAGMENT) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE FRAGMENT WAS EMPTY. */ -/* REPORT SUCCESSFUL COPYING. */ -/*---------------------------------------------------------------------------*/ - tupCopyCloseConfLab(signal); - return; - }//if - scanptr.p->scanAccPtr = accScanConf->accPtr; - scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_COPY; - signal->theData[0] = tcConnectptr.p->tupConnectrec; - signal->theData[1] = tcConnectptr.p->tableref; - signal->theData[2] = scanptr.p->scanSchemaVersion; - signal->theData[3] = ZSTORED_PROC_COPY; -// theData[4] is not used in TUP with ZSTORED_PROC_COPY - sendSignal(scanptr.p->scanBlockref, GSN_STORED_PROCREQ, signal, 5, JBB); - return; -}//Dblqh::accScanConfCopyLab() - -/*---------------------------------------------------------------------------*/ -/* ENTER STORED_PROCCONF WITH */ -/* TC_CONNECTPTR, */ -/* TSTORED_PROC_ID */ -/*---------------------------------------------------------------------------*/ -void Dblqh::storedProcConfCopyLab(Signal* signal) -{ -/*---------------------------------------------------------------------------*/ -/* PRECONDITION: SCAN_STATE = WAIT_STORED_PROC_COPY */ -/*---------------------------------------------------------------------------*/ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE COPY PROCESS HAVE BEEN COMPLETED, MOST LIKELY DUE TO A NODE FAILURE.*/ -/*---------------------------------------------------------------------------*/ - closeCopyLab(signal); - return; - }//if - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY; - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::COPY_FIRST_STOPPED; - return; - break; 
- case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - continueFirstCopyAfterBlockedLab(signal); - return; -}//Dblqh::storedProcConfCopyLab() - -void Dblqh::continueFirstCopyAfterBlockedLab(Signal* signal) -{ - /** - * Start sending ROWID for all operations from now on - */ - fragptr.p->m_copy_started_state = Fragrecord::AC_NR_COPY; - - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - - if (false && fragptr.p->tabRef > 4) - { - ndbout_c("STOPPING COPY X = [ %d %d %d %d ]", - refToBlock(scanptr.p->scanBlockref), - scanptr.p->scanAccPtr, RNIL, NextScanReq::ZSCAN_NEXT); - - /** - * RESTART: > DUMP 7020 332 X - */ - return; - } - - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = RNIL; - signal->theData[2] = NextScanReq::ZSCAN_NEXT; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); - return; -}//Dblqh::continueFirstCopyAfterBlockedLab() - -/*---------------------------------------------------------------------------*/ -/* ENTER NEXT_SCANCONF WITH */ -/* SCANPTR, */ -/* TFRAGID, */ -/* TACC_OPPTR, */ -/* TLOCAL_KEY1, */ -/* TLOCAL_KEY2, */ -/* TKEY_LENGTH, */ -/* TKEY1, */ -/* TKEY2, */ -/* TKEY3, */ -/* TKEY4 */ -/*---------------------------------------------------------------------------*/ -/* PRECONDITION: SCAN_STATE = WAIT_NEXT_SCAN_COPY */ -/*---------------------------------------------------------------------------*/ -void Dblqh::nextScanConfCopyLab(Signal* signal) -{ - NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0]; - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - if (nextScanConf->fragId == RNIL) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THERE ARE NO MORE TUPLES TO FETCH. WE NEED TO CLOSE */ -/* THE COPY IN ACC AND DELETE THE STORED PROCEDURE IN TUP */ -/*---------------------------------------------------------------------------*/ - if (tcConnectptr.p->copyCountWords == 0) { - closeCopyLab(signal); - return; - }//if -/*---------------------------------------------------------------------------*/ -// Wait until copying is completed also at the starting node before reporting -// completion. Signal completion through scanCompletedStatus-flag. 
-/*---------------------------------------------------------------------------*/ - scanptr.p->scanCompletedStatus = ZTRUE; - scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY; - if (ERROR_INSERTED(5043)) - { - CLEAR_ERROR_INSERT_VALUE; - tcConnectptr.p->copyCountWords = ~0; - signal->theData[0] = 9999; - sendSignal(numberToRef(CMVMI, scanptr.p->scanNodeId), - GSN_NDB_TAMPER, signal, 1, JBA); - } - return; - }//if - - TcConnectionrec * tcConP = tcConnectptr.p; - - tcConP->m_use_rowid = true; - tcConP->m_row_id = scanptr.p->m_row_id; - - if (signal->getLength() == 7) - { - jam(); - ndbrequire(nextScanConf->accOperationPtr == RNIL); - initCopyTc(signal, ZDELETE); - set_acc_ptr_in_scan_record(scanptr.p, 0, RNIL); - tcConP->gci = nextScanConf->gci; - - tcConP->primKeyLen = 0; - tcConP->totSendlenAi = 0; - tcConP->connectState = TcConnectionrec::COPY_CONNECTED; - -/*---------------------------------------------------------------------------*/ -// To avoid using up to many operation records in ACC we will increase the -// constant to ensure that we never send more than 40 records at a time. -// This is where the constant 56 comes from. For long records this constant -// will not matter that much. The current maximum is 6000 words outstanding -// (including a number of those 56 words not really sent). We also have to -// ensure that there are never more simultaneous usage of these operation -// records to ensure that node recovery does not fail because of simultaneous -// scanning. -/*---------------------------------------------------------------------------*/ - UintR TnoOfWords = 8; - TnoOfWords = TnoOfWords + MAGIC_CONSTANT; - TnoOfWords = TnoOfWords + (TnoOfWords >> 2); - - /*----------------------------------------------------------------- - * NOTE for transid1! - * Transid1 in the tcConnection record is used load regulate the - * copy(node recovery) process. - * The number of outstanding words are written in the transid1 - * variable. This will be sent to the starting node in the - * LQHKEYREQ signal and when the answer is returned in the LQHKEYCONF - * we can reduce the number of outstanding words and check to see - * if more LQHKEYREQ signals should be sent. - * - * However efficient this method is rather unsafe in such way that - * it overwrites the transid1 original data. - * - * Also see TR 587. - *----------------------------------------------------------------*/ - tcConP->transid[0] = TnoOfWords; // Data overload, see note! 
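// Illustrative sketch (not from the removed NDB sources): the sliding-window
// flow control described in the NOTE above.  The per-record word count is
// smuggled to the starting node in transid[0], echoed back in
// LQHKEYCONF::transId1, and further LQHKEYREQs are only sent while the
// outstanding total stays below cmaxWordsAtNodeRec.  MAGIC_CONSTANT and the
// threshold are treated as opaque inputs here.
#include <cstdint>

struct CopyWindow {
  uint32_t outstandingWords = 0;           // copyCountWords in Dblqh
  uint32_t maxWords = 6000;                // "6000 words outstanding" per the comment above

  static uint32_t recordCost(uint32_t payloadWords, uint32_t magic) {
    uint32_t w = payloadWords + magic;     // payload plus fixed per-record overhead
    return w + (w / 4);                    // plus a 25 % safety margin
  }
  bool mayTakeNextRecord() const { return outstandingWords < maxWords; }
  void onLqhKeyReqSent(uint32_t words) { outstandingWords += words; }
  void onLqhKeyConf(uint32_t words)    { outstandingWords -= words; } // words == transId1
};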
- packLqhkeyreqLab(signal); - tcConP->copyCountWords += TnoOfWords; - scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY; - if (tcConP->copyCountWords < cmaxWordsAtNodeRec) { - nextRecordCopy(signal); - } - return; - } - else - { - // If accOperationPtr == RNIL no record was returned by ACC - if (nextScanConf->accOperationPtr == RNIL) { - jam(); - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; - sendSignal(scanptr.p->scanBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); - return; - } - - initCopyTc(signal, ZINSERT); - set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr); - - Fragrecord* fragPtrP= fragptr.p; - scanptr.p->scanState = ScanRecord::WAIT_TUPKEY_COPY; - tcConP->transactionState = TcConnectionrec::COPY_TUPKEY; - if(tcConP->m_disk_table) - { - next_scanconf_load_diskpage(signal, scanptr, tcConnectptr,fragPtrP); - } - else - { - next_scanconf_tupkeyreq(signal, scanptr, tcConP, fragPtrP, RNIL); - } - } -}//Dblqh::nextScanConfCopyLab() - - -/*---------------------------------------------------------------------------*/ -/* USED IN COPYING OPERATION TO RECEIVE ATTRINFO FROM TUP. */ -/*---------------------------------------------------------------------------*/ -/* ************>> */ -/* TRANSID_AI > */ -/* ************>> */ -void Dblqh::execTRANSID_AI(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - Uint32 length = signal->length() - 3; - ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::COPY_TUPKEY); - Uint32 * src = &signal->theData[3]; - while(length > 22){ - if (saveTupattrbuf(signal, src, 22) == ZOK) { - ; - } else { - jam(); - tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR; - return; - }//if - src += 22; - length -= 22; - } - if (saveTupattrbuf(signal, src, length) == ZOK) { - return; - } - jam(); - tcConnectptr.p->errorCode = ZGET_ATTRINBUF_ERROR; -}//Dblqh::execTRANSID_AI() - -/*--------------------------------------------------------------------------*/ -/* ENTER TUPKEYCONF WITH */ -/* TC_CONNECTPTR, */ -/* TDATA2, */ -/* TDATA3, */ -/* TDATA4, */ -/* TDATA5 */ -/*--------------------------------------------------------------------------*/ -/* PRECONDITION: TRANSACTION_STATE = COPY_TUPKEY */ -/*--------------------------------------------------------------------------*/ -void Dblqh::copyTupkeyConfLab(Signal* signal) -{ - const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr(); - - UintR readLength = tupKeyConf->readLength; - Uint32 tableId = tcConnectptr.p->tableref; - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - ScanRecord* scanP = scanptr.p; - - Uint32 rows = scanP->m_curr_batch_size_rows; - Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanP, rows, false); - ndbassert(accOpPtr != (Uint32)-1); - c_acc->execACCKEY_ORD(signal, accOpPtr); - - if (tcConnectptr.p->errorCode != 0) { - jam(); - closeCopyLab(signal); - return; - }//if - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE COPY PROCESS HAVE BEEN CLOSED. MOST LIKELY A NODE FAILURE. 
*/ -/*---------------------------------------------------------------------------*/ - closeCopyLab(signal); - return; - }//if - TcConnectionrec * tcConP = tcConnectptr.p; - tcConnectptr.p->totSendlenAi = readLength; - tcConnectptr.p->connectState = TcConnectionrec::COPY_CONNECTED; - - // Read primary keys (used to get here via scan keyinfo) - Uint32* tmp = signal->getDataPtrSend()+24; - Uint32 len= tcConnectptr.p->primKeyLen = readPrimaryKeys(scanP, tcConP, tmp); - - tcConP->gci = tmp[len]; - // Calculate hash (no need to linearies key) - if (g_key_descriptor_pool.getPtr(tableId)->hasCharAttr) - { - tcConnectptr.p->hashValue = calculateHash(tableId, tmp); - } - else - { - tcConnectptr.p->hashValue = md5_hash((Uint64*)tmp, len); - } - - // Move into databuffer to make packLqhkeyreqLab happy - memcpy(tcConP->tupkeyData, tmp, 4*4); - if(len > 4) - keyinfoLab(tmp+4, tmp + len); - LqhKeyReq::setKeyLen(tcConP->reqinfo, len); - -/*---------------------------------------------------------------------------*/ -// To avoid using up to many operation records in ACC we will increase the -// constant to ensure that we never send more than 40 records at a time. -// This is where the constant 56 comes from. For long records this constant -// will not matter that much. The current maximum is 6000 words outstanding -// (including a number of those 56 words not really sent). We also have to -// ensure that there are never more simultaneous usage of these operation -// records to ensure that node recovery does not fail because of simultaneous -// scanning. -/*---------------------------------------------------------------------------*/ - UintR TnoOfWords = readLength + len; - TnoOfWords = TnoOfWords + MAGIC_CONSTANT; - TnoOfWords = TnoOfWords + (TnoOfWords >> 2); - - /*----------------------------------------------------------------- - * NOTE for transid1! - * Transid1 in the tcConnection record is used load regulate the - * copy(node recovery) process. - * The number of outstanding words are written in the transid1 - * variable. This will be sent to the starting node in the - * LQHKEYREQ signal and when the answer is returned in the LQHKEYCONF - * we can reduce the number of outstanding words and check to see - * if more LQHKEYREQ signals should be sent. - * - * However efficient this method is rather unsafe in such way that - * it overwrites the transid1 original data. - * - * Also see TR 587. - *----------------------------------------------------------------*/ - tcConnectptr.p->transid[0] = TnoOfWords; // Data overload, see note! 
- packLqhkeyreqLab(signal); - tcConnectptr.p->copyCountWords += TnoOfWords; - scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY; - if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) { - nextRecordCopy(signal); - return; - }//if - return; -}//Dblqh::copyTupkeyConfLab() - -/*---------------------------------------------------------------------------*/ -/* ENTER LQHKEYCONF */ -/*---------------------------------------------------------------------------*/ -/* PRECONDITION: CONNECT_STATE = COPY_CONNECTED */ -/*---------------------------------------------------------------------------*/ -void Dblqh::copyCompletedLab(Signal* signal) -{ - const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - - ndbrequire(tcConnectptr.p->transid[1] == lqhKeyConf->transId2); - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - if (tcConnectptr.p->copyCountWords >= cmaxWordsAtNodeRec) { - tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note! - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); -/*---------------------------------------------------------------------------*/ -// Copy to complete, we will not start any new copying. -/*---------------------------------------------------------------------------*/ - closeCopyLab(signal); - return; - }//if - if (tcConnectptr.p->copyCountWords < cmaxWordsAtNodeRec) { - jam(); - nextRecordCopy(signal); - }//if - return; - }//if - tcConnectptr.p->copyCountWords -= lqhKeyConf->transId1; // Data overload, see note! - ndbrequire(tcConnectptr.p->copyCountWords <= cmaxWordsAtNodeRec); - if (tcConnectptr.p->copyCountWords > 0) { - jam(); - return; - }//if -/*---------------------------------------------------------------------------*/ -// No more outstanding copies. We will only start new ones from here if it was -// stopped before and this only happens when copyCountWords is bigger than the -// threshold value. Since this did not occur we must be waiting for completion. -// Check that this is so. If not we crash to find out what is going on. -/*---------------------------------------------------------------------------*/ - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); - closeCopyLab(signal); - return; - }//if - - if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY && - scanptr.p->scanErrorCounter) - { - jam(); - closeCopyLab(signal); - return; - } - - if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) { - jam(); -/*---------------------------------------------------------------------------*/ -// Make sure that something is in progress. Otherwise we will simply stop -// and nothing more will happen. -/*---------------------------------------------------------------------------*/ - systemErrorLab(signal, __LINE__); - return; - }//if - return; -}//Dblqh::copyCompletedLab() - -void Dblqh::nextRecordCopy(Signal* signal) -{ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - if (scanptr.p->scanState != ScanRecord::WAIT_LQHKEY_COPY) { - jam(); -/*---------------------------------------------------------------------------*/ -// Make sure that nothing is in progress. Otherwise we will have to simultaneous -// scans on the same record and this will certainly lead to unexpected -// behaviour. 
-/*---------------------------------------------------------------------------*/ - systemErrorLab(signal, __LINE__); - return; - }//if - scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN_COPY; - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::COPY_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - continueCopyAfterBlockedLab(signal); - return; -}//Dblqh::nextRecordCopy() - -void Dblqh::continueCopyAfterBlockedLab(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - tcConnectptr.p->errorCode = 0; - Uint32 acc_op_ptr= get_acc_ptr_from_scan_record(scanptr.p, 0, false); - if (acc_op_ptr != RNIL) - { - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = acc_op_ptr; - signal->theData[2] = NextScanReq::ZSCAN_NEXT_COMMIT; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); - } - else - { - /** - * No need to commit (unlock) - */ - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = RNIL; - signal->theData[2] = NextScanReq::ZSCAN_NEXT; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); - } - return; -}//Dblqh::continueCopyAfterBlockedLab() - -void Dblqh::copyLqhKeyRefLab(Signal* signal) -{ - ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]); - Uint32 copyWords = signal->theData[3]; - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanErrorCounter++; - tcConnectptr.p->errorCode = terrorCode; - - LqhKeyConf* conf = (LqhKeyConf*)signal->getDataPtrSend(); - conf->transId1 = copyWords; - conf->transId2 = tcConnectptr.p->transid[1]; - copyCompletedLab(signal); -}//Dblqh::copyLqhKeyRefLab() - -void Dblqh::closeCopyLab(Signal* signal) -{ - if (tcConnectptr.p->copyCountWords > 0) { -/*---------------------------------------------------------------------------*/ -// We are still waiting for responses from the starting node. -// Wait until all of those have arrived until we start the -// close process. 
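// Illustrative sketch (not from the removed NDB sources): how the copy scan
// advances in continueCopyAfterBlockedLab() above.  If the previous row is
// still held in ACC, its operation is committed while the next row is fetched
// (ZSCAN_NEXT_COMMIT); otherwise a plain ZSCAN_NEXT is requested.  Enum values
// and names below are placeholders.
#include <cstdint>

enum NextScanFlag { SCAN_NEXT, SCAN_NEXT_COMMIT };

NextScanFlag nextCopyStep(uint32_t prevAccOpPtr, uint32_t nilValue /* RNIL */)
{
  return prevAccOpPtr != nilValue ? SCAN_NEXT_COMMIT   // unlock the previous row
                                  : SCAN_NEXT;         // nothing left to unlock
}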
-/*---------------------------------------------------------------------------*/ - scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY; - jam(); - return; - }//if - tcConnectptr.p->transid[0] = 0; - tcConnectptr.p->transid[1] = 0; - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - - /** - * Stop sending ROWID for all operations from now on - */ - fragptr.p->m_copy_started_state = Fragrecord::AC_NORMAL; - - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - scanptr.p->scanState = ScanRecord::WAIT_CLOSE_COPY; - switch (fragptr.p->fragStatus) { - case Fragrecord::FSACTIVE: - jam(); - break; - case Fragrecord::BLOCKED: - jam(); - linkFragQueue(signal); - tcConnectptr.p->transactionState = TcConnectionrec::COPY_CLOSE_STOPPED; - return; - break; - case Fragrecord::FREE: - jam(); - case Fragrecord::ACTIVE_CREATION: - jam(); - case Fragrecord::CRASH_RECOVERING: - jam(); - case Fragrecord::DEFINED: - jam(); - case Fragrecord::REMOVING: - jam(); - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - continueCloseCopyAfterBlockedLab(signal); - return; -}//Dblqh::closeCopyLab() - -void Dblqh::continueCloseCopyAfterBlockedLab(Signal* signal) -{ - scanptr.i = tcConnectptr.p->tcScanRec; - c_scanRecordPool.getPtr(scanptr); - signal->theData[0] = scanptr.p->scanAccPtr; - signal->theData[1] = RNIL; - signal->theData[2] = NextScanReq::ZSCAN_CLOSE; - sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB); - return; -}//Dblqh::continueCloseCopyAfterBlockedLab() - -/*---------------------------------------------------------------------------*/ -/* ENTER NEXT_SCANCONF WITH */ -/* SCANPTR, */ -/* TFRAGID, */ -/* TACC_OPPTR, */ -/* TLOCAL_KEY1, */ -/* TLOCAL_KEY2, */ -/* TKEY_LENGTH, */ -/* TKEY1, */ -/* TKEY2, */ -/* TKEY3, */ -/* TKEY4 */ -/*---------------------------------------------------------------------------*/ -/* PRECONDITION: SCAN_STATE = WAIT_CLOSE_COPY */ -/*---------------------------------------------------------------------------*/ -void Dblqh::accCopyCloseConfLab(Signal* signal) -{ - tcConnectptr.i = scanptr.p->scanTcrec; - scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - signal->theData[0] = tcConnectptr.p->tupConnectrec; - signal->theData[1] = tcConnectptr.p->tableref; - signal->theData[2] = scanptr.p->scanSchemaVersion; - signal->theData[3] = ZDELETE_STORED_PROC_ID; - signal->theData[4] = scanptr.p->scanStoredProcId; - sendSignal(tcConnectptr.p->tcTupBlockref, GSN_STORED_PROCREQ, signal, 5, JBB); - return; -}//Dblqh::accCopyCloseConfLab() - -/*---------------------------------------------------------------------------*/ -/* ENTER STORED_PROCCONF WITH */ -/* TC_CONNECTPTR, */ -/* TSTORED_PROC_ID */ -/*---------------------------------------------------------------------------*/ -/* PRECONDITION: SCAN_STATE = WAIT_DELETE_STORED_PROC_ID_COPY */ -/*---------------------------------------------------------------------------*/ -void Dblqh::tupCopyCloseConfLab(Signal* signal) -{ - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - fragptr.p->copyFragState = ZIDLE; - - if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) { - jam(); - tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = 
tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - - CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0]; - ref->userPtr = scanptr.p->copyPtr; - ref->sendingNodeId = cownNodeid; - ref->startingNodeId = scanptr.p->scanNodeId; - ref->tableId = fragptr.p->tabRef; - ref->fragId = fragptr.p->fragId; - ref->errorCode = ZNODE_FAILURE_ERROR; - sendSignal(tcConnectptr.p->clientBlockref, GSN_COPY_FRAGREF, signal, - CopyFragRef::SignalLength, JBB); - } else { - if (scanptr.p->scanErrorCounter > 0) { - jam(); - CopyFragRef * const ref = (CopyFragRef *)&signal->theData[0]; - ref->userPtr = scanptr.p->copyPtr; - ref->sendingNodeId = cownNodeid; - ref->startingNodeId = scanptr.p->scanNodeId; - ref->tableId = fragptr.p->tabRef; - ref->fragId = fragptr.p->fragId; - ref->errorCode = tcConnectptr.p->errorCode; - sendSignal(tcConnectptr.p->clientBlockref, GSN_COPY_FRAGREF, signal, - CopyFragRef::SignalLength, JBB); - } else { - jam(); - CopyFragConf * const conf = (CopyFragConf *)&signal->theData[0]; - conf->userPtr = scanptr.p->copyPtr; - conf->sendingNodeId = cownNodeid; - conf->startingNodeId = scanptr.p->scanNodeId; - conf->tableId = tcConnectptr.p->tableref; - conf->fragId = tcConnectptr.p->fragmentid; - sendSignal(tcConnectptr.p->clientBlockref, GSN_COPY_FRAGCONF, signal, - CopyFragConf::SignalLength, JBB); - }//if - }//if - releaseActiveCopy(signal); - tcConnectptr.p->tcScanRec = RNIL; - finishScanrec(signal); - releaseOprec(signal); - releaseTcrec(signal, tcConnectptr); - releaseScanrec(signal); -}//Dblqh::tupCopyCloseConfLab() - -/*---------------------------------------------------------------------------*/ -/* A NODE FAILURE OCCURRED DURING THE COPY PROCESS. WE NEED TO CLOSE THE */ -/* COPY PROCESS SINCE A NODE FAILURE DURING THE COPY PROCESS WILL ALSO */ -/* FAIL THE NODE THAT IS TRYING TO START-UP. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::closeCopyRequestLab(Signal* signal) -{ - scanptr.p->scanErrorCounter++; - if (0) ndbout_c("closeCopyRequestLab: scanState: %d", scanptr.p->scanState); - switch (scanptr.p->scanState) { - case ScanRecord::WAIT_TUPKEY_COPY: - case ScanRecord::WAIT_NEXT_SCAN_COPY: - jam(); -/*---------------------------------------------------------------------------*/ -/* SET COMPLETION STATUS AND WAIT FOR OPPORTUNITY TO STOP THE SCAN. */ -// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT. -/*---------------------------------------------------------------------------*/ - scanptr.p->scanCompletedStatus = ZTRUE; - tcConnectptr.p->copyCountWords = 0; - break; - case ScanRecord::WAIT_ACC_COPY: - case ScanRecord::WAIT_STORED_PROC_COPY: - jam(); -/*---------------------------------------------------------------------------*/ -/* WE ARE CURRENTLY STARTING UP THE SCAN. SET COMPLETED STATUS AND WAIT FOR*/ -/* COMPLETION OF STARTUP. */ -/*---------------------------------------------------------------------------*/ - scanptr.p->scanCompletedStatus = ZTRUE; - break; - case ScanRecord::WAIT_CLOSE_COPY: - case ScanRecord::WAIT_DELETE_STORED_PROC_ID_COPY: - jam(); -/*---------------------------------------------------------------------------*/ -/* CLOSE IS ALREADY ONGOING. WE NEED NOT DO ANYTHING. */ -/*---------------------------------------------------------------------------*/ - break; - case ScanRecord::WAIT_LQHKEY_COPY: - jam(); -/*---------------------------------------------------------------------------*/ -/* WE ARE WAITING FOR THE FAILED NODE. THE NODE WILL NEVER COME BACK. 
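// Illustrative sketch (not from the removed NDB sources): the three ways the
// finished copy is reported by tupCopyCloseConfLab() above.  A close driven by
// TC takeover (NEW_FROM_TC) answers COPY_FRAGREF with ZNODE_FAILURE_ERROR, a
// copy that recorded errors answers COPY_FRAGREF with the saved error code,
// and a clean copy answers COPY_FRAGCONF.  Names below are placeholders.
#include <cstdint>

enum class CopyCloseReport { RefNodeFailure, RefSavedError, Conf };

CopyCloseReport copyCloseReport(bool newFromTc, uint32_t scanErrorCounter)
{
  if (newFromTc)            return CopyCloseReport::RefNodeFailure;
  if (scanErrorCounter > 0) return CopyCloseReport::RefSavedError;
  return CopyCloseReport::Conf;
}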
*/ -// WE NEED TO START THE FAILURE HANDLING IMMEDIATELY. -// ALSO SET NO OF WORDS OUTSTANDING TO ZERO TO AVOID ETERNAL WAIT. -/*---------------------------------------------------------------------------*/ - tcConnectptr.p->copyCountWords = 0; - closeCopyLab(signal); - break; - default: - ndbrequire(false); - break; - }//switch - return; -}//Dblqh::closeCopyRequestLab() - -/* ****************************************************** */ -/* COPY_ACTIVEREQ: Change state of a fragment to ACTIVE. */ -/* ****************************************************** */ -void Dblqh::execCOPY_ACTIVEREQ(Signal* signal) -{ - CRASH_INSERTION(5026); - - const CopyActiveReq * const req = (CopyActiveReq *)&signal->theData[0]; - jamEntry(); - Uint32 masterPtr = req->userPtr; - BlockReference masterRef = req->userRef; - tabptr.i = req->tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - Uint32 fragId = req->fragId; - ndbrequire(getFragmentrec(signal, fragId)); - - fragptr.p->fragDistributionKey = req->distributionKey; - - ndbrequire(cnoActiveCopy < 3); - cactiveCopy[cnoActiveCopy] = fragptr.i; - cnoActiveCopy++; - fragptr.p->masterBlockref = masterRef; - fragptr.p->masterPtr = masterPtr; - if (fragptr.p->fragStatus == Fragrecord::FSACTIVE) { - jam(); -/*------------------------------------------------------*/ -/* PROCESS HAVE ALREADY BEEN STARTED BY PREVIOUS */ -/* MASTER. WE HAVE ALREADY SET THE PROPER MASTER */ -/* BLOCK REFERENCE. */ -/*------------------------------------------------------*/ - if (fragptr.p->activeTcCounter == 0) { - jam(); -/*------------------------------------------------------*/ -/* PROCESS WAS EVEN COMPLETED. */ -/*------------------------------------------------------*/ - sendCopyActiveConf(signal, tabptr.i); - }//if - return; - }//if - - fragptr.p->fragStatus = Fragrecord::FSACTIVE; - if (TRACENR_FLAG) - TRACENR("tab: " << tabptr.i - << " frag: " << fragId - << " COPY ACTIVE" << endl); - - if (fragptr.p->lcpFlag == Fragrecord::LCP_STATE_TRUE) { - jam(); - fragptr.p->logFlag = Fragrecord::STATE_TRUE; - }//if - fragptr.p->activeTcCounter = 1; -/*------------------------------------------------------*/ -/* SET IT TO ONE TO ENSURE THAT IT IS NOT POSSIBLE*/ -/* TO DECREASE IT TO ZERO UNTIL WE HAVE COMPLETED */ -/* THE SCAN. 
*/ -/*------------------------------------------------------*/ - signal->theData[0] = ZSCAN_TC_CONNECT; - signal->theData[1] = 0; - signal->theData[2] = tabptr.i; - signal->theData[3] = fragId; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; -}//Dblqh::execCOPY_ACTIVEREQ() - -void Dblqh::scanTcConnectLab(Signal* signal, Uint32 tstartTcConnect, Uint32 fragId) -{ - Uint32 tendTcConnect; - - ndbrequire(getFragmentrec(signal, fragId)); - if ((tstartTcConnect + 200) >= ctcConnectrecFileSize) { - jam(); - tendTcConnect = ctcConnectrecFileSize - 1; - } else { - jam(); - tendTcConnect = tstartTcConnect + 200; - }//if - for (tcConnectptr.i = tstartTcConnect; - tcConnectptr.i <= tendTcConnect; - tcConnectptr.i++) { - jam(); - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) { - switch (tcConnectptr.p->logWriteState) { - case TcConnectionrec::NOT_WRITTEN: - jam(); - if (fragptr.i == tcConnectptr.p->fragmentptr) { - jam(); - fragptr.p->activeTcCounter = fragptr.p->activeTcCounter + 1; - tcConnectptr.p->logWriteState = TcConnectionrec::NOT_WRITTEN_WAIT; - }//if - break; - default: - jam(); - /*empty*/; - break; - }//switch - }//if - }//for - if (tendTcConnect < (ctcConnectrecFileSize - 1)) { - jam(); - signal->theData[0] = ZSCAN_TC_CONNECT; - signal->theData[1] = tendTcConnect + 1; - signal->theData[2] = tabptr.i; - signal->theData[3] = fragId; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - } else { - jam(); -/*------------------------------------------------------*/ -/* THE SCAN HAVE BEEN COMPLETED. WE CHECK IF ALL */ -/* OPERATIONS HAVE ALREADY BEEN COMPLETED. */ -/*------------------------------------------------------*/ - ndbrequire(fragptr.p->activeTcCounter > 0); - fragptr.p->activeTcCounter--; - if (fragptr.p->activeTcCounter == 0) { - jam(); -/*------------------------------------------------------*/ -/* SET START GLOBAL CHECKPOINT TO THE NEXT */ -/* CHECKPOINT WE HAVE NOT YET HEARD ANYTHING ABOUT*/ -/* THIS GCP WILL BE COMPLETELY COVERED BY THE LOG.*/ -/*------------------------------------------------------*/ - fragptr.p->startGci = cnewestGci + 1; - sendCopyActiveConf(signal, tabptr.i); - }//if - }//if - return; -}//Dblqh::scanTcConnectLab() - -/*---------------------------------------------------------------------------*/ -/* A NEW MASTER IS REQUESTING THE STATE IN LQH OF THE COPY FRAGMENT PARTS. */ -/*---------------------------------------------------------------------------*/ -/* ***************>> */ -/* COPY_STATEREQ > */ -/* ***************>> */ -void Dblqh::execCOPY_STATEREQ(Signal* signal) -{ - jamEntry(); - ndbrequire(0) -#if 0 - Uint32* dataPtr = &signal->theData[2]; - BlockReference tmasterBlockref = signal->theData[0]; - Uint32 tnoCopy = 0; - do { - jam(); - arrGuard(tnoCopy, 4); - fragptr.i = cactiveCopy[tnoCopy]; - if (fragptr.i == RNIL) { - jam(); - break; - }//if - c_fragment_pool.getPtr(fragptr); - if (fragptr.p->copyFragState != ZIDLE) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS FRAGMENT IS CURRENTLY ACTIVE IN COPYING THE FRAGMENT. 
*/ -/*---------------------------------------------------------------------------*/ - scanptr.i = fragptr.p->fragScanRec[NR_ScanNo]; - c_scanRecordPool.getPtr(scanptr); - if (scanptr.p->scanCompletedStatus == ZTRUE) { - jam(); - dataPtr[3 + (tnoCopy << 2)] = ZCOPY_CLOSING; - } else { - jam(); - dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ONGOING; - }//if - dataPtr[2 + (tnoCopy << 2)] = scanptr.p->scanSchemaVersion; - scanptr.p->scanApiBlockref = tmasterBlockref; - } else { - ndbrequire(fragptr.p->activeTcCounter != 0); -/*---------------------------------------------------------------------------*/ -/* COPY FRAGMENT IS COMPLETED AND WE ARE CURRENTLY GETTING THE STARTING */ -/* GCI OF THE NEW REPLICA OF THIS FRAGMENT. */ -/*---------------------------------------------------------------------------*/ - fragptr.p->masterBlockref = tmasterBlockref; - dataPtr[3 + (tnoCopy << 2)] = ZCOPY_ACTIVATION; - }//if - dataPtr[tnoCopy << 2] = fragptr.p->tabRef; - dataPtr[1 + (tnoCopy << 2)] = fragptr.p->fragId; - tnoCopy++; - } while (tnoCopy < cnoActiveCopy); - signal->theData[0] = cownNodeid; - signal->theData[1] = tnoCopy; - sendSignal(tmasterBlockref, GSN_COPY_STATECONF, signal, 18, JBB); -#endif - return; -}//Dblqh::execCOPY_STATEREQ() - -/* ========================================================================= */ -/* ======= INITIATE TC RECORD AT COPY FRAGMENT ======= */ -/* */ -/* SUBROUTINE SHORT NAME = ICT */ -/* ========================================================================= */ -void Dblqh::initCopyTc(Signal* signal, Operation_t op) -{ - tcConnectptr.p->operation = ZREAD; - tcConnectptr.p->apiVersionNo = 0; - tcConnectptr.p->opExec = 0; /* NOT INTERPRETED MODE */ - tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion; - Uint32 reqinfo = 0; - LqhKeyReq::setDirtyFlag(reqinfo, 1); - LqhKeyReq::setSimpleFlag(reqinfo, 1); - LqhKeyReq::setOperation(reqinfo, op); - LqhKeyReq::setGCIFlag(reqinfo, 1); - LqhKeyReq::setNrCopyFlag(reqinfo, 1); - /* AILen in LQHKEYREQ IS ZERO */ - tcConnectptr.p->reqinfo = reqinfo; -/* ------------------------------------------------------------------------ */ -/* THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */ -/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. 
*/
-/* ------------------------------------------------------------------------ */
-  tcConnectptr.p->nodeAfterNext[0] = ZNIL;
-  tcConnectptr.p->nodeAfterNext[1] = ZNIL;
-  tcConnectptr.p->tcBlockref = cownref;
-  tcConnectptr.p->readlenAi = 0;
-  tcConnectptr.p->storedProcId = ZNIL;
-  tcConnectptr.p->opExec = 0;
-  tcConnectptr.p->nextSeqNoReplica = 0;
-  tcConnectptr.p->dirtyOp = ZFALSE;
-  tcConnectptr.p->lastReplicaNo = 0;
-  tcConnectptr.p->currTupAiLen = 0;
-  tcConnectptr.p->tcTimer = cLqhTimeOutCount;
-}//Dblqh::initCopyTc()
-
-/* ------------------------------------------------------------------------- */
-/* -------               SEND COPY_ACTIVECONF TO MASTER DIH           ------- */
-/*                                                                           */
-/* ------------------------------------------------------------------------- */
-void Dblqh::sendCopyActiveConf(Signal* signal, Uint32 tableId)
-{
-  releaseActiveCopy(signal);
-  CopyActiveConf * const conf = (CopyActiveConf *)&signal->theData[0];
-  conf->userPtr = fragptr.p->masterPtr;
-  conf->tableId = tableId;
-  conf->fragId = fragptr.p->fragId;
-  conf->startingNodeId = cownNodeid;
-  conf->startGci = fragptr.p->startGci;
-  sendSignal(fragptr.p->masterBlockref, GSN_COPY_ACTIVECONF, signal,
-             CopyActiveConf::SignalLength, JBB);
-}//Dblqh::sendCopyActiveConf()
-
-/* ##########################################################################
- * #######                 LOCAL CHECKPOINT MODULE                   #######
- *
- * ##########################################################################
- * --------------------------------------------------------------------------
- *  THIS MODULE HANDLES THE EXECUTION AND CONTROL OF LOCAL CHECKPOINTS
- *  IT CONTROLS THE LOCAL CHECKPOINTS IN TUP AND ACC. IT DOES ALSO INTERACT
- *  WITH DIH TO CONTROL WHICH GLOBAL CHECKPOINTS THAT ARE RECOVERABLE
- * ------------------------------------------------------------------------- */
-void Dblqh::execEMPTY_LCP_REQ(Signal* signal)
-{
-  jamEntry();
-  CRASH_INSERTION(5008);
-  EmptyLcpReq * const emptyLcpOrd = (EmptyLcpReq*)&signal->theData[0];
-
-  lcpPtr.i = 0;
-  ptrAss(lcpPtr, lcpRecord);
-
-  Uint32 nodeId = refToNode(emptyLcpOrd->senderRef);
-
-  lcpPtr.p->m_EMPTY_LCP_REQ.set(nodeId);
-  lcpPtr.p->reportEmpty = true;
-
-  if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE){
-    jam();
-    bool ok = false;
-    switch(clcpCompletedState){
-    case LCP_IDLE:
-      ok = true;
-      sendEMPTY_LCP_CONF(signal, true);
-      break;
-    case LCP_RUNNING:
-      ok = true;
-      sendEMPTY_LCP_CONF(signal, false);
-      break;
-    case LCP_CLOSE_STARTED:
-      jam();
-    case ACC_LCP_CLOSE_COMPLETED:
-      jam();
-    case TUP_LCP_CLOSE_COMPLETED:
-      jam();
-      ok = true;
-      break;
-    }
-    ndbrequire(ok);
-
-  }//if
-
-  return;
-}//Dblqh::execEMPTY_LCPREQ()
-
-#ifdef NDB_DEBUG_FULL
-static struct TraceLCP {
-  void sendSignal(Uint32 ref, Uint32 gsn, Signal* signal,
-                  Uint32 len, Uint32 prio);
-  void save(Signal*);
-  void restore(SimulatedBlock&, Signal* sig);
-  struct Sig {
-    enum {
-      Sig_save = 0,
-      Sig_send = 1
-    } type;
-    SignalHeader header;
-    Uint32 theData[25];
-  };
-  Vector<Sig> m_signals;
-} g_trace_lcp;
-template class Vector<TraceLCP::Sig>;
-#else
-#endif
-
-void Dblqh::execLCP_FRAG_ORD(Signal* signal)
-{
-  jamEntry();
-  CRASH_INSERTION(5010);
-  LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
-
-  Uint32 lcpId = lcpFragOrd->lcpId;
-
-  lcpPtr.i = 0;
-  ptrAss(lcpPtr, lcpRecord);
-
-  lcpPtr.p->lastFragmentFlag = lcpFragOrd->lastFragmentFlag;
-  if (lcpFragOrd->lastFragmentFlag) {
-    jam();
-    if (lcpPtr.p->lcpState == LcpRecord::LCP_IDLE) {
-      jam();
-      /* ----------------------------------------------------------
-       *       NOW THE COMPLETE LOCAL CHECKPOINT
ROUND IS COMPLETED. - * -------------------------------------------------------- */ - if (cnoOfFragsCheckpointed > 0) { - jam(); - completeLcpRoundLab(signal, lcpId); - } else { - jam(); - sendLCP_COMPLETE_REP(signal, lcpId); - }//if - } - return; - }//if - tabptr.i = lcpFragOrd->tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - ndbrequire(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING || - tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE || - tabptr.p->tableStatus == Tablerec::TABLE_DEFINED); - - ndbrequire(getFragmentrec(signal, lcpFragOrd->fragmentId)); - - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - ndbrequire(!lcpPtr.p->lcpQueued); - - if (c_lcpId < lcpFragOrd->lcpId) { - jam(); - - lcpPtr.p->firstFragmentFlag= true; - - c_lcpId = lcpFragOrd->lcpId; - ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_IDLE); - setLogTail(signal, lcpFragOrd->keepGci); - ndbrequire(clcpCompletedState == LCP_IDLE); - clcpCompletedState = LCP_RUNNING; - } - cnoOfFragsCheckpointed++; - - if(tabptr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){ - jam(); - LcpRecord::FragOrd fragOrd; - fragOrd.fragPtrI = fragptr.i; - fragOrd.lcpFragOrd = * lcpFragOrd; - sendLCP_FRAG_REP(signal, fragOrd); - return; - } - - if (lcpPtr.p->lcpState != LcpRecord::LCP_IDLE) { - ndbrequire(lcpPtr.p->lcpQueued == false); - lcpPtr.p->lcpQueued = true; - lcpPtr.p->queuedFragment.fragPtrI = fragptr.i; - lcpPtr.p->queuedFragment.lcpFragOrd = * lcpFragOrd; - return; - }//if - - lcpPtr.p->currentFragment.fragPtrI = fragptr.i; - lcpPtr.p->currentFragment.lcpFragOrd = * lcpFragOrd; - - sendLCP_FRAGIDREQ(signal); -}//Dblqh::execLCP_FRAGORD() - -void Dblqh::execLCP_PREPARE_REF(Signal* signal) -{ - jamEntry(); - - LcpPrepareRef* ref= (LcpPrepareRef*)signal->getDataPtr(); - - lcpPtr.i = ref->senderData; - ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord); - ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_FRAGID); - - fragptr.i = lcpPtr.p->currentFragment.fragPtrI; - c_fragment_pool.getPtr(fragptr); - - ndbrequire(ref->tableId == fragptr.p->tabRef); - ndbrequire(ref->fragmentId == fragptr.p->fragId); - - tabptr.i = ref->tableId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - ndbrequire(lcpPtr.p->m_outstanding); - lcpPtr.p->m_outstanding--; - - /** - * Only BACKUP is allowed to ref LCP_PREPARE - */ - ndbrequire(refToBlock(signal->getSendersBlockRef()) == BACKUP); - lcpPtr.p->m_error = ref->errorCode; - - if (lcpPtr.p->m_outstanding == 0) - { - jam(); - - if(lcpPtr.p->firstFragmentFlag) - { - jam(); - LcpFragOrd *ord= (LcpFragOrd*)signal->getDataPtrSend(); - lcpPtr.p->firstFragmentFlag= false; - *ord = lcpPtr.p->currentFragment.lcpFragOrd; - EXECUTE_DIRECT(PGMAN, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - - /** - * First fragment mean that last LCP is complete :-) - */ - EXECUTE_DIRECT(TSMAN, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - } - - lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED; - contChkpNextFragLab(signal); - } -} - -/* -------------------------------------------------------------------------- - * PRECONDITION: LCP_PTR:LCP_STATE = WAIT_FRAGID - * -------------------------------------------------------------------------- - * WE NOW HAVE THE LOCAL FRAGMENTS THAT THE LOCAL CHECKPOINT WILL USE. 
- * -------------------------------------------------------------------------- */ -void Dblqh::execLCP_PREPARE_CONF(Signal* signal) -{ - jamEntry(); - - LcpPrepareConf* conf= (LcpPrepareConf*)signal->getDataPtr(); - - lcpPtr.i = conf->senderData; - ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord); - ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_WAIT_FRAGID); - - fragptr.i = lcpPtr.p->currentFragment.fragPtrI; - c_fragment_pool.getPtr(fragptr); - - if (refToBlock(signal->getSendersBlockRef()) != PGMAN) - { - ndbrequire(conf->tableId == fragptr.p->tabRef); - ndbrequire(conf->fragmentId == fragptr.p->fragId); - } - - ndbrequire(lcpPtr.p->m_outstanding); - lcpPtr.p->m_outstanding--; - if (lcpPtr.p->m_outstanding == 0) - { - jam(); - - if(lcpPtr.p->firstFragmentFlag) - { - jam(); - LcpFragOrd *ord= (LcpFragOrd*)signal->getDataPtrSend(); - lcpPtr.p->firstFragmentFlag= false; - *ord = lcpPtr.p->currentFragment.lcpFragOrd; - EXECUTE_DIRECT(PGMAN, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - - /** - * First fragment mean that last LCP is complete :-) - */ - EXECUTE_DIRECT(TSMAN, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - } - - if (lcpPtr.p->m_error) - { - jam(); - - lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED; - contChkpNextFragLab(signal); - return; - } - - lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_HOLDOPS; - lcpPtr.p->lcpState = LcpRecord::LCP_START_CHKP; - - /* ---------------------------------------------------------------------- - * UPDATE THE MAX_GCI_IN_LCP AND MAX_GCI_COMPLETED_IN_LCP NOW BEFORE - * ACTIVATING THE FRAGMENT AGAIN. - * --------------------------------------------------------------------- */ - ndbrequire(lcpPtr.p->currentFragment.lcpFragOrd.lcpNo < MAX_LCP_STORED); - fragptr.p->maxGciInLcp = fragptr.p->newestGci; - fragptr.p->maxGciCompletedInLcp = cnewestCompletedGci; - - { - LcpFragOrd *ord= (LcpFragOrd*)signal->getDataPtrSend(); - *ord = lcpPtr.p->currentFragment.lcpFragOrd; - EXECUTE_DIRECT(LGMAN, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - - *ord = lcpPtr.p->currentFragment.lcpFragOrd; - EXECUTE_DIRECT(DBTUP, GSN_LCP_FRAG_ORD, signal, signal->length()); - jamEntry(); - } - - BackupFragmentReq* req= (BackupFragmentReq*)signal->getDataPtr(); - req->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId; - req->fragmentNo = 0; - req->backupPtr = m_backup_ptr; - req->backupId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId; - req->count = 0; - -#ifdef NDB_DEBUG_FULL - if(ERROR_INSERTED(5904)) - { - g_trace_lcp.sendSignal(BACKUP_REF, GSN_BACKUP_FRAGMENT_REQ, signal, - BackupFragmentReq::SignalLength, JBB); - } - else -#endif - { - if (ERROR_INSERTED(5044) && - (fragptr.p->tabRef == c_error_insert_table_id) && - fragptr.p->fragId) // Not first frag - { - /** - * Force CRASH_INSERTION in 10s - */ - ndbout_c("table: %d frag: %d", fragptr.p->tabRef, fragptr.p->fragId); - SET_ERROR_INSERT_VALUE(5027); - sendSignalWithDelay(reference(), GSN_START_RECREQ, signal, 10000, 1); - } - else - { - sendSignal(BACKUP_REF, GSN_BACKUP_FRAGMENT_REQ, signal, - BackupFragmentReq::SignalLength, JBB); - } - } - } -} - -void Dblqh::execBACKUP_FRAGMENT_REF(Signal* signal) -{ - BackupFragmentRef *ref= (BackupFragmentRef*)signal->getDataPtr(); - char buf[100]; - BaseString::snprintf(buf,sizeof(buf), - "Unable to store fragment during LCP. NDBFS Error: %u", - ref->errorCode); - - progError(__LINE__, - (ref->errorCode & FsRef::FS_ERR_BIT)? 
- NDBD_EXIT_AFS_UNKNOWN - : ref->errorCode, - buf); -} - -void Dblqh::execBACKUP_FRAGMENT_CONF(Signal* signal) -{ - jamEntry(); - //BackupFragmentConf* conf= (BackupFragmentConf*)signal->getDataPtr(); - - lcpPtr.i = 0; - ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord); - ndbrequire(lcpPtr.p->lcpState == LcpRecord::LCP_START_CHKP); - lcpPtr.p->lcpState = LcpRecord::LCP_COMPLETED; - - /* ------------------------------------------------------------------------ - * THE LOCAL CHECKPOINT HAS BEEN COMPLETED. IT IS NOW TIME TO START - * A LOCAL CHECKPOINT ON THE NEXT FRAGMENT OR COMPLETE THIS LCP ROUND. - * ------------------------------------------------------------------------ - * WE START BY SENDING LCP_REPORT TO DIH TO REPORT THE COMPLETED LCP. - * TO CATER FOR NODE CRASHES WE SEND IT IN PARALLEL TO ALL NODES. - * ----------------------------------------------------------------------- */ - fragptr.i = lcpPtr.p->currentFragment.fragPtrI; - c_fragment_pool.getPtr(fragptr); - - contChkpNextFragLab(signal); - return; -}//Dblqh::lcpCompletedLab() - -void -Dblqh::sendLCP_FRAG_REP(Signal * signal, - const LcpRecord::FragOrd & fragOrd) const { - - const Fragrecord* fragPtrP = c_fragment_pool.getConstPtr(fragOrd.fragPtrI); - - ndbrequire(fragOrd.lcpFragOrd.lcpNo < MAX_LCP_STORED); - LcpFragRep * const lcpReport = (LcpFragRep *)&signal->theData[0]; - lcpReport->nodeId = cownNodeid; - lcpReport->lcpId = fragOrd.lcpFragOrd.lcpId; - lcpReport->lcpNo = fragOrd.lcpFragOrd.lcpNo; - lcpReport->tableId = fragOrd.lcpFragOrd.tableId; - lcpReport->fragId = fragOrd.lcpFragOrd.fragmentId; - lcpReport->maxGciCompleted = fragPtrP->maxGciCompletedInLcp; - lcpReport->maxGciStarted = fragPtrP->maxGciInLcp; - - for (Uint32 i = 0; i < cnoOfNodes; i++) { - jam(); - Uint32 nodeId = cnodeData[i]; - if(cnodeStatus[i] == ZNODE_UP){ - jam(); - BlockReference Tblockref = calcDihBlockRef(nodeId); - sendSignal(Tblockref, GSN_LCP_FRAG_REP, signal, - LcpFragRep::SignalLength, JBB); - }//if - }//for -} - -void Dblqh::contChkpNextFragLab(Signal* signal) -{ - /* ------------------------------------------------------------------------ - * UPDATE THE LATEST LOCAL CHECKPOINT COMPLETED ON FRAGMENT. - * UPDATE THE LCP_ID OF THIS CHECKPOINT. - * REMOVE THE LINK BETWEEN THE FRAGMENT RECORD AND THE LCP RECORD. - * ----------------------------------------------------------------------- */ - if (fragptr.p->fragStatus == Fragrecord::BLOCKED) { - jam(); - /** - * LCP of fragment complete - * but restarting of operations isn't - */ - lcpPtr.p->lcpState = LcpRecord::LCP_BLOCKED_COMP; - return; - }//if - - /** - * Send rep when fragment is done + unblocked - */ - sendLCP_FRAG_REP(signal, lcpPtr.p->currentFragment); - - /* ------------------------------------------------------------------------ - * WE ALSO RELEASE THE LOCAL LCP RECORDS. - * ----------------------------------------------------------------------- */ - if (lcpPtr.p->lcpQueued) { - jam(); - /* ---------------------------------------------------------------------- - * Transfer the state from the queued to the active LCP. - * --------------------------------------------------------------------- */ - lcpPtr.p->lcpQueued = false; - lcpPtr.p->currentFragment = lcpPtr.p->queuedFragment; - - /* ---------------------------------------------------------------------- - * START THE QUEUED LOCAL CHECKPOINT. 
- * --------------------------------------------------------------------- */ - sendLCP_FRAGIDREQ(signal); - return; - }//if - - lcpPtr.p->lcpState = LcpRecord::LCP_IDLE; - if (lcpPtr.p->lastFragmentFlag){ - jam(); - /* ---------------------------------------------------------------------- - * NOW THE COMPLETE LOCAL CHECKPOINT ROUND IS COMPLETED. - * --------------------------------------------------------------------- */ - completeLcpRoundLab(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId); - return; - }//if - - if (lcpPtr.p->reportEmpty) { - jam(); - sendEMPTY_LCP_CONF(signal, false); - }//if - return; -}//Dblqh::contChkpNextFragLab() - -void Dblqh::sendLCP_FRAGIDREQ(Signal* signal) -{ - TablerecPtr tabPtr; - tabPtr.i = lcpPtr.p->currentFragment.lcpFragOrd.tableId; - ptrAss(tabPtr, tablerec); - if(tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_ONGOING || - tabPtr.p->tableStatus == Tablerec::PREP_DROP_TABLE_DONE){ - jam(); - /** - * Fake that the fragment is done - */ - contChkpNextFragLab(signal); - return; - } - - lcpPtr.p->m_error = 0; - lcpPtr.p->m_outstanding = 1; - - ndbrequire(tabPtr.p->tableStatus == Tablerec::TABLE_DEFINED); - - lcpPtr.p->lcpState = LcpRecord::LCP_WAIT_FRAGID; - LcpPrepareReq* req= (LcpPrepareReq*)signal->getDataPtr(); - req->senderData = lcpPtr.i; - req->senderRef = reference(); - req->lcpNo = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo; - req->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId; - req->fragmentId = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId; - req->lcpId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId % MAX_LCP_STORED; - req->backupPtr = m_backup_ptr; - req->backupId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId; - sendSignal(BACKUP_REF, GSN_LCP_PREPARE_REQ, signal, - LcpPrepareReq::SignalLength, JBB); - -}//Dblqh::sendLCP_FRAGIDREQ() - -void Dblqh::sendEMPTY_LCP_CONF(Signal* signal, bool idle) -{ - - EmptyLcpConf * const rep = (EmptyLcpConf*)&signal->theData[0]; - /* ---------------------------------------------------------------------- - * We have been requested to report when there are no more local - * waiting to be started or ongoing. In this signal we also report - * the last completed fragments state. - * ---------------------------------------------------------------------- */ - rep->senderNodeId = getOwnNodeId(); - if(!idle){ - jam(); - rep->idle = 0 ; - rep->tableId = lcpPtr.p->currentFragment.lcpFragOrd.tableId; - rep->fragmentId = lcpPtr.p->currentFragment.lcpFragOrd.fragmentId; - rep->lcpNo = lcpPtr.p->currentFragment.lcpFragOrd.lcpNo; - rep->lcpId = lcpPtr.p->currentFragment.lcpFragOrd.lcpId; - } else { - jam(); - rep->idle = 1; - rep->tableId = ~0; - rep->fragmentId = ~0; - rep->lcpNo = ~0; - rep->lcpId = c_lcpId; - } - - for (Uint32 i = 0; i < cnoOfNodes; i++) { - jam(); - Uint32 nodeId = cnodeData[i]; - if (lcpPtr.p->m_EMPTY_LCP_REQ.get(nodeId)) { - jam(); - - BlockReference blockref = calcDihBlockRef(nodeId); - sendSignal(blockref, GSN_EMPTY_LCP_CONF, signal, - EmptyLcpConf::SignalLength, JBB); - }//if - }//for - - lcpPtr.p->reportEmpty = false; - lcpPtr.p->m_EMPTY_LCP_REQ.clear(); -}//Dblqh::sendEMPTY_LCPCONF() - -/* -------------------------------------------------------------------------- - * THE LOCAL CHECKPOINT ROUND IS NOW COMPLETED. SEND COMPLETED MESSAGE - * TO THE MASTER DIH. 
- * ------------------------------------------------------------------------- */ -void Dblqh::completeLcpRoundLab(Signal* signal, Uint32 lcpId) -{ - clcpCompletedState = LCP_CLOSE_STARTED; - - EndLcpReq* req= (EndLcpReq*)signal->getDataPtr(); - req->senderData= lcpPtr.i; - req->senderRef= reference(); - req->backupPtr= m_backup_ptr; - req->backupId= lcpId; - sendSignal(BACKUP_REF, GSN_END_LCP_REQ, signal, - EndLcpReq::SignalLength, JBB); - - sendSignal(PGMAN_REF, GSN_END_LCP_REQ, signal, - EndLcpReq::SignalLength, JBB); - - sendSignal(LGMAN_REF, GSN_END_LCP_REQ, signal, - EndLcpReq::SignalLength, JBB); - - EXECUTE_DIRECT(TSMAN, GSN_END_LCP_REQ, signal, EndLcpReq::SignalLength); - jamEntry(); - - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - lcpPtr.p->m_outstanding = 3; - return; -}//Dblqh::completeLcpRoundLab() - -void Dblqh::execEND_LCPCONF(Signal* signal) -{ - jamEntry(); - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - - ndbrequire(clcpCompletedState == LCP_CLOSE_STARTED); - ndbrequire(lcpPtr.p->m_outstanding); - - lcpPtr.p->m_outstanding--; - if(lcpPtr.p->m_outstanding == 0) - { - jam(); - clcpCompletedState = LCP_IDLE; - sendLCP_COMPLETE_REP(signal, lcpPtr.p->currentFragment.lcpFragOrd.lcpId); - } -}//Dblqh::execEND_LCPCONF() - -void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId) -{ - cnoOfFragsCheckpointed = 0; - ndbrequire((cnoOfNodes - 1) < (MAX_NDB_NODES - 1)); - /* ------------------------------------------------------------------------ - * WE SEND COMP_LCP_ROUND TO ALL NODES TO PREPARE FOR NODE CRASHES. - * ----------------------------------------------------------------------- */ - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - lcpPtr.p->lastFragmentFlag = false; - lcpPtr.p->firstFragmentFlag = false; - - LcpCompleteRep* rep = (LcpCompleteRep*)signal->getDataPtrSend(); - rep->nodeId = getOwnNodeId(); - rep->lcpId = lcpId; - rep->blockNo = DBLQH; - - for (Uint32 i = 0; i < cnoOfNodes; i++) { - jam(); - Uint32 nodeId = cnodeData[i]; - if(cnodeStatus[i] == ZNODE_UP){ - jam(); - - BlockReference blockref = calcDihBlockRef(nodeId); - sendSignal(blockref, GSN_LCP_COMPLETE_REP, signal, - LcpCompleteRep::SignalLength, JBB); - }//if - }//for - - if(lcpPtr.p->reportEmpty){ - jam(); - sendEMPTY_LCP_CONF(signal, true); - } - - if (getNodeState().getNodeRestartInProgress() && cstartRecReq != 3) - { - jam(); - ndbrequire(cstartRecReq == 2); - cstartRecReq = 3; - } - return; - -}//Dblqh::sendCOMP_LCP_ROUND() - - -/* ------------------------------------------------------------------------- */ -/* ------- SEND ACC_LCPREQ AND TUP_LCPREQ ------- */ -/* */ -/* INPUT: LCP_PTR LOCAL CHECKPOINT RECORD */ -/* FRAGPTR FRAGMENT RECORD */ -/* SUBROUTINE SHORT NAME = STL */ -/* ------------------------------------------------------------------------- */ -void Dblqh::sendStartLcp(Signal* signal) -{ -}//Dblqh::sendStartLcp() - -/* ------------------------------------------------------------------------- */ -/* ------- SET THE LOG TAIL IN THE LOG FILES ------- */ -/* */ -/*THIS SUBROUTINE HAVE BEEN BUGGY AND IS RATHER COMPLEX. IT IS IMPORTANT TO */ -/*REMEMBER THAT WE SEARCH FROM THE TAIL UNTIL WE REACH THE HEAD (CURRENT). */ -/*THE TAIL AND HEAD CAN BE ON THE SAME MBYTE. WE SEARCH UNTIL WE FIND A MBYTE*/ -/*THAT WE NEED TO KEEP. WE THEN SET THE TAIL TO BE THE PREVIOUS. IF WE DO */ -/*NOT FIND A MBYTE THAT WE NEED TO KEEP UNTIL WE REACH THE HEAD THEN WE USE */ -/*THE HEAD AS TAIL. FINALLY WE HAVE TO MOVE BACK THE TAIL TO ALSO INCLUDE */ -/*ALL PREPARE RECORDS. 
THIS MEANS THAT LONG-LIVED TRANSACTIONS ARE DANGEROUS */ -/*FOR SHORT LOGS. */ -/* ------------------------------------------------------------------------- */ - -// this function has not been verified yet -Uint32 Dblqh::remainingLogSize(const LogFileRecordPtr &sltCurrLogFilePtr, - const LogPartRecordPtr &sltLogPartPtr) -{ - Uint32 hf = sltCurrLogFilePtr.p->fileNo*clogFileSize+sltCurrLogFilePtr.p->currentMbyte; - Uint32 tf = sltLogPartPtr.p->logTailFileNo*clogFileSize+sltLogPartPtr.p->logTailMbyte; - Uint32 sz = sltLogPartPtr.p->noLogFiles*clogFileSize; - if (tf > hf) hf += sz; - return sz-(hf-tf); -} - -void Dblqh::setLogTail(Signal* signal, Uint32 keepGci) -{ - LogPartRecordPtr sltLogPartPtr; - LogFileRecordPtr sltLogFilePtr; -#if 0 - LogFileRecordPtr sltCurrLogFilePtr; -#endif - UintR tsltMbyte; - UintR tsltStartMbyte; - UintR tsltIndex; - UintR tsltFlag; - - for (sltLogPartPtr.i = 0; sltLogPartPtr.i < 4; sltLogPartPtr.i++) { - jam(); - ptrAss(sltLogPartPtr, logPartRecord); - findLogfile(signal, sltLogPartPtr.p->logTailFileNo, - sltLogPartPtr, &sltLogFilePtr); - -#if 0 - sltCurrLogFilePtr.i = sltLogPartPtr.p->currentLogfile; - ptrCheckGuard(sltCurrLogFilePtr, clogFileFileSize, logFileRecord); - infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i, - remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte); -#endif - - tsltMbyte = sltLogPartPtr.p->logTailMbyte; - tsltStartMbyte = tsltMbyte; - tsltFlag = ZFALSE; - if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) { -/* ------------------------------------------------------------------------- */ -/*THE LOG AND THE TAIL IS ALREADY IN THE SAME FILE. */ -/* ------------------------------------------------------------------------- */ - if (sltLogFilePtr.p->currentMbyte >= sltLogPartPtr.p->logTailMbyte) { - jam(); -/* ------------------------------------------------------------------------- */ -/*THE CURRENT MBYTE IS AHEAD OF OR AT THE TAIL. THUS WE WILL ONLY LOOK FOR */ -/*THE TAIL UNTIL WE REACH THE CURRENT MBYTE WHICH IS IN THIS LOG FILE. */ -/*IF THE LOG TAIL IS AHEAD OF THE CURRENT MBYTE BUT IN THE SAME LOG FILE */ -/*THEN WE HAVE TO SEARCH THROUGH ALL FILES BEFORE WE COME TO THE CURRENT */ -/*MBYTE. WE ALWAYS STOP WHEN WE COME TO THE CURRENT MBYTE SINCE THE TAIL */ -/*CAN NEVER BE BEFORE THE HEAD. */ -/* ------------------------------------------------------------------------- */ - tsltFlag = ZTRUE; - }//if - }//if - -/* ------------------------------------------------------------------------- */ -/*NOW START SEARCHING FOR THE NEW TAIL, STARTING AT THE CURRENT TAIL AND */ -/*PROCEEDING UNTIL WE FIND A MBYTE WHICH IS NEEDED TO KEEP OR UNTIL WE REACH */ -/*CURRENT MBYTE (THE HEAD). */ -/* ------------------------------------------------------------------------- */ - SLT_LOOP: - for (tsltIndex = tsltStartMbyte; - tsltIndex <= clogFileSize - 1; - tsltIndex++) { - if (sltLogFilePtr.p->logMaxGciStarted[tsltIndex] >= keepGci) { -/* ------------------------------------------------------------------------- */ -/*WE ARE NOT ALLOWED TO STEP THE LOG ANY FURTHER AHEAD */ -/*SET THE NEW LOG TAIL AND CONTINUE WITH NEXT LOG PART. */ -/*THIS MBYTE IS NOT TO BE INCLUDED SO WE NEED TO STEP BACK ONE MBYTE. 
*/ -/* ------------------------------------------------------------------------- */ - if (tsltIndex != 0) { - jam(); - tsltMbyte = tsltIndex - 1; - } else { - jam(); -/* ------------------------------------------------------------------------- */ -/*STEPPING BACK INCLUDES ALSO STEPPING BACK TO THE PREVIOUS LOG FILE. */ -/* ------------------------------------------------------------------------- */ - tsltMbyte = clogFileSize - 1; - sltLogFilePtr.i = sltLogFilePtr.p->prevLogFile; - ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord); - }//if - goto SLT_BREAK; - } else { - jam(); - if (tsltFlag == ZTRUE) { -/* ------------------------------------------------------------------------- */ -/*WE ARE IN THE SAME FILE AS THE CURRENT MBYTE AND WE CAN REACH THE CURRENT */ -/*MBYTE BEFORE WE REACH A NEW TAIL. */ -/* ------------------------------------------------------------------------- */ - if (tsltIndex == sltLogFilePtr.p->currentMbyte) { - jam(); -/* ------------------------------------------------------------------------- */ -/*THE TAIL OF THE LOG IS ACTUALLY WITHIN THE CURRENT MBYTE. THUS WE SET THE */ -/*LOG TAIL TO BE THE CURRENT MBYTE. */ -/* ------------------------------------------------------------------------- */ - tsltMbyte = sltLogFilePtr.p->currentMbyte; - goto SLT_BREAK; - }//if - }//if - }//if - }//for - sltLogFilePtr.i = sltLogFilePtr.p->nextLogFile; - ptrCheckGuard(sltLogFilePtr, clogFileFileSize, logFileRecord); - if (sltLogFilePtr.i == sltLogPartPtr.p->currentLogfile) { - jam(); - tsltFlag = ZTRUE; - }//if - tsltStartMbyte = 0; - goto SLT_LOOP; - SLT_BREAK: - jam(); - { - UintR ToldTailFileNo = sltLogPartPtr.p->logTailFileNo; - UintR ToldTailMByte = sltLogPartPtr.p->logTailMbyte; - - arrGuard(tsltMbyte, clogFileSize); - sltLogPartPtr.p->logTailFileNo = - sltLogFilePtr.p->logLastPrepRef[tsltMbyte] >> 16; -/* ------------------------------------------------------------------------- */ -/*SINCE LOG_MAX_GCI_STARTED ONLY KEEP TRACK OF COMMIT LOG RECORDS WE ALSO */ -/*HAVE TO STEP BACK THE TAIL SO THAT WE INCLUDE ALL PREPARE RECORDS */ -/*NEEDED FOR THOSE COMMIT RECORDS IN THIS MBYTE. THIS IS A RATHER */ -/*CONSERVATIVE APPROACH BUT IT WORKS. */ -/* ------------------------------------------------------------------------- */ - sltLogPartPtr.p->logTailMbyte = - sltLogFilePtr.p->logLastPrepRef[tsltMbyte] & 65535; - if ((ToldTailFileNo != sltLogPartPtr.p->logTailFileNo) || - (ToldTailMByte != sltLogPartPtr.p->logTailMbyte)) { - jam(); - if (sltLogPartPtr.p->logPartState == LogPartRecord::TAIL_PROBLEM) { - if (sltLogPartPtr.p->firstLogQueue == RNIL) { - jam(); - sltLogPartPtr.p->logPartState = LogPartRecord::IDLE; - } else { - jam(); - sltLogPartPtr.p->logPartState = LogPartRecord::ACTIVE; - }//if - }//if - }//if - } -#if 0 - infoEvent("setLogTail: Available log file %d size = %d[mbytes]+%d[words]", sltLogPartPtr.i, - remainingLogSize(sltCurrLogFilePtr, sltLogPartPtr), sltCurrLogFilePtr.p->remainingWordsInMbyte); -#endif - }//for - -}//Dblqh::setLogTail() - -/* ######################################################################### */ -/* ####### GLOBAL CHECKPOINT MODULE ####### */ -/* */ -/* ######################################################################### */ -/*---------------------------------------------------------------------------*/ -/* THIS MODULE HELPS DIH IN DISCOVERING WHEN GLOBAL CHECKPOINTS ARE */ -/* RECOVERABLE. IT HANDLES THE REQUEST GCP_SAVEREQ THAT REQUESTS LQH TO */ -/* SAVE A PARTICULAR GLOBAL CHECKPOINT TO DISK AND RESPOND WHEN COMPLETED. 
*/ -/*---------------------------------------------------------------------------*/ -/* *************** */ -/* GCP_SAVEREQ > */ -/* *************** */ -void Dblqh::execGCP_SAVEREQ(Signal* signal) -{ - jamEntry(); - const GCPSaveReq * const saveReq = (GCPSaveReq *)&signal->theData[0]; - - if (ERROR_INSERTED(5000)) { - systemErrorLab(signal, __LINE__); - } - - if (ERROR_INSERTED(5007)){ - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_GCP_SAVEREQ, signal, 10000, - signal->length()); - return; - } - - const Uint32 dihBlockRef = saveReq->dihBlockRef; - const Uint32 dihPtr = saveReq->dihPtr; - const Uint32 gci = saveReq->gci; - - if(getNodeState().startLevel >= NodeState::SL_STOPPING_4){ - GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0]; - saveRef->dihPtr = dihPtr; - saveRef->nodeId = getOwnNodeId(); - saveRef->gci = gci; - saveRef->errorCode = GCPSaveRef::NodeShutdownInProgress; - sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal, - GCPSaveRef::SignalLength, JBB); - return; - } - - if (getNodeState().getNodeRestartInProgress() && cstartRecReq < 2) - { - GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0]; - saveRef->dihPtr = dihPtr; - saveRef->nodeId = getOwnNodeId(); - saveRef->gci = gci; - saveRef->errorCode = GCPSaveRef::NodeRestartInProgress; - sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal, - GCPSaveRef::SignalLength, JBB); - return; - } - - ndbrequire(gci >= cnewestCompletedGci); - - if (gci == cnewestCompletedGci) { -/*---------------------------------------------------------------------------*/ -/* GLOBAL CHECKPOINT HAVE ALREADY BEEN HANDLED. REQUEST MUST HAVE BEEN SENT */ -/* FROM NEW MASTER DIH. */ -/*---------------------------------------------------------------------------*/ - if (ccurrentGcprec == RNIL) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS INDICATES THAT WE HAVE ALREADY SENT GCP_SAVECONF TO PREVIOUS MASTER. */ -/* WE SIMPLY SEND IT ALSO TO THE NEW MASTER. */ -/*---------------------------------------------------------------------------*/ - GCPSaveConf * const saveConf = (GCPSaveConf*)&signal->theData[0]; - saveConf->dihPtr = dihPtr; - saveConf->nodeId = getOwnNodeId(); - saveConf->gci = cnewestCompletedGci; - sendSignal(dihBlockRef, GSN_GCP_SAVECONF, signal, - GCPSaveConf::SignalLength, JBA); - return; - } - jam(); -/*---------------------------------------------------------------------------*/ -/* WE HAVE NOT YET SENT THE RESPONSE TO THE OLD MASTER. WE WILL SET THE NEW */ -/* RECEIVER OF THE RESPONSE AND THEN EXIT SINCE THE PROCESS IS ALREADY */ -/* STARTED. 
*/ -/*---------------------------------------------------------------------------*/ - gcpPtr.i = ccurrentGcprec; - ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord); - gcpPtr.p->gcpUserptr = dihPtr; - gcpPtr.p->gcpBlockref = dihBlockRef; - return; - }//if - - ndbrequire(ccurrentGcprec == RNIL); - cnewestCompletedGci = gci; - if (gci > cnewestGci) { - jam(); - cnewestGci = gci; - }//if - - if(getNodeState().getNodeRestartInProgress() && cstartRecReq < 3) - { - GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0]; - saveRef->dihPtr = dihPtr; - saveRef->nodeId = getOwnNodeId(); - saveRef->gci = gci; - saveRef->errorCode = GCPSaveRef::NodeRestartInProgress; - sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal, - GCPSaveRef::SignalLength, JBB); - return; - } - - ccurrentGcprec = 0; - gcpPtr.i = ccurrentGcprec; - ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord); - - gcpPtr.p->gcpBlockref = dihBlockRef; - gcpPtr.p->gcpUserptr = dihPtr; - gcpPtr.p->gcpId = gci; - bool tlogActive = false; - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - ptrAss(logPartPtr, logPartRecord); - if (logPartPtr.p->logPartState == LogPartRecord::ACTIVE) { - jam(); - logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_TRUE; - tlogActive = true; - } else { - jam(); - logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE; - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - writeCompletedGciLog(signal); - }//if - }//for - if (tlogActive == true) { - jam(); - return; - }//if - initGcpRecLab(signal); - startTimeSupervision(signal); - return; -}//Dblqh::execGCP_SAVEREQ() - -/* ------------------------------------------------------------------------- */ -/* START TIME SUPERVISION OF THE LOG PARTS. */ -/* ------------------------------------------------------------------------- */ -void Dblqh::startTimeSupervision(Signal* signal) -{ - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); -/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ -/* WE HAVE TO START CHECKING IF THE LOG IS TO BE WRITTEN EVEN IF PAGES ARE */ -/* FULL. INITIALISE THE VALUES OF WHERE WE ARE IN THE LOG CURRENTLY. */ -/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */ - logPartPtr.p->logPartTimer = 0; - logPartPtr.p->logTimer = 1; - signal->theData[0] = ZTIME_SUPERVISION; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//for -}//Dblqh::startTimeSupervision() - -/*---------------------------------------------------------------------------*/ -/* WE SET THE GLOBAL CHECKPOINT VARIABLES AFTER WRITING THE COMPLETED GCI LOG*/ -/* RECORD. THIS ENSURES THAT WE WILL ENCOUNTER THE COMPLETED GCI RECORD WHEN */ -/* WE EXECUTE THE FRAGMENT LOG. 
*/ -/*---------------------------------------------------------------------------*/ -void Dblqh::initGcpRecLab(Signal* signal) -{ -/* ======================================================================== */ -/* ======= INITIATE GCP RECORD ======= */ -/* */ -/* SUBROUTINE SHORT NAME = IGR */ -/* ======================================================================== */ - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); -/*--------------------------------------------------*/ -/* BY SETTING THE GCPREC = 0 WE START THE */ -/* CHECKING BY CHECK_GCP_COMPLETED. THIS */ -/* CHECKING MUST NOT BE STARTED UNTIL WE HAVE */ -/* INSERTED ALL COMPLETE GCI LOG RECORDS IN */ -/* ALL LOG PARTS. */ -/*--------------------------------------------------*/ - logPartPtr.p->gcprec = 0; - gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZWAIT_DISK; - gcpPtr.p->gcpSyncReady[logPartPtr.i] = ZFALSE; - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - gcpPtr.p->gcpFilePtr[logPartPtr.i] = logFilePtr.i; - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - if (logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] == ZPAGE_HEADER_SIZE) { - jam(); -/*--------------------------------------------------*/ -/* SINCE THE CURRENT FILEPAGE POINTS AT THE */ -/* NEXT WORD TO BE WRITTEN WE HAVE TO ADJUST */ -/* FOR THIS BY DECREASING THE FILE PAGE BY ONE*/ -/* IF NO WORD HAS BEEN WRITTEN ON THE CURRENT */ -/* FILEPAGE. */ -/*--------------------------------------------------*/ - gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage - 1; - gcpPtr.p->gcpWordNo[logPartPtr.i] = ZPAGE_SIZE - 1; - } else { - jam(); - gcpPtr.p->gcpPageNo[logPartPtr.i] = logFilePtr.p->currentFilepage; - gcpPtr.p->gcpWordNo[logPartPtr.i] = - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1; - }//if - }//for - return; -}//Dblqh::initGcpRecLab() - -/* ========================================================================= */ -/* ==== CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED AFTER A COMPLETED===== */ -/* DISK WRITE. */ -/* */ -/* SUBROUTINE SHORT NAME = CGC */ -/* ========================================================================= */ -void Dblqh::checkGcpCompleted(Signal* signal, - Uint32 tcgcPageWritten, - Uint32 tcgcWordWritten) -{ - UintR tcgcFlag; - UintR tcgcJ; - - gcpPtr.i = logPartPtr.p->gcprec; - if (gcpPtr.i != RNIL) { - jam(); -/* ------------------------------------------------------------------------- */ -/* IF THE GLOBAL CHECKPOINT IS NOT WAITING FOR COMPLETION THEN WE CAN QUIT */ -/* THE SEARCH IMMEDIATELY. */ -/* ------------------------------------------------------------------------- */ - ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord); - if (gcpPtr.p->gcpFilePtr[logPartPtr.i] == logFilePtr.i) { -/* ------------------------------------------------------------------------- */ -/* IF THE COMPLETED DISK OPERATION WAS ON ANOTHER FILE THAN THE ONE WE ARE */ -/* WAITING FOR, THEN WE CAN ALSO QUIT THE SEARCH IMMEDIATELY. */ -/* ------------------------------------------------------------------------- */ - if (tcgcPageWritten < gcpPtr.p->gcpPageNo[logPartPtr.i]) { - jam(); -/* ------------------------------------------------------------------------- */ -/* THIS LOG PART HAVE NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. 
*/ -/* ------------------------------------------------------------------------- */ - return; - } else { - if (tcgcPageWritten == gcpPtr.p->gcpPageNo[logPartPtr.i]) { - if (tcgcWordWritten < gcpPtr.p->gcpWordNo[logPartPtr.i]) { - jam(); -/* ------------------------------------------------------------------------- */ -/* THIS LOG PART HAVE NOT YET WRITTEN THE GLOBAL CHECKPOINT TO DISK. */ -/* ------------------------------------------------------------------------- */ - return; - }//if - }//if - }//if -/* ------------------------------------------------------------------------- */ -/* THIS LOG PART HAVE WRITTEN THE GLOBAL CHECKPOINT TO DISK. */ -/* ------------------------------------------------------------------------- */ - logPartPtr.p->gcprec = RNIL; - gcpPtr.p->gcpLogPartState[logPartPtr.i] = ZON_DISK; - tcgcFlag = ZTRUE; - for (tcgcJ = 0; tcgcJ <= 3; tcgcJ++) { - jam(); - if (gcpPtr.p->gcpLogPartState[tcgcJ] != ZON_DISK) { - jam(); -/* ------------------------------------------------------------------------- */ -/*ALL LOG PARTS HAVE NOT SAVED THIS GLOBAL CHECKPOINT TO DISK YET. WAIT FOR */ -/*THEM TO COMPLETE. */ -/* ------------------------------------------------------------------------- */ - tcgcFlag = ZFALSE; - }//if - }//for - if (tcgcFlag == ZTRUE) { - jam(); -/* ------------------------------------------------------------------------- */ -/*WE HAVE FOUND A COMPLETED GLOBAL CHECKPOINT OPERATION. WE NOW NEED TO SEND */ -/*GCP_SAVECONF, REMOVE THE GCP RECORD FROM THE LIST OF WAITING GCP RECORDS */ -/*ON THIS LOG PART AND RELEASE THE GCP RECORD. */ -// After changing the log implementation we need to perform a FSSYNCREQ on all -// log files where the last log word resided first before proceeding. -/* ------------------------------------------------------------------------- */ - UintR Ti; - for (Ti = 0; Ti < 4; Ti++) { - LogFileRecordPtr loopLogFilePtr; - loopLogFilePtr.i = gcpPtr.p->gcpFilePtr[Ti]; - ptrCheckGuard(loopLogFilePtr, clogFileFileSize, logFileRecord); - if (loopLogFilePtr.p->logFileStatus == LogFileRecord::OPEN) { - jam(); - signal->theData[0] = loopLogFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = gcpPtr.p->gcpFilePtr[Ti]; - sendSignal(NDBFS_REF, GSN_FSSYNCREQ, signal, 3, JBA); - } else { - ndbrequire((loopLogFilePtr.p->logFileStatus == - LogFileRecord::CLOSED) || - (loopLogFilePtr.p->logFileStatus == - LogFileRecord::CLOSING_WRITE_LOG) || - (loopLogFilePtr.p->logFileStatus == - LogFileRecord::OPENING_WRITE_LOG)); - signal->theData[0] = loopLogFilePtr.i; - execFSSYNCCONF(signal); - }//if - }//for - return; - }//if - }//if - }//if -}//Dblqh::checkGcpCompleted() - -void -Dblqh::execFSSYNCCONF(Signal* signal) -{ - GcpRecordPtr localGcpPtr; - LogFileRecordPtr localLogFilePtr; - LogPartRecordPtr localLogPartPtr; - localLogFilePtr.i = signal->theData[0]; - ptrCheckGuard(localLogFilePtr, clogFileFileSize, logFileRecord); - localLogPartPtr.i = localLogFilePtr.p->logPartRec; - localGcpPtr.i = ccurrentGcprec; - ptrCheckGuard(localGcpPtr, cgcprecFileSize, gcpRecord); - localGcpPtr.p->gcpSyncReady[localLogPartPtr.i] = ZTRUE; - UintR Ti; - for (Ti = 0; Ti < 4; Ti++) { - jam(); - if (localGcpPtr.p->gcpSyncReady[Ti] == ZFALSE) { - jam(); - return; - }//if - }//for - GCPSaveConf * const saveConf = (GCPSaveConf *)&signal->theData[0]; - saveConf->dihPtr = localGcpPtr.p->gcpUserptr; - saveConf->nodeId = getOwnNodeId(); - saveConf->gci = localGcpPtr.p->gcpId; - sendSignal(localGcpPtr.p->gcpBlockref, GSN_GCP_SAVECONF, signal, - GCPSaveConf::SignalLength, JBA); - 
ccurrentGcprec = RNIL; -}//Dblqh::execFSSYNCCONF() - - -/* ######################################################################### */ -/* ####### FILE HANDLING MODULE ####### */ -/* */ -/* ######################################################################### */ -/* THIS MODULE HANDLES RESPONSE MESSAGES FROM THE FILE SYSTEM */ -/* ######################################################################### */ -/* ######################################################################### */ -/* SIGNAL RECEPTION MODULE */ -/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */ -/* */ -/* THIS MODULE CHECKS THE STATE AND JUMPS TO THE PROPER PART OF THE FILE */ -/* HANDLING MODULE. */ -/* ######################################################################### */ -/* *************** */ -/* FSCLOSECONF > */ -/* *************** */ -void Dblqh::execFSCLOSECONF(Signal* signal) -{ - jamEntry(); - logFilePtr.i = signal->theData[0]; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - switch (logFilePtr.p->logFileStatus) { - case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - - exitFromInvalidate(signal); - return; - case LogFileRecord::CLOSING_INIT: - jam(); - closingInitLab(signal); - return; - case LogFileRecord::CLOSING_SR: - jam(); - closingSrLab(signal); - return; - case LogFileRecord::CLOSING_EXEC_SR: - jam(); - closeExecSrLab(signal); - return; - case LogFileRecord::CLOSING_EXEC_SR_COMPLETED: - jam(); - closeExecSrCompletedLab(signal); - return; - case LogFileRecord::CLOSING_WRITE_LOG: - jam(); - closeWriteLogLab(signal); - return; - case LogFileRecord::CLOSING_EXEC_LOG: - jam(); - closeExecLogLab(signal); - return; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -}//Dblqh::execFSCLOSECONF() - - -/* ************>> */ -/* FSOPENCONF > */ -/* ************>> */ -void Dblqh::execFSOPENCONF(Signal* signal) -{ - jamEntry(); - initFsopenconf(signal); - switch (logFilePtr.p->logFileStatus) { - case LogFileRecord::OPEN_SR_INVALIDATE_PAGES: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - readFileInInvalidate(signal, false); - return; - case LogFileRecord::OPENING_INIT: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openFileInitLab(signal); - return; - case LogFileRecord::OPEN_SR_FRONTPAGE: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openSrFrontpageLab(signal); - return; - case LogFileRecord::OPEN_SR_LAST_FILE: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openSrLastFileLab(signal); - return; - case LogFileRecord::OPEN_SR_NEXT_FILE: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openSrNextFileLab(signal); - return; - case LogFileRecord::OPEN_EXEC_SR_START: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openExecSrStartLab(signal); - return; - case LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openExecSrNewMbyteLab(signal); - return; - case LogFileRecord::OPEN_SR_FOURTH_PHASE: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openSrFourthPhaseLab(signal); - return; - case LogFileRecord::OPEN_SR_FOURTH_NEXT: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openSrFourthNextLab(signal); - return; - case LogFileRecord::OPEN_SR_FOURTH_ZERO: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - 
openSrFourthZeroLab(signal); - return; - case LogFileRecord::OPENING_WRITE_LOG: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - return; - case LogFileRecord::OPEN_EXEC_LOG: - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN; - openExecLogLab(signal); - return; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -}//Dblqh::execFSOPENCONF() - -void -Dblqh::execFSOPENREF(Signal* signal) -{ - jamEntry(); - FsRef* ref = (FsRef*)signal->getDataPtr(); - Uint32 err = ref->errorCode; - if (err == FsRef::fsErrInvalidFileSize) - { - char buf[256]; - BaseString::snprintf(buf, sizeof(buf), - "Invalid file size for redo logfile, " - " size only changable with --initial"); - progError(__LINE__, - NDBD_EXIT_INVALID_CONFIG, - buf); - return; - } - - SimulatedBlock::execFSOPENREF(signal); -} - -/* ************>> */ -/* FSREADCONF > */ -/* ************>> */ -void Dblqh::execFSREADCONF(Signal* signal) -{ - jamEntry(); - initFsrwconf(signal, false); - - switch (lfoPtr.p->lfoState) { - case LogFileOperationRecord::READ_SR_LAST_MBYTE: - jam(); - releaseLfo(signal); - readSrLastMbyteLab(signal); - return; - case LogFileOperationRecord::READ_SR_FRONTPAGE: - jam(); - releaseLfo(signal); - readSrFrontpageLab(signal); - return; - case LogFileOperationRecord::READ_SR_LAST_FILE: - jam(); - releaseLfo(signal); - readSrLastFileLab(signal); - return; - case LogFileOperationRecord::READ_SR_NEXT_FILE: - jam(); - releaseLfo(signal); - readSrNextFileLab(signal); - return; - case LogFileOperationRecord::READ_EXEC_SR: - jam(); - readExecSrLab(signal); - return; - case LogFileOperationRecord::READ_EXEC_LOG: - jam(); - readExecLogLab(signal); - return; - case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES: - jam(); - invalidateLogAfterLastGCI(signal); - return; - case LogFileOperationRecord::READ_SR_FOURTH_PHASE: - jam(); - releaseLfo(signal); - readSrFourthPhaseLab(signal); - return; - case LogFileOperationRecord::READ_SR_FOURTH_ZERO: - jam(); - releaseLfo(signal); - readSrFourthZeroLab(signal); - return; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -}//Dblqh::execFSREADCONF() - -/* ************>> */ -/* FSREADCONF > */ -/* ************>> */ -void Dblqh::execFSREADREF(Signal* signal) -{ - jamEntry(); - lfoPtr.i = signal->theData[0]; - ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord); - switch (lfoPtr.p->lfoState) { - case LogFileOperationRecord::READ_SR_LAST_MBYTE: - jam(); - break; - case LogFileOperationRecord::READ_SR_FRONTPAGE: - jam(); - break; - case LogFileOperationRecord::READ_SR_LAST_FILE: - jam(); - break; - case LogFileOperationRecord::READ_SR_NEXT_FILE: - jam(); - break; - case LogFileOperationRecord::READ_EXEC_SR: - jam(); - break; - case LogFileOperationRecord::READ_EXEC_LOG: - jam(); - break; - case LogFileOperationRecord::READ_SR_FOURTH_PHASE: - jam(); - break; - case LogFileOperationRecord::READ_SR_FOURTH_ZERO: - jam(); - break; - case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES: - jam() - break; - default: - jam(); - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system read failed during LogFileOperationRecord state %d", (Uint32)lfoPtr.p->lfoState); - fsRefError(signal,__LINE__,msg); - } -}//Dblqh::execFSREADREF() - -/* *************** */ -/* FSWRITECONF > */ -/* *************** */ -void Dblqh::execFSWRITECONF(Signal* signal) -{ - jamEntry(); - initFsrwconf(signal, true); - switch (lfoPtr.p->lfoState) { - case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES: - jam(); - 
invalidateLogAfterLastGCI(signal); - CRASH_INSERTION(5047); - return; - case LogFileOperationRecord::WRITE_PAGE_ZERO: - jam(); - writePageZeroLab(signal); - return; - case LogFileOperationRecord::LAST_WRITE_IN_FILE: - jam(); - lastWriteInFileLab(signal); - return; - case LogFileOperationRecord::INIT_WRITE_AT_END: - jam(); - initWriteEndLab(signal); - return; - case LogFileOperationRecord::INIT_FIRST_PAGE: - jam(); - initFirstPageLab(signal); - return; - case LogFileOperationRecord::WRITE_GCI_ZERO: - jam(); - writeGciZeroLab(signal); - return; - case LogFileOperationRecord::WRITE_DIRTY: - jam(); - writeDirtyLab(signal); - return; - case LogFileOperationRecord::WRITE_INIT_MBYTE: - jam(); - writeInitMbyteLab(signal); - return; - case LogFileOperationRecord::ACTIVE_WRITE_LOG: - jam(); - writeLogfileLab(signal); - return; - case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE: - jam(); - firstPageWriteLab(signal); - return; - case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0: - jam(); - // We are done...send completed signal and exit this phase. - releaseLfo(signal); - signal->theData[0] = ZSR_FOURTH_COMP; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -}//Dblqh::execFSWRITECONF() - -/* ************>> */ -/* FSWRITEREF > */ -/* ************>> */ -void Dblqh::execFSWRITEREF(Signal* signal) -{ - jamEntry(); - lfoPtr.i = signal->theData[0]; - ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord); - terrorCode = signal->theData[1]; - switch (lfoPtr.p->lfoState) { - case LogFileOperationRecord::WRITE_PAGE_ZERO: - jam(); - break; - case LogFileOperationRecord::LAST_WRITE_IN_FILE: - jam(); - break; - case LogFileOperationRecord::INIT_WRITE_AT_END: - jam(); - break; - case LogFileOperationRecord::INIT_FIRST_PAGE: - jam(); - break; - case LogFileOperationRecord::WRITE_GCI_ZERO: - jam(); - break; - case LogFileOperationRecord::WRITE_DIRTY: - jam(); - break; - case LogFileOperationRecord::WRITE_INIT_MBYTE: - jam(); - break; - case LogFileOperationRecord::ACTIVE_WRITE_LOG: - jam(); - break; - case LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE: - jam(); - break; - case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES: - jam(); - systemErrorLab(signal, __LINE__); - default: - jam(); - break; - }//switch - { - char msg[100]; - sprintf(msg, "File system write failed during LogFileOperationRecord state %d", (Uint32)lfoPtr.p->lfoState); - fsRefError(signal,__LINE__,msg); - } -}//Dblqh::execFSWRITEREF() - - -/* ========================================================================= */ -/* ======= INITIATE WHEN RECEIVING FSOPENCONF ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initFsopenconf(Signal* signal) -{ - logFilePtr.i = signal->theData[0]; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logFilePtr.p->fileRef = signal->theData[1]; - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.p->currentMbyte = 0; - logFilePtr.p->filePosition = 0; -}//Dblqh::initFsopenconf() - -/* ========================================================================= */ -/* ======= INITIATE WHEN RECEIVING FSREADCONF AND FSWRITECONF ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initFsrwconf(Signal* signal, bool write) -{ - LogPageRecordPtr logP; - Uint32 
noPages, totPages; - lfoPtr.i = signal->theData[0]; - ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord); - totPages= lfoPtr.p->noPagesRw; - logFilePtr.i = lfoPtr.p->logFileRec; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logPagePtr.i = lfoPtr.p->firstLfoPage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logP= logPagePtr; - noPages= 1; - ndbassert(totPages > 0); - for (;;) - { - logP.p->logPageWord[ZPOS_IN_WRITING]= 0; - logP.p->logPageWord[ZPOS_IN_FREE_LIST]= 0; - if (noPages == totPages) - return; - if (write) - logP.i= logP.p->logPageWord[ZNEXT_PAGE]; - else - logP.i= lfoPtr.p->logPageArray[noPages]; - ptrCheckGuard(logP, clogPageFileSize, logPageRecord); - noPages++; - } -}//Dblqh::initFsrwconf() - -/* ######################################################################### */ -/* NORMAL OPERATION MODULE */ -/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */ -/* */ -/* THIS PART HANDLES THE NORMAL OPENING, CLOSING AND WRITING OF LOG FILES */ -/* DURING NORMAL OPERATION. */ -/* ######################################################################### */ -/*---------------------------------------------------------------------------*/ -/* THIS SIGNAL IS USED TO SUPERVISE THAT THE LOG RECORDS ARE NOT KEPT IN MAIN*/ -/* MEMORY FOR MORE THAN 1 SECOND TO ACHIEVE THE PROPER RELIABILITY. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::timeSup(Signal* signal) -{ - LogPageRecordPtr origLogPagePtr; - Uint32 wordWritten; - - jamEntry(); - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - if (logPartPtr.p->logPartTimer != logPartPtr.p->logTimer) { - jam(); -/*--------------------------------------------------------------------------*/ -/* THIS LOG PART HAS NOT WRITTEN TO DISK DURING THE LAST SECOND. */ -/*--------------------------------------------------------------------------*/ - switch (logPartPtr.p->logPartState) { - case LogPartRecord::FILE_CHANGE_PROBLEM: - jam(); -/*--------------------------------------------------------------------------*/ -/* THIS LOG PART HAS PROBLEMS IN CHANGING FILES MAKING IT IMPOSSIBLE */ -// TO WRITE TO THE FILE CURRENTLY. WE WILL COMEBACK LATER AND SEE IF -// THE PROBLEM HAS BEEN FIXED. -/*--------------------------------------------------------------------------*/ - case LogPartRecord::ACTIVE: - jam(); -/*---------------------------------------------------------------------------*/ -/* AN OPERATION IS CURRENTLY ACTIVE IN WRITING THIS LOG PART. WE THUS CANNOT */ -/* WRITE ANYTHING TO DISK AT THIS MOMENT. WE WILL SEND A SIGNAL DELAYED FOR */ -/* 10 MS AND THEN TRY AGAIN. POSSIBLY THE LOG PART WILL HAVE BEEN WRITTEN */ -/* UNTIL THEN OR ELSE IT SHOULD BE FREE TO WRITE AGAIN. 
*/ -/*---------------------------------------------------------------------------*/ - signal->theData[0] = ZTIME_SUPERVISION; - signal->theData[1] = logPartPtr.i; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2); - return; - break; - case LogPartRecord::IDLE: - case LogPartRecord::TAIL_PROBLEM: - jam(); -/*---------------------------------------------------------------------------*/ -/* IDLE AND NOT WRITTEN TO DISK IN A SECOND. ALSO WHEN WE HAVE A TAIL PROBLEM*/ -/* WE HAVE TO WRITE TO DISK AT TIMES. WE WILL FIRST CHECK WHETHER ANYTHING */ -/* AT ALL HAVE BEEN WRITTEN TO THE PAGES BEFORE WRITING TO DISK. */ -/*---------------------------------------------------------------------------*/ -/* WE HAVE TO WRITE TO DISK IN ALL CASES SINCE THERE COULD BE INFORMATION */ -/* STILL IN THE LOG THAT WAS GENERATED BEFORE THE PREVIOUS TIME SUPERVISION */ -/* BUT AFTER THE LAST DISK WRITE. THIS PREVIOUSLY STOPPED ALL DISK WRITES */ -/* WHEN NO MORE LOG WRITES WERE PERFORMED (THIS HAPPENED WHEN LOG GOT FULL */ -/* AND AFTER LOADING THE INITIAL RECORDS IN INITIAL START). */ -/*---------------------------------------------------------------------------*/ - if (((logFilePtr.p->currentFilepage + 1) & (ZPAGES_IN_MBYTE -1)) == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS IS THE LAST PAGE IN THIS MBYTE. WRITE NEXT LOG AND SWITCH TO NEXT */ -/* MBYTE. */ -/*---------------------------------------------------------------------------*/ - changeMbyte(signal); - } else { -/*---------------------------------------------------------------------------*/ -/* WRITE THE LOG PAGE TO DISK EVEN IF IT IS NOT FULL. KEEP PAGE AND WRITE A */ -/* COPY. THE ORIGINAL PAGE WILL BE WRITTEN AGAIN LATER ON. */ -/*---------------------------------------------------------------------------*/ - wordWritten = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] - 1; - origLogPagePtr.i = logPagePtr.i; - origLogPagePtr.p = logPagePtr.p; - seizeLogpage(signal); - MEMCOPY_NO_WORDS(&logPagePtr.p->logPageWord[0], - &origLogPagePtr.p->logPageWord[0], - wordWritten + 1); - ndbrequire(wordWritten < ZPAGE_SIZE); - if (logFilePtr.p->noLogpagesInBuffer > 0) { - jam(); - completedLogPage(signal, ZENFORCE_WRITE, __LINE__); -/*---------------------------------------------------------------------------*/ -/*SINCE WE ARE ONLY WRITING PART OF THE LAST PAGE WE HAVE TO UPDATE THE WORD */ -/*WRITTEN TO REFLECT THE REAL LAST WORD WRITTEN. WE ALSO HAVE TO MOVE THE */ -/*FILE POSITION ONE STEP BACKWARDS SINCE WE ARE NOT WRITING THE LAST PAGE */ -/*COMPLETELY. IT WILL BE WRITTEN AGAIN. */ -/*---------------------------------------------------------------------------*/ - lfoPtr.p->lfoWordWritten = wordWritten; - logFilePtr.p->filePosition = logFilePtr.p->filePosition - 1; - } else { - if (wordWritten == (ZPAGE_HEADER_SIZE - 1)) { -/*---------------------------------------------------------------------------*/ -/*THIS IS POSSIBLE BUT VERY UNLIKELY. IF THE PAGE WAS COMPLETED AFTER THE LAST*/ -/*WRITE TO DISK THEN NO_LOG_PAGES_IN_BUFFER > 0 AND IF NOT WRITTEN SINCE LAST*/ -/*WRITE TO DISK THEN THE PREVIOUS PAGE MUST HAVE BEEN WRITTEN BY SOME */ -/*OPERATION AND THAT BECAME COMPLETELY FULL. IN ANY CASE WE NEED NOT WRITE AN*/ -/*EMPTY PAGE TO DISK. 
*/ -/*---------------------------------------------------------------------------*/ - jam(); - releaseLogpage(signal); - } else { - jam(); - writeSinglePage(signal, logFilePtr.p->currentFilepage, - wordWritten, __LINE__); - lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG; - }//if - }//if - }//if - break; - default: - ndbrequire(false); - break; - }//switch - }//if - logPartPtr.p->logTimer++; - return; -}//Dblqh::timeSup() - -void Dblqh::writeLogfileLab(Signal* signal) -{ -/*---------------------------------------------------------------------------*/ -/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */ -/* WRITE. */ -/*---------------------------------------------------------------------------*/ - switch (logFilePtr.p->fileChangeState) { - case LogFileRecord::NOT_ONGOING: - jam(); - checkGcpCompleted(signal, - ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1), - lfoPtr.p->lfoWordWritten); - break; -#if 0 - case LogFileRecord::BOTH_WRITES_ONGOING: - jam(); - ndbout_c("not crashing!!"); - // Fall-through -#endif - case LogFileRecord::WRITE_PAGE_ZERO_ONGOING: - case LogFileRecord::LAST_WRITE_ONGOING: - jam(); - logFilePtr.p->lastPageWritten = (lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1; - logFilePtr.p->lastWordWritten = lfoPtr.p->lfoWordWritten; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - releaseLfoPages(signal); - releaseLfo(signal); - return; -}//Dblqh::writeLogfileLab() - -void Dblqh::closeWriteLogLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - return; -}//Dblqh::closeWriteLogLab() - -/* ######################################################################### */ -/* FILE CHANGE MODULE */ -/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */ -/* */ -/*THIS PART OF THE FILE MODULE HANDLES WHEN WE ARE CHANGING LOG FILE DURING */ -/*NORMAL OPERATION. WE HAVE TO BE CAREFUL WHEN WE ARE CHANGING LOG FILE SO */ -/*THAT WE DO NOT COMPLICATE THE SYSTEM RESTART PROCESS TOO MUCH. */ -/*THE IDEA IS THAT WE START BY WRITING THE LAST WRITE IN THE OLD FILE AND WE */ -/*ALSO WRITE THE FIRST PAGE OF THE NEW FILE CONCURRENT WITH THAT. THIS FIRST */ -/*PAGE IN THE NEW FILE DO NOT CONTAIN ANY LOG RECORDS OTHER THAN A DESCRIPTOR*/ -/*CONTAINING INFORMATION ABOUT GCI'S NEEDED AT SYSTEM RESTART AND A NEXT LOG */ -/*RECORD. */ -/* */ -/*WHEN BOTH OF THOSE WRITES HAVE COMPLETED WE ALSO WRITE PAGE ZERO IN FILE */ -/*ZERO. THE ONLY INFORMATION WHICH IS INTERESTING HERE IS THE NEW FILE NUMBER*/ -/* */ -/*IF OPTIMISATIONS ARE NEEDED OF THE LOG HANDLING THEN IT IS POSSIBLE TO */ -/*AVOID WRITING THE FIRST PAGE OF THE NEW PAGE IMMEDIATELY. THIS COMPLICATES */ -/*THE SYSTEM RESTART AND ONE HAS TO TAKE SPECIAL CARE WITH FILE ZERO. IT IS */ -/*HOWEVER NO LARGE PROBLEM TO CHANGE INTO THIS SCENARIO. TO AVOID ALSO THE */ -/*WRITING OF PAGE ZERO IS ALSO POSSIBLE BUT COMPLICATES THE DESIGN EVEN */ -/*FURTHER. IT GETS FAIRLY COMPLEX TO FIND THE END OF THE LOG. SOME SORT OF */ -/*BINARY SEARCH IS HOWEVER MOST LIKELY A GOOD METHODOLOGY FOR THIS. */ -/* ######################################################################### */ -void Dblqh::firstPageWriteLab(Signal* signal) -{ - releaseLfo(signal); -/*---------------------------------------------------------------------------*/ -/* RELEASE PAGE ZERO IF THE FILE IS NOT FILE 0. 
*/ -/*---------------------------------------------------------------------------*/ - Uint32 fileNo = logFilePtr.p->fileNo; - if (fileNo != 0) { - jam(); - releaseLogpage(signal); - }//if -/*---------------------------------------------------------------------------*/ -/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE O IN */ -/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */ -/* LAST FILE WHERE LOGGING HAS STARTED. */ -/*---------------------------------------------------------------------------*/ -/* FIRST CHECK WHETHER THE LAST WRITE IN THE PREVIOUS FILE HAVE COMPLETED */ -/*---------------------------------------------------------------------------*/ - if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE LAST WRITE WAS STILL ONGOING. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::LAST_WRITE_ONGOING; - return; - } else { - jam(); - ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::FIRST_WRITE_ONGOING); -/*---------------------------------------------------------------------------*/ -/* WRITE TO PAGE 0 IN IN FILE 0 NOW. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING; - if (fileNo == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING; - return; - } else { - jam(); -/*---------------------------------------------------------------------------*/ -/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */ -/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */ -/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. */ -/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */ -/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */ -/* LOG PART. */ -/*---------------------------------------------------------------------------*/ - Uint32 currLogFile = logFilePtr.i; - logFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->logPageZero; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo; - writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__); - lfoPtr.p->logFileRec = currLogFile; - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO; - return; - }//if - }//if -}//Dblqh::firstPageWriteLab() - -void Dblqh::lastWriteInFileLab(Signal* signal) -{ - LogFileRecordPtr locLogFilePtr; -/*---------------------------------------------------------------------------*/ -/* CHECK IF ANY GLOBAL CHECKPOINTS ARE COMPLETED DUE TO THIS COMPLETED DISK */ -/* WRITE. 
*/ -/*---------------------------------------------------------------------------*/ - checkGcpCompleted(signal, - ((lfoPtr.p->lfoPageNo + lfoPtr.p->noPagesRw) - 1), - (ZPAGE_SIZE - 1)); - releaseLfoPages(signal); - releaseLfo(signal); -/*---------------------------------------------------------------------------*/ -/* IF THE FILE IS NOT IN USE OR THE NEXT FILE TO BE USED WE WILL CLOSE IT. */ -/*---------------------------------------------------------------------------*/ - locLogFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - if (logFilePtr.i != locLogFilePtr.i) { - if (logFilePtr.i != locLogFilePtr.p->nextLogFile) { - if (logFilePtr.p->fileNo != 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE FILE IS NOT FILE ZERO EITHER. WE WILL NOT CLOSE FILE ZERO SINCE WE */ -/* USE IT TO KEEP TRACK OF THE CURRENT LOG FILE BY WRITING PAGE ZERO IN */ -/* FILE ZERO. */ -/*---------------------------------------------------------------------------*/ -/* WE WILL CLOSE THE FILE. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_WRITE_LOG; - closeFile(signal, logFilePtr, __LINE__); - }//if - }//if - }//if -/*---------------------------------------------------------------------------*/ -/* IF A NEW FILE HAS BEEN OPENED WE SHALL ALWAYS ALSO WRITE TO PAGE O IN */ -/* FILE 0. THE AIM IS TO MAKE RESTARTS EASIER BY SPECIFYING WHICH IS THE */ -/* LAST FILE WHERE LOGGING HAS STARTED. */ -/*---------------------------------------------------------------------------*/ -/* FIRST CHECK WHETHER THE FIRST WRITE IN THE NEW FILE HAVE COMPLETED */ -/* THIS STATE INFORMATION IS IN THE NEW LOG FILE AND THUS WE HAVE TO MOVE */ -/* THE LOG FILE POINTER TO THIS LOG FILE. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - if (logFilePtr.p->fileChangeState == LogFileRecord::BOTH_WRITES_ONGOING) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE FIRST WRITE WAS STILL ONGOING. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::FIRST_WRITE_ONGOING; - return; - } else { - ndbrequire(logFilePtr.p->fileChangeState == LogFileRecord::LAST_WRITE_ONGOING); -/*---------------------------------------------------------------------------*/ -/* WRITE TO PAGE 0 IN IN FILE 0 NOW. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::WRITE_PAGE_ZERO_ONGOING; - Uint32 fileNo = logFilePtr.p->fileNo; - if (fileNo == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* IF THE NEW FILE WAS 0 THEN WE HAVE ALREADY WRITTEN PAGE ZERO IN FILE 0. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING; - return; - } else { - jam(); -/*---------------------------------------------------------------------------*/ -/* WRITE PAGE ZERO IN FILE ZERO. LOG_FILE_REC WILL REFER TO THE LOG FILE WE */ -/* HAVE JUST WRITTEN PAGE ZERO IN TO GET HOLD OF LOG_FILE_PTR FOR THIS */ -/* RECORD QUICKLY. THIS IS NEEDED TO GET HOLD OF THE FILE_CHANGE_STATE. 
*/ -/* THE ONLY INFORMATION WE WANT TO CHANGE IS THE LAST FILE NUMBER IN THE */ -/* FILE DESCRIPTOR. THIS IS USED AT SYSTEM RESTART TO FIND THE END OF THE */ -/* LOG PART. */ -/*---------------------------------------------------------------------------*/ - Uint32 currLogFile = logFilePtr.i; - logFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->logPageZero; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = fileNo; - writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__); - lfoPtr.p->logFileRec = currLogFile; - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_PAGE_ZERO; - return; - }//if - }//if -}//Dblqh::lastWriteInFileLab() - -void Dblqh::writePageZeroLab(Signal* signal) -{ - if (logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM) - { - if (logPartPtr.p->firstLogQueue == RNIL) - { - jam(); - logPartPtr.p->logPartState = LogPartRecord::IDLE; - } - else - { - jam(); - logPartPtr.p->logPartState = LogPartRecord::ACTIVE; - } - } - - logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING; -/*---------------------------------------------------------------------------*/ -/* IT COULD HAVE ARRIVED PAGE WRITES TO THE CURRENT FILE WHILE WE WERE */ -/* WAITING FOR THIS DISK WRITE TO COMPLETE. THEY COULD NOT CHECK FOR */ -/* COMPLETED GLOBAL CHECKPOINTS. THUS WE SHOULD DO THAT NOW INSTEAD. */ -/*---------------------------------------------------------------------------*/ - checkGcpCompleted(signal, - logFilePtr.p->lastPageWritten, - logFilePtr.p->lastWordWritten); - releaseLfo(signal); - return; -}//Dblqh::writePageZeroLab() - -/* ######################################################################### */ -/* INITIAL START MODULE */ -/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */ -/* */ -/*THIS MODULE INITIALISES ALL THE LOG FILES THAT ARE NEEDED AT A SYSTEM */ -/*RESTART AND WHICH ARE USED DURING NORMAL OPERATIONS. IT CREATES THE FILES */ -/*AND SETS A PROPER SIZE OF THEM AND INITIALISES THE FIRST PAGE IN EACH FILE */ -/* ######################################################################### */ -void Dblqh::openFileInitLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::OPEN_INIT; - seizeLogpage(signal); - writeSinglePage(signal, (clogFileSize * ZPAGES_IN_MBYTE) - 1, - ZPAGE_SIZE - 1, __LINE__); - lfoPtr.p->lfoState = LogFileOperationRecord::INIT_WRITE_AT_END; - return; -}//Dblqh::openFileInitLab() - -void Dblqh::initWriteEndLab(Signal* signal) -{ - releaseLfo(signal); - initLogpage(signal); - if (logFilePtr.p->fileNo == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* PAGE ZERO IN FILE ZERO MUST SET LOG LAP TO ONE SINCE IT HAS STARTED */ -/* WRITING TO THE LOG, ALSO GLOBAL CHECKPOINTS ARE SET TO ZERO. */ -/*---------------------------------------------------------------------------*/ - logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1; - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = 0; - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = 0; - logFilePtr.p->logMaxGciStarted[0] = 0; - logFilePtr.p->logMaxGciCompleted[0] = 0; - }//if -/*---------------------------------------------------------------------------*/ -/* REUSE CODE FOR INITIALISATION OF FIRST PAGE IN ALL LOG FILES. 
*/ -/*---------------------------------------------------------------------------*/ - writeFileHeaderOpen(signal, ZINIT); - return; -}//Dblqh::initWriteEndLab() - -void Dblqh::initFirstPageLab(Signal* signal) -{ - releaseLfo(signal); - if (logFilePtr.p->fileNo == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/* IN FILE ZERO WE WILL INSERT A PAGE ONE WHERE WE WILL INSERT A COMPLETED */ -/* GCI RECORD FOR GCI = 0. */ -/*---------------------------------------------------------------------------*/ - initLogpage(signal); - logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1; - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE] = ZCOMPLETED_GCI_TYPE; - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + 1] = 1; - writeSinglePage(signal, 1, ZPAGE_SIZE - 1, __LINE__); - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_GCI_ZERO; - return; - }//if - logFilePtr.p->currentMbyte = 1; - writeInitMbyte(signal); - return; -}//Dblqh::initFirstPageLab() - -void Dblqh::writeGciZeroLab(Signal* signal) -{ - releaseLfo(signal); - logFilePtr.p->currentMbyte = 1; - writeInitMbyte(signal); - return; -}//Dblqh::writeGciZeroLab() - -void Dblqh::writeInitMbyteLab(Signal* signal) -{ - releaseLfo(signal); - logFilePtr.p->currentMbyte = logFilePtr.p->currentMbyte + 1; - if (logFilePtr.p->currentMbyte == clogFileSize) { - jam(); - releaseLogpage(signal); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_INIT; - closeFile(signal, logFilePtr, __LINE__); - return; - }//if - writeInitMbyte(signal); - return; -}//Dblqh::writeInitMbyteLab() - -void Dblqh::closingInitLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - if (logFilePtr.p->nextLogFile == logPartPtr.p->firstLogfile) { - jam(); - checkInitCompletedLab(signal); - return; - } else { - jam(); - logFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - openLogfileInit(signal); - }//if - return; -}//Dblqh::closingInitLab() - -void Dblqh::checkInitCompletedLab(Signal* signal) -{ - logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED; -/*---------------------------------------------------------------------------*/ -/* WE HAVE NOW INITIALISED ALL FILES IN THIS LOG PART. WE CAN NOW SET THE */ -/* THE LOG LAP TO ONE SINCE WE WILL START WITH LOG LAP ONE. LOG LAP = ZERO */ -/* MEANS THIS PART OF THE LOG IS NOT WRITTEN YET. */ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->logLap = 1; - logPartPtr.i = 0; -CHECK_LOG_PARTS_LOOP: - ptrAss(logPartPtr, logPartRecord); - if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS PART HAS STILL NOT COMPLETED. WAIT FOR THIS TO OCCUR. */ -/*---------------------------------------------------------------------------*/ - return; - }//if - if (logPartPtr.i == 3) { - jam(); -/*---------------------------------------------------------------------------*/ -/* ALL LOG PARTS ARE COMPLETED. NOW WE CAN CONTINUE WITH THE RESTART */ -/* PROCESSING. THE NEXT STEP IS TO PREPARE FOR EXECUTING OPERATIONS. THUS WE */ -/* NEED TO INITIALISE ALL NEEDED DATA AND TO OPEN FILE ZERO AND THE NEXT AND */ -/* TO SET THE CURRENT LOG PAGE TO BE PAGE 1 IN FILE ZERO. 
*/ -/*---------------------------------------------------------------------------*/ - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - ptrAss(logPartPtr, logPartRecord); - signal->theData[0] = ZINIT_FOURTH; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//for - return; - } else { - jam(); - logPartPtr.i = logPartPtr.i + 1; - goto CHECK_LOG_PARTS_LOOP; - }//if -}//Dblqh::checkInitCompletedLab() - -/* ========================================================================= */ -/* ======= INITIATE LOG FILE OPERATION RECORD WHEN ALLOCATED ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initLfo(Signal* signal) -{ - lfoPtr.p->firstLfoPage = RNIL; - lfoPtr.p->lfoState = LogFileOperationRecord::IDLE; - lfoPtr.p->logFileRec = logFilePtr.i; - lfoPtr.p->noPagesRw = 0; - lfoPtr.p->lfoPageNo = ZNIL; -}//Dblqh::initLfo() - -/* ========================================================================= */ -/* ======= INITIATE LOG FILE WHEN ALLOCATED ======= */ -/* */ -/* INPUT: TFILE_NO NUMBER OF THE FILE INITIATED */ -/* LOG_PART_PTR NUMBER OF LOG PART */ -/* SUBROUTINE SHORT NAME = IL */ -/* ========================================================================= */ -void Dblqh::initLogfile(Signal* signal, Uint32 fileNo) -{ - UintR tilTmp; - UintR tilIndex; - - logFilePtr.p->currentFilepage = 0; - logFilePtr.p->currentLogpage = RNIL; - logFilePtr.p->fileName[0] = (UintR)-1; - logFilePtr.p->fileName[1] = (UintR)-1; /* = H'FFFFFFFF = -1 */ - logFilePtr.p->fileName[2] = fileNo; /* Sfile_no */ - tilTmp = 1; /* VERSION 1 OF FILE NAME */ - tilTmp = (tilTmp << 8) + 1; /* FRAGMENT LOG => .FRAGLOG AS EXTENSION */ - tilTmp = (tilTmp << 8) + (8 + logPartPtr.i); /* DIRECTORY = D(8+Part)/DBLQH */ - tilTmp = (tilTmp << 8) + 255; /* IGNORE Pxx PART OF FILE NAME */ - logFilePtr.p->fileName[3] = tilTmp; -/* ========================================================================= */ -/* FILE NAME BECOMES /D2/DBLQH/Tpart_no/Sfile_no.FRAGLOG */ -/* ========================================================================= */ - logFilePtr.p->fileNo = fileNo; - logFilePtr.p->filePosition = 0; - logFilePtr.p->firstLfo = RNIL; - logFilePtr.p->lastLfo = RNIL; - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - logFilePtr.p->logPartRec = logPartPtr.i; - logFilePtr.p->noLogpagesInBuffer = 0; - logFilePtr.p->firstFilledPage = RNIL; - logFilePtr.p->lastFilledPage = RNIL; - logFilePtr.p->lastPageWritten = 0; - logFilePtr.p->logPageZero = RNIL; - logFilePtr.p->currentMbyte = 0; - for (tilIndex = 0; tilIndex < clogFileSize; tilIndex++) { - logFilePtr.p->logMaxGciCompleted[tilIndex] = (UintR)-1; - logFilePtr.p->logMaxGciStarted[tilIndex] = (UintR)-1; - logFilePtr.p->logLastPrepRef[tilIndex] = 0; - }//for -}//Dblqh::initLogfile() - -/* ========================================================================= */ -/* ======= INITIATE LOG PAGE WHEN ALLOCATED ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initLogpage(Signal* signal) -{ - TcConnectionrecPtr ilpTcConnectptr; - - logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = logPartPtr.p->logLap; - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED] = - logPartPtr.p->logPartNewestCompletedGCI; - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED] = cnewestGci; - logPagePtr.p->logPageWord[ZPOS_VERSION] = NDB_VERSION; - logPagePtr.p->logPageWord[ZPOS_NO_LOG_FILES] = logPartPtr.p->noLogFiles; - 
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE; - ilpTcConnectptr.i = logPartPtr.p->firstLogTcrec; - if (ilpTcConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(ilpTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] = - (ilpTcConnectptr.p->logStartFileNo << 16) + - (ilpTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE); - } else { - jam(); - logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF] = - (logFilePtr.p->fileNo << 16) + - (logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE); - }//if -}//Dblqh::initLogpage() - -/* ------------------------------------------------------------------------- */ -/* ------- OPEN LOG FILE FOR READ AND WRITE ------- */ -/* */ -/* SUBROUTINE SHORT NAME = OFR */ -/* ------------------------------------------------------------------------- */ -void Dblqh::openFileRw(Signal* signal, LogFileRecordPtr olfLogFilePtr) -{ - FsOpenReq* req = (FsOpenReq*)signal->getDataPtrSend(); - signal->theData[0] = cownref; - signal->theData[1] = olfLogFilePtr.i; - signal->theData[2] = olfLogFilePtr.p->fileName[0]; - signal->theData[3] = olfLogFilePtr.p->fileName[1]; - signal->theData[4] = olfLogFilePtr.p->fileName[2]; - signal->theData[5] = olfLogFilePtr.p->fileName[3]; - signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; - if (c_o_direct) - signal->theData[6] |= FsOpenReq::OM_DIRECT; - req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); - Uint64 sz = clogFileSize; - sz *= 1024; sz *= 1024; - req->file_size_hi = sz >> 32; - req->file_size_lo = sz & 0xFFFFFFFF; - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -}//Dblqh::openFileRw() - -/* ------------------------------------------------------------------------- */ -/* ------- OPEN LOG FILE DURING INITIAL START ------- */ -/* */ -/* SUBROUTINE SHORT NAME = OLI */ -/* ------------------------------------------------------------------------- */ -void Dblqh::openLogfileInit(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::OPENING_INIT; - FsOpenReq* req = (FsOpenReq*)signal->getDataPtrSend(); - signal->theData[0] = cownref; - signal->theData[1] = logFilePtr.i; - signal->theData[2] = logFilePtr.p->fileName[0]; - signal->theData[3] = logFilePtr.p->fileName[1]; - signal->theData[4] = logFilePtr.p->fileName[2]; - signal->theData[5] = logFilePtr.p->fileName[3]; - signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE | FsOpenReq::OM_AUTOSYNC; - if (c_o_direct) - signal->theData[6] |= FsOpenReq::OM_DIRECT; - req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); -}//Dblqh::openLogfileInit() - -/* OPEN FOR READ/WRITE, DO CREATE AND DO TRUNCATE FILE */ -/* ------------------------------------------------------------------------- */ -/* ------- OPEN NEXT LOG FILE ------- */ -/* */ -/* SUBROUTINE SHORT NAME = ONL */ -/* ------------------------------------------------------------------------- */ -void Dblqh::openNextLogfile(Signal* signal) -{ - LogFileRecordPtr onlLogFilePtr; - - if (logPartPtr.p->noLogFiles > 2) { - jam(); -/* -------------------------------------------------- */ -/* IF ONLY 1 OR 2 LOG FILES EXIST THEN THEY ARE */ -/* ALWAYS OPEN AND THUS IT IS NOT NECESSARY TO */ -/* OPEN THEM NOW. 
*/ -/* -------------------------------------------------- */ - onlLogFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(onlLogFilePtr, clogFileFileSize, logFileRecord); - if (onlLogFilePtr.p->logFileStatus != LogFileRecord::CLOSED) { - ndbrequire(onlLogFilePtr.p->fileNo == 0); - return; - }//if - onlLogFilePtr.p->logFileStatus = LogFileRecord::OPENING_WRITE_LOG; - FsOpenReq* req = (FsOpenReq*)signal->getDataPtrSend(); - signal->theData[0] = cownref; - signal->theData[1] = onlLogFilePtr.i; - signal->theData[2] = onlLogFilePtr.p->fileName[0]; - signal->theData[3] = onlLogFilePtr.p->fileName[1]; - signal->theData[4] = onlLogFilePtr.p->fileName[2]; - signal->theData[5] = onlLogFilePtr.p->fileName[3]; - signal->theData[6] = FsOpenReq::OM_READWRITE | FsOpenReq::OM_AUTOSYNC | FsOpenReq::OM_CHECK_SIZE; - if (c_o_direct) - signal->theData[6] |= FsOpenReq::OM_DIRECT; - req->auto_sync_size = MAX_REDO_PAGES_WITHOUT_SYNCH * sizeof(LogPageRecord); - Uint64 sz = clogFileSize; - sz *= 1024; sz *= 1024; - req->file_size_hi = sz >> 32; - req->file_size_lo = sz & 0xFFFFFFFF; - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBA); - }//if -}//Dblqh::openNextLogfile() - - /* OPEN FOR READ/WRITE, DON'T CREATE AND DON'T TRUNCATE FILE */ -/* ------------------------------------------------------------------------- */ -/* ------- RELEASE LFO RECORD ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::releaseLfo(Signal* signal) -{ -#ifdef VM_TRACE - // Check that lfo record isn't already in free list - LogFileOperationRecordPtr TlfoPtr; - TlfoPtr.i = cfirstfreeLfo; - while (TlfoPtr.i != RNIL){ - ptrCheckGuard(TlfoPtr, clfoFileSize, logFileOperationRecord); - ndbrequire(TlfoPtr.i != lfoPtr.i); - TlfoPtr.i = TlfoPtr.p->nextLfo; - } -#endif - lfoPtr.p->nextLfo = cfirstfreeLfo; - lfoPtr.p->lfoTimer = 0; - cfirstfreeLfo = lfoPtr.i; - lfoPtr.p->lfoState = LogFileOperationRecord::IDLE; -}//Dblqh::releaseLfo() - -/* ------------------------------------------------------------------------- */ -/* ------- RELEASE ALL LOG PAGES CONNECTED TO A LFO RECORD ------- */ -/* */ -/* SUBROUTINE SHORT NAME = RLP */ -/* ------------------------------------------------------------------------- */ -void Dblqh::releaseLfoPages(Signal* signal) -{ - LogPageRecordPtr rlpLogPagePtr; - - logPagePtr.i = lfoPtr.p->firstLfoPage; -RLP_LOOP: - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - rlpLogPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - releaseLogpage(signal); - if (rlpLogPagePtr.i != RNIL) { - jam(); - logPagePtr.i = rlpLogPagePtr.i; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - goto RLP_LOOP; - }//if - lfoPtr.p->firstLfoPage = RNIL; -}//Dblqh::releaseLfoPages() - -/* ------------------------------------------------------------------------- */ -/* ------- RELEASE LOG PAGE ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::releaseLogpage(Signal* signal) -{ -#ifdef VM_TRACE - // Check that log page isn't already in free list - ndbrequire(logPagePtr.p->logPageWord[ZPOS_IN_FREE_LIST] == 0); -#endif - - cnoOfLogPages++; - logPagePtr.p->logPageWord[ZNEXT_PAGE] = cfirstfreeLogPage; - logPagePtr.p->logPageWord[ZPOS_IN_WRITING]= 0; - logPagePtr.p->logPageWord[ZPOS_IN_FREE_LIST]= 1; - cfirstfreeLogPage = logPagePtr.i; -}//Dblqh::releaseLogpage() - -/* ------------------------------------------------------------------------- */ -/* ------- SEIZE LFO RECORD ------- */ -/* 
*/ -/* ------------------------------------------------------------------------- */ -void Dblqh::seizeLfo(Signal* signal) -{ - lfoPtr.i = cfirstfreeLfo; - ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord); - cfirstfreeLfo = lfoPtr.p->nextLfo; - lfoPtr.p->nextLfo = RNIL; - lfoPtr.p->lfoTimer = cLqhTimeOutCount; -}//Dblqh::seizeLfo() - -/* ------------------------------------------------------------------------- */ -/* ------- SEIZE LOG FILE RECORD ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::seizeLogfile(Signal* signal) -{ - logFilePtr.i = cfirstfreeLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); -/* ------------------------------------------------------------------------- */ -/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_FILE_PTR = RNIL */ -/* ------------------------------------------------------------------------- */ - cfirstfreeLogFile = logFilePtr.p->nextLogFile; - logFilePtr.p->nextLogFile = RNIL; -}//Dblqh::seizeLogfile() - -/* ------------------------------------------------------------------------- */ -/* ------- SEIZE LOG PAGE RECORD ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::seizeLogpage(Signal* signal) -{ - cnoOfLogPages--; - logPagePtr.i = cfirstfreeLogPage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); -/* ------------------------------------------------------------------------- */ -/*IF LIST IS EMPTY THEN A SYSTEM CRASH IS INVOKED SINCE LOG_PAGE_PTR = RNIL */ -/* ------------------------------------------------------------------------- */ - cfirstfreeLogPage = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL; - logPagePtr.p->logPageWord[ZPOS_IN_FREE_LIST] = 0; -}//Dblqh::seizeLogpage() - -/* ------------------------------------------------------------------------- */ -/* ------- WRITE FILE DESCRIPTOR INFORMATION ------- */ -/* */ -/* SUBROUTINE SHORT NAME: WFD */ -// Pointer handling: -// logFilePtr in -// logPartPtr in -/* ------------------------------------------------------------------------- */ -void Dblqh::writeFileDescriptor(Signal* signal) -{ - TcConnectionrecPtr wfdTcConnectptr; - UintR twfdFileNo; - UintR twfdMbyte; - -/* -------------------------------------------------- */ -/* START BY WRITING TO LOG FILE RECORD */ -/* -------------------------------------------------- */ - arrGuard(logFilePtr.p->currentMbyte, clogFileSize); - logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = - logPartPtr.p->logPartNewestCompletedGCI; - logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = cnewestGci; - wfdTcConnectptr.i = logPartPtr.p->firstLogTcrec; - if (wfdTcConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(wfdTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - twfdFileNo = wfdTcConnectptr.p->logStartFileNo; - twfdMbyte = wfdTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE; - logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] = - (twfdFileNo << 16) + twfdMbyte; - } else { - jam(); - logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] = - (logFilePtr.p->fileNo << 16) + logFilePtr.p->currentMbyte; - }//if -}//Dblqh::writeFileDescriptor() - -/* ------------------------------------------------------------------------- */ -/* ------- WRITE THE HEADER PAGE OF A NEW FILE ------- */ -/* */ -/* SUBROUTINE SHORT NAME: WMO */ -/* ------------------------------------------------------------------------- */ -void 
Dblqh::writeFileHeaderOpen(Signal* signal, Uint32 wmoType) -{ - UintR twmoNoLogDescriptors; - -/* -------------------------------------------------- */ -/* WRITE HEADER INFORMATION IN THE NEW FILE. */ -/* -------------------------------------------------- */ - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_LOG_TYPE] = ZFD_TYPE; - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = - logFilePtr.p->fileNo; - if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { - jam(); - twmoNoLogDescriptors = cmaxLogFilesInPageZero; - } else { - jam(); - twmoNoLogDescriptors = logPartPtr.p->noLogFiles; - }//if - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD] = - twmoNoLogDescriptors; - - { - Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; - LogFileRecordPtr filePtr = logFilePtr; - for (Uint32 fd = 0; fd < twmoNoLogDescriptors; fd++) - { - jam(); - ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); - for (Uint32 mb = 0; mb < clogFileSize; mb ++) - { - jam(); - Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; - Uint32 pos1 = pos0 + clogFileSize; - Uint32 pos2 = pos1 + clogFileSize; - arrGuard(pos0, ZPAGE_SIZE); - arrGuard(pos1, ZPAGE_SIZE); - arrGuard(pos2, ZPAGE_SIZE); - logPagePtr.p->logPageWord[pos0] = filePtr.p->logMaxGciCompleted[mb]; - logPagePtr.p->logPageWord[pos1] = filePtr.p->logMaxGciStarted[mb]; - logPagePtr.p->logPageWord[pos2] = filePtr.p->logLastPrepRef[mb]; - } - filePtr.i = filePtr.p->prevLogFile; - } - pos += (twmoNoLogDescriptors * ZFD_MBYTE_SIZE * clogFileSize); - arrGuard(pos, ZPAGE_SIZE); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = pos; - logPagePtr.p->logPageWord[pos] = ZNEXT_LOG_RECORD_TYPE; - } - -/* ------------------------------------------------------- */ -/* THIS IS A SPECIAL WRITE OF THE FIRST PAGE IN THE */ -/* LOG FILE. THIS HAS SPECIAL SIGNIFANCE TO FIND */ -/* THE END OF THE LOG AT SYSTEM RESTART. */ -/* ------------------------------------------------------- */ - writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__); - if (wmoType == ZINIT) { - jam(); - lfoPtr.p->lfoState = LogFileOperationRecord::INIT_FIRST_PAGE; - } else { - jam(); - lfoPtr.p->lfoState = LogFileOperationRecord::FIRST_PAGE_WRITE_IN_LOGFILE; - }//if - logFilePtr.p->filePosition = 1; - if (wmoType == ZNORMAL) { - jam(); -/* -------------------------------------------------- */ -/* ALLOCATE A NEW PAGE SINCE THE CURRENT IS */ -/* WRITTEN. 
*/ -/* -------------------------------------------------- */ - seizeLogpage(signal); - initLogpage(signal); - logFilePtr.p->currentLogpage = logPagePtr.i; - logFilePtr.p->currentFilepage = logFilePtr.p->currentFilepage + 1; - }//if -}//Dblqh::writeFileHeaderOpen() - -/* -------------------------------------------------- */ -/* THE NEW FILE POSITION WILL ALWAYS BE 1 SINCE */ -/* WE JUST WROTE THE FIRST PAGE IN THE LOG FILE */ -/* -------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------- WRITE A MBYTE HEADER DURING INITIAL START ------- */ -/* */ -/* SUBROUTINE SHORT NAME: WIM */ -/* ------------------------------------------------------------------------- */ -void Dblqh::writeInitMbyte(Signal* signal) -{ - initLogpage(signal); - writeSinglePage(signal, logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE, - ZPAGE_SIZE - 1, __LINE__); - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_INIT_MBYTE; -}//Dblqh::writeInitMbyte() - -/* ------------------------------------------------------------------------- */ -/* ------- WRITE A SINGLE PAGE INTO A FILE ------- */ -/* */ -/* INPUT: TWSP_PAGE_NO THE PAGE NUMBER WRITTEN */ -/* SUBROUTINE SHORT NAME: WSP */ -/* ------------------------------------------------------------------------- */ -void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo, - Uint32 wordWritten, Uint32 place) -{ - seizeLfo(signal); - initLfo(signal); - lfoPtr.p->firstLfoPage = logPagePtr.i; - logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL; - - writeDbgInfoPageHeader(logPagePtr, place, pageNo, wordWritten); - // Calculate checksum for page - logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr); - - lfoPtr.p->lfoPageNo = pageNo; - lfoPtr.p->lfoWordWritten = wordWritten; - lfoPtr.p->noPagesRw = 1; -/* -------------------------------------------------- */ -/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */ -/* LOG RECORD HAS BEEN SENT AT THIS TIME. */ -/* -------------------------------------------------- */ - logPartPtr.p->logPartTimer = logPartPtr.p->logTimer; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - signal->theData[3] = ZLIST_OF_PAIRS_SYNCH; - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = 1; /* ONE PAGE WRITTEN */ - signal->theData[6] = logPagePtr.i; - signal->theData[7] = pageNo; - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA); - - if (DEBUG_REDO) - ndbout_c("writeSingle 1 page at part: %u file: %u pos: %u", - logPartPtr.i, - logFilePtr.p->fileNo, - pageNo); -}//Dblqh::writeSinglePage() - -/* ########################################################################## - * SYSTEM RESTART PHASE ONE MODULE - * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. - * - * THIS MODULE CONTAINS THE CODE FOR THE FIRST PHASE OF THE SYSTEM RESTART. - * THE AIM OF THIS PHASE IS TO FIND THE END OF THE LOG AND TO FIND - * INFORMATION ABOUT WHERE GLOBAL CHECKPOINTS ARE COMPLETED AND STARTED - * IN THE LOG. THIS INFORMATION IS NEEDED TO START PHASE THREE OF - * THE SYSTEM RESTART. - * ########################################################################## */ -/* -------------------------------------------------------------------------- - * A SYSTEM RESTART OR NODE RESTART IS ONGOING. WE HAVE NOW OPENED FILE 0 - * NOW WE NEED TO READ PAGE 0 TO FIND WHICH LOG FILE THAT WAS OPEN AT - * CRASH TIME. 
- * -------------------------------------------------------------------------- */ -void Dblqh::openSrFrontpageLab(Signal* signal) -{ - readSinglePage(signal, 0); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FRONTPAGE; - return; -}//Dblqh::openSrFrontpageLab() - -/* ------------------------------------------------------------------------- - * WE HAVE NOW READ PAGE 0 IN FILE 0. CHECK THE LAST OPEN FILE. ACTUALLY THE - * LAST OPEN FILE COULD BE THE NEXT AFTER THAT. CHECK THAT FIRST. WHEN THE - * LAST WAS FOUND WE CAN FIND ALL THE NEEDED INFORMATION WHERE TO START AND - * STOP READING THE LOG. - * -------------------------------------------------------------------------- */ -void Dblqh::readSrFrontpageLab(Signal* signal) -{ - Uint32 fileNo = logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO]; - if (fileNo == 0) { - jam(); - /* ---------------------------------------------------------------------- - * FILE 0 WAS ALSO LAST FILE SO WE DO NOT NEED TO READ IT AGAIN. - * ---------------------------------------------------------------------- */ - readSrLastFileLab(signal); - return; - }//if - /* ------------------------------------------------------------------------ - * CLOSE FILE 0 SO THAT WE HAVE CLOSED ALL FILES WHEN STARTING TO READ - * THE FRAGMENT LOG. ALSO RELEASE PAGE ZERO. - * ------------------------------------------------------------------------ */ - releaseLogpage(signal); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; - closeFile(signal, logFilePtr, __LINE__); - LogFileRecordPtr locLogFilePtr; - findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_LAST_FILE; - openFileRw(signal, locLogFilePtr); - return; -}//Dblqh::readSrFrontpageLab() - -void Dblqh::openSrLastFileLab(Signal* signal) -{ - readSinglePage(signal, 0); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_FILE; - return; -}//Dblqh::openSrLastFileLab() - -void Dblqh::readSrLastFileLab(Signal* signal) -{ - logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP]; - if (DEBUG_REDO) - ndbout_c("readSrLastFileLab part: %u logExecState: %u logPartState: %u logLap: %u", - logPartPtr.i, - logPartPtr.p->logExecState, - logPartPtr.p->logPartState, - logPartPtr.p->logLap); - if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { - jam(); - initGciInLogFileRec(signal, cmaxLogFilesInPageZero); - } else { - jam(); - initGciInLogFileRec(signal, logPartPtr.p->noLogFiles); - }//if - releaseLogpage(signal); - /* ------------------------------------------------------------------------ - * NOW WE HAVE FOUND THE LAST LOG FILE. WE ALSO NEED TO FIND THE LAST - * MBYTE THAT WAS LAST WRITTEN BEFORE THE SYSTEM CRASH. 
- * ------------------------------------------------------------------------ */ - logPartPtr.p->lastLogfile = logFilePtr.i; - readSinglePage(signal, 0); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE; - logFilePtr.p->currentMbyte = 0; - return; -}//Dblqh::readSrLastFileLab() - -void Dblqh::readSrLastMbyteLab(Signal* signal) -{ - if (logPartPtr.p->lastMbyte == ZNIL) { - if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] < logPartPtr.p->logLap) { - jam(); - logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1; - }//if - }//if - arrGuard(logFilePtr.p->currentMbyte, clogFileSize); - logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_COMPLETED]; - logFilePtr.p->logMaxGciStarted[logFilePtr.p->currentMbyte] = - logPagePtr.p->logPageWord[ZPOS_MAX_GCI_STARTED]; - logFilePtr.p->logLastPrepRef[logFilePtr.p->currentMbyte] = - logPagePtr.p->logPageWord[ZLAST_LOG_PREP_REF]; - releaseLogpage(signal); - if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { - jam(); - logFilePtr.p->currentMbyte++; - readSinglePage(signal, ZPAGES_IN_MBYTE * logFilePtr.p->currentMbyte); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_LAST_MBYTE; - return; - } else { - jam(); - /* ---------------------------------------------------------------------- - * THE LOG WAS IN THE LAST MBYTE WHEN THE CRASH OCCURRED SINCE ALL - * LOG LAPS ARE EQUAL TO THE CURRENT LOG LAP. - * ---------------------------------------------------------------------- */ - if (logPartPtr.p->lastMbyte == ZNIL) { - jam(); - logPartPtr.p->lastMbyte = clogFileSize - 1; - }//if - }//if - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; - closeFile(signal, logFilePtr, __LINE__); - if (logPartPtr.p->noLogFiles > cmaxLogFilesInPageZero) { - Uint32 fileNo; - if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { - jam(); - fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; - } else { - jam(); - fileNo = - (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - cmaxLogFilesInPageZero; - }//if - if (fileNo == 0) { - jam(); - /* -------------------------------------------------------------------- - * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE - * MOMENT. - * -------------------------------------------------------------------- */ - fileNo = 1; - logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - (cmaxLogFilesInPageZero - 1); - } else { - jam(); - logPartPtr.p->srRemainingFiles = - logPartPtr.p->noLogFiles - cmaxLogFilesInPageZero; - }//if - LogFileRecordPtr locLogFilePtr; - findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE; - openFileRw(signal, locLogFilePtr); - return; - }//if - /* ------------------------------------------------------------------------ - * THERE WERE NO NEED TO READ ANY MORE PAGE ZERO IN OTHER FILES. - * WE NOW HAVE ALL THE NEEDED INFORMATION ABOUT THE GCI'S THAT WE NEED. - * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE. 
- * ------------------------------------------------------------------------ */ - return; -}//Dblqh::readSrLastMbyteLab() - -void Dblqh::openSrNextFileLab(Signal* signal) -{ - readSinglePage(signal, 0); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_NEXT_FILE; - return; -}//Dblqh::openSrNextFileLab() - -void Dblqh::readSrNextFileLab(Signal* signal) -{ - if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { - jam(); - initGciInLogFileRec(signal, cmaxLogFilesInPageZero); - } else { - jam(); - initGciInLogFileRec(signal, logPartPtr.p->srRemainingFiles); - }//if - releaseLogpage(signal); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_SR; - closeFile(signal, logFilePtr, __LINE__); - if (logPartPtr.p->srRemainingFiles > cmaxLogFilesInPageZero) { - Uint32 fileNo; - if (logFilePtr.p->fileNo >= cmaxLogFilesInPageZero) { - jam(); - fileNo = logFilePtr.p->fileNo - cmaxLogFilesInPageZero; - } else { - jam(); - fileNo = - (logPartPtr.p->noLogFiles + logFilePtr.p->fileNo) - - cmaxLogFilesInPageZero; - }//if - if (fileNo == 0) { - jam(); - /* -------------------------------------------------------------------- - * AVOID USING FILE 0 AGAIN SINCE THAT IS PROBABLY CLOSING AT THE MOMENT. - * -------------------------------------------------------------------- */ - fileNo = 1; - logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - (cmaxLogFilesInPageZero - 1); - } else { - jam(); - logPartPtr.p->srRemainingFiles = - logPartPtr.p->srRemainingFiles - cmaxLogFilesInPageZero; - }//if - LogFileRecordPtr locLogFilePtr; - findLogfile(signal, fileNo, logPartPtr, &locLogFilePtr); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_NEXT_FILE; - openFileRw(signal, locLogFilePtr); - }//if - /* ------------------------------------------------------------------------ - * THERE WERE NO NEED TO READ ANY MORE PAGE ZERO IN OTHER FILES. - * WE NOW HAVE ALL THE NEEDED INFORMATION ABOUT THE GCI'S THAT WE NEED. - * NOW JUST WAIT FOR CLOSE OPERATIONS TO COMPLETE. - * ------------------------------------------------------------------------ */ - return; -}//Dblqh::readSrNextFileLab() - -void Dblqh::closingSrLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.i = logPartPtr.p->firstLogfile; - do { - jam(); - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - if (logFilePtr.p->logFileStatus != LogFileRecord::CLOSED) { - jam(); - /* -------------------------------------------------------------------- - * EXIT AND WAIT FOR REMAINING LOG FILES TO COMPLETE THEIR WORK. - * -------------------------------------------------------------------- */ - return; - }//if - logFilePtr.i = logFilePtr.p->nextLogFile; - } while (logFilePtr.i != logPartPtr.p->firstLogfile); - /* ------------------------------------------------------------------------ - * ALL FILES IN THIS PART HAVE BEEN CLOSED. THIS INDICATES THAT THE FIRST - * PHASE OF THE SYSTEM RESTART HAVE BEEN CONCLUDED FOR THIS LOG PART. - * CHECK IF ALL OTHER LOG PARTS ARE ALSO COMPLETED. 
- * ------------------------------------------------------------------------ */ - logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE_COMPLETED; - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - if (logPartPtr.p->logPartState != LogPartRecord::SR_FIRST_PHASE_COMPLETED) { - jam(); - /* -------------------------------------------------------------------- - * EXIT AND WAIT FOR THE REST OF THE LOG PARTS TO COMPLETE. - * -------------------------------------------------------------------- */ - return; - }//if - }//for - /* ------------------------------------------------------------------------ - * THE FIRST PHASE HAVE BEEN COMPLETED. - * ------------------------------------------------------------------------ */ - signal->theData[0] = ZSR_PHASE3_START; - signal->theData[1] = ZSR_PHASE1_COMPLETED; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::closingSrLab() - -/* ########################################################################## - * ####### SYSTEM RESTART PHASE TWO MODULE ####### - * - * THIS MODULE HANDLES THE SYSTEM RESTART WHERE LQH CONTROLS TUP AND ACC TO - * ENSURE THAT THEY HAVE KNOWLEDGE OF ALL FRAGMENTS AND HAVE DONE THE NEEDED - * READING OF DATA FROM FILE AND EXECUTION OF LOCAL LOGS. THIS PROCESS - * EXECUTES CONCURRENTLY WITH PHASE ONE OF THE SYSTEM RESTART. THIS PHASE - * FINDS THE INFORMATION ABOUT THE FRAGMENT LOG NEEDED TO EXECUTE THE FRAGMENT - * LOG. - * WHEN TUP AND ACC HAVE PREPARED ALL FRAGMENTS THEN LQH ORDERS THOSE LQH'S - * THAT ARE RESPONSIBLE TO EXECUTE THE FRAGMENT LOGS TO DO SO. IT IS POSSIBLE - * THAT ANOTHER NODE EXECUTES THE LOG FOR A FRAGMENT RESIDING AT THIS NODE. - * ########################################################################## */ -/* ***************>> */ -/* START_FRAGREQ > */ -/* ***************>> */ -void Dblqh::execSTART_FRAGREQ(Signal* signal) -{ - const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0]; - jamEntry(); - - tabptr.i = startFragReq->tableId; - Uint32 fragId = startFragReq->fragId; - - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - if (!getFragmentrec(signal, fragId)) { - startFragRefLab(signal); - return; - }//if - tabptr.p->tableStatus = Tablerec::TABLE_DEFINED; - - Uint32 lcpNo = startFragReq->lcpNo; - Uint32 noOfLogNodes = startFragReq->noOfLogNodes; - Uint32 lcpId = startFragReq->lcpId; - - ndbrequire(noOfLogNodes <= 4); - fragptr.p->fragStatus = Fragrecord::CRASH_RECOVERING; - fragptr.p->srBlockref = startFragReq->userRef; - fragptr.p->srUserptr = startFragReq->userPtr; - fragptr.p->srChkpnr = lcpNo; - if (lcpNo == (MAX_LCP_STORED - 1)) { - jam(); - fragptr.p->lcpId[lcpNo] = lcpId; - } else if (lcpNo < (MAX_LCP_STORED - 1)) { - jam(); - fragptr.p->lcpId[lcpNo] = lcpId; - } else { - ndbrequire(lcpNo == ZNIL); - jam(); - }//if - fragptr.p->srNoLognodes = noOfLogNodes; - fragptr.p->logFlag = Fragrecord::STATE_FALSE; - fragptr.p->srStatus = Fragrecord::SS_IDLE; - - if (noOfLogNodes > 0) { - jam(); - for (Uint32 i = 0; i < noOfLogNodes; i++) { - jam(); - fragptr.p->srStartGci[i] = startFragReq->startGci[i]; - fragptr.p->srLastGci[i] = startFragReq->lastGci[i]; - fragptr.p->srLqhLognode[i] = startFragReq->lqhLogNode[i]; - }//for - fragptr.p->newestGci = startFragReq->lastGci[noOfLogNodes - 1]; - } else { - fragptr.p->newestGci = cnewestGci; - }//if - - if (lcpNo == ZNIL) - { - jam(); - /** - * THERE WAS NO LOCAL CHECKPOINT AVAILABLE FOR THIS FRAGMENT. WE DO - * NOT NEED TO READ IN THE LOCAL FRAGMENT. 
- */ - /** - * Or this is not "first" fragment in table - * RESTORE_LCP_REQ will currently restore all fragments - */ - c_lcp_complete_fragments.add(fragptr); - - signal->theData[0] = tabptr.i; - signal->theData[1] = fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL); - jamEntry(); - return; - } - else - { - jam(); - c_tup->disk_restart_lcp_id(tabptr.i, fragId, lcpId); - jamEntry(); - } - - c_lcpId = (c_lcpId == 0 ? lcpId : c_lcpId); - c_lcpId = (c_lcpId < lcpId ? c_lcpId : lcpId); - c_lcp_waiting_fragments.add(fragptr); - if(c_lcp_restoring_fragments.isEmpty()) - send_restore_lcp(signal); -}//Dblqh::execSTART_FRAGREQ() - -void -Dblqh::send_restore_lcp(Signal * signal) -{ - c_lcp_waiting_fragments.first(fragptr); - c_lcp_waiting_fragments.remove(fragptr); - c_lcp_restoring_fragments.add(fragptr); - - RestoreLcpReq* req= (RestoreLcpReq*)signal->getDataPtrSend(); - req->senderData = fragptr.i; - req->senderRef = reference(); - req->tableId = fragptr.p->tabRef; - req->fragmentId = fragptr.p->fragId; - req->lcpNo = fragptr.p->srChkpnr; - req->lcpId = fragptr.p->lcpId[fragptr.p->srChkpnr]; - - sendSignal(RESTORE_REF, GSN_RESTORE_LCP_REQ, signal, - RestoreLcpReq::SignalLength, JBB); -} - -void Dblqh::startFragRefLab(Signal* signal) -{ - const StartFragReq * const startFragReq = (StartFragReq *)&signal->theData[0]; - BlockReference userRef = startFragReq->userRef; - Uint32 userPtr = startFragReq->userPtr; - signal->theData[0] = userPtr; - signal->theData[1] = terrorCode; - signal->theData[2] = cownNodeid; - sendSignal(userRef, GSN_START_FRAGREF, signal, 3, JBB); - return; -}//Dblqh::startFragRefLab() - -void Dblqh::execRESTORE_LCP_REF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); - return; -} - -void Dblqh::execRESTORE_LCP_CONF(Signal* signal) -{ - jamEntry(); - RestoreLcpConf* conf= (RestoreLcpConf*)signal->getDataPtr(); - fragptr.i = conf->senderData; - c_fragment_pool.getPtr(fragptr); - - c_lcp_restoring_fragments.remove(fragptr); - c_lcp_complete_fragments.add(fragptr); - - /** - * Disable expand check in ACC - * before running REDO - */ - tabptr.i = fragptr.p->tabRef; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - - signal->theData[0] = fragptr.p->tabRef; - signal->theData[1] = fragptr.p->fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - - if (!c_lcp_waiting_fragments.isEmpty()) - { - send_restore_lcp(signal); - return; - } - - if (c_lcp_restoring_fragments.isEmpty() && cstartRecReq == 1) - { - jam(); - /* ---------------------------------------------------------------- - * WE HAVE ALSO RECEIVED AN INDICATION THAT NO MORE FRAGMENTS - * NEEDS RESTART. - * NOW IT IS TIME TO START EXECUTING THE UNDO LOG. - * ---------------------------------------------------------------- - * WE ARE NOW IN A POSITION TO ORDER TUP AND ACC TO START - * EXECUTING THEIR UNDO LOGS. THIS MUST BE DONE BEFORE THE - * FRAGMENT LOGS CAN BE EXECUTED. 
- * ---------------------------------------------------------------- */ - csrExecUndoLogState = EULS_STARTED; - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - lcpPtr.p->m_outstanding = 1; - - signal->theData[0] = c_lcpId; - sendSignal(LGMAN_REF, GSN_START_RECREQ, signal, 1, JBB); - return; - } -} - -/* ***************> */ -/* START_RECREQ > */ -/* ***************> */ -void Dblqh::execSTART_RECREQ(Signal* signal) -{ - CRASH_INSERTION(5027); - - jamEntry(); - StartRecReq * const req = (StartRecReq*)&signal->theData[0]; - cmasterDihBlockref = req->senderRef; - - crestartOldestGci = req->keepGci; - crestartNewestGci = req->lastCompletedGci; - cnewestGci = req->newestGci; - - ndbrequire(req->receivingNodeId == cownNodeid); - - cnewestCompletedGci = cnewestGci; - cstartRecReq = 1; - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci; - }//for - /* ------------------------------------------------------------------------ - * WE HAVE TO SET THE OLDEST AND THE NEWEST GLOBAL CHECKPOINT IDENTITY - * THAT WILL SURVIVE THIS SYSTEM RESTART. THIS IS NEEDED SO THAT WE CAN - * SET THE LOG HEAD AND LOG TAIL PROPERLY BEFORE STARTING THE SYSTEM AGAIN. - * WE ALSO NEED TO SET CNEWEST_GCI TO ENSURE THAT LOG RECORDS ARE EXECUTED - * WITH A PROPER GCI. - *------------------------------------------------------------------------ */ - - if (c_lcp_restoring_fragments.isEmpty()) - { - jam(); - csrExecUndoLogState = EULS_STARTED; - - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - lcpPtr.p->m_outstanding = 1; - - signal->theData[0] = c_lcpId; - sendSignal(LGMAN_REF, GSN_START_RECREQ, signal, 1, JBB); - }//if -}//Dblqh::execSTART_RECREQ() - -/* ***************>> */ -/* START_RECCONF > */ -/* ***************>> */ -void Dblqh::execSTART_RECCONF(Signal* signal) -{ - jamEntry(); - lcpPtr.i = 0; - ptrAss(lcpPtr, lcpRecord); - ndbrequire(csrExecUndoLogState == EULS_STARTED); - ndbrequire(lcpPtr.p->m_outstanding); - - Uint32 sender= signal->theData[0]; - - lcpPtr.p->m_outstanding--; - if(lcpPtr.p->m_outstanding) - { - jam(); - return; - } - - switch(refToBlock(sender)){ - case TSMAN: - jam(); - break; - case LGMAN: - jam(); - lcpPtr.p->m_outstanding++; - signal->theData[0] = c_lcpId; - sendSignal(TSMAN_REF, GSN_START_RECREQ, signal, 1, JBB); - return; - break; - default: - ndbrequire(false); - } - - jam(); - csrExecUndoLogState = EULS_COMPLETED; - - if(cstartType == NodeState::ST_INITIAL_NODE_RESTART) - { - jam(); - cstartRecReq = 2; - - StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); - conf->startingNodeId = getOwnNodeId(); - sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - return; - } - - startExecSr(signal); -} - -/* ***************> */ -/* START_RECREF > */ -/* ***************> */ -void Dblqh::execSTART_RECREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -}//Dblqh::execSTART_RECREF() - -/* ***************>> */ -/* START_EXEC_SR > */ -/* ***************>> */ -void Dblqh::execSTART_EXEC_SR(Signal* signal) -{ - jamEntry(); - fragptr.i = signal->theData[0]; - Uint32 next = RNIL; - - if (fragptr.i == RNIL) - { - jam(); - /* ---------------------------------------------------------------------- - * NO MORE FRAGMENTS TO START EXECUTING THE LOG ON. - * SEND EXEC_SRREQ TO ALL LQH TO INDICATE THAT THIS NODE WILL - * NOT REQUEST ANY MORE FRAGMENTS TO EXECUTE THE FRAGMENT LOG ON. 
- * ---------------------------------------------------------------------- - * WE NEED TO SEND THOSE SIGNALS EVEN IF WE HAVE NOT REQUESTED - * ANY FRAGMENTS PARTICIPATE IN THIS PHASE. - * --------------------------------------------------------------------- */ - NodeReceiverGroup rg(DBLQH, m_sr_nodes); - signal->theData[0] = cownNodeid; - sendSignal(rg, GSN_EXEC_SRREQ, signal, 1, JBB); - return; - } else { - jam(); - c_lcp_complete_fragments.getPtr(fragptr); - next = fragptr.p->nextList; - - if (fragptr.p->srNoLognodes > csrPhasesCompleted) - { - jam(); - cnoOutstandingExecFragReq++; - - Uint32 index = csrPhasesCompleted; - arrGuard(index, MAX_LOG_EXEC); - BlockReference ref = calcLqhBlockRef(fragptr.p->srLqhLognode[index]); - fragptr.p->srStatus = Fragrecord::SS_STARTED; - - /* -------------------------------------------------------------------- - * SINCE WE CAN HAVE SEVERAL LQH NODES PER FRAGMENT WE CALCULATE - * THE LQH POINTER IN SUCH A WAY THAT WE CAN DEDUCE WHICH OF THE - * LQH NODES THAT HAS RESPONDED WHEN EXEC_FRAGCONF IS RECEIVED. - * ------------------------------------------------------------------- */ - ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0]; - execFragReq->userPtr = fragptr.i; - execFragReq->userRef = cownref; - execFragReq->tableId = fragptr.p->tabRef; - execFragReq->fragId = fragptr.p->fragId; - execFragReq->startGci = fragptr.p->srStartGci[index]; - execFragReq->lastGci = fragptr.p->srLastGci[index]; - sendSignal(ref, GSN_EXEC_FRAGREQ, signal, - ExecFragReq::SignalLength, JBB); - - } - signal->theData[0] = next; - sendSignal(cownref, GSN_START_EXEC_SR, signal, 1, JBB); - }//if - return; -}//Dblqh::execSTART_EXEC_SR() - -/* ***************> */ -/* EXEC_FRAGREQ > */ -/* ***************> */ -/* -------------------------------------------------------------------------- - * THIS SIGNAL IS USED TO REQUEST THAT A FRAGMENT PARTICIPATES IN EXECUTING - * THE LOG IN THIS NODE. 
- * ------------------------------------------------------------------------- */ -void Dblqh::execEXEC_FRAGREQ(Signal* signal) -{ - ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0]; - jamEntry(); - tabptr.i = execFragReq->tableId; - Uint32 fragId = execFragReq->fragId; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - ndbrequire(getFragmentrec(signal, fragId)); - - ndbrequire(fragptr.p->execSrNoReplicas < 4); - fragptr.p->execSrBlockref[fragptr.p->execSrNoReplicas] = execFragReq->userRef; - fragptr.p->execSrUserptr[fragptr.p->execSrNoReplicas] = execFragReq->userPtr; - fragptr.p->execSrStartGci[fragptr.p->execSrNoReplicas] = execFragReq->startGci; - fragptr.p->execSrLastGci[fragptr.p->execSrNoReplicas] = execFragReq->lastGci; - fragptr.p->execSrStatus = Fragrecord::ACTIVE; - fragptr.p->execSrNoReplicas++; - cnoFragmentsExecSr++; - return; -}//Dblqh::execEXEC_FRAGREQ() - -void Dblqh::sendExecFragRefLab(Signal* signal) -{ - ExecFragReq * const execFragReq = (ExecFragReq *)&signal->theData[0]; - BlockReference retRef = execFragReq->userRef; - Uint32 retPtr = execFragReq->userPtr; - - signal->theData[0] = retPtr; - signal->theData[1] = terrorCode; - sendSignal(retRef, GSN_EXEC_FRAGREF, signal, 2, JBB); - return; -}//Dblqh::sendExecFragRefLab() - -/* ***************>> */ -/* EXEC_FRAGCONF > */ -/* ***************>> */ -void Dblqh::execEXEC_FRAGCONF(Signal* signal) -{ - jamEntry(); - fragptr.i = signal->theData[0]; - c_fragment_pool.getPtr(fragptr); - fragptr.p->srStatus = Fragrecord::SS_COMPLETED; - - ndbrequire(cnoOutstandingExecFragReq); - cnoOutstandingExecFragReq--; - if (fragptr.p->srNoLognodes == csrPhasesCompleted + 1) - { - jam(); - - fragptr.p->logFlag = Fragrecord::STATE_TRUE; - fragptr.p->fragStatus = Fragrecord::FSACTIVE; - - signal->theData[0] = fragptr.p->srUserptr; - signal->theData[1] = cownNodeid; - sendSignal(fragptr.p->srBlockref, GSN_START_FRAGCONF, signal, 2, JBB); - } - - return; -}//Dblqh::execEXEC_FRAGCONF() - -/* ***************> */ -/* EXEC_FRAGREF > */ -/* ***************> */ -void Dblqh::execEXEC_FRAGREF(Signal* signal) -{ - jamEntry(); - terrorCode = signal->theData[1]; - systemErrorLab(signal, __LINE__); - return; -}//Dblqh::execEXEC_FRAGREF() - -/* *************** */ -/* EXEC_SRCONF > */ -/* *************** */ -void Dblqh::execEXEC_SRCONF(Signal* signal) -{ - jamEntry(); - Uint32 nodeId = signal->theData[0]; - arrGuard(nodeId, MAX_NDB_NODES); - m_sr_exec_sr_conf.set(nodeId); - - if (!m_sr_nodes.equal(m_sr_exec_sr_conf)) - { - jam(); - /* ------------------------------------------------------------------ - * ALL NODES HAVE NOT REPORTED COMPLETION OF EXECUTING FRAGMENT - * LOGS YET. - * ----------------------------------------------------------------- */ - return; - } - - /* ------------------------------------------------------------------------ - * CLEAR NODE SYSTEM RESTART EXECUTION STATE TO PREPARE FOR NEXT PHASE OF - * LOG EXECUTION. - * ----------------------------------------------------------------------- */ - m_sr_exec_sr_conf.clear(); - - /* ------------------------------------------------------------------------ - * NOW CHECK IF ALL FRAGMENTS IN THIS PHASE HAVE COMPLETED. IF SO START THE - * NEXT PHASE. 
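execEXEC_SRCONF above is a barrier: each participating LQH sets its bit in m_sr_exec_sr_conf, and only when that bitmask equals m_sr_nodes does the phase advance, after which the mask is cleared for the next round. A self-contained sketch of the same idea (node count and names are illustrative):

#include <bitset>
#include <cstddef>

constexpr std::size_t MAX_NODES = 256;   // illustrative upper bound

struct NodeBarrier {
  std::bitset<MAX_NODES> participants;   // plays the role of m_sr_nodes
  std::bitset<MAX_NODES> reported;       // plays the role of m_sr_exec_sr_conf

  // Record one node's confirmation; returns true when the last one arrives.
  bool report(std::size_t nodeId) {
    reported.set(nodeId);
    if (reported != participants)
      return false;                      // still waiting for some node
    reported.reset();                    // clear state for the next phase
    return true;
  }
};

The same shape recurs in execEXEC_SRREQ with m_sr_exec_sr_req; only the set being compared differs.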
- * ----------------------------------------------------------------------- */ - ndbrequire(cnoOutstandingExecFragReq == 0); - - execSrCompletedLab(signal); - return; -}//Dblqh::execEXEC_SRCONF() - -void Dblqh::execSrCompletedLab(Signal* signal) -{ - csrPhasesCompleted++; - /* ------------------------------------------------------------------------ - * ALL FRAGMENTS WERE COMPLETED. THIS PHASE IS COMPLETED. IT IS NOW TIME TO - * START THE NEXT PHASE. - * ----------------------------------------------------------------------- */ - if (csrPhasesCompleted >= 4) { - jam(); - /* ---------------------------------------------------------------------- - * THIS WAS THE LAST PHASE. WE HAVE NOW COMPLETED THE EXECUTION THE - * FRAGMENT LOGS IN ALL NODES. BEFORE WE SEND START_RECCONF TO THE - * MASTER DIH TO INDICATE A COMPLETED SYSTEM RESTART IT IS NECESSARY - * TO FIND THE HEAD AND THE TAIL OF THE LOG WHEN NEW OPERATIONS START - * TO COME AGAIN. - * - * THE FIRST STEP IS TO FIND THE HEAD AND TAIL MBYTE OF EACH LOG PART. - * TO DO THIS WE REUSE THE CONTINUEB SIGNAL SR_LOG_LIMITS. THEN WE - * HAVE TO FIND THE ACTUAL PAGE NUMBER AND PAGE INDEX WHERE TO - * CONTINUE WRITING THE LOG AFTER THE SYSTEM RESTART. - * --------------------------------------------------------------------- */ - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED; - logPartPtr.p->logLastGci = crestartNewestGci; - logPartPtr.p->logStartGci = crestartOldestGci; - logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP; - if (logPartPtr.p->headFileNo == ZNIL) { - jam(); - /* ----------------------------------------------------------------- - * IF WE HAVEN'T FOUND ANY HEAD OF THE LOG THEN WE ARE IN SERIOUS - * PROBLEM. THIS SHOULD NOT OCCUR. IF IT OCCURS ANYWAY THEN WE - * HAVE TO FIND A CURE FOR THIS PROBLEM. - * ----------------------------------------------------------------- */ - systemErrorLab(signal, __LINE__); - return; - }//if - signal->theData[0] = ZSR_LOG_LIMITS; - signal->theData[1] = logPartPtr.i; - signal->theData[2] = logPartPtr.p->lastLogfile; - signal->theData[3] = logPartPtr.p->lastMbyte; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - }//for - return; - } else { - jam(); - /* ---------------------------------------------------------------------- - * THERE ARE YET MORE PHASES TO RESTART. - * WE MUST INITIALISE DATA FOR NEXT PHASE AND SEND START SIGNAL. - * --------------------------------------------------------------------- */ - csrPhaseStarted = ZSR_PHASE1_COMPLETED; // Set correct state first... - startExecSr(signal); - }//if - return; -}//Dblqh::execSrCompletedLab() - -/* ************>> */ -/* EXEC_SRREQ > */ -/* ************>> */ -void Dblqh::execEXEC_SRREQ(Signal* signal) -{ - jamEntry(); - Uint32 nodeId = signal->theData[0]; - ndbrequire(nodeId < MAX_NDB_NODES); - m_sr_exec_sr_req.set(nodeId); - if (!m_sr_exec_sr_req.equal(m_sr_nodes)) - { - jam(); - return; - } - - /* ------------------------------------------------------------------------ - * CLEAR NODE SYSTEM RESTART STATE TO PREPARE FOR NEXT PHASE OF LOG - * EXECUTION - * ----------------------------------------------------------------------- */ - m_sr_exec_sr_req.clear(); - - if (csrPhasesCompleted != 0) { - /* ---------------------------------------------------------------------- - * THE FIRST PHASE MUST ALWAYS EXECUTE THE LOG. 
- * --------------------------------------------------------------------- */ - if (cnoFragmentsExecSr == 0) { - jam(); - /* -------------------------------------------------------------------- - * THERE WERE NO FRAGMENTS THAT NEEDED TO EXECUTE THE LOG IN THIS PHASE. - * ------------------------------------------------------------------- */ - srPhase3Comp(signal); - return; - }//if - }//if - /* ------------------------------------------------------------------------ - * NOW ALL NODES HAVE SENT ALL EXEC_FRAGREQ. NOW WE CAN START EXECUTING THE - * LOG FROM THE MINIMUM GCI NEEDED UNTIL THE MAXIMUM GCI NEEDED. - * - * WE MUST FIRST CHECK IF THE FIRST PHASE OF THE SYSTEM RESTART HAS BEEN - * COMPLETED. THIS HANDLING IS PERFORMED IN THE FILE SYSTEM MODULE - * ----------------------------------------------------------------------- */ - signal->theData[0] = ZSR_PHASE3_START; - signal->theData[1] = ZSR_PHASE2_COMPLETED; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::execEXEC_SRREQ() - -/* ######################################################################### */ -/* SYSTEM RESTART PHASE THREE MODULE */ -/* THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. */ -/* */ -/* THIS MODULE IS CONCERNED WITH EXECUTING THE FRAGMENT LOG. IT DOES ALSO */ -/* CONTAIN SIGNAL RECEPTIONS LQHKEYCONF AND LQHKEYREF SINCE LQHKEYREQ IS USED*/ -/* TO EXECUTE THE LOG RECORDS. */ -/* */ -/* BEFORE IT STARTS IT HAS BEEN DECIDED WHERE TO START AND WHERE TO STOP */ -/* READING THE FRAGMENT LOG BY USING THE INFORMATION ABOUT GCI DISCOVERED IN */ -/* PHASE ONE OF THE SYSTEM RESTART. */ -/* ######################################################################### */ -/*---------------------------------------------------------------------------*/ -/* PHASE THREE OF THE SYSTEM RESTART CAN NOW START. ONE OF THE PHASES HAVE */ -/* COMPLETED. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::srPhase3Start(Signal* signal) -{ - UintR tsrPhaseStarted; - - jamEntry(); - - tsrPhaseStarted = signal->theData[1]; - if (csrPhaseStarted == ZSR_NO_PHASE_STARTED) { - jam(); - csrPhaseStarted = tsrPhaseStarted; - return; - }//if - ndbrequire(csrPhaseStarted != tsrPhaseStarted); - ndbrequire(csrPhaseStarted != ZSR_BOTH_PHASES_STARTED); - - csrPhaseStarted = ZSR_BOTH_PHASES_STARTED; - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_STARTED; - logPartPtr.p->logStartGci = (UintR)-1; - if (csrPhasesCompleted == 0) { - jam(); - /* -------------------------------------------------------------------- - * THE FIRST PHASE WE MUST ENSURE THAT IT REACHES THE END OF THE LOG. 
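srPhase3Start above is a rendezvous between two independent completion events: whichever of the two phase signals arrives first is only recorded, and the work starts when the other, different one shows up. A compact sketch of that state machine, with hypothetical names mirroring the ndbrequire checks:

#include <cassert>

enum class PhaseEvent { None, Phase1Done, Phase2Done, Both };

struct PhaseRendezvous {
  PhaseEvent seen = PhaseEvent::None;      // plays the role of csrPhaseStarted

  // Returns true once both distinct events have been observed.
  bool arrived(PhaseEvent ev) {
    assert(ev == PhaseEvent::Phase1Done || ev == PhaseEvent::Phase2Done);
    if (seen == PhaseEvent::None) {
      seen = ev;                           // first event: just remember it
      return false;
    }
    assert(seen != ev);                    // the same event must not arrive twice
    assert(seen != PhaseEvent::Both);
    seen = PhaseEvent::Both;
    return true;                           // second, different event: start phase three
  }
};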
- * ------------------------------------------------------------------- */ - logPartPtr.p->logLastGci = crestartNewestGci; - } else { - jam(); - logPartPtr.p->logLastGci = 2; - }//if - }//for - - jam(); - c_lcp_complete_fragments.first(fragptr); - signal->theData[0] = ZSR_GCI_LIMITS; - signal->theData[1] = fragptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::srPhase3Start() - -/* -------------------------------------------------------------------------- - * WE NOW WE NEED TO FIND THE LIMITS WITHIN WHICH TO EXECUTE - * THE FRAGMENT LOG - * ------------------------------------------------------------------------- */ -void Dblqh::srGciLimits(Signal* signal) -{ - jamEntry(); - fragptr.i = signal->theData[0]; - Uint32 loopCount = 0; - logPartPtr.i = 0; - ptrAss(logPartPtr, logPartRecord); - while (fragptr.i != RNIL){ - jam(); - c_lcp_complete_fragments.getPtr(fragptr); - ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4); - for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) { - jam(); - if (fragptr.p->execSrStartGci[i] < logPartPtr.p->logStartGci) { - jam(); - logPartPtr.p->logStartGci = fragptr.p->execSrStartGci[i]; - }//if - if (fragptr.p->execSrLastGci[i] > logPartPtr.p->logLastGci) { - jam(); - logPartPtr.p->logLastGci = fragptr.p->execSrLastGci[i]; - } - } - - loopCount++; - if (loopCount > 20) { - jam(); - signal->theData[0] = ZSR_GCI_LIMITS; - signal->theData[1] = fragptr.p->nextList; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; - } else { - jam(); - fragptr.i = fragptr.p->nextList; - }//if - } - - for(Uint32 i = 1; i<4; i++) - { - LogPartRecordPtr tmp; - tmp.i = i; - ptrAss(tmp, logPartRecord); - tmp.p->logStartGci = logPartPtr.p->logStartGci; - tmp.p->logLastGci = logPartPtr.p->logLastGci; - } - - if (logPartPtr.p->logStartGci == (UintR)-1) { - jam(); - /* -------------------------------------------------------------------- - * THERE WERE NO FRAGMENTS TO INSTALL WE WILL EXECUTE THE LOG AS - * SHORT AS POSSIBLE TO REACH THE END OF THE LOG. THIS WE DO BY - * STARTING AT THE STOP GCI. - * ------------------------------------------------------------------- */ - logPartPtr.p->logStartGci = logPartPtr.p->logLastGci; - }//if - - for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP; - signal->theData[0] = ZSR_LOG_LIMITS; - signal->theData[1] = logPartPtr.i; - signal->theData[2] = logPartPtr.p->lastLogfile; - signal->theData[3] = logPartPtr.p->lastMbyte; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - }//for -}//Dblqh::srGciLimits() - -/* -------------------------------------------------------------------------- - * IT IS NOW TIME TO FIND WHERE TO START EXECUTING THE LOG. - * THIS SIGNAL IS SENT FOR EACH LOG PART AND STARTS THE EXECUTION - * OF THE LOG FOR THIS PART. - *-------------------------------------------------------------------------- */ -void Dblqh::srLogLimits(Signal* signal) -{ - Uint32 tlastPrepRef; - Uint32 tmbyte; - - jamEntry(); - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.i = signal->theData[1]; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - tmbyte = signal->theData[2]; - Uint32 loopCount = 0; - /* ------------------------------------------------------------------------ - * WE ARE SEARCHING FOR THE START AND STOP MBYTE OF THE LOG THAT IS TO BE - * EXECUTED. 
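srGciLimits above derives the execution window for the redo log: the smallest start GCI and the largest last GCI over every replica of every fragment, falling back to an empty window when there is nothing to restore (the real code additionally seeds the last GCI with the newest restored GCI on the first pass, and breaks off every 20 fragments to resume itself via CONTINUEB so it never monopolises the thread). The sketch below keeps only the arithmetic, with hypothetical names:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Replica { std::uint32_t startGci; std::uint32_t lastGci; };
struct Window  { std::uint32_t startGci; std::uint32_t lastGci; };

Window gciLimits(const std::vector<std::vector<Replica>>& fragments)
{
  Window w{ UINT32_MAX, 0 };                    // like logStartGci = (UintR)-1
  for (const auto& replicas : fragments)
    for (const Replica& r : replicas) {
      w.startGci = std::min(w.startGci, r.startGci);
      w.lastGci  = std::max(w.lastGci,  r.lastGci);
    }
  if (w.startGci == UINT32_MAX)                 // nothing to restore: execute as
    w.startGci = w.lastGci;                     // little of the log as possible
  return w;
}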
- * ----------------------------------------------------------------------- */ - while(true) { - ndbrequire(tmbyte < clogFileSize); - if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) { - if (logFilePtr.p->logMaxGciCompleted[tmbyte] <= logPartPtr.p->logLastGci) { - jam(); - /* -------------------------------------------------------------------- - * WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST - * MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI - * HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAVE - * TO BE EXECUTED. - * ------------------------------------------------------------------- */ - logPartPtr.p->stopLogfile = logFilePtr.i; - logPartPtr.p->stopMbyte = tmbyte; - logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_START; - }//if - }//if - /* ------------------------------------------------------------------------ - * WHEN WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR THE - * START MBYTE. THE REASON IS THE FOLLOWING LOGIC CHAIN: - * MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI - * THUS MAX_GCI_STARTED >= START_GCI. THUS MAX_GCI_STARTED < START_GCI CAN - * NOT BE TRUE AS WE WILL CHECK OTHERWISE. - * ----------------------------------------------------------------------- */ - if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START) { - if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci) { - jam(); - /* -------------------------------------------------------------------- - * WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG. - * WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE - * PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE. - * ------------------------------------------------------------------- */ - tlastPrepRef = logFilePtr.p->logLastPrepRef[tmbyte]; - logPartPtr.p->startMbyte = tlastPrepRef & 65535; - LogFileRecordPtr locLogFilePtr; - findLogfile(signal, tlastPrepRef >> 16, logPartPtr, &locLogFilePtr); - logPartPtr.p->startLogfile = locLogFilePtr.i; - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG; - }//if - }//if - if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) { - if (tmbyte == 0) { - jam(); - tmbyte = clogFileSize - 1; - logFilePtr.i = logFilePtr.p->prevLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - } else { - jam(); - tmbyte--; - }//if - if (logPartPtr.p->lastLogfile == logFilePtr.i) { - ndbrequire(logPartPtr.p->lastMbyte != tmbyte); - }//if - if (loopCount > 20) { - jam(); - signal->theData[0] = ZSR_LOG_LIMITS; - signal->theData[1] = logPartPtr.i; - signal->theData[2] = logFilePtr.i; - signal->theData[3] = tmbyte; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; - }//if - loopCount++; - } else { - jam(); - break; - }//if - }//while - - if (DEBUG_REDO) - { - LogFileRecordPtr tmp; - tmp.i = logPartPtr.p->stopLogfile; - ptrCheckGuard(tmp, clogFileFileSize, logFileRecord); - ndbout_c("srLogLimits part: %u start file: %u mb: %u stop file: %u mb: %u", - logPartPtr.i, - tlastPrepRef >> 16, - tlastPrepRef & 65535, - tmp.p->fileNo, - logPartPtr.p->stopMbyte); - } - - /* ------------------------------------------------------------------------ - * WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START - * EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO - * START EXECUTING THE LOG. 
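The loop above walks the log backwards one MByte at a time using two pieces of per-MByte metadata, the highest GCI completed before it and the highest GCI started within it, to find where execution must stop and where it may start; the real code then pulls the start further back to cover the prepare records referenced from that MByte. A flattened sketch with a single array standing in for the file chain (names hypothetical):

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

struct MbyteInfo { std::uint32_t maxGciCompleted; std::uint32_t maxGciStarted; };

// Returns {startMbyte, stopMbyte} (inclusive), searching from 'newest' downwards.
std::pair<std::size_t, std::size_t>
findExecWindow(const std::vector<MbyteInfo>& log, std::size_t newest,
               std::uint32_t startGci, std::uint32_t lastGci)
{
  std::size_t i = newest;
  std::size_t stop = newest;
  bool stopFound = false;
  while (true) {
    if (!stopFound && log[i].maxGciCompleted <= lastGci) {
      stop = i;                          // first MByte that has to be executed
      stopFound = true;
    }
    if (stopFound && log[i].maxGciStarted < startGci)
      return { i, stop };                // older MBytes cannot hold needed records
    if (i == 0)
      break;
    --i;
  }
  return { 0, stop };                    // fall back to the very start of the log
}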
- * ----------------------------------------------------------------------- */ - if (logPartPtr.p->logPartState == LogPartRecord::SR_THIRD_PHASE_STARTED) { - jam(); - logFilePtr.i = logPartPtr.p->startLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_START; - openFileRw(signal, logFilePtr); - } else { - jam(); - ndbrequire(logPartPtr.p->logPartState == LogPartRecord::SR_FOURTH_PHASE_STARTED); - /* -------------------------------------------------------------------- - * WE HAVE NOW FOUND THE TAIL MBYTE IN THE TAIL FILE. - * SET THOSE PARAMETERS IN THE LOG PART. - * WE HAVE ALSO FOUND THE HEAD MBYTE. WE STILL HAVE TO SEARCH - * FOR THE PAGE NUMBER AND PAGE INDEX WHERE TO SET THE HEAD. - * ------------------------------------------------------------------- */ - logFilePtr.i = logPartPtr.p->startLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPartPtr.p->logTailFileNo = logFilePtr.p->fileNo; - logPartPtr.p->logTailMbyte = logPartPtr.p->startMbyte; - /* -------------------------------------------------------------------- - * THE HEAD WE ACTUALLY FOUND DURING EXECUTION OF LOG SO WE USE - * THIS INFO HERE RATHER THAN THE MBYTE WE FOUND TO BE THE HEADER. - * ------------------------------------------------------------------- */ - LogFileRecordPtr locLogFilePtr; - findLogfile(signal, logPartPtr.p->headFileNo, logPartPtr, &locLogFilePtr); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE; - openFileRw(signal, locLogFilePtr); - }//if - return; -}//Dblqh::srLogLimits() - -void Dblqh::openExecSrStartLab(Signal* signal) -{ - logPartPtr.p->currentLogfile = logFilePtr.i; - logFilePtr.p->currentMbyte = logPartPtr.p->startMbyte; - /* ------------------------------------------------------------------------ - * WE NEED A TC CONNECT RECORD TO HANDLE EXECUTION OF LOG RECORDS. - * ------------------------------------------------------------------------ */ - seizeTcrec(); - logPartPtr.p->logTcConrec = tcConnectptr.i; - /* ------------------------------------------------------------------------ - * THE FIRST LOG RECORD TO EXECUTE IS ALWAYS AT A NEW MBYTE. - * SET THE NUMBER OF PAGES IN THE MAIN MEMORY BUFFER TO ZERO AS AN INITIAL - * VALUE. THIS VALUE WILL BE UPDATED AND ENSURED THAT IT RELEASES PAGES IN - * THE SUBROUTINE READ_EXEC_SR. - * ----------------------------------------------------------------------- */ - logPartPtr.p->mmBufferSize = 0; - readExecSrNewMbyte(signal); - return; -}//Dblqh::openExecSrStartLab() - -/* --------------------------------------------------------------------------- - * WE WILL ALWAYS ENSURE THAT WE HAVE AT LEAST 16 KBYTE OF LOG PAGES WHEN WE - * START READING A LOG RECORD. THE ONLY EXCEPTION IS WHEN WE COME CLOSE TO A - * MBYTE BOUNDARY. SINCE WE KNOW THAT LOG RECORDS ARE NEVER WRITTEN ACROSS A - * MBYTE BOUNDARY THIS IS NOT A PROBLEM. - * - * WE START BY READING 64 KBYTE BEFORE STARTING TO EXECUTE THE LOG RECORDS. - * WHEN WE COME BELOW 64 KBYTE WE READ ANOTHER SET OF LOG PAGES. WHEN WE - * GO BELOW 16 KBYTE WE WAIT UNTIL THE READ PAGES HAVE ENTERED THE BLOCK. - * ------------------------------------------------------------------------- */ -/* -------------------------------------------------------------------------- - * NEW PAGES FROM LOG FILE DURING EXECUTION OF LOG HAS ARRIVED. 
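The buffering comment above describes a two-watermark read-ahead: fall below roughly 64 KByte of buffered log and another chunk is requested, fall below roughly 16 KByte and execution stalls until that read completes, except near an MByte boundary, where no record can be cut in half anyway. A small sketch of that decision, assuming 8 KByte log pages purely to make the page counts concrete:

#include <cstddef>

constexpr std::size_t PAGES_IN_MBYTE   = 128;  // assumption: 8 KByte pages
constexpr std::size_t READ_AHEAD_PAGES = 8;    // ~64 KByte refill threshold
constexpr std::size_t MIN_BUFFER_PAGES = 2;    // ~16 KByte stall threshold

struct ReadAheadDecision { bool issueRead; bool mustWait; };

ReadAheadDecision checkReadAhead(std::size_t bufferedPages,
                                 std::size_t pagesAlreadyReadInMbyte)
{
  ReadAheadDecision d{ false, false };
  if (pagesAlreadyReadInMbyte >= PAGES_IN_MBYTE)
    return d;                 // at the MByte boundary: nothing left to prefetch,
                              // and no log record spans past the boundary
  if (bufferedPages < READ_AHEAD_PAGES)
    d.issueRead = true;       // top the buffer up asynchronously
  if (bufferedPages < MIN_BUFFER_PAGES)
    d.mustWait = true;        // too little left: stall until the read arrives
  return d;
}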
- * ------------------------------------------------------------------------- */ -void Dblqh::readExecSrLab(Signal* signal) -{ - buildLinkedLogPageList(signal); - /* ------------------------------------------------------------------------ - * WE NEED TO SET THE CURRENT PAGE INDEX OF THE FIRST PAGE SINCE IT CAN BE - * USED IMMEDIATELY WITHOUT ANY OTHER INITIALISATION. THE REST OF THE PAGES - * WILL BE INITIALISED BY READ_LOGWORD. - * ----------------------------------------------------------------------- */ - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE; - if (logPartPtr.p->logExecState == - LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE) { - jam(); - /* ---------------------------------------------------------------------- - * THIS IS THE FIRST READ DURING THE EXECUTION OF THIS MBYTE. SET THE - * NEW CURRENT LOG PAGE TO THE FIRST OF THESE PAGES. CHANGE - * LOG_EXEC_STATE TO ENSURE THAT WE START EXECUTION OF THE LOG. - * --------------------------------------------------------------------- */ - logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte * - ZPAGES_IN_MBYTE; - logPartPtr.p->prevFilepage = logFilePtr.p->currentFilepage; - logFilePtr.p->currentLogpage = lfoPtr.p->firstLfoPage; - logPartPtr.p->prevLogpage = logFilePtr.p->currentLogpage; - }//if - moveToPageRef(signal); - releaseLfo(signal); - /* ------------------------------------------------------------------------ - * NOW WE HAVE COMPLETED THE RECEPTION OF THESE PAGES. - * NOW CHECK IF WE NEED TO READ MORE PAGES. - * ----------------------------------------------------------------------- */ - checkReadExecSr(signal); - if (logPartPtr.p->logExecState == LogPartRecord::LES_EXEC_LOG) { - jam(); - signal->theData[0] = ZEXEC_SR; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; - }//if - return; -}//Dblqh::readExecSrLab() - -void Dblqh::openExecSrNewMbyteLab(Signal* signal) -{ - readExecSrNewMbyte(signal); - return; -}//Dblqh::openExecSrNewMbyteLab() - -void Dblqh::closeExecSrLab(Signal* signal) -{ - LogFileRecordPtr locLogFilePtr; - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - logPartPtr.i = logFilePtr.p->logPartRec; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - locLogFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_SR_NEW_MBYTE; - openFileRw(signal, locLogFilePtr); - return; -}//Dblqh::closeExecSrLab() - -void Dblqh::writeDirtyLab(Signal* signal) -{ - releaseLfo(signal); - signal->theData[0] = logPartPtr.i; - execSr(signal); - return; -}//Dblqh::writeDirtyLab() - -/* -------------------------------------------------------------------------- - * EXECUTE A LOG RECORD WITHIN THE CURRENT MBYTE. 
- * ------------------------------------------------------------------------- */ -void Dblqh::execSr(Signal* signal) -{ - LogFileRecordPtr nextLogFilePtr; - LogPageRecordPtr tmpLogPagePtr; - Uint32 logWord; - Uint32 line; - const char * crash_msg = 0; - - jamEntry(); - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - - do { - jam(); - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logPartPtr.p->prevLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - if (logPagePtr.p->logPageWord[ZPOS_DIRTY] == ZDIRTY) { - jam(); - switch (logPartPtr.p->logExecState) { - case LogPartRecord::LES_EXEC_LOG_COMPLETED: - case LogPartRecord::LES_EXEC_LOG_NEW_FILE: - case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE: - jam(); - /* ------------------------------------------------------------------ - * IN THIS WE HAVE COMPLETED EXECUTION OF THE CURRENT LOG PAGE - * AND CAN WRITE IT TO DISK SINCE IT IS DIRTY. - * ----------------------------------------------------------------- */ - writeDirty(signal, __LINE__); - return; - break; - case LogPartRecord::LES_EXEC_LOG: - jam(); - /* -------------------------------------------------------------------- - * IN THIS CASE WE ONLY WRITE THE PAGE TO DISK IF WE HAVE COMPLETED - * EXECUTION OF LOG RECORDS BELONGING TO THIS LOG PAGE. - * ------------------------------------------------------------------- */ - if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) { - jam(); - writeDirty(signal, __LINE__); - return; - }//if - break; - default: - ndbrequire(false); - break; - }//switch - }//if - if (logFilePtr.p->currentLogpage != logPartPtr.p->prevLogpage) { - jam(); - logPartPtr.p->prevLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - logPartPtr.p->prevFilepage++; - continue; - }//if - switch (logPartPtr.p->logExecState) { - case LogPartRecord::LES_EXEC_LOG_COMPLETED: - jam(); - releaseMmPages(signal); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR_COMPLETED; - closeFile(signal, logFilePtr, __LINE__); - return; - break; - case LogPartRecord::LES_EXEC_LOG_NEW_MBYTE: - jam(); - logFilePtr.p->currentMbyte++; - readExecSrNewMbyte(signal); - return; - break; - case LogPartRecord::LES_EXEC_LOG_NEW_FILE: - jam(); - nextLogFilePtr.i = logFilePtr.p->nextLogFile; - logPartPtr.p->currentLogfile = nextLogFilePtr.i; - ptrCheckGuard(nextLogFilePtr, clogFileFileSize, logFileRecord); - nextLogFilePtr.p->currentMbyte = 0; - logFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_SR; - closeFile(signal, logFilePtr, __LINE__); - return; - break; - case LogPartRecord::LES_EXEC_LOG: - jam(); - /*empty*/; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPartPtr.p->savePageIndex = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - if (logPartPtr.p->execSrPagesRead < ZMIN_READ_BUFFER_SIZE) { - /* -------------------------------------------------------------------- - * THERE WERE LESS THAN 16 KBYTE OF LOG PAGES REMAINING. WE WAIT UNTIL - * THE NEXT 64 KBYTE ARRIVES UNTIL WE CONTINUE AGAIN. 
- * ------------------------------------------------------------------- */ - if ((logPartPtr.p->execSrPagesRead + - logPartPtr.p->execSrPagesExecuted) < ZPAGES_IN_MBYTE) { - jam(); - /* ------------------------------------------------------------------ - * WE ONLY STOP AND WAIT IF THERE MORE PAGES TO READ. IF IT IS NOT - * THEN IT IS THE END OF THE MBYTE AND WE WILL CONTINUE. IT IS NO - * RISK THAT A LOG RECORD WE FIND WILL NOT BE READ AT THIS TIME - * SINCE THE LOG RECORDS NEVER SPAN OVER A MBYTE BOUNDARY. - * ----------------------------------------------------------------- */ - readExecSr(signal); - logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR; - return; - }//if - }//if - logWord = readLogword(signal); - switch (logWord) { -/* ========================================================================= */ -/* ========================================================================= */ - case ZPREP_OP_TYPE: - { - logWord = readLogword(signal); - stepAhead(signal, logWord - 2); - break; - } -/* ========================================================================= */ -/* ========================================================================= */ - case ZINVALID_COMMIT_TYPE: - jam(); - stepAhead(signal, ZCOMMIT_LOG_SIZE - 1); - break; -/* ========================================================================= */ -/* ========================================================================= */ - case ZCOMMIT_TYPE: - { - CommitLogRecord commitLogRecord; - jam(); - tcConnectptr.i = logPartPtr.p->logTcConrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - readCommitLog(signal, &commitLogRecord); - if (tcConnectptr.p->gci > crestartNewestGci) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS LOG RECORD MUST BE IGNORED. IT IS PART OF A GLOBAL CHECKPOINT WHICH */ -/* WILL BE INVALIDATED BY THE SYSTEM RESTART. IF NOT INVALIDATED IT MIGHT BE */ -/* EXECUTED IN A FUTURE SYSTEM RESTART. */ -/*---------------------------------------------------------------------------*/ - tmpLogPagePtr.i = logPartPtr.p->prevLogpage; - ptrCheckGuard(tmpLogPagePtr, clogPageFileSize, logPageRecord); - arrGuard(logPartPtr.p->savePageIndex, ZPAGE_SIZE); - tmpLogPagePtr.p->logPageWord[logPartPtr.p->savePageIndex] = - ZINVALID_COMMIT_TYPE; - tmpLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZDIRTY; - } else { - jam(); -/*---------------------------------------------------------------------------*/ -/* CHECK IF I AM SUPPOSED TO EXECUTE THIS LOG RECORD. IF I AM THEN SAVE PAGE */ -/* INDEX IN CURRENT LOG PAGE SINCE IT WILL BE OVERWRITTEN WHEN EXECUTING THE */ -/* LOG RECORD. */ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->execSrExecuteIndex = 0; - Uint32 result = checkIfExecLog(signal); - if (result == ZOK) { - jam(); -//*---------------------------------------------------------------------------*/ -/* IN A NODE RESTART WE WILL NEVER END UP HERE SINCE NO FRAGMENTS HAVE BEEN */ -/* DEFINED YET. THUS NO EXTRA CHECKING FOR NODE RESTART IS NECESSARY. 
*/ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->savePageIndex = - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - tcConnectptr.p->fragmentptr = fragptr.i; - findPageRef(signal, &commitLogRecord); - logPartPtr.p->execSrLogPageIndex = commitLogRecord.startPageIndex; - if (logPagePtr.i != RNIL) { - jam(); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = commitLogRecord.startPageIndex; - logPartPtr.p->execSrLogPage = logPagePtr.i; - execLogRecord(signal); - return; - }//if - logPartPtr.p->execSrStartPageNo = commitLogRecord.startPageNo; - logPartPtr.p->execSrStopPageNo = commitLogRecord.stopPageNo; - findLogfile(signal, commitLogRecord.fileNo, logPartPtr, &logFilePtr); - logPartPtr.p->execSrExecLogFile = logFilePtr.i; - if (logFilePtr.i == logPartPtr.p->currentLogfile) { - jam(); - readExecLog(signal); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG; - return; - } else { - jam(); -/*---------------------------------------------------------------------------*/ -/* THE FILE IS CURRENTLY NOT OPEN. WE MUST OPEN IT BEFORE WE CAN READ FROM */ -/* THE FILE. */ -/*---------------------------------------------------------------------------*/ - logFilePtr.p->logFileStatus = LogFileRecord::OPEN_EXEC_LOG; - openFileRw(signal, logFilePtr); - return; - }//if - }//if - }//if - break; - } -/* ========================================================================= */ -/* ========================================================================= */ - case ZABORT_TYPE: - jam(); - stepAhead(signal, ZABORT_LOG_SIZE - 1); - break; -/* ========================================================================= */ -/* ========================================================================= */ - case ZFD_TYPE: - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS IS THE FIRST ITEM WE ENCOUNTER IN A NEW FILE. AT THIS MOMENT WE SHALL*/ -/* SIMPLY BYPASS IT. IT HAS NO SIGNIFANCE WHEN EXECUTING THE LOG. IT HAS ITS */ -/* SIGNIFANCE WHEN FINDING THE START END THE END OF THE LOG. */ -/* WE HARDCODE THE PAGE INDEX SINCE THIS SHOULD NEVER BE FOUND AT ANY OTHER */ -/* PLACE THAN IN THE FIRST PAGE OF A NEW FILE IN THE FIRST POSITION AFTER THE*/ -/* HEADER. */ -/*---------------------------------------------------------------------------*/ - if (unlikely(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] != - (ZPAGE_HEADER_SIZE + ZPOS_NO_FD))) - { - line = __LINE__; - logWord = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - crash_msg = "ZFD_TYPE at incorrect position!"; - goto crash; - } - { - Uint32 noFdDescriptors = - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_NO_FD]; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - (ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE) + - (noFdDescriptors * ZFD_MBYTE_SIZE * clogFileSize); - } - break; -/* ========================================================================= */ -/* ========================================================================= */ - case ZNEXT_LOG_RECORD_TYPE: - jam(); - stepAhead(signal, ZPAGE_SIZE - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]); - break; -/* ========================================================================= */ -/* ========================================================================= */ - case ZNEXT_MBYTE_TYPE: -/*---------------------------------------------------------------------------*/ -/* WE WILL SKIP A PART OF THE LOG FILE. ACTUALLY THE NEXT POINTER IS TO */ -/* A NEW MBYTE. THEREFORE WE WILL START UP A NEW MBYTE. 
THIS NEW MBYTE IS */ -/* HOWEVER ONLY STARTED IF IT IS NOT AFTER THE STOP MBYTE. */ -/* IF WE HAVE REACHED THE END OF THE STOP MBYTE THEN THE EXECUTION OF THE LOG*/ -/* IS COMPLETED. */ -/*---------------------------------------------------------------------------*/ - if (logPartPtr.p->currentLogfile == logPartPtr.p->stopLogfile) { - if (logFilePtr.p->currentMbyte == logPartPtr.p->stopMbyte) { - jam(); -/*---------------------------------------------------------------------------*/ -/* THIS WAS THE LAST MBYTE TO EXECUTE IN THIS LOG PART. WE SHOULD HAVE FOUND */ -/* A COMPLETED GCI RECORD OF THE LAST GCI BEFORE THIS. FOR SOME REASON THIS */ -/* RECORD WAS NOT AVAILABLE ON THE LOG. CRASH THE SYSTEM, A VERY SERIOUS */ -/* ERROR WHICH WE MUST REALLY WORK HARD TO AVOID. */ -/*---------------------------------------------------------------------------*/ -/*---------------------------------------------------------------------------*/ -/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */ -/*---------------------------------------------------------------------------*/ - line = __LINE__; - logWord = ZNEXT_MBYTE_TYPE; - crash_msg = "end of log wo/ having found last GCI"; - goto crash; - }//if - }//if -/*---------------------------------------------------------------------------*/ -/* START EXECUTION OF A NEW MBYTE IN THE LOG. */ -/*---------------------------------------------------------------------------*/ - if (logFilePtr.p->currentMbyte < (clogFileSize - 1)) { - jam(); - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_MBYTE; - } else { - ndbrequire(logFilePtr.p->currentMbyte == (clogFileSize - 1)); - jam(); -/*---------------------------------------------------------------------------*/ -/* WE HAVE TO CHANGE FILE. CLOSE THIS ONE AND THEN OPEN THE NEXT. */ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_NEW_FILE; - }//if - break; -/* ========================================================================= */ -/* ========================================================================= */ - case ZCOMPLETED_GCI_TYPE: - jam(); - logWord = readLogword(signal); - if (DEBUG_REDO) - ndbout_c("found gci: %u part: %u file: %u page: %u", - logWord, - logPartPtr.i, - logFilePtr.p->fileNo, - logFilePtr.p->currentFilepage); - if (logWord == logPartPtr.p->logLastGci) { - jam(); -/*---------------------------------------------------------------------------*/ -/* IF IT IS THE LAST GCI TO LIVE AFTER SYSTEM RESTART THEN WE RECORD THE NEXT*/ -/* WORD AS THE NEW HEADER OF THE LOG FILE. OTHERWISE WE SIMPLY IGNORE THIS */ -/* LOG RECORD. 
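The switch above treats the log as a stream of tagged 32-bit words: prepare and abort records are skipped by their size, a commit record is replayed only if its GCI survived the restart and is otherwise overwritten in place as an invalid commit, and a completed-GCI record equal to the last restorable GCI marks the new head of the log and ends execution. A toy dispatcher over the same idea (record layout and constants are invented for the example):

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

enum RecordType : std::uint32_t {
  PREP = 1, COMMIT = 2, INVALID_COMMIT = 3, ABORT = 4, COMPLETED_GCI = 5
};

// Handles the record starting at 'pos'; returns the index of the next record.
std::size_t processRecord(std::vector<std::uint32_t>& log, std::size_t pos,
                          std::uint32_t newestRestorableGci, bool& headFound)
{
  switch (log[pos]) {
  case PREP:
    return pos + log[pos + 1];          // word after the tag holds the length
  case ABORT:
  case INVALID_COMMIT:
    return pos + 2;                     // fixed-size records in this toy layout
  case COMMIT: {
    const std::uint32_t gci = log[pos + 1];
    if (gci > newestRestorableGci)
      log[pos] = INVALID_COMMIT;        // this GCI did not survive: neutralise it
    // else: look up the prepare record and re-apply the operation (omitted)
    return pos + 2;
  }
  case COMPLETED_GCI:
    if (log[pos + 1] == newestRestorableGci)
      headFound = true;                 // new head of the log; stop executing here
    return pos + 2;
  default:
    throw std::runtime_error("invalid log word");
  }
}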
*/ -/*---------------------------------------------------------------------------*/ - if (csrPhasesCompleted == 0) { - jam(); -/*---------------------------------------------------------------------------*/ -/*WE ONLY RECORD THE HEAD OF THE LOG IN THE FIRST LOG ROUND OF LOG EXECUTION.*/ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->headFileNo = logFilePtr.p->fileNo; - logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage; - logPartPtr.p->headPageIndex = - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP]; - if (DEBUG_REDO) - ndbout_c("execSr part: %u logLap: %u", - logPartPtr.i, logPartPtr.p->logLap); - }//if -/*---------------------------------------------------------------------------*/ -/* THERE IS NO NEED OF EXECUTING PAST THIS LINE SINCE THERE WILL ONLY BE LOG */ -/* RECORDS THAT WILL BE OF NO INTEREST. THUS CLOSE THE FILE AND START THE */ -/* NEXT PHASE OF THE SYSTEM RESTART. */ -/*---------------------------------------------------------------------------*/ - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_COMPLETED; - }//if - break; - default: - jam(); -/* ========================================================================= */ -/* ========================================================================= */ -/*---------------------------------------------------------------------------*/ -/* SEND A SIGNAL TO THE SIGNAL LOG AND THEN CRASH THE SYSTEM. */ -/*---------------------------------------------------------------------------*/ - line = __LINE__; - crash_msg = "Invalid logword"; - goto crash; - break; - }//switch -/*---------------------------------------------------------------------------*/ -// We continue to execute log records until we find a proper one to execute or -// that we reach a new page. -/*---------------------------------------------------------------------------*/ - } while (1); - return; - -crash: - signal->theData[0] = RNIL; - signal->theData[1] = logPartPtr.i; - Uint32 tmp = logFilePtr.p->fileName[3]; - tmp = (tmp >> 8) & 0xff;// To get the Directory, DXX. - signal->theData[2] = tmp; - signal->theData[3] = logFilePtr.p->fileNo; - signal->theData[4] = logFilePtr.p->currentMbyte; - signal->theData[5] = logFilePtr.p->currentFilepage; - signal->theData[6] = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - signal->theData[7] = logWord; - signal->theData[8] = line; - - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Error while reading REDO log. from %d\n" - "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d : %s", - signal->theData[8], - signal->theData[2], - signal->theData[3], - signal->theData[4], - signal->theData[5], - signal->theData[6], - signal->theData[7], - crash_msg ? crash_msg : ""); - - progError(__LINE__, NDBD_EXIT_SR_REDOLOG, buf); -}//Dblqh::execSr() - -/*---------------------------------------------------------------------------*/ -/* THIS SIGNAL IS ONLY RECEIVED TO BE CAPTURED IN THE SIGNAL LOG. IT IS */ -/* ALSO USED TO CRASH THE SYSTEM AFTER SENDING A SIGNAL TO THE LOG. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::execDEBUG_SIG(Signal* signal) -{ -/* -2.5 TEMPORARY VARIABLES ------------------------ -*/ - jamEntry(); - //logPagePtr.i = signal->theData[0]; - //tdebug = logPagePtr.p->logPageWord[0]; - - char buf[100]; - BaseString::snprintf(buf, 100, - "Error while reading REDO log. 
from %d\n" - "D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d", - signal->theData[8], - signal->theData[2], signal->theData[3], signal->theData[4], - signal->theData[5], signal->theData[6], signal->theData[7]); - - progError(__LINE__, NDBD_EXIT_SR_REDOLOG, buf); - - return; -}//Dblqh::execDEBUG_SIG() - -/*---------------------------------------------------------------------------*/ -/*---------------------------------------------------------------------------*/ -void Dblqh::closeExecLogLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - signal->theData[0] = ZEXEC_SR; - signal->theData[1] = logFilePtr.p->logPartRec; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::closeExecLogLab() - -void Dblqh::openExecLogLab(Signal* signal) -{ - readExecLog(signal); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_LOG; - return; -}//Dblqh::openExecLogLab() - -void Dblqh::readExecLogLab(Signal* signal) -{ - buildLinkedLogPageList(signal); - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOGREC_FROM_FILE; - logPartPtr.p->execSrLfoRec = lfoPtr.i; - logPartPtr.p->execSrLogPage = logPagePtr.i; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - logPartPtr.p->execSrLogPageIndex; - execLogRecord(signal); - return; -}//Dblqh::readExecLogLab() - -/*---------------------------------------------------------------------------*/ -/* THIS CODE IS USED TO EXECUTE A LOG RECORD WHEN IT'S DATA HAVE BEEN LOCATED*/ -/* AND TRANSFERRED INTO MEMORY. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::execLogRecord(Signal* signal) -{ - jamEntry(); - - tcConnectptr.i = logPartPtr.p->logTcConrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - tcConnectptr.p->m_log_part_ptr_i = fragptr.p->m_log_part_ptr_i; - - // Read a log record and prepare it for execution - readLogHeader(signal); - readKey(signal); - readAttrinfo(signal); - initReqinfoExecSr(signal); - arrGuard(logPartPtr.p->execSrExecuteIndex, 4); - BlockReference ref = fragptr.p->execSrBlockref[logPartPtr.p->execSrExecuteIndex]; - tcConnectptr.p->nextReplica = refToNode(ref); - tcConnectptr.p->connectState = TcConnectionrec::LOG_CONNECTED; - tcConnectptr.p->tcOprec = tcConnectptr.i; - packLqhkeyreqLab(signal); - return; -}//Dblqh::execLogRecord() - -//---------------------------------------------------------------------------- -// This function invalidates log pages after the last GCI record in a -// system/node restart. This is to ensure that the end of the log is -// consistent. This function is executed last in start phase 3. -// RT 450. EDTJAMO. -//---------------------------------------------------------------------------- -void Dblqh::invalidateLogAfterLastGCI(Signal* signal) { - - jam(); - if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG_INVALIDATE) { - jam(); - systemError(signal, __LINE__); - } - - if (logFilePtr.p->fileNo != logPartPtr.p->invalidateFileNo) { - jam(); - systemError(signal, __LINE__); - } - - switch (lfoPtr.p->lfoState) { - case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES: - jam(); - // Check if this page must be invalidated. - // If the log lap number on a page after the head of the tail is the same - // as the actual log lap number we must invalidate this page. Otherwise it - // could be impossible to find the end of the log in a later system/node - // restart. 
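The comment above gives the stopping rule for the scan that follows: starting just past the recorded log head, every page that still carries the current log lap number is a leftover from before the crash; the scan runs forward until it meets a page with a stale lap and then wipes everything back down to the head. A flattened, single-file sketch of that forward scan and backward wipe (the real code also steps across files and adjusts the lap when wrapping past file 0):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Page { std::uint32_t logLap; bool valid; };

void invalidateAfterHead(std::vector<Page>& pages, std::size_t headPage,
                         std::uint32_t currentLap)
{
  // Forward scan: find the first page past the head that no longer belongs
  // to the current lap; everything between the head and that page is stale.
  std::size_t end = headPage + 1;
  while (end < pages.size() && pages[end].logLap == currentLap)
    ++end;

  // Backward pass: overwrite the stale pages so a later restart cannot
  // mistake them for valid log ("make page really empty").
  while (end > headPage + 1) {
    --end;
    pages[end] = Page{ 0, false };
  }
}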
- if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) - { - // This page must be invalidated. - // We search for end - // read next - releaseLfo(signal); - releaseLogpage(signal); - readFileInInvalidate(signal, true); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES; - return; - } - - /** - * We found the "last" page to invalidate... - * Invalidate backwards until head... - */ - - // Fall through... - case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES: - jam(); - - releaseLfo(signal); - releaseLogpage(signal); - - // Step backwards... - logPartPtr.p->invalidatePageNo--; - - if (logPartPtr.p->invalidatePageNo == 0) - { - jam(); - - if (logFilePtr.p->fileNo == 0) - { - /** - * We're wrapping in the log... - * update logLap - */ - logPartPtr.p->logLap--; - ndbrequire(logPartPtr.p->logLap); // Should always be > 0 - if (DEBUG_REDO) - ndbout_c("invalidateLogAfterLastGCI part: %u wrap from file 0 -> logLap: %u", - logPartPtr.i, logPartPtr.p->logLap); - } - - /** - * Move to prev file - */ - logFilePtr.i = logFilePtr.p->prevLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo; - logPartPtr.p->invalidatePageNo = clogFileSize * ZPAGES_IN_MBYTE - 1; - } - - if (logPartPtr.p->invalidateFileNo == logPartPtr.p->headFileNo && - logPartPtr.p->invalidatePageNo == logPartPtr.p->headPageNo) - { - /** - * Done... - */ - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - - logFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - - // Close files if necessary. Current file and the next file should be - // left open. - exitFromInvalidate(signal); - return; - } - - seizeLogpage(signal); - - /** - * Make page really empty - */ - bzero(logPagePtr.p, sizeof(LogPageRecord)); - writeSinglePage(signal, logPartPtr.p->invalidatePageNo, - ZPAGE_SIZE - 1, __LINE__); - - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES; - return; - default: - jam(); - systemError(signal, __LINE__); - return; - break; - } -}//Dblqh::invalidateLogAfterLastGCI - -void Dblqh::readFileInInvalidate(Signal* signal, bool stepNext) -{ - jam(); - - if (stepNext) - { - logPartPtr.p->invalidatePageNo++; - if (logPartPtr.p->invalidatePageNo == (clogFileSize * ZPAGES_IN_MBYTE)) - { - // We continue in the next file. - logFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo; - // Page 0 is used for file descriptors. - logPartPtr.p->invalidatePageNo = 1; - - if (logFilePtr.p->fileNo == 0) - { - /** - * We're wrapping in the log... - * update logLap - */ - logPartPtr.p->logLap++; - if (DEBUG_REDO) - ndbout_c("readFileInInvalidate part: %u wrap to file 0 -> logLap: %u", - logPartPtr.i, logPartPtr.p->logLap); - } - if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) - { - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES; - openFileRw(signal, logFilePtr); - return; - } - } - } - - // Contact NDBFS. Real time break. 
- readSinglePage(signal, logPartPtr.p->invalidatePageNo); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES; -} - -void Dblqh::exitFromInvalidate(Signal* signal) { - jam(); - -loop: - logFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - - if (logFilePtr.i == logPartPtr.p->currentLogfile) - { - jam(); - goto done; - } - - if (logFilePtr.p->fileNo == 0) - { - jam(); - /** - * Logfile 0 shoult *not* be closed - */ - goto loop; - } - - if (logFilePtr.p->logFileStatus == LogFileRecord::CLOSED) - { - jam(); - goto done; - } - - jam(); - ndbrequire(logFilePtr.p->logFileStatus == LogFileRecord::OPEN); - logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES; - closeFile(signal, logFilePtr, __LINE__); - return; - -done: - if (DEBUG_REDO) - ndbout_c("exitFromInvalidate part: %u head file: %u page: %u", - logPartPtr.i, - logPartPtr.p->headFileNo, - logPartPtr.p->headPageNo); - - logFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->logPageZero; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] = - logPartPtr.p->headFileNo; - writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__); - - lfoPtr.p->logFileRec = logFilePtr.i; - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0; - return; -} - -/*---------------------------------------------------------------------------*/ -/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */ -/* READ FROM DISK FOR THIS PARTICULAR OPERATION. */ -/*---------------------------------------------------------------------------*/ -void Dblqh::completedLab(Signal* signal) -{ - Uint32 result = returnExecLog(signal); -/*---------------------------------------------------------------------------*/ -/* ENTER COMPLETED WITH */ -/* LQH_CONNECTPTR */ -/*---------------------------------------------------------------------------*/ - if (result == ZOK) { - jam(); - execLogRecord(signal); - return; - } else if (result == ZNOT_OK) { - jam(); - signal->theData[0] = ZEXEC_SR; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - } else { - jam(); - /*empty*/; - }//if -/*---------------------------------------------------------------------------*/ -/* WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE PROCEEDING IN */ -/* RARE CASES. */ -/*---------------------------------------------------------------------------*/ - return; -}//Dblqh::completedLab() - -/*---------------------------------------------------------------------------*/ -/* EXECUTION OF LOG RECORD WAS NOT SUCCESSFUL. CHECK IF IT IS OK ANYWAY, */ -/* THEN EXECUTE THE NEXT LOG RECORD. 
*/ -/*---------------------------------------------------------------------------*/ -void Dblqh::logLqhkeyrefLab(Signal* signal) -{ - Uint32 result = returnExecLog(signal); - switch (tcConnectptr.p->operation) { - case ZUPDATE: - case ZDELETE: - jam(); - if (unlikely(terrorCode != ZNO_TUPLE_FOUND)) - goto error; - break; - case ZINSERT: - jam(); - if (unlikely(terrorCode != ZTUPLE_ALREADY_EXIST && terrorCode != 899)) - goto error; - - break; - default: - goto error; - } - - if (result == ZOK) { - jam(); - execLogRecord(signal); - return; - } else if (result == ZNOT_OK) { - jam(); - signal->theData[0] = ZEXEC_SR; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - } else { - jam(); - /*empty*/; - }//if - /* ------------------------------------------------------------------------ - * WE HAVE TO WAIT FOR CLOSING OF THE EXECUTED LOG FILE BEFORE - * PROCEEDING IN RARE CASES. - * ----------------------------------------------------------------------- */ - return; -error: - BaseString tmp; - tmp.appfmt("You have found a bug!" - " Failed op (%s) during REDO table: %d fragment: %d err: %d", - tcConnectptr.p->operation == ZINSERT ? "INSERT" : - tcConnectptr.p->operation == ZUPDATE ? "UPDATE" : - tcConnectptr.p->operation == ZDELETE ? "DELETE" : - tcConnectptr.p->operation == ZWRITE ? "WRITE" : "", - tcConnectptr.p->tableref, - tcConnectptr.p->fragmentid, - terrorCode); - progError(__LINE__, NDBD_EXIT_SYSTEM_ERROR, - tmp.c_str()); -}//Dblqh::logLqhkeyrefLab() - -void Dblqh::closeExecSrCompletedLab(Signal* signal) -{ - logFilePtr.p->logFileStatus = LogFileRecord::CLOSED; - signal->theData[0] = logFilePtr.p->logPartRec; - execLogComp(signal); - return; -}//Dblqh::closeExecSrCompletedLab() - -/* -------------------------------------------------------------------------- - * ONE OF THE LOG PARTS HAVE COMPLETED EXECUTING THE LOG. CHECK IF ALL LOG - * PARTS ARE COMPLETED. IF SO START SENDING EXEC_FRAGCONF AND EXEC_SRCONF. - * ------------------------------------------------------------------------- */ -void Dblqh::execLogComp(Signal* signal) -{ - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logPartPtr.p->logPartState = LogPartRecord::SR_THIRD_PHASE_COMPLETED; - /* ------------------------------------------------------------------------ - * WE MUST RELEASE THE TC CONNECT RECORD HERE SO THAT IT CAN BE REUSED. - * ----------------------------------------------------------------------- */ - tcConnectptr.i = logPartPtr.p->logTcConrec; - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - releaseTcrecLog(signal, tcConnectptr); - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_COMPLETED) { - if (logPartPtr.p->logPartState != LogPartRecord::SR_THIRD_PHASE_STARTED) { - jam(); - systemErrorLab(signal, __LINE__); - return; - } else { - jam(); - /* ------------------------------------------------------------------ - * THIS LOG PART WAS NOT COMPLETED YET. EXIT AND WAIT FOR IT - * TO COMPLETE - * ----------------------------------------------------------------- */ - return; - }//if - }//if - }//for - /* ------------------------------------------------------------------------ - * ALL LOG PARTS HAVE COMPLETED THE EXECUTION OF THE LOG. WE CAN NOW START - * SENDING THE EXEC_FRAGCONF SIGNALS TO ALL INVOLVED FRAGMENTS. 
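logLqhkeyrefLab above accepts exactly the failures that mean the operation's effect is already in place, since redo execution may replay operations whose outcome partly exists: an update or delete may find no tuple, an insert may find the tuple already there (or hit error 899), and every other refusal is treated as a bug that stops the node. A small classification helper capturing that rule (the error-code values shown are illustrative stand-ins for the Z* constants):

#include <cstdint>

enum class Op { Insert, Update, Delete, Write };

bool replayErrorIsBenign(Op op, std::uint32_t errorCode)
{
  constexpr std::uint32_t NO_TUPLE_FOUND      = 626;  // stand-in for ZNO_TUPLE_FOUND
  constexpr std::uint32_t TUPLE_ALREADY_EXIST = 630;  // stand-in for ZTUPLE_ALREADY_EXIST

  switch (op) {
  case Op::Update:
  case Op::Delete:
    return errorCode == NO_TUPLE_FOUND;        // the row is already gone
  case Op::Insert:                             // the row already made it to disk,
    return errorCode == TUPLE_ALREADY_EXIST    // or the special case 899 that the
        || errorCode == 899;                   // original also tolerates
  default:
    return false;                              // anything else: treat as a bug
  }
}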
- * ----------------------------------------------------------------------- */ - jam(); - c_lcp_complete_fragments.first(fragptr); - signal->theData[0] = ZSEND_EXEC_CONF; - signal->theData[1] = fragptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//Dblqh::execLogComp() - -/* -------------------------------------------------------------------------- - * GO THROUGH THE FRAGMENT RECORDS TO DEDUCE TO WHICH SHALL BE SENT - * EXEC_FRAGCONF AFTER COMPLETING THE EXECUTION OF THE LOG. - * ------------------------------------------------------------------------- */ -void Dblqh::sendExecConf(Signal* signal) -{ - jamEntry(); - fragptr.i = signal->theData[0]; - Uint32 loopCount = 0; - while (fragptr.i != RNIL) { - c_lcp_complete_fragments.getPtr(fragptr); - Uint32 next = fragptr.p->nextList; - if (fragptr.p->execSrStatus != Fragrecord::IDLE) { - jam(); - ndbrequire(fragptr.p->execSrNoReplicas - 1 < 4); - for (Uint32 i = 0; i < fragptr.p->execSrNoReplicas; i++) { - jam(); - signal->theData[0] = fragptr.p->execSrUserptr[i]; - sendSignal(fragptr.p->execSrBlockref[i], GSN_EXEC_FRAGCONF, - signal, 1, JBB); - }//for - fragptr.p->execSrNoReplicas = 0; - }//if - loopCount++; - if (loopCount > 20) { - jam(); - signal->theData[0] = ZSEND_EXEC_CONF; - signal->theData[1] = next; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; - } else { - jam(); - fragptr.i = next; - }//if - }//while - /* ---------------------------------------------------------------------- - * WE HAVE NOW SENT ALL EXEC_FRAGCONF. NOW IT IS TIME TO SEND - * EXEC_SRCONF TO ALL NODES. - * --------------------------------------------------------------------- */ - srPhase3Comp(signal); -}//Dblqh::sendExecConf() - -/* -------------------------------------------------------------------------- - * PHASE 3 HAS NOW COMPLETED. INFORM ALL OTHER NODES OF THIS EVENT. - * ------------------------------------------------------------------------- */ -void Dblqh::srPhase3Comp(Signal* signal) -{ - jamEntry(); - - signal->theData[0] = cownNodeid; - NodeReceiverGroup rg(DBLQH, m_sr_nodes); - sendSignal(rg, GSN_EXEC_SRCONF, signal, 1, JBB); - return; -}//Dblqh::srPhase3Comp() - -/* ########################################################################## - * SYSTEM RESTART PHASE FOUR MODULE - * THIS MODULE IS A SUB-MODULE OF THE FILE SYSTEM HANDLING. - * - * THIS MODULE SETS UP THE HEAD AND TAIL POINTERS OF THE LOG PARTS IN THE - * FRAGMENT LOG. WHEN IT IS COMPLETED IT REPORTS TO THE MASTER DIH THAT - * IT HAS COMPLETED THE PART OF THE SYSTEM RESTART WHERE THE DATABASE IS - * LOADED. - * IT ALSO OPENS THE CURRENT LOG FILE AND THE NEXT AND SETS UP THE FIRST - * LOG PAGE WHERE NEW LOG DATA IS TO BE INSERTED WHEN THE SYSTEM STARTS - * AGAIN. - * - * THIS PART IS ACTUALLY EXECUTED FOR ALL RESTART TYPES. - * ######################################################################### */ -void Dblqh::initFourth(Signal* signal) -{ - LogFileRecordPtr locLogFilePtr; - jamEntry(); - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - crestartNewestGci = 1; - crestartOldestGci = 1; - /* ------------------------------------------------------------------------ - * INITIALISE LOG PART AND LOG FILES AS NEEDED. 
- * ----------------------------------------------------------------------- */ - logPartPtr.p->headFileNo = 0; - logPartPtr.p->headPageNo = 1; - logPartPtr.p->headPageIndex = ZPAGE_HEADER_SIZE + 2; - logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_STARTED; - logPartPtr.p->logTailFileNo = 0; - logPartPtr.p->logTailMbyte = 0; - locLogFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_PHASE; - openFileRw(signal, locLogFilePtr); - return; -}//Dblqh::initFourth() - -void Dblqh::openSrFourthPhaseLab(Signal* signal) -{ - /* ------------------------------------------------------------------------ - * WE HAVE NOW OPENED THE HEAD LOG FILE WE WILL NOW START READING IT - * FROM THE HEAD MBYTE TO FIND THE NEW HEAD OF THE LOG. - * ----------------------------------------------------------------------- */ - readSinglePage(signal, logPartPtr.p->headPageNo); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_PHASE; - return; -}//Dblqh::openSrFourthPhaseLab() - -void Dblqh::readSrFourthPhaseLab(Signal* signal) -{ - if(c_diskless){ - jam(); - logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 1; - } - - /* ------------------------------------------------------------------------ - * INITIALISE ALL LOG PART INFO AND LOG FILE INFO THAT IS NEEDED TO - * START UP THE SYSTEM. - * ------------------------------------------------------------------------ - * INITIALISE THE NEWEST GLOBAL CHECKPOINT IDENTITY AND THE NEWEST - * COMPLETED GLOBAL CHECKPOINT IDENITY AS THE NEWEST THAT WAS RESTARTED. - * ------------------------------------------------------------------------ - * INITIALISE THE HEAD PAGE INDEX IN THIS PAGE. - * ASSIGN IT AS THE CURRENT LOGPAGE. - * ASSIGN THE FILE AS THE CURRENT LOG FILE. - * ASSIGN THE CURRENT FILE NUMBER FROM THE CURRENT LOG FILE AND THE NEXT - * FILE NUMBER FROM THE NEXT LOG FILE. - * ASSIGN THE CURRENT FILEPAGE FROM HEAD PAGE NUMBER. - * ASSIGN THE CURRENT MBYTE BY DIVIDING PAGE NUMBER BY 128. - * INITIALISE LOG LAP TO BE THE LOG LAP AS FOUND IN THE HEAD PAGE. - * WE HAVE TO CALCULATE THE NUMBER OF REMAINING WORDS IN THIS MBYTE. - * ----------------------------------------------------------------------- */ - cnewestGci = crestartNewestGci; - cnewestCompletedGci = crestartNewestGci; - logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci; - logPartPtr.p->currentLogfile = logFilePtr.i; - logFilePtr.p->filePosition = logPartPtr.p->headPageNo; - logFilePtr.p->currentMbyte = - logPartPtr.p->headPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE; - logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING; - logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP]; - logFilePtr.p->currentFilepage = logPartPtr.p->headPageNo; - logFilePtr.p->currentLogpage = logPagePtr.i; - - initLogpage(signal); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->headPageIndex; - logFilePtr.p->remainingWordsInMbyte = - (( - ((logFilePtr.p->currentMbyte + 1) * ZPAGES_IN_MBYTE) - - logFilePtr.p->currentFilepage) * - (ZPAGE_SIZE - ZPAGE_HEADER_SIZE)) - - (logPartPtr.p->headPageIndex - ZPAGE_HEADER_SIZE); - /* ------------------------------------------------------------------------ - * THE NEXT STEP IS TO OPEN THE NEXT LOG FILE (IF THERE IS ONE). 
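The remainingWordsInMbyte expression above is plain arithmetic: the pages still belonging to the current MByte, times the payload words per page, minus the words already consumed on the head page. A standalone version, assuming for illustration 128 pages of 2048 words per MByte and a 25-word page header (the actual constants live in Dblqh.hpp):

#include <cstdint>

constexpr std::uint32_t PAGES_IN_MBYTE   = 128;   // assumed layout for the example
constexpr std::uint32_t PAGE_SIZE_WORDS  = 2048;
constexpr std::uint32_t PAGE_HEADER_SIZE = 25;

constexpr std::uint32_t remainingWordsInMbyte(std::uint32_t currentMbyte,
                                              std::uint32_t currentFilepage,
                                              std::uint32_t headPageIndex)
{
  // Pages left in this MByte, counting the head page itself.
  const std::uint32_t pagesLeft =
      (currentMbyte + 1) * PAGES_IN_MBYTE - currentFilepage;
  // Payload per page, minus what is already written on the head page.
  return pagesLeft * (PAGE_SIZE_WORDS - PAGE_HEADER_SIZE)
         - (headPageIndex - PAGE_HEADER_SIZE);
}

// Example: head page 64 of MByte 0, 500 words into that page:
//   remainingWordsInMbyte(0, 64, 500) == 64 * 2023 - 475 == 128997 words.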
- * ----------------------------------------------------------------------- */ - if (logFilePtr.p->nextLogFile != logFilePtr.i) { - LogFileRecordPtr locLogFilePtr; - jam(); - locLogFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - locLogFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_NEXT; - openFileRw(signal, locLogFilePtr); - } else { - jam(); - /* ---------------------------------------------------------------------- - * THIS CAN ONLY OCCUR IF WE HAVE ONLY ONE LOG FILE. THIS LOG FILE MUST - * BE LOG FILE ZERO AND THAT IS THE FILE WE CURRENTLY HAVE READ. - * THUS WE CAN CONTINUE IMMEDIATELY TO READ PAGE ZERO IN FILE ZERO. - * --------------------------------------------------------------------- */ - openSrFourthZeroSkipInitLab(signal); - return; - }//if - return; -}//Dblqh::readSrFourthPhaseLab() - -void Dblqh::openSrFourthNextLab(Signal* signal) -{ - /* ------------------------------------------------------------------------ - * WE MUST ALSO HAVE FILE 0 OPEN ALL THE TIME. - * ----------------------------------------------------------------------- */ - logFilePtr.i = logPartPtr.p->firstLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - if (logFilePtr.p->logFileStatus == LogFileRecord::OPEN) { - jam(); - openSrFourthZeroSkipInitLab(signal); - return; - } else { - jam(); - logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_FOURTH_ZERO; - openFileRw(signal, logFilePtr); - }//if - return; -}//Dblqh::openSrFourthNextLab() - -void Dblqh::openSrFourthZeroLab(Signal* signal) -{ - openSrFourthZeroSkipInitLab(signal); - return; -}//Dblqh::openSrFourthZeroLab() - -void Dblqh::openSrFourthZeroSkipInitLab(Signal* signal) -{ - if (logFilePtr.i == logPartPtr.p->currentLogfile) { - if (logFilePtr.p->currentFilepage == 0) { - jam(); - /* ------------------------------------------------------------------- - * THE HEADER PAGE IN THE LOG IS PAGE ZERO IN FILE ZERO. - * THIS SHOULD NEVER OCCUR. - * ------------------------------------------------------------------- */ - systemErrorLab(signal, __LINE__); - return; - }//if - }//if - readSinglePage(signal, 0); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_FOURTH_ZERO; - return; -}//Dblqh::openSrFourthZeroSkipInitLab() - -void Dblqh::readSrFourthZeroLab(Signal* signal) -{ - logFilePtr.p->logPageZero = logPagePtr.i; - // -------------------------------------------------------------------- - // This is moved to invalidateLogAfterLastGCI(), RT453. - // signal->theData[0] = ZSR_FOURTH_COMP; - // signal->theData[1] = logPartPtr.i; - // sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - // -------------------------------------------------------------------- - - // Need to invalidate log pages after the head of the log. RT 453. EDTJAMO. - // Set the start of the invalidation. - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo; - logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo; - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE; - - readFileInInvalidate(signal, true); - lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES; - return; -}//Dblqh::readSrFourthZeroLab() - -/* -------------------------------------------------------------------------- - * ONE OF THE LOG PARTS HAVE COMPLETED PHASE FOUR OF THE SYSTEM RESTART. - * CHECK IF ALL LOG PARTS ARE COMPLETED. 
IF SO SEND START_RECCONF - * ------------------------------------------------------------------------- */ -void Dblqh::srFourthComp(Signal* signal) -{ - jamEntry(); - logPartPtr.i = signal->theData[0]; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logPartPtr.p->logPartState = LogPartRecord::SR_FOURTH_PHASE_COMPLETED; - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - jam(); - ptrAss(logPartPtr, logPartRecord); - if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_COMPLETED) { - if (logPartPtr.p->logPartState != LogPartRecord::SR_FOURTH_PHASE_STARTED) { - jam(); - systemErrorLab(signal, __LINE__); - return; - } else { - jam(); - /* ------------------------------------------------------------------ - * THIS LOG PART WAS NOT COMPLETED YET. - * EXIT AND WAIT FOR IT TO COMPLETE - * ----------------------------------------------------------------- */ - return; - }//if - }//if - }//for - /* ------------------------------------------------------------------------ - * ALL LOG PARTS HAVE COMPLETED PHASE FOUR OF THE SYSTEM RESTART. - * WE CAN NOW SEND START_RECCONF TO THE MASTER DIH IF IT WAS A - * SYSTEM RESTART. OTHERWISE WE WILL CONTINUE WITH AN INITIAL START. - * SET LOG PART STATE TO IDLE TO - * INDICATE THAT NOTHING IS GOING ON IN THE LOG PART. - * ----------------------------------------------------------------------- */ - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->logPartState = LogPartRecord::IDLE; - }//for - - if ((cstartType == NodeState::ST_INITIAL_START) || - (cstartType == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - - ndbrequire(cinitialStartOngoing == ZTRUE); - cinitialStartOngoing = ZFALSE; - - checkStartCompletedLab(signal); - return; - } else if ((cstartType == NodeState::ST_NODE_RESTART) || - (cstartType == NodeState::ST_SYSTEM_RESTART)) { - jam(); - - if(cstartType == NodeState::ST_SYSTEM_RESTART) - { - jam(); - if (c_lcp_complete_fragments.first(fragptr)) - { - jam(); - signal->theData[0] = ZENABLE_EXPAND_CHECK; - signal->theData[1] = fragptr.i; - sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); - return; - } - } - cstartRecReq = 2; - StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); - conf->startingNodeId = getOwnNodeId(); - sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - } else { - ndbrequire(false); - }//if - return; -}//Dblqh::srFourthComp() - -/* ######################################################################### */ -/* ####### ERROR MODULE ####### */ -/* */ -/* ######################################################################### */ - -/*---------------------------------------------------------------------------*/ -/* AN ERROR OCCURRED THAT WE WILL NOT TREAT AS SYSTEM ERROR. MOST OFTEN THIS */ -/* WAS CAUSED BY AN ERRONEUS SIGNAL SENT BY ANOTHER NODE. WE DO NOT WISH TO */ -/* CRASH BECAUSE OF FAULTS IN OTHER NODES. THUS WE ONLY REPORT A WARNING. */ -/* THIS IS CURRENTLY NOT IMPLEMENTED AND FOR THE MOMENT WE GENERATE A SYSTEM */ -/* ERROR SINCE WE WANT TO FIND FAULTS AS QUICKLY AS POSSIBLE IN A TEST PHASE.*/ -/* IN A LATER PHASE WE WILL CHANGE THIS TO BE A WARNING MESSAGE INSTEAD. */ -/*---------------------------------------------------------------------------*/ -/*---------------------------------------------------------------------------*/ -/* THIS TYPE OF ERROR SHOULD NOT GENERATE A SYSTEM ERROR IN A PRODUCT */ -/* RELEASE. 
THIS IS A TEMPORARY SOLUTION DURING TEST PHASE TO QUICKLY */ -/* FIND ERRORS. NORMALLY THIS SHOULD GENERATE A WARNING MESSAGE ONTO */ -/* SOME ERROR LOGGER. THIS WILL LATER BE IMPLEMENTED BY SOME SIGNAL. */ -/*---------------------------------------------------------------------------*/ -/* ------ SYSTEM ERROR SITUATIONS ------- */ -/* IN SITUATIONS WHERE THE STATE IS ERRONEOUS OR IF THE ERROR OCCURS IN */ -/* THE COMMIT, COMPLETE OR ABORT PHASE, WE PERFORM A CRASH OF THE AXE VM*/ -/*---------------------------------------------------------------------------*/ - -void Dblqh::systemErrorLab(Signal* signal, int line) -{ - systemError(signal, line); - progError(line, NDBD_EXIT_NDBREQUIRE); -/*************************************************************************>*/ -/* WE WANT TO INVOKE AN IMMEDIATE ERROR HERE SO WE GET THAT BY */ -/* INSERTING A CERTAIN POINTER OUT OF RANGE. */ -/*************************************************************************>*/ -}//Dblqh::systemErrorLab() - -/* ------- ERROR SITUATIONS ------- */ - -void Dblqh::aiStateErrorCheckLab(Signal* signal, Uint32* dataPtr, Uint32 length) -{ - ndbrequire(tcConnectptr.p->abortState != TcConnectionrec::ABORT_IDLE); - if (tcConnectptr.p->transactionState != TcConnectionrec::IDLE) { - jam(); -/*************************************************************************>*/ -/* TRANSACTION ABORT IS ONGOING. IT CAN STILL BE A PART OF AN */ -/* OPERATION THAT SHOULD CONTINUE SINCE THE TUPLE HAS NOT ARRIVED */ -/* YET. THIS IS POSSIBLE IF ACTIVE CREATION OF THE FRAGMENT IS */ -/* ONGOING. */ -/*************************************************************************>*/ - if (tcConnectptr.p->activeCreat == Fragrecord::AC_IGNORED) { - jam(); -/*************************************************************************>*/ -/* ONGOING ABORTS DURING ACTIVE CREATION MUST SAVE THE ATTRIBUTE INFO*/ -/* SO THAT IT CAN BE SENT TO THE NEXT NODE IN THE COMMIT CHAIN. THIS */ -/* IS NEEDED SINCE ALL ABORTS DURING CREATION OF A FRAGMENT ARE NOT */ -/* REALLY ERRORS. A MISSING TUPLE TO BE UPDATED SIMPLY MEANS THAT */ -/* IT HASN'T BEEN TRANSFERRED TO THE NEW REPLICA YET. */ -/*************************************************************************>*/ -/*************************************************************************>*/ -/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */ -/* ACTIVE CREATION TO FALSE. THIS WILL ENSURE THAT THE ABORT IS */ -/* COMPLETED. */ -/*************************************************************************>*/ - if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { - jam(); - if (tcConnectptr.p->transactionState == - TcConnectionrec::WAIT_AI_AFTER_ABORT) { - if (tcConnectptr.p->currTupAiLen == tcConnectptr.p->totReclenAi) { - jam(); -/*************************************************************************>*/ -/* WE WERE WAITING FOR MORE ATTRIBUTE INFO AFTER A SUCCESSFUL ABORT */ -/* IN ACTIVE CREATION STATE. THE TRANSACTION SHOULD CONTINUE AS IF */ -/* IT WAS COMMITTED. NOW ALL INFO HAS ARRIVED AND WE CAN CONTINUE */ -/* WITH NORMAL PROCESSING AS IF THE TRANSACTION WAS PREPARED. */ -/* SINCE THE FRAGMENT IS UNDER CREATION WE KNOW THAT LOGGING IS */ -/* DISABLED. WE STILL HAVE TO CATER FOR DIRTY OPERATION OR NOT. 
*/ -/*************************************************************************>*/ - tcConnectptr.p->abortState = TcConnectionrec::ABORT_IDLE; - rwConcludedAiLab(signal); - return; - } else { - ndbrequire(tcConnectptr.p->currTupAiLen < tcConnectptr.p->totReclenAi); - jam(); - return; /* STILL WAITING FOR MORE ATTRIBUTE INFO */ - }//if - }//if - } else { - jam(); -/*************************************************************************>*/ -/* AFTER THIS ERROR THE ABORT MUST BE COMPLETED. TO ENSURE THIS SET */ -/* ACTIVE CREATION TO ABORT. THIS WILL ENSURE THAT THE ABORT IS */ -/* COMPLETED AND THAT THE ERROR CODE IS PROPERLY SET */ -/*************************************************************************>*/ - tcConnectptr.p->errorCode = terrorCode; - tcConnectptr.p->activeCreat = Fragrecord::AC_NORMAL; - if (tcConnectptr.p->transactionState == - TcConnectionrec::WAIT_AI_AFTER_ABORT) { - jam(); -/*************************************************************************>*/ -/* ABORT IS ALREADY COMPLETED. WE NEED TO RESTART IT FROM WHERE IT */ -/* WAS INTERRUPTED. */ -/*************************************************************************>*/ - continueAbortLab(signal); - return; - } else { - jam(); - return; -/*************************************************************************>*/ -// Abort is ongoing. It will complete since we set the activeCreat = ZFALSE -/*************************************************************************>*/ - }//if - }//if - }//if - }//if -/*************************************************************************>*/ -/* TRANSACTION HAVE BEEN ABORTED. THUS IGNORE ALL SIGNALS BELONGING TO IT. */ -/*************************************************************************>*/ - return; -}//Dblqh::aiStateErrorCheckLab() - -void Dblqh::takeOverErrorLab(Signal* signal) -{ - terrorCode = ZTAKE_OVER_ERROR; - abortErrorLab(signal); - return; -}//Dblqh::takeOverErrorLab() - -/* ########################################################################## - * TEST MODULE - * ######################################################################### */ -#ifdef VM_TRACE -void Dblqh::execTESTSIG(Signal* signal) -{ - jamEntry(); - Uint32 userpointer = signal->theData[0]; - BlockReference userblockref = signal->theData[1]; - Uint32 testcase = signal->theData[2]; - - signal->theData[0] = userpointer; - signal->theData[1] = cownref; - signal->theData[2] = testcase; - sendSignal(userblockref, GSN_TESTSIG, signal, 25, JBB); - return; -}//Dblqh::execTESTSIG() - -/* *************** */ -/* MEMCHECKREQ > */ -/* *************** */ -/* ************************************************************************>> - * THIS SIGNAL IS PURELY FOR TESTING PURPOSES. IT CHECKS THE FREE LIST - * AND REPORTS THE NUMBER OF FREE RECORDS. 
- * THIS CAN BE DONE TO ENSURE THAT NO RECORDS HAS BEEN LOST - * ************************************************************************> */ -void Dblqh::execMEMCHECKREQ(Signal* signal) -{ - Uint32* dataPtr = &signal->theData[0]; - jamEntry(); - BlockReference userblockref = signal->theData[0]; - Uint32 index = 0; - for (Uint32 i = 0; i < 7; i++) - dataPtr[i] = 0; - addfragptr.i = cfirstfreeAddfragrec; - while (addfragptr.i != RNIL) { - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - addfragptr.i = addfragptr.p->nextAddfragrec; - dataPtr[index]++; - }//while - index++; - attrinbufptr.i = cfirstfreeAttrinbuf; - while (attrinbufptr.i != RNIL) { - ptrCheckGuard(attrinbufptr, cattrinbufFileSize, attrbuf); - attrinbufptr.i = attrinbufptr.p->attrbuf[ZINBUF_NEXT]; - dataPtr[index]++; - }//while - index++; - databufptr.i = cfirstfreeDatabuf; - while (databufptr.i != RNIL) { - ptrCheckGuard(databufptr, cdatabufFileSize, databuf); - databufptr.i = databufptr.p->nextDatabuf; - dataPtr[index]++; - }//while - index++; - for (tabptr.i = 0; - tabptr.i < ctabrecFileSize; - tabptr.i++) { - ptrAss(tabptr, tablerec); - if (tabptr.p->tableStatus == Tablerec::NOT_DEFINED) { - dataPtr[index]++; - }//if - }//for - index++; - tcConnectptr.i = cfirstfreeTcConrec; - while (tcConnectptr.i != RNIL) { - ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - tcConnectptr.i = tcConnectptr.p->nextTcConnectrec; - dataPtr[index]++; - }//while - sendSignal(userblockref, GSN_MEMCHECKCONF, signal, 10, JBB); - return; -}//Dblqh::execMEMCHECKREQ() - -#endif - -/* ************************************************************************* */ -/* ************************* STATEMENT BLOCKS ****************************** */ -/* ************************************************************************* */ -/* ========================================================================= */ -/* ====== BUILD LINKED LIST OF LOG PAGES AFTER RECEIVING FSREADCONF ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::buildLinkedLogPageList(Signal* signal) -{ - LogPageRecordPtr bllLogPagePtr; - - arrGuard(lfoPtr.p->noPagesRw - 1, 16); - arrGuard(lfoPtr.p->noPagesRw, 16); - for (UintR tbllIndex = 0; tbllIndex < lfoPtr.p->noPagesRw; tbllIndex++) { - jam(); - /* ---------------------------------------------------------------------- - * BUILD LINKED LIST BUT ALSO ENSURE THAT PAGE IS NOT SEEN AS DIRTY - * INITIALLY. - * --------------------------------------------------------------------- */ - bllLogPagePtr.i = lfoPtr.p->logPageArray[tbllIndex]; - ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord); - -// #if VM_TRACE -// // Check logPage checksum before modifying it -// Uint32 calcCheckSum = calcPageCheckSum(bllLogPagePtr); -// Uint32 checkSum = bllLogPagePtr.p->logPageWord[ZPOS_CHECKSUM]; -// if (checkSum != calcCheckSum) { -// ndbout << "Redolog: Checksum failure." 
<< endl; -// progError(__LINE__, NDBD_EXIT_NDBREQUIRE, "Redolog: Checksum failure."); -// } -// #endif - - bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] = - lfoPtr.p->logPageArray[tbllIndex + 1]; - bllLogPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY; - }//for - bllLogPagePtr.i = lfoPtr.p->logPageArray[lfoPtr.p->noPagesRw - 1]; - ptrCheckGuard(bllLogPagePtr, clogPageFileSize, logPageRecord); - bllLogPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL; -}//Dblqh::buildLinkedLogPageList() - -/* ========================================================================= - * ======= CHANGE TO NEXT MBYTE IN LOG ======= - * - * ========================================================================= */ -void Dblqh::changeMbyte(Signal* signal) -{ - writeNextLog(signal); - writeFileDescriptor(signal); -}//Dblqh::changeMbyte() - -/* ========================================================================= */ -/* ====== CHECK IF THIS COMMIT LOG RECORD IS TO BE EXECUTED ======= */ -/* */ -/* SUBROUTINE SHORT NAME = CEL */ -/* ========================================================================= */ -Uint32 Dblqh::checkIfExecLog(Signal* signal) -{ - tabptr.i = tcConnectptr.p->tableref; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - if (getFragmentrec(signal, tcConnectptr.p->fragmentid) && - (table_version_major(tabptr.p->schemaVersion) == table_version_major(tcConnectptr.p->schemaVersion))) { - if (fragptr.p->execSrStatus != Fragrecord::IDLE) { - if (fragptr.p->execSrNoReplicas > logPartPtr.p->execSrExecuteIndex) { - ndbrequire((fragptr.p->execSrNoReplicas - 1) < 4); - for (Uint32 i = logPartPtr.p->execSrExecuteIndex; - i < fragptr.p->execSrNoReplicas; - i++) { - jam(); - if (tcConnectptr.p->gci >= fragptr.p->execSrStartGci[i]) { - if (tcConnectptr.p->gci <= fragptr.p->execSrLastGci[i]) { - jam(); - logPartPtr.p->execSrExecuteIndex = i; - return ZOK; - }//if - }//if - }//for - }//if - }//if - }//if - return ZNOT_OK; -}//Dblqh::checkIfExecLog() - -/* ========================================================================= */ -/* == CHECK IF THERE IS LESS THAN 192 KBYTE IN THE BUFFER PLUS INCOMING === */ -/* READS ALREADY STARTED. IF SO IS THE CASE THEN START ANOTHER READ IF */ -/* THERE ARE MORE PAGES IN THIS MBYTE. */ -/* */ -/* ========================================================================= */ -void Dblqh::checkReadExecSr(Signal* signal) -{ - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG; - logPartPtr.p->execSrPagesRead = logPartPtr.p->execSrPagesRead + 8; - logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading - 8; - if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesReading) < - ZREAD_AHEAD_SIZE) { - jam(); - /* ---------------------------------------------------------------------- - * WE HAVE LESS THAN 64 KBYTE OF LOG PAGES REMAINING IN MEMORY OR ON - * ITS WAY TO MAIN MEMORY. READ IN 8 MORE PAGES. - * --------------------------------------------------------------------- */ - if ((logPartPtr.p->execSrPagesRead + logPartPtr.p->execSrPagesExecuted) < - ZPAGES_IN_MBYTE) { - jam(); - /* -------------------------------------------------------------------- - * THERE ARE MORE PAGES TO READ IN THIS MBYTE. READ THOSE FIRST - * IF >= ZPAGES_IN_MBYTE THEN THERE ARE NO MORE PAGES TO READ. THUS - * WE PROCEED WITH EXECUTION OF THE LOG. 
- * ------------------------------------------------------------------- */ - readExecSr(signal); - logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR; - }//if - }//if -}//Dblqh::checkReadExecSr() - -/* ========================================================================= */ -/* ==== CHECK IF START OF NEW FRAGMENT IS COMPLETED AND WE CAN ======= */ -/* ==== GET THE START GCI ======= */ -/* */ -/* SUBROUTINE SHORT NAME = CTC */ -/* ========================================================================= */ -void Dblqh::checkScanTcCompleted(Signal* signal) -{ - tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED; - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - fragptr.p->activeTcCounter = fragptr.p->activeTcCounter - 1; - if (fragptr.p->activeTcCounter == 0) { - jam(); - fragptr.p->startGci = cnewestGci + 1; - tabptr.i = tcConnectptr.p->tableref; - ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); - sendCopyActiveConf(signal, tcConnectptr.p->tableref); - }//if -}//Dblqh::checkScanTcCompleted() - -/* ------------------------------------------------------------------------- */ -/* ------ CLOSE A FILE DURING EXECUTION OF FRAGMENT LOG ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::closeFile(Signal* signal, - LogFileRecordPtr clfLogFilePtr, Uint32 line) -{ - signal->theData[0] = clfLogFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = clfLogFilePtr.i; - signal->theData[3] = ZCLOSE_NO_DELETE; - signal->theData[4] = line; - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 5, JBA); -}//Dblqh::closeFile() - - -/* ---------------------------------------------------------------- */ -/* ---------------- A LOG PAGE HAVE BEEN COMPLETED ---------------- */ -/* */ -/* SUBROUTINE SHORT NAME = CLP */ -// Input Pointers: -// logFilePtr -// logPagePtr -// logPartPtr -// Defines lfoPtr -/* ---------------------------------------------------------------- */ -void Dblqh::completedLogPage(Signal* signal, Uint32 clpType, Uint32 place) -{ - LogPageRecordPtr clpLogPagePtr; - LogPageRecordPtr wlpLogPagePtr; - UintR twlpNoPages; - UintR twlpType; - - if (logFilePtr.p->firstFilledPage == RNIL) { - jam(); - logFilePtr.p->firstFilledPage = logPagePtr.i; - } else { - jam(); - clpLogPagePtr.i = logFilePtr.p->lastFilledPage; - ptrCheckGuard(clpLogPagePtr, clogPageFileSize, logPageRecord); - clpLogPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i; - }//if - logFilePtr.p->lastFilledPage = logPagePtr.i; - logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL; - logFilePtr.p->noLogpagesInBuffer = logFilePtr.p->noLogpagesInBuffer + 1; - if (logFilePtr.p->noLogpagesInBuffer != ZMAX_PAGES_WRITTEN) { - if (clpType != ZLAST_WRITE_IN_FILE) { - if (clpType != ZENFORCE_WRITE) { - jam(); - return; - }//if - }//if - }//if - twlpType = clpType; -/* ------------------------------------------------------------------------- */ -/* ------ WRITE A SET OF LOG PAGES TO DISK ------- */ -/* */ -/* SUBROUTINE SHORT NAME: WLP */ -/* ------------------------------------------------------------------------- */ - seizeLfo(signal); - initLfo(signal); - Uint32* dataPtr = &signal->theData[6]; - twlpNoPages = 0; - wlpLogPagePtr.i = logFilePtr.p->firstFilledPage; - do { - dataPtr[twlpNoPages] = wlpLogPagePtr.i; - twlpNoPages++; - ptrCheckGuard(wlpLogPagePtr, clogPageFileSize, logPageRecord); - - writeDbgInfoPageHeader(wlpLogPagePtr, place, - logFilePtr.p->filePosition + twlpNoPages - 1, - ZPAGE_SIZE); - // Calculate checksum 
for page - wlpLogPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(wlpLogPagePtr); - wlpLogPagePtr.i = wlpLogPagePtr.p->logPageWord[ZNEXT_PAGE]; - } while (wlpLogPagePtr.i != RNIL); - ndbrequire(twlpNoPages < 9); - dataPtr[twlpNoPages] = logFilePtr.p->filePosition; -/* -------------------------------------------------- */ -/* SET TIMER ON THIS LOG PART TO SIGNIFY THAT A */ -/* LOG RECORD HAS BEEN SENT AT THIS TIME. */ -/* -------------------------------------------------- */ - logPartPtr.p->logPartTimer = logPartPtr.p->logTimer; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - if (twlpType == ZLAST_WRITE_IN_FILE) { - jam(); - signal->theData[3] = ZLIST_OF_MEM_PAGES_SYNCH; - } else { - jam(); - signal->theData[3] = ZLIST_OF_MEM_PAGES; - }//if - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = twlpNoPages; - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA); - - if (DEBUG_REDO) - ndbout_c("writing %d pages at part: %u file: %u pos: %u", - twlpNoPages, - logPartPtr.i, - logFilePtr.p->fileNo, - logFilePtr.p->filePosition); - - if (twlpType == ZNORMAL) { - jam(); - lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG; - } else if (twlpType == ZLAST_WRITE_IN_FILE) { - jam(); - lfoPtr.p->lfoState = LogFileOperationRecord::LAST_WRITE_IN_FILE; - } else { - ndbrequire(twlpType == ZENFORCE_WRITE); - jam(); - lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG; - }//if - /* ----------------------------------------------------------------------- */ - /* ------ MOVE PAGES FROM LOG FILE TO LFO RECORD ------- */ - /* */ - /* ----------------------------------------------------------------------- */ - /* -------------------------------------------------- */ - /* MOVE PAGES TO LFO RECORD AND REMOVE THEM */ - /* FROM LOG FILE RECORD. 
*/ - /* -------------------------------------------------- */ - lfoPtr.p->firstLfoPage = logFilePtr.p->firstFilledPage; - logFilePtr.p->firstFilledPage = RNIL; - logFilePtr.p->lastFilledPage = RNIL; - logFilePtr.p->noLogpagesInBuffer = 0; - - lfoPtr.p->noPagesRw = twlpNoPages; - lfoPtr.p->lfoPageNo = logFilePtr.p->filePosition; - lfoPtr.p->lfoWordWritten = ZPAGE_SIZE - 1; - logFilePtr.p->filePosition += twlpNoPages; -}//Dblqh::completedLogPage() - -/* ---------------------------------------------------------------- */ -/* ---------------- DELETE FRAGMENT RECORD ------------------------ */ -/* */ -/* SUBROUTINE SHORT NAME = DFR */ -/* ---------------------------------------------------------------- */ -void Dblqh::deleteFragrec(Uint32 fragId) -{ - Uint32 indexFound= RNIL; - fragptr.i = RNIL; - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragid[i] == fragId) { - fragptr.i = tabptr.p->fragrec[i]; - indexFound = i; - break; - }//if - }//for - if (fragptr.i != RNIL) { - jam(); - c_fragment_pool.getPtr(fragptr); - tabptr.p->fragid[indexFound] = ZNIL; - tabptr.p->fragrec[indexFound] = RNIL; - fragptr.p->fragStatus = Fragrecord::FREE; - c_fragment_pool.release(fragptr); - }//if -}//Dblqh::deleteFragrec() - -/* ------------------------------------------------------------------------- */ -/* ------- FIND LOG FILE RECORD GIVEN FILE NUMBER ------- */ -/* */ -/* INPUT: TFLF_FILE_NO THE FILE NUMBER */ -/* FLF_LOG_PART_PTR THE LOG PART RECORD */ -/* OUTPUT: FLF_LOG_FILE_PTR THE FOUND LOG FILE RECORD */ -/* SUBROUTINE SHORT NAME = FLF */ -/* ------------------------------------------------------------------------- */ -void Dblqh::findLogfile(Signal* signal, - Uint32 fileNo, - LogPartRecordPtr flfLogPartPtr, - LogFileRecordPtr* parLogFilePtr) -{ - LogFileRecordPtr locLogFilePtr; - locLogFilePtr.i = flfLogPartPtr.p->firstLogfile; - Uint32 loopCount = 0; - while (true) { - ptrCheckGuard(locLogFilePtr, clogFileFileSize, logFileRecord); - if (locLogFilePtr.p->fileNo == fileNo) { - jam(); - ndbrequire(loopCount == fileNo); - parLogFilePtr->i = locLogFilePtr.i; - parLogFilePtr->p = locLogFilePtr.p; - return; - }//if - locLogFilePtr.i = locLogFilePtr.p->nextLogFile; - loopCount++; - if (loopCount >= flfLogPartPtr.p->noLogFiles && - getNodeState().startLevel != NodeState::SL_STARTED) - { - goto error; - } - ndbrequire(loopCount < flfLogPartPtr.p->noLogFiles); - }//while - -error: - char buf[255]; - BaseString::snprintf(buf, sizeof(buf), - "Unable to restart, failed while reading redo." 
- " Likely invalid change of configuration"); - progError(__LINE__, - NDBD_EXIT_INVALID_CONFIG, - buf); -}//Dblqh::findLogfile() - -/* ------------------------------------------------------------------------- */ -/* ------ FIND PAGE REFERENCE IN MEMORY BUFFER AT LOG EXECUTION ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::findPageRef(Signal* signal, CommitLogRecord* commitLogRecord) -{ - UintR tfprIndex; - - logPagePtr.i = RNIL; - if (ERROR_INSERTED(5020)) { - // Force system to read page from disk - return; - } - pageRefPtr.i = logPartPtr.p->lastPageRef; - do { - ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord); - if (commitLogRecord->fileNo == pageRefPtr.p->prFileNo) { - if (commitLogRecord->startPageNo >= pageRefPtr.p->prPageNo) { - if (commitLogRecord->startPageNo < (Uint16) (pageRefPtr.p->prPageNo + 8)) { - jam(); - tfprIndex = commitLogRecord->startPageNo - pageRefPtr.p->prPageNo; - logPagePtr.i = pageRefPtr.p->pageRef[tfprIndex]; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - return; - }//if - }//if - }//if - pageRefPtr.i = pageRefPtr.p->prPrev; - } while (pageRefPtr.i != RNIL); -}//Dblqh::findPageRef() - -/* ------------------------------------------------------------------------- */ -/* ------ GET FIRST OPERATION QUEUED FOR LOGGING ------- */ -/* */ -/* SUBROUTINE SHORT NAME = GFL */ -/* ------------------------------------------------------------------------- */ -void Dblqh::getFirstInLogQueue(Signal* signal) -{ - TcConnectionrecPtr gflTcConnectptr; -/* -------------------------------------------------- */ -/* GET THE FIRST FROM THE LOG QUEUE AND REMOVE */ -/* IT FROM THE QUEUE. */ -/* -------------------------------------------------- */ - gflTcConnectptr.i = logPartPtr.p->firstLogQueue; - ptrCheckGuard(gflTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - logPartPtr.p->firstLogQueue = gflTcConnectptr.p->nextTcLogQueue; - if (logPartPtr.p->firstLogQueue == RNIL) { - jam(); - logPartPtr.p->lastLogQueue = RNIL; - }//if -}//Dblqh::getFirstInLogQueue() - -/* ---------------------------------------------------------------- */ -/* ---------------- GET FRAGMENT RECORD --------------------------- */ -/* INPUT: TFRAGID FRAGMENT ID LOOKING FOR */ -/* TABPTR TABLE ID */ -/* SUBROUTINE SHORT NAME = GFR */ -/* ---------------------------------------------------------------- */ -bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragid[i] == fragId) { - fragptr.i = tabptr.p->fragrec[i]; - c_fragment_pool.getPtr(fragptr); - return true; - }//if - }//for - return false; -}//Dblqh::getFragmentrec() - -/* ========================================================================= */ -/* ====== INITIATE FRAGMENT RECORD ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseAddfragrec(Signal* signal) -{ - if (caddfragrecFileSize != 0) { - for (addfragptr.i = 0; addfragptr.i < caddfragrecFileSize; addfragptr.i++) { - ptrAss(addfragptr, addFragRecord); - addfragptr.p->addfragStatus = AddFragRecord::FREE; - addfragptr.p->nextAddfragrec = addfragptr.i + 1; - }//for - addfragptr.i = caddfragrecFileSize - 1; - ptrAss(addfragptr, addFragRecord); - addfragptr.p->nextAddfragrec = RNIL; - cfirstfreeAddfragrec = 0; - } else { - jam(); - cfirstfreeAddfragrec = RNIL; - }//if -}//Dblqh::initialiseAddfragrec() - -/* 
========================================================================= */ -/* ====== INITIATE ATTRIBUTE IN AND OUT DATA BUFFER ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseAttrbuf(Signal* signal) -{ - if (cattrinbufFileSize != 0) { - for (attrinbufptr.i = 0; - attrinbufptr.i < cattrinbufFileSize; - attrinbufptr.i++) { - refresh_watch_dog(); - ptrAss(attrinbufptr, attrbuf); - attrinbufptr.p->attrbuf[ZINBUF_NEXT] = attrinbufptr.i + 1; - }//for - /* NEXT ATTRINBUF */ - attrinbufptr.i = cattrinbufFileSize - 1; - ptrAss(attrinbufptr, attrbuf); - attrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRINBUF */ - cfirstfreeAttrinbuf = 0; - } else { - jam(); - cfirstfreeAttrinbuf = RNIL; - }//if -}//Dblqh::initialiseAttrbuf() - -/* ========================================================================= */ -/* ====== INITIATE DATA BUFFER ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseDatabuf(Signal* signal) -{ - if (cdatabufFileSize != 0) { - for (databufptr.i = 0; databufptr.i < cdatabufFileSize; databufptr.i++) { - refresh_watch_dog(); - ptrAss(databufptr, databuf); - databufptr.p->nextDatabuf = databufptr.i + 1; - }//for - databufptr.i = cdatabufFileSize - 1; - ptrAss(databufptr, databuf); - databufptr.p->nextDatabuf = RNIL; - cfirstfreeDatabuf = 0; - } else { - jam(); - cfirstfreeDatabuf = RNIL; - }//if -}//Dblqh::initialiseDatabuf() - -/* ========================================================================= */ -/* ====== INITIATE FRAGMENT RECORD ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseFragrec(Signal* signal) -{ - - SLList tmp(c_fragment_pool); - while(tmp.seize(fragptr)) - { - refresh_watch_dog(); - new (fragptr.p) Fragrecord(); - fragptr.p->fragStatus = Fragrecord::FREE; - fragptr.p->execSrStatus = Fragrecord::IDLE; - fragptr.p->srStatus = Fragrecord::SS_IDLE; - } - tmp.release(); -}//Dblqh::initialiseFragrec() - -/* ========================================================================= */ -/* ====== INITIATE FRAGMENT RECORD ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseGcprec(Signal* signal) -{ - UintR tigpIndex; - - if (cgcprecFileSize != 0) { - for (gcpPtr.i = 0; gcpPtr.i < cgcprecFileSize; gcpPtr.i++) { - ptrAss(gcpPtr, gcpRecord); - for (tigpIndex = 0; tigpIndex <= 3; tigpIndex++) { - gcpPtr.p->gcpLogPartState[tigpIndex] = ZIDLE; - gcpPtr.p->gcpSyncReady[tigpIndex] = ZFALSE; - }//for - }//for - }//if -}//Dblqh::initialiseGcprec() - -/* ========================================================================= */ -/* ====== INITIATE LCP RECORD ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseLcpRec(Signal* signal) -{ - if (clcpFileSize != 0) { - for (lcpPtr.i = 0; lcpPtr.i < clcpFileSize; lcpPtr.i++) { - ptrAss(lcpPtr, lcpRecord); - lcpPtr.p->lcpState = LcpRecord::LCP_IDLE; - lcpPtr.p->lcpQueued = false; - lcpPtr.p->reportEmpty = false; - lcpPtr.p->firstFragmentFlag = false; - lcpPtr.p->lastFragmentFlag = false; - }//for - }//if -}//Dblqh::initialiseLcpRec() - -/* ========================================================================= */ -/* ====== INITIATE LOG FILE OPERATION RECORD ======= */ -/* */ -/* ========================================================================= */ -void 
Dblqh::initialiseLfo(Signal* signal) -{ - if (clfoFileSize != 0) { - for (lfoPtr.i = 0; lfoPtr.i < clfoFileSize; lfoPtr.i++) { - ptrAss(lfoPtr, logFileOperationRecord); - lfoPtr.p->lfoState = LogFileOperationRecord::IDLE; - lfoPtr.p->lfoTimer = 0; - lfoPtr.p->nextLfo = lfoPtr.i + 1; - }//for - lfoPtr.i = clfoFileSize - 1; - ptrAss(lfoPtr, logFileOperationRecord); - lfoPtr.p->nextLfo = RNIL; - cfirstfreeLfo = 0; - } else { - jam(); - cfirstfreeLfo = RNIL; - }//if -}//Dblqh::initialiseLfo() - -/* ========================================================================= */ -/* ====== INITIATE LOG FILE RECORD ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseLogFile(Signal* signal) -{ - if (clogFileFileSize != 0) { - for (logFilePtr.i = 0; logFilePtr.i < clogFileFileSize; logFilePtr.i++) { - ptrAss(logFilePtr, logFileRecord); - logFilePtr.p->nextLogFile = logFilePtr.i + 1; - logFilePtr.p->logFileStatus = LogFileRecord::LFS_IDLE; - - logFilePtr.p->logLastPrepRef = new Uint32[clogFileSize]; - logFilePtr.p->logMaxGciCompleted = new Uint32[clogFileSize]; - logFilePtr.p->logMaxGciStarted = new Uint32[clogFileSize]; - - if (logFilePtr.p->logLastPrepRef == 0 || - logFilePtr.p->logMaxGciCompleted == 0 || - logFilePtr.p->logMaxGciStarted == 0) - { - char buf[256]; - BaseString::snprintf(buf, sizeof(buf), - "Failed to alloc mbyte(%u) arrays for logfile %u", - clogFileSize, logFilePtr.i); - progError(__LINE__, NDBD_EXIT_MEMALLOC, buf); - } - - }//for - logFilePtr.i = clogFileFileSize - 1; - ptrAss(logFilePtr, logFileRecord); - logFilePtr.p->nextLogFile = RNIL; - cfirstfreeLogFile = 0; - } else { - jam(); - cfirstfreeLogFile = RNIL; - }//if -}//Dblqh::initialiseLogFile() - -/* ========================================================================= */ -/* ====== INITIATE LOG PAGES ======= */ -/* */ -/* ========================================================================= */ -void Dblqh::initialiseLogPage(Signal* signal) -{ - if (clogPageFileSize != 0) { - for (logPagePtr.i = 0; logPagePtr.i < clogPageFileSize; logPagePtr.i++) { - refresh_watch_dog(); - ptrAss(logPagePtr, logPageRecord); - logPagePtr.p->logPageWord[ZNEXT_PAGE] = logPagePtr.i + 1; - logPagePtr.p->logPageWord[ZPOS_IN_FREE_LIST]= 1; - logPagePtr.p->logPageWord[ZPOS_IN_WRITING]= 0; - }//for - logPagePtr.i = clogPageFileSize - 1; - ptrAss(logPagePtr, logPageRecord); - logPagePtr.p->logPageWord[ZNEXT_PAGE] = RNIL; - cfirstfreeLogPage = 0; - } else { - jam(); - cfirstfreeLogPage = RNIL; - }//if - cnoOfLogPages = clogPageFileSize; -}//Dblqh::initialiseLogPage() - -/* ========================================================================= - * ====== INITIATE LOG PART RECORD ======= - * - * ========================================================================= */ -void Dblqh::initialiseLogPart(Signal* signal) -{ - for (logPartPtr.i = 0; logPartPtr.i <= 3; logPartPtr.i++) { - ptrAss(logPartPtr, logPartRecord); - logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE; - logPartPtr.p->LogLqhKeyReqSent = ZFALSE; - logPartPtr.p->logPartNewestCompletedGCI = (UintR)-1; - }//for -}//Dblqh::initialiseLogPart() - -void Dblqh::initialisePageRef(Signal* signal) -{ - if (cpageRefFileSize != 0) { - for (pageRefPtr.i = 0; - pageRefPtr.i < cpageRefFileSize; - pageRefPtr.i++) { - ptrAss(pageRefPtr, pageRefRecord); - pageRefPtr.p->prNext = pageRefPtr.i + 1; - }//for - pageRefPtr.i = cpageRefFileSize - 1; - ptrAss(pageRefPtr, pageRefRecord); - pageRefPtr.p->prNext = RNIL; - 
cfirstfreePageRef = 0; - } else { - jam(); - cfirstfreePageRef = RNIL; - }//if -}//Dblqh::initialisePageRef() - -/* ========================================================================== - * ======= INITIATE RECORDS ======= - * - * TAKES CARE OF INITIATION OF ALL RECORDS IN THIS BLOCK. - * ========================================================================= */ -void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data, - Uint32 retRef, Uint32 retData) -{ - Uint32 i; - switch (data) { - case 0: - jam(); - m_sr_nodes.clear(); - m_sr_exec_sr_req.clear(); - m_sr_exec_sr_conf.clear(); - for (i = 0; i < 1024; i++) { - ctransidHash[i] = RNIL; - }//for - for (i = 0; i < 4; i++) { - cactiveCopy[i] = RNIL; - }//for - cnoActiveCopy = 0; - ccurrentGcprec = RNIL; - caddNodeState = ZFALSE; - cstartRecReq = 0; - cnewestGci = 0; - cnewestCompletedGci = 0; - crestartOldestGci = 0; - crestartNewestGci = 0; - csrPhaseStarted = ZSR_NO_PHASE_STARTED; - csrPhasesCompleted = 0; - cmasterDihBlockref = 0; - cnoFragmentsExecSr = 0; - clcpCompletedState = LCP_IDLE; - csrExecUndoLogState = EULS_IDLE; - c_lcpId = 0; - cnoOfFragsCheckpointed = 0; - break; - case 1: - jam(); - initialiseAddfragrec(signal); - break; - case 2: - jam(); - initialiseAttrbuf(signal); - break; - case 3: - jam(); - initialiseDatabuf(signal); - break; - case 4: - jam(); - initialiseFragrec(signal); - break; - case 5: - jam(); - initialiseGcprec(signal); - initialiseLcpRec(signal); - break; - case 6: - jam(); - initialiseLogPage(signal); - break; - case 7: - jam(); - initialiseLfo(signal); - break; - case 8: - jam(); - initialiseLogFile(signal); - initialiseLogPart(signal); - break; - case 9: - jam(); - initialisePageRef(signal); - break; - case 10: - jam(); - initialiseScanrec(signal); - break; - case 11: - jam(); - initialiseTabrec(signal); - break; - case 12: - jam(); - initialiseTcNodeFailRec(signal); - initialiseTcrec(signal); - { - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = retData; - sendSignal(retRef, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - } - return; - break; - default: - ndbrequire(false); - break; - }//switch - - signal->theData[0] = ZINITIALISE_RECORDS; - signal->theData[1] = data + 1; - signal->theData[2] = 0; - signal->theData[3] = retRef; - signal->theData[4] = retData; - sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 5, JBB); - - return; -}//Dblqh::initialiseRecordsLab() - -/* ========================================================================== - * ======= INITIATE TC CONNECTION RECORD ======= - * - * ========================================================================= */ -void Dblqh::initialiseScanrec(Signal* signal) -{ - ndbrequire(cscanrecFileSize > 1); - DLList tmp(c_scanRecordPool); - while (tmp.seize(scanptr)){ - //new (scanptr.p) ScanRecord(); - refresh_watch_dog(); - scanptr.p->scanType = ScanRecord::ST_IDLE; - scanptr.p->scanState = ScanRecord::SCAN_FREE; - scanptr.p->scanTcWaiting = ZFALSE; - scanptr.p->nextHash = RNIL; - scanptr.p->prevHash = RNIL; - scanptr.p->scan_acc_index= 0; - scanptr.p->scan_acc_attr_recs= 0; - } - tmp.release(); -}//Dblqh::initialiseScanrec() - -/* ========================================================================== - * ======= INITIATE TABLE RECORD ======= - * - * ========================================================================= */ -void Dblqh::initialiseTabrec(Signal* signal) -{ - if (ctabrecFileSize != 0) { - for (tabptr.i = 0; tabptr.i < 
ctabrecFileSize; tabptr.i++) { - refresh_watch_dog(); - ptrAss(tabptr, tablerec); - tabptr.p->tableStatus = Tablerec::NOT_DEFINED; - tabptr.p->usageCount = 0; - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - tabptr.p->fragid[i] = ZNIL; - tabptr.p->fragrec[i] = RNIL; - }//for - }//for - }//if -}//Dblqh::initialiseTabrec() - -/* ========================================================================== - * ======= INITIATE TC CONNECTION RECORD ======= - * - * ========================================================================= */ -void Dblqh::initialiseTcrec(Signal* signal) -{ - if (ctcConnectrecFileSize != 0) { - for (tcConnectptr.i = 0; - tcConnectptr.i < ctcConnectrecFileSize; - tcConnectptr.i++) { - refresh_watch_dog(); - ptrAss(tcConnectptr, tcConnectionrec); - tcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED; - tcConnectptr.p->tcScanRec = RNIL; - tcConnectptr.p->logWriteState = TcConnectionrec::NOT_STARTED; - tcConnectptr.p->firstAttrinbuf = RNIL; - tcConnectptr.p->lastAttrinbuf = RNIL; - tcConnectptr.p->firstTupkeybuf = RNIL; - tcConnectptr.p->lastTupkeybuf = RNIL; - tcConnectptr.p->tcTimer = 0; - tcConnectptr.p->nextTcConnectrec = tcConnectptr.i + 1; - }//for - tcConnectptr.i = ctcConnectrecFileSize - 1; - ptrAss(tcConnectptr, tcConnectionrec); - tcConnectptr.p->nextTcConnectrec = RNIL; - cfirstfreeTcConrec = 0; - } else { - jam(); - cfirstfreeTcConrec = RNIL; - }//if -}//Dblqh::initialiseTcrec() - -/* ========================================================================== - * ======= INITIATE TC CONNECTION RECORD ======= - * - * ========================================================================= */ -void Dblqh::initialiseTcNodeFailRec(Signal* signal) -{ - if (ctcNodeFailrecFileSize != 0) { - for (tcNodeFailptr.i = 0; - tcNodeFailptr.i < ctcNodeFailrecFileSize; - tcNodeFailptr.i++) { - ptrAss(tcNodeFailptr, tcNodeFailRecord); - tcNodeFailptr.p->tcFailStatus = TcNodeFailRecord::TC_STATE_FALSE; - }//for - }//if -}//Dblqh::initialiseTcNodeFailRec() - -/* ========================================================================== - * ======= INITIATE FRAGMENT RECORD ======= - * - * SUBROUTINE SHORT NAME = IF - * ========================================================================= */ -void Dblqh::initFragrec(Signal* signal, - Uint32 tableId, - Uint32 fragId, - Uint32 copyType) -{ - new (fragptr.p) Fragrecord(); - fragptr.p->m_scanNumberMask.set(); // All is free - fragptr.p->accBlockref = caccBlockref; - fragptr.p->firstWaitQueue = RNIL; - fragptr.p->lastWaitQueue = RNIL; - fragptr.p->fragStatus = Fragrecord::DEFINED; - fragptr.p->fragCopy = copyType; - fragptr.p->tupBlockref = ctupBlockref; - fragptr.p->tuxBlockref = ctuxBlockref; - fragptr.p->logFlag = Fragrecord::STATE_TRUE; - fragptr.p->lcpFlag = Fragrecord::LCP_STATE_TRUE; - for (Uint32 i = 0; i < MAX_LCP_STORED; i++) { - fragptr.p->lcpId[i] = 0; - }//for - fragptr.p->maxGciCompletedInLcp = 0; - fragptr.p->maxGciInLcp = 0; - fragptr.p->copyFragState = ZIDLE; - fragptr.p->newestGci = cnewestGci; - fragptr.p->tabRef = tableId; - fragptr.p->fragId = fragId; - fragptr.p->srStatus = Fragrecord::SS_IDLE; - fragptr.p->execSrStatus = Fragrecord::IDLE; - fragptr.p->execSrNoReplicas = 0; - fragptr.p->fragDistributionKey = 0; - fragptr.p->activeTcCounter = 0; - fragptr.p->tableFragptr = RNIL; -}//Dblqh::initFragrec() - -/* ========================================================================== - * ======= INITIATE FRAGMENT RECORD FOR SYSTEM RESTART ======= - * - * SUBROUTINE SHORT NAME = IFS - * 
========================================================================= */ - -/* ========================================================================== - * ======= INITIATE INFORMATION ABOUT GLOBAL CHECKPOINTS ======= - * IN LOG FILE RECORDS - * - * INPUT: LOG_FILE_PTR CURRENT LOG FILE - * TNO_FD_DESCRIPTORS THE NUMBER OF FILE DESCRIPTORS - * TO READ FROM THE LOG PAGE - * LOG_PAGE_PTR PAGE ZERO IN LOG FILE - * SUBROUTINE SHORT NAME = IGL - * ========================================================================= */ -void Dblqh::initGciInLogFileRec(Signal* signal, Uint32 noFdDescriptors) -{ - LogFileRecordPtr filePtr = logFilePtr; - Uint32 pos = ZPAGE_HEADER_SIZE + ZFD_HEADER_SIZE; - for (Uint32 fd = 0; fd < noFdDescriptors; fd++) - { - jam(); - for (Uint32 mb = 0; mb < clogFileSize; mb++) - { - jam(); - Uint32 pos0 = pos + fd * (ZFD_MBYTE_SIZE * clogFileSize) + mb; - Uint32 pos1 = pos0 + clogFileSize; - Uint32 pos2 = pos1 + clogFileSize; - arrGuard(pos0, ZPAGE_SIZE); - arrGuard(pos1, ZPAGE_SIZE); - arrGuard(pos2, ZPAGE_SIZE); - filePtr.p->logMaxGciCompleted[mb] = logPagePtr.p->logPageWord[pos0]; - filePtr.p->logMaxGciStarted[mb] = logPagePtr.p->logPageWord[pos1]; - filePtr.p->logLastPrepRef[mb] = logPagePtr.p->logPageWord[pos2]; - } - if (fd + 1 < noFdDescriptors) - { - jam(); - filePtr.i = filePtr.p->prevLogFile; - ptrCheckGuard(filePtr, clogFileFileSize, logFileRecord); - } - } -}//Dblqh::initGciInLogFileRec() - -/* ========================================================================== - * ======= INITIATE LCP RECORD WHEN USED FOR SYSTEM RESTART ======= - * - * SUBROUTINE SHORT NAME = ILS - * ========================================================================= */ -void Dblqh::initLcpSr(Signal* signal, - Uint32 lcpNo, - Uint32 lcpId, - Uint32 tableId, - Uint32 fragId, - Uint32 fragPtr) -{ - lcpPtr.p->lcpQueued = false; - lcpPtr.p->currentFragment.fragPtrI = fragPtr; - lcpPtr.p->currentFragment.lcpFragOrd.lcpNo = lcpNo; - lcpPtr.p->currentFragment.lcpFragOrd.lcpId = lcpId; - lcpPtr.p->currentFragment.lcpFragOrd.tableId = tableId; - lcpPtr.p->currentFragment.lcpFragOrd.fragmentId = fragId; - lcpPtr.p->lcpState = LcpRecord::LCP_SR_WAIT_FRAGID; -}//Dblqh::initLcpSr() - -/* ========================================================================== - * ======= INITIATE LOG PART ======= - * - * ========================================================================= */ -void Dblqh::initLogpart(Signal* signal) -{ - logPartPtr.p->execSrLogPage = RNIL; - logPartPtr.p->execSrLogPageIndex = ZNIL; - logPartPtr.p->execSrExecuteIndex = 0; - logPartPtr.p->noLogFiles = cnoLogFiles; - logPartPtr.p->logLap = 0; - logPartPtr.p->logTailFileNo = 0; - logPartPtr.p->logTailMbyte = 0; - logPartPtr.p->lastMbyte = ZNIL; - logPartPtr.p->logPartState = LogPartRecord::SR_FIRST_PHASE; - logPartPtr.p->logExecState = LogPartRecord::LES_IDLE; - logPartPtr.p->firstLogTcrec = RNIL; - logPartPtr.p->lastLogTcrec = RNIL; - logPartPtr.p->firstLogQueue = RNIL; - logPartPtr.p->lastLogQueue = RNIL; - logPartPtr.p->gcprec = RNIL; - logPartPtr.p->firstPageRef = RNIL; - logPartPtr.p->lastPageRef = RNIL; - logPartPtr.p->headFileNo = ZNIL; - logPartPtr.p->headPageNo = ZNIL; - logPartPtr.p->headPageIndex = ZNIL; -}//Dblqh::initLogpart() - -/* ========================================================================== - * ======= INITIATE LOG POINTERS ======= - * - * ========================================================================= */ -void Dblqh::initLogPointers(Signal* signal) -{ - logPartPtr.i = 
tcConnectptr.p->m_log_part_ptr_i; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - logFilePtr.i = logPartPtr.p->currentLogfile; - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); -}//Dblqh::initLogPointers() - -/* ------------------------------------------------------------------------- */ -/* ------- INIT REQUEST INFO BEFORE EXECUTING A LOG RECORD ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::initReqinfoExecSr(Signal* signal) -{ - UintR Treqinfo = 0; - TcConnectionrec * const regTcPtr = tcConnectptr.p; - LqhKeyReq::setKeyLen(Treqinfo, regTcPtr->primKeyLen); -/* ------------------------------------------------------------------------- */ -/* NUMBER OF BACKUPS AND STANDBYS ARE ZERO AND NEED NOT BE SET. */ -/* REPLICA TYPE IS CLEARED BY SEND_LQHKEYREQ. */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* SET LAST REPLICA NUMBER TO ZERO (BIT 10-11) */ -/* ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* SET DIRTY FLAG */ -/* ------------------------------------------------------------------------- */ - LqhKeyReq::setDirtyFlag(Treqinfo, 1); -/* ------------------------------------------------------------------------- */ -/* SET SIMPLE TRANSACTION */ -/* ------------------------------------------------------------------------- */ - LqhKeyReq::setSimpleFlag(Treqinfo, 1); - LqhKeyReq::setGCIFlag(Treqinfo, 1); -/* ------------------------------------------------------------------------- */ -/* SET OPERATION TYPE AND LOCK MODE (NEVER READ OPERATION OR SCAN IN LOG) */ -/* ------------------------------------------------------------------------- */ - LqhKeyReq::setOperation(Treqinfo, regTcPtr->operation); - regTcPtr->reqinfo = Treqinfo; -/* ------------------------------------------------------------------------ */ -/* NO OF BACKUP IS SET TO ONE AND NUMBER OF STANDBY NODES IS SET TO ZERO. */ -/* THUS THE RECEIVING NODE WILL EXPECT THAT IT IS THE LAST NODE AND WILL */ -/* SEND COMPLETED AS THE RESPONSE SIGNAL SINCE DIRTY_OP BIT IS SET. 
*/ -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------- */ -/* SET REPLICA TYPE TO PRIMARY AND NUMBER OF REPLICA TO ONE */ -/* ------------------------------------------------------------------------- */ - regTcPtr->lastReplicaNo = 0; - regTcPtr->apiVersionNo = 0; - regTcPtr->nextSeqNoReplica = 0; - regTcPtr->opExec = 0; - regTcPtr->storedProcId = ZNIL; - regTcPtr->readlenAi = 0; - regTcPtr->nodeAfterNext[0] = ZNIL; - regTcPtr->nodeAfterNext[1] = ZNIL; - regTcPtr->dirtyOp = ZFALSE; - regTcPtr->tcBlockref = cownref; -}//Dblqh::initReqinfoExecSr() - -/* -------------------------------------------------------------------------- - * ------- INSERT FRAGMENT ------- - * - * ------------------------------------------------------------------------- */ -bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId) -{ - terrorCode = ZOK; - if(c_fragment_pool.seize(fragptr) == false) - { - terrorCode = ZNO_FREE_FRAGMENTREC; - return false; - } - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabptr.p->fragid[i] == ZNIL) { - jam(); - tabptr.p->fragid[i] = fragId; - tabptr.p->fragrec[i] = fragptr.i; - return true; - }//if - }//for - c_fragment_pool.release(fragptr); - terrorCode = ZTOO_MANY_FRAGMENTS; - return false; -}//Dblqh::insertFragrec() - -/* -------------------------------------------------------------------------- - * ------- LINK OPERATION IN ACTIVE LIST ON FRAGMENT ------- - * - * SUBROUTINE SHORT NAME: LFQ -// Input Pointers: -// tcConnectptr -// fragptr -* ------------------------------------------------------------------------- */ -void Dblqh::linkFragQueue(Signal* signal) -{ - TcConnectionrecPtr lfqTcConnectptr; - TcConnectionrec * const regTcPtr = tcConnectptr.p; - Fragrecord * const regFragPtr = fragptr.p; - Uint32 tcIndex = tcConnectptr.i; - - lfqTcConnectptr.i = regFragPtr->lastWaitQueue; - regTcPtr->nextTc = RNIL; - regFragPtr->lastWaitQueue = tcIndex; - regTcPtr->prevTc = lfqTcConnectptr.i; - ndbrequire(regTcPtr->listState == TcConnectionrec::NOT_IN_LIST); - regTcPtr->listState = TcConnectionrec::WAIT_QUEUE_LIST; - if (lfqTcConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(lfqTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - lfqTcConnectptr.p->nextTc = tcIndex; - } else { - regFragPtr->firstWaitQueue = tcIndex; - }//if - return; -}//Dblqh::linkFragQueue() - -/* ------------------------------------------------------------------------- - * ------- LINK OPERATION INTO WAITING FOR LOGGING ------- - * - * SUBROUTINE SHORT NAME = LWL -// Input Pointers: -// tcConnectptr -// logPartPtr - * ------------------------------------------------------------------------- */ -void Dblqh::linkWaitLog(Signal* signal, LogPartRecordPtr regLogPartPtr) -{ - TcConnectionrecPtr lwlTcConnectptr; - -/* -------------------------------------------------- */ -/* LINK ACTIVE OPERATION INTO QUEUE WAITING FOR */ -/* ACCESS TO THE LOG PART. 
*/ -/* -------------------------------------------------- */ - lwlTcConnectptr.i = regLogPartPtr.p->lastLogQueue; - if (lwlTcConnectptr.i == RNIL) { - jam(); - regLogPartPtr.p->firstLogQueue = tcConnectptr.i; - } else { - jam(); - ptrCheckGuard(lwlTcConnectptr, ctcConnectrecFileSize, tcConnectionrec); - lwlTcConnectptr.p->nextTcLogQueue = tcConnectptr.i; - }//if - regLogPartPtr.p->lastLogQueue = tcConnectptr.i; - tcConnectptr.p->nextTcLogQueue = RNIL; - if (regLogPartPtr.p->LogLqhKeyReqSent == ZFALSE) { - jam(); - regLogPartPtr.p->LogLqhKeyReqSent = ZTRUE; - signal->theData[0] = ZLOG_LQHKEYREQ; - signal->theData[1] = regLogPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//if -}//Dblqh::linkWaitLog() - -/* -------------------------------------------------------------------------- - * ------- START THE NEXT OPERATION ON THIS LOG PART IF ANY ------- - * ------- OPERATIONS ARE QUEUED. ------- - * - * SUBROUTINE SHORT NAME = LNS -// Input Pointers: -// tcConnectptr -// logPartPtr - * ------------------------------------------------------------------------- */ -void Dblqh::logNextStart(Signal* signal) -{ - LogPartRecordPtr lnsLogPartPtr; - UintR tlnsStillWaiting; - LogPartRecord * const regLogPartPtr = logPartPtr.p; - - if ((regLogPartPtr->firstLogQueue == RNIL) && - (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) && - (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE)) { -// -------------------------------------------------------------------------- -// Optimised route for the common case -// -------------------------------------------------------------------------- - regLogPartPtr->logPartState = LogPartRecord::IDLE; - return; - }//if - if (regLogPartPtr->firstLogQueue != RNIL) { - jam(); - if (regLogPartPtr->LogLqhKeyReqSent == ZFALSE) { - jam(); - regLogPartPtr->LogLqhKeyReqSent = ZTRUE; - signal->theData[0] = ZLOG_LQHKEYREQ; - signal->theData[1] = logPartPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//if - } else { - if (regLogPartPtr->logPartState == LogPartRecord::ACTIVE) { - jam(); - regLogPartPtr->logPartState = LogPartRecord::IDLE; - } else { - jam(); - }//if - }//if - if (regLogPartPtr->waitWriteGciLog != LogPartRecord::WWGL_TRUE) { - jam(); - return; - } else { - jam(); -/* -------------------------------------------------------------------------- - * A COMPLETE GCI LOG RECORD IS WAITING TO BE WRITTEN. WE GIVE THIS HIGHEST - * PRIORITY AND WRITE IT IMMEDIATELY. AFTER WRITING IT WE CHECK IF ANY MORE - * LOG PARTS ARE WAITING. IF NOT WE SEND A SIGNAL THAT INITIALISES THE GCP - * RECORD TO WAIT UNTIL ALL COMPLETE GCI LOG RECORDS HAVE REACHED TO DISK. - * -------------------------------------------------------------------------- */ - writeCompletedGciLog(signal); - logPartPtr.p->waitWriteGciLog = LogPartRecord::WWGL_FALSE; - tlnsStillWaiting = ZFALSE; - for (lnsLogPartPtr.i = 0; lnsLogPartPtr.i < 4; lnsLogPartPtr.i++) { - jam(); - ptrAss(lnsLogPartPtr, logPartRecord); - if (lnsLogPartPtr.p->waitWriteGciLog == LogPartRecord::WWGL_TRUE) { - jam(); - tlnsStillWaiting = ZTRUE; - }//if - }//for - if (tlnsStillWaiting == ZFALSE) { - jam(); - signal->theData[0] = ZINIT_GCP_REC; - sendSignal(cownref, GSN_CONTINUEB, signal, 1, JBB); - }//if - }//if -}//Dblqh::logNextStart() - -/* -------------------------------------------------------------------------- - * ------- MOVE PAGES FROM LFO RECORD TO PAGE REFERENCE RECORD ------- - * WILL ALWAYS MOVE 8 PAGES TO A PAGE REFERENCE RECORD. 
- * - * SUBROUTINE SHORT NAME = MPR - * ------------------------------------------------------------------------- */ -void Dblqh::moveToPageRef(Signal* signal) -{ - LogPageRecordPtr mprLogPagePtr; - PageRefRecordPtr mprPageRefPtr; - UintR tmprIndex; - -/* -------------------------------------------------------------------------- - * ------- INSERT PAGE REFERENCE RECORD ------- - * - * INPUT: LFO_PTR LOG FILE OPERATION RECORD - * LOG_PART_PTR LOG PART RECORD - * PAGE_REF_PTR THE PAGE REFERENCE RECORD TO BE INSERTED. - * ------------------------------------------------------------------------- */ - PageRefRecordPtr iprPageRefPtr; - - if ((logPartPtr.p->mmBufferSize + 8) >= ZMAX_MM_BUFFER_SIZE) { - jam(); - pageRefPtr.i = logPartPtr.p->firstPageRef; - ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord); - releasePrPages(signal); - removePageRef(signal); - } else { - jam(); - logPartPtr.p->mmBufferSize = logPartPtr.p->mmBufferSize + 8; - }//if - seizePageRef(signal); - if (logPartPtr.p->firstPageRef == RNIL) { - jam(); - logPartPtr.p->firstPageRef = pageRefPtr.i; - } else { - jam(); - iprPageRefPtr.i = logPartPtr.p->lastPageRef; - ptrCheckGuard(iprPageRefPtr, cpageRefFileSize, pageRefRecord); - iprPageRefPtr.p->prNext = pageRefPtr.i; - }//if - pageRefPtr.p->prPrev = logPartPtr.p->lastPageRef; - logPartPtr.p->lastPageRef = pageRefPtr.i; - - pageRefPtr.p->prFileNo = logFilePtr.p->fileNo; - pageRefPtr.p->prPageNo = lfoPtr.p->lfoPageNo; - tmprIndex = 0; - mprLogPagePtr.i = lfoPtr.p->firstLfoPage; -MPR_LOOP: - arrGuard(tmprIndex, 8); - pageRefPtr.p->pageRef[tmprIndex] = mprLogPagePtr.i; - tmprIndex = tmprIndex + 1; - ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord); - mprLogPagePtr.i = mprLogPagePtr.p->logPageWord[ZNEXT_PAGE]; - if (mprLogPagePtr.i != RNIL) { - jam(); - goto MPR_LOOP; - }//if - mprPageRefPtr.i = pageRefPtr.p->prPrev; - if (mprPageRefPtr.i != RNIL) { - jam(); - ptrCheckGuard(mprPageRefPtr, cpageRefFileSize, pageRefRecord); - mprLogPagePtr.i = mprPageRefPtr.p->pageRef[7]; - ptrCheckGuard(mprLogPagePtr, clogPageFileSize, logPageRecord); - mprLogPagePtr.p->logPageWord[ZNEXT_PAGE] = pageRefPtr.p->pageRef[0]; - }//if -}//Dblqh::moveToPageRef() - -/* ------------------------------------------------------------------------- */ -/* ------- READ THE ATTRINFO FROM THE LOG ------- */ -/* */ -/* SUBROUTINE SHORT NAME = RA */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readAttrinfo(Signal* signal) -{ - Uint32 remainingLen = tcConnectptr.p->totSendlenAi; - if (remainingLen == 0) { - jam(); - tcConnectptr.p->reclenAiLqhkey = 0; - return; - }//if - Uint32 dataLen = remainingLen; - if (remainingLen > 5) - dataLen = 5; - readLogData(signal, dataLen, &tcConnectptr.p->firstAttrinfo[0]); - tcConnectptr.p->reclenAiLqhkey = dataLen; - remainingLen -= dataLen; - while (remainingLen > 0) { - jam(); - dataLen = remainingLen; - if (remainingLen > 22) - dataLen = 22; - seizeAttrinbuf(signal); - readLogData(signal, dataLen, &attrinbufptr.p->attrbuf[0]); - attrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = dataLen; - remainingLen -= dataLen; - }//while -}//Dblqh::readAttrinfo() - -/* ------------------------------------------------------------------------- */ -/* ------- READ COMMIT LOG ------- */ -/* */ -/* SUBROUTINE SHORT NAME = RCL */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readCommitLog(Signal* signal, CommitLogRecord* commitLogRecord) -{ - Uint32 trclPageIndex = 
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - if ((trclPageIndex + (ZCOMMIT_LOG_SIZE - 1)) < ZPAGE_SIZE) { - jam(); - tcConnectptr.p->tableref = logPagePtr.p->logPageWord[trclPageIndex + 0]; - tcConnectptr.p->schemaVersion = logPagePtr.p->logPageWord[trclPageIndex + 1]; - tcConnectptr.p->fragmentid = logPagePtr.p->logPageWord[trclPageIndex + 2]; - commitLogRecord->fileNo = logPagePtr.p->logPageWord[trclPageIndex + 3]; - commitLogRecord->startPageNo = logPagePtr.p->logPageWord[trclPageIndex + 4]; - commitLogRecord->startPageIndex = logPagePtr.p->logPageWord[trclPageIndex + 5]; - commitLogRecord->stopPageNo = logPagePtr.p->logPageWord[trclPageIndex + 6]; - tcConnectptr.p->gci = logPagePtr.p->logPageWord[trclPageIndex + 7]; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - (trclPageIndex + ZCOMMIT_LOG_SIZE) - 1; - } else { - jam(); - tcConnectptr.p->tableref = readLogword(signal); - tcConnectptr.p->schemaVersion = readLogword(signal); - tcConnectptr.p->fragmentid = readLogword(signal); - commitLogRecord->fileNo = readLogword(signal); - commitLogRecord->startPageNo = readLogword(signal); - commitLogRecord->startPageIndex = readLogword(signal); - commitLogRecord->stopPageNo = readLogword(signal); - tcConnectptr.p->gci = readLogword(signal); - }//if - tcConnectptr.p->transid[0] = logPartPtr.i + 65536; - tcConnectptr.p->transid[1] = (DBLQH << 20) + (cownNodeid << 8); -}//Dblqh::readCommitLog() - -/* ------------------------------------------------------------------------- */ -/* ------- READ LOG PAGES FROM DISK IN ORDER TO EXECUTE A LOG ------- */ -/* RECORD WHICH WAS NOT FOUND IN MAIN MEMORY. */ -/* */ -/* SUBROUTINE SHORT NAME = REL */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readExecLog(Signal* signal) -{ - UintR trelIndex; - UintR trelI; - - seizeLfo(signal); - initLfo(signal); - trelI = logPartPtr.p->execSrStopPageNo - logPartPtr.p->execSrStartPageNo; - arrGuard(trelI + 1, 16); - lfoPtr.p->logPageArray[trelI + 1] = logPartPtr.p->execSrStartPageNo; - for (trelIndex = logPartPtr.p->execSrStopPageNo; (trelIndex >= logPartPtr.p->execSrStartPageNo) && - (UintR)~trelIndex; trelIndex--) { - jam(); - seizeLogpage(signal); - arrGuard(trelI, 16); - lfoPtr.p->logPageArray[trelI] = logPagePtr.i; - trelI--; - }//for - lfoPtr.p->lfoPageNo = logPartPtr.p->execSrStartPageNo; - lfoPtr.p->noPagesRw = (logPartPtr.p->execSrStopPageNo - - logPartPtr.p->execSrStartPageNo) + 1; - lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0]; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - signal->theData[3] = ZLIST_OF_MEM_PAGES; // edtjamo TR509 //ZLIST_OF_PAIRS; - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = lfoPtr.p->noPagesRw; - signal->theData[6] = lfoPtr.p->logPageArray[0]; - signal->theData[7] = lfoPtr.p->logPageArray[1]; - signal->theData[8] = lfoPtr.p->logPageArray[2]; - signal->theData[9] = lfoPtr.p->logPageArray[3]; - signal->theData[10] = lfoPtr.p->logPageArray[4]; - signal->theData[11] = lfoPtr.p->logPageArray[5]; - signal->theData[12] = lfoPtr.p->logPageArray[6]; - signal->theData[13] = lfoPtr.p->logPageArray[7]; - signal->theData[14] = lfoPtr.p->logPageArray[8]; - signal->theData[15] = lfoPtr.p->logPageArray[9]; - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA); - - if (DEBUG_REDO) - ndbout_c("readExecLog %u page at part: %u file: %u pos: %u", - lfoPtr.p->noPagesRw, - logPartPtr.i, - logFilePtr.p->fileNo, - logPartPtr.p->execSrStartPageNo); - -}//Dblqh::readExecLog() 
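readCommitLog above shows a pattern used throughout this block: a fixed-size log record is copied straight out of the current log page when it lies entirely before the page boundary, and is otherwise read word by word so the cursor can follow the chain to the next page. A minimal standalone sketch of that fast-path/slow-path split follows; the names (Page, Cursor, PAGE_WORDS, RECORD_WORDS, readWord, readRecord) are hypothetical and the page geometry is assumed from the surrounding comments, so this is an illustration of the technique, not code from the tree.

#include <cassert>
#include <cstdint>
#include <cstring>

static const uint32_t PAGE_WORDS   = 2048; // assumed: 8 KB pages of 32-bit words
static const uint32_t RECORD_WORDS = 9;    // e.g. a commit record: type word plus eight payload words

struct Page {
  uint32_t word[PAGE_WORDS];
  Page*    next;                           // next page in the chained log buffer
};

struct Cursor {
  Page*    page;                           // current page
  uint32_t pos;                            // word index within the current page

  // Slow path: read one word, stepping to the next page at the boundary
  // (the real code also resumes after the page header when it steps).
  uint32_t readWord() {
    uint32_t w = page->word[pos++];
    if (pos >= PAGE_WORDS) {
      assert(page->next != nullptr);
      page = page->next;
      pos = 0;
    }
    return w;
  }

  // Fast path when the whole record fits before the page boundary,
  // word-by-word fallback when it straddles two pages.
  void readRecord(uint32_t* dst, uint32_t len) {
    if (pos + len <= PAGE_WORDS) {
      std::memcpy(dst, &page->word[pos], len * sizeof(uint32_t));
      pos += len;
    } else {
      for (uint32_t i = 0; i < len; i++)
        dst[i] = readWord();
    }
  }
};

// Usage sketch: Cursor c{firstPage, startPos}; uint32_t rec[RECORD_WORDS]; c.readRecord(rec, RECORD_WORDS);

The same split appears on the write side in writeCommitLog further down: contiguous stores into the page in the common case, writeLogWord with its per-word boundary check otherwise.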
- -/* ------------------------------------------------------------------------- */ -/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */ -/* */ -/* SUBROUTINE SHORT NAME = RES */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readExecSrNewMbyte(Signal* signal) -{ - logFilePtr.p->currentFilepage = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE; - logFilePtr.p->filePosition = logFilePtr.p->currentMbyte * ZPAGES_IN_MBYTE; - logPartPtr.p->execSrPagesRead = 0; - logPartPtr.p->execSrPagesReading = 0; - logPartPtr.p->execSrPagesExecuted = 0; - readExecSr(signal); - logPartPtr.p->logExecState = LogPartRecord::LES_WAIT_READ_EXEC_SR_NEW_MBYTE; -}//Dblqh::readExecSrNewMbyte() - -/* ------------------------------------------------------------------------- */ -/* ------- READ 64 KBYTES WHEN EXECUTING THE FRAGMENT LOG ------- */ -/* */ -/* SUBROUTINE SHORT NAME = RES */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readExecSr(Signal* signal) -{ - UintR tresPageid; - UintR tresIndex; - - tresPageid = logFilePtr.p->filePosition; - seizeLfo(signal); - initLfo(signal); - for (tresIndex = 7; (UintR)~tresIndex; tresIndex--) { - jam(); -/* ------------------------------------------------------------------------- */ -/* GO BACKWARDS SINCE WE INSERT AT THE BEGINNING AND WE WANT THAT FIRST PAGE */ -/* SHALL BE FIRST AND LAST PAGE LAST. */ -/* ------------------------------------------------------------------------- */ - seizeLogpage(signal); - lfoPtr.p->logPageArray[tresIndex] = logPagePtr.i; - }//for - lfoPtr.p->lfoState = LogFileOperationRecord::READ_EXEC_SR; - lfoPtr.p->lfoPageNo = tresPageid; - logFilePtr.p->filePosition = logFilePtr.p->filePosition + 8; - logPartPtr.p->execSrPagesReading = logPartPtr.p->execSrPagesReading + 8; - lfoPtr.p->noPagesRw = 8; - lfoPtr.p->firstLfoPage = lfoPtr.p->logPageArray[0]; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - signal->theData[3] = ZLIST_OF_MEM_PAGES; - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = 8; - signal->theData[6] = lfoPtr.p->logPageArray[0]; - signal->theData[7] = lfoPtr.p->logPageArray[1]; - signal->theData[8] = lfoPtr.p->logPageArray[2]; - signal->theData[9] = lfoPtr.p->logPageArray[3]; - signal->theData[10] = lfoPtr.p->logPageArray[4]; - signal->theData[11] = lfoPtr.p->logPageArray[5]; - signal->theData[12] = lfoPtr.p->logPageArray[6]; - signal->theData[13] = lfoPtr.p->logPageArray[7]; - signal->theData[14] = tresPageid; - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA); - - if (DEBUG_REDO) - ndbout_c("readExecSr %u page at part: %u file: %u pos: %u", - 8, - logPartPtr.i, - logFilePtr.p->fileNo, - tresPageid); - -}//Dblqh::readExecSr() - -/* ------------------------------------------------------------------------- */ -/* ------------ READ THE PRIMARY KEY FROM THE LOG ---------------- */ -/* */ -/* SUBROUTINE SHORT NAME = RK */ -/* --------------------------------------------------------------------------*/ -void Dblqh::readKey(Signal* signal) -{ - Uint32 remainingLen = tcConnectptr.p->primKeyLen; - ndbrequire(remainingLen != 0); - Uint32 dataLen = remainingLen; - if (remainingLen > 4) - dataLen = 4; - readLogData(signal, dataLen, &tcConnectptr.p->tupkeyData[0]); - remainingLen -= dataLen; - while (remainingLen > 0) { - jam(); - seizeTupkeybuf(signal); - dataLen = remainingLen; - if (dataLen > 4) - dataLen = 4; - readLogData(signal, dataLen, 
&databufptr.p->data[0]); - remainingLen -= dataLen; - }//while -}//Dblqh::readKey() - -/* ------------------------------------------------------------------------- */ -/* ------------ READ A NUMBER OF WORDS FROM LOG INTO CDATA ---------------- */ -/* */ -/* SUBROUTINE SHORT NAME = RLD */ -/* --------------------------------------------------------------------------*/ -void Dblqh::readLogData(Signal* signal, Uint32 noOfWords, Uint32* dataPtr) -{ - ndbrequire(noOfWords < 32); - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - if ((logPos + noOfWords) >= ZPAGE_SIZE) { - for (Uint32 i = 0; i < noOfWords; i++) - dataPtr[i] = readLogwordExec(signal); - } else { - MEMCOPY_NO_WORDS(dataPtr, &logPagePtr.p->logPageWord[logPos], noOfWords); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + noOfWords; - }//if -}//Dblqh::readLogData() - -/* ------------------------------------------------------------------------- */ -/* ------------ READ THE LOG HEADER OF A PREPARE LOG HEADER ---------------- */ -/* */ -/* SUBROUTINE SHORT NAME = RLH */ -/* --------------------------------------------------------------------------*/ -void Dblqh::readLogHeader(Signal* signal) -{ - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - if ((logPos + ZLOG_HEAD_SIZE) < ZPAGE_SIZE) { - jam(); - tcConnectptr.p->hashValue = logPagePtr.p->logPageWord[logPos + 2]; - tcConnectptr.p->operation = logPagePtr.p->logPageWord[logPos + 3]; - tcConnectptr.p->totSendlenAi = logPagePtr.p->logPageWord[logPos + 4]; - tcConnectptr.p->primKeyLen = logPagePtr.p->logPageWord[logPos + 5]; - tcConnectptr.p->m_row_id.m_page_no = logPagePtr.p->logPageWord[logPos + 6]; - tcConnectptr.p->m_row_id.m_page_idx = logPagePtr.p->logPageWord[logPos+ 7]; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + ZLOG_HEAD_SIZE; - } else { - jam(); - readLogwordExec(signal); /* IGNORE PREPARE LOG RECORD TYPE */ - readLogwordExec(signal); /* IGNORE LOG RECORD SIZE */ - tcConnectptr.p->hashValue = readLogwordExec(signal); - tcConnectptr.p->operation = readLogwordExec(signal); - tcConnectptr.p->totSendlenAi = readLogwordExec(signal); - tcConnectptr.p->primKeyLen = readLogwordExec(signal); - tcConnectptr.p->m_row_id.m_page_no = readLogwordExec(signal); - tcConnectptr.p->m_row_id.m_page_idx = readLogwordExec(signal); - }//if - - tcConnectptr.p->m_use_rowid = (tcConnectptr.p->operation == ZINSERT); -}//Dblqh::readLogHeader() - -/* ------------------------------------------------------------------------- */ -/* ------- READ A WORD FROM THE LOG ------- */ -/* */ -/* OUTPUT: TLOG_WORD */ -/* SUBROUTINE SHORT NAME = RLW */ -/* ------------------------------------------------------------------------- */ -Uint32 Dblqh::readLogword(Signal* signal) -{ - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - ndbrequire(logPos < ZPAGE_SIZE); - Uint32 logWord = logPagePtr.p->logPageWord[logPos]; - logPos++; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos; - if (logPos >= ZPAGE_SIZE) { - jam(); - logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE; - logFilePtr.p->currentLogpage = logPagePtr.i; - logFilePtr.p->currentFilepage++; - logPartPtr.p->execSrPagesRead--; - logPartPtr.p->execSrPagesExecuted++; - }//if - return logWord; -}//Dblqh::readLogword() - -/* ------------------------------------------------------------------------- */ -/* ------- READ A WORD FROM THE LOG WHEN EXECUTING A LOG RECORD ------- */ 
-/* */ -/* OUTPUT: TLOG_WORD */ -/* SUBROUTINE SHORT NAME = RWE */ -/* ------------------------------------------------------------------------- */ -Uint32 Dblqh::readLogwordExec(Signal* signal) -{ - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - ndbrequire(logPos < ZPAGE_SIZE); - Uint32 logWord = logPagePtr.p->logPageWord[logPos]; - logPos++; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos; - if (logPos >= ZPAGE_SIZE) { - jam(); - logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - if (logPagePtr.i != RNIL){ - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE; - } else { - // Reading word at the last pos in the last page - // Don't step forward to next page! - jam(); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]++; - } - }//if - return logWord; -}//Dblqh::readLogwordExec() - -/* ------------------------------------------------------------------------- */ -/* ------- READ A SINGLE PAGE FROM THE LOG ------- */ -/* */ -/* INPUT: TRSP_PAGE_NO */ -/* SUBROUTINE SHORT NAME = RSP */ -/* ------------------------------------------------------------------------- */ -void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo) -{ - seizeLfo(signal); - initLfo(signal); - seizeLogpage(signal); - lfoPtr.p->firstLfoPage = logPagePtr.i; - lfoPtr.p->lfoPageNo = pageNo; - lfoPtr.p->noPagesRw = 1; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - signal->theData[3] = ZLIST_OF_PAIRS; - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = 1; - signal->theData[6] = logPagePtr.i; - signal->theData[7] = pageNo; - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA); - - if (DEBUG_REDO) - ndbout_c("readSinglePage 1 page at part: %u file: %u pos: %u", - logPartPtr.i, - logFilePtr.p->fileNo, - pageNo); - -}//Dblqh::readSinglePage() - -/* -------------------------------------------------------------------------- - * ------- REMOVE COPY FRAGMENT FROM ACTIVE COPY LIST ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::releaseActiveCopy(Signal* signal) -{ - /* MUST BE 8 BIT */ - UintR tracFlag; - UintR tracIndex; - - tracFlag = ZFALSE; - for (tracIndex = 0; tracIndex < 4; tracIndex++) { - if (tracFlag == ZFALSE) { - jam(); - if (cactiveCopy[tracIndex] == fragptr.i) { - jam(); - tracFlag = ZTRUE; - }//if - } else { - if (tracIndex < 3) { - jam(); - cactiveCopy[tracIndex - 1] = cactiveCopy[tracIndex]; - } else { - jam(); - cactiveCopy[3] = RNIL; - }//if - }//if - }//for - ndbrequire(tracFlag == ZTRUE); - cnoActiveCopy--; -}//Dblqh::releaseActiveCopy() - - -/* -------------------------------------------------------------------------- - * ------- RELEASE ADD FRAGMENT RECORD ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::releaseAddfragrec(Signal* signal) -{ - addfragptr.p->addfragStatus = AddFragRecord::FREE; - addfragptr.p->nextAddfragrec = cfirstfreeAddfragrec; - cfirstfreeAddfragrec = addfragptr.i; -}//Dblqh::releaseAddfragrec() - -/* -------------------------------------------------------------------------- - * ------- RELEASE A PAGE REFERENCE RECORD. 
------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::releasePageRef(Signal* signal) -{ - pageRefPtr.p->prNext = cfirstfreePageRef; - cfirstfreePageRef = pageRefPtr.i; -}//Dblqh::releasePageRef() - -/* -------------------------------------------------------------------------- - * --- RELEASE ALL PAGES IN THE MM BUFFER AFTER EXECUTING THE LOG ON IT. ---- - * - * ------------------------------------------------------------------------- */ -void Dblqh::releaseMmPages(Signal* signal) -{ -RMP_LOOP: - jam(); - pageRefPtr.i = logPartPtr.p->firstPageRef; - if (pageRefPtr.i != RNIL) { - jam(); - ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord); - releasePrPages(signal); - removePageRef(signal); - goto RMP_LOOP; - }//if -}//Dblqh::releaseMmPages() - -/* -------------------------------------------------------------------------- - * ------- RELEASE A SET OF PAGES AFTER EXECUTING THE LOG ON IT. ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::releasePrPages(Signal* signal) -{ - UintR trppIndex; - - for (trppIndex = 0; trppIndex <= 7; trppIndex++) { - jam(); - logPagePtr.i = pageRefPtr.p->pageRef[trppIndex]; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - releaseLogpage(signal); - }//for -}//Dblqh::releasePrPages() - -/* -------------------------------------------------------------------------- - * ------- RELEASE OPERATION FROM WAIT QUEUE LIST ON FRAGMENT ------- - * - * SUBROUTINE SHORT NAME : RWA - * ------------------------------------------------------------------------- */ -void Dblqh::releaseWaitQueue(Signal* signal) -{ - TcConnectionrecPtr rwaTcNextConnectptr; - TcConnectionrecPtr rwaTcPrevConnectptr; - - fragptr.i = tcConnectptr.p->fragmentptr; - c_fragment_pool.getPtr(fragptr); - rwaTcPrevConnectptr.i = tcConnectptr.p->prevTc; - rwaTcNextConnectptr.i = tcConnectptr.p->nextTc; - if (tcConnectptr.p->listState != TcConnectionrec::WAIT_QUEUE_LIST) { - jam(); - systemError(signal, __LINE__); - }//if - tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST; - if (rwaTcNextConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rwaTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rwaTcNextConnectptr.p->prevTc = rwaTcPrevConnectptr.i; - } else { - jam(); - fragptr.p->lastWaitQueue = rwaTcPrevConnectptr.i; - }//if - if (rwaTcPrevConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rwaTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rwaTcPrevConnectptr.p->nextTc = rwaTcNextConnectptr.i; - } else { - jam(); - fragptr.p->firstWaitQueue = rwaTcNextConnectptr.i; - }//if -}//Dblqh::releaseWaitQueue() - -/* -------------------------------------------------------------------------- - * ------- REMOVE OPERATION RECORD FROM LIST ON LOG PART OF NOT ------- - * COMPLETED OPERATIONS IN THE LOG. 
- * - * SUBROUTINE SHORT NAME = RLO - * ------------------------------------------------------------------------- */ -void Dblqh::removeLogTcrec(Signal* signal) -{ - TcConnectionrecPtr rloTcNextConnectptr; - TcConnectionrecPtr rloTcPrevConnectptr; - rloTcPrevConnectptr.i = tcConnectptr.p->prevLogTcrec; - rloTcNextConnectptr.i = tcConnectptr.p->nextLogTcrec; - if (rloTcNextConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i; - } else { - jam(); - logPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i; - }//if - if (rloTcPrevConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i; - } else { - jam(); - logPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i; - }//if -}//Dblqh::removeLogTcrec() - -/* -------------------------------------------------------------------------- - * ------- REMOVE PAGE REFERENCE RECORD FROM LIST IN THIS LOG PART ------- - * - * SUBROUTINE SHORT NAME = RPR - * ------------------------------------------------------------------------- */ -void Dblqh::removePageRef(Signal* signal) -{ - PageRefRecordPtr rprPageRefPtr; - - pageRefPtr.i = logPartPtr.p->firstPageRef; - if (pageRefPtr.i != RNIL) { - jam(); - ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord); - if (pageRefPtr.p->prNext == RNIL) { - jam(); - logPartPtr.p->lastPageRef = RNIL; - logPartPtr.p->firstPageRef = RNIL; - } else { - jam(); - logPartPtr.p->firstPageRef = pageRefPtr.p->prNext; - rprPageRefPtr.i = pageRefPtr.p->prNext; - ptrCheckGuard(rprPageRefPtr, cpageRefFileSize, pageRefRecord); - rprPageRefPtr.p->prPrev = RNIL; - }//if - releasePageRef(signal); - }//if -}//Dblqh::removePageRef() - -/* ------------------------------------------------------------------------- */ -/* ------- RETURN FROM EXECUTION OF LOG ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -Uint32 Dblqh::returnExecLog(Signal* signal) -{ - tcConnectptr.p->connectState = TcConnectionrec::CONNECTED; - initLogPointers(signal); - logPartPtr.p->execSrExecuteIndex++; - Uint32 result = checkIfExecLog(signal); - if (result == ZOK) { - jam(); -/* ------------------------------------------------------------------------- */ -/* THIS LOG RECORD WILL BE EXECUTED AGAIN TOWARDS ANOTHER NODE. */ -/* ------------------------------------------------------------------------- */ - logPagePtr.i = logPartPtr.p->execSrLogPage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = - logPartPtr.p->execSrLogPageIndex; - } else { - jam(); -/* ------------------------------------------------------------------------- */ -/* NO MORE EXECUTION OF THIS LOG RECORD. */ -/* ------------------------------------------------------------------------- */ - if (logPartPtr.p->logExecState == - LogPartRecord::LES_EXEC_LOGREC_FROM_FILE) { - jam(); -/* ------------------------------------------------------------------------- */ -/* THE LOG RECORD WAS READ FROM DISK. RELEASE ITS PAGES IMMEDIATELY. 
*/ -/* ------------------------------------------------------------------------- */ - lfoPtr.i = logPartPtr.p->execSrLfoRec; - ptrCheckGuard(lfoPtr, clfoFileSize, logFileOperationRecord); - releaseLfoPages(signal); - releaseLfo(signal); - logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG; - if (logPartPtr.p->execSrExecLogFile != logPartPtr.p->currentLogfile) { - jam(); - LogFileRecordPtr clfLogFilePtr; - clfLogFilePtr.i = logPartPtr.p->execSrExecLogFile; - ptrCheckGuard(clfLogFilePtr, clogFileFileSize, logFileRecord); - clfLogFilePtr.p->logFileStatus = LogFileRecord::CLOSING_EXEC_LOG; - closeFile(signal, clfLogFilePtr, __LINE__); - result = ZCLOSE_FILE; - }//if - }//if - logPartPtr.p->execSrExecuteIndex = 0; - logPartPtr.p->execSrLogPage = RNIL; - logPartPtr.p->execSrLogPageIndex = ZNIL; - logPagePtr.i = logFilePtr.p->currentLogpage; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPartPtr.p->savePageIndex; - }//if - return result; -}//Dblqh::returnExecLog() - -/* -------------------------------------------------------------------------- - * ------- SEIZE ADD FRAGMENT RECORD ------ - * - * ------------------------------------------------------------------------- */ -void Dblqh::seizeAddfragrec(Signal* signal) -{ - addfragptr.i = cfirstfreeAddfragrec; - ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord); - cfirstfreeAddfragrec = addfragptr.p->nextAddfragrec; -}//Dblqh::seizeAddfragrec() - -/* -------------------------------------------------------------------------- - * ------- SEIZE FRAGMENT RECORD ------- - * - * ------------------------------------------------------------------------- */ -/* ------------------------------------------------------------------------- */ -/* ------- SEIZE A PAGE REFERENCE RECORD. 
------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dblqh::seizePageRef(Signal* signal) -{ - pageRefPtr.i = cfirstfreePageRef; - ptrCheckGuard(pageRefPtr, cpageRefFileSize, pageRefRecord); - cfirstfreePageRef = pageRefPtr.p->prNext; - pageRefPtr.p->prNext = RNIL; -}//Dblqh::seizePageRef() - -/* -------------------------------------------------------------------------- - * ------- SEND ABORTED ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::sendAborted(Signal* signal) -{ - UintR TlastInd; - if (tcConnectptr.p->nextReplica == ZNIL) { - TlastInd = ZTRUE; - } else { - TlastInd = ZFALSE; - }//if - signal->theData[0] = tcConnectptr.p->tcOprec; - signal->theData[1] = tcConnectptr.p->transid[0]; - signal->theData[2] = tcConnectptr.p->transid[1]; - signal->theData[3] = cownNodeid; - signal->theData[4] = TlastInd; - sendSignal(tcConnectptr.p->tcBlockref, GSN_ABORTED, signal, 5, JBB); - return; -}//Dblqh::sendAborted() - -/* -------------------------------------------------------------------------- - * ------- SEND LQH_TRANSCONF ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::sendLqhTransconf(Signal* signal, LqhTransConf::OperationStatus stat) -{ - tcNodeFailptr.i = tcConnectptr.p->tcNodeFailrec; - ptrCheckGuard(tcNodeFailptr, ctcNodeFailrecFileSize, tcNodeFailRecord); - - Uint32 reqInfo = 0; - LqhTransConf::setReplicaType(reqInfo, tcConnectptr.p->replicaType); - LqhTransConf::setReplicaNo(reqInfo, tcConnectptr.p->seqNoReplica); - LqhTransConf::setLastReplicaNo(reqInfo, tcConnectptr.p->lastReplicaNo); - LqhTransConf::setSimpleFlag(reqInfo, tcConnectptr.p->opSimple); - LqhTransConf::setDirtyFlag(reqInfo, tcConnectptr.p->dirtyOp); - LqhTransConf::setOperation(reqInfo, tcConnectptr.p->operation); - - LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0]; - lqhTransConf->tcRef = tcNodeFailptr.p->newTcRef; - lqhTransConf->lqhNodeId = cownNodeid; - lqhTransConf->operationStatus = stat; - lqhTransConf->lqhConnectPtr = tcConnectptr.i; - lqhTransConf->transId1 = tcConnectptr.p->transid[0]; - lqhTransConf->transId2 = tcConnectptr.p->transid[1]; - lqhTransConf->oldTcOpRec = tcConnectptr.p->tcOprec; - lqhTransConf->requestInfo = reqInfo; - lqhTransConf->gci = tcConnectptr.p->gci; - lqhTransConf->nextNodeId1 = tcConnectptr.p->nextReplica; - lqhTransConf->nextNodeId2 = tcConnectptr.p->nodeAfterNext[0]; - lqhTransConf->nextNodeId3 = tcConnectptr.p->nodeAfterNext[1]; - lqhTransConf->apiRef = tcConnectptr.p->applRef; - lqhTransConf->apiOpRec = tcConnectptr.p->applOprec; - lqhTransConf->tableId = tcConnectptr.p->tableref; - sendSignal(tcNodeFailptr.p->newTcBlockref, GSN_LQH_TRANSCONF, - signal, LqhTransConf::SignalLength, JBB); - tcNodeFailptr.p->tcRecNow = tcConnectptr.i + 1; - signal->theData[0] = ZLQH_TRANS_NEXT; - signal->theData[1] = tcNodeFailptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); -}//Dblqh::sendLqhTransconf() - -/* -------------------------------------------------------------------------- - * ------- START ANOTHER PHASE OF LOG EXECUTION ------- - * RESET THE VARIABLES NEEDED BY THIS PROCESS AND SEND THE START SIGNAL - * - * ------------------------------------------------------------------------- */ -void Dblqh::startExecSr(Signal* signal) -{ - cnoFragmentsExecSr = 0; - cnoOutstandingExecFragReq = 0; - c_lcp_complete_fragments.first(fragptr); - signal->theData[0] = fragptr.i; - sendSignal(cownref, 
GSN_START_EXEC_SR, signal, 1, JBB); -}//Dblqh::startExecSr() - -/* ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ - * ¤¤¤¤¤¤¤ LOG MODULE ¤¤¤¤¤¤¤ - * ¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤¤ */ -/* -------------------------------------------------------------------------- - * ------- STEP FORWARD IN FRAGMENT LOG DURING LOG EXECUTION ------- - * - * ------------------------------------------------------------------------- */ -void Dblqh::stepAhead(Signal* signal, Uint32 stepAheadWords) -{ - UintR tsaPos; - - tsaPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - while ((stepAheadWords + tsaPos) >= ZPAGE_SIZE) { - jam(); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_SIZE; - stepAheadWords = stepAheadWords - (ZPAGE_SIZE - tsaPos); - logFilePtr.p->currentLogpage = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - logPagePtr.i = logPagePtr.p->logPageWord[ZNEXT_PAGE]; - logFilePtr.p->currentFilepage++; - ptrCheckGuardErr(logPagePtr, clogPageFileSize, logPageRecord, - NDBD_EXIT_SR_REDOLOG); - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = ZPAGE_HEADER_SIZE; - logPartPtr.p->execSrPagesRead--; - logPartPtr.p->execSrPagesExecuted++; - tsaPos = ZPAGE_HEADER_SIZE; - }//while - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = stepAheadWords + tsaPos; -}//Dblqh::stepAhead() - -/* -------------------------------------------------------------------------- - * ------- WRITE A ABORT LOG RECORD ------- - * - * SUBROUTINE SHORT NAME: WAL - * ------------------------------------------------------------------------- */ -void Dblqh::writeAbortLog(Signal* signal) -{ - if ((ZABORT_LOG_SIZE + ZNEXT_LOG_SIZE) > - logFilePtr.p->remainingWordsInMbyte) { - jam(); - changeMbyte(signal); - }//if - logFilePtr.p->remainingWordsInMbyte = - logFilePtr.p->remainingWordsInMbyte - ZABORT_LOG_SIZE; - writeLogWord(signal, ZABORT_TYPE); - writeLogWord(signal, tcConnectptr.p->transid[0]); - writeLogWord(signal, tcConnectptr.p->transid[1]); -}//Dblqh::writeAbortLog() - -/* -------------------------------------------------------------------------- - * ------- WRITE A COMMIT LOG RECORD ------- - * - * SUBROUTINE SHORT NAME: WCL - * ------------------------------------------------------------------------- */ -void Dblqh::writeCommitLog(Signal* signal, LogPartRecordPtr regLogPartPtr) -{ - LogFileRecordPtr regLogFilePtr; - LogPageRecordPtr regLogPagePtr; - TcConnectionrec * const regTcPtr = tcConnectptr.p; - regLogFilePtr.i = regLogPartPtr.p->currentLogfile; - ptrCheckGuard(regLogFilePtr, clogFileFileSize, logFileRecord); - regLogPagePtr.i = regLogFilePtr.p->currentLogpage; - Uint32 twclTmp = regLogFilePtr.p->remainingWordsInMbyte; - ptrCheckGuard(regLogPagePtr, clogPageFileSize, logPageRecord); - logPartPtr = regLogPartPtr; - logFilePtr = regLogFilePtr; - logPagePtr = regLogPagePtr; - if ((ZCOMMIT_LOG_SIZE + ZNEXT_LOG_SIZE) > twclTmp) { - jam(); - changeMbyte(signal); - twclTmp = logFilePtr.p->remainingWordsInMbyte; - }//if - - Uint32 twclLogPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - Uint32 tableId = regTcPtr->tableref; - Uint32 schemaVersion = regTcPtr->schemaVersion; - Uint32 fragId = regTcPtr->fragmentid; - Uint32 fileNo = regTcPtr->logStartFileNo; - Uint32 startPageNo = regTcPtr->logStartPageNo; - Uint32 pageIndex = regTcPtr->logStartPageIndex; - Uint32 stopPageNo = regTcPtr->logStopPageNo; - Uint32 gci = regTcPtr->gci; - logFilePtr.p->remainingWordsInMbyte = twclTmp - ZCOMMIT_LOG_SIZE; - - if ((twclLogPos + ZCOMMIT_LOG_SIZE) >= ZPAGE_SIZE) { - writeLogWord(signal, 
ZCOMMIT_TYPE); - writeLogWord(signal, tableId); - writeLogWord(signal, schemaVersion); - writeLogWord(signal, fragId); - writeLogWord(signal, fileNo); - writeLogWord(signal, startPageNo); - writeLogWord(signal, pageIndex); - writeLogWord(signal, stopPageNo); - writeLogWord(signal, gci); - } else { - Uint32* dataPtr = &logPagePtr.p->logPageWord[twclLogPos]; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = twclLogPos + ZCOMMIT_LOG_SIZE; - dataPtr[0] = ZCOMMIT_TYPE; - dataPtr[1] = tableId; - dataPtr[2] = schemaVersion; - dataPtr[3] = fragId; - dataPtr[4] = fileNo; - dataPtr[5] = startPageNo; - dataPtr[6] = pageIndex; - dataPtr[7] = stopPageNo; - dataPtr[8] = gci; - }//if - TcConnectionrecPtr rloTcNextConnectptr; - TcConnectionrecPtr rloTcPrevConnectptr; - rloTcPrevConnectptr.i = regTcPtr->prevLogTcrec; - rloTcNextConnectptr.i = regTcPtr->nextLogTcrec; - if (rloTcNextConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rloTcNextConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rloTcNextConnectptr.p->prevLogTcrec = rloTcPrevConnectptr.i; - } else { - regLogPartPtr.p->lastLogTcrec = rloTcPrevConnectptr.i; - }//if - if (rloTcPrevConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(rloTcPrevConnectptr, ctcConnectrecFileSize, tcConnectionrec); - rloTcPrevConnectptr.p->nextLogTcrec = rloTcNextConnectptr.i; - } else { - regLogPartPtr.p->firstLogTcrec = rloTcNextConnectptr.i; - }//if -}//Dblqh::writeCommitLog() - -/* -------------------------------------------------------------------------- - * ------- WRITE A COMPLETED GCI LOG RECORD ------- - * - * SUBROUTINE SHORT NAME: WCG -// Input Pointers: -// logFilePtr -// logPartPtr - * ------------------------------------------------------------------------- */ -void Dblqh::writeCompletedGciLog(Signal* signal) -{ - if ((ZCOMPLETED_GCI_LOG_SIZE + ZNEXT_LOG_SIZE) > - logFilePtr.p->remainingWordsInMbyte) { - jam(); - changeMbyte(signal); - }//if - - logFilePtr.p->remainingWordsInMbyte = - logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE; - - if (DEBUG_REDO) - ndbout_c("writeCompletedGciLog gci: %u part: %u file: %u page: %u", - cnewestCompletedGci, - logPartPtr.i, - logFilePtr.p->fileNo, - logFilePtr.p->currentFilepage); - - writeLogWord(signal, ZCOMPLETED_GCI_TYPE); - writeLogWord(signal, cnewestCompletedGci); - logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci; -}//Dblqh::writeCompletedGciLog() - -/* -------------------------------------------------------------------------- - * ------- WRITE A DIRTY PAGE DURING LOG EXECUTION ------- - * - * SUBROUTINE SHORT NAME: WD - * ------------------------------------------------------------------------- */ -void Dblqh::writeDirty(Signal* signal, Uint32 place) -{ - logPagePtr.p->logPageWord[ZPOS_DIRTY] = ZNOT_DIRTY; - - ndbassert(logPartPtr.p->prevFilepage == - logPagePtr.p->logPageWord[ZPOS_PAGE_NO]); - writeDbgInfoPageHeader(logPagePtr, place, logPartPtr.p->prevFilepage, - ZPAGE_SIZE); - // Calculate checksum for page - logPagePtr.p->logPageWord[ZPOS_CHECKSUM] = calcPageCheckSum(logPagePtr); - - seizeLfo(signal); - initLfo(signal); - lfoPtr.p->lfoPageNo = logPartPtr.p->prevFilepage; - lfoPtr.p->noPagesRw = 1; - lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_DIRTY; - lfoPtr.p->firstLfoPage = logPagePtr.i; - signal->theData[0] = logFilePtr.p->fileRef; - signal->theData[1] = cownref; - signal->theData[2] = lfoPtr.i; - signal->theData[3] = ZLIST_OF_PAIRS_SYNCH; - signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD; - signal->theData[5] = 1; - signal->theData[6] = logPagePtr.i; - signal->theData[7] = 
logPartPtr.p->prevFilepage; - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA); - - if (DEBUG_REDO) - ndbout_c("writeDirty 1 page at part: %u file: %u pos: %u", - logPartPtr.i, - logFilePtr.p->fileNo, - logPartPtr.p->prevFilepage); - -}//Dblqh::writeDirty() - -/* -------------------------------------------------------------------------- - * ------- WRITE A WORD INTO THE LOG, CHECK FOR NEW PAGE ------- - * - * SUBROUTINE SHORT NAME: WLW - * ------------------------------------------------------------------------- */ -void Dblqh::writeLogWord(Signal* signal, Uint32 data) -{ - Uint32 logPos = logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]; - ndbrequire(logPos < ZPAGE_SIZE); - logPagePtr.p->logPageWord[logPos] = data; - logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] = logPos + 1; - if ((logPos + 1) == ZPAGE_SIZE) { - jam(); - completedLogPage(signal, ZNORMAL, __LINE__); - seizeLogpage(signal); - initLogpage(signal); - logFilePtr.p->currentLogpage = logPagePtr.i; - logFilePtr.p->currentFilepage++; - }//if -}//Dblqh::writeLogWord() - -/* -------------------------------------------------------------------------- - * ------- WRITE A NEXT LOG RECORD AND CHANGE TO NEXT MBYTE ------- - * - * SUBROUTINE SHORT NAME: WNL -// Input Pointers: -// logFilePtr(Redefines) -// logPagePtr (Redefines) -// logPartPtr - * ------------------------------------------------------------------------- */ -void Dblqh::writeNextLog(Signal* signal) -{ - LogFileRecordPtr wnlNextLogFilePtr; - UintR twnlNextFileNo; - UintR twnlNewMbyte; - UintR twnlRemWords; - UintR twnlNextMbyte; - -/* -------------------------------------------------- */ -/* CALCULATE THE NEW NUMBER OF REMAINING WORDS */ -/* AS 128*2036 WHERE 128 * 8 KBYTE = 1 MBYTE */ -/* AND 2036 IS THE NUMBER OF WORDS IN A PAGE */ -/* THAT IS USED FOR LOG INFORMATION. */ -/* -------------------------------------------------- */ - twnlRemWords = ZPAGE_SIZE - ZPAGE_HEADER_SIZE; - twnlRemWords = twnlRemWords * ZPAGES_IN_MBYTE; - wnlNextLogFilePtr.i = logFilePtr.p->nextLogFile; - ptrCheckGuard(wnlNextLogFilePtr, clogFileFileSize, logFileRecord); -/* -------------------------------------------------- */ -/* WRITE THE NEXT LOG RECORD. */ -/* -------------------------------------------------- */ - ndbrequire(logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX] < ZPAGE_SIZE); - logPagePtr.p->logPageWord[logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX]] = - ZNEXT_MBYTE_TYPE; - if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { - jam(); -/* -------------------------------------------------- */ -/* CALCULATE THE NEW REMAINING WORDS WHEN */ -/* CHANGING LOG FILE IS PERFORMED */ -/* -------------------------------------------------- */ - twnlRemWords = twnlRemWords - (ZPAGE_SIZE - ZPAGE_HEADER_SIZE); -/* -------------------------------------------------- */ -/* ENSURE THAT THE LOG PAGES ARE WRITTEN AFTER */ -/* WE HAVE CHANGED MBYTE. */ -/* -------------------------------------------------- */ -/* ENSURE LAST PAGE IN PREVIOUS MBYTE IS */ -/* WRITTEN AND THAT THE STATE OF THE WRITE IS */ -/* PROPERLY SET. 
*/ -/* -------------------------------------------------- */ -/* WE HAVE TO CHANGE LOG FILE */ -/* -------------------------------------------------- */ - completedLogPage(signal, ZLAST_WRITE_IN_FILE, __LINE__); - if (wnlNextLogFilePtr.p->fileNo == 0) { - jam(); -/* -------------------------------------------------- */ -/* WE HAVE FINALISED A LOG LAP, START FROM LOG */ -/* FILE 0 AGAIN */ -/* -------------------------------------------------- */ - logPartPtr.p->logLap++; - }//if - logPartPtr.p->currentLogfile = wnlNextLogFilePtr.i; - logFilePtr.i = wnlNextLogFilePtr.i; - logFilePtr.p = wnlNextLogFilePtr.p; - twnlNewMbyte = 0; - } else { - jam(); -/* -------------------------------------------------- */ -/* INCREMENT THE CURRENT MBYTE */ -/* SET PAGE INDEX TO PAGE HEADER SIZE */ -/* -------------------------------------------------- */ - completedLogPage(signal, ZENFORCE_WRITE, __LINE__); - twnlNewMbyte = logFilePtr.p->currentMbyte + 1; - }//if -/* -------------------------------------------------- */ -/* CHANGE TO NEW LOG FILE IF NECESSARY */ -/* UPDATE THE FILE POSITION TO THE NEW MBYTE */ -/* FOUND IN PAGE PART OF TNEXT_LOG_PTR */ -/* ALLOCATE AND INITIATE A NEW PAGE SINCE WE */ -/* HAVE SENT THE PREVIOUS PAGE TO DISK. */ -/* SET THE NEW NUMBER OF REMAINING WORDS IN THE */ -/* NEW MBYTE ALLOCATED. */ -/* -------------------------------------------------- */ - logFilePtr.p->currentMbyte = twnlNewMbyte; - logFilePtr.p->filePosition = twnlNewMbyte * ZPAGES_IN_MBYTE; - logFilePtr.p->currentFilepage = twnlNewMbyte * ZPAGES_IN_MBYTE; - logFilePtr.p->remainingWordsInMbyte = twnlRemWords; - seizeLogpage(signal); - if (logFilePtr.p->currentMbyte == 0) { - jam(); - logFilePtr.p->lastPageWritten = 0; - if (logFilePtr.p->fileNo == 0) { - jam(); - releaseLogpage(signal); - logPagePtr.i = logFilePtr.p->logPageZero; - ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord); - }//if - }//if - initLogpage(signal); - logFilePtr.p->currentLogpage = logPagePtr.i; - if (logFilePtr.p->currentMbyte == 0) { - jam(); -/* -------------------------------------------------- */ -/* THIS IS A NEW FILE, WRITE THE FILE DESCRIPTOR*/ -/* ALSO OPEN THE NEXT LOG FILE TO ENSURE THAT */ -/* THIS FILE IS OPEN WHEN ITS TURN COMES. */ -/* -------------------------------------------------- */ - writeFileHeaderOpen(signal, ZNORMAL); - openNextLogfile(signal); - logFilePtr.p->fileChangeState = LogFileRecord::BOTH_WRITES_ONGOING; - }//if - if (logFilePtr.p->fileNo == logPartPtr.p->logTailFileNo) { - if (logFilePtr.p->currentMbyte == logPartPtr.p->logTailMbyte) { - jam(); -/* -------------------------------------------------- */ -/* THE HEAD AND TAIL HAS MET. THIS SHOULD NEVER */ -/* OCCUR. CAN HAPPEN IF THE LOCAL CHECKPOINTS */ -/* TAKE FAR TOO LONG TIME. SO TIMING PROBLEMS */ -/* CAN INVOKE THIS SYSTEM CRASH. HOWEVER ONLY */ -/* VERY SERIOUS TIMING PROBLEMS. 
*/ -/* -------------------------------------------------- */ - systemError(signal, __LINE__); - }//if - }//if - if (logFilePtr.p->currentMbyte == (clogFileSize - 1)) { - jam(); - twnlNextMbyte = 0; - if (logFilePtr.p->fileChangeState != LogFileRecord::NOT_ONGOING) { - jam(); - logPartPtr.p->logPartState = LogPartRecord::FILE_CHANGE_PROBLEM; - }//if - twnlNextFileNo = wnlNextLogFilePtr.p->fileNo; - } else { - jam(); - twnlNextMbyte = logFilePtr.p->currentMbyte + 1; - twnlNextFileNo = logFilePtr.p->fileNo; - }//if - if (twnlNextFileNo == logPartPtr.p->logTailFileNo) { - if (logPartPtr.p->logTailMbyte == twnlNextMbyte) { - jam(); -/* -------------------------------------------------- */ -/* THE NEXT MBYTE WILL BE THE TAIL. WE MUST */ -/* STOP LOGGING NEW OPERATIONS. THIS OPERATION */ -/* ALLOWED TO PASS. ALSO COMMIT, NEXT, COMPLETED*/ -/* GCI, ABORT AND FRAGMENT SPLIT IS ALLOWED. */ -/* OPERATIONS ARE ALLOWED AGAIN WHEN THE TAIL */ -/* IS MOVED FORWARD AS A RESULT OF A START_LCP */ -/* _ROUND SIGNAL ARRIVING FROM DBDIH. */ -/* -------------------------------------------------- */ - logPartPtr.p->logPartState = LogPartRecord::TAIL_PROBLEM; - }//if - }//if -}//Dblqh::writeNextLog() - -void -Dblqh::execDUMP_STATE_ORD(Signal* signal) -{ - jamEntry(); - DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0]; - Uint32 arg= dumpState->args[0]; - if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersSize){ - infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d", - m_commitAckMarkerPool.getNoOfFree(), - m_commitAckMarkerPool.getSize()); - } - if(dumpState->args[0] == DumpStateOrd::CommitAckMarkersDump){ - infoEvent("LQH: m_commitAckMarkerPool: %d free size: %d", - m_commitAckMarkerPool.getNoOfFree(), - m_commitAckMarkerPool.getSize()); - - CommitAckMarkerIterator iter; - for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL; - m_commitAckMarkerHash.next(iter)){ - infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)" - " ApiRef: 0x%x apiOprec: 0x%x TcNodeId: %d", - iter.curr.i, - iter.curr.p->transid1, - iter.curr.p->transid2, - iter.curr.p->apiRef, - iter.curr.p->apiOprec, - iter.curr.p->tcNodeId); - } - } - - // Dump info about number of log pages - if(dumpState->args[0] == DumpStateOrd::LqhDumpNoLogPages){ - infoEvent("LQH: Log pages : %d Free: %d", - clogPageFileSize, - cnoOfLogPages); - } - - // Dump all defined tables that LQH knowns about - if(dumpState->args[0] == DumpStateOrd::LqhDumpAllDefinedTabs){ - for(Uint32 i = 0; itableStatus != Tablerec::NOT_DEFINED){ - infoEvent("Table %d Status: %d Usage: %d", - i, tabPtr.p->tableStatus, tabPtr.p->usageCount); - - for (Uint32 j = 0; jfragrec[j]) != RNIL) - { - c_fragment_pool.getPtr(fragPtr); - infoEvent(" frag: %d distKey: %u", - tabPtr.p->fragid[j], - fragPtr.p->fragDistributionKey); - } - } - } - } - return; - } - - // Dump all ScanRecords - if (dumpState->args[0] == DumpStateOrd::LqhDumpAllScanRec){ - Uint32 recordNo = 0; - if (signal->length() == 1) - infoEvent("LQH: Dump all ScanRecords - size: %d", - cscanrecFileSize); - else if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - - if (recordNo < cscanrecFileSize-1){ - dumpState->args[0] = DumpStateOrd::LqhDumpAllScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - return; - } - - // Dump all active ScanRecords - if (dumpState->args[0] == 
DumpStateOrd::LqhDumpAllActiveScanRec){ - Uint32 recordNo = 0; - if (signal->length() == 1) - infoEvent("LQH: Dump active ScanRecord - size: %d", - cscanrecFileSize); - else if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - ScanRecordPtr sp; - sp.i = recordNo; - c_scanRecordPool.getPtr(scanptr); - if (sp.p->scanState != ScanRecord::SCAN_FREE){ - dumpState->args[0] = DumpStateOrd::LqhDumpOneScanRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - } - - if (recordNo < cscanrecFileSize-1){ - dumpState->args[0] = DumpStateOrd::LqhDumpAllActiveScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - return; - } - - if(dumpState->args[0] == DumpStateOrd::LqhDumpOneScanRec){ - Uint32 recordNo = RNIL; - if (signal->length() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= cscanrecFileSize) - return; - - ScanRecordPtr sp; - sp.i = recordNo; - c_scanRecordPool.getPtr(sp); - infoEvent("Dblqh::ScanRecord[%d]: state=%d, type=%d, " - "complStatus=%d, scanNodeId=%d", - sp.i, - sp.p->scanState, - sp.p->scanType, - sp.p->scanCompletedStatus, - sp.p->scanNodeId); - infoEvent(" apiBref=0x%x, scanAccPtr=%d", - sp.p->scanApiBlockref, - sp.p->scanAccPtr); - infoEvent(" copyptr=%d, ailen=%d, complOps=%d, concurrOps=%d", - sp.p->copyPtr, - sp.p->scanAiLength, - sp.p->m_curr_batch_size_rows, - sp.p->m_max_batch_size_rows); - infoEvent(" errCnt=%d, schV=%d", - sp.p->scanErrorCounter, - sp.p->scanSchemaVersion); - infoEvent(" stpid=%d, flag=%d, lhold=%d, lmode=%d, num=%d", - sp.p->scanStoredProcId, - sp.p->scanFlag, - sp.p->scanLockHold, - sp.p->scanLockMode, - sp.p->scanNumber); - infoEvent(" relCount=%d, TCwait=%d, TCRec=%d, KIflag=%d", - sp.p->scanReleaseCounter, - sp.p->scanTcWaiting, - sp.p->scanTcrec, - sp.p->scanKeyinfoFlag); - return; - } - if(dumpState->args[0] == DumpStateOrd::LqhDumpLcpState){ - - infoEvent("== LQH LCP STATE =="); - infoEvent(" clcpCompletedState=%d, c_lcpId=%d, cnoOfFragsCheckpointed=%d", - clcpCompletedState, - c_lcpId, - cnoOfFragsCheckpointed); - - LcpRecordPtr TlcpPtr; - // Print information about the current local checkpoint - TlcpPtr.i = 0; - ptrAss(TlcpPtr, lcpRecord); - infoEvent(" lcpState=%d lastFragmentFlag=%d", - TlcpPtr.p->lcpState, TlcpPtr.p->lastFragmentFlag); - infoEvent("currentFragment.fragPtrI=%d", - TlcpPtr.p->currentFragment.fragPtrI); - infoEvent("currentFragment.lcpFragOrd.tableId=%d", - TlcpPtr.p->currentFragment.lcpFragOrd.tableId); - infoEvent(" lcpQueued=%d reportEmpty=%d", - TlcpPtr.p->lcpQueued, - TlcpPtr.p->reportEmpty); - char buf[8*_NDB_NODE_BITMASK_SIZE+1]; - infoEvent(" m_EMPTY_LCP_REQ=%d", - TlcpPtr.p->m_EMPTY_LCP_REQ.getText(buf)); - - return; - } - -#ifdef ERROR_INSERT -#ifdef NDB_DEBUG_FULL - if(dumpState->args[0] == DumpStateOrd::LCPContinue){ - switch(cerrorInsert){ - case 5904: - CLEAR_ERROR_INSERT_VALUE; - g_trace_lcp.restore(*globalData.getBlock(BACKUP), signal); - return; - default: - return; - } - } -#endif -#endif - - if(arg == 2304 || arg == 2305) - { - jam(); - Uint32 i; - GcpRecordPtr gcp; gcp.i = RNIL; - for(i = 0; i<4; i++) - { - logPartPtr.i = i; - ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord); - ndbout_c("LP %d state: %d WW_Gci: %d gcprec: %d flq: %d currfile: %d tailFileNo: %d logTailMbyte: %d", - i, - logPartPtr.p->logPartState, - logPartPtr.p->waitWriteGciLog, - logPartPtr.p->gcprec, - logPartPtr.p->firstLogQueue, - logPartPtr.p->currentLogfile, - logPartPtr.p->logTailFileNo, - 
logPartPtr.p->logTailMbyte); - - if(gcp.i == RNIL && logPartPtr.p->gcprec != RNIL) - gcp.i = logPartPtr.p->gcprec; - - LogFileRecordPtr logFilePtr; - Uint32 first= logFilePtr.i= logPartPtr.p->firstLogfile; - do - { - ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord); - ndbout_c(" file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage %d", - logFilePtr.p->fileNo, - logFilePtr.i, - logFilePtr.p->fileChangeState, - logFilePtr.p->logFileStatus, - logFilePtr.p->currentMbyte, - logFilePtr.p->currentFilepage); - logFilePtr.i = logFilePtr.p->nextLogFile; - } while(logFilePtr.i != first); - } - - if(gcp.i != RNIL) - { - ptrCheckGuard(gcp, cgcprecFileSize, gcpRecord); - for(i = 0; i<4; i++) - { - ndbout_c(" GCP %d file: %d state: %d sync: %d page: %d word: %d", - i, gcp.p->gcpFilePtr[i], gcp.p->gcpLogPartState[i], - gcp.p->gcpSyncReady[i], - gcp.p->gcpPageNo[i], - gcp.p->gcpWordNo[i]); - } - } - - if(arg== 2305) - { - progError(__LINE__, NDBD_EXIT_SYSTEM_ERROR, - "Please report this as a bug. " - "Provide as much info as possible, expecially all the " - "ndb_*_out.log files, Thanks. " - "Shutting down node due to failed handling of GCP_SAVEREQ"); - - } - } - - if (dumpState->args[0] == DumpStateOrd::LqhErrorInsert5042 && (signal->getLength() >= 2)) - { - c_error_insert_table_id = dumpState->args[1]; - if (signal->getLength() == 2) - { - SET_ERROR_INSERT_VALUE(5042); - } - else - { - SET_ERROR_INSERT_VALUE(dumpState->args[2]); - } - } - - TcConnectionrec *regTcConnectionrec = tcConnectionrec; - Uint32 ttcConnectrecFileSize = ctcConnectrecFileSize; - if(arg == 2306) - { - for(Uint32 i = 0; i<1024; i++) - { - TcConnectionrecPtr tcRec; - tcRec.i = ctransidHash[i]; - while(tcRec.i != RNIL) - { - ptrCheckGuard(tcRec, ttcConnectrecFileSize, regTcConnectionrec); - ndbout << "TcConnectionrec " << tcRec.i; - signal->theData[0] = 2307; - signal->theData[1] = tcRec.i; - execDUMP_STATE_ORD(signal); - tcRec.i = tcRec.p->nextHashRec; - } - } - } - - if(arg == 2307 || arg == 2308) - { - TcConnectionrecPtr tcRec; - tcRec.i = signal->theData[1]; - ptrCheckGuard(tcRec, ttcConnectrecFileSize, regTcConnectionrec); - - ndbout << " transactionState = " << tcRec.p->transactionState<logPageWord[i] ^ checkSum; -#endif - return checkSum; - } - -#ifdef NDB_DEBUG_FULL -#ifdef ERROR_INSERT -void -TraceLCP::sendSignal(Uint32 ref, Uint32 gsn, Signal* signal, - Uint32 len, Uint32 prio) -{ - Sig s; - s.type = Sig::Sig_send; - s.header = signal->header; - s.header.theVerId_signalNumber = gsn; - s.header.theReceiversBlockNumber = ref; - s.header.theLength = len; - memcpy(s.theData, signal->theData, 4 * len); - m_signals.push_back(s); - assert(signal->getNoOfSections() == 0); -} - -void -TraceLCP::save(Signal* signal){ - Sig s; - s.type = Sig::Sig_save; - s.header = signal->header; - memcpy(s.theData, signal->theData, 4 * signal->getLength()); - m_signals.push_back(s); - assert(signal->getNoOfSections() == 0); -} - -void -TraceLCP::restore(SimulatedBlock& lqh, Signal* sig){ - Uint32 cnt = m_signals.size(); - for(Uint32 i = 0; iheader = m_signals[i].header; - memcpy(sig->theData, m_signals[i].theData, 4 * sig->getLength()); - switch(m_signals[i].type){ - case Sig::Sig_send: - lqh.sendSignal(sig->header.theReceiversBlockNumber, - sig->header.theVerId_signalNumber, - sig, - sig->header.theLength, - JBB); - break; - case Sig::Sig_save: - lqh.executeFunction(sig->header.theVerId_signalNumber, sig); - break; - } - } - m_signals.clear(); -} -#endif -#endif - -void 
Dblqh::writeDbgInfoPageHeader(LogPageRecordPtr logP, Uint32 place, - Uint32 pageNo, Uint32 wordWritten) -{ - logP.p->logPageWord[ZPOS_LOG_TIMER]= logPartPtr.p->logTimer; - logP.p->logPageWord[ZPOS_PREV_PAGE_NO]= logP.p->logPageWord[ZPOS_PAGE_NO]; - logP.p->logPageWord[ZPOS_PAGE_I]= logP.i; - logP.p->logPageWord[ZPOS_PLACE_WRITTEN_FROM]= place; - logP.p->logPageWord[ZPOS_PAGE_NO]= pageNo; - logP.p->logPageWord[ZPOS_PAGE_FILE_NO]= logFilePtr.p->fileNo; - logP.p->logPageWord[ZPOS_WORD_WRITTEN]= wordWritten; - logP.p->logPageWord[ZPOS_IN_WRITING]= 1; -} - -#if defined ERROR_INSERT -void -Dblqh::TRACE_OP_DUMP(const Dblqh::TcConnectionrec* regTcPtr, const char * pos) -{ - (* traceopout) - << "[ " << hex << regTcPtr->transid[0] - << " " << hex << regTcPtr->transid[1] << " ] " << dec - << pos - << " " << (Operation_t)regTcPtr->operation - << " " << regTcPtr->tableref - << "(" << regTcPtr->fragmentid << ")" - << "(" << (regTcPtr->seqNoReplica == 0 ? "P" : "B") << ")" ; - - { - (* traceopout) << "key=[" << hex; - Uint32 i; - for(i = 0; i<regTcPtr->primKeyLen && i < 4; i++){ - (* traceopout) << hex << regTcPtr->tupkeyData[i] << " "; - } - - DatabufPtr regDatabufptr; - regDatabufptr.i = regTcPtr->firstTupkeybuf; - while(i < regTcPtr->primKeyLen) - { - ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf); - for(Uint32 j = 0; j<4 && i<regTcPtr->primKeyLen; j++, i++) - (* traceopout) << hex << regDatabufptr.p->data[j] << " "; - } - (* traceopout) << "] "; - } - - if (regTcPtr->m_use_rowid) - (* traceopout) << " " << regTcPtr->m_row_id; - (* traceopout) << endl; -} -#endif diff --git a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am b/storage/ndb/src/kernel/blocks/dblqh/Makefile.am deleted file mode 100644 index 31612b5c25e..00000000000 --- a/storage/ndb/src/kernel/blocks/dblqh/Makefile.am +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details.
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -EXTRA_PROGRAMS = ndbd_redo_log_reader - -ndbd_redo_log_reader_SOURCES = redoLogReader/records.cpp \ - redoLogReader/reader.cpp - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am - -ndbd_redo_log_reader_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp deleted file mode 100644 index ea483527c15..00000000000 --- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/reader.cpp +++ /dev/null @@ -1,416 +0,0 @@ -/* Copyright (c) 2003-2005, 2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//---------------------------------------------------------------- -// REDOLOGFILEREADER -// Reads a redo log file and checks it for errors and/or prints -// the file in a human readable format. 
-// -// Usage: redoLogFileReader [-noprint] [-nocheck] -// [-mbyte <0-15>] [-mbyteHeaders] [-pageHeaders] -// -//---------------------------------------------------------------- - - -#include - -#include "records.hpp" - -#define RETURN_ERROR 1 -#define RETURN_OK 0 - -#define FROM_BEGINNING 0 - -void usage(const char * prg); -Uint32 readFromFile(FILE * f, Uint32 *toPtr, Uint32 sizeInWords); -void readArguments(int argc, const char** argv); -void doExit(); - -FILE * f= 0; -char fileName[256]; -bool theDumpFlag = false; -bool thePrintFlag = true; -bool theCheckFlag = true; -bool onlyPageHeaders = false; -bool onlyMbyteHeaders = false; -bool onlyFileDesc = false; -bool firstLap = true; -Uint32 startAtMbyte = 0; -Uint32 startAtPage = 0; -Uint32 startAtPageIndex = 0; -Uint32 *redoLogPage; - -NDB_COMMAND(redoLogFileReader, "redoLogFileReader", "redoLogFileReader", "Read a redo log file", 16384) { - int wordIndex = 0; - int oldWordIndex = 0; - Uint32 recordType = 1234567890; - - PageHeader *thePageHeader; - CompletedGCIRecord *cGCIrecord; - PrepareOperationRecord *poRecord; - NextLogRecord *nlRecord; - FileDescriptor *fdRecord; - CommitTransactionRecord *ctRecord; - InvalidCommitTransactionRecord *ictRecord; - NextMbyteRecord *nmRecord; - AbortTransactionRecord *atRecord; - - readArguments(argc, argv); - - f = fopen(fileName, "rb"); - if(!f){ - perror("Error: open file"); - exit(RETURN_ERROR); - } - - Uint32 tmpFileOffset = startAtMbyte * PAGESIZE * NO_PAGES_IN_MBYTE * sizeof(Uint32); - if (fseek(f, tmpFileOffset, FROM_BEGINNING)) { - perror("Error: Move in file"); - exit(RETURN_ERROR); - } - - redoLogPage = new Uint32[PAGESIZE*NO_PAGES_IN_MBYTE]; - Uint32 words_from_previous_page = 0; - - // Loop for every mbyte. - bool lastPage = false; - for (Uint32 j = startAtMbyte; j < NO_MBYTE_IN_FILE && !lastPage; j++) { - - readFromFile(f, redoLogPage, PAGESIZE*NO_PAGES_IN_MBYTE); - - words_from_previous_page = 0; - - // Loop for every page. - for (int i = 0; i < NO_PAGES_IN_MBYTE; i++) { - wordIndex = 0; - thePageHeader = (PageHeader *) &redoLogPage[i*PAGESIZE]; - // Print out mbyte number, page number and page index. - ndbout << j << ":" << i << ":" << wordIndex << endl - << " " << j*32 + i << ":" << wordIndex << " "; - if (thePrintFlag) ndbout << (*thePageHeader); - if (theCheckFlag) { - if(!thePageHeader->check()) { - ndbout << "Error in thePageHeader->check()" << endl; - doExit(); - } - - Uint32 checkSum = 37; - for (int ps = 1; ps < PAGESIZE; ps++) - checkSum = redoLogPage[i*PAGESIZE+ps] ^ checkSum; - - if (checkSum != redoLogPage[i*PAGESIZE]){ - ndbout << "WRONG CHECKSUM: checksum = " << redoLogPage[i*PAGESIZE] - << " expected = " << checkSum << endl; - doExit(); - } - else - ndbout << "expected checksum: " << checkSum << endl; - - } - - lastPage = i != 0 && thePageHeader->lastPage(); - Uint32 lastWord = thePageHeader->lastWord(); - - if (onlyMbyteHeaders) { - // Show only the first page header in every mbyte of the file. - break; - } - - if (onlyPageHeaders) { - // Show only page headers. Continue with the next page in this for loop. - continue; - } - - - wordIndex = thePageHeader->getLogRecordSize() - words_from_previous_page; - Uint32 *redoLogPagePos = redoLogPage + i*PAGESIZE; - if (words_from_previous_page) - { - memmove(redoLogPagePos + wordIndex , - redoLogPagePos - words_from_previous_page, - words_from_previous_page*4); - } - - do { - if (words_from_previous_page) - { - // Print out mbyte number, page number and word index. 
- ndbout << j << ":" << i-1 << ":" << PAGESIZE-words_from_previous_page << endl - << j << ":" << i << ":" << wordIndex+words_from_previous_page << endl - << " " << j*32 + i-1 << ":" << PAGESIZE-words_from_previous_page << " "; - words_from_previous_page = 0; - } - else - { - // Print out mbyte number, page number and word index. - ndbout << j << ":" << i << ":" << wordIndex << endl - << " " << j*32 + i << ":" << wordIndex << " "; - } - redoLogPagePos = redoLogPage + i*PAGESIZE + wordIndex; - oldWordIndex = wordIndex; - recordType = *redoLogPagePos; - switch(recordType) { - case ZFD_TYPE: - fdRecord = (FileDescriptor *) redoLogPagePos; - if (thePrintFlag) ndbout << (*fdRecord); - if (theCheckFlag) { - if(!fdRecord->check()) { - ndbout << "Error in fdRecord->check()" << endl; - doExit(); - } - } - if (onlyFileDesc) { - delete [] redoLogPage; - exit(RETURN_OK); - } - wordIndex += fdRecord->getLogRecordSize(); - break; - - case ZNEXT_LOG_RECORD_TYPE: - nlRecord = (NextLogRecord *) redoLogPagePos; - wordIndex += nlRecord->getLogRecordSize(wordIndex); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*nlRecord); - if (theCheckFlag) { - if(!nlRecord->check()) { - ndbout << "Error in nlRecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZCOMPLETED_GCI_TYPE: - cGCIrecord = (CompletedGCIRecord *) redoLogPagePos; - wordIndex += cGCIrecord->getLogRecordSize(); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*cGCIrecord); - if (theCheckFlag) { - if(!cGCIrecord->check()) { - ndbout << "Error in cGCIrecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZPREP_OP_TYPE: - poRecord = (PrepareOperationRecord *) redoLogPagePos; - wordIndex += poRecord->getLogRecordSize(PAGESIZE-wordIndex); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*poRecord); - if (theCheckFlag) { - if(!poRecord->check()) { - ndbout << "Error in poRecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZCOMMIT_TYPE: - ctRecord = (CommitTransactionRecord *) redoLogPagePos; - wordIndex += ctRecord->getLogRecordSize(); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*ctRecord); - if (theCheckFlag) { - if(!ctRecord->check()) { - ndbout << "Error in ctRecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZINVALID_COMMIT_TYPE: - ictRecord = (InvalidCommitTransactionRecord *) redoLogPagePos; - wordIndex += ictRecord->getLogRecordSize(); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*ictRecord); - if (theCheckFlag) { - if(!ictRecord->check()) { - ndbout << "Error in ictRecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZNEXT_MBYTE_TYPE: - nmRecord = (NextMbyteRecord *) redoLogPagePos; - if (thePrintFlag) ndbout << (*nmRecord); - i = NO_PAGES_IN_MBYTE; - break; - - case ZABORT_TYPE: - atRecord = (AbortTransactionRecord *) redoLogPagePos; - wordIndex += atRecord->getLogRecordSize(); - if (wordIndex <= PAGESIZE) { - if (thePrintFlag) ndbout << (*atRecord); - if (theCheckFlag) { - if(!atRecord->check()) { - ndbout << "Error in atRecord->check()" << endl; - doExit(); - } - } - } - break; - - case ZNEW_PREP_OP_TYPE: - case ZFRAG_SPLIT_TYPE: - ndbout << endl << "Record type = " << recordType << " not implemented." 
<< endl; - doExit(); - - default: - ndbout << " ------ERROR: UNKNOWN RECORD TYPE------" << endl; - - // Print out remaining data in this page - for (int k = wordIndex; k < PAGESIZE; k++){ - Uint32 unknown = redoLogPage[i*PAGESIZE + k]; - ndbout_c("%-30d%-12u%-12x", k, unknown, unknown); - } - - doExit(); - } - } while(wordIndex < lastWord && i < NO_PAGES_IN_MBYTE); - - - if (lastPage) - { - if (theDumpFlag) - { - ndbout << " ------PAGE END: DUMPING REST OF PAGE------" << endl; - for (int k = wordIndex > PAGESIZE ? oldWordIndex : wordIndex; - k < PAGESIZE; k++) - { - Uint32 word = redoLogPage[i*PAGESIZE + k]; - ndbout_c("%-30d%-12u%-12x", k, word, word); - } - } - break; - } - if (wordIndex > PAGESIZE) { - words_from_previous_page = PAGESIZE - oldWordIndex; - ndbout << " ----------- Record continues on next page -----------" << endl; - } else { - wordIndex = 0; - words_from_previous_page = 0; - } - ndbout << endl; - }//for - ndbout << endl; - if (startAtMbyte != 0) { - break; - } - }//for - fclose(f); - delete [] redoLogPage; - exit(RETURN_OK); -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -Uint32 readFromFile(FILE * f, Uint32 *toPtr, Uint32 sizeInWords) { - Uint32 noOfReadWords; - if ( !(noOfReadWords = fread(toPtr, sizeof(Uint32), sizeInWords, f)) ) { - ndbout << "Error reading file" << endl; - doExit(); - } - - return noOfReadWords; -} - - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - - -void usage(const char * prg){ - ndbout << endl << "Usage: " << endl << prg - << " [-noprint] [-nocheck] [-mbyte <0-15>] " - << "[-mbyteheaders] [-pageheaders] [-filedescriptors] [-page <0-31>] " - << "[-pageindex <12-8191>]" - << endl << endl; - -} -void readArguments(int argc, const char** argv) -{ - if(argc < 2 || argc > 9){ - usage(argv[0]); - doExit(); - } - - strcpy(fileName, argv[1]); - argc--; - - int i = 2; - while (argc > 1) - { - if (strcmp(argv[i], "-noprint") == 0) { - thePrintFlag = false; - } else if (strcmp(argv[i], "-dump") == 0) { - theDumpFlag = true; - } else if (strcmp(argv[i], "-nocheck") == 0) { - theCheckFlag = false; - } else if (strcmp(argv[i], "-mbyteheaders") == 0) { - onlyMbyteHeaders = true; - } else if (strcmp(argv[i], "-pageheaders") == 0) { - onlyPageHeaders = true; - } else if (strcmp(argv[i], "-filedescriptors") == 0) { - onlyFileDesc = true; - } else if (strcmp(argv[i], "-mbyte") == 0) { - startAtMbyte = atoi(argv[i+1]); - if (startAtMbyte > 15) { - usage(argv[0]); - doExit(); - } - argc--; - i++; - } else if (strcmp(argv[i], "-page") == 0) { - startAtPage = atoi(argv[i+1]); - if (startAtPage > 31) { - usage(argv[0]); - doExit(); - } - argc--; - i++; - } else if (strcmp(argv[i], "-pageindex") == 0) { - startAtPageIndex = atoi(argv[i+1]); - if (startAtPageIndex > 8191 || startAtPageIndex < 12) { - usage(argv[0]); - doExit(); - } - argc--; - i++; - } else { - usage(argv[0]); - doExit(); - } - argc--; - i++; - } - -} - -void doExit() { - ndbout << "Error in redoLogReader(). Exiting!" 
<< endl; - if (f) fclose(f); - delete [] redoLogPage; - exit(RETURN_ERROR); -} diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp deleted file mode 100644 index 6431b4ba9b6..00000000000 --- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.cpp +++ /dev/null @@ -1,336 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "records.hpp" - -void printOut(const char *string, Uint32 value) { - ndbout_c("%-30s%-12u%-12x", string, value, value); -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool AbortTransactionRecord::check() { - // Not implemented yet. - return true; -} - -Uint32 AbortTransactionRecord::getLogRecordSize() { - return ABORTTRANSACTIONRECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const AbortTransactionRecord& atr) { - no << "----------ABORT TRANSACTION RECORD-------------" << endl << endl; - printOut("Record type:", atr.m_recordType); - printOut("TransactionId1:", atr.m_transactionId1); - printOut("TransactionId2:", atr.m_transactionId2); - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool NextMbyteRecord::check() { - // Not implemented yet. - return true; -} - -Uint32 NextMbyteRecord::getLogRecordSize() { - return NEXTMBYTERECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const NextMbyteRecord& nmr) { - no << "----------NEXT MBYTE RECORD--------------------" << endl << endl; - printOut("Record type:", nmr.m_recordType); - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool CommitTransactionRecord::check() { - // Not implemented yet. - return true; -} - -Uint32 CommitTransactionRecord::getLogRecordSize() { - return COMMITTRANSACTIONRECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const CommitTransactionRecord& ctr) { - no << "----------COMMIT TRANSACTION RECORD------------" << endl << endl; - printOut("Record type:", ctr.m_recordType); - printOut("TableId", ctr.m_tableId); - printOut("SchemaVersion:", ctr.m_schemaVersion); - printOut("FfragmentId", ctr.m_fragmentId); - printOut("File no. of Prep. Op.", ctr.m_fileNumberOfPrepareOperation); - printOut("Start page no. of Prep. Op.", ctr.m_startPageNumberOfPrepareOperation); - printOut("Start page index of Prep. Op.", ctr.m_startPageIndexOfPrepareOperation); - printOut("Stop page no. of Prep. 
Op.", ctr.m_stopPageNumberOfPrepareOperation); - printOut("GlobalCheckpoint", ctr.m_globalCheckpoint); - - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool InvalidCommitTransactionRecord::check() { - // Not implemented yet. - return true; -} - -Uint32 InvalidCommitTransactionRecord::getLogRecordSize() { - return COMMITTRANSACTIONRECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const InvalidCommitTransactionRecord& ictr) { - no << "------INVALID COMMIT TRANSACTION RECORD--------" << endl << endl; - printOut("Record type:", ictr.m_recordType); - printOut("TableId", ictr.m_tableId); - printOut("FfragmentId", ictr.m_fragmentId); - printOut("File no. of Prep. Op.", ictr.m_fileNumberOfPrepareOperation); - printOut("Start page no. of Prep. Op.", ictr.m_startPageNumberOfPrepareOperation); - printOut("Start page index of Prep. Op.", ictr.m_startPageIndexOfPrepareOperation); - printOut("Stop page no. of Prep. Op.", ictr.m_stopPageNumberOfPrepareOperation); - printOut("GlobalCheckpoint", ictr.m_globalCheckpoint); - - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool PrepareOperationRecord::check() { - // Not fully implemented. - if (m_operationType == 3 && m_attributeLength != 0) - return false; - - if (m_logRecordSize != (m_attributeLength + m_keyLength + 8)) - return false; - - return true; -} - -Uint32 PrepareOperationRecord::getLogRecordSize(Uint32 wordsRead) { - if (wordsRead < 2) - return 2; // make sure we read more - return m_logRecordSize; -} - -NdbOut& operator<<(NdbOut& no, const PrepareOperationRecord& por) { - no << "-----------PREPARE OPERATION RECORD------------" << endl << endl; - printOut("Record type:", por.m_recordType); - printOut("logRecordSize:", por.m_logRecordSize); - printOut("hashValue:", por.m_hashValue); - switch (por.m_operationType) { - case 0: - ndbout_c("%-30s%-12u%-6s", "operationType:", - por.m_operationType, "read"); - break; - case 1: - ndbout_c("%-30s%-12u%-6s", "operationType:", - por.m_operationType, "update"); - break; - case 2: - ndbout_c("%-30s%-12u%-6s", "operationType:", - por.m_operationType, "insert"); - break; - case 3: - ndbout_c("%-30s%-12u%-6s", "operationType:", - por.m_operationType, "delete"); - break; - default: - printOut("operationType:", por.m_operationType); - } - printOut("page_no: ", por.m_page_no); - printOut("page_idx: ", por.m_page_idx); - printOut("attributeLength:", por.m_attributeLength); - printOut("keyLength:", por.m_keyLength); - -#if 1 - // Print keydata - Uint32* p = (Uint32*)&por.m_keyInfo; - for(Uint32 i=0; i < por.m_keyLength; i++){ - printOut("keydata:", *p); - p++; - } - - // Print attrdata - for(Uint32 i=0; i < por.m_attributeLength; i++){ - printOut("attrdata:", *p); - p++; - } -#endif - - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool CompletedGCIRecord::check() { - // Not implemented yet. 
- return true; -} - -Uint32 CompletedGCIRecord::getLogRecordSize() { - return COMPLETEDGCIRECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const CompletedGCIRecord& cGCIr) { - no << "-----------COMPLETED GCI RECORD----------------" << endl << endl; - printOut("Record type:", cGCIr.m_recordType); - printOut("Completed GCI:", cGCIr.m_theCompletedGCI); - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -bool NextLogRecord::check() { - // Not implemented yet. - return true; -} - -Uint32 NextLogRecord::getLogRecordSize(Uint32 pageIndex) { - return PAGESIZE - pageIndex; -} - -NdbOut& operator<<(NdbOut& no, const NextLogRecord& nl) { - no << "-----------NEXT LOG RECORD --------------------" << endl << endl; - printOut("Record type:", nl.m_recordType); - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -Uint32 PageHeader::getLogRecordSize() { - return PAGEHEADERSIZE; -} - -bool PageHeader::check() { - // Not implemented yet. - return true; -} - -bool PageHeader::lastPage() -{ - return m_next_page == 0xffffff00; -} - -Uint32 PageHeader::lastWord() -{ - return m_current_page_index; -} - - -NdbOut& operator<<(NdbOut& no, const PageHeader& ph) { - no << "------------PAGE HEADER------------------------" << endl << endl; - ndbout_c("%-30s%-12s%-12s\n", "", "Decimal", "Hex"); - printOut("Checksum:", ph.m_checksum); - printOut("Laps since initial start:", ph.m_lap); - printOut("Max gci completed:", ph.m_max_gci_completed); - printOut("Max gci started:", ph.m_max_gci_started); - printOut("Ptr to next page:", ph.m_next_page); - printOut("Ptr to previous page:", ph.m_previous_page); - printOut("Ndb version:", ph.m_ndb_version); - printOut("Number of log files:", ph.m_number_of_logfiles); - printOut("Current page index:", ph.m_current_page_index); - printOut("Oldest prepare op. file No.:", ph.m_old_prepare_file_number); - printOut("Oldest prepare op. 
page ref.:", ph.m_old_prepare_page_reference); - printOut("Dirty flag:", ph.m_dirty_flag); - printOut("Write Timer:", ph.m_log_timer); - printOut("Page i-val:", ph.m_page_i_value); - printOut("Place written:", ph.m_place_written_from); - printOut("Page No in File:", ph.m_page_no); - printOut("File No:", ph.m_file_no); - printOut("Word Written:", ph.m_word_written); - printOut("In Writing (should be 1)", ph.m_in_writing_flag); - printOut("Prev Page No (can be garbage)", ph.m_prev_page_no); - printOut("In Free List (should be 0):", ph.m_in_free_list); - no << endl; - return no; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -Uint32 FileDescriptor::getLogRecordSize() { - return FILEDESCRIPTORHEADERSIZE - + m_fdHeader.m_noOfDescriptors * FILEDESCRIPTORRECORDSIZE; -} - -NdbOut& operator<<(NdbOut& no, const FileDescriptor& fd) { - no << "-------FILE DESCRIPTOR HEADER------------------" << endl << endl; - printOut("Record type:", fd.m_fdHeader.m_recordType); - printOut("Number of file descriptors:", fd.m_fdHeader.m_noOfDescriptors); - printOut("File number:", fd.m_fdHeader.m_fileNo); - ndbout << endl; - for(Uint32 i = 0; i < fd.m_fdHeader.m_noOfDescriptors; i++) { - fd.printARecord(i); - } - return no; -} - -void FileDescriptor::printARecord( Uint32 recordIndex ) const { - ndbout << "------------------FILE DESCRIPTOR " << recordIndex - <<" ---------------------" << endl << endl; - ndbout_c("%-30s%-12s%-12s\n", "", "Decimal", "Hex"); - - for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) { - ndbout_c("%s%2d%s%-12u%-12x", "Max GCI completed, mbyte ", i, ": ", - m_fdRecord[recordIndex].m_maxGciCompleted[i-1], - m_fdRecord[recordIndex].m_maxGciCompleted[i-1]); - } - for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) { - ndbout_c("%s%2d%s%-12u%-12x", "Max GCI started, mbyte ", i, ": ", - m_fdRecord[recordIndex].m_maxGciStarted[i-1], - m_fdRecord[recordIndex].m_maxGciStarted[i-1]); - } - for(int i = 1; i <= NO_MBYTE_IN_FILE; i++) { - ndbout_c("%s%2d%s%-12u%-12x", "Last prepared ref, mbyte ", i, ": ", - m_fdRecord[recordIndex].m_lastPreparedReference[i-1], - m_fdRecord[recordIndex].m_lastPreparedReference[i-1]); - } - ndbout << endl; -} - -bool FileDescriptor::check() { - // Not implemented yet. - return true; -} - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- diff --git a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp b/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp deleted file mode 100644 index abdb57e8646..00000000000 --- a/storage/ndb/src/kernel/blocks/dblqh/redoLogReader/records.hpp +++ /dev/null @@ -1,250 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -#define ZNEW_PREP_OP_TYPE 0 -#define ZPREP_OP_TYPE 1 -#define ZCOMMIT_TYPE 2 -#define ZABORT_TYPE 3 -#define ZFD_TYPE 4 -#define ZFRAG_SPLIT_TYPE 5 -#define ZNEXT_LOG_RECORD_TYPE 6 -#define ZNEXT_MBYTE_TYPE 7 -#define ZCOMPLETED_GCI_TYPE 8 -#define ZINVALID_COMMIT_TYPE 9 - -#define MAX_FILE_DESCRIPTORS 40 -#define NO_MBYTE_IN_FILE 16 - -#define PAGESIZE 8192 -#define NO_PAGES_IN_MBYTE 32 -#define NO_MBYTE_IN_FILE 16 - -#define COMMITTRANSACTIONRECORDSIZE 9 -#define COMPLETEDGCIRECORDSIZE 2 -#define PAGEHEADERSIZE 32 -#define FILEDESCRIPTORHEADERSIZE 3 -#define FILEDESCRIPTORRECORDSIZE 48 -#define NEXTMBYTERECORDSIZE 1 -#define ABORTTRANSACTIONRECORDSIZE 3 - - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class AbortTransactionRecord { - friend NdbOut& operator<<(NdbOut&, const AbortTransactionRecord&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - Uint32 m_recordType; - Uint32 m_transactionId1; - Uint32 m_transactionId2; -}; - - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class NextMbyteRecord { - friend NdbOut& operator<<(NdbOut&, const NextMbyteRecord&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - Uint32 m_recordType; -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - - -class PrepareOperationRecord { - friend NdbOut& operator<<(NdbOut&, const PrepareOperationRecord&); -public: - bool check(); - Uint32 getLogRecordSize(Uint32 wordsRead); - -protected: - Uint32 m_recordType; - Uint32 m_logRecordSize; - Uint32 m_hashValue; - Uint32 m_operationType; // 0 READ, 1 UPDATE, 2 INSERT, 3 DELETE - Uint32 m_attributeLength; - Uint32 m_keyLength; - Uint32 m_page_no; - Uint32 m_page_idx; - Uint32 *m_keyInfo; // In this order - Uint32 *m_attrInfo;// In this order -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class CompletedGCIRecord { - friend NdbOut& operator<<(NdbOut&, const CompletedGCIRecord&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - Uint32 m_recordType; - Uint32 m_theCompletedGCI; -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class NextLogRecord { - friend NdbOut& operator<<(NdbOut&, const NextLogRecord&); -public: - bool check(); - Uint32 getLogRecordSize(Uint32); -protected: - Uint32 m_recordType; -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class PageHeader { - friend NdbOut& operator<<(NdbOut&, const PageHeader&); -public: - bool check(); - Uint32 getLogRecordSize(); - bool lastPage(); - Uint32 lastWord(); -protected: - Uint32 m_checksum; - Uint32 m_lap; - Uint32 m_max_gci_completed; - Uint32 m_max_gci_started; - Uint32 m_next_page; - Uint32 m_previous_page; - Uint32 m_ndb_version; - Uint32 m_number_of_logfiles; - Uint32 m_current_page_index; - Uint32 m_old_prepare_file_number; - 
Uint32 m_old_prepare_page_reference; - Uint32 m_dirty_flag; -/* Debug info Start */ - Uint32 m_log_timer; - Uint32 m_page_i_value; - Uint32 m_place_written_from; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_word_written; - Uint32 m_in_writing_flag; - Uint32 m_prev_page_no; - Uint32 m_in_free_list; -/* Debug info End */ -}; - -//---------------------------------------------------------------- -// File descriptor. -//---------------------------------------------------------------- - -class FileDescriptorHeader { -public: - Uint32 m_recordType; - Uint32 m_noOfDescriptors; - Uint32 m_fileNo; -}; - -class FileDescriptorRecord { -public: - Uint32 m_maxGciCompleted[16]; - Uint32 m_maxGciStarted[16]; - Uint32 m_lastPreparedReference[16]; -}; - -class FileDescriptor { - friend NdbOut& operator<<(NdbOut&, const FileDescriptor&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - void printARecord( Uint32 ) const; - FileDescriptorHeader m_fdHeader; - FileDescriptorRecord m_fdRecord[1]; -}; - - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class CommitTransactionRecord { - friend NdbOut& operator<<(NdbOut&, const CommitTransactionRecord&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - Uint32 m_recordType; - Uint32 m_tableId; - Uint32 m_schemaVersion; - Uint32 m_fragmentId; - Uint32 m_fileNumberOfPrepareOperation; - Uint32 m_startPageNumberOfPrepareOperation; - Uint32 m_startPageIndexOfPrepareOperation; - Uint32 m_stopPageNumberOfPrepareOperation; - Uint32 m_globalCheckpoint; -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -class InvalidCommitTransactionRecord { - friend NdbOut& operator<<(NdbOut&, const InvalidCommitTransactionRecord&); -public: - bool check(); - Uint32 getLogRecordSize(); -protected: - Uint32 m_recordType; - Uint32 m_tableId; - Uint32 m_fragmentId; - Uint32 m_fileNumberOfPrepareOperation; - Uint32 m_startPageNumberOfPrepareOperation; - Uint32 m_startPageIndexOfPrepareOperation; - Uint32 m_stopPageNumberOfPrepareOperation; - Uint32 m_globalCheckpoint; -}; - -//---------------------------------------------------------------- -// -//---------------------------------------------------------------- - -struct NextLogRec { - -}; - -struct NewPrepareOperation { - -}; - -struct FragmentSplit { - -}; diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp deleted file mode 100644 index 90338cb58cd..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ /dev/null @@ -1,1978 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
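FileDescriptor above uses the usual variable-length record trick: a fixed 3-word header (FILEDESCRIPTORHEADERSIZE) followed by m_fdRecord[1], where the real number of 48-word descriptor blocks (FILEDESCRIPTORRECORDSIZE) comes from m_noOfDescriptors, giving 3 + n * 48 words in total. A short standalone restatement, names illustrative:

#include <cstdint>
#include <cstdio>

// Word counts from records.hpp.
static const uint32_t FD_HEADER_WORDS = 3;    // FILEDESCRIPTORHEADERSIZE
static const uint32_t FD_RECORD_WORDS = 48;   // FILEDESCRIPTORRECORDSIZE

// Total size in 32-bit words of a file descriptor log record that carries
// 'noOfDescriptors' per-file descriptor blocks after its fixed header.
static uint32_t fileDescriptorWords(uint32_t noOfDescriptors)
{
  return FD_HEADER_WORDS + noOfDescriptors * FD_RECORD_WORDS;
}

int main()
{
  for (uint32_t n = 1; n <= 3; n++)
    std::printf("%u descriptor(s) -> %u words\n", n, fileDescriptorWords(n));
  return 0;
}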
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBTC_H -#define DBTC_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef DBTC_C -/* - * 2.2 LOCAL SYMBOLS - * ----------------- - */ -#define Z8NIL 255 -#define ZAPI_CONNECT_FILESIZE 20 -#define ZATTRBUF_FILESIZE 4000 -#define ZCLOSED 2 -#define ZCOMMITING 0 /* VALUE FOR TRANSTATUS */ -#define ZCOMMIT_SETUP 2 -#define ZCONTINUE_ABORT_080 4 -#define ZDATABUF_FILESIZE 4000 -#define ZGCP_FILESIZE 10 -#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGHT'-VARIABLE. */ -#define ZINBUF_NEXT 27 /* POSITION OF 'NEXT'-VARIABLE. */ -#define ZINBUF_PREV 26 /* POSITION OF 'PREVIOUS'-VARIABLE. */ -#define ZINTSPH1 1 -#define ZINTSPH2 2 -#define ZINTSPH3 3 -#define ZINTSPH6 6 -#define ZLASTPHASE 255 -#define ZMAX_DATA_IN_LQHKEYREQ 12 -#define ZNODEBUF_FILESIZE 2000 -#define ZNR_OF_SEIZE 10 -#define ZSCANREC_FILE_SIZE 100 -#define ZSCAN_FRAGREC_FILE_SIZE 400 -#define ZSCAN_OPREC_FILE_SIZE 400 -#define ZSEND_ATTRINFO 0 -#define ZSPH1 1 -#define ZTABREC_FILESIZE 16 -#define ZTAKE_OVER_ACTIVE 1 -#define ZTAKE_OVER_IDLE 0 -#define ZTC_CONNECT_FILESIZE 200 -#define ZTCOPCONF_SIZE 6 - -// ---------------------------------------- -// Error Codes for Scan -// ---------------------------------------- -#define ZNO_CONCURRENCY_ERROR 242 -#define ZTOO_HIGH_CONCURRENCY_ERROR 244 -#define ZNO_SCANREC_ERROR 245 -#define ZNO_FRAGMENT_ERROR 246 -#define ZSCAN_AI_LEN_ERROR 269 -#define ZSCAN_LQH_ERROR 270 -#define ZSCAN_FRAG_LQH_ERROR 274 - -#define ZSCANTIME_OUT_ERROR 296 -#define ZSCANTIME_OUT_ERROR2 297 - -// ---------------------------------------- -// Error Codes for transactions -// ---------------------------------------- -#define ZSTATE_ERROR 202 -#define ZLENGTH_ERROR 207 // Also Scan -#define ZERO_KEYLEN_ERROR 208 -#define ZSIGNAL_ERROR 209 -#define ZGET_ATTRBUF_ERROR 217 // Also Scan -#define ZGET_DATAREC_ERROR 218 -#define ZMORE_AI_IN_TCKEYREQ_ERROR 220 -#define ZCOMMITINPROGRESS 230 -#define ZROLLBACKNOTALLOWED 232 -#define ZNO_FREE_TC_CONNECTION 233 // Also Scan -#define ZABORTINPROGRESS 237 -#define ZPREPAREINPROGRESS 238 -#define ZWRONG_SCHEMA_VERSION_ERROR 241 // Also Scan -#define ZSCAN_NODE_ERROR 250 -#define ZTRANS_STATUS_ERROR 253 -#define ZTIME_OUT_ERROR 266 -#define ZSIMPLE_READ_WITHOUT_AI 271 -#define ZNO_AI_WITH_UPDATE 272 -#define ZSEIZE_API_COPY_ERROR 275 -#define ZSCANINPROGRESS 276 -#define ZABORT_ERROR 277 -#define ZCOMMIT_TYPE_ERROR 278 - -#define ZNO_FREE_TC_MARKER 279 -#define ZNODE_SHUTDOWN_IN_PROGRESS 280 -#define ZCLUSTER_SHUTDOWN_IN_PROGRESS 281 -#define ZWRONG_STATE 282 -#define ZCLUSTER_IN_SINGLEUSER_MODE 299 - -#define ZDROP_TABLE_IN_PROGRESS 283 -#define ZNO_SUCH_TABLE 284 -#define ZUNKNOWN_TABLE_ERROR 285 -#define ZNODEFAIL_BEFORE_COMMIT 286 -#define ZINDEX_CORRUPT_ERROR 287 - -// ---------------------------------------- -// Seize error -// ---------------------------------------- -#define ZNO_FREE_API_CONNECTION 219 -#define ZSYSTEM_NOT_STARTED_ERROR 203 - -// ---------------------------------------- -// Release errors -// ---------------------------------------- -#define ZINVALID_CONNECTION 229 - - -#define ZNOT_FOUND 626 -#define ZALREADYEXIST 630 -#define ZNOTUNIQUE 893 - -#define ZINVALID_KEY 290 -#endif 
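The header above keeps its private numeric symbols inside #ifdef DBTC_C, so they are visible only to the translation unit that implements the block (which defines DBTC_C before including the header) and do not leak to other includers. A minimal single-file sketch of the same convention; the guard name is hypothetical, the header is collapsed into one file purely for illustration, and only the two error-code values are taken from the definitions above.

#include <cstdio>

#define BLOCK_C                 // the implementation file defines the guard first...

// ---- begin "block.hpp" (sketch) ----
#ifdef BLOCK_C                  // ...so the private symbols become visible here,
#define ZSTATE_ERROR    202     // but not in other files that include the header
#define ZTIME_OUT_ERROR 266     // without defining BLOCK_C.
#endif
// ---- end "block.hpp" ----

int main()
{
  std::printf("ZSTATE_ERROR=%d ZTIME_OUT_ERROR=%d\n", ZSTATE_ERROR, ZTIME_OUT_ERROR);
  return 0;
}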
- -class Dbtc: public SimulatedBlock { -public: - enum ConnectionState { - CS_CONNECTED = 0, - CS_DISCONNECTED = 1, - CS_STARTED = 2, - CS_RECEIVING = 3, - CS_PREPARED = 4, - CS_START_PREPARING = 5, - CS_REC_PREPARING = 6, - CS_RESTART = 7, - CS_ABORTING = 8, - CS_COMPLETING = 9, - CS_COMPLETE_SENT = 10, - CS_PREPARE_TO_COMMIT = 11, - CS_COMMIT_SENT = 12, - CS_START_COMMITTING = 13, - CS_COMMITTING = 14, - CS_REC_COMMITTING = 15, - CS_WAIT_ABORT_CONF = 16, - CS_WAIT_COMPLETE_CONF = 17, - CS_WAIT_COMMIT_CONF = 18, - CS_FAIL_ABORTING = 19, - CS_FAIL_ABORTED = 20, - CS_FAIL_PREPARED = 21, - CS_FAIL_COMMITTING = 22, - CS_FAIL_COMMITTED = 23, - CS_FAIL_COMPLETED = 24, - CS_START_SCAN = 25 - }; - - enum OperationState { - OS_CONNECTING_DICT = 0, - OS_CONNECTED = 1, - OS_OPERATING = 2, - OS_PREPARED = 3, - OS_COMMITTING = 4, - OS_COMMITTED = 5, - OS_COMPLETING = 6, - OS_COMPLETED = 7, - OS_RESTART = 8, - OS_ABORTING = 9, - OS_ABORT_SENT = 10, - OS_TAKE_OVER = 11, - OS_WAIT_DIH = 12, - OS_WAIT_KEYINFO = 13, - OS_WAIT_ATTR = 14, - OS_WAIT_COMMIT_CONF = 15, - OS_WAIT_ABORT_CONF = 16, - OS_WAIT_COMPLETE_CONF = 17, - OS_WAIT_SCAN = 18 - }; - - enum AbortState { - AS_IDLE = 0, - AS_ACTIVE = 1 - }; - - enum HostState { - HS_ALIVE = 0, - HS_DEAD = 1 - }; - - enum LqhTransState { - LTS_IDLE = 0, - LTS_ACTIVE = 1 - }; - - enum FailState { - FS_IDLE = 0, - FS_LISTENING = 1, - FS_COMPLETING = 2 - }; - - enum SystemStartState { - SSS_TRUE = 0, - SSS_FALSE = 1 - }; - - enum TimeOutCheckState { - TOCS_TRUE = 0, - TOCS_FALSE = 1 - }; - - enum ReturnSignal { - RS_NO_RETURN = 0, - RS_TCKEYCONF = 1, - RS_TC_COMMITCONF = 3, - RS_TCROLLBACKCONF = 4, - RS_TCROLLBACKREP = 5 - }; - - enum IndexOperationState { - IOS_NOOP = 0, - IOS_INDEX_ACCESS = 1, - IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF = 2, - IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI = 3, - IOS_INDEX_OPERATION = 4 - }; - - enum IndexState { - IS_BUILDING = 0, // build in progress, start state at create - IS_ONLINE = 1 // ready to use - }; - - - /**-------------------------------------------------------------------------- - * LOCAL SYMBOLS PER 'SYMBOL-VALUED' VARIABLE - * - * - * NSYMB ZAPI_CONNECT_FILESIZE = 20 - * NSYMB ZTC_CONNECT_FILESIZE = 200 - * NSYMB ZHOST_FILESIZE = 16 - * NSYMB ZDATABUF_FILESIZE = 4000 - * NSYMB ZATTRBUF_FILESIZE = 4000 - * NSYMB ZGCP_FILESIZE = 10 - * - * - * ABORTED CODES - * TPHASE NSYMB ZSPH1 = 1 - * NSYMB ZLASTPHASE = 255 - * - * - * LQH_TRANS - * NSYMB ZTRANS_ABORTED = 1 - * NSYMB ZTRANS_PREPARED = 2 - * NSYMB ZTRANS_COMMITTED = 3 - * NSYMB ZCOMPLETED_LQH_TRANS = 4 - * NSYMB ZTRANS_COMPLETED = 5 - * - * - * TAKE OVER - * NSYMB ZTAKE_OVER_IDLE = 0 - * NSYMB ZTAKE_OVER_ACTIVE = 1 - * - * ATTRBUF (ATTRBUF_RECORD) - * NSYMB ZINBUF_DATA_LEN = 24 - * NSYMB ZINBUF_NEXTFREE = 25 (NOT USED ) - * NSYMB ZINBUF_PREV = 26 - * NSYMB ZINBUF_NEXT = 27 - -------------------------------------------------------------------------*/ - /* - 2.3 RECORDS AND FILESIZES - ------------------------- - */ - /* **************************************************************** */ - /* ---------------------------------------------------------------- */ - /* ------------------- TRIGGER AND INDEX DATA --------------------- */ - /* ---------------------------------------------------------------- */ - /* **************************************************************** */ - /* ********* DEFINED TRIGGER DATA ********* */ - /* THIS RECORD FORMS LISTS OF ACTIVE */ - /* TRIGGERS FOR EACH TABLE. 
*/ - /* THE RECORDS ARE MANAGED BY A TRIGGER */ - /* POOL WHERE A TRIGGER RECORD IS SEIZED */ - /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */ - /* WHEN THE TRIGGER IS DEACTIVATED. */ - /* **************************************** */ - struct TcDefinedTriggerData { - TcDefinedTriggerData() {} - /** - * Trigger id, used to identify the trigger - */ - UintR triggerId; - - /** - * Trigger type, defines what the trigger is used for - */ - TriggerType::Value triggerType; - - /** - * Trigger type, defines what the trigger is used for - */ - TriggerEvent::Value triggerEvent; - - /** - * Attribute mask, defines what attributes are to be monitored - * Can be seen as a compact representation of SQL column name list - */ - Bitmask attributeMask; - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - - /** - * Index id, only used by secondary_index triggers. This is same as - * index table id in DICT. - **/ - Uint32 indexId; - - /** - * Prev pointer (used in list) - */ - Uint32 prevList; - - inline void print(NdbOut & s) const { - s << "[DefinedTriggerData = " << triggerId << "]"; - } - }; - typedef Ptr DefinedTriggerPtr; - - /** - * Pool of trigger data record - */ - ArrayPool c_theDefinedTriggerPool; - - /** - * The list of active triggers - */ - DLList c_theDefinedTriggers; - - typedef DataBuffer<11> AttributeBuffer; - - AttributeBuffer::DataBufferPool c_theAttributeBufferPool; - - UintR c_transactionBufferSpace; - - - /* ********** FIRED TRIGGER DATA ********** */ - /* THIS RECORD FORMS LISTS OF FIRED */ - /* TRIGGERS FOR A TRANSACTION. */ - /* THE RECORDS ARE MANAGED BY A TRIGGER */ - /* POOL WHERE A TRIGGER RECORD IS SEIZED */ - /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */ - /* WHEN THE TRIGGER IS DEACTIVATED. */ - /* **************************************** */ - struct TcFiredTriggerData { - TcFiredTriggerData() {} - - /** - * Trigger id, used to identify the trigger - **/ - Uint32 triggerId; - - /** - * The operation that fired the trigger - */ - Uint32 fireingOperation; - - /** - * The fragment id of the firing operation. This will be appended - * to the Primary Key such that the record can be found even in the - * case of user defined partitioning. 
- */ - Uint32 fragId; - - /** - * Used for scrapping in case of node failure - */ - Uint32 nodeId; - - /** - * Trigger attribute info, primary key value(s) - */ - AttributeBuffer::Head keyValues; - - /** - * Trigger attribute info, attribute value(s) before operation - */ - AttributeBuffer::Head beforeValues; - - /** - * Trigger attribute info, attribute value(s) after operation - */ - AttributeBuffer::Head afterValues; - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - Uint32 nextHash; - }; - - /** - * Prev pointer (used in list) - */ - union { - Uint32 prevList; - Uint32 prevHash; - }; - - inline void print(NdbOut & s) const { - s << "[FiredTriggerData = " << triggerId << "]"; - } - - inline Uint32 hashValue() const { - return fireingOperation ^ nodeId; - } - - inline bool equal(const TcFiredTriggerData & rec) const { - return fireingOperation == rec.fireingOperation && nodeId == rec.nodeId; - } - }; - typedef Ptr FiredTriggerPtr; - - /** - * Pool of trigger data record - */ - ArrayPool c_theFiredTriggerPool; - DLHashTable c_firedTriggerHash; - AttributeBuffer::DataBufferPool c_theTriggerAttrInfoPool; - - Uint32 c_maxNumberOfDefinedTriggers; - Uint32 c_maxNumberOfFiredTriggers; - - struct AttrInfoRecord { - /** - * Pre-allocated AttrInfo signal - */ - AttrInfo attrInfo; - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - /** - * Prev pointer (used in list) - */ - Uint32 prevList; - }; - - - /* ************* INDEX DATA *************** */ - /* THIS RECORD FORMS LISTS OF ACTIVE */ - /* INDEX FOR EACH TABLE. */ - /* THE RECORDS ARE MANAGED BY A INDEX */ - /* POOL WHERE AN INDEX RECORD IS SEIZED */ - /* WHEN AN INDEX IS CREATED AND RELEASED */ - /* WHEN THE INDEX IS DROPPED. */ - /* **************************************** */ - struct TcIndexData { - /** - * IndexState - */ - IndexState indexState; - - /** - * Index id, same as index table id in DICT - */ - Uint32 indexId; - - /** - * Index attribute list. Only the length is used in v21x. 
- */ - AttributeList attributeList; - - /** - * Primary table id, the primary table to be indexed - */ - Uint32 primaryTableId; - - /** - * Primary key position in secondary table - */ - Uint32 primaryKeyPos; - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - /** - * Prev pointer (used in list) - */ - Uint32 prevList; - }; - - typedef Ptr TcIndexDataPtr; - - /** - * Pool of index data record - */ - ArrayPool c_theIndexPool; - - /** - * The list of defined indexes - */ - DLList c_theIndexes; - UintR c_maxNumberOfIndexes; - - struct TcIndexOperation { - TcIndexOperation(AttributeBuffer::DataBufferPool & abp) : - indexOpState(IOS_NOOP), - expectedKeyInfo(0), - keyInfo(abp), - expectedAttrInfo(0), - attrInfo(abp), - expectedTransIdAI(0), - transIdAI(abp), - indexReadTcConnect(RNIL) - {} - - ~TcIndexOperation() - { - } - - // Index data - Uint32 indexOpId; - IndexOperationState indexOpState; // Used to mark on-going TcKeyReq - Uint32 expectedKeyInfo; - AttributeBuffer keyInfo; // For accumulating IndxKeyInfo - Uint32 expectedAttrInfo; - AttributeBuffer attrInfo; // For accumulating IndxAttrInfo - Uint32 expectedTransIdAI; - AttributeBuffer transIdAI; // For accumulating TransId_AI - - TcKeyReq tcIndxReq; - UintR connectionIndex; - UintR indexReadTcConnect; // - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - /** - * Prev pointer (used in list) - */ - Uint32 prevList; - }; - - typedef Ptr TcIndexOperationPtr; - - /** - * Pool of index data record - */ - ArrayPool c_theIndexOperationPool; - - UintR c_maxNumberOfIndexOperations; - - /************************** API CONNECT RECORD *********************** - * The API connect record contains the connection record to which the - * application connects. - * - * The application can send one operation at a time. It can send a - * new operation immediately after sending the previous operation. - * Thereby several operations can be active in one transaction within TC. - * This is achieved by using the API connect record. - * Each active operation is handled by the TC connect record. - * As soon as the TC connect record has sent the - * request to the LQH it is ready to receive new operations. - * The LQH connect record takes care of waiting for an operation to - * complete. - * When an operation has completed on the LQH connect record, - * a new operation can be started on this LQH connect record. - ******************************************************************* - * - * API CONNECT RECORD ALIGNED TO BE 256 BYTES - ********************************************************************/ - - /*******************************************************************>*/ - // We break out the API Timer for optimisation on scanning rather than - // on fast access. - /*******************************************************************>*/ - inline void setApiConTimer(Uint32 apiConPtrI, Uint32 value, Uint32 line){ - c_apiConTimer[apiConPtrI] = value; - c_apiConTimer_line[apiConPtrI] = line; - } - - inline Uint32 getApiConTimer(Uint32 apiConPtrI) const { - return c_apiConTimer[apiConPtrI]; - } - UintR* c_apiConTimer; - UintR* c_apiConTimer_line; - - struct ApiConnectRecord { - ApiConnectRecord(ArrayPool & firedTriggerPool, - ArrayPool & seizedIndexOpPool): - theFiredTriggers(firedTriggerPool), - isIndexOp(false), - theSeizedIndexOperations(seizedIndexOpPool) - {} - - //--------------------------------------------------- - // First 16 byte cache line. Hot variables. 
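The comment above explains why the per-connection timer lives outside ApiConnectRecord: the timeout check has to scan every connection, and walking a dense array of timer words touches far less memory than walking the 256-byte connection records themselves. A minimal sketch of that structure-of-arrays idea; names and sizes are illustrative.

#include <cstdint>
#include <cstdio>
#include <vector>

// Dense, separately allocated timer arrays: the periodic timeout scan only
// touches these words, not the large per-connection records they belong to.
struct ConTimers {
  std::vector<uint32_t> timer;       // last activity time per connection
  std::vector<uint32_t> line;        // source line that set it (debug aid)

  explicit ConTimers(size_t n) : timer(n, 0), line(n, 0) {}

  void set(size_t i, uint32_t value, uint32_t srcLine) {
    timer[i] = value;
    line[i] = srcLine;
  }

  // Scan all connections and report those idle longer than 'limit'.
  void findTimedOut(uint32_t now, uint32_t limit) const {
    for (size_t i = 0; i < timer.size(); i++)
      if (timer[i] != 0 && now - timer[i] > limit)
        std::printf("connection %zu timed out (set at line %u)\n", i, line[i]);
  }
};

int main()
{
  ConTimers timers(4);
  timers.set(1, 100, __LINE__);
  timers.set(3, 990, __LINE__);
  timers.findTimedOut(/*now=*/1000, /*limit=*/500);   // reports connection 1
  return 0;
}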
- //--------------------------------------------------- - ConnectionState apiConnectstate; - UintR transid[2]; - UintR firstTcConnect; - NdbNodeBitmask m_transaction_nodes; - - //--------------------------------------------------- - // Second 16 byte cache line. Hot variables. - //--------------------------------------------------- - UintR lqhkeyconfrec; - UintR cachePtr; - UintR currSavePointId; - UintR counter; - - //--------------------------------------------------- - // Third 16 byte cache line. First and second cache - // line plus this will be enough for copy API records. - // Variables used in late phases. - //--------------------------------------------------- - UintR nextGcpConnect; - UintR prevGcpConnect; - UintR gcpPointer; - UintR ndbapiConnect; - - //--------------------------------------------------- - // Fourth 16 byte cache line. Only used in late phases. - // Plus 4 bytes of error handling. - //--------------------------------------------------- - UintR nextApiConnect; - BlockReference ndbapiBlockref; - UintR apiCopyRecord; - UintR globalcheckpointid; - - //--------------------------------------------------- - // Second 64 byte cache line starts. First 16 byte - // cache line in this one. Variables primarily used - // in early phase. - //--------------------------------------------------- - UintR lastTcConnect; - UintR lqhkeyreqrec; - AbortState abortState; - Uint32 buddyPtr; - Uint8 m_exec_flag; - Uint8 unused2; - Uint8 takeOverRec; - Uint8 currentReplicaNo; - - //--------------------------------------------------- - // Error Handling variables. If cache line 32 bytes - // ensures that cache line is still only read in - // early phases. - //--------------------------------------------------- - union { - UintR apiScanRec; - UintR commitAckMarker; - }; - UintR currentTcConnect; - BlockReference tcBlockref; - Uint16 returncode; - Uint16 takeOverInd; - - //--------------------------------------------------- - // Second 64 byte cache line. Third 16 byte cache line - // in this one. Variables primarily used in early phase - // and checked in late phase. - // Fourth cache line is the tcSendArray that is used - // when two and three operations are responded to in - // parallel. The first two entries in tcSendArray is - // part of the third cache line. 
- //--------------------------------------------------- - //--------------------------------------------------- - // timeOutCounter is used waiting for ABORTCONF, COMMITCONF - // and COMPLETECONF - //--------------------------------------------------- - UintR failureNr; - Uint8 tckeyrec; // Ändrad från R - Uint8 tcindxrec; - Uint8 apiFailState; // Ändrad från R - Uint8 singleUserMode; - ReturnSignal returnsignal; - Uint8 timeOutCounter; - - UintR tcSendArray[6]; - - // Trigger data - - /** - * The list of fired triggers - */ - DLFifoList theFiredTriggers; - - bool triggerPending; // Used to mark waiting for a CONTINUEB - - // Index data - - Uint8 isIndexOp; // Used to mark on-going TcKeyReq as indx table access - bool indexOpReturn; - UintR noIndexOp; // No outstanding index ops - - // Index op return context - UintR indexOp; - UintR clientData; - Uint32 errorData; - UintR attrInfoLen; - - UintR accumulatingIndexOp; - UintR executingIndexOp; - UintR tcIndxSendArray[6]; - DLList theSeizedIndexOperations; - }; - - typedef Ptr ApiConnectRecordPtr; - - - /************************** TC CONNECT RECORD ************************/ - /* *******************************************************************/ - /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/ - /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */ - /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */ - /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */ - /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */ - /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */ - /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */ - /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */ - /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */ - /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */ - /* EXECUTED WITH THE TC CONNECT RECORD. */ - /*******************************************************************>*/ - /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */ - /*******************************************************************>*/ - struct TcConnectRecord { - //--------------------------------------------------- - // First 16 byte cache line. Those variables are only - // used in error cases. - //--------------------------------------------------- - UintR tcOprec; /* TC OPREC of operation being taken over */ - Uint16 failData[4]; /* Failed nodes when taking over an operation */ - UintR nextTcFailHash; - - //--------------------------------------------------- - // Second 16 byte cache line. Those variables are used - // from LQHKEYCONF to sending COMMIT and COMPLETED. - //--------------------------------------------------- - UintR lastLqhCon; /* Connect record in last replicas Lqh record */ - Uint16 lastLqhNodeId; /* Node id of last replicas Lqh */ - Uint16 m_execAbortOption;/* TcKeyReq::ExecuteAbortOption */ - UintR commitAckMarker; /* CommitMarker I value */ - - //--------------------------------------------------- - // Third 16 byte cache line. The hottest variables. 
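The cache-line comments above document an intended layout: the hottest fields of each record are grouped at the front and the record is kept at a power-of-two size (128 bytes for TcConnectRecord, 256 for ApiConnectRecord). In current C++ that intent can be made checkable at compile time; the sketch below illustrates the idea and is not the original record.

#include <cstdint>
#include <cstddef>

// Illustrative hot/cold grouping with compile-time layout checks; this is a
// sketch of the documented intent, not the original TcConnectRecord.
struct alignas(128) ConnectRecordSketch {
  // --- hottest fields first: touched on every key operation ---
  uint32_t state;
  uint32_t apiConnect;
  uint32_t nextConnect;
  uint8_t  dirtyOp;
  uint8_t  lastReplicaNo;
  uint8_t  noOfNodes;
  uint8_t  operation;
  // --- colder fields: only used for take-over / error handling ---
  uint32_t tcOprec;
  uint16_t failData[4];
  uint32_t nextFailHash;
};

// Keep the "hot fields in the first cache line" and "128-byte record" claims honest.
static_assert(offsetof(ConnectRecordSketch, tcOprec) < 64,
              "hot fields should fit in the first cache line");
static_assert(sizeof(ConnectRecordSketch) == 128,
              "alignas(128) pads the record to its intended power-of-two size");

int main() { return 0; }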
- //--------------------------------------------------- - OperationState tcConnectstate; /* THE STATE OF THE CONNECT*/ - UintR apiConnect; /* POINTER TO API CONNECT RECORD */ - UintR nextTcConnect; /* NEXT TC RECORD*/ - Uint8 dirtyOp; - Uint8 opSimple; - Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */ - Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */ - Uint8 operation; /* OPERATION TYPE */ - /* 0 = READ REQUEST */ - /* 1 = UPDATE REQUEST */ - /* 2 = INSERT REQUEST */ - /* 3 = DELETE REQUEST */ - - //--------------------------------------------------- - // Fourth 16 byte cache line. The mildly hot variables. - // tcNodedata expands 4 Bytes into the next cache line - // with indexes almost never used. - //--------------------------------------------------- - UintR clientData; /* SENDERS OPERATION POINTER */ - UintR dihConnectptr; /* CONNECTION TO DIH BLOCK ON THIS NODE */ - UintR prevTcConnect; /* DOUBLY LINKED LIST OF TC CONNECT RECORDS*/ - UintR savePointId; - - Uint16 tcNodedata[4]; - - // Trigger data - FiredTriggerPtr accumulatingTriggerData; - UintR noFiredTriggers; - UintR noReceivedTriggers; - UintR triggerExecutionCount; - UintR triggeringOperation; - UintR savedState[LqhKeyConf::SignalLength]; - - // Index data - Uint8 isIndexOp; // Used to mark on-going TcKeyReq as index table access - UintR indexOp; - UintR currentIndexId; - UintR attrInfoLen; - }; - - friend struct TcConnectRecord; - - typedef Ptr TcConnectRecordPtr; - - // ********************** CACHE RECORD ************************************** - //--------------------------------------------------------------------------- - // This record is used between reception of TCKEYREQ and sending of LQHKEYREQ - // It is separatedso as to improve the cache hit rate and also to minimise - // the necessary memory storage in NDB Cluster. - //--------------------------------------------------------------------------- - - struct CacheRecord { - //--------------------------------------------------- - // First 16 byte cache line. Variables used by - // ATTRINFO processing. - //--------------------------------------------------- - UintR firstAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */ - UintR lastAttrbuf; /* POINTER TO LINKED LIST OF ATTRIBUTE BUFFERS */ - UintR currReclenAi; - Uint16 attrlength; /* ATTRIBUTE INFORMATION LENGTH */ - Uint16 save1; - - //--------------------------------------------------- - // Second 16 byte cache line. Variables initiated by - // TCKEYREQ and used in LQHKEYREQ. - //--------------------------------------------------- - UintR attrinfo15[4]; - - //--------------------------------------------------- - // Third 16 byte cache line. Variables initiated by - // TCKEYREQ and used in LQHKEYREQ. - //--------------------------------------------------- - UintR attrinfo0; - UintR schemaVersion;/* SCHEMA VERSION USED IN TRANSACTION */ - UintR tableref; /* POINTER TO THE TABLE IN WHICH THE FRAGMENT EXISTS*/ - Uint16 apiVersionNo; - Uint16 keylen; /* KEY LENGTH SENT BY REQUEST SIGNAL */ - - //--------------------------------------------------- - // Fourth 16 byte cache line. Variables initiated by - // TCKEYREQ and used in LQHKEYREQ. - //--------------------------------------------------- - UintR keydata[4]; /* RECEIVES FIRST 16 BYTES OF TUPLE KEY */ - - //--------------------------------------------------- - // First 16 byte cache line in second 64 byte cache - // line. Diverse use. 
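CacheRecord above holds only the state needed between receiving TCKEYREQ and sending LQHKEYREQ; keeping it in its own pool means a record is only tied up for that short window instead of inflating the long-lived connect records. Below is a minimal free-list pool illustrating the seize/release pattern such a separation enables; the type and names are hypothetical, not the DBTC pool implementation.

#include <cstdint>
#include <cstdio>
#include <vector>

// Tiny free-list pool: seize a slot when a request arrives, release it as soon
// as the request has been forwarded, so only in-preparation requests hold slots.
struct CacheSlotPool {
  static const uint32_t NIL = 0xffffffff;
  std::vector<uint32_t> nextFree;   // free-list links, NIL-terminated
  uint32_t firstFree;

  explicit CacheSlotPool(uint32_t n) : nextFree(n), firstFree(0) {
    for (uint32_t i = 0; i < n; i++) nextFree[i] = (i + 1 < n) ? i + 1 : NIL;
  }
  uint32_t seize() {                 // e.g. on request reception
    uint32_t i = firstFree;
    if (i != NIL) firstFree = nextFree[i];
    return i;                        // NIL means the pool is exhausted
  }
  void release(uint32_t i) {         // e.g. after the request has been sent on
    nextFree[i] = firstFree;
    firstFree = i;
  }
};

int main()
{
  CacheSlotPool pool(3);
  uint32_t a = pool.seize();
  uint32_t b = pool.seize();
  pool.release(a);                   // slot a is immediately reusable
  std::printf("seized %u and %u, re-seized %u\n", a, b, pool.seize());
  return 0;
}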
- //--------------------------------------------------- - UintR fragmentid; /* THE COMPUTED FRAGMENT ID */ - UintR hashValue; /* THE HASH VALUE USED TO LOCATE FRAGMENT */ - - Uint8 distributionKeyIndicator; - Uint8 m_special_hash; // collation or distribution key - Uint8 m_no_disk_flag; - Uint8 lenAiInTckeyreq; /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */ - - Uint8 fragmentDistributionKey; /* DIH generation no */ - - /** - * EXECUTION MODE OF OPERATION - * 0 = NORMAL EXECUTION, 1 = INTERPRETED EXECUTION - */ - Uint8 opExec; - - Uint8 unused; - Uint8 unused1; - - //--------------------------------------------------- - // Second 16 byte cache line in second 64 byte cache - // line. Diverse use. - //--------------------------------------------------- - UintR distributionKey; - UintR nextCacheRec; - UintR unused3; - Uint32 scanInfo; - - //--------------------------------------------------- - // Third 16 byte cache line in second 64 - // byte cache line. Diverse use. - //--------------------------------------------------- - Uint32 unused4; - Uint32 scanTakeOverInd; - UintR firstKeybuf; /* POINTER THE LINKED LIST OF KEY BUFFERS */ - UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */ - - //--------------------------------------------------- - // Fourth 16 byte cache line in second 64 - // byte cache line. Not used currently. - //--------------------------------------------------- - UintR packedCacheVar[4]; - }; - - typedef Ptr CacheRecordPtr; - - /* ************************ HOST RECORD ********************************** */ - /********************************************************/ - /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/ - /* SYSTEM */ - /********************************************************/ - /* THIS RECORD IS ALIGNED TO BE 128 BYTES. */ - /********************************************************/ - struct HostRecord { - HostState hostStatus; - LqhTransState lqhTransStatus; - bool inPackedList; - UintR noOfPackedWordsLqh; - UintR packedWordsLqh[26]; - UintR noOfWordsTCKEYCONF; - UintR packedWordsTCKEYCONF[30]; - UintR noOfWordsTCINDXCONF; - UintR packedWordsTCINDXCONF[30]; - BlockReference hostLqhBlockRef; - - enum NodeFailBits - { - NF_TAKEOVER = 0x1, - NF_CHECK_SCAN = 0x2, - NF_CHECK_TRANSACTION = 0x4, - NF_CHECK_DROP_TAB = 0x8, - NF_NODE_FAIL_BITS = 0xF // All bits... - }; - Uint32 m_nf_bits; - NdbNodeBitmask m_lqh_trans_conf; - }; /* p2c: size = 128 bytes */ - - typedef Ptr HostRecordPtr; - - /* *********** TABLE RECORD ********************************************* */ - - /********************************************************/ - /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */ - /* ALL TABLES IN THE SYSTEM. */ - /********************************************************/ - struct TableRecord { - TableRecord() {} - Uint32 currentSchemaVersion; - Uint16 m_flags; - Uint8 tableType; - Uint8 singleUserMode; - - enum { - TR_ENABLED = 1 << 0, - TR_DROPPING = 1 << 1, - TR_STORED_TABLE = 1 << 2 - }; - Uint8 get_enabled() const { return (m_flags & TR_ENABLED) != 0; } - Uint8 get_dropping() const { return (m_flags & TR_DROPPING) != 0; } - Uint8 get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; } - void set_enabled(Uint8 f) { f ? m_flags |= (Uint16)TR_ENABLED : m_flags &= ~(Uint16)TR_ENABLED; } - void set_dropping(Uint8 f) { f ? m_flags |= (Uint16)TR_DROPPING : m_flags &= ~(Uint16)TR_DROPPING; } - void set_storedTable(Uint8 f) { f ? 
m_flags |= (Uint16)TR_STORED_TABLE : m_flags &= ~(Uint16)TR_STORED_TABLE; } - - Uint8 noOfKeyAttr; - Uint8 hasCharAttr; - Uint8 noOfDistrKeys; - Uint8 hasVarKeys; - - bool checkTable(Uint32 schemaVersion) const { - return get_enabled() && !get_dropping() && - (table_version_major(schemaVersion) == table_version_major(currentSchemaVersion)); - } - - Uint32 getErrorCode(Uint32 schemaVersion) const; - - struct DropTable { - Uint32 senderRef; - Uint32 senderData; - SignalCounter waitDropTabCount; - } dropTable; - }; - typedef Ptr TableRecordPtr; - - /** - * There is max 16 ScanFragRec's for - * each scan started in TC. Each ScanFragRec is used by - * a scan fragment "process" that scans one fragment at a time. - * It will receive max 16 tuples in each request - */ - struct ScanFragRec { - ScanFragRec(){ - stopFragTimer(); - lqhBlockref = 0; - scanFragState = IDLE; - scanRec = RNIL; - } - /** - * ScanFragState - * WAIT_GET_PRIMCONF : Waiting for DIGETPRIMCONF when starting a new - * fragment scan - * LQH_ACTIVE : The scan process has sent a command to LQH and is - * waiting for the response - * LQH_ACTIVE_CLOSE : The scan process has sent close to LQH and is - * waiting for the response - * DELIVERED : The result have been delivered, this scan frag process - * are waiting for a SCAN_NEXTREQ to tell us to continue scanning - * RETURNING_FROM_DELIVERY : SCAN_NEXTREQ received and continuing scan - * soon - * QUEUED_FOR_DELIVERY : Result queued in TC and waiting for delivery - * to API - * COMPLETED : The fragment scan processes has completed and finally - * sent a SCAN_PROCCONF - */ - enum ScanFragState { - IDLE = 0, - WAIT_GET_PRIMCONF = 1, - LQH_ACTIVE = 2, - DELIVERED = 4, - QUEUED_FOR_DELIVERY = 6, - COMPLETED = 7 - }; - // Timer for checking timeout of this fragment scan - Uint32 scanFragTimer; - - // Id of the current scanned fragment - Uint32 scanFragId; - - // Blockreference of LQH - BlockReference lqhBlockref; - - // getNodeInfo.m_connectCount, set at seize used so that - // I don't accidently kill a starting node - Uint32 m_connectCount; - - // State of this fragment scan - ScanFragState scanFragState; - - // Id of the ScanRecord this fragment scan belongs to - Uint32 scanRec; - - // The value of fragmentCompleted in the last received SCAN_FRAGCONF - Uint8 m_scan_frag_conf_status; - - inline void startFragTimer(Uint32 timeVal){ - scanFragTimer = timeVal; - } - inline void stopFragTimer(void){ - scanFragTimer = 0; - } - - Uint32 m_ops; - Uint32 m_chksum; - Uint32 m_apiPtr; - Uint32 m_totalLen; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - - typedef Ptr ScanFragRecPtr; - typedef LocalDLList ScanFragList; - - /** - * Each scan allocates one ScanRecord to store information - * about the current scan - * - */ - struct ScanRecord { - ScanRecord() {} - /** NOTE! This is the old comment for ScanState. - MASV - * STATE TRANSITIONS OF SCAN_STATE. SCAN_STATE IS THE STATE - * VARIABLE OF THE RECEIVE AND DELIVERY PROCESS. - * THE PROCESS HAS THREE STEPS IT GOES THROUGH. - * 1) THE INITIAL STATES WHEN RECEIVING DATA FOR THE SCAN. - * - WAIT_SCAN_TAB_INFO - * - WAIT_AI - * - WAIT_FRAGMENT_COUNT - * 2) THE EXECUTION STATES WHEN THE SCAN IS PERFORMED. - * - SCAN_NEXT_ORDERED - * - DELIVERED - * - QUEUED_DELIVERED - * 3) THE CLOSING STATE WHEN THE SCAN PROCESS IS CLOSING UP - * EVERYTHING. 
- * - CLOSING_SCAN - * INITIAL START WHEN SCAN_TABREQ RECEIVED - * -> WAIT_SCAN_TAB_INFO (IF ANY SCAN_TABINFO TO BE RECEIVED) - * -> WAIT_AI (IF NO SCAN_TAB_INFO BUT ATTRINFO IS RECEIVED) - * -> WAIT_FRAGMENT_COUNT (IF NEITHER SCAN_TABINFO OR ATTRINFO - * RECEIVED) - * - * WAIT_SCAN_TAB_INFO TRANSITIONS: - * -> WAIT_SCAN_TABINFO (WHEN MORE SCAN_TABINFO RECEIVED) - * -> WAIT_AI (WHEN ATTRINFO RECEIVED AFTER RECEIVING ALL - * SCAN_TABINFO) - * -> WAIT_FRAGMENT_COUNT (WHEN NO ATTRINFO RECEIVED AFTER - * RECEIVING ALL SCAN_TABINFO ) - * WAIT_AI TRANSITIONS: - * -> WAIT_AI (WHEN MORE ATTRINFO RECEIVED) - * -> WAIT_FRAGMENT_COUNT (WHEN ALL ATTRINFO RECEIVED) - * - * WAIT_FRAGMENT_COUNT TRANSITIONS: - * -> SCAN_NEXT_ORDERED - * - * SCAN_NEXT_ORDERED TRANSITIONS: - * -> DELIVERED (WHEN FIRST SCAN_FRAGCONF ARRIVES WITH OPERATIONS - * TO REPORT IN IT) - * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME - * ERROR) - * - * DELIVERED TRANSITIONS: - * -> SCAN_NEXT_ORDERED (IF SCAN_NEXTREQ ARRIVES BEFORE ANY NEW - * OPERATIONS TO REPORT ARRIVES) - * -> QUEUED_DELIVERED (IF NEW OPERATION TO REPORT ARRIVES BEFORE - * SCAN_NEXTREQ) - * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY SOME - * ERROR) - * - * QUEUED_DELIVERED TRANSITIONS: - * -> DELIVERED (WHEN SCAN_NEXTREQ ARRIVES AND QUEUED OPERATIONS - * TO REPORT ARE SENT TO THE APPLICATION) - * -> CLOSING_SCAN (WHEN SCAN IS CLOSED BY SCAN_NEXTREQ OR BY - * SOME ERROR) - */ - enum ScanState { - IDLE = 0, - WAIT_SCAN_TAB_INFO = 1, - WAIT_AI = 2, - WAIT_FRAGMENT_COUNT = 3, - RUNNING = 4, - CLOSING_SCAN = 5 - }; - - // State of this scan - ScanState scanState; - - DLList::Head m_running_scan_frags; // Currently in LQH - union { Uint32 m_queued_count; Uint32 scanReceivedOperations; }; - DLList::Head m_queued_scan_frags; // In TC !sent to API - DLList::Head m_delivered_scan_frags;// Delivered to API - - // Id of the next fragment to be scanned. Used by scan fragment - // processes when they are ready for the next fragment - Uint32 scanNextFragId; - - // Total number of fragments in the table we are scanning - Uint32 scanNoFrag; - - // Index of next ScanRecords when in free list - Uint32 nextScan; - - // Length of expected attribute information - union { Uint32 scanAiLength; Uint32 m_booked_fragments_count; }; - - Uint32 scanKeyLen; - - // Reference to ApiConnectRecord - Uint32 scanApiRec; - - // Reference to TcConnectRecord - Uint32 scanTcrec; - - // Number of scan frag processes that belong to this scan - Uint32 scanParallel; - - // Schema version used by this scan - Uint32 scanSchemaVersion; - - // Index of stored procedure belonging to this scan - Uint32 scanStoredProcId; - - // The index of table that is scanned - Uint32 scanTableref; - - // Number of operation records per scanned fragment - // Number of operations in first batch - // Max number of bytes per batch - union { - Uint16 first_batch_size_rows; - Uint16 batch_size_rows; - }; - Uint32 batch_byte_size; - - Uint32 scanRequestInfo; // ScanFrag format - - // Close is ordered - bool m_close_scan_req; - }; - typedef Ptr ScanRecordPtr; - - /* **********************************************************************$ */ - /* ******$ DATA BUFFER ******$ */ - /* */ - /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. 
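The long state comment above boils down to moving each fragment-scan record between the three lists held in ScanRecord as signals arrive: running (request outstanding in LQH), queued (a batch is held in TC because the API has not asked for it yet) and delivered (batch sent to the API, waiting for SCAN_NEXTREQ). A heavily simplified sketch of that bookkeeping, with std::list standing in for the intrusive DLList used here:

#include <cstdio>
#include <list>

// Simplified stand-in for ScanFragRec: which fragment, and the size of its batch.
struct FragScan {
  unsigned fragId;
  unsigned rowsInBatch;
};

struct ScanSketch {
  std::list<FragScan> running;    // request outstanding in LQH
  std::list<FragScan> queued;     // batch received, API has not asked yet
  std::list<FragScan> delivered;  // batch sent to API, waiting for SCAN_NEXTREQ

  // SCAN_FRAGCONF: a batch arrived for the front running fragment.
  void onFragConf(unsigned rows) {
    FragScan f = running.front();
    running.pop_front();
    f.rowsInBatch = rows;
    queued.push_back(f);          // hold it until the API asks for more rows
  }

  // SCAN_NEXTREQ: API wants more; re-issue fragments whose batch was already
  // delivered, and ship one queued batch if there is one.
  void onScanNextReq() {
    while (!delivered.empty()) {
      running.push_back(delivered.front());   // continue scanning that fragment
      delivered.pop_front();
    }
    if (!queued.empty()) {
      std::printf("deliver %u rows from fragment %u\n",
                  queued.front().rowsInBatch, queued.front().fragId);
      delivered.push_back(queued.front());
      queued.pop_front();
    }
  }
};

int main()
{
  ScanSketch s;
  s.running.push_back(FragScan{0, 0});
  s.running.push_back(FragScan{1, 0});
  s.onFragConf(16);      // fragment 0 returns a batch of 16 rows
  s.onScanNextReq();     // API asks: the batch for fragment 0 is delivered
  s.onFragConf(16);      // fragment 1 returns a batch
  s.onScanNextReq();     // fragment 0 is re-issued to LQH, fragment 1 delivered
  return 0;
}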
*/ - /* **********************************************************************$ */ - struct DatabufRecord { - UintR data[4]; - /* 4 * 1 WORD = 4 WORD */ - UintR nextDatabuf; - }; /* p2c: size = 20 bytes */ - - typedef Ptr DatabufRecordPtr; - - /* **********************************************************************$ */ - /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */ - /* - * CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR. - * INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY. - * SOME OF THE ELEMENTS ARE USED TO THE FOLLOWING THINGS: - * DATA LENGHT IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY - * ZINBUF_DATA_LEN. - * NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY - * PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV - * (NOT USED YET). - * NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT. */ - /* ******************************************************************** */ - struct AttrbufRecord { - UintR attrbuf[32]; - }; /* p2c: size = 128 bytes */ - - typedef Ptr AttrbufRecordPtr; - - /*************************************************************************>*/ - /* GLOBAL CHECKPOINT INFORMATION RECORD */ - /* */ - /* THIS RECORD IS USED TO STORE THE GLOBALCHECKPOINT NUMBER AND A - * COUNTER DURING THE COMPLETION PHASE OF THE TRANSACTION */ - /*************************************************************************>*/ - /* */ - /* GCP RECORD ALIGNED TO BE 32 BYTES */ - /*************************************************************************>*/ - struct GcpRecord { - UintR gcpUnused1[2]; /* p2c: Not used */ - UintR firstApiConnect; - UintR lastApiConnect; - UintR gcpId; - UintR nextGcp; - UintR gcpUnused2; /* p2c: Not used */ - Uint16 gcpNomoretransRec; - }; /* p2c: size = 32 bytes */ - - typedef Ptr GcpRecordPtr; - - /*************************************************************************>*/ - /* TC_FAIL_RECORD */ - /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED - * TC NODE. 
*/ - /*************************************************************************>*/ - struct TcFailRecord { - Uint16 queueList[MAX_NDB_NODES]; - Uint8 takeOverProcState[MAX_NDB_NODES]; - UintR completedTakeOver; - UintR currentHashIndexTakeOver; - FailState failStatus; - Uint16 queueIndex; - Uint16 takeOverNode; - }; /* p2c: size = 64 bytes */ - - typedef Ptr TcFailRecordPtr; - -public: - Dbtc(Block_context&); - virtual ~Dbtc(); - -private: - BLOCK_DEFINES(Dbtc); - - // Transit signals - void execPACKED_SIGNAL(Signal* signal); - void execABORTED(Signal* signal); - void execATTRINFO(Signal* signal); - void execCONTINUEB(Signal* signal); - void execKEYINFO(Signal* signal); - void execSCAN_NEXTREQ(Signal* signal); - void execSCAN_PROCREQ(Signal* signal); - void execSCAN_PROCCONF(Signal* signal); - void execTAKE_OVERTCREQ(Signal* signal); - void execTAKE_OVERTCCONF(Signal* signal); - void execLQHKEYREF(Signal* signal); - void execTRANSID_AI_R(Signal* signal); - void execKEYINFO20_R(Signal* signal); - void execROUTE_ORD(Signal* signal); - // Received signals - void execDUMP_STATE_ORD(Signal* signal); - void execSEND_PACKED(Signal* signal); - void execCOMPLETED(Signal* signal); - void execCOMMITTED(Signal* signal); - void execDIGETNODESREF(Signal* signal); - void execDIGETPRIMCONF(Signal* signal); - void execDIGETPRIMREF(Signal* signal); - void execDISEIZECONF(Signal* signal); - void execDIVERIFYCONF(Signal* signal); - void execDI_FCOUNTCONF(Signal* signal); - void execDI_FCOUNTREF(Signal* signal); - void execGCP_NOMORETRANS(Signal* signal); - void execLQHKEYCONF(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execREAD_NODESCONF(Signal* signal); - void execREAD_NODESREF(Signal* signal); - void execSTTOR(Signal* signal); - void execTC_COMMITREQ(Signal* signal); - void execTC_CLOPSIZEREQ(Signal* signal); - void execTCGETOPSIZEREQ(Signal* signal); - void execTCKEYREQ(Signal* signal); - void execTCRELEASEREQ(Signal* signal); - void execTCSEIZEREQ(Signal* signal); - void execTCROLLBACKREQ(Signal* signal); - void execTC_HBREP(Signal* signal); - void execTC_SCHVERREQ(Signal* signal); - void execSCAN_TABREQ(Signal* signal); - void execSCAN_TABINFO(Signal* signal); - void execSCAN_FRAGCONF(Signal* signal); - void execSCAN_FRAGREF(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execLQH_TRANSCONF(Signal* signal); - void execCOMPLETECONF(Signal* signal); - void execCOMMITCONF(Signal* signal); - void execABORTCONF(Signal* signal); - void execNODE_FAILREP(Signal* signal); - void execINCL_NODEREQ(Signal* signal); - void execTIME_SIGNAL(Signal* signal); - void execAPI_FAILREQ(Signal* signal); - void execSCAN_HBREP(Signal* signal); - - void execABORT_ALL_REQ(Signal* signal); - - void execCREATE_TRIG_REQ(Signal* signal); - void execDROP_TRIG_REQ(Signal* signal); - void execFIRE_TRIG_ORD(Signal* signal); - void execTRIG_ATTRINFO(Signal* signal); - void execCREATE_INDX_REQ(Signal* signal); - void execDROP_INDX_REQ(Signal* signal); - void execTCINDXREQ(Signal* signal); - void execINDXKEYINFO(Signal* signal); - void execINDXATTRINFO(Signal* signal); - void execALTER_INDX_REQ(Signal* signal); - - // Index table lookup - void execTCKEYCONF(Signal* signal); - void execTCKEYREF(Signal* signal); - void execTRANSID_AI(Signal* signal); - void execTCROLLBACKREP(Signal* signal); - - void execCREATE_TAB_REQ(Signal* signal); - void execPREP_DROP_TAB_REQ(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - void execWAIT_DROP_TAB_REF(Signal* signal); - void execWAIT_DROP_TAB_CONF(Signal* 
signal); - void checkWaitDropTabFailedLqh(Signal*, Uint32 nodeId, Uint32 tableId); - void execALTER_TAB_REQ(Signal* signal); - void set_timeout_value(Uint32 timeOut); - void set_appl_timeout_value(Uint32 timeOut); - void set_no_parallel_takeover(Uint32); - void updateBuddyTimer(ApiConnectRecordPtr); - - // Statement blocks - void updatePackedList(Signal* signal, HostRecord* ahostptr, - Uint16 ahostIndex); - void clearTcNodeData(Signal* signal, - UintR TLastLqhIndicator, - UintR Tstart); - void errorReport(Signal* signal, int place); - void warningReport(Signal* signal, int place); - void printState(Signal* signal, int place); - int seizeTcRecord(Signal* signal); - int seizeCacheRecord(Signal* signal); - void TCKEY_abort(Signal* signal, int place); - void copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR copyLen); - void reportNodeFailed(Signal* signal, Uint32 nodeId); - void sendPackedTCKEYCONF(Signal* signal, - HostRecord * ahostptr, - UintR hostId); - void sendPackedTCINDXCONF(Signal* signal, - HostRecord * ahostptr, - UintR hostId); - void sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr); - void sendCommitLqh(Signal* signal, - TcConnectRecord * const regTcPtr); - void sendCompleteLqh(Signal* signal, - TcConnectRecord * const regTcPtr); - void sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord *); - void sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord *); - void checkStartTimeout(Signal* signal); - void checkStartFragTimeout(Signal* signal); - void timeOutFoundFragLab(Signal* signal, Uint32 TscanConPtr); - void timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr); - int releaseAndAbort(Signal* signal); - void findApiConnectFail(Signal* signal); - void findTcConnectFail(Signal* signal); - void initApiConnectFail(Signal* signal); - void initTcConnectFail(Signal* signal); - void initTcFail(Signal* signal); - void releaseTakeOver(Signal* signal); - void setupFailData(Signal* signal); - void updateApiStateFail(Signal* signal); - void updateTcStateFail(Signal* signal); - void handleApiFailState(Signal* signal, UintR anApiConnectptr); - void handleFailedApiNode(Signal* signal, - UintR aFailedNode, - UintR anApiConnectPtr); - void handleScanStop(Signal* signal, UintR aFailedNode); - void initScanTcrec(Signal* signal); - void initScanrec(ScanRecordPtr, const class ScanTabReq*, - const UintR scanParallel, - const UintR noOprecPerFrag); - void initScanfragrec(Signal* signal); - void releaseScanResources(ScanRecordPtr, bool not_started = false); - ScanRecordPtr seizeScanrec(Signal* signal); - void sendScanFragReq(Signal*, ScanRecord*, ScanFragRec*); - void sendScanTabConf(Signal* signal, ScanRecordPtr); - void close_scan_req(Signal*, ScanRecordPtr, bool received_req); - void close_scan_req_send_conf(Signal*, ScanRecordPtr); - - void checkGcp(Signal* signal); - void commitGciHandling(Signal* signal, UintR Tgci); - void copyApi(Signal* signal); - void DIVER_node_fail_handling(Signal* signal, UintR Tgci); - void gcpTcfinished(Signal* signal); - void handleGcp(Signal* signal); - void hash(Signal* signal); - bool handle_special_hash(Uint32 dstHash[4], - Uint32* src, Uint32 srcLen, - Uint32 tabPtrI, bool distr); - - void initApiConnect(Signal* signal); - void initApiConnectRec(Signal* signal, - ApiConnectRecord * const regApiPtr, - bool releaseIndexOperations = false); - void initattrbuf(Signal* signal); - void initdatabuf(Signal* signal); - void initgcp(Signal* signal); - void inithost(Signal* signal); - void initialiseScanrec(Signal* signal); - void 
initialiseScanFragrec(Signal* signal); - void initialiseScanOprec(Signal* signal); - void initTable(Signal* signal); - void initialiseTcConnect(Signal* signal); - void linkApiToGcp(Signal* signal); - void linkGciInGcilist(Signal* signal); - void linkKeybuf(Signal* signal); - void linkTcInConnectionlist(Signal* signal); - void releaseAbortResources(Signal* signal); - void releaseApiCon(Signal* signal, UintR aApiConnectPtr); - void releaseApiConCopy(Signal* signal); - void releaseApiConnectFail(Signal* signal); - void releaseAttrinfo(); - void releaseGcp(Signal* signal); - void releaseKeys(); - void releaseDirtyRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*); - void releaseDirtyWrite(Signal* signal); - void releaseTcCon(); - void releaseTcConnectFail(Signal* signal); - void releaseTransResources(Signal* signal); - void saveAttrbuf(Signal* signal); - void seizeApiConnect(Signal* signal); - void seizeApiConnectCopy(Signal* signal); - void seizeApiConnectFail(Signal* signal); - void seizeDatabuf(Signal* signal); - void seizeGcp(Signal* signal); - void seizeTcConnect(Signal* signal); - void seizeTcConnectFail(Signal* signal); - void sendApiCommit(Signal* signal); - void sendAttrinfo(Signal* signal, - UintR TattrinfoPtr, - AttrbufRecord * const regAttrPtr, - UintR TBref); - void sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr); - void sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len); - void sendlqhkeyreq(Signal* signal, BlockReference TBRef); - void sendSystemError(Signal* signal, int line); - void sendtckeyconf(Signal* signal, UintR TcommitFlag); - void sendTcIndxConf(Signal* signal, UintR TcommitFlag); - void unlinkApiConnect(Signal* signal); - void unlinkGcp(Signal* signal); - void unlinkReadyTcCon(Signal* signal); - void handleFailedOperation(Signal* signal, - const LqhKeyRef * const lqhKeyRef, - bool gotLqhKeyRef); - void markOperationAborted(ApiConnectRecord * const regApiPtr, - TcConnectRecord * const regTcPtr); - void clearCommitAckMarker(ApiConnectRecord * const regApiPtr, - TcConnectRecord * const regTcPtr); - // Trigger and index handling - int saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len); - bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp); - int saveINDXATTRINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len); - bool receivedAllINDXATTRINFO(TcIndexOperation* indexOp); - bool saveTRANSID_AI(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len); - bool receivedAllTRANSID_AI(TcIndexOperation* indexOp); - void readIndexTable(Signal* signal, - ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp); - void executeIndexOperation(Signal* signal, - ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp); - bool seizeIndexOperation(ApiConnectRecord* regApiPtr, - TcIndexOperationPtr& indexOpPtr); - void releaseIndexOperation(ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp); - void releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr); - void setupIndexOpReturn(ApiConnectRecord* regApiPtr, - TcConnectRecord* regTcPtr); - - void saveTriggeringOpState(Signal* signal, - TcConnectRecord* trigOp); - void restoreTriggeringOpState(Signal* signal, - TcConnectRecord* trigOp); - void continueTriggeringOp(Signal* signal, - TcConnectRecord* trigOp); - - void scheduleFiredTrigger(ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr); - void executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr); - void executeTrigger(Signal* 
signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr); - void executeIndexTrigger(Signal* signal, - TcDefinedTriggerData* definedTriggerData, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr); - void insertIntoIndexTable(Signal* signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr, - TcIndexData* indexData, - bool holdOperation = false); - void deleteFromIndexTable(Signal* signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr, - TcIndexData* indexData, - bool holdOperation = false); - void releaseFiredTriggerData(DLFifoList* triggers); - // Generated statement blocks - void warningHandlerLab(Signal* signal, int line); - void systemErrorLab(Signal* signal, int line); - void sendSignalErrorRefuseLab(Signal* signal); - void scanTabRefLab(Signal* signal, Uint32 errCode); - void diFcountReqLab(Signal* signal, ScanRecordPtr); - void signalErrorRefuseLab(Signal* signal); - void abort080Lab(Signal* signal); - void packKeyData000Lab(Signal* signal, BlockReference TBRef, Uint32 len); - void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode, - bool not_started = false); - void sendAbortedAfterTimeout(Signal* signal, int Tcheck); - void abort010Lab(Signal* signal); - void abort015Lab(Signal* signal); - void packLqhkeyreq(Signal* signal, BlockReference TBRef); - void packLqhkeyreq040Lab(Signal* signal, - UintR anAttrBufIndex, - BlockReference TBRef); - void packLqhkeyreq040Lab(Signal* signal); - void returnFromQueuedDeliveryLab(Signal* signal); - void startTakeOverLab(Signal* signal); - void toCompleteHandlingLab(Signal* signal); - void toCommitHandlingLab(Signal* signal); - void toAbortHandlingLab(Signal* signal); - void abortErrorLab(Signal* signal); - void nodeTakeOverCompletedLab(Signal* signal); - void ndbsttorry010Lab(Signal* signal); - void commit020Lab(Signal* signal); - void complete010Lab(Signal* signal); - void releaseAtErrorLab(Signal* signal); - void seizeDatabuferrorLab(Signal* signal); - void scanAttrinfoLab(Signal* signal, UintR Tlen); - void seizeAttrbuferrorLab(Signal* signal); - void attrinfoDihReceivedLab(Signal* signal); - void aiErrorLab(Signal* signal); - void attrinfo020Lab(Signal* signal); - void scanReleaseResourcesLab(Signal* signal); - void scanCompletedLab(Signal* signal); - void scanError(Signal* signal, ScanRecordPtr, Uint32 errorCode); - void diverify010Lab(Signal* signal); - void intstartphase2x010Lab(Signal* signal); - void intstartphase3x010Lab(Signal* signal); - void sttorryLab(Signal* signal); - void abortBeginErrorLab(Signal* signal); - void tabStateErrorLab(Signal* signal); - void wrongSchemaVersionErrorLab(Signal* signal); - void noFreeConnectionErrorLab(Signal* signal); - void tckeyreq050Lab(Signal* signal); - void timeOutFoundLab(Signal* signal, UintR anAdd, Uint32 errCode); - void completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd); - void completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd); - void completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd); - void timeOutLoopStartLab(Signal* signal, Uint32 apiConnectPtr); - void initialiseRecordsLab(Signal* signal, UintR Tdata0, Uint32, Uint32); - void tckeyreq020Lab(Signal* signal); - void intstartphase2x020Lab(Signal* signal); - void intstartphase1x010Lab(Signal* signal); - void startphase1x010Lab(Signal* signal); - - void 
lqhKeyConf_checkTransactionState(Signal * signal, - Ptr regApiPtr); - - void checkDropTab(Signal* signal); - - void checkScanActiveInFailedLqh(Signal* signal, - Uint32 scanPtrI, - Uint32 failedNodeId); - void checkScanFragList(Signal*, Uint32 failedNodeId, ScanRecord * scanP, - LocalDLList::Head&); - - void nodeFailCheckTransactions(Signal*,Uint32 transPtrI,Uint32 failedNodeId); - void checkNodeFailComplete(Signal* signal, Uint32 failedNodeId, Uint32 bit); - - // Initialisation - void initData(); - void initRecords(); - - // Transit signals - - - ApiConnectRecord *apiConnectRecord; - ApiConnectRecordPtr apiConnectptr; - UintR capiConnectFilesize; - - TcConnectRecord *tcConnectRecord; - TcConnectRecordPtr tcConnectptr; - UintR ctcConnectFilesize; - - CacheRecord *cacheRecord; - CacheRecordPtr cachePtr; - UintR ccacheFilesize; - - AttrbufRecord *attrbufRecord; - AttrbufRecordPtr attrbufptr; - UintR cattrbufFilesize; - - HostRecord *hostRecord; - HostRecordPtr hostptr; - UintR chostFilesize; - NdbNodeBitmask c_alive_nodes; - - GcpRecord *gcpRecord; - GcpRecordPtr gcpPtr; - UintR cgcpFilesize; - - TableRecord *tableRecord; - UintR ctabrecFilesize; - - UintR thashValue; - UintR tdistrHashValue; - - UintR ttransid_ptr; - UintR cfailure_nr; - UintR coperationsize; - UintR ctcTimer; - UintR cDbHbInterval; - - ApiConnectRecordPtr tmpApiConnectptr; - UintR tcheckGcpId; - - struct TransCounters { - TransCounters() {} - enum { Off, Timer, Started } c_trans_status; - UintR cattrinfoCount; - UintR ctransCount; - UintR ccommitCount; - UintR creadCount; - UintR csimpleReadCount; - UintR cwriteCount; - UintR cabortCount; - UintR cconcurrentOp; - Uint32 c_scan_count; - Uint32 c_range_scan_count; - void reset () { - cattrinfoCount = ctransCount = ccommitCount = creadCount = - csimpleReadCount = cwriteCount = cabortCount = - c_scan_count = c_range_scan_count = 0; - } - Uint32 report(Signal* signal){ - signal->theData[0] = NDB_LE_TransReportCounters; - signal->theData[1] = ctransCount; - signal->theData[2] = ccommitCount; - signal->theData[3] = creadCount; - signal->theData[4] = csimpleReadCount; - signal->theData[5] = cwriteCount; - signal->theData[6] = cattrinfoCount; - signal->theData[7] = cconcurrentOp; - signal->theData[8] = cabortCount; - signal->theData[9] = c_scan_count; - signal->theData[10] = c_range_scan_count; - return 11; - } - } c_counters; - - Uint16 cownNodeid; - Uint16 terrorCode; - - UintR cfirstfreeAttrbuf; - UintR cfirstfreeTcConnect; - UintR cfirstfreeApiConnectCopy; - UintR cfirstfreeCacheRec; - - UintR cfirstgcp; - UintR clastgcp; - UintR cfirstfreeGcp; - UintR cfirstfreeScanrec; - - TableRecordPtr tabptr; - UintR cfirstfreeApiConnectFail; - UintR cfirstfreeApiConnect; - - UintR cfirstfreeDatabuf; - BlockReference cdihblockref; - BlockReference cownref; /* OWN BLOCK REFERENCE */ - - ApiConnectRecordPtr timeOutptr; - - ScanRecord *scanRecord; - UintR cscanrecFileSize; - - UnsafeArrayPool c_scan_frag_pool; - ScanFragRecPtr scanFragptr; - - UintR cscanFragrecFileSize; - UintR cdatabufFilesize; - - BlockReference cdictblockref; - BlockReference cerrorBlockref; - BlockReference clqhblockref; - BlockReference cndbcntrblockref; - - Uint16 csignalKey; - Uint16 csystemnodes; - Uint16 cnodes[4]; - NodeId cmasterNodeId; - UintR cnoParallelTakeOver; - TimeOutCheckState ctimeOutCheckFragActive; - - UintR ctimeOutCheckFragCounter; - UintR ctimeOutCheckCounter; - UintR ctimeOutValue; - UintR ctimeOutCheckDelay; - Uint32 ctimeOutCheckHeartbeat; - Uint32 ctimeOutCheckLastHeartbeat; - Uint32 
ctimeOutMissedHeartbeats; - Uint32 c_appl_timeout_value; - - SystemStartState csystemStart; - TimeOutCheckState ctimeOutCheckActive; - - BlockReference capiFailRef; - UintR cpackedListIndex; - Uint16 cpackedList[MAX_NODES]; - UintR capiConnectClosing[MAX_NODES]; - UintR con_lineNodes; - - DatabufRecord *databufRecord; - DatabufRecordPtr databufptr; - DatabufRecordPtr tmpDatabufptr; - - UintR treqinfo; - UintR ttransid1; - UintR ttransid2; - - UintR tabortInd; - - NodeId tnodeid; - BlockReference tblockref; - - LqhTransConf::OperationStatus ttransStatus; - UintR ttcOprec; - NodeId tfailedNodeId; - Uint8 tcurrentReplicaNo; - Uint8 tpad1; - - UintR tgci; - UintR tapplRef; - UintR tapplOprec; - - UintR tindex; - UintR tmaxData; - UintR tmp; - - UintR tnodes; - BlockReference tusersblkref; - UintR tuserpointer; - UintR tloadCode; - - UintR tconfig1; - UintR tconfig2; - - UintR cdata[32]; - UintR ctransidFailHash[512]; - UintR ctcConnectFailHash[1024]; - - /** - * Commit Ack handling - */ -public: - struct CommitAckMarker { - CommitAckMarker() {} - Uint32 transid1; - Uint32 transid2; - union { Uint32 nextPool; Uint32 nextHash; }; - Uint32 prevHash; - Uint32 apiConnectPtr; - Uint16 apiNodeId; - Uint16 noOfLqhs; - Uint16 lqhNodeId[MAX_REPLICAS]; - - inline bool equal(const CommitAckMarker & p) const { - return ((p.transid1 == transid1) && (p.transid2 == transid2)); - } - - inline Uint32 hashValue() const { - return transid1; - } - }; -private: - typedef Ptr CommitAckMarkerPtr; - typedef DLHashTable::Iterator CommitAckMarkerIterator; - - ArrayPool m_commitAckMarkerPool; - DLHashTable m_commitAckMarkerHash; - - void execTC_COMMIT_ACK(Signal* signal); - void sendRemoveMarkers(Signal*, const CommitAckMarker *); - void sendRemoveMarker(Signal* signal, - NodeId nodeId, - Uint32 transid1, - Uint32 transid2); - void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket); - - bool getAllowStartTransaction(Uint32 nodeId, Uint32 table_single_user_mode) const { - if (unlikely(getNodeState().getSingleUserMode())) - { - if (getNodeState().getSingleUserApi() == nodeId || table_single_user_mode) - return true; - else - return false; - } - return getNodeState().startLevel < NodeState::SL_STOPPING_2; - } - - void checkAbortAllTimeout(Signal* signal, Uint32 sleepTime); - struct AbortAllRecord { - AbortAllRecord(){ clientRef = 0; } - Uint32 clientData; - BlockReference clientRef; - - Uint32 oldTimeOutValue; - }; - AbortAllRecord c_abortRec; - - /************************** API CONNECT RECORD ***********************/ - /* *******************************************************************/ - /* THE API CONNECT RECORD CONTAINS THE CONNECTION RECORD TO WHICH THE*/ - /* APPLICATION CONNECTS. THE APPLICATION CAN SEND ONE OPERATION AT A */ - /* TIME. IT CAN SEND A NEW OPERATION IMMEDIATELY AFTER SENDING THE */ - /* PREVIOUS OPERATION. THEREBY SEVERAL OPERATIONS CAN BE ACTIVE IN */ - /* ONE TRANSACTION WITHIN TC. THIS IS ACHIEVED BY USING THE API */ - /* CONNECT RECORD. EACH ACTIVE OPERATION IS HANDLED BY THE TC */ - /* CONNECT RECORD. AS SOON AS THE TC CONNECT RECORD HAS SENT THE */ - /* REQUEST TO THE LQH IT IS READY TO RECEIVE NEW OPERATIONS. THE */ - /* LQH CONNECT RECORD TAKES CARE OF WAITING FOR AN OPERATION TO */ - /* COMPLETE. WHEN AN OPERATION HAS COMPLETED ON THE LQH CONNECT */ - /* RECORD A NEW OPERATION CAN BE STARTED ON THIS LQH CONNECT RECORD. 
*/ - /*******************************************************************>*/ - /* */ - /* API CONNECT RECORD ALIGNED TO BE 256 BYTES */ - /*******************************************************************>*/ - /************************** TC CONNECT RECORD ************************/ - /* *******************************************************************/ - /* TC CONNECT RECORD KEEPS ALL INFORMATION TO CARRY OUT A TRANSACTION*/ - /* THE TRANSACTION CONTROLLER ESTABLISHES CONNECTIONS TO DIFFERENT */ - /* BLOCKS TO CARRY OUT THE TRANSACTION. THERE CAN BE SEVERAL RECORDS */ - /* PER ACTIVE TRANSACTION. THE TC CONNECT RECORD COOPERATES WITH THE */ - /* API CONNECT RECORD FOR COMMUNICATION WITH THE API AND WITH THE */ - /* LQH CONNECT RECORD FOR COMMUNICATION WITH THE LQH'S INVOLVED IN */ - /* THE TRANSACTION. TC CONNECT RECORD IS PERMANENTLY CONNECTED TO A */ - /* RECORD IN DICT AND ONE IN DIH. IT CONTAINS A LIST OF ACTIVE LQH */ - /* CONNECT RECORDS AND A LIST OF STARTED BUT NOT ACTIVE LQH CONNECT */ - /* RECORDS. IT DOES ALSO CONTAIN A LIST OF ALL OPERATIONS THAT ARE */ - /* EXECUTED WITH THE TC CONNECT RECORD. */ - /*******************************************************************>*/ - /* TC_CONNECT RECORD ALIGNED TO BE 128 BYTES */ - /*******************************************************************>*/ - UintR cfirstfreeTcConnectFail; - - /* POINTER FOR THE LQH RECORD*/ - /* ************************ HOST RECORD ********************************* */ - /********************************************************/ - /* THIS RECORD CONTAINS ALIVE-STATUS ON ALL NODES IN THE*/ - /* SYSTEM */ - /********************************************************/ - /* THIS RECORD IS ALIGNED TO BE 8 BYTES. */ - /********************************************************/ - /* ************************ TABLE RECORD ******************************** */ - /********************************************************/ - /* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */ - /* ALL TABLES IN THE SYSTEM. */ - /********************************************************/ - /*-------------------------------------------------------------------------*/ - /* THE TC CONNECTION USED BY THIS SCAN. */ - /*-------------------------------------------------------------------------*/ - /*-------------------------------------------------------------------------*/ - /* LENGTH READ FOR A PARTICULAR SCANNED OPERATION. */ - /*-------------------------------------------------------------------------*/ - /*-------------------------------------------------------------------------*/ - /* REFERENCE TO THE SCAN RECORD FOR THIS SCAN PROCESS. */ - /*-------------------------------------------------------------------------*/ - /* *********************************************************************** */ - /* ******$ DATA BUFFER ******$ */ - /* */ - /* THIS BUFFER IS USED AS A GENERAL DATA STORAGE. */ - /* *********************************************************************** */ - /* *********************************************************************** */ - /* ******$ ATTRIBUTE INFORMATION RECORD ******$ */ - /* - CAN CONTAIN ONE (1) ATTRINFO SIGNAL. ONE SIGNAL CONTAINS 24 ATTR. - INFO WORDS. BUT 32 ELEMENTS ARE USED TO MAKE PLEX HAPPY. - SOME OF THE ELEMENTS ARE USED TO THE FOLLOWING THINGS: - DATA LENGHT IN THIS RECORD IS STORED IN THE ELEMENT INDEXED BY - ZINBUF_DATA_LEN. - NEXT FREE ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY - PREVIOUS ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_PREV - (NOT USED YET). 
- NEXT ATTRBUF IS POINTED OUT BY THE ELEMENT INDEXED BY ZINBUF_NEXT. - */ - /* ********************************************************************** */ - /**************************************************************************/ - /* GLOBAL CHECKPOINT INFORMATION RECORD */ - /* */ - /* THIS RECORD IS USED TO STORE THE GCP NUMBER AND A COUNTER */ - /* DURING THE COMPLETION PHASE OF THE TRANSACTION */ - /**************************************************************************/ - /* */ - /* GCP RECORD ALIGNED TO BE 32 BYTES */ - /**************************************************************************/ - /**************************************************************************/ - /* TC_FAIL_RECORD */ - /* THIS RECORD IS USED WHEN HANDLING TAKE OVER OF ANOTHER FAILED TC NODE.*/ - /**************************************************************************/ - TcFailRecord *tcFailRecord; - TcFailRecordPtr tcNodeFailptr; - /**************************************************************************/ - // Temporary variables that are not allowed to use for storage between - // signals. They - // can only be used in a signal to transfer values between subroutines. - // In the long run - // those variables should be removed and exchanged for stack - // variable communication. - /**************************************************************************/ - - Uint32 c_gcp_ref; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp deleted file mode 100644 index 22a1d7edb36..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp +++ /dev/null @@ -1,378 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTC_C -#include "Dbtc.hpp" -#include -#include -#include -#include - -#define DEBUG(x) { ndbout << "TC::" << x << endl; } - - -void Dbtc::initData() -{ - cattrbufFilesize = ZATTRBUF_FILESIZE; - capiConnectFilesize = ZAPI_CONNECT_FILESIZE; - ccacheFilesize = ZAPI_CONNECT_FILESIZE; - chostFilesize = MAX_NODES; - cdatabufFilesize = ZDATABUF_FILESIZE; - cgcpFilesize = ZGCP_FILESIZE; - cscanrecFileSize = ZSCANREC_FILE_SIZE; - cscanFragrecFileSize = ZSCAN_FRAGREC_FILE_SIZE; - ctabrecFilesize = ZTABREC_FILESIZE; - ctcConnectFilesize = ZTC_CONNECT_FILESIZE; - cdihblockref = DBDIH_REF; - cdictblockref = DBDICT_REF; - clqhblockref = DBLQH_REF; - cerrorBlockref = NDBCNTR_REF; - - // Records with constant sizes - tcFailRecord = (TcFailRecord*)allocRecord("TcFailRecord", - sizeof(TcFailRecord), 1); - - // Variables - ctcTimer = 0; - - // Trigger and index pools - c_theDefinedTriggerPool.setSize(c_maxNumberOfDefinedTriggers); - c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers); - c_theIndexPool.setSize(c_maxNumberOfIndexes); - c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations); - c_theAttributeBufferPool.setSize(c_transactionBufferSpace); - c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10); -}//Dbtc::initData() - -void Dbtc::initRecords() -{ - void *p; - // Records with dynamic sizes - cacheRecord = (CacheRecord*)allocRecord("CacheRecord", - sizeof(CacheRecord), - ccacheFilesize); - - apiConnectRecord = (ApiConnectRecord*)allocRecord("ApiConnectRecord", - sizeof(ApiConnectRecord), - capiConnectFilesize); - - for(unsigned i = 0; i triggers(c_theFiredTriggerPool); - FiredTriggerPtr tptr; - while(triggers.seize(tptr) == true) { - p= tptr.p; - new (p) TcFiredTriggerData(); - } - triggers.release(); - - /* - // Init all index records - ArrayList indexes(c_theIndexPool); - TcIndexDataPtr iptr; - while(indexes.seize(iptr) == true) { - new (iptr.p) TcIndexData(c_theAttrInfoListPool); - } - indexes.release(); - */ - - // Init all index operation records - SLList indexOps(c_theIndexOperationPool); - TcIndexOperationPtr ioptr; - while(indexOps.seize(ioptr) == true) { - p= ioptr.p; - new (p) TcIndexOperation(c_theAttributeBufferPool); - } - indexOps.release(); - - c_apiConTimer = (UintR*)allocRecord("ApiConTimer", - sizeof(UintR), - capiConnectFilesize); - - c_apiConTimer_line = (UintR*)allocRecord("ApiConTimer_line", - sizeof(UintR), - capiConnectFilesize); - - tcConnectRecord = (TcConnectRecord*)allocRecord("TcConnectRecord", - sizeof(TcConnectRecord), - ctcConnectFilesize); - - m_commitAckMarkerPool.setSize(capiConnectFilesize); - m_commitAckMarkerHash.setSize(512); - - hostRecord = (HostRecord*)allocRecord("HostRecord", - sizeof(HostRecord), - chostFilesize); - - tableRecord = (TableRecord*)allocRecord("TableRecord", - sizeof(TableRecord), - ctabrecFilesize); - - scanRecord = (ScanRecord*)allocRecord("ScanRecord", - sizeof(ScanRecord), - cscanrecFileSize); - - - c_scan_frag_pool.setSize(cscanFragrecFileSize); - { - ScanFragRecPtr ptr; - SLList tmp(c_scan_frag_pool); - while(tmp.seize(ptr)) { - new (ptr.p) ScanFragRec(); - } - tmp.release(); - } - - indexOps.release(); - - databufRecord = (DatabufRecord*)allocRecord("DatabufRecord", - sizeof(DatabufRecord), - cdatabufFilesize); - - attrbufRecord = (AttrbufRecord*)allocRecord("AttrbufRecord", - 
sizeof(AttrbufRecord), - cattrbufFilesize); - - gcpRecord = (GcpRecord*)allocRecord("GcpRecord", - sizeof(GcpRecord), - cgcpFilesize); - -}//Dbtc::initRecords() - -Dbtc::Dbtc(Block_context& ctx): - SimulatedBlock(DBTC, ctx), - c_theDefinedTriggers(c_theDefinedTriggerPool), - c_firedTriggerHash(c_theFiredTriggerPool), - c_maxNumberOfDefinedTriggers(0), - c_maxNumberOfFiredTriggers(0), - c_theIndexes(c_theIndexPool), - c_maxNumberOfIndexes(0), - c_maxNumberOfIndexOperations(0), - m_commitAckMarkerHash(m_commitAckMarkerPool) -{ - BLOCK_CONSTRUCTOR(Dbtc); - - const ndb_mgm_configuration_iterator * p = - ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - Uint32 transactionBufferMemory = 0; - Uint32 maxNoOfIndexes = 0, maxNoOfConcurrentIndexOperations = 0; - Uint32 maxNoOfTriggers = 0, maxNoOfFiredTriggers = 0; - - ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM, - &transactionBufferMemory); - ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, - &maxNoOfIndexes); - ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS, - &maxNoOfConcurrentIndexOperations); - ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, - &maxNoOfTriggers); - ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGER_OPS, - &maxNoOfFiredTriggers); - - c_transactionBufferSpace = - transactionBufferMemory / AttributeBuffer::getSegmentSize(); - c_maxNumberOfIndexes = maxNoOfIndexes; - c_maxNumberOfIndexOperations = maxNoOfConcurrentIndexOperations; - c_maxNumberOfDefinedTriggers = maxNoOfTriggers; - c_maxNumberOfFiredTriggers = maxNoOfFiredTriggers; - - // Transit signals - addRecSignal(GSN_PACKED_SIGNAL, &Dbtc::execPACKED_SIGNAL); - addRecSignal(GSN_ABORTED, &Dbtc::execABORTED); - addRecSignal(GSN_ATTRINFO, &Dbtc::execATTRINFO); - addRecSignal(GSN_CONTINUEB, &Dbtc::execCONTINUEB); - addRecSignal(GSN_KEYINFO, &Dbtc::execKEYINFO); - addRecSignal(GSN_SCAN_NEXTREQ, &Dbtc::execSCAN_NEXTREQ); - addRecSignal(GSN_TAKE_OVERTCREQ, &Dbtc::execTAKE_OVERTCREQ); - addRecSignal(GSN_TAKE_OVERTCCONF, &Dbtc::execTAKE_OVERTCCONF); - addRecSignal(GSN_LQHKEYREF, &Dbtc::execLQHKEYREF); - - // Received signals - - addRecSignal(GSN_DUMP_STATE_ORD, &Dbtc::execDUMP_STATE_ORD); - addRecSignal(GSN_SEND_PACKED, &Dbtc::execSEND_PACKED); - addRecSignal(GSN_SCAN_HBREP, &Dbtc::execSCAN_HBREP); - addRecSignal(GSN_COMPLETED, &Dbtc::execCOMPLETED); - addRecSignal(GSN_COMMITTED, &Dbtc::execCOMMITTED); - addRecSignal(GSN_DIGETPRIMCONF, &Dbtc::execDIGETPRIMCONF); - addRecSignal(GSN_DIGETPRIMREF, &Dbtc::execDIGETPRIMREF); - addRecSignal(GSN_DISEIZECONF, &Dbtc::execDISEIZECONF); - addRecSignal(GSN_DIVERIFYCONF, &Dbtc::execDIVERIFYCONF); - addRecSignal(GSN_DI_FCOUNTCONF, &Dbtc::execDI_FCOUNTCONF); - addRecSignal(GSN_DI_FCOUNTREF, &Dbtc::execDI_FCOUNTREF); - addRecSignal(GSN_GCP_NOMORETRANS, &Dbtc::execGCP_NOMORETRANS); - addRecSignal(GSN_LQHKEYCONF, &Dbtc::execLQHKEYCONF); - addRecSignal(GSN_NDB_STTOR, &Dbtc::execNDB_STTOR); - addRecSignal(GSN_READ_NODESCONF, &Dbtc::execREAD_NODESCONF); - addRecSignal(GSN_READ_NODESREF, &Dbtc::execREAD_NODESREF); - addRecSignal(GSN_STTOR, &Dbtc::execSTTOR); - addRecSignal(GSN_TC_COMMITREQ, &Dbtc::execTC_COMMITREQ); - addRecSignal(GSN_TC_CLOPSIZEREQ, &Dbtc::execTC_CLOPSIZEREQ); - addRecSignal(GSN_TCGETOPSIZEREQ, &Dbtc::execTCGETOPSIZEREQ); - addRecSignal(GSN_TCKEYREQ, &Dbtc::execTCKEYREQ); - addRecSignal(GSN_TCRELEASEREQ, &Dbtc::execTCRELEASEREQ); - addRecSignal(GSN_TCSEIZEREQ, &Dbtc::execTCSEIZEREQ); - addRecSignal(GSN_TCROLLBACKREQ, &Dbtc::execTCROLLBACKREQ); - addRecSignal(GSN_TC_HBREP, &Dbtc::execTC_HBREP); - 
addRecSignal(GSN_TC_SCHVERREQ, &Dbtc::execTC_SCHVERREQ); - addRecSignal(GSN_SCAN_TABREQ, &Dbtc::execSCAN_TABREQ); - addRecSignal(GSN_SCAN_FRAGCONF, &Dbtc::execSCAN_FRAGCONF); - addRecSignal(GSN_SCAN_FRAGREF, &Dbtc::execSCAN_FRAGREF); - addRecSignal(GSN_READ_CONFIG_REQ, &Dbtc::execREAD_CONFIG_REQ, true); - addRecSignal(GSN_LQH_TRANSCONF, &Dbtc::execLQH_TRANSCONF); - addRecSignal(GSN_COMPLETECONF, &Dbtc::execCOMPLETECONF); - addRecSignal(GSN_COMMITCONF, &Dbtc::execCOMMITCONF); - addRecSignal(GSN_ABORTCONF, &Dbtc::execABORTCONF); - addRecSignal(GSN_NODE_FAILREP, &Dbtc::execNODE_FAILREP); - addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ); - addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL); - addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ); - - addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK); - addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ); - - addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtc::execCREATE_TRIG_REQ); - addRecSignal(GSN_DROP_TRIG_REQ, &Dbtc::execDROP_TRIG_REQ); - addRecSignal(GSN_FIRE_TRIG_ORD, &Dbtc::execFIRE_TRIG_ORD); - addRecSignal(GSN_TRIG_ATTRINFO, &Dbtc::execTRIG_ATTRINFO); - - addRecSignal(GSN_CREATE_INDX_REQ, &Dbtc::execCREATE_INDX_REQ); - addRecSignal(GSN_DROP_INDX_REQ, &Dbtc::execDROP_INDX_REQ); - addRecSignal(GSN_TCINDXREQ, &Dbtc::execTCINDXREQ); - addRecSignal(GSN_INDXKEYINFO, &Dbtc::execINDXKEYINFO); - addRecSignal(GSN_INDXATTRINFO, &Dbtc::execINDXATTRINFO); - addRecSignal(GSN_ALTER_INDX_REQ, &Dbtc::execALTER_INDX_REQ); - - addRecSignal(GSN_TRANSID_AI_R, &Dbtc::execTRANSID_AI_R); - addRecSignal(GSN_KEYINFO20_R, &Dbtc::execKEYINFO20_R); - - // Index table lookup - addRecSignal(GSN_TCKEYCONF, &Dbtc::execTCKEYCONF); - addRecSignal(GSN_TCKEYREF, &Dbtc::execTCKEYREF); - addRecSignal(GSN_TRANSID_AI, &Dbtc::execTRANSID_AI); - addRecSignal(GSN_TCROLLBACKREP, &Dbtc::execTCROLLBACKREP); - - //addRecSignal(GSN_CREATE_TAB_REQ, &Dbtc::execCREATE_TAB_REQ); - addRecSignal(GSN_DROP_TAB_REQ, &Dbtc::execDROP_TAB_REQ); - addRecSignal(GSN_PREP_DROP_TAB_REQ, &Dbtc::execPREP_DROP_TAB_REQ); - addRecSignal(GSN_WAIT_DROP_TAB_REF, &Dbtc::execWAIT_DROP_TAB_REF); - addRecSignal(GSN_WAIT_DROP_TAB_CONF, &Dbtc::execWAIT_DROP_TAB_CONF); - - addRecSignal(GSN_ALTER_TAB_REQ, &Dbtc::execALTER_TAB_REQ); - addRecSignal(GSN_ROUTE_ORD, &Dbtc::execROUTE_ORD); - - cacheRecord = 0; - apiConnectRecord = 0; - tcConnectRecord = 0; - hostRecord = 0; - tableRecord = 0; - scanRecord = 0; - databufRecord = 0; - attrbufRecord = 0; - gcpRecord = 0; - tcFailRecord = 0; - c_apiConTimer = 0; - c_apiConTimer_line = 0; - -#ifdef VM_TRACE - { - void* tmp[] = { &apiConnectptr, - &tcConnectptr, - &cachePtr, - &attrbufptr, - &hostptr, - &gcpPtr, - &tmpApiConnectptr, - &timeOutptr, - &scanFragptr, - &databufptr, - &tmpDatabufptr }; - init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0])); - } -#endif - cacheRecord = 0; - apiConnectRecord = 0; - tcConnectRecord = 0; - hostRecord = 0; - tableRecord = 0; - scanRecord = 0; - databufRecord = 0; - attrbufRecord = 0; - gcpRecord = 0; - tcFailRecord = 0; - c_apiConTimer = 0; - c_apiConTimer_line = 0; -}//Dbtc::Dbtc() - -Dbtc::~Dbtc() -{ - // Records with dynamic sizes - deallocRecord((void **)&cacheRecord, "CacheRecord", - sizeof(CacheRecord), - ccacheFilesize); - - deallocRecord((void **)&apiConnectRecord, "ApiConnectRecord", - sizeof(ApiConnectRecord), - capiConnectFilesize); - - deallocRecord((void **)&tcConnectRecord, "TcConnectRecord", - sizeof(TcConnectRecord), - ctcConnectFilesize); - - deallocRecord((void **)&hostRecord, "HostRecord", - 
sizeof(HostRecord), - chostFilesize); - - deallocRecord((void **)&tableRecord, "TableRecord", - sizeof(TableRecord), - ctabrecFilesize); - - deallocRecord((void **)&scanRecord, "ScanRecord", - sizeof(ScanRecord), - cscanrecFileSize); - - deallocRecord((void **)&databufRecord, "DatabufRecord", - sizeof(DatabufRecord), - cdatabufFilesize); - - deallocRecord((void **)&attrbufRecord, "AttrbufRecord", - sizeof(AttrbufRecord), - cattrbufFilesize); - - deallocRecord((void **)&gcpRecord, "GcpRecord", - sizeof(GcpRecord), - cgcpFilesize); - - deallocRecord((void **)&tcFailRecord, "TcFailRecord", - sizeof(TcFailRecord), 1); - - deallocRecord((void **)&c_apiConTimer, "ApiConTimer", - sizeof(UintR), - capiConnectFilesize); - - deallocRecord((void **)&c_apiConTimer_line, "ApiConTimer", - sizeof(UintR), - capiConnectFilesize); -}//Dbtc::~Dbtc() - -BLOCK_FUNCTIONS(Dbtc) - diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp deleted file mode 100644 index f4b0e07854c..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ /dev/null @@ -1,13612 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTC_C - -#include "Dbtc.hpp" -#include "md5_hash.hpp" -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -// Use DEBUG to print messages that should be -// seen only when we debug the product -#ifdef VM_TRACE -#define DEBUG(x) ndbout << "DBTC: "<< x << endl; -#else -#define DEBUG(x) -#endif - -#define INTERNAL_TRIGGER_TCKEYREQ_JBA 0 - -#ifdef VM_TRACE -NdbOut & -operator<<(NdbOut& out, Dbtc::ConnectionState state){ - switch(state){ - case Dbtc::CS_CONNECTED: out << "CS_CONNECTED"; break; - case Dbtc::CS_DISCONNECTED: out << "CS_DISCONNECTED"; break; - case Dbtc::CS_STARTED: out << "CS_STARTED"; break; - case Dbtc::CS_RECEIVING: out << "CS_RECEIVING"; break; - case Dbtc::CS_PREPARED: out << "CS_PREPARED"; break; - case Dbtc::CS_START_PREPARING: out << "CS_START_PREPARING"; break; - case Dbtc::CS_REC_PREPARING: out << "CS_REC_PREPARING"; break; - case Dbtc::CS_RESTART: out << "CS_RESTART"; break; - case Dbtc::CS_ABORTING: out << "CS_ABORTING"; break; - case Dbtc::CS_COMPLETING: out << "CS_COMPLETING"; break; - case Dbtc::CS_COMPLETE_SENT: out << "CS_COMPLETE_SENT"; break; - case Dbtc::CS_PREPARE_TO_COMMIT: out << "CS_PREPARE_TO_COMMIT"; break; - case Dbtc::CS_COMMIT_SENT: out << 
"CS_COMMIT_SENT"; break; - case Dbtc::CS_START_COMMITTING: out << "CS_START_COMMITTING"; break; - case Dbtc::CS_COMMITTING: out << "CS_COMMITTING"; break; - case Dbtc::CS_REC_COMMITTING: out << "CS_REC_COMMITTING"; break; - case Dbtc::CS_WAIT_ABORT_CONF: out << "CS_WAIT_ABORT_CONF"; break; - case Dbtc::CS_WAIT_COMPLETE_CONF: out << "CS_WAIT_COMPLETE_CONF"; break; - case Dbtc::CS_WAIT_COMMIT_CONF: out << "CS_WAIT_COMMIT_CONF"; break; - case Dbtc::CS_FAIL_ABORTING: out << "CS_FAIL_ABORTING"; break; - case Dbtc::CS_FAIL_ABORTED: out << "CS_FAIL_ABORTED"; break; - case Dbtc::CS_FAIL_PREPARED: out << "CS_FAIL_PREPARED"; break; - case Dbtc::CS_FAIL_COMMITTING: out << "CS_FAIL_COMMITTING"; break; - case Dbtc::CS_FAIL_COMMITTED: out << "CS_FAIL_COMMITTED"; break; - case Dbtc::CS_FAIL_COMPLETED: out << "CS_FAIL_COMPLETED"; break; - case Dbtc::CS_START_SCAN: out << "CS_START_SCAN"; break; - default: - out << "Unknown: " << (int)state; break; - } - return out; -} -NdbOut & -operator<<(NdbOut& out, Dbtc::OperationState state){ - out << (int)state; - return out; -} -NdbOut & -operator<<(NdbOut& out, Dbtc::AbortState state){ - out << (int)state; - return out; -} -NdbOut & -operator<<(NdbOut& out, Dbtc::ReturnSignal state){ - out << (int)state; - return out; -} -NdbOut & -operator<<(NdbOut& out, Dbtc::ScanRecord::ScanState state){ - out << (int)state; - return out; -} -NdbOut & -operator<<(NdbOut& out, Dbtc::ScanFragRec::ScanFragState state){ - out << (int)state; - return out; -} -#endif - -void -Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr) -{ - if (apiPtr.p->buddyPtr != RNIL) { - jam(); - ApiConnectRecordPtr buddyApiPtr; - buddyApiPtr.i = apiPtr.p->buddyPtr; - ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord); - if (getApiConTimer(buddyApiPtr.i) != 0) { - if ((apiPtr.p->transid[0] == buddyApiPtr.p->transid[0]) && - (apiPtr.p->transid[1] == buddyApiPtr.p->transid[1])) { - jam(); - setApiConTimer(buddyApiPtr.i, ctcTimer, __LINE__); - } else { - jam(); - // Not a buddy anymore since not the same transid - apiPtr.p->buddyPtr = RNIL; - }//if - }//if - }//if -} - -void Dbtc::execCONTINUEB(Signal* signal) -{ - UintR tcase; - - jamEntry(); - tcase = signal->theData[0]; - UintR Tdata0 = signal->theData[1]; - UintR Tdata1 = signal->theData[2]; - UintR Tdata2 = signal->theData[3]; - switch (tcase) { - case TcContinueB::ZRETURN_FROM_QUEUED_DELIVERY: - jam(); - ndbrequire(false); - return; - case TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER: - jam(); - tcNodeFailptr.i = Tdata0; - ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord); - completeTransAtTakeOverLab(signal, Tdata1); - return; - case TcContinueB::ZCONTINUE_TIME_OUT_CONTROL: - jam(); - timeOutLoopStartLab(signal, Tdata0); - return; - case TcContinueB::ZNODE_TAKE_OVER_COMPLETED: - jam(); - tnodeid = Tdata0; - tcNodeFailptr.i = 0; - ptrAss(tcNodeFailptr, tcFailRecord); - nodeTakeOverCompletedLab(signal); - return; - case TcContinueB::ZINITIALISE_RECORDS: - jam(); - initialiseRecordsLab(signal, Tdata0, Tdata2, signal->theData[4]); - return; - case TcContinueB::ZSEND_COMMIT_LOOP: - jam(); - apiConnectptr.i = Tdata0; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - tcConnectptr.i = Tdata1; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - commit020Lab(signal); - return; - case TcContinueB::ZSEND_COMPLETE_LOOP: - jam(); - apiConnectptr.i = Tdata0; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - tcConnectptr.i = Tdata1; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, 
tcConnectRecord); - complete010Lab(signal); - return; - case TcContinueB::ZHANDLE_FAILED_API_NODE: - jam(); - handleFailedApiNode(signal, Tdata0, Tdata1); - return; - case TcContinueB::ZTRANS_EVENT_REP: - jam(); - /* -------------------------------------------------------------------- */ - // Report information about transaction activity once per second. - /* -------------------------------------------------------------------- */ - if (c_counters.c_trans_status == TransCounters::Timer){ - Uint32 len = c_counters.report(signal); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB); - - c_counters.reset(); - signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1); - } - return; - case TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL: - jam(); - timeOutLoopStartFragLab(signal, Tdata0); - return; - case TcContinueB::ZABORT_BREAK: - jam(); - tcConnectptr.i = Tdata0; - apiConnectptr.i = Tdata1; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->counter--; - abort015Lab(signal); - return; - case TcContinueB::ZABORT_TIMEOUT_BREAK: - jam(); - tcConnectptr.i = Tdata0; - apiConnectptr.i = Tdata1; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->counter--; - sendAbortedAfterTimeout(signal, 1); - return; - case TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS: - jam(); - removeMarkerForFailedAPI(signal, Tdata0, Tdata1); - return; - case TcContinueB::ZWAIT_ABORT_ALL: - jam(); - checkAbortAllTimeout(signal, Tdata0); - return; - case TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH: - jam(); - checkScanActiveInFailedLqh(signal, Tdata0, Tdata1); - return; - case TcContinueB::ZNF_CHECK_TRANSACTIONS: - jam(); - nodeFailCheckTransactions(signal, Tdata0, Tdata1); - return; - case TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH: - jam(); - checkWaitDropTabFailedLqh(signal, Tdata0, Tdata1); - return; - case TcContinueB::TRIGGER_PENDING: - jam(); - ApiConnectRecordPtr transPtr; - transPtr.i = Tdata0; - ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord); - transPtr.p->triggerPending = false; - executeTriggers(signal, &transPtr); - return; - case TcContinueB::DelayTCKEYCONF: - jam(); - apiConnectptr.i = Tdata0; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - sendtckeyconf(signal, Tdata1); - return; - default: - ndbrequire(false); - }//switch -} - -void Dbtc::execDIGETNODESREF(Signal* signal) -{ - jamEntry(); - terrorCode = signal->theData[1]; - releaseAtErrorLab(signal); -} - -void Dbtc::execINCL_NODEREQ(Signal* signal) -{ - jamEntry(); - tblockref = signal->theData[0]; - hostptr.i = signal->theData[1]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - hostptr.p->hostStatus = HS_ALIVE; - c_alive_nodes.set(hostptr.i); - - signal->theData[0] = hostptr.i; - signal->theData[1] = cownref; - - if (ERROR_INSERTED(8039)) - { - CLEAR_ERROR_INSERT_VALUE; - Uint32 save = signal->theData[0]; - signal->theData[0] = 9999; - sendSignal(numberToRef(CMVMI, hostptr.i), - GSN_NDB_TAMPER, signal, 1, JBB); - signal->theData[0] = save; - sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 2); - return; - } - - sendSignal(tblockref, GSN_INCL_NODECONF, signal, 2, JBB); -} - -void Dbtc::execREAD_NODESREF(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -} - -void Dbtc::execTC_SCHVERREQ(Signal* signal) -{ - jamEntry(); - if (! 
assembleFragments(signal)) { - jam(); - return; - } - tabptr.i = signal->theData[0]; - ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord); - tabptr.p->currentSchemaVersion = signal->theData[1]; - tabptr.p->m_flags = 0; - tabptr.p->set_storedTable((bool)signal->theData[2]); - BlockReference retRef = signal->theData[3]; - tabptr.p->tableType = (Uint8)signal->theData[4]; - BlockReference retPtr = signal->theData[5]; - Uint32 noOfKeyAttr = signal->theData[6]; - tabptr.p->singleUserMode = (Uint8)signal->theData[7]; - ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX); - - const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tabptr.i); - ndbrequire(noOfKeyAttr == desc->noOfKeyAttr); - - ndbrequire(tabptr.p->get_enabled() == false); - tabptr.p->set_enabled(true); - tabptr.p->set_dropping(false); - tabptr.p->noOfKeyAttr = desc->noOfKeyAttr; - tabptr.p->hasCharAttr = desc->hasCharAttr; - tabptr.p->noOfDistrKeys = desc->noOfDistrKeys; - tabptr.p->hasVarKeys = desc->noOfVarKeys > 0; - signal->theData[0] = tabptr.i; - signal->theData[1] = retPtr; - sendSignal(retRef, GSN_TC_SCHVERCONF, signal, 2, JBB); -}//Dbtc::execTC_SCHVERREQ() - -void -Dbtc::execPREP_DROP_TAB_REQ(Signal* signal) -{ - jamEntry(); - - PrepDropTabReq* req = (PrepDropTabReq*)signal->getDataPtr(); - - TableRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - - if(!tabPtr.p->get_enabled()){ - jam(); - PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = PrepDropTabRef::NoSuchTable; - sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal, - PrepDropTabRef::SignalLength, JBB); - return; - } - - if(tabPtr.p->get_dropping()){ - jam(); - PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = PrepDropTabRef::DropInProgress; - sendSignal(senderRef, GSN_PREP_DROP_TAB_REF, signal, - PrepDropTabRef::SignalLength, JBB); - return; - } - - tabPtr.p->set_dropping(true); - tabPtr.p->dropTable.senderRef = senderRef; - tabPtr.p->dropTable.senderData = senderData; - - { - WaitDropTabReq * req = (WaitDropTabReq*)signal->getDataPtrSend(); - req->tableId = tabPtr.i; - req->senderRef = reference(); - - HostRecordPtr hostPtr; - tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(); - for (hostPtr.i = 1; hostPtr.i < MAX_NDB_NODES; hostPtr.i++) { - jam(); - ptrAss(hostPtr, hostRecord); - if (hostPtr.p->hostStatus == HS_ALIVE) { - jam(); - tabPtr.p->dropTable.waitDropTabCount.setWaitingFor(hostPtr.i); - sendSignal(calcLqhBlockRef(hostPtr.i), GSN_WAIT_DROP_TAB_REQ, - signal, WaitDropTabReq::SignalLength, JBB); - }//for - }//if - - ndbrequire(tabPtr.p->dropTable.waitDropTabCount.done() != true); - } -} - -void -Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal) -{ - jamEntry(); - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr(); - - TableRecordPtr tabPtr; - tabPtr.i = conf->tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - ndbrequire(tabPtr.p->get_dropping() == true); - Uint32 nodeId = refToNode(conf->senderRef); - tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); - - if(!tabPtr.p->dropTable.waitDropTabCount.done()){ - jam(); - return; - } - - { - PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef 
= reference(); - conf->senderData = tabPtr.p->dropTable.senderData; - sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal, - PrepDropTabConf::SignalLength, JBB); - tabPtr.p->dropTable.senderRef = 0; - } -} - -void -Dbtc::execWAIT_DROP_TAB_REF(Signal* signal) -{ - jamEntry(); - WaitDropTabRef * ref = (WaitDropTabRef*)signal->getDataPtr(); - - TableRecordPtr tabPtr; - tabPtr.i = ref->tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - ndbrequire(tabPtr.p->get_dropping() == true); - Uint32 nodeId = refToNode(ref->senderRef); - tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId); - - ndbrequire(ref->errorCode == WaitDropTabRef::NoSuchTable || - ref->errorCode == WaitDropTabRef::NF_FakeErrorREF); - - if(!tabPtr.p->dropTable.waitDropTabCount.done()){ - jam(); - return; - } - - { - PrepDropTabConf* conf = (PrepDropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - conf->senderData = tabPtr.p->dropTable.senderData; - sendSignal(tabPtr.p->dropTable.senderRef, GSN_PREP_DROP_TAB_CONF, signal, - PrepDropTabConf::SignalLength, JBB); - tabPtr.p->dropTable.senderRef = 0; - } -} - -void -Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId) -{ - - TableRecordPtr tabPtr; - tabPtr.i = tableId; - - WaitDropTabConf * conf = (WaitDropTabConf*)signal->getDataPtr(); - conf->tableId = tableId; - - const Uint32 RT_BREAK = 16; - for(Uint32 i = 0; iget_enabled() && tabPtr.p->get_dropping()){ - if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){ - jam(); - conf->senderRef = calcLqhBlockRef(nodeId); - execWAIT_DROP_TAB_CONF(signal); - tabPtr.i++; - break; - } - } - } - - if(tabPtr.i == ctabrecFilesize){ - /** - * Finished - */ - jam(); - checkNodeFailComplete(signal, nodeId, HostRecord::NF_CHECK_DROP_TAB); - return; - } - - signal->theData[0] = TcContinueB::CHECK_WAIT_DROP_TAB_FAILED_LQH; - signal->theData[1] = nodeId; - signal->theData[2] = tabPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -} - -void -Dbtc::execDROP_TAB_REQ(Signal* signal) -{ - jamEntry(); - - DropTabReq* req = (DropTabReq*)signal->getDataPtr(); - - TableRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType; - - if(!tabPtr.p->get_enabled() && rt == DropTabReq::OnlineDropTab){ - jam(); - DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = DropTabRef::NoSuchTable; - sendSignal(senderRef, GSN_DROP_TAB_REF, signal, - DropTabRef::SignalLength, JBB); - return; - } - - if(!tabPtr.p->get_dropping() && rt == DropTabReq::OnlineDropTab){ - jam(); - DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = senderData; - ref->tableId = tabPtr.i; - ref->errorCode = DropTabRef::DropWoPrep; - sendSignal(senderRef, GSN_DROP_TAB_REF, signal, - DropTabRef::SignalLength, JBB); - return; - } - - tabPtr.p->set_enabled(false); - tabPtr.p->set_dropping(false); - - DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend(); - conf->tableId = tabPtr.i; - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(senderRef, GSN_DROP_TAB_CONF, signal, - PrepDropTabConf::SignalLength, JBB); -} - -void Dbtc::execALTER_TAB_REQ(Signal * signal) -{ - AlterTabReq* 
const req = (AlterTabReq*)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - const Uint32 changeMask = req->changeMask; - const Uint32 tableId = req->tableId; - const Uint32 tableVersion = req->tableVersion; - const Uint32 gci = req->gci; - AlterTabReq::RequestType requestType = - (AlterTabReq::RequestType) req->requestType; - - TableRecordPtr tabPtr; - tabPtr.i = req->tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - tabPtr.p->currentSchemaVersion = tableVersion; - - // Request handled successfully - AlterTabConf * conf = (AlterTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->changeMask = changeMask; - conf->tableId = tableId; - conf->tableVersion = tableVersion; - conf->gci = gci; - conf->requestType = requestType; - sendSignal(senderRef, GSN_ALTER_TAB_CONF, signal, - AlterTabConf::SignalLength, JBB); -} - -/* ***************************************************************************/ -/* START / RESTART */ -/* ***************************************************************************/ -void Dbtc::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - initData(); - - UintR apiConnect; - UintR tcConnect; - UintR tables; - UintR localScan; - UintR tcScan; - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_API_CONNECT, &apiConnect)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TC_CONNECT, &tcConnect)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_TABLE, &tables)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_LOCAL_SCAN, &localScan)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TC_SCAN, &tcScan)); - - ccacheFilesize = (apiConnect/3) + 1; - capiConnectFilesize = apiConnect; - ctcConnectFilesize = tcConnect; - ctabrecFilesize = tables; - cscanrecFileSize = tcScan; - cscanFragrecFileSize = localScan; - - initRecords(); - initialiseRecordsLab(signal, 0, ref, senderData); - - Uint32 val = 3000; - ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val); - set_timeout_value(val); - - val = 1500; - ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &val); - cDbHbInterval = (val < 10) ? 
10 : val; - - val = 3000; - ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val); - set_appl_timeout_value(val); - - val = 1; - //ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_TRANSACTION_TAKEOVER, &val); - set_no_parallel_takeover(val); - - ctimeOutCheckDelay = 50; // 500ms -}//Dbtc::execSIZEALT_REP() - -void Dbtc::execSTTOR(Signal* signal) -{ - Uint16 tphase; - - jamEntry(); - /* START CASE */ - tphase = signal->theData[1]; - csignalKey = signal->theData[6]; - switch (tphase) { - case ZSPH1: - jam(); - startphase1x010Lab(signal); - return; - default: - jam(); - sttorryLab(signal); /* START PHASE 255 */ - return; - }//switch -}//Dbtc::execSTTOR() - -void Dbtc::sttorryLab(Signal* signal) -{ - signal->theData[0] = csignalKey; - signal->theData[1] = 3; /* BLOCK CATEGORY */ - signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */ - signal->theData[3] = ZSPH1; - signal->theData[4] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB); -}//Dbtc::sttorryLab() - -/* ***************************************************************************/ -/* INTERNAL START / RESTART */ -/*****************************************************************************/ -void Dbtc::execNDB_STTOR(Signal* signal) -{ - Uint16 tndbstartphase; - Uint16 tstarttype; - - jamEntry(); - tusersblkref = signal->theData[0]; - tnodeid = signal->theData[1]; - tndbstartphase = signal->theData[2]; /* START PHASE */ - tstarttype = signal->theData[3]; /* START TYPE */ - switch (tndbstartphase) { - case ZINTSPH1: - jam(); - intstartphase1x010Lab(signal); - return; - case ZINTSPH2: - jam(); - intstartphase2x010Lab(signal); - return; - case ZINTSPH3: - jam(); - intstartphase3x010Lab(signal); /* SEIZE CONNECT RECORD IN EACH LQH*/ -// Start transaction event reporting. - c_counters.c_trans_status = TransCounters::Timer; - c_counters.reset(); - signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1); - return; - case ZINTSPH6: - jam(); - csystemStart = SSS_TRUE; - break; - default: - jam(); - break; - }//switch - ndbsttorry010Lab(signal); - return; -}//Dbtc::execNDB_STTOR() - -void Dbtc::ndbsttorry010Lab(Signal* signal) -{ - signal->theData[0] = cownref; - sendSignal(cndbcntrblockref, GSN_NDB_STTORRY, signal, 1, JBB); -}//Dbtc::ndbsttorry010Lab() - -void -Dbtc::set_timeout_value(Uint32 timeOut) -{ - timeOut = timeOut / 10; - if (timeOut < 2) { - jam(); - timeOut = 100; - }//if - ctimeOutValue = timeOut; -} - -void -Dbtc::set_appl_timeout_value(Uint32 timeOut) -{ - if (timeOut) - { - timeOut /= 10; - if (timeOut < ctimeOutValue) { - jam(); - c_appl_timeout_value = ctimeOutValue; - }//if - } - c_appl_timeout_value = timeOut; -} - -void -Dbtc::set_no_parallel_takeover(Uint32 noParallelTakeOver) -{ - if (noParallelTakeOver == 0) { - jam(); - noParallelTakeOver = 1; - } else if (noParallelTakeOver > MAX_NDB_NODES) { - jam(); - noParallelTakeOver = MAX_NDB_NODES; - }//if - cnoParallelTakeOver = noParallelTakeOver; -} - -/* ***************************************************************************/ -/* S T A R T P H A S E 1 X */ -/* INITIALISE BLOCKREF AND BLOCKNUMBERS */ -/* ***************************************************************************/ -void Dbtc::startphase1x010Lab(Signal* signal) -{ - csystemStart = SSS_FALSE; - ctimeOutCheckCounter = 0; - ctimeOutCheckFragCounter = 0; - ctimeOutMissedHeartbeats = 0; - ctimeOutCheckHeartbeat = 0; - ctimeOutCheckLastHeartbeat = 0; - ctimeOutCheckActive = TOCS_FALSE; - ctimeOutCheckFragActive = TOCS_FALSE; - 
sttorryLab(signal); -}//Dbtc::startphase1x010Lab() - -/*****************************************************************************/ -/* I N T S T A R T P H A S E 1 X */ -/* INITIALISE ALL RECORDS. */ -/*****************************************************************************/ -void Dbtc::intstartphase1x010Lab(Signal* signal) -{ - cownNodeid = tnodeid; - cownref = calcTcBlockRef(cownNodeid); - clqhblockref = calcLqhBlockRef(cownNodeid); - cdihblockref = calcDihBlockRef(cownNodeid); - cdictblockref = calcDictBlockRef(cownNodeid); - cndbcntrblockref = calcNdbCntrBlockRef(cownNodeid); - cerrorBlockref = calcNdbCntrBlockRef(cownNodeid); - coperationsize = 0; - cfailure_nr = 0; - ndbsttorry010Lab(signal); -}//Dbtc::intstartphase1x010Lab() - -/*****************************************************************************/ -/* I N T S T A R T P H A S E 2 X */ -/* SET-UP LOCAL CONNECTIONS. */ -/*****************************************************************************/ -void Dbtc::intstartphase2x010Lab(Signal* signal) -{ - tcConnectptr.i = cfirstfreeTcConnect; - intstartphase2x020Lab(signal); -}//Dbtc::intstartphase2x010Lab() - -void Dbtc::intstartphase2x020Lab(Signal* signal) -{ - if (tcConnectptr.i == RNIL) { - jam(); - ndbsttorry010Lab(signal); - return; - }//if - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - tcConnectptr.p->tcConnectstate = OS_CONNECTING_DICT; -/* ****************** */ -/* DISEIZEREQ < */ -/* ****************** */ - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - sendSignal(cdihblockref, GSN_DISEIZEREQ, signal, 2, JBB); -}//Dbtc::intstartphase2x020Lab() - -void Dbtc::execDISEIZECONF(Signal* signal) -{ - jamEntry(); - tcConnectptr.i = signal->theData[0]; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - tcConnectptr.p->dihConnectptr = signal->theData[1]; - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - intstartphase2x020Lab(signal); -}//Dbtc::execDISEIZECONF() - -/*****************************************************************************/ -/* I N T S T A R T P H A S E 3 X */ -/* PREPARE DISTRIBUTED CONNECTIONS */ -/*****************************************************************************/ -void Dbtc::intstartphase3x010Lab(Signal* signal) -{ - signal->theData[0] = cownref; - sendSignal(cndbcntrblockref, GSN_READ_NODESREQ, signal, 1, JBB); -}//Dbtc::intstartphase3x010Lab() - -void Dbtc::execREAD_NODESCONF(Signal* signal) -{ - UintR guard0; - - jamEntry(); - - ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; - - csystemnodes = readNodes->noOfNodes; - cmasterNodeId = readNodes->masterNodeId; - - con_lineNodes = 0; - arrGuard(csystemnodes, MAX_NDB_NODES); - guard0 = csystemnodes - 1; - arrGuard(guard0, MAX_NDB_NODES); // Check not zero nodes - - for (unsigned i = 1; i < MAX_NDB_NODES; i++) { - jam(); - if (NodeBitmask::get(readNodes->allNodes, i)) { - hostptr.i = i; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - - if (NodeBitmask::get(readNodes->inactiveNodes, i)) { - jam(); - hostptr.p->hostStatus = HS_DEAD; - } else { - jam(); - con_lineNodes++; - hostptr.p->hostStatus = HS_ALIVE; - c_alive_nodes.set(i); - }//if - }//if - }//for - ndbsttorry010Lab(signal); -}//Dbtc::execREAD_NODESCONF() - -/*****************************************************************************/ -/* A P I _ F A I L R E Q */ -// An API node has failed for some reason. We need to disconnect all API -// connections to the API node. 
This also includes -/*****************************************************************************/ -void Dbtc::execAPI_FAILREQ(Signal* signal) -{ - /*************************************************************************** - * Set the block reference to return API_FAILCONF to. Set the number of api - * connects currently closing to one to indicate that we are still in the - * process of going through the api connect records. Thus checking for zero - * can only be true after all api connect records have been checked. - **************************************************************************/ - jamEntry(); - capiFailRef = signal->theData[1]; - arrGuard(signal->theData[0], MAX_NODES); - capiConnectClosing[signal->theData[0]] = 1; - handleFailedApiNode(signal, signal->theData[0], (UintR)0); -} - -void -Dbtc::handleFailedApiNode(Signal* signal, - UintR TapiFailedNode, - UintR TapiConnectPtr) -{ - UintR TloopCount = 0; - arrGuard(TapiFailedNode, MAX_NODES); - apiConnectptr.i = TapiConnectPtr; - do { - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - const UintR TapiNode = refToNode(apiConnectptr.p->ndbapiBlockref); - if (TapiNode == TapiFailedNode) { -#ifdef VM_TRACE - if (apiConnectptr.p->apiFailState != ZFALSE) { - ndbout << "Error in previous API fail handling discovered" << endl - << " apiConnectptr.i = " << apiConnectptr.i << endl - << " apiConnectstate = " << apiConnectptr.p->apiConnectstate - << endl - << " ndbapiBlockref = " << hex - << apiConnectptr.p->ndbapiBlockref << endl - << " apiNode = " << refToNode(apiConnectptr.p->ndbapiBlockref) - << endl; - if (apiConnectptr.p->lastTcConnect != RNIL){ - jam(); - tcConnectptr.i = apiConnectptr.p->lastTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - ndbout << " tcConnectptr.i = " << tcConnectptr.i << endl - << " tcConnectstate = " << tcConnectptr.p->tcConnectstate - << endl; - } - }//if -#endif - - apiConnectptr.p->returnsignal = RS_NO_RETURN; - /***********************************************************************/ - // The connected node is the failed node. - /**********************************************************************/ - switch(apiConnectptr.p->apiConnectstate) { - case CS_DISCONNECTED: - /*********************************************************************/ - // These states do not need any special handling. - // Simply continue with the next. - /*********************************************************************/ - jam(); - break; - case CS_ABORTING: - /*********************************************************************/ - // This could actually mean that the API connection is already - // ready to release if the abortState is IDLE. - /*********************************************************************/ - if (apiConnectptr.p->abortState == AS_IDLE) { - jam(); - releaseApiCon(signal, apiConnectptr.i); - } else { - jam(); - capiConnectClosing[TapiFailedNode]++; - apiConnectptr.p->apiFailState = ZTRUE; - }//if - break; - case CS_WAIT_ABORT_CONF: - case CS_WAIT_COMMIT_CONF: - case CS_START_COMMITTING: - case CS_PREPARE_TO_COMMIT: - case CS_COMMITTING: - case CS_COMMIT_SENT: - /*********************************************************************/ - // These states indicate that an abort process or commit process is - // already ongoing. We will set a state in the api record indicating - // that the API node has failed. - // Also we will increase the number of outstanding api records to - // wait for before we can respond with API_FAILCONF. 
- /*********************************************************************/ - jam(); - capiConnectClosing[TapiFailedNode]++; - apiConnectptr.p->apiFailState = ZTRUE; - break; - case CS_START_SCAN: - /*********************************************************************/ - // The api record was performing a scan operation. We need to check - // on the scan state. Since completing a scan process might involve - // sending several signals we will increase the loop count by 64. - /*********************************************************************/ - jam(); - - apiConnectptr.p->apiFailState = ZTRUE; - capiConnectClosing[TapiFailedNode]++; - - ScanRecordPtr scanPtr; - scanPtr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord); - close_scan_req(signal, scanPtr, true); - - TloopCount += 64; - break; - case CS_CONNECTED: - case CS_REC_COMMITTING: - case CS_RECEIVING: - case CS_STARTED: - /*********************************************************************/ - // The api record was in the process of performing a transaction but - // had not yet sent all information. - // We need to initiate an ABORT since the API will not provide any - // more information. - // Since the abort can send many signals we will insert a real-time - // break after checking this record. - /*********************************************************************/ - jam(); - apiConnectptr.p->apiFailState = ZTRUE; - capiConnectClosing[TapiFailedNode]++; - abort010Lab(signal); - TloopCount = 256; - break; - case CS_PREPARED: - jam(); - case CS_REC_PREPARING: - jam(); - case CS_START_PREPARING: - jam(); - /*********************************************************************/ - // Not implemented yet. - /*********************************************************************/ - systemErrorLab(signal, __LINE__); - break; - case CS_RESTART: - jam(); - case CS_COMPLETING: - jam(); - case CS_COMPLETE_SENT: - jam(); - case CS_WAIT_COMPLETE_CONF: - jam(); - case CS_FAIL_ABORTING: - jam(); - case CS_FAIL_ABORTED: - jam(); - case CS_FAIL_PREPARED: - jam(); - case CS_FAIL_COMMITTING: - jam(); - case CS_FAIL_COMMITTED: - /*********************************************************************/ - // These states are only valid on copy and fail API connections. 
- /*********************************************************************/ - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch - } else { - jam(); - }//if - apiConnectptr.i++; - if (apiConnectptr.i > ((capiConnectFilesize / 3) - 1)) { - jam(); - /** - * Finished with scanning connection record - * - * Now scan markers - */ - removeMarkerForFailedAPI(signal, TapiFailedNode, 0); - return; - }//if - } while (TloopCount++ < 256); - signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE; - signal->theData[1] = TapiFailedNode; - signal->theData[2] = apiConnectptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); -}//Dbtc::handleFailedApiNode() - -void -Dbtc::removeMarkerForFailedAPI(Signal* signal, - Uint32 nodeId, - Uint32 startBucket) -{ - TcFailRecordPtr node_fail_ptr; - node_fail_ptr.i = 0; - ptrAss(node_fail_ptr, tcFailRecord); - if(node_fail_ptr.p->failStatus != FS_IDLE) { - jam(); - DEBUG("Restarting removeMarkerForFailedAPI"); - /** - * TC take-over in progress - * needs to restart as this - * creates new markers - */ - signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS; - signal->theData[1] = nodeId; - signal->theData[2] = 0; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 500, 3); - return; - } - - CommitAckMarkerIterator iter; - m_commitAckMarkerHash.next(startBucket, iter); - - const Uint32 RT_BREAK = 256; - for(Uint32 i = 0; itheData[0] = nodeId; - signal->theData[1] = cownref; - sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB); - } - return; - } - - if(iter.curr.p->apiNodeId == nodeId){ - jam(); - - /** - * Check so that the record is not still in use - * - */ - ApiConnectRecordPtr apiConnectPtr; - apiConnectPtr.i = iter.curr.p->apiConnectPtr; - ptrCheckGuard(apiConnectPtr, capiConnectFilesize, apiConnectRecord); - if(apiConnectPtr.p->commitAckMarker == iter.curr.i){ - jam(); - /** - * The record is still active - * - * Don't remove it, but continueb instead - */ - break; - } - sendRemoveMarkers(signal, iter.curr.p); - m_commitAckMarkerHash.release(iter.curr); - - break; - } - m_commitAckMarkerHash.next(iter); - } - - signal->theData[0] = TcContinueB::ZHANDLE_FAILED_API_NODE_REMOVE_MARKERS; - signal->theData[1] = nodeId; - signal->theData[2] = iter.bucket; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); -} - -void Dbtc::handleApiFailState(Signal* signal, UintR TapiConnectptr) -{ - ApiConnectRecordPtr TlocalApiConnectptr; - UintR TfailedApiNode; - - TlocalApiConnectptr.i = TapiConnectptr; - ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord); - TfailedApiNode = refToNode(TlocalApiConnectptr.p->ndbapiBlockref); - arrGuard(TfailedApiNode, MAX_NODES); - capiConnectClosing[TfailedApiNode]--; - releaseApiCon(signal, TapiConnectptr); - TlocalApiConnectptr.p->apiFailState = ZFALSE; - if (capiConnectClosing[TfailedApiNode] == 0) { - jam(); - signal->theData[0] = TfailedApiNode; - signal->theData[1] = cownref; - sendSignal(capiFailRef, GSN_API_FAILCONF, signal, 2, JBB); - }//if -}//Dbtc::handleApiFailState() - -/**************************************************************************** - * T C S E I Z E R E Q - * THE APPLICATION SENDS A REQUEST TO SEIZE A CONNECT RECORD TO CARRY OUT A - * TRANSACTION - * TC BLOCK TAKE OUT A CONNECT RECORD FROM THE FREE LIST AND ESTABLISHES ALL - * NECESSARY CONNECTION BEFORE REPLYING TO THE APPLICATION BLOCK - ****************************************************************************/ -void Dbtc::execTCSEIZEREQ(Signal* signal) -{ - UintR tapiPointer; - 
BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/ - - jamEntry(); - tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/ - tapiBlockref = signal->theData[1]; /* SENDERS BLOCK REFERENCE*/ - - const NodeState::StartLevel sl = - (NodeState::StartLevel)getNodeState().startLevel; - - const NodeId senderNodeId = refToNode(tapiBlockref); - const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0; - - { - { - if (!(sl == NodeState::SL_STARTED || - (sl == NodeState::SL_STARTING && local == true))) { - jam(); - - Uint32 errCode = 0; - if(!local) - { - switch(sl){ - case NodeState::SL_STARTING: - errCode = ZSYSTEM_NOT_STARTED_ERROR; - break; - case NodeState::SL_STOPPING_1: - case NodeState::SL_STOPPING_2: - if (getNodeState().getSingleUserMode()) - break; - case NodeState::SL_STOPPING_3: - case NodeState::SL_STOPPING_4: - if(getNodeState().stopping.systemShutdown) - errCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS; - else - errCode = ZNODE_SHUTDOWN_IN_PROGRESS; - break; - case NodeState::SL_SINGLEUSER: - break; - default: - errCode = ZWRONG_STATE; - break; - } - if (errCode) - { - signal->theData[0] = tapiPointer; - signal->theData[1] = errCode; - sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); - return; - } - }//if (!(sl == SL_SINGLEUSER)) - } //if - } - } - - seizeApiConnect(signal); - if (terrorCode == ZOK) { - jam(); - apiConnectptr.p->ndbapiConnect = tapiPointer; - apiConnectptr.p->ndbapiBlockref = tapiBlockref; - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = apiConnectptr.i; - sendSignal(tapiBlockref, GSN_TCSEIZECONF, signal, 2, JBB); - return; - } - - signal->theData[0] = tapiPointer; - signal->theData[1] = terrorCode; - sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB); -}//Dbtc::execTCSEIZEREQ() - -/****************************************************************************/ -/* T C R E L E A S E Q */ -/* REQUEST TO RELEASE A CONNECT RECORD */ -/****************************************************************************/ -void Dbtc::execTCRELEASEREQ(Signal* signal) -{ - UintR tapiPointer; - BlockReference tapiBlockref; /* SENDER BLOCK REFERENCE*/ - - jamEntry(); - tapiPointer = signal->theData[0]; /* REQUEST SENDERS CONNECT RECORD POINTER*/ - tapiBlockref = signal->theData[1];/* SENDERS BLOCK REFERENCE*/ - tuserpointer = signal->theData[2]; - if (tapiPointer >= capiConnectFilesize) { - jam(); - signal->theData[0] = tuserpointer; - signal->theData[1] = ZINVALID_CONNECTION; - signal->theData[2] = __LINE__; - sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 3, JBB); - return; - } else { - jam(); - apiConnectptr.i = tapiPointer; - }//if - ptrAss(apiConnectptr, apiConnectRecord); - if (apiConnectptr.p->apiConnectstate == CS_DISCONNECTED) { - jam(); - signal->theData[0] = tuserpointer; - sendSignal(tapiBlockref, GSN_TCRELEASECONF, signal, 1, JBB); - } else { - if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) { - if (apiConnectptr.p->apiConnectstate == CS_CONNECTED || - (apiConnectptr.p->apiConnectstate == CS_ABORTING && - apiConnectptr.p->abortState == AS_IDLE) || - (apiConnectptr.p->apiConnectstate == CS_STARTED && - apiConnectptr.p->firstTcConnect == RNIL)) - { - jam(); /* JUST REPLY OK */ - apiConnectptr.p->m_transaction_nodes.clear(); - releaseApiCon(signal, apiConnectptr.i); - signal->theData[0] = tuserpointer; - sendSignal(tapiBlockref, - GSN_TCRELEASECONF, signal, 1, JBB); - } else { - jam(); - signal->theData[0] = tuserpointer; - signal->theData[1] = ZINVALID_CONNECTION; - signal->theData[2] = 
__LINE__;
-      signal->theData[3] = apiConnectptr.p->apiConnectstate;
-      sendSignal(tapiBlockref,
-                 GSN_TCRELEASEREF, signal, 4, JBB);
-      }
-    } else {
-      jam();
-      signal->theData[0] = tuserpointer;
-      signal->theData[1] = ZINVALID_CONNECTION;
-      signal->theData[2] = __LINE__;
-      signal->theData[3] = tapiBlockref;
-      signal->theData[4] = apiConnectptr.p->ndbapiBlockref;
-      sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 5, JBB);
-    }//if
-  }//if
-}//Dbtc::execTCRELEASEREQ()
-
-/****************************************************************************/
-// Error Handling for TCKEYREQ messages
-/****************************************************************************/
-void Dbtc::signalErrorRefuseLab(Signal* signal)
-{
-  ptrGuard(apiConnectptr);
-  if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
-    jam();
-    apiConnectptr.p->abortState = AS_IDLE;
-    apiConnectptr.p->apiConnectstate = CS_ABORTING;
-  }//if
-  sendSignalErrorRefuseLab(signal);
-}//Dbtc::signalErrorRefuseLab()
-
-void Dbtc::sendSignalErrorRefuseLab(Signal* signal)
-{
-  ndbassert(false);
-  ptrGuard(apiConnectptr);
-  if (apiConnectptr.p->apiConnectstate != CS_DISCONNECTED) {
-    jam();
-    ndbrequire(false);
-    signal->theData[0] = apiConnectptr.p->ndbapiConnect;
-    signal->theData[1] = signal->theData[ttransid_ptr];
-    signal->theData[2] = signal->theData[ttransid_ptr + 1];
-    signal->theData[3] = ZSIGNAL_ERROR;
-    sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP,
-               signal, 4, JBB);
-  }
-}//Dbtc::sendSignalErrorRefuseLab()
-
-void Dbtc::abortBeginErrorLab(Signal* signal)
-{
-  apiConnectptr.p->transid[0] = signal->theData[ttransid_ptr];
-  apiConnectptr.p->transid[1] = signal->theData[ttransid_ptr + 1];
-  abortErrorLab(signal);
-}//Dbtc::abortBeginErrorLab()
-
-void Dbtc::printState(Signal* signal, int place)
-{
-#ifdef VM_TRACE // Change to if 0 to disable these printouts
-  ndbout << "-- Dbtc::printState -- " << endl;
-  ndbout << "Received from place = " << place
-         << " apiConnectptr.i = " << apiConnectptr.i
-         << " apiConnectstate = " << apiConnectptr.p->apiConnectstate << endl;
-  ndbout << "ctcTimer = " << ctcTimer
-         << " ndbapiBlockref = " << hex << apiConnectptr.p->ndbapiBlockref
-         << " Transid = " << apiConnectptr.p->transid[0]
-         << " " << apiConnectptr.p->transid[1] << endl;
-  ndbout << " apiTimer = " << getApiConTimer(apiConnectptr.i)
-         << " counter = " << apiConnectptr.p->counter
-         << " lqhkeyconfrec = " << apiConnectptr.p->lqhkeyconfrec
-         << " lqhkeyreqrec = " << apiConnectptr.p->lqhkeyreqrec << endl;
-  ndbout << "abortState = " << apiConnectptr.p->abortState
-         << " apiScanRec = " << apiConnectptr.p->apiScanRec
-         << " returncode = " << apiConnectptr.p->returncode << endl;
-  ndbout << "tckeyrec = " << apiConnectptr.p->tckeyrec
-         << " returnsignal = " << apiConnectptr.p->returnsignal
-         << " apiFailState = " << apiConnectptr.p->apiFailState << endl;
-  if (apiConnectptr.p->cachePtr != RNIL) {
-    jam();
-    CacheRecord *localCacheRecord = cacheRecord;
-    UintR TcacheFilesize = ccacheFilesize;
-    UintR TcachePtr = apiConnectptr.p->cachePtr;
-    if (TcachePtr < TcacheFilesize) {
-      jam();
-      CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr];
-      ndbout << "currReclenAi = " << regCachePtr->currReclenAi
-             << " attrlength = " << regCachePtr->attrlength
-             << " tableref = " << regCachePtr->tableref
-             << " keylen = " << regCachePtr->keylen << endl;
-    } else {
-      jam();
-      systemErrorLab(signal, __LINE__);
-    }//if
-  }//if
-#endif
-  return;
-}//Dbtc::printState()
-
-void
-Dbtc::TCKEY_abort(Signal* signal, int place)
-{
-  switch (place) {
-  case 0:
-
jam(); - terrorCode = ZSTATE_ERROR; - apiConnectptr.p->firstTcConnect = RNIL; - printState(signal, 4); - abortBeginErrorLab(signal); - return; - case 1: - jam(); - printState(signal, 3); - sendSignalErrorRefuseLab(signal); - return; - case 2:{ - printState(signal, 6); - const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0]; - const Uint32 t1 = tcKeyReq->transId1; - const Uint32 t2 = tcKeyReq->transId2; - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = t1; - signal->theData[2] = t2; - signal->theData[3] = ZABORT_ERROR; - ndbrequire(false); - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREP, - signal, 4, JBB); - return; - } - case 3: - jam(); - printState(signal, 7); - noFreeConnectionErrorLab(signal); - return; - case 4: - jam(); - terrorCode = ZERO_KEYLEN_ERROR; - releaseAtErrorLab(signal); - return; - case 5: - jam(); - terrorCode = ZNO_AI_WITH_UPDATE; - releaseAtErrorLab(signal); - return; - case 6: - jam(); - warningHandlerLab(signal, __LINE__); - return; - - case 7: - jam(); - tabStateErrorLab(signal); - return; - - case 8: - jam(); - wrongSchemaVersionErrorLab(signal); - return; - - case 9: - jam(); - terrorCode = ZSTATE_ERROR; - releaseAtErrorLab(signal); - return; - - case 10: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 11: - jam(); - terrorCode = ZMORE_AI_IN_TCKEYREQ_ERROR; - releaseAtErrorLab(signal); - return; - - case 12: - jam(); - terrorCode = ZSIMPLE_READ_WITHOUT_AI; - releaseAtErrorLab(signal); - return; - - case 13: - jam(); - switch (tcConnectptr.p->tcConnectstate) { - case OS_WAIT_KEYINFO: - jam(); - printState(signal, 8); - terrorCode = ZSTATE_ERROR; - abortErrorLab(signal); - return; - default: - jam(); - /********************************************************************/ - /* MISMATCH BETWEEN STATE ON API CONNECTION AND THIS */ - /* PARTICULAR TC CONNECT RECORD. THIS MUST BE CAUSED BY NDB */ - /* INTERNAL ERROR. 
*/ - /********************************************************************/ - systemErrorLab(signal, __LINE__); - return; - }//switch - return; - - case 15: - jam(); - terrorCode = ZSCAN_NODE_ERROR; - releaseAtErrorLab(signal); - return; - - case 16: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 17: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 18: - jam(); - warningHandlerLab(signal, __LINE__); - return; - - case 19: - jam(); - return; - - case 20: - jam(); - warningHandlerLab(signal, __LINE__); - return; - - case 21: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 22: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 23: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 24: - jam(); - seizeAttrbuferrorLab(signal); - return; - - case 25: - jam(); - warningHandlerLab(signal, __LINE__); - return; - - case 26: - jam(); - return; - - case 27: - systemErrorLab(signal, __LINE__); - jam(); - return; - - case 28: - jam(); - // NOT USED - return; - - case 29: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 30: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 31: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 32: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 33: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 34: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 35: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 36: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 37: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 38: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 39: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 40: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 41: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 42: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 43: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 44: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 45: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 46: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 47: - jam(); - terrorCode = apiConnectptr.p->returncode; - releaseAtErrorLab(signal); - return; - - case 48: - jam(); - terrorCode = ZCOMMIT_TYPE_ERROR; - releaseAtErrorLab(signal); - return; - - case 49: - jam(); - abortErrorLab(signal); - return; - - case 50: - jam(); - systemErrorLab(signal, __LINE__); - return; - - case 51: - jam(); - abortErrorLab(signal); - return; - - case 52: - jam(); - abortErrorLab(signal); - return; - - case 53: - jam(); - abortErrorLab(signal); - return; - - case 54: - jam(); - abortErrorLab(signal); - return; - - case 55: - jam(); - printState(signal, 5); - sendSignalErrorRefuseLab(signal); - return; - - case 56:{ - jam(); - terrorCode = ZNO_FREE_TC_MARKER; - abortErrorLab(signal); - return; - } - case 57:{ - jam(); - /** - * Initialize object before starting error handling - */ - initApiConnectRec(signal, apiConnectptr.p, true); -start_failure: - switch(getNodeState().startLevel){ - case NodeState::SL_STOPPING_2: - if (getNodeState().getSingleUserMode()) - { - terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; - break; - } - case NodeState::SL_STOPPING_3: - case NodeState::SL_STOPPING_4: - if(getNodeState().stopping.systemShutdown) - terrorCode = ZCLUSTER_SHUTDOWN_IN_PROGRESS; - else - terrorCode = ZNODE_SHUTDOWN_IN_PROGRESS; - break; - case 
NodeState::SL_SINGLEUSER: - terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; - break; - case NodeState::SL_STOPPING_1: - if (getNodeState().getSingleUserMode()) - { - terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; - break; - } - default: - terrorCode = ZWRONG_STATE; - break; - } - abortErrorLab(signal); - return; - } - - case 58:{ - jam(); - releaseAtErrorLab(signal); - return; - } - - case 59:{ - jam(); - terrorCode = ZABORTINPROGRESS; - abortErrorLab(signal); - return; - } - - case 60: - { - jam(); - initApiConnectRec(signal, apiConnectptr.p, true); - apiConnectptr.p->m_exec_flag = 1; - goto start_failure; - } - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -} - -static -inline -bool -compare_transid(Uint32* val0, Uint32* val1) -{ - Uint32 tmp0 = val0[0] ^ val1[0]; - Uint32 tmp1 = val0[1] ^ val1[1]; - return (tmp0 | tmp1) == 0; -} - -void Dbtc::execKEYINFO(Signal* signal) -{ - jamEntry(); - apiConnectptr.i = signal->theData[0]; - tmaxData = 20; - if (apiConnectptr.i >= capiConnectFilesize) { - TCKEY_abort(signal, 18); - return; - }//if - ptrAss(apiConnectptr, apiConnectRecord); - ttransid_ptr = 1; - if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false) - { - TCKEY_abort(signal, 19); - return; - }//if - switch (apiConnectptr.p->apiConnectstate) { - case CS_RECEIVING: - case CS_REC_COMMITTING: - case CS_START_SCAN: - jam(); - /*empty*/; - break; - /* OK */ - case CS_ABORTING: - jam(); - return; /* IGNORE */ - case CS_CONNECTED: - jam(); - /****************************************************************>*/ - /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */ - /* SET STATE TO ABORTING. */ - /****************************************************************>*/ - printState(signal, 11); - signalErrorRefuseLab(signal); - return; - case CS_STARTED: - jam(); - /****************************************************************>*/ - /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */ - /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */ - /* WE ALSO NEED TO ABORT THIS TRANSACTION. 
*/ - /****************************************************************>*/ - terrorCode = ZSIGNAL_ERROR; - printState(signal, 2); - abortErrorLab(signal); - return; - default: - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//switch - - CacheRecord *localCacheRecord = cacheRecord; - UintR TcacheFilesize = ccacheFilesize; - UintR TcachePtr = apiConnectptr.p->cachePtr; - UintR TtcTimer = ctcTimer; - CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr]; - if (TcachePtr >= TcacheFilesize) { - TCKEY_abort(signal, 42); - return; - }//if - setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - cachePtr.i = TcachePtr; - cachePtr.p = regCachePtr; - - tcConnectptr.i = apiConnectptr.p->lastTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - switch (tcConnectptr.p->tcConnectstate) { - case OS_WAIT_KEYINFO: - jam(); - tckeyreq020Lab(signal); - return; - case OS_WAIT_SCAN: - break; - default: - jam(); - terrorCode = ZSTATE_ERROR; - abortErrorLab(signal); - return; - }//switch - - UintR TdataPos = 0; - UintR TkeyLen = regCachePtr->keylen; - UintR Tlen = regCachePtr->save1; - - do { - if (cfirstfreeDatabuf == RNIL) { - jam(); - seizeDatabuferrorLab(signal); - return; - }//if - linkKeybuf(signal); - arrGuard(TdataPos, 19); - databufptr.p->data[0] = signal->theData[TdataPos + 3]; - databufptr.p->data[1] = signal->theData[TdataPos + 4]; - databufptr.p->data[2] = signal->theData[TdataPos + 5]; - databufptr.p->data[3] = signal->theData[TdataPos + 6]; - Tlen = Tlen + 4; - TdataPos = TdataPos + 4; - if (Tlen < TkeyLen) { - jam(); - if (TdataPos >= tmaxData) { - jam(); - /*----------------------------------------------------*/ - /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/ - /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/ - /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/ - /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/ - /*----------------------------------------------------*/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - regCachePtr->save1 = Tlen; - return; - }//if - } else { - jam(); - return; - }//if - } while (1); - return; -}//Dbtc::execKEYINFO() - -/*---------------------------------------------------------------------------*/ -/* */ -/* MORE THAN FOUR WORDS OF KEY DATA. WE NEED TO PACK THIS IN KEYINFO SIGNALS.*/ -/* WE WILL ALWAYS PACK 4 WORDS AT A TIME. */ -/*---------------------------------------------------------------------------*/ -void Dbtc::packKeyData000Lab(Signal* signal, - BlockReference TBRef, - Uint32 totalLen) -{ - CacheRecord * const regCachePtr = cachePtr.p; - - jam(); - Uint32 len = 0; - databufptr.i = regCachePtr->firstKeybuf; - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - Uint32 * dst = signal->theData+3; - ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord); - - do { - jam(); - databufptr.i = databufptr.p->nextDatabuf; - dst[len + 0] = databufptr.p->data[0]; - dst[len + 1] = databufptr.p->data[1]; - dst[len + 2] = databufptr.p->data[2]; - dst[len + 3] = databufptr.p->data[3]; - len += 4; - if (totalLen <= 4) { - jam(); - /*---------------------------------------------------------------------*/ - /* LAST PACK OF KEY DATA HAVE BEEN SENT */ - /*---------------------------------------------------------------------*/ - /* THERE WERE UNSENT INFORMATION, SEND IT. 
*/ - /*---------------------------------------------------------------------*/ - sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB); - return; - } else if(len == KeyInfo::DataLength){ - jam(); - len = 0; - sendSignal(TBRef, GSN_KEYINFO, signal, 3 + KeyInfo::DataLength, JBB); - } - totalLen -= 4; - ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord); - } while (1); -}//Dbtc::packKeyData000Lab() - -void Dbtc::tckeyreq020Lab(Signal* signal) -{ - CacheRecord * const regCachePtr = cachePtr.p; - UintR TdataPos = 0; - UintR TkeyLen = regCachePtr->keylen; - UintR Tlen = regCachePtr->save1; - - do { - if (cfirstfreeDatabuf == RNIL) { - jam(); - seizeDatabuferrorLab(signal); - return; - }//if - linkKeybuf(signal); - arrGuard(TdataPos, 19); - databufptr.p->data[0] = signal->theData[TdataPos + 3]; - databufptr.p->data[1] = signal->theData[TdataPos + 4]; - databufptr.p->data[2] = signal->theData[TdataPos + 5]; - databufptr.p->data[3] = signal->theData[TdataPos + 6]; - Tlen = Tlen + 4; - TdataPos = TdataPos + 4; - if (Tlen < TkeyLen) { - jam(); - if (TdataPos >= tmaxData) { - jam(); - /*----------------------------------------------------*/ - /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/ - /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/ - /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/ - /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/ - /*----------------------------------------------------*/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - regCachePtr->save1 = Tlen; - tcConnectptr.p->tcConnectstate = OS_WAIT_KEYINFO; - return; - }//if - } else { - jam(); - tckeyreq050Lab(signal); - return; - }//if - } while (1); - return; -}//Dbtc::tckeyreq020Lab() - -/* ------------------------------------------------------------------------- */ -/* ------- SAVE ATTRIBUTE INFORMATION IN OPERATION RECORD ------- */ -/* ------------------------------------------------------------------------- */ -void Dbtc::saveAttrbuf(Signal* signal) -{ - CacheRecord * const regCachePtr = cachePtr.p; - UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf; - UintR TattrbufFilesize = cattrbufFilesize; - UintR TTcfirstAttrbuf = regCachePtr->firstAttrbuf; - UintR Tlen = signal->length() - 3; - AttrbufRecord *localAttrbufRecord = attrbufRecord; - - AttrbufRecord * const regAttrPtr = &localAttrbufRecord[TfirstfreeAttrbuf]; - if (TfirstfreeAttrbuf >= TattrbufFilesize) { - TCKEY_abort(signal, 21); - return; - }//if - UintR Tnext = regAttrPtr->attrbuf[ZINBUF_NEXT]; - if (TTcfirstAttrbuf == RNIL) { - jam(); - regCachePtr->firstAttrbuf = TfirstfreeAttrbuf; - } else { - AttrbufRecordPtr saAttrbufptr; - - saAttrbufptr.i = regCachePtr->lastAttrbuf; - jam(); - if (saAttrbufptr.i >= TattrbufFilesize) { - TCKEY_abort(signal, 22); - return; - }//if - saAttrbufptr.p = &localAttrbufRecord[saAttrbufptr.i]; - saAttrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf; - }//if - - cfirstfreeAttrbuf = Tnext; - regAttrPtr->attrbuf[ZINBUF_NEXT] = RNIL; - regCachePtr->lastAttrbuf = TfirstfreeAttrbuf; - regAttrPtr->attrbuf[ZINBUF_DATA_LEN] = Tlen; - - UintR Tdata1 = signal->theData[3]; - UintR Tdata2 = signal->theData[4]; - UintR Tdata3 = signal->theData[5]; - UintR Tdata4 = signal->theData[6]; - UintR Tdata5 = signal->theData[7]; - UintR Tdata6 = signal->theData[8]; - UintR Tdata7 = signal->theData[9]; - UintR Tdata8 = signal->theData[10]; - - regAttrPtr->attrbuf[0] = Tdata1; - regAttrPtr->attrbuf[1] = Tdata2; - regAttrPtr->attrbuf[2] = Tdata3; - regAttrPtr->attrbuf[3] = Tdata4; - regAttrPtr->attrbuf[4] = Tdata5; - regAttrPtr->attrbuf[5] = 
Tdata6; - regAttrPtr->attrbuf[6] = Tdata7; - regAttrPtr->attrbuf[7] = Tdata8; - - if (Tlen > 8) { - - Tdata1 = signal->theData[11]; - Tdata2 = signal->theData[12]; - Tdata3 = signal->theData[13]; - Tdata4 = signal->theData[14]; - Tdata5 = signal->theData[15]; - Tdata6 = signal->theData[16]; - Tdata7 = signal->theData[17]; - - regAttrPtr->attrbuf[8] = Tdata1; - regAttrPtr->attrbuf[9] = Tdata2; - regAttrPtr->attrbuf[10] = Tdata3; - regAttrPtr->attrbuf[11] = Tdata4; - regAttrPtr->attrbuf[12] = Tdata5; - regAttrPtr->attrbuf[13] = Tdata6; - regAttrPtr->attrbuf[14] = Tdata7; - jam(); - if (Tlen > 15) { - - Tdata1 = signal->theData[18]; - Tdata2 = signal->theData[19]; - Tdata3 = signal->theData[20]; - Tdata4 = signal->theData[21]; - Tdata5 = signal->theData[22]; - Tdata6 = signal->theData[23]; - Tdata7 = signal->theData[24]; - - jam(); - regAttrPtr->attrbuf[15] = Tdata1; - regAttrPtr->attrbuf[16] = Tdata2; - regAttrPtr->attrbuf[17] = Tdata3; - regAttrPtr->attrbuf[18] = Tdata4; - regAttrPtr->attrbuf[19] = Tdata5; - regAttrPtr->attrbuf[20] = Tdata6; - regAttrPtr->attrbuf[21] = Tdata7; - }//if - }//if -}//Dbtc::saveAttrbuf() - -void Dbtc::execATTRINFO(Signal* signal) -{ - UintR Tdata1 = signal->theData[0]; - UintR Tlength = signal->length(); - UintR TapiConnectFilesize = capiConnectFilesize; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - jamEntry(); - apiConnectptr.i = Tdata1; - ttransid_ptr = 1; - if (Tdata1 >= TapiConnectFilesize) { - DEBUG("Drop ATTRINFO, wrong apiConnectptr"); - TCKEY_abort(signal, 18); - return; - }//if - - ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1]; - apiConnectptr.p = regApiPtr; - - if (compare_transid(regApiPtr->transid, signal->theData+1) == false) - { - DEBUG("Drop ATTRINFO, wrong transid, lenght="<theData[1]<<", "<theData[2]); - TCKEY_abort(signal, 19); - return; - }//if - if (Tlength < 4) { - DEBUG("Drop ATTRINFO, wrong length = " << Tlength); - TCKEY_abort(signal, 20); - return; - } - Tlength -= 3; - UintR TcompREC_COMMIT = (regApiPtr->apiConnectstate == CS_REC_COMMITTING); - UintR TcompRECEIVING = (regApiPtr->apiConnectstate == CS_RECEIVING); - UintR TcompBOTH = TcompREC_COMMIT | TcompRECEIVING; - - if (TcompBOTH) { - jam(); - if (ERROR_INSERTED(8015)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - if (ERROR_INSERTED(8016)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - CacheRecord *localCacheRecord = cacheRecord; - UintR TcacheFilesize = ccacheFilesize; - UintR TcachePtr = regApiPtr->cachePtr; - UintR TtcTimer = ctcTimer; - CacheRecord * const regCachePtr = &localCacheRecord[TcachePtr]; - if (TcachePtr >= TcacheFilesize) { - TCKEY_abort(signal, 43); - return; - }//if - UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf; - UintR TcurrReclenAi = regCachePtr->currReclenAi; - UintR TattrLen = regCachePtr->attrlength; - - setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - cachePtr.i = TcachePtr; - cachePtr.p = regCachePtr; - TcurrReclenAi = TcurrReclenAi + Tlength; - regCachePtr->currReclenAi = TcurrReclenAi; - int TattrlengthRemain = TattrLen - TcurrReclenAi; - - if (TfirstfreeAttrbuf == RNIL) { - DEBUG("No more attrinfo buffers"); - TCKEY_abort(signal, 24); - return; - }//if - saveAttrbuf(signal); - if (TattrlengthRemain == 0) { - /****************************************************************>*/ - /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */ - /* OPERATION HAVE BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */ - /* THE API CONNECT RECORD. 
HOWEVER IF PREPARE OR COMMIT HAVE BEEN */ - /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */ - /* OPERATIONS. */ - /****************************************************************>*/ - UintR TlastConnect = regApiPtr->lastTcConnect; - if (TcompRECEIVING) { - jam(); - regApiPtr->apiConnectstate = CS_STARTED; - } else { - jam(); - regApiPtr->apiConnectstate = CS_START_COMMITTING; - }//if - tcConnectptr.i = TlastConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - attrinfoDihReceivedLab(signal); - } else if (TattrlengthRemain < 0) { - jam(); - DEBUG("ATTRINFO wrong total length="<*/ - /* MOST LIKELY CAUSED BY A MISSED SIGNAL. SEND REFUSE AND */ - /* SET STATE TO ABORTING. SINCE A TRANSACTION WAS STARTED */ - /* WE ALSO NEED TO ABORT THIS TRANSACTION. */ - /****************************************************************>*/ - terrorCode = ZSIGNAL_ERROR; - printState(signal, 1); - abortErrorLab(signal); - return; - default: - jam(); - /****************************************************************>*/ - /* SIGNAL RECEIVED IN AN UNEXPECTED STATE. WE IGNORE SIGNAL */ - /* SINCE WE DO NOT REALLY KNOW WHERE THE ERROR OCCURRED. */ - /****************************************************************>*/ - DEBUG("Drop ATTRINFO, illegal state="<apiConnectstate); - printState(signal, 9); - return; - }//switch - }//if -}//Dbtc::execATTRINFO() - -/* *********************************************************************>> */ -/* */ -/* MODULE: HASH MODULE */ -/* DESCRIPTION: CONTAINS THE HASH VALUE CALCULATION */ -/* *********************************************************************> */ -void Dbtc::hash(Signal* signal) -{ - DatabufRecordPtr locDatabufptr; - UintR ti; - UintR Tdata0; - UintR Tdata1; - UintR Tdata2; - UintR Tdata3; - UintR* Tdata32; - - CacheRecord * const regCachePtr = cachePtr.p; - Tdata32 = signal->theData; - - Tdata0 = regCachePtr->keydata[0]; - Tdata1 = regCachePtr->keydata[1]; - Tdata2 = regCachePtr->keydata[2]; - Tdata3 = regCachePtr->keydata[3]; - Tdata32[0] = Tdata0; - Tdata32[1] = Tdata1; - Tdata32[2] = Tdata2; - Tdata32[3] = Tdata3; - if (regCachePtr->keylen > 4) { - locDatabufptr.i = regCachePtr->firstKeybuf; - ti = 4; - while (locDatabufptr.i != RNIL) { - ptrCheckGuard(locDatabufptr, cdatabufFilesize, databufRecord); - Tdata0 = locDatabufptr.p->data[0]; - Tdata1 = locDatabufptr.p->data[1]; - Tdata2 = locDatabufptr.p->data[2]; - Tdata3 = locDatabufptr.p->data[3]; - Tdata32[ti ] = Tdata0; - Tdata32[ti + 1] = Tdata1; - Tdata32[ti + 2] = Tdata2; - Tdata32[ti + 3] = Tdata3; - locDatabufptr.i = locDatabufptr.p->nextDatabuf; - ti += 4; - }//while - }//if - - UintR keylen = (UintR)regCachePtr->keylen; - Uint32 distKey = regCachePtr->distributionKeyIndicator; - - Uint32 tmp[4]; - if(!regCachePtr->m_special_hash) - { - md5_hash(tmp, (Uint64*)&Tdata32[0], keylen); - } - else - { - handle_special_hash(tmp, Tdata32, keylen, regCachePtr->tableref, !distKey); - } - - thashValue = tmp[0]; - if (distKey){ - jam(); - tdistrHashValue = regCachePtr->distributionKey; - } else { - jam(); - tdistrHashValue = tmp[1]; - }//if -}//Dbtc::hash() - -bool -Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen, - Uint32 tabPtrI, - bool distr) -{ - Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY]; - const TableRecord* tabPtrP = &tableRecord[tabPtrI]; - const bool hasVarKeys = tabPtrP->hasVarKeys; - const bool hasCharAttr = tabPtrP->hasCharAttr; - const bool compute_distkey = distr && (tabPtrP->noOfDistrKeys > 0); - - Uint32 *dst = (Uint32*)Tmp; - 
Uint32 dstPos = 0; - Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]; - Uint32 * keyPartLenPtr; - if(hasCharAttr || (compute_distkey && hasVarKeys)) - { - keyPartLenPtr = keyPartLen; - dstPos = xfrm_key(tabPtrI, src, dst, sizeof(Tmp) >> 2, keyPartLenPtr); - if (unlikely(dstPos == 0)) - { - goto error; - } - } - else - { - dst = src; - dstPos = srcLen; - keyPartLenPtr = 0; - } - - md5_hash(dstHash, (Uint64*)dst, dstPos); - - if(compute_distkey) - { - jam(); - - Uint32 tmp[4]; - Uint32 len = create_distr_key(tabPtrI, dst, keyPartLenPtr); - md5_hash(tmp, (Uint64*)dst, len); - dstHash[1] = tmp[1]; - } - return true; // success - -error: - terrorCode = ZINVALID_KEY; - return false; -} - -/* -INIT_API_CONNECT_REC ---------------------------- -*/ -/* ========================================================================= */ -/* ======= INIT_API_CONNECT_REC ======= */ -/* */ -/* ========================================================================= */ -void Dbtc::initApiConnectRec(Signal* signal, - ApiConnectRecord * const regApiPtr, - bool releaseIndexOperations) -{ - const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0]; - UintR TfailureNr = cfailure_nr; - UintR TtransCount = c_counters.ctransCount; - UintR Ttransid0 = tcKeyReq->transId1; - UintR Ttransid1 = tcKeyReq->transId2; - - regApiPtr->m_exec_flag = 0; - regApiPtr->returncode = 0; - regApiPtr->returnsignal = RS_TCKEYCONF; - ndbassert(regApiPtr->firstTcConnect == RNIL); - regApiPtr->firstTcConnect = RNIL; - regApiPtr->lastTcConnect = RNIL; - regApiPtr->globalcheckpointid = 0; - regApiPtr->lqhkeyconfrec = 0; - regApiPtr->lqhkeyreqrec = 0; - regApiPtr->tckeyrec = 0; - regApiPtr->tcindxrec = 0; - regApiPtr->failureNr = TfailureNr; - regApiPtr->transid[0] = Ttransid0; - regApiPtr->transid[1] = Ttransid1; - regApiPtr->commitAckMarker = RNIL; - regApiPtr->buddyPtr = RNIL; - regApiPtr->currSavePointId = 0; - regApiPtr->m_transaction_nodes.clear(); - regApiPtr->singleUserMode = 0; - // Trigger data - releaseFiredTriggerData(®ApiPtr->theFiredTriggers), - // Index data - regApiPtr->indexOpReturn = false; - regApiPtr->noIndexOp = 0; - if(releaseIndexOperations) - releaseAllSeizedIndexOperations(regApiPtr); - - c_counters.ctransCount = TtransCount + 1; -}//Dbtc::initApiConnectRec() - -int -Dbtc::seizeTcRecord(Signal* signal) -{ - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - UintR TfirstfreeTcConnect = cfirstfreeTcConnect; - UintR TtcConnectFilesize = ctcConnectFilesize; - tcConnectptr.i = TfirstfreeTcConnect; - if (TfirstfreeTcConnect >= TtcConnectFilesize) { - int place = 3; - if (TfirstfreeTcConnect != RNIL) { - place = 10; - }//if - TCKEY_abort(signal, place); - return 1; - }//if - //-------------------------------------------------------------------------- - // Optimised version of ptrAss(tcConnectptr, tcConnectRecord) - //-------------------------------------------------------------------------- - TcConnectRecord * const regTcPtr = - &localTcConnectRecord[TfirstfreeTcConnect]; - - UintR TconcurrentOp = c_counters.cconcurrentOp; - UintR TlastTcConnect = regApiPtr->lastTcConnect; - UintR TtcConnectptrIndex = tcConnectptr.i; - TcConnectRecordPtr tmpTcConnectptr; - - cfirstfreeTcConnect = regTcPtr->nextTcConnect; - tcConnectptr.p = regTcPtr; - - c_counters.cconcurrentOp = TconcurrentOp + 1; - regTcPtr->prevTcConnect = TlastTcConnect; - regTcPtr->nextTcConnect = RNIL; - regTcPtr->accumulatingTriggerData.i = RNIL; - regTcPtr->accumulatingTriggerData.p = NULL; - 
regTcPtr->noFiredTriggers = 0; - regTcPtr->noReceivedTriggers = 0; - regTcPtr->triggerExecutionCount = 0; - regTcPtr->triggeringOperation = RNIL; - regTcPtr->isIndexOp = false; - regTcPtr->indexOp = RNIL; - regTcPtr->currentIndexId = RNIL; - - regApiPtr->lastTcConnect = TtcConnectptrIndex; - - if (TlastTcConnect == RNIL) { - jam(); - regApiPtr->firstTcConnect = TtcConnectptrIndex; - } else { - tmpTcConnectptr.i = TlastTcConnect; - jam(); - ptrCheckGuard(tmpTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - tmpTcConnectptr.p->nextTcConnect = TtcConnectptrIndex; - }//if - return 0; -}//Dbtc::seizeTcRecord() - -int -Dbtc::seizeCacheRecord(Signal* signal) -{ - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - UintR TfirstfreeCacheRec = cfirstfreeCacheRec; - UintR TcacheFilesize = ccacheFilesize; - CacheRecord *localCacheRecord = cacheRecord; - if (TfirstfreeCacheRec >= TcacheFilesize) { - TCKEY_abort(signal, 41); - return 1; - }//if - CacheRecord * const regCachePtr = &localCacheRecord[TfirstfreeCacheRec]; - - regApiPtr->cachePtr = TfirstfreeCacheRec; - cfirstfreeCacheRec = regCachePtr->nextCacheRec; - cachePtr.i = TfirstfreeCacheRec; - cachePtr.p = regCachePtr; - -#ifdef VM_TRACE - // This is a good place to check that resources have - // been properly released from CacheRecord - ndbrequire(regCachePtr->firstKeybuf == RNIL); - ndbrequire(regCachePtr->lastKeybuf == RNIL); -#endif - regCachePtr->firstKeybuf = RNIL; - regCachePtr->lastKeybuf = RNIL; - regCachePtr->firstAttrbuf = RNIL; - regCachePtr->lastAttrbuf = RNIL; - regCachePtr->currReclenAi = 0; - return 0; -}//Dbtc::seizeCacheRecord() - -/*****************************************************************************/ -/* T C K E Y R E Q */ -/* AFTER HAVING ESTABLISHED THE CONNECT, THE APPLICATION BLOCK SENDS AN */ -/* OPERATION REQUEST TO TC. ALL NECESSARY INFORMATION TO CARRY OUT REQUEST */ -/* IS FURNISHED IN PARAMETERS. TC STORES THIS INFORMATION AND ENQUIRES */ -/* FROM DIH ABOUT THE NODES WHICH MAY HAVE THE REQUESTED DATA */ -/*****************************************************************************/ -void Dbtc::execTCKEYREQ(Signal* signal) -{ - Uint32 sendersNodeId = refToNode(signal->getSendersBlockRef()); - UintR compare_transid1, compare_transid2; - UintR titcLenAiInTckeyreq; - UintR TkeyLength; - const TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtr(); - UintR Treqinfo; - - jamEntry(); - /*------------------------------------------------------------------------- - * Common error routines are used for several signals, they need to know - * where to find the transaction identifier in the signal. 
- *-------------------------------------------------------------------------*/ - const UintR TapiIndex = tcKeyReq->apiConnectPtr; - const UintR TapiMaxIndex = capiConnectFilesize; - const UintR TtabIndex = tcKeyReq->tableId; - const UintR TtabMaxIndex = ctabrecFilesize; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - ttransid_ptr = 6; - apiConnectptr.i = TapiIndex; - if (TapiIndex >= TapiMaxIndex) { - TCKEY_abort(signal, 6); - return; - }//if - if (TtabIndex >= TtabMaxIndex) { - TCKEY_abort(signal, 7); - return; - }//if - - Treqinfo = tcKeyReq->requestInfo; - //-------------------------------------------------------------------------- - // Optimised version of ptrAss(tabptr, tableRecord) - // Optimised version of ptrAss(apiConnectptr, apiConnectRecord) - //-------------------------------------------------------------------------- - ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex]; - apiConnectptr.p = regApiPtr; - - Uint32 TstartFlag = TcKeyReq::getStartFlag(Treqinfo); - Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo); - - Uint8 isIndexOp = regApiPtr->isIndexOp; - bool isIndexOpReturn = regApiPtr->indexOpReturn; - regApiPtr->isIndexOp = false; // Reset marker - regApiPtr->m_exec_flag |= TexecFlag; - TableRecordPtr localTabptr; - localTabptr.i = TtabIndex; - localTabptr.p = &tableRecord[TtabIndex]; - switch (regApiPtr->apiConnectstate) { - case CS_CONNECTED:{ - if (TstartFlag == 1 && getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ - //--------------------------------------------------------------------- - // Initialise API connect record if transaction is started. - //--------------------------------------------------------------------- - jam(); - initApiConnectRec(signal, regApiPtr); - regApiPtr->m_exec_flag = TexecFlag; - } else { - if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){ - /*------------------------------------------------------------------ - * WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN - * RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO - * ENSURE PROPER OPERATION OF THE COMMON ABORT HANDLING. - *-----------------------------------------------------------------*/ - TCKEY_abort(signal, 0); - return; - } else { - /** - * getAllowStartTransaction(sendersNodeId) == false - */ - TCKEY_abort(signal, TexecFlag ? 60 : 57); - return; - }//if - } - } - break; - case CS_STARTED: - if(TstartFlag == 1 && regApiPtr->firstTcConnect == RNIL) - { - /** - * If last operation in last transaction was a simple/dirty read - * it does not have to be committed or rollbacked hence, - * the state will be CS_STARTED - */ - jam(); - if (unlikely(getNodeState().getSingleUserMode()) && - getNodeState().getSingleUserApi() != sendersNodeId && - !localTabptr.p->singleUserMode) - { - TCKEY_abort(signal, TexecFlag ? 60 : 57); - return; - } - initApiConnectRec(signal, regApiPtr); - regApiPtr->m_exec_flag = TexecFlag; - } else { - //---------------------------------------------------------------------- - // Transaction is started already. - // Check that the operation is on the same transaction. 
- //----------------------------------------------------------------------- - compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1; - compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2; - jam(); - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - TCKEY_abort(signal, 1); - return; - }//if - } - break; - case CS_ABORTING: - if (regApiPtr->abortState == AS_IDLE) { - if (TstartFlag == 1) { - if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == false){ - TCKEY_abort(signal, TexecFlag ? 60 : 57); - return; - } - //-------------------------------------------------------------------- - // Previous transaction had been aborted and the abort was completed. - // It is then OK to start a new transaction again. - //-------------------------------------------------------------------- - jam(); - initApiConnectRec(signal, regApiPtr); - regApiPtr->m_exec_flag = TexecFlag; - } else if(TexecFlag) { - TCKEY_abort(signal, 59); - return; - } else { - //-------------------------------------------------------------------- - // The current transaction was aborted successfully. - // We will not do anything before we receive an operation - // with a start indicator. We will ignore this signal. - //-------------------------------------------------------------------- - jam(); - DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, ==AS_IDLE"); - return; - }//if - } else { - //---------------------------------------------------------------------- - // Previous transaction is still aborting - //---------------------------------------------------------------------- - jam(); - if (TstartFlag == 1) { - //-------------------------------------------------------------------- - // If a new transaction tries to start while the old is - // still aborting, we will report this to the starting API. - //-------------------------------------------------------------------- - TCKEY_abort(signal, 2); - return; - } else if(TexecFlag) { - TCKEY_abort(signal, 59); - return; - } - //---------------------------------------------------------------------- - // Ignore signals without start indicator set when aborting transaction. - //---------------------------------------------------------------------- - DEBUG("Drop TCKEYREQ - apiConnectState=CS_ABORTING, !=AS_IDLE"); - return; - }//if - break; - case CS_START_COMMITTING: - jam(); - if(isIndexOpReturn || TcKeyReq::getExecutingTrigger(Treqinfo)){ - break; - } - default: - jam(); - /*---------------------------------------------------------------------- - * IN THIS CASE THE NDBAPI IS AN UNTRUSTED ENTITY THAT HAS SENT A SIGNAL - * WHEN IT WAS NOT EXPECTED TO. - * WE MIGHT BE IN A PROCESS TO RECEIVE, PREPARE, - * COMMIT OR COMPLETE AND OBVIOUSLY THIS IS NOT A DESIRED EVENT. - * WE WILL ALWAYS COMPLETE THE ABORT HANDLING BEFORE WE ALLOW - * ANYTHING TO HAPPEN ON THIS CONNECTION AGAIN. - * THUS THERE IS NO ACTION FROM THE API THAT CAN SPEED UP THIS PROCESS. - *---------------------------------------------------------------------*/ - TCKEY_abort(signal, 55); - return; - }//switch - - if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) { - ; - } else { - /*-----------------------------------------------------------------------*/ - /* THE API IS WORKING WITH AN OLD SCHEMA VERSION. IT NEEDS REPLACEMENT. */ - /* COULD ALSO BE THAT THE TABLE IS NOT DEFINED. 
*/ - /*-----------------------------------------------------------------------*/ - TCKEY_abort(signal, 8); - return; - }//if - - //------------------------------------------------------------------------- - // Error Insertion for testing purposes. Test to see what happens when no - // more TC records available. - //------------------------------------------------------------------------- - if (ERROR_INSERTED(8032)) { - TCKEY_abort(signal, 3); - return; - }//if - - if (seizeTcRecord(signal) != 0) { - return; - }//if - - if (seizeCacheRecord(signal) != 0) { - return; - }//if - - TcConnectRecord * const regTcPtr = tcConnectptr.p; - CacheRecord * const regCachePtr = cachePtr.p; - - /* - INIT_TC_CONNECT_REC - ------------------------- - */ - /* ---------------------------------------------------------------------- */ - /* ------- INIT OPERATION RECORD WITH SIGNAL DATA AND RNILS ------- */ - /* */ - /* ---------------------------------------------------------------------- */ - - UintR TapiVersionNo = TcKeyReq::getAPIVersion(tcKeyReq->attrLen); - UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec; - regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1; - regCachePtr->apiVersionNo = TapiVersionNo; - - UintR TapiConnectptrIndex = apiConnectptr.i; - UintR TsenderData = tcKeyReq->senderData; - UintR TattrLen = TcKeyReq::getAttrinfoLen(tcKeyReq->attrLen); - UintR TattrinfoCount = c_counters.cattrinfoCount; - - regTcPtr->apiConnect = TapiConnectptrIndex; - regTcPtr->clientData = TsenderData; - regTcPtr->commitAckMarker = RNIL; - regTcPtr->isIndexOp = isIndexOp; - regTcPtr->indexOp = regApiPtr->executingIndexOp; - regTcPtr->savePointId = regApiPtr->currSavePointId; - regApiPtr->executingIndexOp = RNIL; - - regApiPtr->singleUserMode |= 1 << localTabptr.p->singleUserMode; - - if (TcKeyReq::getExecutingTrigger(Treqinfo)) { - // Save the TcOperationPtr for fireing operation - regTcPtr->triggeringOperation = TsenderData; - } - - if (TexecFlag){ - Uint32 currSPId = regApiPtr->currSavePointId; - regApiPtr->currSavePointId = ++currSPId; - } - - regCachePtr->attrlength = TattrLen; - c_counters.cattrinfoCount = TattrinfoCount + TattrLen; - - UintR TtabptrIndex = localTabptr.i; - UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion; - Uint8 TOperationType = TcKeyReq::getOperationType(Treqinfo); - regCachePtr->tableref = TtabptrIndex; - regCachePtr->schemaVersion = TtableSchemaVersion; - regTcPtr->operation = TOperationType; - - Uint8 TSimpleFlag = TcKeyReq::getSimpleFlag(Treqinfo); - Uint8 TDirtyFlag = TcKeyReq::getDirtyFlag(Treqinfo); - Uint8 TInterpretedFlag = TcKeyReq::getInterpretedFlag(Treqinfo); - Uint8 TDistrKeyFlag = TcKeyReq::getDistributionKeyFlag(Treqinfo); - Uint8 TNoDiskFlag = TcKeyReq::getNoDiskFlag(Treqinfo); - Uint8 TexecuteFlag = TexecFlag; - - regTcPtr->dirtyOp = TDirtyFlag; - regTcPtr->opSimple = TSimpleFlag; - regCachePtr->opExec = TInterpretedFlag; - regCachePtr->distributionKeyIndicator = TDistrKeyFlag; - regCachePtr->m_no_disk_flag = TNoDiskFlag; - - //------------------------------------------------------------- - // The next step is to read the upto three conditional words. 
- //------------------------------------------------------------- - Uint32 TkeyIndex; - Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo; - { - Uint32 TDistrGHIndex = TcKeyReq::getScanIndFlag(Treqinfo); - Uint32 TDistrKeyIndex = TDistrGHIndex; - - Uint32 TscanInfo = TcKeyReq::getTakeOverScanInfo(TOptionalDataPtr[0]); - - regCachePtr->scanTakeOverInd = TDistrGHIndex; - regCachePtr->scanInfo = TscanInfo; - - regCachePtr->distributionKey = TOptionalDataPtr[TDistrKeyIndex]; - - TkeyIndex = TDistrKeyIndex + TDistrKeyFlag; - } - Uint32* TkeyDataPtr = &TOptionalDataPtr[TkeyIndex]; - - UintR Tdata1 = TkeyDataPtr[0]; - UintR Tdata2 = TkeyDataPtr[1]; - UintR Tdata3 = TkeyDataPtr[2]; - UintR Tdata4 = TkeyDataPtr[3]; - UintR Tdata5; - - regCachePtr->keydata[0] = Tdata1; - regCachePtr->keydata[1] = Tdata2; - regCachePtr->keydata[2] = Tdata3; - regCachePtr->keydata[3] = Tdata4; - - TkeyLength = TcKeyReq::getKeyLength(Treqinfo); - Uint32 TAIDataIndex; - if (TkeyLength > 8) { - TAIDataIndex = TkeyIndex + 8; - } else { - if (TkeyLength == 0) { - TCKEY_abort(signal, 4); - return; - }//if - TAIDataIndex = TkeyIndex + TkeyLength; - }//if - Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex]; - - titcLenAiInTckeyreq = TcKeyReq::getAIInTcKeyReq(Treqinfo); - regCachePtr->keylen = TkeyLength; - regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq; - regCachePtr->currReclenAi = titcLenAiInTckeyreq; - regCachePtr->m_special_hash = - localTabptr.p->hasCharAttr | (localTabptr.p->noOfDistrKeys > 0); - Tdata1 = TAIDataPtr[0]; - Tdata2 = TAIDataPtr[1]; - Tdata3 = TAIDataPtr[2]; - Tdata4 = TAIDataPtr[3]; - Tdata5 = TAIDataPtr[4]; - - regCachePtr->attrinfo0 = Tdata1; - regCachePtr->attrinfo15[0] = Tdata2; - regCachePtr->attrinfo15[1] = Tdata3; - regCachePtr->attrinfo15[2] = Tdata4; - regCachePtr->attrinfo15[3] = Tdata5; - - if (TOperationType == ZREAD || TOperationType == ZREAD_EX) { - Uint32 TreadCount = c_counters.creadCount; - jam(); - c_counters.creadCount = TreadCount + 1; - } else { - if(regApiPtr->commitAckMarker == RNIL){ - jam(); - CommitAckMarkerPtr tmp; - if(!m_commitAckMarkerHash.seize(tmp)){ - TCKEY_abort(signal, 56); - return; - } else { - regTcPtr->commitAckMarker = tmp.i; - regApiPtr->commitAckMarker = tmp.i; - tmp.p->transid1 = tcKeyReq->transId1; - tmp.p->transid2 = tcKeyReq->transId2; - tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref); - tmp.p->apiConnectPtr = TapiIndex; - tmp.p->noOfLqhs = 0; -#if defined VM_TRACE || defined ERROR_INSERT - { - CommitAckMarkerPtr check; - ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); - } -#endif - m_commitAckMarkerHash.add(tmp); - } - } - - UintR TwriteCount = c_counters.cwriteCount; - UintR Toperationsize = coperationsize; - /* -------------------------------------------------------------------- - * THIS IS A TEMPORARY TABLE, DON'T UPDATE coperationsize. - * THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND - * TEMP TABLES DON'T PARTICIPATE. 
- * -------------------------------------------------------------------- */ - if (localTabptr.p->get_storedTable()) { - coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17; - } - c_counters.cwriteCount = TwriteCount + 1; - switch (TOperationType) { - case ZUPDATE: - case ZINSERT: - case ZDELETE: - case ZWRITE: - jam(); - break; - default: - TCKEY_abort(signal, 9); - return; - }//switch - }//if - - Uint32 TabortOption = TcKeyReq::getAbortOption(Treqinfo); - regTcPtr->m_execAbortOption = TabortOption; - - /*------------------------------------------------------------------------- - * Check error handling per operation - * If CommitFlag is set state accordingly and check for early abort - *------------------------------------------------------------------------*/ - if (TcKeyReq::getCommitFlag(Treqinfo) == 1) { - ndbrequire(TexecuteFlag); - regApiPtr->apiConnectstate = CS_REC_COMMITTING; - } else { - /* --------------------------------------------------------------------- - * PREPARE TRANSACTION IS NOT IMPLEMENTED YET. - * --------------------------------------------------------------------- - * ELSIF (TREQINFO => 3) (*) 1 = 1 THEN - * IF PREPARE TRANSACTION THEN - * API_CONNECTPTR:API_CONNECTSTATE = REC_PREPARING - * SET STATE TO PREPARING - * --------------------------------------------------------------------- */ - if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { - jam(); - // Trigger execution at commit - regApiPtr->apiConnectstate = CS_REC_COMMITTING; - } else { - jam(); - regApiPtr->apiConnectstate = CS_RECEIVING; - }//if - }//if - if (TkeyLength <= 4) { - tckeyreq050Lab(signal); - return; - } else { - if (cfirstfreeDatabuf != RNIL) { - jam(); - linkKeybuf(signal); - Tdata1 = TkeyDataPtr[4]; - Tdata2 = TkeyDataPtr[5]; - Tdata3 = TkeyDataPtr[6]; - Tdata4 = TkeyDataPtr[7]; - - DatabufRecord * const regDataPtr = databufptr.p; - regDataPtr->data[0] = Tdata1; - regDataPtr->data[1] = Tdata2; - regDataPtr->data[2] = Tdata3; - regDataPtr->data[3] = Tdata4; - } else { - jam(); - seizeDatabuferrorLab(signal); - return; - }//if - if (TkeyLength <= 8) { - jam(); - tckeyreq050Lab(signal); - return; - } else { - jam(); - /* -------------------------------------------------------------------- - * THE TCKEYREQ DIDN'T CONTAIN ALL KEY DATA, - * SAVE STATE AND WAIT FOR KEYINFO - * --------------------------------------------------------------------*/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - regCachePtr->save1 = 8; - regTcPtr->tcConnectstate = OS_WAIT_KEYINFO; - return; - }//if - }//if - return; -}//Dbtc::execTCKEYREQ() - -void Dbtc::tckeyreq050Lab(Signal* signal) -{ - UintR tnoOfBackup; - UintR tnoOfStandby; - UintR tnodeinfo; - - terrorCode = 0; - - hash(signal); /* NOW IT IS TIME TO CALCULATE THE HASH VALUE*/ - - if (unlikely(terrorCode)) - { - releaseAtErrorLab(signal); - return; - } - - CacheRecord * const regCachePtr = cachePtr.p; - TcConnectRecord * const regTcPtr = tcConnectptr.p; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - - UintR TtcTimer = ctcTimer; - UintR ThashValue = thashValue; - UintR TdistrHashValue = tdistrHashValue; - UintR TdihConnectptr = regTcPtr->dihConnectptr; - UintR Ttableref = regCachePtr->tableref; - - TableRecordPtr localTabptr; - localTabptr.i = Ttableref; - localTabptr.p = &tableRecord[localTabptr.i]; - Uint32 schemaVersion = regCachePtr->schemaVersion; - if(localTabptr.p->checkTable(schemaVersion)){ - ; - } else { - terrorCode = localTabptr.p->getErrorCode(schemaVersion); - TCKEY_abort(signal, 58); - return; - } - - 
setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - regCachePtr->hashValue = ThashValue; - - signal->theData[0] = TdihConnectptr; - signal->theData[1] = Ttableref; - signal->theData[2] = TdistrHashValue; - signal->theData[3] = 0; - signal->theData[4] = 0; - signal->theData[5] = 0; - signal->theData[6] = 0; - - /*-------------------------------------------------------------*/ - /* FOR EFFICIENCY REASONS WE AVOID THE SIGNAL SENDING HERE AND */ - /* PROCEED IMMEDIATELY TO DIH. IN MULTI-THREADED VERSIONS WE */ - /* HAVE TO INSERT A MUTEX ON DIH TO ENSURE PROPER OPERATION. */ - /* SINCE THIS SIGNAL AND DIVERIFYREQ ARE THE ONLY SIGNALS SENT */ - /* TO DIH IN TRAFFIC IT SHOULD BE OK (3% OF THE EXECUTION TIME */ - /* IS SPENT IN DIH AND EVEN LESS IN REPLICATED NDB. */ - /*-------------------------------------------------------------*/ - EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3); - UintR TerrorIndicator = signal->theData[0]; - jamEntry(); - if (TerrorIndicator != 0) { - execDIGETNODESREF(signal); - return; - } - - if(ERROR_INSERTED(8050) && signal->theData[3] != getOwnNodeId()) - { - ndbassert(false); - signal->theData[1] = 626; - execDIGETNODESREF(signal); - return; - } - - /****************>>*/ - /* DIGETNODESCONF >*/ - /* ***************>*/ - - UintR Tdata1 = signal->theData[1]; - UintR Tdata2 = signal->theData[2]; - UintR Tdata3 = signal->theData[3]; - UintR Tdata4 = signal->theData[4]; - UintR Tdata5 = signal->theData[5]; - UintR Tdata6 = signal->theData[6]; - - regCachePtr->fragmentid = Tdata1; - tnodeinfo = Tdata2; - - regTcPtr->tcNodedata[0] = Tdata3; - regTcPtr->tcNodedata[1] = Tdata4; - regTcPtr->tcNodedata[2] = Tdata5; - regTcPtr->tcNodedata[3] = Tdata6; - - Uint8 Toperation = regTcPtr->operation; - Uint8 Tdirty = regTcPtr->dirtyOp; - tnoOfBackup = tnodeinfo & 3; - tnoOfStandby = (tnodeinfo >> 8) & 3; - - regCachePtr->fragmentDistributionKey = (tnodeinfo >> 16) & 255; - if (Toperation == ZREAD || Toperation == ZREAD_EX) { - if (Tdirty == 1) { - jam(); - /*-------------------------------------------------------------*/ - /* A SIMPLE READ CAN SELECT ANY OF THE PRIMARY AND */ - /* BACKUP NODES TO READ. WE WILL TRY TO SELECT THIS */ - /* NODE IF POSSIBLE TO AVOID UNNECESSARY COMMUNICATION */ - /* WITH SIMPLE READS. */ - /*-------------------------------------------------------------*/ - arrGuard(tnoOfBackup, MAX_REPLICAS); - UintR Tindex; - UintR TownNode = cownNodeid; - for (Tindex = 1; Tindex <= tnoOfBackup; Tindex++) { - UintR Tnode = regTcPtr->tcNodedata[Tindex]; - jam(); - if (Tnode == TownNode) { - jam(); - regTcPtr->tcNodedata[0] = Tnode; - }//if - }//for - if(ERROR_INSERTED(8048) || ERROR_INSERTED(8049)) - { - for (Tindex = 0; Tindex <= tnoOfBackup; Tindex++) - { - UintR Tnode = regTcPtr->tcNodedata[Tindex]; - jam(); - if (Tnode != TownNode) { - jam(); - regTcPtr->tcNodedata[0] = Tnode; - ndbout_c("Choosing %d", Tnode); - }//if - }//for - } - }//if - jam(); - regTcPtr->lastReplicaNo = 0; - regTcPtr->noOfNodes = 1; - } else { - UintR TlastReplicaNo; - jam(); - TlastReplicaNo = tnoOfBackup + tnoOfStandby; - regTcPtr->lastReplicaNo = (Uint8)TlastReplicaNo; - regTcPtr->noOfNodes = (Uint8)(TlastReplicaNo + 1); - }//if - if (regCachePtr->lenAiInTckeyreq == regCachePtr->attrlength) { - /****************************************************************>*/ - /* HERE WE HAVE FOUND THAT THE LAST SIGNAL BELONGING TO THIS */ - /* OPERATION HAVE BEEN RECEIVED. THIS MEANS THAT WE CAN NOW REUSE */ - /* THE API CONNECT RECORD. 
HOWEVER IF PREPARE OR COMMIT HAVE BEEN */ - /* RECEIVED THEN IT IS NOT ALLOWED TO RECEIVE ANY FURTHER */ - /* OPERATIONS. WE KNOW THAT WE WILL WAIT FOR DICT NEXT. IT IS NOT */ - /* POSSIBLE FOR THE TC CONNECTION TO BE READY YET. */ - /****************************************************************>*/ - switch (regApiPtr->apiConnectstate) { - case CS_RECEIVING: - jam(); - regApiPtr->apiConnectstate = CS_STARTED; - break; - case CS_REC_COMMITTING: - jam(); - regApiPtr->apiConnectstate = CS_START_COMMITTING; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch - attrinfoDihReceivedLab(signal); - return; - } else { - if (regCachePtr->lenAiInTckeyreq < regCachePtr->attrlength) { - TtcTimer = ctcTimer; - jam(); - setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - regTcPtr->tcConnectstate = OS_WAIT_ATTR; - return; - } else { - TCKEY_abort(signal, 11); - return; - }//if - }//if - return; -}//Dbtc::tckeyreq050Lab() - -void Dbtc::attrinfoDihReceivedLab(Signal* signal) -{ - CacheRecord * const regCachePtr = cachePtr.p; - TcConnectRecord * const regTcPtr = tcConnectptr.p; - Uint16 Tnode = regTcPtr->tcNodedata[0]; - - TableRecordPtr localTabptr; - localTabptr.i = regCachePtr->tableref; - localTabptr.p = &tableRecord[localTabptr.i]; - - if(localTabptr.p->checkTable(regCachePtr->schemaVersion)){ - ; - } else { - terrorCode = localTabptr.p->getErrorCode(regCachePtr->schemaVersion); - TCKEY_abort(signal, 58); - return; - } - arrGuard(Tnode, MAX_NDB_NODES); - packLqhkeyreq(signal, calcLqhBlockRef(Tnode)); -}//Dbtc::attrinfoDihReceivedLab() - -void Dbtc::packLqhkeyreq(Signal* signal, - BlockReference TBRef) -{ - CacheRecord * const regCachePtr = cachePtr.p; - UintR Tkeylen = regCachePtr->keylen; - UintR TfirstAttrbuf = regCachePtr->firstAttrbuf; - sendlqhkeyreq(signal, TBRef); - if (Tkeylen > 4) { - packKeyData000Lab(signal, TBRef, Tkeylen - 4); - releaseKeys(); - }//if - packLqhkeyreq040Lab(signal, - TfirstAttrbuf, - TBRef); -}//Dbtc::packLqhkeyreq() - -void Dbtc::sendlqhkeyreq(Signal* signal, - BlockReference TBRef) -{ - UintR tslrAttrLen; - UintR Tdata10; - TcConnectRecord * const regTcPtr = tcConnectptr.p; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - CacheRecord * const regCachePtr = cachePtr.p; - Uint32 version = getNodeInfo(refToNode(TBRef)).m_version; - UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6; -#ifdef ERROR_INSERT - if (ERROR_INSERTED(8002)) { - systemErrorLab(signal, __LINE__); - }//if - if (ERROR_INSERTED(8007)) { - if (apiConnectptr.p->apiConnectstate == CS_STARTED) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8008)) { - if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8009)) { - if (apiConnectptr.p->apiConnectstate == CS_STARTED) { - return; - }//if - }//if - if (ERROR_INSERTED(8010)) { - if (apiConnectptr.p->apiConnectstate == CS_START_COMMITTING) { - return; - }//if - }//if -#endif - - tslrAttrLen = 0; - LqhKeyReq::setAttrLen(tslrAttrLen, regCachePtr->attrlength); - /* ---------------------------------------------------------------------- */ - // Bit16 == 0 since StoredProcedures are not yet supported. 
- /* ---------------------------------------------------------------------- */ - LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->fragmentDistributionKey); - LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd); - - Tdata10 = 0; - sig0 = regTcPtr->opSimple; - sig1 = regTcPtr->operation; - sig2 = regTcPtr->dirtyOp; - bool dirtyRead = (sig1 == ZREAD && sig2 == ZTRUE); - LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen); - LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo); - if (unlikely(version < NDBD_ROWID_VERSION)) - { - Uint32 op = regTcPtr->operation; - Uint32 lock = (Operation_t) op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op; - LqhKeyReq::setLockType(Tdata10, lock); - } - /* ---------------------------------------------------------------------- */ - // Indicate Application Reference is present in bit 15 - /* ---------------------------------------------------------------------- */ - LqhKeyReq::setApplicationAddressFlag(Tdata10, 1); - LqhKeyReq::setDirtyFlag(Tdata10, sig2); - LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec); - LqhKeyReq::setSimpleFlag(Tdata10, sig0); - LqhKeyReq::setOperation(Tdata10, sig1); - LqhKeyReq::setNoDiskFlag(Tdata10, regCachePtr->m_no_disk_flag); - - /* ----------------------------------------------------------------------- - * Sequential Number of first LQH = 0, bit 22-23 - * IF ATTRIBUTE INFORMATION IS SENT IN TCKEYREQ, - * IT IS ALSO SENT IN LQHKEYREQ - * ----------------------------------------------------------------------- */ - LqhKeyReq::setAIInLqhKeyReq(Tdata10, regCachePtr->lenAiInTckeyreq); - /* ----------------------------------------------------------------------- - * Bit 27 == 0 since TC record is the same as the client record. - * Bit 28 == 0 since readLenAi can only be set after reading in LQH. - * ----------------------------------------------------------------------- */ - //LqhKeyReq::setAPIVersion(Tdata10, regCachePtr->apiVersionNo); - Uint32 commitAckMarker = regTcPtr->commitAckMarker; - const Uint32 noOfLqhs = regTcPtr->noOfNodes; - if(commitAckMarker != RNIL){ - jam(); - LqhKeyReq::setMarkerFlag(Tdata10, 1); - - CommitAckMarker * tmp = m_commitAckMarkerHash.getPtr(commitAckMarker); - - /** - * Populate LQH array - */ - tmp->noOfLqhs = noOfLqhs; - for(Uint32 i = 0; i < noOfLqhs; i++){ - tmp->lqhNodeId[i] = regTcPtr->tcNodedata[i]; - } - } - - /* ************************************************************> */ - /* NO READ LENGTH SENT FROM TC. SEQUENTIAL NUMBER IS 1 AND IT */ - /* IS SENT TO A PRIMARY NODE. */ - /* ************************************************************> */ - - LqhKeyReq * const lqhKeyReq = (LqhKeyReq *)signal->getDataPtrSend(); - - sig0 = tcConnectptr.i; - sig2 = regCachePtr->hashValue; - sig4 = cownref; - sig5 = regTcPtr->savePointId; - - lqhKeyReq->clientConnectPtr = sig0; - lqhKeyReq->attrLen = tslrAttrLen; - lqhKeyReq->hashValue = sig2; - lqhKeyReq->requestInfo = Tdata10; - lqhKeyReq->tcBlockref = sig4; - lqhKeyReq->savePointId = sig5; - - sig0 = regCachePtr->tableref + ((regCachePtr->schemaVersion << 16) & 0xFFFF0000); - sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16); - sig2 = regApiPtr->transid[0]; - sig3 = regApiPtr->transid[1]; - sig4 = (regTcPtr->isIndexOp == 2) ? reference() : regApiPtr->ndbapiBlockref; - sig5 = regTcPtr->clientData; - sig6 = regCachePtr->scanInfo; - - if (!
dirtyRead) - { - regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[0]); - regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[1]); - regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[2]); - regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[3]); - } - - lqhKeyReq->tableSchemaVersion = sig0; - lqhKeyReq->fragmentData = sig1; - lqhKeyReq->transId1 = sig2; - lqhKeyReq->transId2 = sig3; - lqhKeyReq->scanInfo = sig6; - - lqhKeyReq->variableData[0] = sig4; - lqhKeyReq->variableData[1] = sig5; - - UintR nextPos = 2; - - if (regTcPtr->lastReplicaNo > 1) { - sig0 = (UintR)regTcPtr->tcNodedata[2] + - (UintR)(regTcPtr->tcNodedata[3] << 16); - lqhKeyReq->variableData[nextPos] = sig0; - nextPos++; - }//if - - sig0 = regCachePtr->keydata[0]; - sig1 = regCachePtr->keydata[1]; - sig2 = regCachePtr->keydata[2]; - sig3 = regCachePtr->keydata[3]; - UintR Tkeylen = regCachePtr->keylen; - - lqhKeyReq->variableData[nextPos + 0] = sig0; - lqhKeyReq->variableData[nextPos + 1] = sig1; - lqhKeyReq->variableData[nextPos + 2] = sig2; - lqhKeyReq->variableData[nextPos + 3] = sig3; - - if (Tkeylen < 4) { - nextPos += Tkeylen; - } else { - nextPos += 4; - }//if - - sig0 = regCachePtr->attrinfo0; - sig1 = regCachePtr->attrinfo15[0]; - sig2 = regCachePtr->attrinfo15[1]; - sig3 = regCachePtr->attrinfo15[2]; - sig4 = regCachePtr->attrinfo15[3]; - UintR TlenAi = regCachePtr->lenAiInTckeyreq; - - lqhKeyReq->variableData[nextPos + 0] = sig0; - lqhKeyReq->variableData[nextPos + 1] = sig1; - lqhKeyReq->variableData[nextPos + 2] = sig2; - lqhKeyReq->variableData[nextPos + 3] = sig3; - lqhKeyReq->variableData[nextPos + 4] = sig4; - - nextPos += TlenAi; - - // Reset trigger count - regTcPtr->accumulatingTriggerData.i = RNIL; - regTcPtr->accumulatingTriggerData.p = NULL; - regTcPtr->noFiredTriggers = 0; - regTcPtr->triggerExecutionCount = 0; - - sendSignal(TBRef, GSN_LQHKEYREQ, signal, - nextPos + LqhKeyReq::FixedSignalLength, JBB); -}//Dbtc::sendlqhkeyreq() - -void Dbtc::packLqhkeyreq040Lab(Signal* signal, - UintR anAttrBufIndex, - BlockReference TBRef) -{ - TcConnectRecord * const regTcPtr = tcConnectptr.p; -#ifdef ERROR_INSERT - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - if (ERROR_INSERTED(8009)) { - if (regApiPtr->apiConnectstate == CS_STARTED) { - attrbufptr.i = RNIL; - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8010)) { - if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { - attrbufptr.i = RNIL; - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if -#endif - - UintR TattrbufFilesize = cattrbufFilesize; - AttrbufRecord *localAttrbufRecord = attrbufRecord; - while (1) { - if (anAttrBufIndex == RNIL) { - UintR TtcTimer = ctcTimer; - UintR Tread = (regTcPtr->operation == ZREAD); - UintR Tdirty = (regTcPtr->dirtyOp == ZTRUE); - UintR Tboth = Tread & Tdirty; - setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - jam(); - /*-------------------------------------------------------------------- - * WE HAVE SENT ALL THE SIGNALS OF THIS OPERATION. SET STATE AND EXIT. 
- *---------------------------------------------------------------------*/ - releaseAttrinfo(); - if (Tboth) { - jam(); - releaseDirtyRead(signal, apiConnectptr, tcConnectptr.p); - return; - }//if - regTcPtr->tcConnectstate = OS_OPERATING; - return; - }//if - if (anAttrBufIndex < TattrbufFilesize) { - AttrbufRecord * const regAttrPtr = &localAttrbufRecord[anAttrBufIndex]; - anAttrBufIndex = regAttrPtr->attrbuf[ZINBUF_NEXT]; - sendAttrinfo(signal, - tcConnectptr.i, - regAttrPtr, - TBRef); - } else { - TCKEY_abort(signal, 17); - return; - }//if - }//while -}//Dbtc::packLqhkeyreq040Lab() - -/* ========================================================================= */ -/* ------- RELEASE ALL ATTRINFO RECORDS IN AN OPERATION RECORD ------- */ -/* ========================================================================= */ -void Dbtc::releaseAttrinfo() -{ - UintR Tmp; - AttrbufRecordPtr Tattrbufptr; - CacheRecord * const regCachePtr = cachePtr.p; - UintR TattrbufFilesize = cattrbufFilesize; - UintR TfirstfreeAttrbuf = cfirstfreeAttrbuf; - Tattrbufptr.i = regCachePtr->firstAttrbuf; - AttrbufRecord *localAttrbufRecord = attrbufRecord; - - while (Tattrbufptr.i < TattrbufFilesize) { - Tattrbufptr.p = &localAttrbufRecord[Tattrbufptr.i]; - Tmp = Tattrbufptr.p->attrbuf[ZINBUF_NEXT]; - Tattrbufptr.p->attrbuf[ZINBUF_NEXT] = TfirstfreeAttrbuf; - TfirstfreeAttrbuf = Tattrbufptr.i; - Tattrbufptr.i = Tmp; - jam(); - }//while - if (Tattrbufptr.i == RNIL) { -//--------------------------------------------------- -// Now we will release the cache record at the same -// time as releasing the attrinfo records. -//--------------------------------------------------- - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - UintR TfirstfreeCacheRec = cfirstfreeCacheRec; - UintR TCacheIndex = cachePtr.i; - cfirstfreeAttrbuf = TfirstfreeAttrbuf; - regCachePtr->nextCacheRec = TfirstfreeCacheRec; - cfirstfreeCacheRec = TCacheIndex; - regApiPtr->cachePtr = RNIL; - return; - }//if - systemErrorLab(0, __LINE__); - return; -}//Dbtc::releaseAttrinfo() - -/* ========================================================================= */ -/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY OPERATION ------- */ -/* ========================================================================= */ -void Dbtc::releaseDirtyRead(Signal* signal, - ApiConnectRecordPtr regApiPtr, - TcConnectRecord* regTcPtr) -{ - Uint32 Ttckeyrec = regApiPtr.p->tckeyrec; - Uint32 TclientData = regTcPtr->clientData; - Uint32 Tnode = regTcPtr->tcNodedata[0]; - Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; - Uint32 TsimpleReadCount = c_counters.csimpleReadCount; - ConnectionState state = regApiPtr.p->apiConnectstate; - - regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData; - regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::DirtyReadBit | Tnode; - regApiPtr.p->tckeyrec = Ttckeyrec + 2; - - unlinkReadyTcCon(signal); - releaseTcCon(); - - /** - * No LQHKEYCONF in Simple/Dirty read - * Therefore decrese no LQHKEYCONF(REF) we are waiting for - */ - c_counters.csimpleReadCount = TsimpleReadCount + 1; - regApiPtr.p->lqhkeyreqrec = --Tlqhkeyreqrec; - - if(Tlqhkeyreqrec == 0) - { - /** - * Special case of lqhKeyConf_checkTransactionState: - * - commit with zero operations: handle only for simple read - */ - sendtckeyconf(signal, state == CS_START_COMMITTING); - regApiPtr.p->apiConnectstate = - (state == CS_START_COMMITTING ? 
CS_CONNECTED : state); - setApiConTimer(regApiPtr.i, 0, __LINE__); - - return; - } - - /** - * Emulate LQHKEYCONF - */ - lqhKeyConf_checkTransactionState(signal, regApiPtr); -}//Dbtc::releaseDirtyRead() - -/* ------------------------------------------------------------------------- */ -/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */ -/* ------------------------------------------------------------------------- */ -void Dbtc::unlinkReadyTcCon(Signal* signal) -{ - TcConnectRecordPtr urtTcConnectptr; - - TcConnectRecord * const regTcPtr = tcConnectptr.p; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - UintR TtcConnectFilesize = ctcConnectFilesize; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - if (regTcPtr->prevTcConnect != RNIL) { - jam(); - urtTcConnectptr.i = regTcPtr->prevTcConnect; - ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - urtTcConnectptr.p->nextTcConnect = regTcPtr->nextTcConnect; - } else { - jam(); - regApiPtr->firstTcConnect = regTcPtr->nextTcConnect; - }//if - if (regTcPtr->nextTcConnect != RNIL) { - jam(); - urtTcConnectptr.i = regTcPtr->nextTcConnect; - ptrCheckGuard(urtTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - urtTcConnectptr.p->prevTcConnect = regTcPtr->prevTcConnect; - } else { - jam(); - regApiPtr->lastTcConnect = tcConnectptr.p->prevTcConnect; - }//if -}//Dbtc::unlinkReadyTcCon() - -void Dbtc::releaseTcCon() -{ - TcConnectRecord * const regTcPtr = tcConnectptr.p; - UintR TfirstfreeTcConnect = cfirstfreeTcConnect; - UintR TconcurrentOp = c_counters.cconcurrentOp; - UintR TtcConnectptrIndex = tcConnectptr.i; - - regTcPtr->tcConnectstate = OS_CONNECTED; - regTcPtr->nextTcConnect = TfirstfreeTcConnect; - regTcPtr->apiConnect = RNIL; - regTcPtr->isIndexOp = false; - regTcPtr->indexOp = RNIL; - cfirstfreeTcConnect = TtcConnectptrIndex; - c_counters.cconcurrentOp = TconcurrentOp - 1; -}//Dbtc::releaseTcCon() - -void Dbtc::execPACKED_SIGNAL(Signal* signal) -{ - LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - - UintR Ti; - UintR Tstep = 0; - UintR Tlength; - UintR TpackedData[28]; - UintR Tdata1, Tdata2, Tdata3, Tdata4; - - jamEntry(); - Tlength = signal->length(); - if (Tlength > 25) { - jam(); - systemErrorLab(signal, __LINE__); - return; - }//if - Uint32* TpackDataPtr; - for (Ti = 0; Ti < Tlength; Ti += 4) { - Uint32* TsigDataPtr = &signal->theData[Ti]; - Tdata1 = TsigDataPtr[0]; - Tdata2 = TsigDataPtr[1]; - Tdata3 = TsigDataPtr[2]; - Tdata4 = TsigDataPtr[3]; - - TpackDataPtr = &TpackedData[Ti]; - TpackDataPtr[0] = Tdata1; - TpackDataPtr[1] = Tdata2; - TpackDataPtr[2] = Tdata3; - TpackDataPtr[3] = Tdata4; - }//for - while (Tlength > Tstep) { - - TpackDataPtr = &TpackedData[Tstep]; - Tdata1 = TpackDataPtr[0]; - Tdata2 = TpackDataPtr[1]; - Tdata3 = TpackDataPtr[2]; - - lqhKeyConf->connectPtr = Tdata1 & 0x0FFFFFFF; - lqhKeyConf->opPtr = Tdata2; - lqhKeyConf->userRef = Tdata3; - - switch (Tdata1 >> 28) { - case ZCOMMITTED: - signal->header.theLength = 3; - execCOMMITTED(signal); - Tstep += 3; - break; - case ZCOMPLETED: - signal->header.theLength = 3; - execCOMPLETED(signal); - Tstep += 3; - break; - case ZLQHKEYCONF: - jam(); - Tdata1 = TpackDataPtr[3]; - Tdata2 = TpackDataPtr[4]; - Tdata3 = TpackDataPtr[5]; - Tdata4 = TpackDataPtr[6]; - - lqhKeyConf->readLen = Tdata1; - lqhKeyConf->transId1 = Tdata2; - lqhKeyConf->transId2 = Tdata3; - lqhKeyConf->noFiredTriggers = Tdata4; - signal->header.theLength = LqhKeyConf::SignalLength; - execLQHKEYCONF(signal); - Tstep += 
LqhKeyConf::SignalLength; - break; - default: - systemErrorLab(signal, __LINE__); - return; - }//switch - }//while - return; -}//Dbtc::execPACKED_SIGNAL() - -void Dbtc::execLQHKEYCONF(Signal* signal) -{ - const LqhKeyConf * const lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - UintR compare_transid1, compare_transid2; - BlockReference tlastLqhBlockref; - UintR tlastLqhConnect; - UintR treadlenAi; - UintR TtcConnectptrIndex; - UintR TtcConnectFilesize = ctcConnectFilesize; - - tlastLqhConnect = lqhKeyConf->connectPtr; - TtcConnectptrIndex = lqhKeyConf->opPtr; - tlastLqhBlockref = lqhKeyConf->userRef; - treadlenAi = lqhKeyConf->readLen; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - - /*------------------------------------------------------------------------ - * NUMBER OF EXTERNAL TRIGGERS FIRED IN DATA[6] - * OPERATION IS NOW COMPLETED. CHECK FOR CORRECT OPERATION POINTER - * TO ENSURE NO CRASHES BECAUSE OF ERRONEUS NODES. CHECK STATE OF - * OPERATION. THEN SET OPERATION STATE AND RETRIEVE ALL POINTERS - * OF THIS OPERATION. PUT COMPLETED OPERATION IN LIST OF COMPLETED - * OPERATIONS ON THE LQH CONNECT RECORD. - *------------------------------------------------------------------------ - * THIS SIGNAL ALWAYS ARRIVE BEFORE THE ABORTED SIGNAL ARRIVES SINCE IT USES - * THE SAME PATH BACK TO TC AS THE ABORTED SIGNAL DO. WE DO HOWEVER HAVE A - * PROBLEM WHEN WE ENCOUNTER A TIME-OUT WAITING FOR THE ABORTED SIGNAL. - * THEN THIS SIGNAL MIGHT ARRIVE WHEN THE TC CONNECT RECORD HAVE BEEN REUSED - * BY OTHER TRANSACTION THUS WE CHECK THE TRANSACTION ID OF THE SIGNAL - * BEFORE ACCEPTING THIS SIGNAL. - * Due to packing of LQHKEYCONF the ABORTED signal can now arrive before - * this. - * This is more reason to ignore the signal if not all states are correct. 
- *------------------------------------------------------------------------*/ - if (TtcConnectptrIndex >= TtcConnectFilesize) { - TCKEY_abort(signal, 25); - return; - }//if - TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex]; - OperationState TtcConnectstate = regTcPtr->tcConnectstate; - tcConnectptr.i = TtcConnectptrIndex; - tcConnectptr.p = regTcPtr; - if (TtcConnectstate != OS_OPERATING) { - warningReport(signal, 23); - return; - }//if - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - UintR TapiConnectptrIndex = regTcPtr->apiConnect; - UintR TapiConnectFilesize = capiConnectFilesize; - UintR Ttrans1 = lqhKeyConf->transId1; - UintR Ttrans2 = lqhKeyConf->transId2; - Uint32 noFired = lqhKeyConf->noFiredTriggers; - - if (TapiConnectptrIndex >= TapiConnectFilesize) { - TCKEY_abort(signal, 29); - return; - }//if - Ptr<ApiConnectRecord> regApiPtr; - regApiPtr.i = TapiConnectptrIndex; - regApiPtr.p = &localApiConnectRecord[TapiConnectptrIndex]; - apiConnectptr.i = TapiConnectptrIndex; - apiConnectptr.p = regApiPtr.p; - compare_transid1 = regApiPtr.p->transid[0] ^ Ttrans1; - compare_transid2 = regApiPtr.p->transid[1] ^ Ttrans2; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 24); - return; - }//if - -#ifdef ERROR_INSERT - if (ERROR_INSERTED(8029)) { - systemErrorLab(signal, __LINE__); - }//if - if (ERROR_INSERTED(8003)) { - if (regApiPtr.p->apiConnectstate == CS_STARTED) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8004)) { - if (regApiPtr.p->apiConnectstate == CS_RECEIVING) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8005)) { - if (regApiPtr.p->apiConnectstate == CS_REC_COMMITTING) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8006)) { - if (regApiPtr.p->apiConnectstate == CS_START_COMMITTING) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - }//if - if (ERROR_INSERTED(8023)) { - SET_ERROR_INSERT_VALUE(8024); - return; - }//if -#endif - UintR TtcTimer = ctcTimer; - regTcPtr->lastLqhCon = tlastLqhConnect; - regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref); - regTcPtr->noFiredTriggers = noFired; - - UintR Ttckeyrec = (UintR)regApiPtr.p->tckeyrec; - UintR TclientData = regTcPtr->clientData; - UintR TdirtyOp = regTcPtr->dirtyOp; - Uint32 TopSimple = regTcPtr->opSimple; - Uint32 Toperation = regTcPtr->operation; - ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate; - if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) { - TCKEY_abort(signal, 30); - return; - } - if (TapiConnectstate == CS_ABORTING) { - warningReport(signal, 27); - return; - }//if - - setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__); - - if (regTcPtr->isIndexOp) { - jam(); - // This was an internal TCKEYREQ - // will be returned unpacked - regTcPtr->attrInfoLen = treadlenAi; - } else { - if (noFired == 0 && regTcPtr->triggeringOperation == RNIL) { - jam(); - /* - * Skip counting triggering operations the first round - * since they will enter execLQHKEYCONF a second time - * Skip counting internally generated TcKeyReq - */ - regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData; - regApiPtr.p->tcSendArray[Ttckeyrec + 1] = treadlenAi; - regApiPtr.p->tckeyrec = Ttckeyrec + 2; - }//if - }//if - if (TdirtyOp == ZTRUE) - { - UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; - jam(); - releaseDirtyWrite(signal); - regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1; - } - else if (Toperation == ZREAD && TopSimple) - { - UintR Tlqhkeyreqrec = 
regApiPtr.p->lqhkeyreqrec; - jam(); - unlinkReadyTcCon(signal); - releaseTcCon(); - regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1; - } - else - { - jam(); - if (noFired == 0) { - jam(); - // No triggers to execute - UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec; - regApiPtr.p->lqhkeyconfrec = Tlqhkeyconfrec + 1; - regTcPtr->tcConnectstate = OS_PREPARED; - } - }//if - - /** - * And now decide what to do next - */ - if (regTcPtr->triggeringOperation != RNIL) { - jam(); - // This operation was created by a trigger execting operation - // Restart it if we have executed all it's triggers - TcConnectRecordPtr opPtr; - - opPtr.i = regTcPtr->triggeringOperation; - ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); - opPtr.p->triggerExecutionCount--; - if (opPtr.p->triggerExecutionCount == 0) { - /* - We have completed current trigger execution - Continue triggering operation - */ - jam(); - continueTriggeringOp(signal, opPtr.p); - } - } else if (noFired == 0) { - // This operation did not fire any triggers, finish operation - jam(); - if (regTcPtr->isIndexOp) { - jam(); - setupIndexOpReturn(regApiPtr.p, regTcPtr); - } - lqhKeyConf_checkTransactionState(signal, regApiPtr); - } else { - // We have fired triggers - jam(); - saveTriggeringOpState(signal, regTcPtr); - if (regTcPtr->noReceivedTriggers == noFired) - { - // We have received all data - jam(); - executeTriggers(signal, &regApiPtr); - } - // else wait for more trigger data - } -}//Dbtc::execLQHKEYCONF() - -void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr, - TcConnectRecord* regTcPtr) -{ - regApiPtr->indexOpReturn = true; - regApiPtr->indexOp = regTcPtr->indexOp; - regApiPtr->clientData = regTcPtr->clientData; - regApiPtr->attrInfoLen = regTcPtr->attrInfoLen; -} - -/** - * lqhKeyConf_checkTransactionState - * - * This functions checks state variables, and - * decides if it should wait for more LQHKEYCONF signals - * or if it should start commiting - */ -void -Dbtc::lqhKeyConf_checkTransactionState(Signal * signal, - Ptr<ApiConnectRecord> regApiPtr) -{ -/*---------------------------------------------------------------*/ -/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */ -/* SEND TCKEYCONF FOR ALL OPERATIONS EXCEPT THE LAST ONE. 
WHEN */ -/* THE TRANSACTION THEN IS COMMITTED TCKEYCONF IS SENT FOR THE */ -/* WHOLE TRANSACTION */ -/* IF THE COMMIT FLAG IS NOT RECECIVED DBTC WILL SEND TCKEYCONF */ -/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */ -/* TRANSACTION */ -/*---------------------------------------------------------------*/ - ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate; - UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec; - UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec; - int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec; - - switch (TapiConnectstate) { - case CS_START_COMMITTING: - if (TnoOfOutStanding == 0) { - jam(); - diverify010Lab(signal); - return; - } else if (TnoOfOutStanding > 0) { - if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { - jam(); - sendtckeyconf(signal, 0); - return; - } else if (regApiPtr.p->indexOpReturn) { - jam(); - sendtckeyconf(signal, 0); - return; - }//if - jam(); - return; - } else { - TCKEY_abort(signal, 44); - return; - }//if - return; - case CS_STARTED: - case CS_RECEIVING: - if (TnoOfOutStanding == 0) { - jam(); - sendtckeyconf(signal, 2); - return; - } else { - if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { - jam(); - sendtckeyconf(signal, 0); - return; - } else if (regApiPtr.p->indexOpReturn) { - jam(); - sendtckeyconf(signal, 0); - return; - }//if - jam(); - }//if - return; - case CS_REC_COMMITTING: - if (TnoOfOutStanding > 0) { - if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) { - jam(); - sendtckeyconf(signal, 0); - return; - } else if (regApiPtr.p->indexOpReturn) { - jam(); - sendtckeyconf(signal, 0); - return; - }//if - jam(); - return; - }//if - TCKEY_abort(signal, 45); - return; - case CS_CONNECTED: - jam(); -/*---------------------------------------------------------------*/ -/* WE HAVE CONCLUDED THE TRANSACTION SINCE IT WAS ONLY */ -/* CONSISTING OF DIRTY WRITES AND ALL OF THOSE WERE */ -/* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */ -/*---------------------------------------------------------------*/ - regApiPtr.p->tckeyrec = 0; - return; - default: - TCKEY_abort(signal, 46); - return; - }//switch -}//Dbtc::lqhKeyConf_checkTransactionState() - -void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag) -{ - if(ERROR_INSERTED(8049)){ - CLEAR_ERROR_INSERT_VALUE; - signal->theData[0] = TcContinueB::DelayTCKEYCONF; - signal->theData[1] = apiConnectptr.i; - signal->theData[2] = TcommitFlag; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 3000, 3); - return; - } - - HostRecordPtr localHostptr; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - const UintR TopWords = (UintR)regApiPtr->tckeyrec; - localHostptr.i = refToNode(regApiPtr->ndbapiBlockref); - const Uint32 type = getNodeInfo(localHostptr.i).m_type; - const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM); - const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref); - const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 
0 : 1; - ptrAss(localHostptr, hostRecord); - UintR TcurrLen = localHostptr.p->noOfWordsTCKEYCONF; - UintR confInfo = 0; - TcKeyConf::setCommitFlag(confInfo, TcommitFlag == 1); - TcKeyConf::setMarkerFlag(confInfo, Tmarker); - const UintR TpacketLen = 6 + TopWords; - regApiPtr->tckeyrec = 0; - - if (regApiPtr->indexOpReturn) { - jam(); - // Return internally generated TCKEY - TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend(); - TcKeyConf::setNoOfOperations(confInfo, 1); - tcKeyConf->apiConnectPtr = regApiPtr->indexOp; - tcKeyConf->gci = regApiPtr->globalcheckpointid; - tcKeyConf->confInfo = confInfo; - tcKeyConf->transId1 = regApiPtr->transid[0]; - tcKeyConf->transId2 = regApiPtr->transid[1]; - tcKeyConf->operations[0].apiOperationPtr = regApiPtr->clientData; - tcKeyConf->operations[0].attrInfoLen = regApiPtr->attrInfoLen; - Uint32 sigLen = TcKeyConf::StaticLength + TcKeyConf::OperationLength; - EXECUTE_DIRECT(DBTC, GSN_TCKEYCONF, signal, sigLen); - regApiPtr->indexOpReturn = false; - if (TopWords == 0) { - jam(); - return; // No queued TcKeyConf - }//if - }//if - if(TcommitFlag){ - jam(); - regApiPtr->m_exec_flag = 0; - } - TcKeyConf::setNoOfOperations(confInfo, (TopWords >> 1)); - if ((TpacketLen > 25) || !is_api){ - TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtrSend(); - - jam(); - tcKeyConf->apiConnectPtr = regApiPtr->ndbapiConnect; - tcKeyConf->gci = regApiPtr->globalcheckpointid;; - tcKeyConf->confInfo = confInfo; - tcKeyConf->transId1 = regApiPtr->transid[0]; - tcKeyConf->transId2 = regApiPtr->transid[1]; - copyFromToLen(&regApiPtr->tcSendArray[0], - (UintR*)&tcKeyConf->operations, - (UintR)ZTCOPCONF_SIZE); - sendSignal(regApiPtr->ndbapiBlockref, - GSN_TCKEYCONF, signal, (TpacketLen - 1), JBB); - return; - } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) { - jam(); - sendPackedTCKEYCONF(signal, localHostptr.p, localHostptr.i); - TcurrLen = 0; - } else { - jam(); - updatePackedList(signal, localHostptr.p, localHostptr.i); - }//if - // ------------------------------------------------------------------------- - // The header contains the block reference of receiver plus the real signal - // length - 3, since we have the real signal length plus one additional word - // for the header we have to do - 4. 
- // ------------------------------------------------------------------------- - UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4); - UintR Tpack1 = regApiPtr->ndbapiConnect; - UintR Tpack2 = regApiPtr->globalcheckpointid; - UintR Tpack3 = confInfo; - UintR Tpack4 = regApiPtr->transid[0]; - UintR Tpack5 = regApiPtr->transid[1]; - - localHostptr.p->noOfWordsTCKEYCONF = TcurrLen + TpacketLen; - - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 0] = Tpack0; - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 1] = Tpack1; - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 2] = Tpack2; - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 3] = Tpack3; - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 4] = Tpack4; - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + 5] = Tpack5; - - UintR Ti; - for (Ti = 6; Ti < TpacketLen; Ti++) { - localHostptr.p->packedWordsTCKEYCONF[TcurrLen + Ti] = - regApiPtr->tcSendArray[Ti - 6]; - }//for -}//Dbtc::sendtckeyconf() - -void Dbtc::copyFromToLen(UintR* sourceBuffer, UintR* destBuffer, UintR Tlen) -{ - UintR Tindex = 0; - UintR Ti; - while (Tlen >= 4) { - UintR Tdata0 = sourceBuffer[Tindex + 0]; - UintR Tdata1 = sourceBuffer[Tindex + 1]; - UintR Tdata2 = sourceBuffer[Tindex + 2]; - UintR Tdata3 = sourceBuffer[Tindex + 3]; - Tlen -= 4; - destBuffer[Tindex + 0] = Tdata0; - destBuffer[Tindex + 1] = Tdata1; - destBuffer[Tindex + 2] = Tdata2; - destBuffer[Tindex + 3] = Tdata3; - Tindex += 4; - }//while - for (Ti = 0; Ti < Tlen; Ti++, Tindex++) { - destBuffer[Tindex] = sourceBuffer[Tindex]; - }//for -}//Dbtc::copyFromToLen() - -void Dbtc::execSEND_PACKED(Signal* signal) -{ - HostRecordPtr Thostptr; - HostRecord *localHostRecord = hostRecord; - UintR i; - UintR TpackedListIndex = cpackedListIndex; - jamEntry(); - for (i = 0; i < TpackedListIndex; i++) { - Thostptr.i = cpackedList[i]; - ptrAss(Thostptr, localHostRecord); - arrGuard(Thostptr.i - 1, MAX_NODES - 1); - UintR TnoOfPackedWordsLqh = Thostptr.p->noOfPackedWordsLqh; - UintR TnoOfWordsTCKEYCONF = Thostptr.p->noOfWordsTCKEYCONF; - UintR TnoOfWordsTCINDXCONF = Thostptr.p->noOfWordsTCINDXCONF; - jam(); - if (TnoOfPackedWordsLqh > 0) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - }//if - if (TnoOfWordsTCKEYCONF > 0) { - jam(); - sendPackedTCKEYCONF(signal, Thostptr.p, (Uint32)Thostptr.i); - }//if - if (TnoOfWordsTCINDXCONF > 0) { - jam(); - sendPackedTCINDXCONF(signal, Thostptr.p, (Uint32)Thostptr.i); - }//if - Thostptr.p->inPackedList = false; - }//for - cpackedListIndex = 0; - return; -}//Dbtc::execSEND_PACKED() - -void -Dbtc::updatePackedList(Signal* signal, HostRecord* ahostptr, Uint16 ahostIndex) -{ - if (ahostptr->inPackedList == false) { - UintR TpackedListIndex = cpackedListIndex; - jam(); - ahostptr->inPackedList = true; - cpackedList[TpackedListIndex] = ahostIndex; - cpackedListIndex = TpackedListIndex + 1; - }//if -}//Dbtc::updatePackedList() - -void Dbtc::sendPackedSignalLqh(Signal* signal, HostRecord * ahostptr) -{ - UintR Tj; - UintR TnoOfWords = ahostptr->noOfPackedWordsLqh; - for (Tj = 0; Tj < TnoOfWords; Tj += 4) { - UintR sig0 = ahostptr->packedWordsLqh[Tj + 0]; - UintR sig1 = ahostptr->packedWordsLqh[Tj + 1]; - UintR sig2 = ahostptr->packedWordsLqh[Tj + 2]; - UintR sig3 = ahostptr->packedWordsLqh[Tj + 3]; - signal->theData[Tj + 0] = sig0; - signal->theData[Tj + 1] = sig1; - signal->theData[Tj + 2] = sig2; - signal->theData[Tj + 3] = sig3; - }//for - ahostptr->noOfPackedWordsLqh = 0; - sendSignal(ahostptr->hostLqhBlockRef, - GSN_PACKED_SIGNAL, - signal, - TnoOfWords, - JBB); 
-}//Dbtc::sendPackedSignalLqh() - -void Dbtc::sendPackedTCKEYCONF(Signal* signal, - HostRecord * ahostptr, - UintR hostId) -{ - UintR Tj; - UintR TnoOfWords = ahostptr->noOfWordsTCKEYCONF; - BlockReference TBref = numberToRef(API_PACKED, hostId); - for (Tj = 0; Tj < ahostptr->noOfWordsTCKEYCONF; Tj += 4) { - UintR sig0 = ahostptr->packedWordsTCKEYCONF[Tj + 0]; - UintR sig1 = ahostptr->packedWordsTCKEYCONF[Tj + 1]; - UintR sig2 = ahostptr->packedWordsTCKEYCONF[Tj + 2]; - UintR sig3 = ahostptr->packedWordsTCKEYCONF[Tj + 3]; - signal->theData[Tj + 0] = sig0; - signal->theData[Tj + 1] = sig1; - signal->theData[Tj + 2] = sig2; - signal->theData[Tj + 3] = sig3; - }//for - ahostptr->noOfWordsTCKEYCONF = 0; - sendSignal(TBref, GSN_TCKEYCONF, signal, TnoOfWords, JBB); -}//Dbtc::sendPackedTCKEYCONF() - -void Dbtc::sendPackedTCINDXCONF(Signal* signal, - HostRecord * ahostptr, - UintR hostId) -{ - UintR Tj; - UintR TnoOfWords = ahostptr->noOfWordsTCINDXCONF; - BlockReference TBref = numberToRef(API_PACKED, hostId); - for (Tj = 0; Tj < ahostptr->noOfWordsTCINDXCONF; Tj += 4) { - UintR sig0 = ahostptr->packedWordsTCINDXCONF[Tj + 0]; - UintR sig1 = ahostptr->packedWordsTCINDXCONF[Tj + 1]; - UintR sig2 = ahostptr->packedWordsTCINDXCONF[Tj + 2]; - UintR sig3 = ahostptr->packedWordsTCINDXCONF[Tj + 3]; - signal->theData[Tj + 0] = sig0; - signal->theData[Tj + 1] = sig1; - signal->theData[Tj + 2] = sig2; - signal->theData[Tj + 3] = sig3; - }//for - ahostptr->noOfWordsTCINDXCONF = 0; - sendSignal(TBref, GSN_TCINDXCONF, signal, TnoOfWords, JBB); -}//Dbtc::sendPackedTCINDXCONF() - -/* -4.3.11 DIVERIFY ---------------- -*/ -/*****************************************************************************/ -/* D I V E R I F Y */ -/* */ -/*****************************************************************************/ -void Dbtc::diverify010Lab(Signal* signal) -{ - UintR TfirstfreeApiConnectCopy = cfirstfreeApiConnectCopy; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - signal->theData[0] = apiConnectptr.i; - if (ERROR_INSERTED(8022)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - - if (regApiPtr->lqhkeyreqrec) - { - if (TfirstfreeApiConnectCopy != RNIL) { - seizeApiConnectCopy(signal); - regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT; - /*----------------------------------------------------------------------- - * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS - * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC - * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE - * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS. - *---------------------------------------------------------------------*/ - EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1); - if (signal->theData[2] == 0) { - execDIVERIFYCONF(signal); - } - return; - } else { - /*----------------------------------------------------------------------- - * There were no free copy connections available. We must abort the - * transaction since otherwise we will have a problem with the report - * to the application. - * This should more or less not happen but if it happens we do - * not want to crash and we do not want to create code to handle it - * properly since it is difficult to test it and will be complex to - * handle a problem more or less not occurring. 
- *---------------------------------------------------------------------*/ - terrorCode = ZSEIZE_API_COPY_ERROR; - abortErrorLab(signal); - return; - } - } - else - { - jam(); - sendtckeyconf(signal, 1); - regApiPtr->apiConnectstate = CS_CONNECTED; - regApiPtr->m_transaction_nodes.clear(); - setApiConTimer(apiConnectptr.i, 0,__LINE__); - } -}//Dbtc::diverify010Lab() - -/* ------------------------------------------------------------------------- */ -/* ------- SEIZE_API_CONNECT ------- */ -/* SEIZE CONNECT RECORD FOR A REQUEST */ -/* ------------------------------------------------------------------------- */ -void Dbtc::seizeApiConnectCopy(Signal* signal) -{ - ApiConnectRecordPtr locApiConnectptr; - - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - UintR TapiConnectFilesize = capiConnectFilesize; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - - locApiConnectptr.i = cfirstfreeApiConnectCopy; - ptrCheckGuard(locApiConnectptr, TapiConnectFilesize, localApiConnectRecord); - cfirstfreeApiConnectCopy = locApiConnectptr.p->nextApiConnect; - locApiConnectptr.p->nextApiConnect = RNIL; - regApiPtr->apiCopyRecord = locApiConnectptr.i; - regApiPtr->triggerPending = false; - regApiPtr->isIndexOp = false; -}//Dbtc::seizeApiConnectCopy() - -void Dbtc::execDIVERIFYCONF(Signal* signal) -{ - UintR TapiConnectptrIndex = signal->theData[0]; - UintR TapiConnectFilesize = capiConnectFilesize; - UintR Tgci = signal->theData[1]; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - jamEntry(); - if (ERROR_INSERTED(8017)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - if (TapiConnectptrIndex >= TapiConnectFilesize) { - TCKEY_abort(signal, 31); - return; - }//if - ApiConnectRecord * const regApiPtr = - &localApiConnectRecord[TapiConnectptrIndex]; - ConnectionState TapiConnectstate = regApiPtr->apiConnectstate; - UintR TApifailureNr = regApiPtr->failureNr; - UintR Tfailure_nr = cfailure_nr; - apiConnectptr.i = TapiConnectptrIndex; - apiConnectptr.p = regApiPtr; - if (TapiConnectstate != CS_PREPARE_TO_COMMIT) { - TCKEY_abort(signal, 32); - return; - }//if - /*-------------------------------------------------------------------------- - * THIS IS THE COMMIT POINT. IF WE ARRIVE HERE THE TRANSACTION IS COMMITTED - * UNLESS EVERYTHING CRASHES BEFORE WE HAVE BEEN ABLE TO REPORT THE COMMIT - * DECISION. THERE IS NO TURNING BACK FROM THIS DECISION FROM HERE ON. - * WE WILL INSERT THE TRANSACTION INTO ITS PROPER QUEUE OF - * TRANSACTIONS FOR ITS GLOBAL CHECKPOINT. 
- *-------------------------------------------------------------------------*/ - if (TApifailureNr != Tfailure_nr) { - DIVER_node_fail_handling(signal, Tgci); - return; - }//if - commitGciHandling(signal, Tgci); - - /************************************************************************** - * C O M M I T - * THE TRANSACTION HAVE NOW BEEN VERIFIED AND NOW THE COMMIT PHASE CAN START - **************************************************************************/ - - UintR TtcConnectptrIndex = regApiPtr->firstTcConnect; - UintR TtcConnectFilesize = ctcConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - - regApiPtr->counter = regApiPtr->lqhkeyconfrec; - regApiPtr->apiConnectstate = CS_COMMITTING; - if (TtcConnectptrIndex >= TtcConnectFilesize) { - TCKEY_abort(signal, 33); - return; - }//if - TcConnectRecord* const regTcPtr = &localTcConnectRecord[TtcConnectptrIndex]; - tcConnectptr.i = TtcConnectptrIndex; - tcConnectptr.p = regTcPtr; - commit020Lab(signal); -}//Dbtc::execDIVERIFYCONF() - -/*--------------------------------------------------------------------------*/ -/* COMMIT_GCI_HANDLING */ -/* SET UP GLOBAL CHECKPOINT DATA STRUCTURE AT THE COMMIT POINT. */ -/*--------------------------------------------------------------------------*/ -void Dbtc::commitGciHandling(Signal* signal, UintR Tgci) -{ - GcpRecordPtr localGcpPointer; - - UintR TgcpFilesize = cgcpFilesize; - UintR Tfirstgcp = cfirstgcp; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - GcpRecord *localGcpRecord = gcpRecord; - - regApiPtr->globalcheckpointid = Tgci; - if (Tfirstgcp != RNIL) { - /* IF THIS GLOBAL CHECKPOINT ALREADY EXISTS */ - localGcpPointer.i = Tfirstgcp; - ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord); - do { - if (regApiPtr->globalcheckpointid == localGcpPointer.p->gcpId) { - jam(); - gcpPtr.i = localGcpPointer.i; - gcpPtr.p = localGcpPointer.p; - linkApiToGcp(signal); - return; - } else { - localGcpPointer.i = localGcpPointer.p->nextGcp; - jam(); - if (localGcpPointer.i != RNIL) { - jam(); - ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord); - continue; - }//if - }//if - seizeGcp(signal); - linkApiToGcp(signal); - return; - } while (1); - } else { - jam(); - seizeGcp(signal); - linkApiToGcp(signal); - }//if -}//Dbtc::commitGciHandling() - -/* --------------------------------------------------------------------------*/ -/* -LINK AN API CONNECT RECORD IN STATE PREPARED INTO THE LIST WITH GLOBAL - */ -/* CHECKPOINTS. WHEN THE TRANSACTION I COMPLETED THE API CONNECT RECORD IS */ -/* LINKED OUT OF THE LIST. 
*/ -/*---------------------------------------------------------------------------*/ -void Dbtc::linkApiToGcp(Signal* signal) -{ - ApiConnectRecordPtr localApiConnectptr; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - GcpRecord * const regGcpPtr = gcpPtr.p; - UintR TapiConnectptrIndex = apiConnectptr.i; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - regApiPtr->nextGcpConnect = RNIL; - if (regGcpPtr->firstApiConnect == RNIL) { - regGcpPtr->firstApiConnect = TapiConnectptrIndex; - jam(); - } else { - UintR TapiConnectFilesize = capiConnectFilesize; - localApiConnectptr.i = regGcpPtr->lastApiConnect; - jam(); - ptrCheckGuard(localApiConnectptr, - TapiConnectFilesize, localApiConnectRecord); - localApiConnectptr.p->nextGcpConnect = TapiConnectptrIndex; - }//if - UintR TlastApiConnect = regGcpPtr->lastApiConnect; - regApiPtr->gcpPointer = gcpPtr.i; - regApiPtr->prevGcpConnect = TlastApiConnect; - regGcpPtr->lastApiConnect = TapiConnectptrIndex; -}//Dbtc::linkApiToGcp() - -void Dbtc::seizeGcp(Signal* signal) -{ - GcpRecordPtr tmpGcpPointer; - GcpRecordPtr localGcpPointer; - - UintR Tfirstgcp = cfirstgcp; - UintR Tglobalcheckpointid = apiConnectptr.p->globalcheckpointid; - UintR TgcpFilesize = cgcpFilesize; - GcpRecord *localGcpRecord = gcpRecord; - - localGcpPointer.i = cfirstfreeGcp; - ptrCheckGuard(localGcpPointer, TgcpFilesize, localGcpRecord); - UintR TfirstfreeGcp = localGcpPointer.p->nextGcp; - localGcpPointer.p->gcpId = Tglobalcheckpointid; - localGcpPointer.p->nextGcp = RNIL; - localGcpPointer.p->firstApiConnect = RNIL; - localGcpPointer.p->lastApiConnect = RNIL; - localGcpPointer.p->gcpNomoretransRec = ZFALSE; - cfirstfreeGcp = TfirstfreeGcp; - - if (Tfirstgcp == RNIL) { - jam(); - cfirstgcp = localGcpPointer.i; - } else { - tmpGcpPointer.i = clastgcp; - jam(); - ptrCheckGuard(tmpGcpPointer, TgcpFilesize, localGcpRecord); - tmpGcpPointer.p->nextGcp = localGcpPointer.i; - }//if - clastgcp = localGcpPointer.i; - gcpPtr = localGcpPointer; -}//Dbtc::seizeGcp() - -/*---------------------------------------------------------------------------*/ -// Send COMMIT messages to all LQH operations involved in the transaction. 
-/*---------------------------------------------------------------------------*/ -void Dbtc::commit020Lab(Signal* signal) -{ - TcConnectRecordPtr localTcConnectptr; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - UintR TtcConnectFilesize = ctcConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - - localTcConnectptr.p = tcConnectptr.p; - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - UintR Tcount = 0; - do { - /*----------------------------------------------------------------------- - * WE ARE NOW READY TO RELEASE ALL OPERATIONS ON THE LQH - *-----------------------------------------------------------------------*/ - /* *********< */ - /* COMMIT < */ - /* *********< */ - localTcConnectptr.i = localTcConnectptr.p->nextTcConnect; - localTcConnectptr.p->tcConnectstate = OS_COMMITTING; - sendCommitLqh(signal, localTcConnectptr.p); - - if (localTcConnectptr.i != RNIL) { - Tcount = Tcount + 1; - if (Tcount < 16 && !ERROR_INSERTED(8057)) { - ptrCheckGuard(localTcConnectptr, - TtcConnectFilesize, localTcConnectRecord); - jam(); - continue; - } else { - jam(); - if (ERROR_INSERTED(8014)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - signal->theData[0] = TcContinueB::ZSEND_COMMIT_LOOP; - signal->theData[1] = apiConnectptr.i; - signal->theData[2] = localTcConnectptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - }//if - } else { - jam(); - if (ERROR_INSERTED(8057)) - CLEAR_ERROR_INSERT_VALUE; - - regApiPtr->apiConnectstate = CS_COMMIT_SENT; - return; - }//if - } while (1); -}//Dbtc::commit020Lab() - -void Dbtc::sendCommitLqh(Signal* signal, - TcConnectRecord * const regTcPtr) -{ - HostRecordPtr Thostptr; - UintR ThostFilesize = chostFilesize; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - Thostptr.i = regTcPtr->lastLqhNodeId; - ptrCheckGuard(Thostptr, ThostFilesize, hostRecord); - if (Thostptr.p->noOfPackedWordsLqh > 21) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - UintR Tindex = Thostptr.p->noOfPackedWordsLqh; - UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex]; - UintR Tdata1 = regTcPtr->lastLqhCon; - UintR Tdata2 = regApiPtr->globalcheckpointid; - UintR Tdata3 = regApiPtr->transid[0]; - UintR Tdata4 = regApiPtr->transid[1]; - - TDataPtr[0] = Tdata1 | (ZCOMMIT << 28); - TDataPtr[1] = Tdata2; - TDataPtr[2] = Tdata3; - TDataPtr[3] = Tdata4; - Thostptr.p->noOfPackedWordsLqh = Tindex + 4; -}//Dbtc::sendCommitLqh() - -void -Dbtc::DIVER_node_fail_handling(Signal* signal, UintR Tgci) -{ - /*------------------------------------------------------------------------ - * AT LEAST ONE NODE HAS FAILED DURING THE TRANSACTION. WE NEED TO CHECK IF - * THIS IS SO SERIOUS THAT WE NEED TO ABORT THE TRANSACTION. IN BOTH THE - * ABORT AND THE COMMIT CASES WE NEED TO SET-UP THE DATA FOR THE - * ABORT/COMMIT/COMPLETE HANDLING AS ALSO USED BY TAKE OVER FUNCTIONALITY. 
- *------------------------------------------------------------------------*/ - tabortInd = ZFALSE; - setupFailData(signal); - if (false && tabortInd == ZFALSE) { - jam(); - commitGciHandling(signal, Tgci); - toCommitHandlingLab(signal); - } else { - jam(); - apiConnectptr.p->returnsignal = RS_TCROLLBACKREP; - apiConnectptr.p->returncode = ZNODEFAIL_BEFORE_COMMIT; - toAbortHandlingLab(signal); - }//if - return; -}//Dbtc::DIVER_node_fail_handling() - - -/* ------------------------------------------------------------------------- */ -/* ------- ENTER COMMITTED ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dbtc::execCOMMITTED(Signal* signal) -{ - TcConnectRecordPtr localTcConnectptr; - ApiConnectRecordPtr localApiConnectptr; - - UintR TtcConnectFilesize = ctcConnectFilesize; - UintR TapiConnectFilesize = capiConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - -#ifdef ERROR_INSERT - if (ERROR_INSERTED(8018)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - if (ERROR_INSERTED(8030)) { - systemErrorLab(signal, __LINE__); - }//if - if (ERROR_INSERTED(8025)) { - SET_ERROR_INSERT_VALUE(8026); - return; - }//if - if (ERROR_INSERTED(8041)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 3); - return; - }//if - if (ERROR_INSERTED(8042)) { - SET_ERROR_INSERT_VALUE(8046); - sendSignalWithDelay(cownref, GSN_COMMITTED, signal, 2000, 4); - return; - }//if -#endif - localTcConnectptr.i = signal->theData[0]; - jamEntry(); - ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - localApiConnectptr.i = localTcConnectptr.p->apiConnect; - if (localTcConnectptr.p->tcConnectstate != OS_COMMITTING) { - warningReport(signal, 4); - return; - }//if - ptrCheckGuard(localApiConnectptr, TapiConnectFilesize, - localApiConnectRecord); - UintR Tcounter = localApiConnectptr.p->counter - 1; - ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate; - UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1]; - UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2]; - Tdata1 = Tdata1 | Tdata2; - bool TcheckCondition = - (TapiConnectstate != CS_COMMIT_SENT) || (Tcounter != 0); - - setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__); - localApiConnectptr.p->counter = Tcounter; - localTcConnectptr.p->tcConnectstate = OS_COMMITTED; - if (Tdata1 != 0) { - warningReport(signal, 5); - return; - }//if - if (TcheckCondition) { - jam(); - /*-------------------------------------------------------*/ - // We have not sent all COMMIT requests yet. We could be - // in the state that all sent are COMMITTED but we are - // still waiting for a CONTINUEB to send the rest of the - // COMMIT requests. - /*-------------------------------------------------------*/ - return; - }//if - if (ERROR_INSERTED(8020)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - /*-------------------------------------------------------*/ - /* THE ENTIRE TRANSACTION IS NOW COMMITED */ - /* NOW WE NEED TO SEND THE RESPONSE TO THE APPLICATION. */ - /* THE APPLICATION CAN THEN REUSE THE API CONNECTION AND */ - /* THEREFORE WE NEED TO MOVE THE API CONNECTION TO A */ - /* NEW API CONNECT RECORD. 
*/ - /*-------------------------------------------------------*/ - - apiConnectptr = localApiConnectptr; - sendApiCommit(signal); - - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - localTcConnectptr.i = regApiPtr->firstTcConnect; - UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec; - ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - regApiPtr->counter = Tlqhkeyconfrec; - - tcConnectptr = localTcConnectptr; - complete010Lab(signal); - return; - -}//Dbtc::execCOMMITTED() - -/*-------------------------------------------------------*/ -/* SEND_API_COMMIT */ -/* SEND COMMIT DECISION TO THE API. */ -/*-------------------------------------------------------*/ -void Dbtc::sendApiCommit(Signal* signal) -{ - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - - if (regApiPtr->returnsignal == RS_TCKEYCONF) { - sendtckeyconf(signal, 1); - } else if (regApiPtr->returnsignal == RS_TC_COMMITCONF) { - jam(); - TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0]; - if(regApiPtr->commitAckMarker == RNIL){ - jam(); - commitConf->apiConnectPtr = regApiPtr->ndbapiConnect; - } else { - jam(); - commitConf->apiConnectPtr = regApiPtr->ndbapiConnect | 1; - } - commitConf->transId1 = regApiPtr->transid[0]; - commitConf->transId2 = regApiPtr->transid[1]; - commitConf->gci = regApiPtr->globalcheckpointid; - - sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal, - TcCommitConf::SignalLength, JBB); - } else if (regApiPtr->returnsignal == RS_NO_RETURN) { - jam(); - } else { - TCKEY_abort(signal, 37); - return; - }//if - UintR TapiConnectFilesize = capiConnectFilesize; - UintR TcommitCount = c_counters.ccommitCount; - UintR TapiIndex = apiConnectptr.i; - UintR TnewApiIndex = regApiPtr->apiCopyRecord; - UintR TapiFailState = regApiPtr->apiFailState; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - tmpApiConnectptr.p = apiConnectptr.p; - tmpApiConnectptr.i = TapiIndex; - c_counters.ccommitCount = TcommitCount + 1; - apiConnectptr.i = TnewApiIndex; - ptrCheckGuard(apiConnectptr, TapiConnectFilesize, localApiConnectRecord); - copyApi(signal); - if (TapiFailState != ZTRUE) { - return; - } else { - jam(); - handleApiFailState(signal, tmpApiConnectptr.i); - return; - }//if -}//Dbtc::sendApiCommit() - -/* ========================================================================= */ -/* ======= COPY_API ======= */ -/* COPY API RECORD ALSO RESET THE OLD API RECORD SO THAT IT */ -/* IS PREPARED TO RECEIVE A NEW TRANSACTION. 
*/ -/*===========================================================================*/ -void Dbtc::copyApi(Signal* signal) -{ - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p; - - UintR TndbapiConnect = regTmpApiPtr->ndbapiConnect; - UintR TfirstTcConnect = regTmpApiPtr->firstTcConnect; - UintR Ttransid1 = regTmpApiPtr->transid[0]; - UintR Ttransid2 = regTmpApiPtr->transid[1]; - UintR Tlqhkeyconfrec = regTmpApiPtr->lqhkeyconfrec; - UintR TgcpPointer = regTmpApiPtr->gcpPointer; - UintR TgcpFilesize = cgcpFilesize; - UintR TcommitAckMarker = regTmpApiPtr->commitAckMarker; - NdbNodeBitmask Tnodes = regTmpApiPtr->m_transaction_nodes; - GcpRecord *localGcpRecord = gcpRecord; - - regApiPtr->ndbapiBlockref = regTmpApiPtr->ndbapiBlockref; - regApiPtr->ndbapiConnect = TndbapiConnect; - regApiPtr->firstTcConnect = TfirstTcConnect; - regApiPtr->apiConnectstate = CS_COMPLETING; - regApiPtr->transid[0] = Ttransid1; - regApiPtr->transid[1] = Ttransid2; - regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec; - regApiPtr->commitAckMarker = TcommitAckMarker; - regApiPtr->m_transaction_nodes = Tnodes; - regApiPtr->singleUserMode = 0; - - gcpPtr.i = TgcpPointer; - ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord); - unlinkApiConnect(signal); - linkApiToGcp(signal); - setApiConTimer(tmpApiConnectptr.i, 0, __LINE__); - regTmpApiPtr->apiConnectstate = CS_CONNECTED; - regTmpApiPtr->commitAckMarker = RNIL; - regTmpApiPtr->firstTcConnect = RNIL; - regTmpApiPtr->lastTcConnect = RNIL; - regTmpApiPtr->m_transaction_nodes.clear(); - regTmpApiPtr->singleUserMode = 0; - releaseAllSeizedIndexOperations(regTmpApiPtr); -}//Dbtc::copyApi() - -void Dbtc::unlinkApiConnect(Signal* signal) -{ - ApiConnectRecordPtr localApiConnectptr; - ApiConnectRecord * const regTmpApiPtr = tmpApiConnectptr.p; - UintR TapiConnectFilesize = capiConnectFilesize; - UintR TprevGcpConnect = regTmpApiPtr->prevGcpConnect; - UintR TnextGcpConnect = regTmpApiPtr->nextGcpConnect; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - - if (TprevGcpConnect == RNIL) { - gcpPtr.p->firstApiConnect = TnextGcpConnect; - jam(); - } else { - localApiConnectptr.i = TprevGcpConnect; - jam(); - ptrCheckGuard(localApiConnectptr, - TapiConnectFilesize, localApiConnectRecord); - localApiConnectptr.p->nextGcpConnect = TnextGcpConnect; - }//if - if (TnextGcpConnect == RNIL) { - gcpPtr.p->lastApiConnect = TprevGcpConnect; - jam(); - } else { - localApiConnectptr.i = TnextGcpConnect; - jam(); - ptrCheckGuard(localApiConnectptr, - TapiConnectFilesize, localApiConnectRecord); - localApiConnectptr.p->prevGcpConnect = TprevGcpConnect; - }//if -}//Dbtc::unlinkApiConnect() - -void Dbtc::complete010Lab(Signal* signal) -{ - TcConnectRecordPtr localTcConnectptr; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - UintR TtcConnectFilesize = ctcConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - - localTcConnectptr.p = tcConnectptr.p; - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - UintR TapiConnectptrIndex = apiConnectptr.i; - UintR Tcount = 0; - do { - localTcConnectptr.p->apiConnect = TapiConnectptrIndex; - localTcConnectptr.p->tcConnectstate = OS_COMPLETING; - - /* ************ */ - /* COMPLETE < */ - /* ************ */ - const Uint32 nextTcConnect = localTcConnectptr.p->nextTcConnect; - sendCompleteLqh(signal, localTcConnectptr.p); - localTcConnectptr.i = nextTcConnect; - if (localTcConnectptr.i != RNIL) { - Tcount++; - if (Tcount < 16) { - ptrCheckGuard(localTcConnectptr, 
- TtcConnectFilesize, localTcConnectRecord); - jam(); - continue; - } else { - jam(); - if (ERROR_INSERTED(8013)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - signal->theData[0] = TcContinueB::ZSEND_COMPLETE_LOOP; - signal->theData[1] = apiConnectptr.i; - signal->theData[2] = localTcConnectptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - }//if - } else { - jam(); - regApiPtr->apiConnectstate = CS_COMPLETE_SENT; - return; - }//if - } while (1); -}//Dbtc::complete010Lab() - -void Dbtc::sendCompleteLqh(Signal* signal, - TcConnectRecord * const regTcPtr) -{ - HostRecordPtr Thostptr; - UintR ThostFilesize = chostFilesize; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - Thostptr.i = regTcPtr->lastLqhNodeId; - ptrCheckGuard(Thostptr, ThostFilesize, hostRecord); - if (Thostptr.p->noOfPackedWordsLqh > 22) { - jam(); - sendPackedSignalLqh(signal, Thostptr.p); - } else { - jam(); - updatePackedList(signal, Thostptr.p, Thostptr.i); - }//if - - UintR Tindex = Thostptr.p->noOfPackedWordsLqh; - UintR* TDataPtr = &Thostptr.p->packedWordsLqh[Tindex]; - UintR Tdata1 = regTcPtr->lastLqhCon | (ZCOMPLETE << 28); - UintR Tdata2 = regApiPtr->transid[0]; - UintR Tdata3 = regApiPtr->transid[1]; - - TDataPtr[0] = Tdata1; - TDataPtr[1] = Tdata2; - TDataPtr[2] = Tdata3; - Thostptr.p->noOfPackedWordsLqh = Tindex + 3; -}//Dbtc::sendCompleteLqh() - -void -Dbtc::execTC_COMMIT_ACK(Signal* signal){ - jamEntry(); - - CommitAckMarker key; - key.transid1 = signal->theData[0]; - key.transid2 = signal->theData[1]; - - CommitAckMarkerPtr removedMarker; - m_commitAckMarkerHash.remove(removedMarker, key); - if (removedMarker.i == RNIL) { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - sendRemoveMarkers(signal, removedMarker.p); - m_commitAckMarkerPool.release(removedMarker); -} - -void -Dbtc::sendRemoveMarkers(Signal* signal, const CommitAckMarker * marker){ - jam(); - const Uint32 noOfLqhs = marker->noOfLqhs; - const Uint32 transId1 = marker->transid1; - const Uint32 transId2 = marker->transid2; - - for(Uint32 i = 0; i < noOfLqhs; i++){ - jam(); - const NodeId nodeId = marker->lqhNodeId[i]; - sendRemoveMarker(signal, nodeId, transId1, transId2); - } -} - -void -Dbtc::sendRemoveMarker(Signal* signal, - NodeId nodeId, - Uint32 transid1, - Uint32 transid2){ - /** - * Seize host ptr - */ - HostRecordPtr hostPtr; - const UintR ThostFilesize = chostFilesize; - hostPtr.i = nodeId; - ptrCheckGuard(hostPtr, ThostFilesize, hostRecord); - - if (hostPtr.p->noOfPackedWordsLqh > (25 - 3)){ - jam(); - sendPackedSignalLqh(signal, hostPtr.p); - } else { - jam(); - updatePackedList(signal, hostPtr.p, hostPtr.i); - }//if - - UintR numWord = hostPtr.p->noOfPackedWordsLqh; - UintR* dataPtr = &hostPtr.p->packedWordsLqh[numWord]; - - dataPtr[0] = (ZREMOVE_MARKER << 28); - dataPtr[1] = transid1; - dataPtr[2] = transid2; - hostPtr.p->noOfPackedWordsLqh = numWord + 3; -} - -void Dbtc::execCOMPLETED(Signal* signal) -{ - TcConnectRecordPtr localTcConnectptr; - ApiConnectRecordPtr localApiConnectptr; - - UintR TtcConnectFilesize = ctcConnectFilesize; - UintR TapiConnectFilesize = capiConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - -#ifdef ERROR_INSERT - if (ERROR_INSERTED(8031)) { - systemErrorLab(signal, __LINE__); - }//if - if (ERROR_INSERTED(8019)) { - CLEAR_ERROR_INSERT_VALUE; - return; - }//if - if (ERROR_INSERTED(8027)) { - SET_ERROR_INSERT_VALUE(8028); - return; - }//if - if (ERROR_INSERTED(8043)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref,
GSN_COMPLETED, signal, 2000, 3); - return; - }//if - if (ERROR_INSERTED(8044)) { - SET_ERROR_INSERT_VALUE(8047); - sendSignalWithDelay(cownref, GSN_COMPLETED, signal, 2000, 3); - return; - }//if -#endif - localTcConnectptr.i = signal->theData[0]; - jamEntry(); - ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - bool Tcond1 = (localTcConnectptr.p->tcConnectstate != OS_COMPLETING); - localApiConnectptr.i = localTcConnectptr.p->apiConnect; - if (Tcond1) { - warningReport(signal, 6); - return; - }//if - ptrCheckGuard(localApiConnectptr, TapiConnectFilesize, - localApiConnectRecord); - UintR Tdata1 = localApiConnectptr.p->transid[0] - signal->theData[1]; - UintR Tdata2 = localApiConnectptr.p->transid[1] - signal->theData[2]; - UintR Tcounter = localApiConnectptr.p->counter - 1; - ConnectionState TapiConnectstate = localApiConnectptr.p->apiConnectstate; - Tdata1 = Tdata1 | Tdata2; - bool TcheckCondition = - (TapiConnectstate != CS_COMPLETE_SENT) || (Tcounter != 0); - if (Tdata1 != 0) { - warningReport(signal, 7); - return; - }//if - setApiConTimer(localApiConnectptr.i, ctcTimer, __LINE__); - localApiConnectptr.p->counter = Tcounter; - localTcConnectptr.p->tcConnectstate = OS_COMPLETED; - localTcConnectptr.p->noOfNodes = 0; // == releaseNodes(signal) - if (TcheckCondition) { - jam(); - /*-------------------------------------------------------*/ - // We have not sent all COMPLETE requests yet. We could be - // in the state that all sent are COMPLETED but we are - // still waiting for a CONTINUEB to send the rest of the - // COMPLETE requests. - /*-------------------------------------------------------*/ - return; - }//if - if (ERROR_INSERTED(8021)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - apiConnectptr = localApiConnectptr; - releaseTransResources(signal); -}//Dbtc::execCOMPLETED() - -/*---------------------------------------------------------------------------*/ -/* RELEASE_TRANS_RESOURCES */ -/* RELEASE ALL RESOURCES THAT ARE CONNECTED TO THIS TRANSACTION. */ -/*---------------------------------------------------------------------------*/ -void Dbtc::releaseTransResources(Signal* signal) -{ - TcConnectRecordPtr localTcConnectptr; - UintR TtcConnectFilesize = ctcConnectFilesize; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - apiConnectptr.p->m_transaction_nodes.clear(); - localTcConnectptr.i = apiConnectptr.p->firstTcConnect; - do { - jam(); - ptrCheckGuard(localTcConnectptr, TtcConnectFilesize, localTcConnectRecord); - UintR rtrTcConnectptrIndex = localTcConnectptr.p->nextTcConnect; - tcConnectptr.i = localTcConnectptr.i; - tcConnectptr.p = localTcConnectptr.p; - localTcConnectptr.i = rtrTcConnectptrIndex; - releaseTcCon(); - } while (localTcConnectptr.i != RNIL); - handleGcp(signal); - releaseFiredTriggerData(&apiConnectptr.p->theFiredTriggers); - releaseAllSeizedIndexOperations(apiConnectptr.p); - releaseApiConCopy(signal); -}//Dbtc::releaseTransResources() - -/* *********************************************************************>> */ -/* MODULE: HANDLE_GCP */ -/* DESCRIPTION: HANDLES GLOBAL CHECKPOINT HANDLING AT THE COMPLETION */ -/* OF THE COMMIT PHASE AND THE ABORT PHASE. WE MUST ENSURE THAT TC */ -/* SENDS GCP_TCFINISHED WHEN ALL TRANSACTIONS BELONGING TO A CERTAIN */ -/* GLOBAL CHECKPOINT HAVE COMPLETED. 
*/ -/* *********************************************************************>> */ -void Dbtc::handleGcp(Signal* signal) -{ - GcpRecord *localGcpRecord = gcpRecord; - GcpRecordPtr localGcpPtr; - UintR TapiConnectptrIndex = apiConnectptr.i; - UintR TgcpFilesize = cgcpFilesize; - localGcpPtr.i = apiConnectptr.p->gcpPointer; - tmpApiConnectptr.i = TapiConnectptrIndex; - tmpApiConnectptr.p = apiConnectptr.p; - ptrCheckGuard(localGcpPtr, TgcpFilesize, localGcpRecord); - gcpPtr.i = localGcpPtr.i; - gcpPtr.p = localGcpPtr.p; - unlinkApiConnect(signal); - if (localGcpPtr.p->firstApiConnect == RNIL) { - if (localGcpPtr.p->gcpNomoretransRec == ZTRUE) { - jam(); - tcheckGcpId = localGcpPtr.p->gcpId; - gcpTcfinished(signal); - unlinkGcp(signal); - }//if - }//if -}//Dbtc::handleGcp() - -void Dbtc::releaseApiConCopy(Signal* signal) -{ - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - UintR TfirstfreeApiConnectCopyOld = cfirstfreeApiConnectCopy; - cfirstfreeApiConnectCopy = apiConnectptr.i; - regApiPtr->nextApiConnect = TfirstfreeApiConnectCopyOld; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - regApiPtr->apiConnectstate = CS_RESTART; -}//Dbtc::releaseApiConCopy() - -/* ========================================================================= */ -/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY WRITE OPERATION ------- */ -/* ========================================================================= */ -void Dbtc::releaseDirtyWrite(Signal* signal) -{ - unlinkReadyTcCon(signal); - releaseTcCon(); - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { - if (regApiPtr->firstTcConnect == RNIL) { - jam(); - regApiPtr->apiConnectstate = CS_CONNECTED; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - sendtckeyconf(signal, 1); - }//if - }//if -}//Dbtc::releaseDirtyWrite() - -/***************************************************************************** - * L Q H K E Y R E F - * WHEN LQHKEYREF IS RECEIVED DBTC WILL CHECK IF COMMIT FLAG WAS SENT FROM THE - * APPLICATION. IF SO, THE WHOLE TRANSACTION WILL BE ROLLED BACK AND SIGNAL - * TCROLLBACKREP WILL BE SENT TO THE API. - * - * OTHERWISE TC WILL CHECK THE ERRORCODE. IF THE ERRORCODE IS INDICATING THAT - * THE "ROW IS NOT FOUND" FOR UPDATE/READ/DELETE OPERATIONS AND "ROW ALREADY - * EXISTS" FOR INSERT OPERATIONS, DBTC WILL RELEASE THE OPERATION AND THEN - * SEND RETURN SIGNAL TCKEYREF TO THE USER. THE USER THEN HAVE TO SEND - * SIGNAL TC_COMMITREQ OR TC_ROLLBACKREQ TO CONCLUDE THE TRANSACTION. - * IF ANY TCKEYREQ WITH COMMIT IS RECEIVED AND API_CONNECTSTATE EQUALS - * "REC_LQHREFUSE", - * THE OPERATION WILL BE TREATED AS AN OPERATION WITHOUT COMMIT. WHEN ANY - * OTHER FAULTCODE IS RECEIVED THE WHOLE TRANSACTION MUST BE ROLLED BACK - *****************************************************************************/ -void Dbtc::execLQHKEYREF(Signal* signal) -{ - const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr(); - Uint32 indexId = 0; - jamEntry(); - - UintR compare_transid1, compare_transid2; - UintR TtcConnectFilesize = ctcConnectFilesize; - /*------------------------------------------------------------------------- - * - * RELEASE NODE BUFFER(S) TO INDICATE THAT THIS OPERATION HAVE NO - * TRANSACTION PARTS ACTIVE ANYMORE. - * LQHKEYREF HAVE CLEARED ALL PARTS ON ITS PATH BACK TO TC. 
- *-------------------------------------------------------------------------*/ - if (lqhKeyRef->connectPtr < TtcConnectFilesize) { - /*----------------------------------------------------------------------- - * WE HAVE TO CHECK THAT THE TRANSACTION IS STILL VALID. FIRST WE CHECK - * THAT THE LQH IS STILL CONNECTED TO A TC, IF THIS HOLDS TRUE THEN THE - * TC MUST BE CONNECTED TO AN API CONNECT RECORD. - * WE MUST ENSURE THAT THE TRANSACTION ID OF THIS API CONNECT - * RECORD IS STILL THE SAME AS THE ONE LQHKEYREF REFERS TO. - * IF NOT SIMPLY EXIT AND FORGET THE SIGNAL SINCE THE TRANSACTION IS - * ALREADY COMPLETED (ABORTED). - *-----------------------------------------------------------------------*/ - tcConnectptr.i = lqhKeyRef->connectPtr; - Uint32 errCode = terrorCode = lqhKeyRef->errorCode; - ptrAss(tcConnectptr, tcConnectRecord); - TcConnectRecord * const regTcPtr = tcConnectptr.p; - if (regTcPtr->tcConnectstate == OS_OPERATING) { - Uint32 save = apiConnectptr.i = regTcPtr->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - compare_transid1 = regApiPtr->transid[0] ^ lqhKeyRef->transId1; - compare_transid2 = regApiPtr->transid[1] ^ lqhKeyRef->transId2; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 25); - return; - }//if - - const ConnectionState state = regApiPtr->apiConnectstate; - const Uint32 triggeringOp = regTcPtr->triggeringOperation; - if (triggeringOp != RNIL) { - jam(); - // This operation was created by a trigger execting operation - TcConnectRecordPtr opPtr; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - - const Uint32 currentIndexId = regTcPtr->currentIndexId; - ndbassert(currentIndexId != 0); // Only index triggers so far - - opPtr.i = triggeringOp; - ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); - - // The operation executed an index trigger - TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId); - indexId = indexData->indexId; - regApiPtr->errorData = indexId; - const Uint32 opType = regTcPtr->operation; - if (errCode == ZALREADYEXIST) - errCode = terrorCode = ZNOTUNIQUE; - else if (!(opType == ZDELETE && errCode == ZNOT_FOUND)) { - jam(); - /** - * "Normal path" - */ - // fall-through - } else { - jam(); - /** ZDELETE && NOT_FOUND */ - if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){ - jam(); - /** - * Ignore error - */ - regApiPtr->lqhkeyconfrec++; - - unlinkReadyTcCon(signal); - releaseTcCon(); - - opPtr.p->triggerExecutionCount--; - if (opPtr.p->triggerExecutionCount == 0) { - /** - * We have completed current trigger execution - * Continue triggering operation - */ - jam(); - continueTriggeringOp(signal, opPtr.p); - } - return; - } - } - } - - Uint32 marker = regTcPtr->commitAckMarker; - markOperationAborted(regApiPtr, regTcPtr); - - if(regApiPtr->apiConnectstate == CS_ABORTING){ - /** - * We're already aborting' so don't send an "extra" TCKEYREF - */ - jam(); - return; - } - - const Uint32 abort = regTcPtr->m_execAbortOption; - if (abort == TcKeyReq::AbortOnError || triggeringOp != RNIL) { - /** - * No error is allowed on this operation - */ - TCKEY_abort(signal, 49); - return; - }//if - - if (marker != RNIL){ - /** - * This was an insert/update/delete/write which failed - * that contained the marker - * Currently unsupported to place new marker - */ - TCKEY_abort(signal, 49); - return; - } - - /* Only ref in certain situations */ - { - const Uint32 opType 
= regTcPtr->operation; - if ( (opType == ZDELETE && errCode != ZNOT_FOUND) - || (opType == ZINSERT && errCode != ZALREADYEXIST) - || (opType == ZUPDATE && errCode != ZNOT_FOUND) - || (opType == ZWRITE && errCode != 839 && errCode != 840)) - { - TCKEY_abort(signal, 49); - return; - } - } - - /* *************** */ - /* TCKEYREF < */ - /* *************** */ - TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend(); - tcKeyRef->transId[0] = regApiPtr->transid[0]; - tcKeyRef->transId[1] = regApiPtr->transid[1]; - tcKeyRef->errorCode = terrorCode; - bool isIndexOp = regTcPtr->isIndexOp; - Uint32 indexOp = tcConnectptr.p->indexOp; - Uint32 clientData = regTcPtr->clientData; - unlinkReadyTcCon(signal); /* LINK TC CONNECT RECORD OUT OF */ - releaseTcCon(); /* RELEASE THE TC CONNECT RECORD */ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - if (isIndexOp) { - jam(); - regApiPtr->lqhkeyreqrec--; // Compensate for extra during read - tcKeyRef->connectPtr = indexOp; - tcKeyRef->errorData = indexId; - EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength); - apiConnectptr.i = save; - apiConnectptr.p = regApiPtr; - } else { - jam(); - tcKeyRef->connectPtr = clientData; - tcKeyRef->errorData = indexId; - sendSignal(regApiPtr->ndbapiBlockref, - GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB); - }//if - - /*--------------------------------------------------------------------- - * SINCE WE ARE NOT ABORTING WE NEED TO UPDATE THE COUNT OF HOW MANY - * LQHKEYREQ THAT HAVE RETURNED. - * IF NO MORE OUTSTANDING LQHKEYREQ'S THEN WE NEED TO - * TCKEYCONF (IF THERE IS ANYTHING TO SEND). - *---------------------------------------------------------------------*/ - regApiPtr->lqhkeyreqrec--; - if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) { - if (regApiPtr->apiConnectstate == CS_START_COMMITTING) { - jam(); - diverify010Lab(signal); - return; - } else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) { - jam(); - sendtckeyconf(signal, 2); - return; - } - }//if - return; - - } else { - warningReport(signal, 26); - }//if - } else { - errorReport(signal, 6); - }//if - return; -}//Dbtc::execLQHKEYREF() - -void Dbtc::clearCommitAckMarker(ApiConnectRecord * const regApiPtr, - TcConnectRecord * const regTcPtr) -{ - const Uint32 commitAckMarker = regTcPtr->commitAckMarker; - if (regApiPtr->commitAckMarker == RNIL) - ndbassert(commitAckMarker == RNIL); - if (commitAckMarker != RNIL) - ndbassert(regApiPtr->commitAckMarker != RNIL); - if(commitAckMarker != RNIL){ - jam(); - m_commitAckMarkerHash.release(commitAckMarker); - regTcPtr->commitAckMarker = RNIL; - regApiPtr->commitAckMarker = RNIL; - } -} - -void Dbtc::markOperationAborted(ApiConnectRecord * const regApiPtr, - TcConnectRecord * const regTcPtr) -{ - /*------------------------------------------------------------------------ - * RELEASE NODES TO INDICATE THAT THE OPERATION IS ALREADY ABORTED IN THE - * LQH'S ALSO SET STATE TO ABORTING TO INDICATE THE ABORT IS - * ALREADY COMPLETED. 
- *------------------------------------------------------------------------*/ - regTcPtr->noOfNodes = 0; // == releaseNodes(signal) - regTcPtr->tcConnectstate = OS_ABORTING; - clearCommitAckMarker(regApiPtr, regTcPtr); -} - -/*--------------------------------------*/ -/* EXIT AND WAIT FOR SIGNAL TCOMMITREQ */ -/* OR TCROLLBACKREQ FROM THE USER TO */ -/* CONTINUE THE TRANSACTION */ -/*--------------------------------------*/ -void Dbtc::execTC_COMMITREQ(Signal* signal) -{ - UintR compare_transid1, compare_transid2; - - jamEntry(); - apiConnectptr.i = signal->theData[0]; - if (apiConnectptr.i < capiConnectFilesize) { - ptrAss(apiConnectptr, apiConnectRecord); - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - jam(); - return; - }//if - - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - - const Uint32 apiConnectPtr = regApiPtr->ndbapiConnect; - const Uint32 apiBlockRef = regApiPtr->ndbapiBlockref; - const Uint32 transId1 = regApiPtr->transid[0]; - const Uint32 transId2 = regApiPtr->transid[1]; - Uint32 errorCode = 0; - - regApiPtr->m_exec_flag = 1; - switch (regApiPtr->apiConnectstate) { - case CS_STARTED: - tcConnectptr.i = regApiPtr->firstTcConnect; - if (tcConnectptr.i != RNIL) { - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) { - jam(); - /*******************************************************************/ - // The proper case where the application is waiting for commit or - // abort order. - // Start the commit order. - /*******************************************************************/ - regApiPtr->returnsignal = RS_TC_COMMITCONF; - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - diverify010Lab(signal); - return; - } else { - jam(); - /*******************************************************************/ - // The transaction is started but not all operations are completed. - // It is not possible to commit the transaction in this state. - // We will abort it instead. - /*******************************************************************/ - regApiPtr->returnsignal = RS_NO_RETURN; - errorCode = ZTRANS_STATUS_ERROR; - abort010Lab(signal); - }//if - } else { - jam(); - /** - * No operations, accept commit - */ - TcCommitConf * const commitConf = (TcCommitConf *)&signal->theData[0]; - commitConf->apiConnectPtr = apiConnectPtr; - commitConf->transId1 = transId1; - commitConf->transId2 = transId2; - commitConf->gci = 0; - sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal, - TcCommitConf::SignalLength, JBB); - - regApiPtr->returnsignal = RS_NO_RETURN; - releaseAbortResources(signal); - return; - }//if - break; - case CS_RECEIVING: - jam(); - /***********************************************************************/ - // A transaction is still receiving data. We cannot commit an unfinished - // transaction. We will abort it instead. 
- /***********************************************************************/ - regApiPtr->returnsignal = RS_NO_RETURN; - errorCode = ZPREPAREINPROGRESS; - abort010Lab(signal); - break; - - case CS_START_COMMITTING: - case CS_COMMITTING: - case CS_COMMIT_SENT: - case CS_COMPLETING: - case CS_COMPLETE_SENT: - case CS_REC_COMMITTING: - case CS_PREPARE_TO_COMMIT: - jam(); - /***********************************************************************/ - // The transaction is already performing a commit but it is not concluded - // yet. - /***********************************************************************/ - errorCode = ZCOMMITINPROGRESS; - break; - case CS_ABORTING: - jam(); - errorCode = regApiPtr->returncode ? - regApiPtr->returncode : ZABORTINPROGRESS; - break; - case CS_START_SCAN: - jam(); - /***********************************************************************/ - // The transaction is a scan. Scans cannot commit - /***********************************************************************/ - errorCode = ZSCANINPROGRESS; - break; - case CS_PREPARED: - jam(); - return; - case CS_START_PREPARING: - jam(); - return; - case CS_REC_PREPARING: - jam(); - return; - break; - default: - warningHandlerLab(signal, __LINE__); - return; - }//switch - TcCommitRef * const commitRef = (TcCommitRef*)&signal->theData[0]; - commitRef->apiConnectPtr = apiConnectPtr; - commitRef->transId1 = transId1; - commitRef->transId2 = transId2; - commitRef->errorCode = errorCode; - sendSignal(apiBlockRef, GSN_TC_COMMITREF, signal, - TcCommitRef::SignalLength, JBB); - return; - } else /** apiConnectptr.i < capiConnectFilesize */ { - jam(); - warningHandlerLab(signal, __LINE__); - return; - } -}//Dbtc::execTC_COMMITREQ() - -/** - * TCROLLBACKREQ - * - * Format is: - * - * thedata[0] = apiconnectptr - * thedata[1] = transid[0] - * thedata[2] = transid[1] - * OPTIONAL thedata[3] = flags - * - * Flags: - * 0x1 = potentiallyBad data from API (try not to assert) - */ -void Dbtc::execTCROLLBACKREQ(Signal* signal) -{ - bool potentiallyBad= false; - UintR compare_transid1, compare_transid2; - - jamEntry(); - - if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1))) - { - ndbout_c("Trying to roll back potentially bad txn\n"); - potentiallyBad= true; - } - - apiConnectptr.i = signal->theData[0]; - if (apiConnectptr.i >= capiConnectFilesize) { - goto TC_ROLL_warning; - }//if - ptrAss(apiConnectptr, apiConnectRecord); - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - jam(); - return; - }//if - - apiConnectptr.p->m_exec_flag = 1; - switch (apiConnectptr.p->apiConnectstate) { - case CS_STARTED: - case CS_RECEIVING: - jam(); - apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF; - abort010Lab(signal); - return; - case CS_CONNECTED: - jam(); - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF, - signal, 3, JBB); - break; - case CS_START_SCAN: - case CS_PREPARE_TO_COMMIT: - case CS_COMMITTING: - case CS_COMMIT_SENT: - case CS_COMPLETING: - case CS_COMPLETE_SENT: - case CS_WAIT_COMMIT_CONF: - case CS_WAIT_COMPLETE_CONF: - case CS_RESTART: - case CS_DISCONNECTED: - case CS_START_COMMITTING: - case CS_REC_COMMITTING: - jam(); - /* ***************< */ - /* TC_ROLLBACKREF < */ - /* 
***************< */ - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - signal->theData[3] = ZROLLBACKNOTALLOWED; - signal->theData[4] = apiConnectptr.p->apiConnectstate; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKREF, - signal, 5, JBB); - break; - /* SEND A REFUSAL SIGNAL*/ - case CS_ABORTING: - jam(); - if (apiConnectptr.p->abortState == AS_IDLE) { - jam(); - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_TCROLLBACKCONF, - signal, 3, JBB); - } else { - jam(); - apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF; - }//if - break; - case CS_WAIT_ABORT_CONF: - jam(); - apiConnectptr.p->returnsignal = RS_TCROLLBACKCONF; - break; - case CS_START_PREPARING: - jam(); - case CS_PREPARED: - jam(); - case CS_REC_PREPARING: - jam(); - default: - goto TC_ROLL_system_error; - break; - }//switch - return; - -TC_ROLL_warning: - jam(); - if(likely(potentiallyBad==false)) - warningHandlerLab(signal, __LINE__); - return; - -TC_ROLL_system_error: - jam(); - if(likely(potentiallyBad==false)) - systemErrorLab(signal, __LINE__); - return; -}//Dbtc::execTCROLLBACKREQ() - -void Dbtc::execTC_HBREP(Signal* signal) -{ - const TcHbRep * const tcHbRep = - (TcHbRep *)signal->getDataPtr(); - - jamEntry(); - apiConnectptr.i = tcHbRep->apiConnectPtr; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - - if (apiConnectptr.p->transid[0] == tcHbRep->transId1 && - apiConnectptr.p->transid[1] == tcHbRep->transId2){ - - if (getApiConTimer(apiConnectptr.i) != 0){ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - } else { - DEBUG("TCHBREP received when timer was off apiConnectptr.i=" - << apiConnectptr.i); - } - } -}//Dbtc::execTCHBREP() - -/* -4.3.15 ABORT ------------ -*/ -/*****************************************************************************/ -/* A B O R T */ -/* */ -/*****************************************************************************/ -void Dbtc::warningReport(Signal* signal, int place) -{ - switch (place) { - case 0: - jam(); -#ifdef ABORT_TRACE - ndbout << "ABORTED to not active TC record" << endl; -#endif - break; - case 1: - jam(); -#ifdef ABORT_TRACE - ndbout << "ABORTED to TC record active with new transaction" << endl; -#endif - break; - case 2: - jam(); -#ifdef ABORT_TRACE - ndbout << "ABORTED to active TC record not expecting ABORTED" << endl; -#endif - break; - case 3: - jam(); -#ifdef ABORT_TRACE - ndbout << "ABORTED to TC rec active with trans but wrong node" << endl; - ndbout << "This is ok when aborting in node failure situations" << endl; -#endif - break; - case 4: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMMITTED in wrong state in Dbtc" << endl; -#endif - break; - case 5: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMMITTED with wrong transid in Dbtc" << endl; -#endif - break; - case 6: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMPLETED in wrong state in Dbtc" << endl; -#endif - break; - case 7: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMPLETED with wrong transid in Dbtc" << endl; -#endif - break; - case 8: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMMITCONF with tc-rec in wrong state in Dbtc" << endl; -#endif - break; - case 9: - jam(); -#ifdef ABORT_TRACE - ndbout << "Received COMMITCONF with api-rec in wrong state in Dbtc" 
<theData[0]; - UintR Tnodeid = signal->theData[3]; - UintR TlastLqhInd = signal->theData[4]; - - if (ERROR_INSERTED(8040)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_ABORTED, signal, 2000, 5); - return; - }//if - /*------------------------------------------------------------------------ - * ONE PARTICIPANT IN THE TRANSACTION HAS REPORTED THAT IT IS ABORTED. - *------------------------------------------------------------------------*/ - if (tcConnectptr.i >= ctcConnectFilesize) { - errorReport(signal, 0); - return; - }//if - /*------------------------------------------------------------------------- - * WE HAVE TO CHECK THAT THIS IS NOT AN OLD SIGNAL BELONGING TO A - * TRANSACTION ALREADY ABORTED. THIS CAN HAPPEN WHEN TIME-OUT OCCURS - * IN TC WAITING FOR ABORTED. - *-------------------------------------------------------------------------*/ - ptrAss(tcConnectptr, tcConnectRecord); - if (tcConnectptr.p->tcConnectstate != OS_ABORT_SENT) { - warningReport(signal, 2); - return; - /*-----------------------------------------------------------------------*/ - // ABORTED reported on an operation not expecting ABORT. - /*-----------------------------------------------------------------------*/ - }//if - apiConnectptr.i = tcConnectptr.p->apiConnect; - if (apiConnectptr.i >= capiConnectFilesize) { - warningReport(signal, 0); - return; - }//if - ptrAss(apiConnectptr, apiConnectRecord); - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 1); - return; - }//if - if (ERROR_INSERTED(8024)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - - /** - * Release marker - */ - clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p); - - Uint32 i; - Uint32 Tfound = 0; - for (i = 0; i < tcConnectptr.p->noOfNodes; i++) { - jam(); - if (tcConnectptr.p->tcNodedata[i] == Tnodeid) { - /*--------------------------------------------------------------------- - * We have received ABORTED from one of the participants in this - * operation in this aborted transaction. - * Record all nodes that have completed abort. - * If last indicator is set it means that no more replica has - * heard of the operation and are thus also aborted. - *---------------------------------------------------------------------*/ - jam(); - Tfound = 1; - clearTcNodeData(signal, TlastLqhInd, i); - }//if - }//for - if (Tfound == 0) { - warningReport(signal, 3); - return; - } - for (i = 0; i < tcConnectptr.p->noOfNodes; i++) { - if (tcConnectptr.p->tcNodedata[i] != 0) { - /*-------------------------------------------------------------------- - * There are still outstanding ABORTED's to wait for. - *--------------------------------------------------------------------*/ - jam(); - return; - }//if - }//for - tcConnectptr.p->noOfNodes = 0; - tcConnectptr.p->tcConnectstate = OS_ABORTING; - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - apiConnectptr.p->counter--; - if (apiConnectptr.p->counter > 0) { - jam(); - /*---------------------------------------------------------------------- - * WE ARE STILL WAITING FOR MORE PARTICIPANTS TO SEND ABORTED. - *----------------------------------------------------------------------*/ - return; - }//if - /*------------------------------------------------------------------------*/ - /* */ - /* WE HAVE NOW COMPLETED THE ABORT PROCESS. 
WE HAVE RECEIVED ABORTED */ - /* FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL */ - /* RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE */ - /*------------------------------------------------------------------------*/ - releaseAbortResources(signal); -}//Dbtc::execABORTED() - -void Dbtc::clearTcNodeData(Signal* signal, - UintR TLastLqhIndicator, - UintR Tstart) -{ - UintR Ti; - if (TLastLqhIndicator == ZTRUE) { - for (Ti = Tstart ; Ti < tcConnectptr.p->noOfNodes; Ti++) { - jam(); - tcConnectptr.p->tcNodedata[Ti] = 0; - }//for - } else { - jam(); - tcConnectptr.p->tcNodedata[Tstart] = 0; - }//for -}//clearTcNodeData() - -void Dbtc::abortErrorLab(Signal* signal) -{ - ptrGuard(apiConnectptr); - ApiConnectRecord * transP = apiConnectptr.p; - if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){ - jam(); - return; - } - transP->returnsignal = RS_TCROLLBACKREP; - if(transP->returncode == 0){ - jam(); - transP->returncode = terrorCode; - } - abort010Lab(signal); -}//Dbtc::abortErrorLab() - -void Dbtc::abort010Lab(Signal* signal) -{ - ApiConnectRecord * transP = apiConnectptr.p; - if (transP->apiConnectstate == CS_ABORTING && transP->abortState != AS_IDLE){ - jam(); - return; - } - transP->apiConnectstate = CS_ABORTING; - /*------------------------------------------------------------------------*/ - /* AN ABORT DECISION HAS BEEN TAKEN FOR SOME REASON. WE NEED TO ABORT */ - /* ALL PARTICIPANTS IN THE TRANSACTION. */ - /*------------------------------------------------------------------------*/ - transP->abortState = AS_ACTIVE; - transP->counter = 0; - - if (transP->firstTcConnect == RNIL) { - jam(); - /*--------------------------------------------------------------------*/ - /* WE HAVE NO PARTICIPANTS IN THE TRANSACTION. */ - /*--------------------------------------------------------------------*/ - releaseAbortResources(signal); - return; - }//if - tcConnectptr.i = transP->firstTcConnect; - abort015Lab(signal); -}//Dbtc::abort010Lab() - -/*--------------------------------------------------------------------------*/ -/* */ -/* WE WILL ABORT ONE NODE PER OPERATION AT A TIME. THIS IS TO KEEP */ -/* ERROR HANDLING OF THIS PROCESS FAIRLY SIMPLE AND TRACTABLE. */ -/* EVEN IF NO NODE OF THIS PARTICULAR NODE NUMBER NEEDS ABORTION WE */ -/* MUST ENSURE THAT ALL NODES ARE CHECKED. THUS A FAULTY NODE DOES */ -/* NOT MEAN THAT ALL NODES IN AN OPERATION IS ABORTED. FOR THIS REASON*/ -/* WE SET THE TCONTINUE_ABORT TO TRUE WHEN A FAULTY NODE IS DETECTED. */ -/*--------------------------------------------------------------------------*/ -void Dbtc::abort015Lab(Signal* signal) -{ - Uint32 TloopCount = 0; -ABORT020: - jam(); - TloopCount++; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - switch (tcConnectptr.p->tcConnectstate) { - case OS_WAIT_DIH: - case OS_WAIT_KEYINFO: - case OS_WAIT_ATTR: - jam(); - /*----------------------------------------------------------------------*/ - /* WE ARE STILL WAITING FOR MORE KEYINFO/ATTRINFO. WE HAVE NOT CONTACTED*/ - /* ANY LQH YET AND SO WE CAN SIMPLY SET STATE TO ABORTING. */ - /*----------------------------------------------------------------------*/ - tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal) - tcConnectptr.p->tcConnectstate = OS_ABORTING; - break; - case OS_CONNECTED: - jam(); - /*----------------------------------------------------------------------- - * WE ARE STILL IN THE INITIAL PHASE OF THIS OPERATION. - * NEED NOT BOTHER ABOUT ANY LQH ABORTS. 
- *-----------------------------------------------------------------------*/ - tcConnectptr.p->noOfNodes = 0; // == releaseAbort(signal) - tcConnectptr.p->tcConnectstate = OS_ABORTING; - break; - case OS_PREPARED: - jam(); - case OS_OPERATING: - jam(); - /*---------------------------------------------------------------------- - * WE HAVE SENT LQHKEYREQ AND ARE IN SOME STATE OF EITHER STILL - * SENDING THE OPERATION, WAITING FOR REPLIES, WAITING FOR MORE - * ATTRINFO OR OPERATION IS PREPARED. WE NEED TO ABORT ALL LQH'S. - *----------------------------------------------------------------------*/ - releaseAndAbort(signal); - tcConnectptr.p->tcConnectstate = OS_ABORT_SENT; - TloopCount += 127; - break; - case OS_ABORTING: - jam(); - break; - case OS_ABORT_SENT: - jam(); - DEBUG("ABORT_SENT state in abort015Lab(), not expected"); - systemErrorLab(signal, __LINE__); - return; - default: - jam(); - DEBUG("tcConnectstate = " << tcConnectptr.p->tcConnectstate); - systemErrorLab(signal, __LINE__); - return; - }//switch - - if (tcConnectptr.p->nextTcConnect != RNIL) { - jam(); - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - if (TloopCount < 1024) { - goto ABORT020; - } else { - jam(); - /*--------------------------------------------------------------------- - * Reset timer to avoid time-out in real-time break. - * Increase counter to ensure that we don't think that all ABORTED have - * been received before all have been sent. - *---------------------------------------------------------------------*/ - apiConnectptr.p->counter++; - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - signal->theData[0] = TcContinueB::ZABORT_BREAK; - signal->theData[1] = tcConnectptr.i; - signal->theData[2] = apiConnectptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - }//if - }//if - if (apiConnectptr.p->counter > 0) { - jam(); - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - return; - }//if - /*----------------------------------------------------------------------- - * WE HAVE NOW COMPLETED THE ABORT PROCESS. WE HAVE RECEIVED ABORTED - * FROM ALL PARTICIPANTS IN THE TRANSACTION. WE CAN NOW RELEASE ALL - * RESOURCES CONNECTED TO THE TRANSACTION AND SEND THE ABORT RESPONSE - *------------------------------------------------------------------------*/ - releaseAbortResources(signal); -}//Dbtc::abort015Lab() - -/*--------------------------------------------------------------------------*/ -/* RELEASE KEY AND ATTRINFO OBJECTS AND SEND ABORT TO THE LQH BLOCK. 
*/ -/*--------------------------------------------------------------------------*/ -int Dbtc::releaseAndAbort(Signal* signal) -{ - HostRecordPtr localHostptr; - UintR TnoLoops = tcConnectptr.p->noOfNodes; - - apiConnectptr.p->counter++; - bool prevAlive = false; - for (Uint32 Ti = 0; Ti < TnoLoops ; Ti++) { - localHostptr.i = tcConnectptr.p->tcNodedata[Ti]; - ptrCheckGuard(localHostptr, chostFilesize, hostRecord); - if (localHostptr.p->hostStatus == HS_ALIVE) { - jam(); - if (prevAlive) { - // if previous is alive, its LQH forwards abort to this node - jam(); - continue; - } - /* ************< */ - /* ABORT < */ - /* ************< */ - tblockref = calcLqhBlockRef(localHostptr.i); - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - signal->theData[2] = apiConnectptr.p->transid[0]; - signal->theData[3] = apiConnectptr.p->transid[1]; - sendSignal(tblockref, GSN_ABORT, signal, 4, JBB); - prevAlive = true; - } else { - jam(); - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - signal->theData[3] = localHostptr.i; - signal->theData[4] = ZFALSE; - sendSignal(cownref, GSN_ABORTED, signal, 5, JBB); - prevAlive = false; - }//if - }//for - return 1; -}//Dbtc::releaseAndAbort() - -/* ------------------------------------------------------------------------- */ -/* ------- ENTER TIME_SIGNAL ------- */ -/* */ -/* ------------------------------------------------------------------------- */ -void Dbtc::execTIME_SIGNAL(Signal* signal) -{ - - jamEntry(); - ctcTimer++; - if (csystemStart != SSS_TRUE) { - jam(); - return; - }//if - checkStartTimeout(signal); - checkStartFragTimeout(signal); -}//Dbtc::execTIME_SIGNAL() - -/*------------------------------------------------*/ -/* Start timeout handling if not already going on */ -/*------------------------------------------------*/ -void Dbtc::checkStartTimeout(Signal* signal) -{ - ctimeOutCheckCounter++; - if (ctimeOutCheckActive == TOCS_TRUE) { - jam(); - // Check heartbeat of timeout loop - if(ctimeOutCheckHeartbeat > ctimeOutCheckLastHeartbeat){ - jam(); - ctimeOutMissedHeartbeats = 0; - }else{ - jam(); - ctimeOutMissedHeartbeats++; - if (ctimeOutMissedHeartbeats > 100){ - jam(); - systemErrorLab(signal, __LINE__); - } - } - ctimeOutCheckLastHeartbeat = ctimeOutCheckHeartbeat; - return; - }//if - if (ctimeOutCheckCounter < ctimeOutCheckDelay) { - jam(); - /*------------------------------------------------------------------*/ - /* */ - /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */ - /*------------------------------------------------------------------*/ - return; - }//if - ctimeOutCheckActive = TOCS_TRUE; - ctimeOutCheckCounter = 0; - timeOutLoopStartLab(signal, 0); // 0 is first api connect record - return; -}//Dbtc::execTIME_SIGNAL() - -/*----------------------------------------------------------------*/ -/* Start fragment (scan) timeout handling if not already going on */ -/*----------------------------------------------------------------*/ -void Dbtc::checkStartFragTimeout(Signal* signal) -{ - ctimeOutCheckFragCounter++; - if (ctimeOutCheckFragActive == TOCS_TRUE) { - jam(); - return; - }//if - if (ctimeOutCheckFragCounter < ctimeOutCheckDelay) { - jam(); - /*------------------------------------------------------------------*/ - /* NO TIME-OUT CHECKED THIS TIME. WAIT MORE. */ - /*------------------------------------------------------------------*/ - return; - }//if - - // Go through the fragment records and look for timeout in a scan. 
- ctimeOutCheckFragActive = TOCS_TRUE; - ctimeOutCheckFragCounter = 0; - timeOutLoopStartFragLab(signal, 0); // 0 means first scan record -}//checkStartFragTimeout() - -/*------------------------------------------------------------------*/ -/* IT IS NOW TIME TO CHECK WHETHER ANY TRANSACTIONS HAVE */ -/* BEEN DELAYED FOR SO LONG THAT WE ARE FORCED TO PERFORM */ -/* SOME ACTION, EITHER ABORT OR RESEND OR REMOVE A NODE FROM */ -/* THE WAITING PART OF A PROTOCOL. */ -/* -The algorithm used here is to check 1024 transactions at a time before -doing a real-time break. -To avoid aborting both transactions in a deadlock detected by time-out -we insert a random extra time-out of upto 630 ms by using the lowest -six bits of the api connect reference. -We spread it out from 0 to 630 ms if base time-out is larger than 3 sec, -we spread it out from 0 to 70 ms if base time-out is smaller than 300 msec, -and otherwise we spread it out 310 ms. -*/ -/*------------------------------------------------------------------*/ -void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr) -{ - Uint32 end_ptr, time_passed, time_out_value, mask_value; - Uint32 old_mask_value= 0; - const Uint32 api_con_sz= capiConnectFilesize; - const Uint32 tc_timer= ctcTimer; - const Uint32 time_out_param= ctimeOutValue; - const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; - - ctimeOutCheckHeartbeat = tc_timer; - - if (api_con_ptr + 1024 < api_con_sz) { - jam(); - end_ptr= api_con_ptr + 1024; - } else { - jam(); - end_ptr= api_con_sz; - } - if (time_out_param > 300) { - jam(); - mask_value= 63; - } else if (time_out_param < 30) { - jam(); - mask_value= 7; - } else { - jam(); - mask_value= 31; - } - if (time_out_param != old_time_out_param && - getNodeState().getSingleUserMode()) - { - // abort during single user mode, use old_mask_value as flag - // and calculate value to be used for connections with allowed api - if (old_time_out_param > 300) { - jam(); - old_mask_value= 63; - } else if (old_time_out_param < 30) { - jam(); - old_mask_value= 7; - } else { - jam(); - old_mask_value= 31; - } - } - for ( ; api_con_ptr < end_ptr; api_con_ptr++) { - Uint32 api_timer= getApiConTimer(api_con_ptr); - jam(); - if (api_timer != 0) { - Uint32 error= ZTIME_OUT_ERROR; - time_out_value= time_out_param + (ndb_rand() & mask_value); - if (unlikely(old_mask_value)) // abort during single user mode - { - apiConnectptr.i = api_con_ptr; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - if ((getNodeState().getSingleUserApi() == - refToNode(apiConnectptr.p->ndbapiBlockref)) || - !(apiConnectptr.p->singleUserMode & (1 << NDB_SUM_LOCKED))) - { - // api allowed during single user, use original timeout - time_out_value= - old_time_out_param + (api_con_ptr & old_mask_value); - } - else - { - error= ZCLUSTER_IN_SINGLEUSER_MODE; - } - } - time_passed= tc_timer - api_timer; - if (time_passed > time_out_value) - { - jam(); - timeOutFoundLab(signal, api_con_ptr, error); - api_con_ptr++; - break; - } - } - } - if (api_con_ptr == api_con_sz) { - jam(); - /*------------------------------------------------------------------*/ - /* */ - /* WE HAVE NOW CHECKED ALL TRANSACTIONS FOR TIME-OUT AND ALSO */ - /* STARTED TIME-OUT HANDLING OF THOSE WE FOUND. WE ARE NOW */ - /* READY AND CAN WAIT FOR THE NEXT TIME-OUT CHECK. 
*/ - /*------------------------------------------------------------------*/ - ctimeOutCheckActive = TOCS_FALSE; - } else { - jam(); - sendContinueTimeOutControl(signal, api_con_ptr); - } - return; -}//Dbtc::timeOutLoopStartLab() - -void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode) -{ - apiConnectptr.i = TapiConPtr; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - /*------------------------------------------------------------------*/ - /* */ - /* THIS TRANSACTION HAVE EXPERIENCED A TIME-OUT AND WE NEED TO*/ - /* FIND OUT WHAT WE NEED TO DO BASED ON THE STATE INFORMATION.*/ - /*------------------------------------------------------------------*/ - DEBUG("[ H'" << hex << apiConnectptr.p->transid[0] - << " H'" << apiConnectptr.p->transid[1] << "] " << dec - << "Time-out in state = " << apiConnectptr.p->apiConnectstate - << " apiConnectptr.i = " << apiConnectptr.i - << " - exec: " << apiConnectptr.p->m_exec_flag - << " - place: " << c_apiConTimer_line[apiConnectptr.i] - << " code: " << errCode); - switch (apiConnectptr.p->apiConnectstate) { - case CS_STARTED: - if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec && - errCode != ZCLUSTER_IN_SINGLEUSER_MODE){ - jam(); - /* - We are waiting for application to continue the transaction. In this - particular state we will use the application timeout parameter rather - than the shorter Deadlock detection timeout. - */ - if (c_appl_timeout_value == 0 || - (ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) { - jam(); - return; - }//if - } - apiConnectptr.p->returnsignal = RS_TCROLLBACKREP; - apiConnectptr.p->returncode = errCode; - abort010Lab(signal); - return; - case CS_RECEIVING: - case CS_REC_COMMITTING: - case CS_START_COMMITTING: - jam(); - /*------------------------------------------------------------------*/ - /* WE ARE STILL IN THE PREPARE PHASE AND THE TRANSACTION HAS */ - /* NOT YET REACHED ITS COMMIT POINT. THUS IT IS NOW OK TO */ - /* START ABORTING THE TRANSACTION. ALSO START CHECKING THE */ - /* REMAINING TRANSACTIONS. */ - /*------------------------------------------------------------------*/ - terrorCode = errCode; - abortErrorLab(signal); - return; - case CS_COMMITTING: - jam(); - /*------------------------------------------------------------------*/ - // We are simply waiting for a signal in the job buffer. Only extreme - // conditions should get us here. We ignore it. - /*------------------------------------------------------------------*/ - case CS_COMPLETING: - jam(); - /*------------------------------------------------------------------*/ - // We are simply waiting for a signal in the job buffer. Only extreme - // conditions should get us here. We ignore it. - /*------------------------------------------------------------------*/ - case CS_PREPARE_TO_COMMIT: - { - jam(); - /*------------------------------------------------------------------*/ - /* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/ - /* KEEP WAITING SINCE THERE IS NO BETTER IDEA ON WHAT TO DO. */ - /* IF IT IS BLOCKED THEN NO TRANSACTION WILL PASS THIS GATE. */ - // To ensure against strange bugs we crash the system if we have passed - // time-out period by a factor of 10 and it is also at least 5 seconds. 
- /*------------------------------------------------------------------*/ - Uint32 time_passed = ctcTimer - getApiConTimer(apiConnectptr.i); - if (time_passed > 500 && - time_passed > (5 * cDbHbInterval) && - time_passed > (10 * ctimeOutValue)) - { - jam(); - systemErrorLab(signal, __LINE__); - }//if - break; - } - case CS_COMMIT_SENT: - jam(); - /*------------------------------------------------------------------*/ - /* WE HAVE SENT COMMIT TO A NUMBER OF NODES. WE ARE CURRENTLY */ - /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */ - /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMMIT SIGNAL */ - /* TO THOSE NODES THAT HAVE MISSED THE COMMIT SIGNAL DUE TO */ - /* A NODE FAILURE. */ - /*------------------------------------------------------------------*/ - tabortInd = ZCOMMIT_SETUP; - setupFailData(signal); - toCommitHandlingLab(signal); - return; - case CS_COMPLETE_SENT: - jam(); - /*--------------------------------------------------------------------*/ - /* WE HAVE SENT COMPLETE TO A NUMBER OF NODES. WE ARE CURRENTLY */ - /* WAITING FOR THEIR REPLY. WITH NODE RECOVERY SUPPORTED WE */ - /* WILL CHECK FOR CRASHED NODES AND RESEND THE COMPLETE SIGNAL */ - /* TO THOSE NODES THAT HAVE MISSED THE COMPLETE SIGNAL DUE TO */ - /* A NODE FAILURE. */ - /*--------------------------------------------------------------------*/ - tabortInd = ZCOMMIT_SETUP; - setupFailData(signal); - toCompleteHandlingLab(signal); - return; - case CS_ABORTING: - jam(); - /*------------------------------------------------------------------*/ - /* TIME-OUT DURING ABORT. WE NEED TO SEND ABORTED FOR ALL */ - /* NODES THAT HAVE FAILED BEFORE SENDING ABORTED. */ - /*------------------------------------------------------------------*/ - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - sendAbortedAfterTimeout(signal, 0); - break; - case CS_START_SCAN:{ - jam(); - - /* - We are waiting for application to continue the transaction. In this - particular state we will use the application timeout parameter rather - than the shorter Deadlock detection timeout. - */ - if (c_appl_timeout_value == 0 || - (ctcTimer - getApiConTimer(apiConnectptr.i)) <= c_appl_timeout_value) { - jam(); - return; - }//if - - ScanRecordPtr scanPtr; - scanPtr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanPtr, cscanrecFileSize, scanRecord); - scanError(signal, scanPtr, ZSCANTIME_OUT_ERROR); - break; - } - case CS_WAIT_ABORT_CONF: - jam(); - tcConnectptr.i = apiConnectptr.p->currentTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - /*------------------------------------------------------------------*/ - // Time-out waiting for ABORTCONF. We will resend the ABORTREQ just in - // case. - /*------------------------------------------------------------------*/ - warningReport(signal, 20); - apiConnectptr.p->timeOutCounter++; - if (apiConnectptr.p->timeOutCounter > 3) { - /*------------------------------------------------------------------*/ - // 100 time-outs are not acceptable. We will shoot down the node - // not responding. 
- /*------------------------------------------------------------------*/ - reportNodeFailed(signal, hostptr.i); - }//if - apiConnectptr.p->currentReplicaNo++; - }//if - tcurrentReplicaNo = (Uint8)Z8NIL; - toAbortHandlingLab(signal); - return; - case CS_WAIT_COMMIT_CONF: - jam(); - CRASH_INSERTION(8053); - tcConnectptr.i = apiConnectptr.p->currentTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - /*------------------------------------------------------------------*/ - // Time-out waiting for COMMITCONF. We will resend the COMMITREQ just in - // case. - /*------------------------------------------------------------------*/ - warningReport(signal, 21); - apiConnectptr.p->timeOutCounter++; - if (apiConnectptr.p->timeOutCounter > 3) { - /*------------------------------------------------------------------*/ - // 100 time-outs are not acceptable. We will shoot down the node - // not responding. - /*------------------------------------------------------------------*/ - reportNodeFailed(signal, hostptr.i); - }//if - apiConnectptr.p->currentReplicaNo++; - }//if - tcurrentReplicaNo = (Uint8)Z8NIL; - toCommitHandlingLab(signal); - return; - case CS_WAIT_COMPLETE_CONF: - jam(); - tcConnectptr.i = apiConnectptr.p->currentTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - hostptr.i = tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - /*------------------------------------------------------------------*/ - // Time-out waiting for COMPLETECONF. We will resend the COMPLETEREQ - // just in case. - /*------------------------------------------------------------------*/ - warningReport(signal, 22); - apiConnectptr.p->timeOutCounter++; - if (apiConnectptr.p->timeOutCounter > 100) { - /*------------------------------------------------------------------*/ - // 100 time-outs are not acceptable. We will shoot down the node - // not responding. - /*------------------------------------------------------------------*/ - reportNodeFailed(signal, hostptr.i); - }//if - apiConnectptr.p->currentReplicaNo++; - }//if - tcurrentReplicaNo = (Uint8)Z8NIL; - toCompleteHandlingLab(signal); - return; - case CS_FAIL_PREPARED: - jam(); - case CS_FAIL_COMMITTING: - jam(); - case CS_FAIL_COMMITTED: - jam(); - case CS_REC_PREPARING: - jam(); - case CS_START_PREPARING: - jam(); - case CS_PREPARED: - jam(); - case CS_RESTART: - jam(); - case CS_FAIL_ABORTED: - jam(); - case CS_DISCONNECTED: - jam(); - default: - jam(); - /*------------------------------------------------------------------*/ - /* AN IMPOSSIBLE STATE IS SET. CRASH THE SYSTEM. 
*/ - /*------------------------------------------------------------------*/ - DEBUG("State = " << apiConnectptr.p->apiConnectstate); - systemErrorLab(signal, __LINE__); - return; - }//switch - return; -}//Dbtc::timeOutFoundLab() - -void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck) -{ - ApiConnectRecord * transP = apiConnectptr.p; - if(transP->abortState == AS_IDLE){ - jam(); - warningEvent("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d", - __LINE__, - apiConnectptr.i, - transP->apiConnectstate, - c_apiConTimer_line[apiConnectptr.i], - transP->firstTcConnect, - c_apiConTimer[apiConnectptr.i] - ); - ndbout_c("TC: %d: %d state=%d abort==IDLE place: %d fop=%d t: %d", - __LINE__, - apiConnectptr.i, - transP->apiConnectstate, - c_apiConTimer_line[apiConnectptr.i], - transP->firstTcConnect, - c_apiConTimer[apiConnectptr.i] - ); - ndbrequire(false); - setApiConTimer(apiConnectptr.i, 0, __LINE__); - return; - } - - bool found = false; - OperationState tmp[16]; - - Uint32 TloopCount = 0; - do { - jam(); - if (tcConnectptr.i == RNIL) { - jam(); - -#ifdef VM_TRACE - ndbout_c("found: %d Tcheck: %d apiConnectptr.p->counter: %d", - found, Tcheck, apiConnectptr.p->counter); -#endif - if (found || apiConnectptr.p->counter) - { - jam(); - /** - * We sent atleast one ABORT/ABORTED - * or ZABORT_TIMEOUT_BREAK is in job buffer - * wait for reception... - */ - return; - } - - if (Tcheck == 1) - { - jam(); - releaseAbortResources(signal); - return; - } - - if (Tcheck == 0) - { - jam(); - /*------------------------------------------------------------------ - * All nodes had already reported ABORTED for all tcConnect records. - * Crash since it is an error situation that we then received a - * time-out. - *------------------------------------------------------------------*/ - char buf[96]; buf[0] = 0; - char buf2[96]; - BaseString::snprintf(buf, sizeof(buf), "TC %d: %d counter: %d ops:", - __LINE__, apiConnectptr.i, - apiConnectptr.p->counter); - for(Uint32 i = 0; i= 1024) { - jam(); - /*------------------------------------------------------------------*/ - // Insert a real-time break for large transactions to avoid blowing - // away the job buffer. - /*------------------------------------------------------------------*/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - apiConnectptr.p->counter++; - signal->theData[0] = TcContinueB::ZABORT_TIMEOUT_BREAK; - signal->theData[1] = tcConnectptr.i; - signal->theData[2] = apiConnectptr.i; - if (ERROR_INSERTED(8050)) - { - ndbout_c("sending ZABORT_TIMEOUT_BREAK delayed (%d %d)", - Tcheck, apiConnectptr.p->counter); - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 2000, 3); - } - else - { - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - } - return; - }//if - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - if(TloopCount < 16){ - jam(); - tmp[TloopCount-1] = tcConnectptr.p->tcConnectstate; - } - - if (tcConnectptr.p->tcConnectstate == OS_ABORT_SENT) { - jam(); - /*------------------------------------------------------------------*/ - // We have sent an ABORT signal to this node but not yet received any - // reply. We have to send an ABORTED signal on our own in some cases. - // If the node is declared as up and running and still do not respond - // in time to the ABORT signal we will declare it as dead. 
- /*------------------------------------------------------------------*/ - UintR Ti = 0; - arrGuard(tcConnectptr.p->noOfNodes, MAX_REPLICAS+1); - for (Ti = 0; Ti < tcConnectptr.p->noOfNodes; Ti++) { - jam(); - if (tcConnectptr.p->tcNodedata[Ti] != 0) { - TloopCount += 31; - found = true; - hostptr.i = tcConnectptr.p->tcNodedata[Ti]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - jam(); - /*--------------------------------------------------------------- - * A backup replica has not sent ABORTED. - * Could be that a node before him has crashed. - * Send an ABORT signal specifically to this node. - * We will not send to any more nodes after this - * to avoid race problems. - * To also ensure that we use this message also as a heartbeat - * we will move this node to the primary replica seat. - * The primary replica and any failed node after it will - * be removed from the node list. Update also number of nodes. - * Finally break the loop to ensure we don't mess - * things up by executing another loop. - * We also update the timer to ensure we don't get time-out - * too early. - *--------------------------------------------------------------*/ - BlockReference TBRef = calcLqhBlockRef(hostptr.i); - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - signal->theData[2] = apiConnectptr.p->transid[0]; - signal->theData[3] = apiConnectptr.p->transid[1]; - sendSignal(TBRef, GSN_ABORT, signal, 4, JBB); - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - break; - } else { - jam(); - /*-------------------------------------------------------------- - * The node we are waiting for is dead. We will send ABORTED to - * ourselves vicarious for the failed node. - *--------------------------------------------------------------*/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - signal->theData[3] = hostptr.i; - signal->theData[4] = ZFALSE; - sendSignal(cownref, GSN_ABORTED, signal, 5, JBB); - }//if - }//if - }//for - }//if - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - } while (1); -}//Dbtc::sendAbortedAfterTimeout() - -void Dbtc::reportNodeFailed(Signal* signal, Uint32 nodeId) -{ - DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0]; - rep->nodeId = nodeId; - rep->err = DisconnectRep::TcReportNodeFailed; - sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal, - DisconnectRep::SignalLength, JBB); -}//Dbtc::reportNodeFailed() - -/*-------------------------------------------------*/ -/* Timeout-loop for scanned fragments. 
*/ -/*-------------------------------------------------*/ -void Dbtc::timeOutLoopStartFragLab(Signal* signal, Uint32 TscanConPtr) -{ - ScanFragRecPtr timeOutPtr[8]; - UintR tfragTimer[8]; - UintR texpiredTime[8]; - UintR TloopCount = 0; - Uint32 TtcTimer = ctcTimer; - - while ((TscanConPtr + 8) < cscanFragrecFileSize) { - jam(); - timeOutPtr[0].i = TscanConPtr + 0; - timeOutPtr[1].i = TscanConPtr + 1; - timeOutPtr[2].i = TscanConPtr + 2; - timeOutPtr[3].i = TscanConPtr + 3; - timeOutPtr[4].i = TscanConPtr + 4; - timeOutPtr[5].i = TscanConPtr + 5; - timeOutPtr[6].i = TscanConPtr + 6; - timeOutPtr[7].i = TscanConPtr + 7; - - c_scan_frag_pool.getPtrForce(timeOutPtr[0]); - c_scan_frag_pool.getPtrForce(timeOutPtr[1]); - c_scan_frag_pool.getPtrForce(timeOutPtr[2]); - c_scan_frag_pool.getPtrForce(timeOutPtr[3]); - c_scan_frag_pool.getPtrForce(timeOutPtr[4]); - c_scan_frag_pool.getPtrForce(timeOutPtr[5]); - c_scan_frag_pool.getPtrForce(timeOutPtr[6]); - c_scan_frag_pool.getPtrForce(timeOutPtr[7]); - - tfragTimer[0] = timeOutPtr[0].p->scanFragTimer; - tfragTimer[1] = timeOutPtr[1].p->scanFragTimer; - tfragTimer[2] = timeOutPtr[2].p->scanFragTimer; - tfragTimer[3] = timeOutPtr[3].p->scanFragTimer; - tfragTimer[4] = timeOutPtr[4].p->scanFragTimer; - tfragTimer[5] = timeOutPtr[5].p->scanFragTimer; - tfragTimer[6] = timeOutPtr[6].p->scanFragTimer; - tfragTimer[7] = timeOutPtr[7].p->scanFragTimer; - - texpiredTime[0] = TtcTimer - tfragTimer[0]; - texpiredTime[1] = TtcTimer - tfragTimer[1]; - texpiredTime[2] = TtcTimer - tfragTimer[2]; - texpiredTime[3] = TtcTimer - tfragTimer[3]; - texpiredTime[4] = TtcTimer - tfragTimer[4]; - texpiredTime[5] = TtcTimer - tfragTimer[5]; - texpiredTime[6] = TtcTimer - tfragTimer[6]; - texpiredTime[7] = TtcTimer - tfragTimer[7]; - - for (Uint32 Ti = 0; Ti < 8; Ti++) { - jam(); - if (tfragTimer[Ti] != 0) { - - if (texpiredTime[Ti] > ctimeOutValue) { - jam(); - DEBUG("Fragment timeout found:"<< - " ctimeOutValue=" <theData[3]<<", "<theData[4]<<")"); - return; - }//if - - // Update timer on ScanFragRec - if (scanFragptr.p->scanFragTimer != 0){ - updateBuddyTimer(apiConnectptr); - scanFragptr.p->startFragTimer(ctcTimer); - } else { - ndbassert(false); - DEBUG("SCAN_HBREP when scanFragTimer was turned off"); - } -}//execSCAN_HBREP() - -/*--------------------------------------------------------------------------*/ -/* Timeout has occured on a fragment which means a scan has timed out. */ -/* If this is true we have an error in LQH/ACC. 
*/ -/*--------------------------------------------------------------------------*/ -void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr) -{ - ScanFragRecPtr ptr; - c_scan_frag_pool.getPtr(ptr, TscanConPtr); - DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState); - - const Uint32 time_out_param= ctimeOutValue; - const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue; - - if (unlikely(time_out_param != old_time_out_param && - getNodeState().getSingleUserMode())) - { - jam(); - ScanRecordPtr scanptr; - scanptr.i = ptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - ApiConnectRecordPtr TlocalApiConnectptr; - TlocalApiConnectptr.i = scanptr.p->scanApiRec; - ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord); - - if (refToNode(TlocalApiConnectptr.p->ndbapiBlockref) == - getNodeState().getSingleUserApi()) - { - jam(); - Uint32 val = ctcTimer - ptr.p->scanFragTimer; - if (val <= old_time_out_param) - { - jam(); - goto next; - } - } - } - - /*-------------------------------------------------------------------------*/ - // The scan fragment has expired its timeout. Check its state to decide - // what to do. - /*-------------------------------------------------------------------------*/ - switch (ptr.p->scanFragState) { - case ScanFragRec::WAIT_GET_PRIMCONF: - jam(); - ndbrequire(false); - break; - case ScanFragRec::LQH_ACTIVE:{ - jam(); - - /** - * The LQH expired it's timeout, try to close it - */ - Uint32 nodeId = refToNode(ptr.p->lqhBlockref); - Uint32 connectCount = getNodeInfo(nodeId).m_connectCount; - ScanRecordPtr scanptr; - scanptr.i = ptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - - if(connectCount != ptr.p->m_connectCount){ - jam(); - /** - * The node has died - */ - ptr.p->scanFragState = ScanFragRec::COMPLETED; - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(ptr); - ptr.p->stopFragTimer(); - } - - scanError(signal, scanptr, ZSCAN_FRAG_LQH_ERROR); - break; - } - case ScanFragRec::DELIVERED: - jam(); - case ScanFragRec::IDLE: - jam(); - case ScanFragRec::QUEUED_FOR_DELIVERY: - jam(); - /*----------------------------------------------------------------------- - * Should never occur. We will simply report set the timer to zero and - * continue. In a debug version we should crash here but not in a release - * version. In a release version we will simply set the time-out to zero. - *-----------------------------------------------------------------------*/ -#ifdef VM_TRACE - systemErrorLab(signal, __LINE__); -#endif - scanFragptr.p->stopFragTimer(); - break; - default: - jam(); - /*----------------------------------------------------------------------- - * Non-existent state. Crash. - *-----------------------------------------------------------------------*/ - systemErrorLab(signal, __LINE__); - break; - }//switch - -next: - signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL; - signal->theData[1] = TscanConPtr + 1; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - return; -}//timeOutFoundFragLab() - - -/* - 4.3.16 GCP_NOMORETRANS - ---------------------- -*/ -/***************************************************************************** - * G C P _ N O M O R E T R A N S - * - * WHEN DBTC RECEIVES SIGNAL GCP_NOMORETRANS A CHECK IS DONE TO FIND OUT IF - * THERE ARE ANY GLOBAL CHECKPOINTS GOING ON - CFIRSTGCP /= RNIL. 
DBTC THEN - * SEARCHES THE GCP_RECORD FILE TO FIND OUT IF THERE ARE ANY TRANSACTIONS NOT - * CONCLUDED WITH THIS SPECIFIC CHECKPOINT - GCP_PTR:GCP_ID = TCHECK_GCP_ID. - * FOR EACH TRANSACTION WHERE API_CONNECTSTATE EQUALS PREPARED, COMMITTING, - * COMMITTED OR COMPLETING SIGNAL CONTINUEB IS SENT WITH A DELAY OF 100 MS, - * THE COUNTER GCP_PTR:OUTSTANDINGAPI IS INCREASED. WHEN CONTINUEB IS RECEIVED - * THE COUNTER IS DECREASED AND A CHECK IS DONE TO FIND OUT IF ALL - * TRANSACTIONS ARE CONCLUDED. IF SO, SIGNAL GCP_TCFINISHED IS SENT. - *****************************************************************************/ -void Dbtc::execGCP_NOMORETRANS(Signal* signal) -{ - jamEntry(); - c_gcp_ref = signal->theData[0]; - tcheckGcpId = signal->theData[1]; - if (cfirstgcp != RNIL) { - jam(); - /* A GLOBAL CHECKPOINT IS GOING ON */ - gcpPtr.i = cfirstgcp; /* SET POINTER TO FIRST GCP IN QUEUE*/ - ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord); - if (gcpPtr.p->gcpId == tcheckGcpId) { - jam(); - if (gcpPtr.p->firstApiConnect != RNIL) { - jam(); - gcpPtr.p->gcpNomoretransRec = ZTRUE; - } else { - jam(); - gcpTcfinished(signal); - unlinkGcp(signal); - }//if - } else { - jam(); - /*------------------------------------------------------------*/ - /* IF IT IS NOT THE FIRST THEN THERE SHOULD BE NO */ - /* RECORD FOR THIS GLOBAL CHECKPOINT. WE ALWAYS REMOVE */ - /* THE GLOBAL CHECKPOINTS IN ORDER. */ - /*------------------------------------------------------------*/ - gcpTcfinished(signal); - }//if - } else { - jam(); - gcpTcfinished(signal); - }//if - return; -}//Dbtc::execGCP_NOMORETRANS() - -/*****************************************************************************/ -/* */ -/* TAKE OVER MODULE */ -/* */ -/*****************************************************************************/ -/* */ -/* THIS PART OF TC TAKES OVER THE COMMIT/ABORT OF TRANSACTIONS WHERE THE */ -/* NODE ACTING AS TC HAVE FAILED. IT STARTS BY QUERYING ALL NODES ABOUT */ -/* ANY OPERATIONS PARTICIPATING IN A TRANSACTION WHERE THE TC NODE HAVE */ -/* FAILED. */ -/* */ -/* AFTER RECEIVING INFORMATION FROM ALL NODES ABOUT OPERATION STATUS THIS */ -/* CODE WILL ENSURE THAT ALL AFFECTED TRANSACTIONS ARE PROPERLY ABORTED OR*/ -/* COMMITTED. THE ORIGINATING APPLICATION NODE WILL ALSO BE CONTACTED. */ -/* IF THE ORIGINATING APPLICATION ALSO FAILED THEN THERE IS CURRENTLY NO */ -/* WAY TO FIND OUT WHETHER A TRANSACTION WAS PERFORMED OR NOT. */ -/*****************************************************************************/ -void Dbtc::execNODE_FAILREP(Signal* signal) -{ - jamEntry(); - - NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0]; - - cfailure_nr = nodeFail->failNo; - const Uint32 tnoOfNodes = nodeFail->noOfNodes; - const Uint32 tnewMasterId = nodeFail->masterNodeId; - - arrGuard(tnoOfNodes, MAX_NDB_NODES); - Uint32 i; - int index = 0; - for (i = 1; i< MAX_NDB_NODES; i++) - { - if(NodeBitmask::get(nodeFail->theNodes, i)) - { - cdata[index] = i; - index++; - }//if - }//for - - cmasterNodeId = tnewMasterId; - - tcNodeFailptr.i = 0; - ptrAss(tcNodeFailptr, tcFailRecord); - for (i = 0; i < tnoOfNodes; i++) - { - jam(); - hostptr.i = cdata[i]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - - /*------------------------------------------------------------*/ - /* SET STATUS OF THE FAILED NODE TO DEAD SINCE IT HAS */ - /* FAILED. 
*/ - /*------------------------------------------------------------*/ - hostptr.p->hostStatus = HS_DEAD; - hostptr.p->m_nf_bits = HostRecord::NF_NODE_FAIL_BITS; - c_alive_nodes.clear(hostptr.i); - - if (tcNodeFailptr.p->failStatus == FS_LISTENING) - { - jam(); - /*------------------------------------------------------------*/ - /* THE CURRENT TAKE OVER CAN BE AFFECTED BY THIS NODE */ - /* FAILURE. */ - /*------------------------------------------------------------*/ - if (hostptr.p->lqhTransStatus == LTS_ACTIVE) - { - jam(); - /*------------------------------------------------------------*/ - /* WE WERE WAITING FOR THE FAILED NODE IN THE TAKE OVER */ - /* PROTOCOL FOR TC. */ - /*------------------------------------------------------------*/ - signal->theData[0] = TcContinueB::ZNODE_TAKE_OVER_COMPLETED; - signal->theData[1] = hostptr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); - }//if - }//if - - if (getOwnNodeId() != tnewMasterId) - { - jam(); - /** - * Only master does takeover currently - */ - hostptr.p->m_nf_bits &= ~HostRecord::NF_TAKEOVER; - } - else - { - jam(); - signal->theData[0] = hostptr.i; - sendSignal(cownref, GSN_TAKE_OVERTCREQ, signal, 1, JBB); - } - - checkScanActiveInFailedLqh(signal, 0, hostptr.i); - checkWaitDropTabFailedLqh(signal, hostptr.i, 0); // nodeid, tableid - nodeFailCheckTransactions(signal, 0, hostptr.i); - } -}//Dbtc::execNODE_FAILREP() - -void -Dbtc::checkNodeFailComplete(Signal* signal, - Uint32 failedNodeId, - Uint32 bit) -{ - hostptr.i = failedNodeId; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - hostptr.p->m_nf_bits &= ~bit; - if (hostptr.p->m_nf_bits == 0) - { - NFCompleteRep * const nfRep = (NFCompleteRep *)&signal->theData[0]; - nfRep->blockNo = DBTC; - nfRep->nodeId = cownNodeid; - nfRep->failedNodeId = hostptr.i; - sendSignal(cdihblockref, GSN_NF_COMPLETEREP, signal, - NFCompleteRep::SignalLength, JBB); - } -} - -void Dbtc::checkScanActiveInFailedLqh(Signal* signal, - Uint32 scanPtrI, - Uint32 failedNodeId){ - - ScanRecordPtr scanptr; - for (scanptr.i = scanPtrI; scanptr.i < cscanrecFileSize; scanptr.i++) { - jam(); - ptrAss(scanptr, scanRecord); - bool found = false; - if (scanptr.p->scanState != ScanRecord::IDLE){ - jam(); - ScanFragRecPtr ptr; - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - for(run.first(ptr); !ptr.isNull(); ){ - jam(); - ScanFragRecPtr curr = ptr; - run.next(ptr); - if (curr.p->scanFragState == ScanFragRec::LQH_ACTIVE && - refToNode(curr.p->lqhBlockref) == failedNodeId){ - jam(); - - run.release(curr); - curr.p->scanFragState = ScanFragRec::COMPLETED; - curr.p->stopFragTimer(); - found = true; - } - } - - ScanFragList deliv(c_scan_frag_pool, scanptr.p->m_delivered_scan_frags); - for(deliv.first(ptr); !ptr.isNull(); deliv.next(ptr)) - { - jam(); - if (refToNode(ptr.p->lqhBlockref) == failedNodeId) - { - jam(); - found = true; - break; - } - } - } - if(found){ - jam(); - scanError(signal, scanptr, ZSCAN_LQH_ERROR); - } - - // Send CONTINUEB to continue later - signal->theData[0] = TcContinueB::ZCHECK_SCAN_ACTIVE_FAILED_LQH; - signal->theData[1] = scanptr.i + 1; // Check next scanptr - signal->theData[2] = failedNodeId; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - }//for - - checkNodeFailComplete(signal, failedNodeId, HostRecord::NF_CHECK_SCAN); -} - -void -Dbtc::nodeFailCheckTransactions(Signal* signal, - Uint32 transPtrI, - Uint32 failedNodeId) -{ - jam(); - Ptr transPtr; - Uint32 TtcTimer = ctcTimer; - Uint32 TapplTimeout = c_appl_timeout_value; - for 
(transPtr.i = transPtrI; transPtr.i < capiConnectFilesize; transPtr.i++) - { - ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord); - if (transPtr.p->m_transaction_nodes.get(failedNodeId)) - { - jam(); - - // Force timeout regardless of state - c_appl_timeout_value = 1; - setApiConTimer(transPtr.i, TtcTimer - 2, __LINE__); - timeOutFoundLab(signal, transPtr.i, ZNODEFAIL_BEFORE_COMMIT); - c_appl_timeout_value = TapplTimeout; - } - - // Send CONTINUEB to continue later - signal->theData[0] = TcContinueB::ZNF_CHECK_TRANSACTIONS; - signal->theData[1] = transPtr.i + 1; // Check next - signal->theData[2] = failedNodeId; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - } - - checkNodeFailComplete(signal, failedNodeId, - HostRecord::NF_CHECK_TRANSACTION); -} - - -void -Dbtc::checkScanFragList(Signal* signal, - Uint32 failedNodeId, - ScanRecord * scanP, - ScanFragList::Head & head){ - - DEBUG("checkScanActiveInFailedLqh: scanFragError"); -} - -void Dbtc::execTAKE_OVERTCCONF(Signal* signal) -{ - jamEntry(); - tfailedNodeId = signal->theData[0]; - hostptr.i = tfailedNodeId; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - - if (signal->getSendersBlockRef() != reference()) - { - jam(); - return; - } - - checkNodeFailComplete(signal, hostptr.i, HostRecord::NF_TAKEOVER); -}//Dbtc::execTAKE_OVERTCCONF() - -void Dbtc::execTAKE_OVERTCREQ(Signal* signal) -{ - jamEntry(); - tfailedNodeId = signal->theData[0]; - tcNodeFailptr.i = 0; - ptrAss(tcNodeFailptr, tcFailRecord); - if (tcNodeFailptr.p->failStatus != FS_IDLE) { - jam(); - /*------------------------------------------------------------*/ - /* WE CAN CURRENTLY ONLY HANDLE ONE TAKE OVER AT A TIME */ - /*------------------------------------------------------------*/ - /* IF MORE THAN ONE TAKE OVER IS REQUESTED WE WILL */ - /* QUEUE THE TAKE OVER AND START IT AS SOON AS THE */ - /* PREVIOUS ARE COMPLETED. */ - /*------------------------------------------------------------*/ - arrGuard(tcNodeFailptr.p->queueIndex, MAX_NDB_NODES); - tcNodeFailptr.p->queueList[tcNodeFailptr.p->queueIndex] = tfailedNodeId; - tcNodeFailptr.p->queueIndex = tcNodeFailptr.p->queueIndex + 1; - return; - }//if - startTakeOverLab(signal); -}//Dbtc::execTAKE_OVERTCREQ() - -/*------------------------------------------------------------*/ -/* INITIALISE THE HASH TABLES FOR STORING TRANSACTIONS */ -/* AND OPERATIONS DURING TC TAKE OVER. 
*/ -/*------------------------------------------------------------*/ -void Dbtc::startTakeOverLab(Signal* signal) -{ - for (tindex = 0; tindex <= 511; tindex++) { - ctransidFailHash[tindex] = RNIL; - }//for - for (tindex = 0; tindex <= 1023; tindex++) { - ctcConnectFailHash[tindex] = RNIL; - }//for - tcNodeFailptr.p->failStatus = FS_LISTENING; - tcNodeFailptr.p->takeOverNode = tfailedNodeId; - for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) { - jam(); - ptrAss(hostptr, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - jam(); - tblockref = calcLqhBlockRef(hostptr.i); - hostptr.p->lqhTransStatus = LTS_ACTIVE; - signal->theData[0] = tcNodeFailptr.i; - signal->theData[1] = cownref; - signal->theData[2] = tfailedNodeId; - sendSignal(tblockref, GSN_LQH_TRANSREQ, signal, 3, JBB); - }//if - }//for -}//Dbtc::startTakeOverLab() - -/*------------------------------------------------------------*/ -/* A REPORT OF AN OPERATION WHERE TC FAILED HAS ARRIVED.*/ -/*------------------------------------------------------------*/ -void Dbtc::execLQH_TRANSCONF(Signal* signal) -{ - jamEntry(); - LqhTransConf * const lqhTransConf = (LqhTransConf *)&signal->theData[0]; - - tcNodeFailptr.i = lqhTransConf->tcRef; - ptrCheckGuard(tcNodeFailptr, 1, tcFailRecord); - tnodeid = lqhTransConf->lqhNodeId; - ttransStatus = (LqhTransConf::OperationStatus)lqhTransConf->operationStatus; - ttransid1 = lqhTransConf->transId1; - ttransid2 = lqhTransConf->transId2; - ttcOprec = lqhTransConf->oldTcOpRec; - treqinfo = lqhTransConf->requestInfo; - tgci = lqhTransConf->gci; - cnodes[0] = lqhTransConf->nextNodeId1; - cnodes[1] = lqhTransConf->nextNodeId2; - cnodes[2] = lqhTransConf->nextNodeId3; - const Uint32 ref = tapplRef = lqhTransConf->apiRef; - tapplOprec = lqhTransConf->apiOpRec; - const Uint32 tableId = lqhTransConf->tableId; - - if (ttransStatus == LqhTransConf::LastTransConf){ - jam(); - /*------------------------------------------------------------*/ - /* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/ - /*------------------------------------------------------------*/ - nodeTakeOverCompletedLab(signal); - return; - }//if - if (ttransStatus == LqhTransConf::Marker){ - jam(); - treqinfo = 0; - LqhTransConf::setMarkerFlag(treqinfo, 1); - } else { - TableRecordPtr tabPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - switch((DictTabInfo::TableType)tabPtr.p->tableType){ - case DictTabInfo::SystemTable: - case DictTabInfo::UserTable: - break; - default: - tapplRef = 0; - tapplOprec = 0; - } - } - - findApiConnectFail(signal); - - if(apiConnectptr.p->ndbapiBlockref == 0 && tapplRef != 0){ - apiConnectptr.p->ndbapiBlockref = ref; - apiConnectptr.p->ndbapiConnect = tapplOprec; - } - - if (ttransStatus != LqhTransConf::Marker){ - jam(); - findTcConnectFail(signal); - } -}//Dbtc::execLQH_TRANSCONF() - -/*------------------------------------------------------------*/ -/* A NODE HAS REPORTED COMPLETION OF TAKE OVER REPORTING*/ -/*------------------------------------------------------------*/ -void Dbtc::nodeTakeOverCompletedLab(Signal* signal) -{ - Uint32 guard0; - - hostptr.i = tnodeid; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - hostptr.p->lqhTransStatus = LTS_IDLE; - for (hostptr.i = 1; hostptr.i < MAX_NDB_NODES; hostptr.i++) { - jam(); - ptrAss(hostptr, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - if (hostptr.p->lqhTransStatus == LTS_ACTIVE) { - jam(); - /*------------------------------------------------------------*/ - /* NOT ALL NODES ARE COMPLETED 
WITH REPORTING IN THE */ - /* TAKE OVER. */ - /*------------------------------------------------------------*/ - return; - }//if - }//if - }//for - /*------------------------------------------------------------*/ - /* ALL NODES HAVE REPORTED ON THE STATUS OF THE VARIOUS */ - /* OPERATIONS THAT WAS CONTROLLED BY THE FAILED TC. WE */ - /* ARE NOW IN A POSITION TO COMPLETE ALL OF THOSE */ - /* TRANSACTIONS EITHER IN A SUCCESSFUL WAY OR IN AN */ - /* UNSUCCESSFUL WAY. WE WILL ALSO REPORT THIS CONCLUSION*/ - /* TO THE APPLICATION IF THAT IS STILL ALIVE. */ - /*------------------------------------------------------------*/ - tcNodeFailptr.p->currentHashIndexTakeOver = 0; - tcNodeFailptr.p->completedTakeOver = 0; - tcNodeFailptr.p->failStatus = FS_COMPLETING; - guard0 = cnoParallelTakeOver - 1; - /*------------------------------------------------------------*/ - /* WE WILL COMPLETE THE TRANSACTIONS BY STARTING A */ - /* NUMBER OF PARALLEL ACTIVITIES. EACH ACTIVITY WILL */ - /* COMPLETE ONE TRANSACTION AT A TIME AND IN THAT */ - /* TRANSACTION IT WILL COMPLETE ONE OPERATION AT A TIME.*/ - /* WHEN ALL ACTIVITIES ARE COMPLETED THEN THE TAKE OVER */ - /* IS COMPLETED. */ - /*------------------------------------------------------------*/ - arrGuard(guard0, MAX_NDB_NODES); - for (tindex = 0; tindex <= guard0; tindex++) { - jam(); - tcNodeFailptr.p->takeOverProcState[tindex] = ZTAKE_OVER_ACTIVE; - signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER; - signal->theData[1] = tcNodeFailptr.i; - signal->theData[2] = tindex; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - }//for -}//Dbtc::nodeTakeOverCompletedLab() - -/*------------------------------------------------------------*/ -/* COMPLETE A NEW TRANSACTION FROM THE HASH TABLE OF */ -/* TRANSACTIONS TO COMPLETE. */ -/*------------------------------------------------------------*/ -void Dbtc::completeTransAtTakeOverLab(Signal* signal, UintR TtakeOverInd) -{ - jam(); - while (tcNodeFailptr.p->currentHashIndexTakeOver < 512){ - jam(); - apiConnectptr.i = - ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver]; - if (apiConnectptr.i != RNIL) { - jam(); - /*------------------------------------------------------------*/ - /* WE HAVE FOUND A TRANSACTION THAT NEEDS TO BE */ - /* COMPLETED. REMOVE IT FROM THE HASH TABLE SUCH THAT */ - /* NOT ANOTHER ACTIVITY ALSO TRIES TO COMPLETE THIS */ - /* TRANSACTION. */ - /*------------------------------------------------------------*/ - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - ctransidFailHash[tcNodeFailptr.p->currentHashIndexTakeOver] = - apiConnectptr.p->nextApiConnect; - - completeTransAtTakeOverDoOne(signal, TtakeOverInd); - // One transaction taken care of, return from this function - // and wait for the next CONTINUEB to continue processing - break; - - } else { - if (tcNodeFailptr.p->currentHashIndexTakeOver < 511){ - jam(); - tcNodeFailptr.p->currentHashIndexTakeOver++; - } else { - jam(); - completeTransAtTakeOverDoLast(signal, TtakeOverInd); - tcNodeFailptr.p->currentHashIndexTakeOver++; - }//if - }//if - }//while -}//Dbtc::completeTransAtTakeOverLab() - - - - -void Dbtc::completeTransAtTakeOverDoLast(Signal* signal, UintR TtakeOverInd) -{ - Uint32 guard0; - /*------------------------------------------------------------*/ - /* THERE ARE NO MORE TRANSACTIONS TO COMPLETE. THIS */ - /* ACTIVITY IS COMPLETED. 
*/ - /*------------------------------------------------------------*/ - arrGuard(TtakeOverInd, MAX_NDB_NODES); - if (tcNodeFailptr.p->takeOverProcState[TtakeOverInd] != ZTAKE_OVER_ACTIVE) { - jam(); - systemErrorLab(signal, __LINE__); - return; - }//if - tcNodeFailptr.p->takeOverProcState[TtakeOverInd] = ZTAKE_OVER_IDLE; - tcNodeFailptr.p->completedTakeOver++; - - if (tcNodeFailptr.p->completedTakeOver == cnoParallelTakeOver) { - jam(); - /*------------------------------------------------------------*/ - /* WE WERE THE LAST ACTIVITY THAT WAS COMPLETED. WE NEED*/ - /* TO REPORT THE COMPLETION OF THE TAKE OVER TO ALL */ - /* NODES THAT ARE ALIVE. */ - /*------------------------------------------------------------*/ - NodeReceiverGroup rg(DBTC, c_alive_nodes); - signal->theData[0] = tcNodeFailptr.p->takeOverNode; - sendSignal(rg, GSN_TAKE_OVERTCCONF, signal, 1, JBB); - - if (tcNodeFailptr.p->queueIndex > 0) { - jam(); - /*------------------------------------------------------------*/ - /* THERE ARE MORE NODES TO TAKE OVER. WE NEED TO START */ - /* THE TAKE OVER. */ - /*------------------------------------------------------------*/ - tfailedNodeId = tcNodeFailptr.p->queueList[0]; - guard0 = tcNodeFailptr.p->queueIndex - 1; - arrGuard(guard0 + 1, MAX_NDB_NODES); - for (tindex = 0; tindex <= guard0; tindex++) { - jam(); - tcNodeFailptr.p->queueList[tindex] = - tcNodeFailptr.p->queueList[tindex + 1]; - }//for - tcNodeFailptr.p->queueIndex--; - startTakeOverLab(signal); - return; - } else { - jam(); - tcNodeFailptr.p->failStatus = FS_IDLE; - }//if - }//if - return; -}//Dbtc::completeTransAtTakeOverDoLast() - -void Dbtc::completeTransAtTakeOverDoOne(Signal* signal, UintR TtakeOverInd) -{ - apiConnectptr.p->takeOverRec = (Uint8)tcNodeFailptr.i; - apiConnectptr.p->takeOverInd = TtakeOverInd; - - switch (apiConnectptr.p->apiConnectstate) { - case CS_FAIL_COMMITTED: - jam(); - /*------------------------------------------------------------*/ - /* ALL PARTS OF THE TRANSACTIONS REPORTED COMMITTED. WE */ - /* HAVE THUS COMPLETED THE COMMIT PHASE. WE CAN REPORT */ - /* COMMITTED TO THE APPLICATION AND CONTINUE WITH THE */ - /* COMPLETE PHASE. */ - /*------------------------------------------------------------*/ - sendTCKEY_FAILCONF(signal, apiConnectptr.p); - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - toCompleteHandlingLab(signal); - return; - case CS_FAIL_COMMITTING: - jam(); - /*------------------------------------------------------------*/ - /* AT LEAST ONE PART WAS ONLY PREPARED AND AT LEAST ONE */ - /* PART WAS COMMITTED. COMPLETE THE COMMIT PHASE FIRST. */ - /* THEN CONTINUE AS AFTER COMMITTED. */ - /*------------------------------------------------------------*/ - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - toCommitHandlingLab(signal); - return; - case CS_FAIL_ABORTING: - case CS_FAIL_PREPARED: - jam(); - /*------------------------------------------------------------*/ - /* WE WILL ABORT THE TRANSACTION IF IT IS IN A PREPARED */ - /* STATE IN THIS VERSION. 
IN LATER VERSIONS WE WILL */ - /* HAVE TO ADD CODE FOR HANDLING OF PREPARED-TO-COMMIT */ - /* TRANSACTIONS. THESE ARE NOT ALLOWED TO ABORT UNTIL WE*/ - /* HAVE HEARD FROM THE TRANSACTION COORDINATOR. */ - /* */ - /* IT IS POSSIBLE TO COMMIT TRANSACTIONS THAT ARE */ - /* PREPARED ACTUALLY. WE WILL LEAVE THIS PROBLEM UNTIL */ - /* LATER VERSIONS. */ - /*------------------------------------------------------------*/ - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - toAbortHandlingLab(signal); - return; - case CS_FAIL_ABORTED: - jam(); - sendTCKEY_FAILREF(signal, apiConnectptr.p); - - signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER; - signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec; - signal->theData[2] = apiConnectptr.p->takeOverInd; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - releaseTakeOver(signal); - break; - case CS_FAIL_COMPLETED: - jam(); - sendTCKEY_FAILCONF(signal, apiConnectptr.p); - - signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER; - signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec; - signal->theData[2] = apiConnectptr.p->takeOverInd; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - releaseApiConnectFail(signal); - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch -}//Dbtc::completeTransAtTakeOverDoOne() - -void -Dbtc::sendTCKEY_FAILREF(Signal* signal, const ApiConnectRecord * regApiPtr){ - jam(); - - const Uint32 ref = regApiPtr->ndbapiBlockref; - if(ref != 0){ - signal->theData[0] = regApiPtr->ndbapiConnect; - signal->theData[1] = regApiPtr->transid[0]; - signal->theData[2] = regApiPtr->transid[1]; - - sendSignal(ref, GSN_TCKEY_FAILREF, signal, 3, JBB); - } -} - -void -Dbtc::sendTCKEY_FAILCONF(Signal* signal, ApiConnectRecord * regApiPtr){ - jam(); - TcKeyFailConf * const failConf = (TcKeyFailConf *)&signal->theData[0]; - - const Uint32 ref = regApiPtr->ndbapiBlockref; - const Uint32 marker = regApiPtr->commitAckMarker; - if(ref != 0){ - failConf->apiConnectPtr = regApiPtr->ndbapiConnect | (marker != RNIL); - failConf->transId1 = regApiPtr->transid[0]; - failConf->transId2 = regApiPtr->transid[1]; - - sendSignal(regApiPtr->ndbapiBlockref, - GSN_TCKEY_FAILCONF, signal, TcKeyFailConf::SignalLength, JBB); - } - regApiPtr->commitAckMarker = RNIL; -} - -/*------------------------------------------------------------*/ -/* THIS PART HANDLES THE ABORT PHASE IN THE CASE OF A */ -/* NODE FAILURE BEFORE THE COMMIT DECISION. 
*/ -/*------------------------------------------------------------*/ -/* ABORT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */ -/*------------------------------------------------------------*/ -void Dbtc::execABORTCONF(Signal* signal) -{ - UintR compare_transid1, compare_transid2; - - jamEntry(); - tcConnectptr.i = signal->theData[0]; - tnodeid = signal->theData[2]; - if (ERROR_INSERTED(8045)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_ABORTCONF, signal, 2000, 5); - return; - }//if - if (tcConnectptr.i >= ctcConnectFilesize) { - errorReport(signal, 5); - return; - }//if - ptrAss(tcConnectptr, tcConnectRecord); - if (tcConnectptr.p->tcConnectstate != OS_WAIT_ABORT_CONF) { - warningReport(signal, 16); - return; - }//if - apiConnectptr.i = tcConnectptr.p->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - if (apiConnectptr.p->apiConnectstate != CS_WAIT_ABORT_CONF) { - warningReport(signal, 17); - return; - }//if - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[3]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[4]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 18); - return; - }//if - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] != - tnodeid) { - warningReport(signal, 19); - return; - }//if - tcurrentReplicaNo = (Uint8)Z8NIL; - tcConnectptr.p->tcConnectstate = OS_ABORTING; - toAbortHandlingLab(signal); -}//Dbtc::execABORTCONF() - -void Dbtc::toAbortHandlingLab(Signal* signal) -{ - do { - if (tcurrentReplicaNo != (Uint8)Z8NIL) { - jam(); - arrGuard(tcurrentReplicaNo, MAX_REPLICAS); - const LqhTransConf::OperationStatus stat = - (LqhTransConf::OperationStatus) - tcConnectptr.p->failData[tcurrentReplicaNo]; - switch(stat){ - case LqhTransConf::InvalidStatus: - case LqhTransConf::Aborted: - jam(); - /*empty*/; - break; - case LqhTransConf::Prepared: - jam(); - hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - jam(); - tblockref = calcLqhBlockRef(hostptr.i); - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - tcConnectptr.p->tcConnectstate = OS_WAIT_ABORT_CONF; - apiConnectptr.p->apiConnectstate = CS_WAIT_ABORT_CONF; - apiConnectptr.p->timeOutCounter = 0; - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - signal->theData[2] = apiConnectptr.p->transid[0]; - signal->theData[3] = apiConnectptr.p->transid[1]; - signal->theData[4] = apiConnectptr.p->tcBlockref; - signal->theData[5] = tcConnectptr.p->tcOprec; - sendSignal(tblockref, GSN_ABORTREQ, signal, 6, JBB); - return; - }//if - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - }//switch - }//if - if (apiConnectptr.p->currentReplicaNo > 0) { - jam(); - /*------------------------------------------------------------*/ - /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */ - /* ABORTED. */ - /*------------------------------------------------------------*/ - apiConnectptr.p->currentReplicaNo--; - tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo; - } else { - /*------------------------------------------------------------*/ - /* THE LAST REPLICA IN THIS OPERATION HAVE COMMITTED. 
*/ - /*------------------------------------------------------------*/ - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - if (tcConnectptr.i == RNIL) { - /*------------------------------------------------------------*/ - /* WE HAVE COMPLETED THE ABORT PHASE. WE CAN NOW REPORT */ - /* THE ABORT STATUS TO THE APPLICATION AND CONTINUE */ - /* WITH THE NEXT TRANSACTION. */ - /*------------------------------------------------------------*/ - if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) { - jam(); - sendTCKEY_FAILREF(signal, apiConnectptr.p); - const Uint32 marker = apiConnectptr.p->commitAckMarker; - if(marker != RNIL){ - jam(); - - CommitAckMarkerPtr tmp; - tmp.i = marker; - tmp.p = m_commitAckMarkerHash.getPtr(tmp.i); - - m_commitAckMarkerHash.release(tmp); - apiConnectptr.p->commitAckMarker = RNIL; - } - - /*------------------------------------------------------------*/ - /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */ - /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */ - /*------------------------------------------------------------*/ - signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER; - signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec; - signal->theData[2] = apiConnectptr.p->takeOverInd; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - releaseTakeOver(signal); - } else { - jam(); - releaseAbortResources(signal); - }//if - return; - }//if - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - }//if - } while (1); -}//Dbtc::toAbortHandlingLab() - -/*------------------------------------------------------------*/ -/* THIS PART HANDLES THE COMMIT PHASE IN THE CASE OF A */ -/* NODE FAILURE IN THE MIDDLE OF THE COMMIT PHASE. 
*/ -/*------------------------------------------------------------*/ -/* COMMIT REQUEST SUCCESSFULLY COMPLETED ON TNODEID */ -/*------------------------------------------------------------*/ -void Dbtc::execCOMMITCONF(Signal* signal) -{ - UintR compare_transid1, compare_transid2; - - jamEntry(); - tcConnectptr.i = signal->theData[0]; - tnodeid = signal->theData[1]; - if (ERROR_INSERTED(8046)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMMITCONF, signal, 2000, 4); - return; - }//if - if (tcConnectptr.i >= ctcConnectFilesize) { - errorReport(signal, 4); - return; - }//if - ptrAss(tcConnectptr, tcConnectRecord); - if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMMIT_CONF) { - warningReport(signal, 8); - return; - }//if - apiConnectptr.i = tcConnectptr.p->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMMIT_CONF) { - warningReport(signal, 9); - return; - }//if - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 10); - return; - }//if - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] != - tnodeid) { - warningReport(signal, 11); - return; - }//if - if (ERROR_INSERTED(8026)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - tcurrentReplicaNo = (Uint8)Z8NIL; - tcConnectptr.p->tcConnectstate = OS_COMMITTED; - toCommitHandlingLab(signal); -}//Dbtc::execCOMMITCONF() - -void Dbtc::toCommitHandlingLab(Signal* signal) -{ - do { - if (tcurrentReplicaNo != (Uint8)Z8NIL) { - jam(); - arrGuard(tcurrentReplicaNo, MAX_REPLICAS); - switch (tcConnectptr.p->failData[tcurrentReplicaNo]) { - case LqhTransConf::InvalidStatus: - jam(); - /*empty*/; - break; - case LqhTransConf::Committed: - jam(); - /*empty*/; - break; - case LqhTransConf::Prepared: - jam(); - /*------------------------------------------------------------*/ - /* THE NODE WAS PREPARED AND IS WAITING FOR ABORT OR */ - /* COMMIT REQUEST FROM TC. */ - /*------------------------------------------------------------*/ - hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - jam(); - tblockref = calcLqhBlockRef(hostptr.i); - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - apiConnectptr.p->apiConnectstate = CS_WAIT_COMMIT_CONF; - apiConnectptr.p->timeOutCounter = 0; - tcConnectptr.p->tcConnectstate = OS_WAIT_COMMIT_CONF; - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - signal->theData[2] = apiConnectptr.p->globalcheckpointid; - signal->theData[3] = apiConnectptr.p->transid[0]; - signal->theData[4] = apiConnectptr.p->transid[1]; - signal->theData[5] = apiConnectptr.p->tcBlockref; - signal->theData[6] = tcConnectptr.p->tcOprec; - sendSignal(tblockref, GSN_COMMITREQ, signal, 7, JBB); - return; - }//if - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - }//if - if (apiConnectptr.p->currentReplicaNo > 0) { - jam(); - /*------------------------------------------------------------*/ - /* THERE IS STILL ANOTHER REPLICA THAT NEEDS TO BE */ - /* COMMITTED. 
*/ - /*------------------------------------------------------------*/ - apiConnectptr.p->currentReplicaNo--; - tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo; - } else { - /*------------------------------------------------------------*/ - /* THE LAST REPLICA IN THIS OPERATION HAVE COMMITTED. */ - /*------------------------------------------------------------*/ - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - if (tcConnectptr.i == RNIL) { - /*------------------------------------------------------------*/ - /* WE HAVE COMPLETED THE COMMIT PHASE. WE CAN NOW REPORT*/ - /* THE COMMIT STATUS TO THE APPLICATION AND CONTINUE */ - /* WITH THE COMPLETE PHASE. */ - /*------------------------------------------------------------*/ - if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) { - jam(); - sendTCKEY_FAILCONF(signal, apiConnectptr.p); - } else { - jam(); - sendApiCommit(signal); - }//if - apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect; - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo; - toCompleteHandlingLab(signal); - return; - }//if - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - }//if - } while (1); -}//Dbtc::toCommitHandlingLab() - -/*------------------------------------------------------------*/ -/* COMMON PART TO HANDLE COMPLETE PHASE WHEN ANY NODE */ -/* HAVE FAILED. */ -/*------------------------------------------------------------*/ -/* THE NODE WITH TNODEID HAVE COMPLETED THE OPERATION */ -/*------------------------------------------------------------*/ -void Dbtc::execCOMPLETECONF(Signal* signal) -{ - UintR compare_transid1, compare_transid2; - - jamEntry(); - tcConnectptr.i = signal->theData[0]; - tnodeid = signal->theData[1]; - if (ERROR_INSERTED(8047)) { - CLEAR_ERROR_INSERT_VALUE; - sendSignalWithDelay(cownref, GSN_COMPLETECONF, signal, 2000, 4); - return; - }//if - if (tcConnectptr.i >= ctcConnectFilesize) { - errorReport(signal, 3); - return; - }//if - ptrAss(tcConnectptr, tcConnectRecord); - if (tcConnectptr.p->tcConnectstate != OS_WAIT_COMPLETE_CONF) { - warningReport(signal, 12); - return; - }//if - apiConnectptr.i = tcConnectptr.p->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - if (apiConnectptr.p->apiConnectstate != CS_WAIT_COMPLETE_CONF) { - warningReport(signal, 13); - return; - }//if - compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[2]; - compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[3]; - compare_transid1 = compare_transid1 | compare_transid2; - if (compare_transid1 != 0) { - warningReport(signal, 14); - return; - }//if - arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS); - if (tcConnectptr.p->tcNodedata[apiConnectptr.p->currentReplicaNo] != - tnodeid) { - warningReport(signal, 15); - return; - }//if - if (ERROR_INSERTED(8028)) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - tcConnectptr.p->tcConnectstate = OS_COMPLETED; - tcurrentReplicaNo = (Uint8)Z8NIL; - toCompleteHandlingLab(signal); -}//Dbtc::execCOMPLETECONF() - -void Dbtc::toCompleteHandlingLab(Signal* signal) -{ - do { - if (tcurrentReplicaNo != (Uint8)Z8NIL) { - jam(); - arrGuard(tcurrentReplicaNo, MAX_REPLICAS); - switch 
(tcConnectptr.p->failData[tcurrentReplicaNo]) { - case LqhTransConf::InvalidStatus: - jam(); - /*empty*/; - break; - default: - jam(); - /*------------------------------------------------------------*/ - /* THIS NODE DID NOT REPORT ANYTHING FOR THIS OPERATION */ - /* IT MUST HAVE FAILED. */ - /*------------------------------------------------------------*/ - /*------------------------------------------------------------*/ - /* SEND COMPLETEREQ TO THE NEXT REPLICA. */ - /*------------------------------------------------------------*/ - hostptr.i = tcConnectptr.p->tcNodedata[tcurrentReplicaNo]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus == HS_ALIVE) { - jam(); - tblockref = calcLqhBlockRef(hostptr.i); - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - tcConnectptr.p->tcConnectstate = OS_WAIT_COMPLETE_CONF; - apiConnectptr.p->apiConnectstate = CS_WAIT_COMPLETE_CONF; - apiConnectptr.p->timeOutCounter = 0; - tcConnectptr.p->apiConnect = apiConnectptr.i; - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = cownref; - signal->theData[2] = apiConnectptr.p->transid[0]; - signal->theData[3] = apiConnectptr.p->transid[1]; - signal->theData[4] = apiConnectptr.p->tcBlockref; - signal->theData[5] = tcConnectptr.p->tcOprec; - sendSignal(tblockref, GSN_COMPLETEREQ, signal, 6, JBB); - return; - }//if - break; - }//switch - }//if - if (apiConnectptr.p->currentReplicaNo != 0) { - jam(); - /*------------------------------------------------------------*/ - /* THERE ARE STILL MORE REPLICAS IN THIS OPERATION. WE */ - /* NEED TO CONTINUE WITH THOSE REPLICAS. */ - /*------------------------------------------------------------*/ - apiConnectptr.p->currentReplicaNo--; - tcurrentReplicaNo = apiConnectptr.p->currentReplicaNo; - } else { - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - if (tcConnectptr.i == RNIL) { - /*------------------------------------------------------------*/ - /* WE HAVE COMPLETED THIS TRANSACTION NOW AND CAN */ - /* CONTINUE THE PROCESS WITH THE NEXT TRANSACTION. */ - /*------------------------------------------------------------*/ - if (apiConnectptr.p->takeOverRec != (Uint8)Z8NIL) { - jam(); - signal->theData[0] = TcContinueB::ZCOMPLETE_TRANS_AT_TAKE_OVER; - signal->theData[1] = (UintR)apiConnectptr.p->takeOverRec; - signal->theData[2] = apiConnectptr.p->takeOverInd; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - releaseTakeOver(signal); - } else { - jam(); - releaseTransResources(signal); - }//if - return; - }//if - /*------------------------------------------------------------*/ - /* WE HAVE COMPLETED AN OPERATION AND THERE ARE MORE TO */ - /* COMPLETE. TAKE THE NEXT OPERATION AND START WITH THE */ - /* FIRST REPLICA SINCE IT IS THE COMPLETE PHASE. */ - /*------------------------------------------------------------*/ - apiConnectptr.p->currentTcConnect = tcConnectptr.i; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; - apiConnectptr.p->currentReplicaNo = tcurrentReplicaNo; - }//if - } while (1); -}//Dbtc::toCompleteHandlingLab() - -/*------------------------------------------------------------*/ -/* */ -/* FIND THE API CONNECT RECORD FOR THIS TRANSACTION */ -/* DURING TAKE OVER FROM A FAILED TC. IF NONE EXISTS */ -/* YET THEN SEIZE A NEW API CONNECT RECORD AND LINK IT */ -/* INTO THE HASH TABLE. 
*/ -/*------------------------------------------------------------*/ -void Dbtc::findApiConnectFail(Signal* signal) -{ - ApiConnectRecordPtr fafPrevApiConnectptr; - ApiConnectRecordPtr fafNextApiConnectptr; - UintR tfafHashNumber; - - tfafHashNumber = ttransid1 & 511; - fafPrevApiConnectptr.i = RNIL; - ptrNull(fafPrevApiConnectptr); - arrGuard(tfafHashNumber, 512); - fafNextApiConnectptr.i = ctransidFailHash[tfafHashNumber]; - ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord); -FAF_LOOP: - jam(); - if (fafNextApiConnectptr.i == RNIL) { - jam(); - if (cfirstfreeApiConnectFail == RNIL) { - jam(); - systemErrorLab(signal, __LINE__); - return; - }//if - seizeApiConnectFail(signal); - if (fafPrevApiConnectptr.i == RNIL) { - jam(); - ctransidFailHash[tfafHashNumber] = apiConnectptr.i; - } else { - jam(); - ptrGuard(fafPrevApiConnectptr); - fafPrevApiConnectptr.p->nextApiConnect = apiConnectptr.i; - }//if - apiConnectptr.p->nextApiConnect = RNIL; - initApiConnectFail(signal); - } else { - jam(); - fafPrevApiConnectptr.i = fafNextApiConnectptr.i; - fafPrevApiConnectptr.p = fafNextApiConnectptr.p; - apiConnectptr.i = fafNextApiConnectptr.i; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - fafNextApiConnectptr.i = apiConnectptr.p->nextApiConnect; - ptrCheck(fafNextApiConnectptr, capiConnectFilesize, apiConnectRecord); - if ((apiConnectptr.p->transid[1] != ttransid2) || - (apiConnectptr.p->transid[0] != ttransid1)) { - goto FAF_LOOP; - }//if - updateApiStateFail(signal); - }//if -}//Dbtc::findApiConnectFail() - -/*----------------------------------------------------------*/ -/* FIND THE TC CONNECT AND IF NOT FOUND ALLOCATE A NEW */ -/*----------------------------------------------------------*/ -void Dbtc::findTcConnectFail(Signal* signal) -{ - UintR tftfHashNumber; - - tftfHashNumber = (ttransid1 ^ ttcOprec) & 1023; - tcConnectptr.i = ctcConnectFailHash[tftfHashNumber]; - do { - if (tcConnectptr.i == RNIL) { - jam(); - if (cfirstfreeTcConnectFail == RNIL) { - jam(); - systemErrorLab(signal, __LINE__); - return; - }//if - seizeTcConnectFail(signal); - linkTcInConnectionlist(signal); - tcConnectptr.p->nextTcFailHash = ctcConnectFailHash[tftfHashNumber]; - ctcConnectFailHash[tftfHashNumber] = tcConnectptr.i; - initTcConnectFail(signal); - return; - } else { - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - if (tcConnectptr.p->tcOprec != ttcOprec) { - jam(); /* FRAGMENTID = TC_OPREC HERE, LOOP ANOTHER TURN */ - tcConnectptr.i = tcConnectptr.p->nextTcFailHash; - } else { - updateTcStateFail(signal); - return; - }//if - }//if - } while (1); -}//Dbtc::findTcConnectFail() - -/*----------------------------------------------------------*/ -/* INITIALISE AN API CONNECT FAIL RECORD */ -/*----------------------------------------------------------*/ -void Dbtc::initApiConnectFail(Signal* signal) -{ - apiConnectptr.p->transid[0] = ttransid1; - apiConnectptr.p->transid[1] = ttransid2; - apiConnectptr.p->firstTcConnect = RNIL; - apiConnectptr.p->currSavePointId = 0; - apiConnectptr.p->lastTcConnect = RNIL; - tblockref = calcTcBlockRef(tcNodeFailptr.p->takeOverNode); - - apiConnectptr.p->tcBlockref = tblockref; - apiConnectptr.p->ndbapiBlockref = 0; - apiConnectptr.p->ndbapiConnect = 0; - apiConnectptr.p->buddyPtr = RNIL; - apiConnectptr.p->m_transaction_nodes.clear(); - apiConnectptr.p->singleUserMode = 0; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - switch(ttransStatus){ - case LqhTransConf::Committed: - jam(); - 
apiConnectptr.p->globalcheckpointid = tgci; - apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED; - break; - case LqhTransConf::Prepared: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_PREPARED; - break; - case LqhTransConf::Aborted: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTED; - break; - case LqhTransConf::Marker: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_COMPLETED; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - }//if - apiConnectptr.p->commitAckMarker = RNIL; - if(LqhTransConf::getMarkerFlag(treqinfo)){ - jam(); - CommitAckMarkerPtr tmp; - m_commitAckMarkerHash.seize(tmp); - - ndbrequire(tmp.i != RNIL); - - apiConnectptr.p->commitAckMarker = tmp.i; - tmp.p->transid1 = ttransid1; - tmp.p->transid2 = ttransid2; - tmp.p->apiNodeId = refToNode(tapplRef); - tmp.p->noOfLqhs = 1; - tmp.p->lqhNodeId[0] = tnodeid; - tmp.p->apiConnectPtr = apiConnectptr.i; - -#if defined VM_TRACE || defined ERROR_INSERT - { - CommitAckMarkerPtr check; - ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); - } -#endif - m_commitAckMarkerHash.add(tmp); - } -}//Dbtc::initApiConnectFail() - -/*------------------------------------------------------------*/ -/* INITIALISE AT TC CONNECT AT TAKE OVER WHEN ALLOCATING*/ -/* THE TC CONNECT RECORD. */ -/*------------------------------------------------------------*/ -void Dbtc::initTcConnectFail(Signal* signal) -{ - tcConnectptr.p->apiConnect = apiConnectptr.i; - tcConnectptr.p->tcOprec = ttcOprec; - Uint32 treplicaNo = LqhTransConf::getReplicaNo(treqinfo); - for (Uint32 i = 0; i < MAX_REPLICAS; i++) { - tcConnectptr.p->failData[i] = LqhTransConf::InvalidStatus; - }//for - tcConnectptr.p->tcNodedata[treplicaNo] = tnodeid; - tcConnectptr.p->failData[treplicaNo] = ttransStatus; - tcConnectptr.p->lastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo); - tcConnectptr.p->dirtyOp = LqhTransConf::getDirtyFlag(treqinfo); - -}//Dbtc::initTcConnectFail() - -/*----------------------------------------------------------*/ -/* INITIALISE TC NODE FAIL RECORD. */ -/*----------------------------------------------------------*/ -void Dbtc::initTcFail(Signal* signal) -{ - tcNodeFailptr.i = 0; - ptrAss(tcNodeFailptr, tcFailRecord); - tcNodeFailptr.p->queueIndex = 0; - tcNodeFailptr.p->failStatus = FS_IDLE; -}//Dbtc::initTcFail() - -/*----------------------------------------------------------*/ -/* RELEASE_TAKE_OVER */ -/*----------------------------------------------------------*/ -void Dbtc::releaseTakeOver(Signal* signal) -{ - TcConnectRecordPtr rtoNextTcConnectptr; - - rtoNextTcConnectptr.i = apiConnectptr.p->firstTcConnect; - do { - jam(); - tcConnectptr.i = rtoNextTcConnectptr.i; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - rtoNextTcConnectptr.i = tcConnectptr.p->nextTcConnect; - releaseTcConnectFail(signal); - } while (rtoNextTcConnectptr.i != RNIL); - releaseApiConnectFail(signal); -}//Dbtc::releaseTakeOver() - -/*---------------------------------------------------------------------------*/ -/* SETUP_FAIL_DATA */ -/* SETUP DATA TO REUSE TAKE OVER CODE FOR HANDLING ABORT/COMMIT IN NODE */ -/* FAILURE SITUATIONS. 
*/ -/*---------------------------------------------------------------------------*/ -void Dbtc::setupFailData(Signal* signal) -{ - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - do { - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - switch (tcConnectptr.p->tcConnectstate) { - case OS_PREPARED: - case OS_COMMITTING: - jam(); - arrGuard(tcConnectptr.p->lastReplicaNo, MAX_REPLICAS); - for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) { - jam(); - /*------------------------------------------------------------------- - * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH. - * IN THIS CASE ALL LQH'S ARE PREPARED AND WAITING FOR - * COMMIT/ABORT DECISION. - *------------------------------------------------------------------*/ - tcConnectptr.p->failData[tindex] = LqhTransConf::Prepared; - }//for - break; - case OS_COMMITTED: - case OS_COMPLETING: - jam(); - arrGuard(tcConnectptr.p->lastReplicaNo, MAX_REPLICAS); - for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) { - jam(); - /*------------------------------------------------------------------- - * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH. - * IN THIS CASE ALL LQH'S ARE COMMITTED AND WAITING FOR - * COMPLETE MESSAGE. - *------------------------------------------------------------------*/ - tcConnectptr.p->failData[tindex] = LqhTransConf::Committed; - }//for - break; - case OS_COMPLETED: - jam(); - arrGuard(tcConnectptr.p->lastReplicaNo, MAX_REPLICAS); - for (tindex = 0; tindex <= tcConnectptr.p->lastReplicaNo; tindex++) { - jam(); - /*------------------------------------------------------------------- - * KEYDATA IS USED TO KEEP AN INDICATION OF STATE IN LQH. - * IN THIS CASE ALL LQH'S ARE COMPLETED. - *-------------------------------------------------------------------*/ - tcConnectptr.p->failData[tindex] = LqhTransConf::InvalidStatus; - }//for - break; - default: - jam(); - sendSystemError(signal, __LINE__); - break; - }//switch - if (tabortInd != ZCOMMIT_SETUP) { - jam(); - for (UintR Ti = 0; Ti <= tcConnectptr.p->lastReplicaNo; Ti++) { - hostptr.i = tcConnectptr.p->tcNodedata[Ti]; - ptrCheckGuard(hostptr, chostFilesize, hostRecord); - if (hostptr.p->hostStatus != HS_ALIVE) { - jam(); - /*----------------------------------------------------------------- - * FAILURE OF ANY INVOLVED NODE ALWAYS INVOKES AN ABORT DECISION. - *-----------------------------------------------------------------*/ - tabortInd = ZTRUE; - }//if - }//for - }//if - tcConnectptr.p->tcConnectstate = OS_TAKE_OVER; - tcConnectptr.p->tcOprec = tcConnectptr.i; - tcConnectptr.i = tcConnectptr.p->nextTcConnect; - } while (tcConnectptr.i != RNIL); - apiConnectptr.p->tcBlockref = cownref; - apiConnectptr.p->currentTcConnect = apiConnectptr.p->firstTcConnect; - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->currentReplicaNo = tcConnectptr.p->lastReplicaNo; - tcurrentReplicaNo = tcConnectptr.p->lastReplicaNo; -}//Dbtc::setupFailData() - -/*----------------------------------------------------------*/ -/* UPDATE THE STATE OF THE API CONNECT FOR THIS PART. 
*/ -/*----------------------------------------------------------*/ -void Dbtc::updateApiStateFail(Signal* signal) -{ - if(LqhTransConf::getMarkerFlag(treqinfo)){ - jam(); - const Uint32 marker = apiConnectptr.p->commitAckMarker; - if(marker == RNIL){ - jam(); - - CommitAckMarkerPtr tmp; - m_commitAckMarkerHash.seize(tmp); - ndbrequire(tmp.i != RNIL); - - apiConnectptr.p->commitAckMarker = tmp.i; - tmp.p->transid1 = ttransid1; - tmp.p->transid2 = ttransid2; - tmp.p->apiNodeId = refToNode(tapplRef); - tmp.p->noOfLqhs = 1; - tmp.p->lqhNodeId[0] = tnodeid; - tmp.p->apiConnectPtr = apiConnectptr.i; -#if defined VM_TRACE || defined ERROR_INSERT - { - CommitAckMarkerPtr check; - ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); - } -#endif - m_commitAckMarkerHash.add(tmp); - } else { - jam(); - - CommitAckMarkerPtr tmp; - tmp.i = marker; - tmp.p = m_commitAckMarkerHash.getPtr(marker); - - const Uint32 noOfLqhs = tmp.p->noOfLqhs; - ndbrequire(noOfLqhs < MAX_REPLICAS); - tmp.p->lqhNodeId[noOfLqhs] = tnodeid; - tmp.p->noOfLqhs = (noOfLqhs + 1); - } - } - - switch (ttransStatus) { - case LqhTransConf::Committed: - jam(); - switch (apiConnectptr.p->apiConnectstate) { - case CS_FAIL_COMMITTING: - case CS_FAIL_COMMITTED: - jam(); - ndbrequire(tgci == apiConnectptr.p->globalcheckpointid); - break; - case CS_FAIL_PREPARED: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING; - apiConnectptr.p->globalcheckpointid = tgci; - break; - case CS_FAIL_COMPLETED: - jam(); - apiConnectptr.p->globalcheckpointid = tgci; - apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTED; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch - break; - case LqhTransConf::Prepared: - jam(); - switch (apiConnectptr.p->apiConnectstate) { - case CS_FAIL_COMMITTED: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_COMMITTING; - break; - case CS_FAIL_ABORTED: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING; - break; - case CS_FAIL_COMMITTING: - case CS_FAIL_PREPARED: - case CS_FAIL_ABORTING: - jam(); - /*empty*/; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch - break; - case LqhTransConf::Aborted: - jam(); - switch (apiConnectptr.p->apiConnectstate) { - case CS_FAIL_COMMITTING: - case CS_FAIL_COMMITTED: - jam(); - systemErrorLab(signal, __LINE__); - break; - case CS_FAIL_PREPARED: - jam(); - apiConnectptr.p->apiConnectstate = CS_FAIL_ABORTING; - break; - case CS_FAIL_ABORTING: - case CS_FAIL_ABORTED: - jam(); - /*empty*/; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch - break; - case LqhTransConf::Marker: - jam(); - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch -}//Dbtc::updateApiStateFail() - -/*------------------------------------------------------------*/ -/* UPDATE_TC_STATE_FAIL */ -/* */ -/* WE NEED TO UPDATE THE STATUS OF TC_CONNECT RECORD AND*/ -/* WE ALSO NEED TO CHECK THAT THERE IS CONSISTENCY */ -/* BETWEEN THE DIFFERENT REPLICAS. 
*/ -/*------------------------------------------------------------*/ -void Dbtc::updateTcStateFail(Signal* signal) -{ - const Uint8 treplicaNo = LqhTransConf::getReplicaNo(treqinfo); - const Uint8 tlastReplicaNo = LqhTransConf::getLastReplicaNo(treqinfo); - const Uint8 tdirtyOp = LqhTransConf::getDirtyFlag(treqinfo); - - TcConnectRecord * regTcPtr = tcConnectptr.p; - - ndbrequire(regTcPtr->apiConnect == apiConnectptr.i); - ndbrequire(regTcPtr->failData[treplicaNo] == LqhTransConf::InvalidStatus); - ndbrequire(regTcPtr->lastReplicaNo == tlastReplicaNo); - ndbrequire(regTcPtr->dirtyOp == tdirtyOp); - - regTcPtr->tcNodedata[treplicaNo] = tnodeid; - regTcPtr->failData[treplicaNo] = ttransStatus; -}//Dbtc::updateTcStateFail() - -void Dbtc::execTCGETOPSIZEREQ(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(8000); - - UintR Tuserpointer = signal->theData[0]; /* DBDIH POINTER */ - BlockReference Tusersblkref = signal->theData[1];/* DBDIH BLOCK REFERENCE */ - signal->theData[0] = Tuserpointer; - signal->theData[1] = coperationsize; - sendSignal(Tusersblkref, GSN_TCGETOPSIZECONF, signal, 2, JBB); -}//Dbtc::execTCGETOPSIZEREQ() - -void Dbtc::execTC_CLOPSIZEREQ(Signal* signal) -{ - jamEntry(); - CRASH_INSERTION(8001); - - tuserpointer = signal->theData[0]; - tusersblkref = signal->theData[1]; - /* DBDIH BLOCK REFERENCE */ - coperationsize = 0; - signal->theData[0] = tuserpointer; - sendSignal(tusersblkref, GSN_TC_CLOPSIZECONF, signal, 1, JBB); -}//Dbtc::execTC_CLOPSIZEREQ() - -/* ######################################################################### */ -/* ####### ERROR MODULE ####### */ -/* ######################################################################### */ -void Dbtc::tabStateErrorLab(Signal* signal) -{ - terrorCode = ZSTATE_ERROR; - releaseAtErrorLab(signal); -}//Dbtc::tabStateErrorLab() - -void Dbtc::wrongSchemaVersionErrorLab(Signal* signal) -{ - const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0]; - - TableRecordPtr tabPtr; - tabPtr.i = tcKeyReq->tableId; - const Uint32 schemVer = tcKeyReq->tableSchemaVersion; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - terrorCode = tabPtr.p->getErrorCode(schemVer); - - abortErrorLab(signal); -}//Dbtc::wrongSchemaVersionErrorLab() - -void Dbtc::noFreeConnectionErrorLab(Signal* signal) -{ - terrorCode = ZNO_FREE_TC_CONNECTION; - abortErrorLab(signal); /* RECORD. OTHERWISE GOTO ERRORHANDLING */ -}//Dbtc::noFreeConnectionErrorLab() - -void Dbtc::aiErrorLab(Signal* signal) -{ - terrorCode = ZLENGTH_ERROR; - abortErrorLab(signal); -}//Dbtc::aiErrorLab() - -void Dbtc::seizeAttrbuferrorLab(Signal* signal) -{ - terrorCode = ZGET_ATTRBUF_ERROR; - abortErrorLab(signal); -}//Dbtc::seizeAttrbuferrorLab() - -void Dbtc::seizeDatabuferrorLab(Signal* signal) -{ - terrorCode = ZGET_DATAREC_ERROR; - releaseAtErrorLab(signal); -}//Dbtc::seizeDatabuferrorLab() - -void Dbtc::releaseAtErrorLab(Signal* signal) -{ - ptrGuard(tcConnectptr); - tcConnectptr.p->tcConnectstate = OS_ABORTING; - /*-------------------------------------------------------------------------* - * A FAILURE OF THIS OPERATION HAS OCCURRED. THIS FAILURE WAS EITHER A - * FAULTY PARAMETER OR A RESOURCE THAT WAS NOT AVAILABLE. - * WE WILL ABORT THE ENTIRE TRANSACTION SINCE THIS IS THE SAFEST PATH - * TO HANDLE THIS PROBLEM. - * SINCE WE HAVE NOT YET CONTACTED ANY LQH WE SET NUMBER OF NODES TO ZERO - * WE ALSO SET THE STATE TO ABORTING TO INDICATE THAT WE ARE NOT EXPECTING - * ANY SIGNALS. 
- *-------------------------------------------------------------------------*/ - tcConnectptr.p->noOfNodes = 0; - abortErrorLab(signal); -}//Dbtc::releaseAtErrorLab() - -void Dbtc::warningHandlerLab(Signal* signal, int line) -{ - ndbassert(false); -}//Dbtc::warningHandlerLab() - -void Dbtc::systemErrorLab(Signal* signal, int line) -{ - progError(line, NDBD_EXIT_NDBREQUIRE); -}//Dbtc::systemErrorLab() - - -/* ######################################################################### * - * ####### SCAN MODULE ####### * - * ######################################################################### * - - The application orders a scan of a table. We divide the scan into a scan on - each fragment. The scan uses the primary replicas since the scan might be - used for an update in a separate transaction. - - Scans are always done as a separate transaction. Locks from the scan - can be overtaken by another transaction. Scans can never lock the entire - table. Locks are released immediately after the read has been verified - by the application. There is not even an option to leave the locks. - The reason is that this would hurt real-time behaviour too much. - - -# The first step in handling a scan of a table is to receive all signals - defining the scan. If failures occur during this step we release all - resource and reply with SCAN_TABREF providing the error code. - If system load is too high, the request will not be allowed. - - -# The second step retrieves the number of fragments that exist in the - table. It also ensures that the table actually exist. After this, - the scan is ready to be parallelised. The idea is that the receiving - process (hereafter called delivery process) will start up a number - of scan processes. Each of these scan processes will - independently scan one fragment at a time. The delivery - process object is the scan record and the scan process object is - the scan fragment record plus the scan operation record. - - -# The third step is thus performed in parallel. In the third step each - scan process retrieves the primary replica of the fragment it will - scan. Then it starts the scan as soon as the load on that node permits. - - The LQH returns either when it retrieved the maximum number of tuples or - when it has retrived at least one tuple and is hindered by a lock to - retrieve the next tuple. This is to ensure that a scan process never - can be involved in a deadlock situation. - - When the scan process receives a number of tuples to report to the - application it checks the state of the delivery process. Only one delivery - at a time is handled by the application. Thus if the delivery process - has already sent a number of tuples to the application this set of tuples - are queued. - - When the application requests the next set of tuples it is immediately - delivered if any are queued, otherwise it waits for the next scan - process that is ready to deliver. - - - ERROR HANDLING - - As already mentioned it is rather easy to handle errors before the scan - processes have started. In this case it is enough to release the resources - and send SCAN_TAB_REF. - - If an error occurs in any of the scan processes then we have to stop all - scan processes. We do however only stop the delivery process and ask - the api to order us to close the scan. The reason is that we can easily - enter into difficult timing problems since the application and this - block is out of synch we will thus always start by report the error to - the application and wait for a close request. 
This error report uses the - SCAN_TABREF signal with a special error code that the api must check for. - - - CLOSING AN ACTIVE SCAN - - The application can close a scan for several reasons before it is completed. - One reason was mentioned above where an error in a scan process led to a - request to close the scan. Another reason could simply be that the - application found what it looked for and is thus not interested in the - rest of the scan. - - IT COULD ALSO BE DEPENDENT ON INTERNAL ERRORS IN THE API. - - When a close scan request is received, all scan processes are stopped and all - resources belonging to those scan processes are released. Stopping the scan - processes most often includes communication with an LQH where the local scan - is controlled. Finally all resources belonging to the scan is released and - the SCAN_TABCONF is sent with an indication of that the scan is closed. - - - CLOSING A COMPLETED SCAN - - When all scan processes are completed then a report is sent to the - application which indicates that no more tuples can be fetched. - The application will send a close scan and the same action as when - closing an active scan is performed. - In this case it will of course not find any active scan processes. - It will even find all scan processes already released. - - The reason for requiring the api to close the scan is the same as above. - It is to avoid any timing problems due to that the api and this block - is out of synch. - - * ######################################################################## */ -void Dbtc::execSCAN_TABREQ(Signal* signal) -{ - const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0]; - const Uint32 ri = scanTabReq->requestInfo; - const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF); - const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16; - const Uint32 schemaVersion = scanTabReq->tableSchemaVersion; - const Uint32 transid1 = scanTabReq->transId1; - const Uint32 transid2 = scanTabReq->transId2; - const Uint32 tmpXX = scanTabReq->buddyConPtr; - const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? 
RNIL : tmpXX); - Uint32 currSavePointId = 0; - - Uint32 scanConcurrency = scanTabReq->getParallelism(ri); - Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri); - Uint32 scanParallel = scanConcurrency; - Uint32 errCode; - ScanRecordPtr scanptr; - - jamEntry(); - - SegmentedSectionPtr api_op_ptr; - signal->getSection(api_op_ptr, 0); - copy(&cdata[0], api_op_ptr); - releaseSections(signal); - - apiConnectptr.i = scanTabReq->apiConnectPtr; - tabptr.i = scanTabReq->tableId; - - if (apiConnectptr.i >= capiConnectFilesize) - { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - - ptrAss(apiConnectptr, apiConnectRecord); - ApiConnectRecord * transP = apiConnectptr.p; - - if (transP->apiConnectstate != CS_CONNECTED) { - jam(); - // could be left over from TCKEYREQ rollback - if (transP->apiConnectstate == CS_ABORTING && - transP->abortState == AS_IDLE) { - jam(); - } else if(transP->apiConnectstate == CS_STARTED && - transP->firstTcConnect == RNIL){ - jam(); - // left over from simple/dirty read - } else { - jam(); - jamLine(transP->apiConnectstate); - errCode = ZSTATE_ERROR; - goto SCAN_TAB_error_no_state_change; - } - } - - if(tabptr.i >= ctabrecFilesize) - { - errCode = ZUNKNOWN_TABLE_ERROR; - goto SCAN_TAB_error; - } - - ptrAss(tabptr, tableRecord); - if ((aiLength == 0) || - (!tabptr.p->checkTable(schemaVersion)) || - (scanConcurrency == 0) || - (cfirstfreeTcConnect == RNIL) || - (cfirstfreeScanrec == RNIL)) { - goto SCAN_error_check; - } - if (buddyPtr != RNIL) { - jam(); - ApiConnectRecordPtr buddyApiPtr; - buddyApiPtr.i = buddyPtr; - ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord); - if ((transid1 == buddyApiPtr.p->transid[0]) && - (transid2 == buddyApiPtr.p->transid[1])) { - jam(); - - if (buddyApiPtr.p->apiConnectstate == CS_ABORTING) { - // transaction has been aborted - jam(); - errCode = buddyApiPtr.p->returncode; - goto SCAN_TAB_error; - }//if - currSavePointId = buddyApiPtr.p->currSavePointId; - buddyApiPtr.p->currSavePointId++; - } - } - - if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && - getNodeState().getSingleUserApi() != - refToNode(apiConnectptr.p->ndbapiBlockref)) - { - errCode = ZCLUSTER_IN_SINGLEUSER_MODE; - goto SCAN_TAB_error; - } - - seizeTcConnect(signal); - tcConnectptr.p->apiConnect = apiConnectptr.i; - tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN; - apiConnectptr.p->lastTcConnect = tcConnectptr.i; - - seizeCacheRecord(signal); - cachePtr.p->keylen = keyLen; - cachePtr.p->save1 = 0; - cachePtr.p->distributionKey = scanTabReq->distributionKey; - cachePtr.p->distributionKeyIndicator= ScanTabReq::getDistributionKeyFlag(ri); - scanptr = seizeScanrec(signal); - - ndbrequire(transP->apiScanRec == RNIL); - ndbrequire(scanptr.p->scanApiRec == RNIL); - - initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag); - - transP->apiScanRec = scanptr.i; - transP->returncode = 0; - transP->transid[0] = transid1; - transP->transid[1] = transid2; - transP->buddyPtr = buddyPtr; - - // The scan is started - transP->apiConnectstate = CS_START_SCAN; - transP->currSavePointId = currSavePointId; - - /********************************************************** - * We start the timer on scanRec to be able to discover a - * timeout in the API the API now is in charge! 
- ***********************************************************/ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - updateBuddyTimer(apiConnectptr); - - /*********************************************************** - * WE HAVE NOW RECEIVED ALL REFERENCES TO SCAN OBJECTS IN - * THE API. WE ARE NOW READY TO RECEIVE THE ATTRIBUTE INFO - * IF ANY TO RECEIVE. - **********************************************************/ - scanptr.p->scanState = ScanRecord::WAIT_AI; - - if (ERROR_INSERTED(8038)) - { - /** - * Force API_FAILREQ - */ - DisconnectRep * const rep = (DisconnectRep *)signal->getDataPtrSend(); - rep->nodeId = refToNode(apiConnectptr.p->ndbapiBlockref); - rep->err = 8038; - - EXECUTE_DIRECT(CMVMI, GSN_DISCONNECT_REP, signal, 2); - CLEAR_ERROR_INSERT_VALUE; - } - - return; - - SCAN_error_check: - if (aiLength == 0) { - jam() - errCode = ZSCAN_AI_LEN_ERROR; - goto SCAN_TAB_error; - }//if - if (!tabptr.p->checkTable(schemaVersion)){ - jam(); - errCode = tabptr.p->getErrorCode(schemaVersion); - goto SCAN_TAB_error; - }//if - if (scanConcurrency == 0) { - jam(); - errCode = ZNO_CONCURRENCY_ERROR; - goto SCAN_TAB_error; - }//if - if (cfirstfreeTcConnect == RNIL) { - jam(); - errCode = ZNO_FREE_TC_CONNECTION; - goto SCAN_TAB_error; - }//if - ndbrequire(cfirstfreeScanrec == RNIL); - jam(); - errCode = ZNO_SCANREC_ERROR; - goto SCAN_TAB_error; - -SCAN_TAB_error: - jam(); - /** - * Prepare for up coming ATTRINFO/KEYINFO - */ - transP->apiConnectstate = CS_ABORTING; - transP->abortState = AS_IDLE; - transP->transid[0] = transid1; - transP->transid[1] = transid2; - -SCAN_TAB_error_no_state_change: - - ScanTabRef * ref = (ScanTabRef*)&signal->theData[0]; - ref->apiConnectPtr = transP->ndbapiConnect; - ref->transId1 = transid1; - ref->transId2 = transid2; - ref->errorCode = errCode; - ref->closeNeeded = 0; - sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF, - signal, ScanTabRef::SignalLength, JBB); - return; -}//Dbtc::execSCAN_TABREQ() - -void Dbtc::initScanrec(ScanRecordPtr scanptr, - const ScanTabReq * scanTabReq, - UintR scanParallel, - UintR noOprecPerFrag) -{ - const UintR ri = scanTabReq->requestInfo; - scanptr.p->scanTcrec = tcConnectptr.i; - scanptr.p->scanApiRec = apiConnectptr.i; - scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF; - scanptr.p->scanKeyLen = scanTabReq->attrLenKeyLen >> 16; - scanptr.p->scanTableref = tabptr.i; - scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion; - scanptr.p->scanParallel = scanParallel; - scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size; - scanptr.p->batch_byte_size = scanTabReq->batch_byte_size; - scanptr.p->batch_size_rows = noOprecPerFrag; - - Uint32 tmp = 0; - ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri)); - ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri)); - ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri)); - ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri)); - ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri)); - ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri)); - ScanFragReq::setTupScanFlag(tmp, ScanTabReq::getTupScanFlag(ri)); - ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF); - ScanFragReq::setNoDiskFlag(tmp, ScanTabReq::getNoDiskFlag(ri)); - - scanptr.p->scanRequestInfo = tmp; - scanptr.p->scanStoredProcId = scanTabReq->storedProcId; - scanptr.p->scanState = ScanRecord::RUNNING; - scanptr.p->m_queued_count = 0; - - ScanFragList list(c_scan_frag_pool, - 
scanptr.p->m_running_scan_frags); - for (Uint32 i = 0; i < scanParallel; i++) { - jam(); - ScanFragRecPtr ptr; - ndbrequire(list.seize(ptr)); - ptr.p->scanFragState = ScanFragRec::IDLE; - ptr.p->scanRec = scanptr.i; - ptr.p->scanFragId = 0; - ptr.p->m_apiPtr = cdata[i]; - }//for - - (* (ScanTabReq::getRangeScanFlag(ri) ? - &c_counters.c_range_scan_count : - &c_counters.c_scan_count))++; -}//Dbtc::initScanrec() - -void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode) -{ - ScanTabRef * ref = (ScanTabRef*)&signal->theData[0]; - ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect; - ref->transId1 = apiConnectptr.p->transid[0]; - ref->transId2 = apiConnectptr.p->transid[1]; - ref->errorCode = errCode; - ref->closeNeeded = 0; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF, - signal, ScanTabRef::SignalLength, JBB); -}//Dbtc::scanTabRefLab() - -/*---------------------------------------------------------------------------*/ -/* */ -/* RECEPTION OF ATTRINFO FOR SCAN TABLE REQUEST. */ -/*---------------------------------------------------------------------------*/ -void Dbtc::scanAttrinfoLab(Signal* signal, UintR Tlen) -{ - ScanRecordPtr scanptr; - scanptr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - cachePtr.i = apiConnectptr.p->cachePtr; - ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord); - CacheRecord * const regCachePtr = cachePtr.p; - ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_AI); - - regCachePtr->currReclenAi = regCachePtr->currReclenAi + Tlen; - if (regCachePtr->currReclenAi < scanptr.p->scanAiLength) { - if (cfirstfreeAttrbuf == RNIL) { - goto scanAttrinfo_attrbuf_error; - }//if - saveAttrbuf(signal); - } else { - if (regCachePtr->currReclenAi > scanptr.p->scanAiLength) { - goto scanAttrinfo_len_error; - } else { - /* CURR_RECLEN_AI = SCAN_AI_LENGTH */ - if (cfirstfreeAttrbuf == RNIL) { - goto scanAttrinfo_attrbuf2_error; - }//if - saveAttrbuf(signal); - /************************************************** - * WE HAVE NOW RECEIVED ALL INFORMATION CONCERNING - * THIS SCAN. WE ARE READY TO START THE ACTUAL - * EXECUTION OF THE SCAN QUERY - **************************************************/ - diFcountReqLab(signal, scanptr); - return; - }//if - }//if - return; - -scanAttrinfo_attrbuf_error: - jam(); - abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR, true); - return; - -scanAttrinfo_attrbuf2_error: - jam(); - abortScanLab(signal, scanptr, ZGET_ATTRBUF_ERROR, true); - return; - -scanAttrinfo_len_error: - jam(); - abortScanLab(signal, scanptr, ZLENGTH_ERROR, true); - return; -}//Dbtc::scanAttrinfoLab() - -void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr) -{ - /** - * Check so that the table is not being dropped - */ - TableRecordPtr tabPtr; - tabPtr.i = scanptr.p->scanTableref; - tabPtr.p = &tableRecord[tabPtr.i]; - if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){ - ; - } else { - abortScanLab(signal, scanptr, - tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion), - true); - return; - } - - scanptr.p->scanNextFragId = 0; - scanptr.p->m_booked_fragments_count= 0; - scanptr.p->scanState = ScanRecord::WAIT_FRAGMENT_COUNT; - - if(!cachePtr.p->distributionKeyIndicator) - { - jam(); - /************************************************* - * THE FIRST STEP TO RECEIVE IS SUCCESSFULLY COMPLETED. - * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE. 
- ***************************************************/ - DihFragCountReq * const req = (DihFragCountReq*)signal->getDataPtrSend(); - req->m_connectionData = tcConnectptr.p->dihConnectptr; - req->m_tableRef = scanptr.p->scanTableref; - sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, - DihFragCountReq::SignalLength, JBB); - } - else - { - signal->theData[0] = tcConnectptr.p->dihConnectptr; - signal->theData[1] = tabPtr.i; - signal->theData[2] = cachePtr.p->distributionKey; - EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3); - UintR TerrorIndicator = signal->theData[0]; - jamEntry(); - if (TerrorIndicator != 0) { - DihFragCountRef * const ref = (DihFragCountRef*)signal->getDataPtr(); - ref->m_connectionData = tcConnectptr.i; - ref->m_error = signal->theData[1]; - execDI_FCOUNTREF(signal); - return; - } - - UintR Tdata1 = signal->theData[1]; - scanptr.p->scanNextFragId = Tdata1; - DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); - conf->m_connectionData = tcConnectptr.i; - conf->m_fragmentCount = 1; // Frag count - execDI_FCOUNTCONF(signal); - } - return; -}//Dbtc::diFcountReqLab() - -/******************************************************************** - * execDI_FCOUNTCONF - * - * WE HAVE ASKED DIH ABOUT THE NUMBER OF FRAGMENTS IN THIS TABLE. - * WE WILL NOW START A NUMBER OF PARALLEL SCAN PROCESSES. EACH OF - * THESE WILL SCAN ONE FRAGMENT AT A TIME. THEY WILL CONTINUE THIS - * UNTIL THERE ARE NO MORE FRAGMENTS TO SCAN OR UNTIL THE APPLICATION - * CLOSES THE SCAN. - ********************************************************************/ -void Dbtc::execDI_FCOUNTCONF(Signal* signal) -{ - jamEntry(); - DihFragCountConf * const conf = (DihFragCountConf*)signal->getDataPtr(); - tcConnectptr.i = conf->m_connectionData; - Uint32 tfragCount = conf->m_fragmentCount; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.i = tcConnectptr.p->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - ScanRecordPtr scanptr; - scanptr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT); - if (apiConnectptr.p->apiFailState == ZTRUE) { - jam(); - releaseScanResources(scanptr, true); - handleApiFailState(signal, apiConnectptr.i); - return; - }//if - if (tfragCount == 0) { - jam(); - abortScanLab(signal, scanptr, ZNO_FRAGMENT_ERROR, true); - return; - }//if - - /** - * Check so that the table is not being dropped - */ - TableRecordPtr tabPtr; - tabPtr.i = scanptr.p->scanTableref; - tabPtr.p = &tableRecord[tabPtr.i]; - if (tabPtr.p->checkTable(scanptr.p->scanSchemaVersion)){ - ; - } else { - abortScanLab(signal, scanptr, - tabPtr.p->getErrorCode(scanptr.p->scanSchemaVersion), - true); - return; - } - - scanptr.p->scanParallel = tfragCount; - scanptr.p->scanNoFrag = tfragCount; - scanptr.p->scanState = ScanRecord::RUNNING; - - setApiConTimer(apiConnectptr.i, 0, __LINE__); - updateBuddyTimer(apiConnectptr); - - ScanFragRecPtr ptr; - ScanFragList list(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - for (list.first(ptr); !ptr.isNull() && tfragCount; - list.next(ptr), tfragCount--){ - jam(); - - ptr.p->lqhBlockref = 0; - ptr.p->startFragTimer(ctcTimer); - ptr.p->scanFragId = scanptr.p->scanNextFragId++; - ptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF; - ptr.p->startFragTimer(ctcTimer); - - signal->theData[0] = tcConnectptr.p->dihConnectptr; - signal->theData[1] = ptr.i; - signal->theData[2] = 
scanptr.p->scanTableref; - signal->theData[3] = ptr.p->scanFragId; - sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB); - }//for - - ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags); - for (; !ptr.isNull();) - { - ptr.p->m_ops = 0; - ptr.p->m_totalLen = 0; - ptr.p->m_scan_frag_conf_status = 1; - ptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY; - ptr.p->stopFragTimer(); - - ScanFragRecPtr tmp = ptr; - list.next(ptr); - list.remove(tmp); - queued.add(tmp); - scanptr.p->m_queued_count++; - } -}//Dbtc::execDI_FCOUNTCONF() - -/****************************************************** - * execDI_FCOUNTREF - ******************************************************/ -void Dbtc::execDI_FCOUNTREF(Signal* signal) -{ - jamEntry(); - DihFragCountRef * const ref = (DihFragCountRef*)signal->getDataPtr(); - tcConnectptr.i = ref->m_connectionData; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - const Uint32 errCode = ref->m_error; - apiConnectptr.i = tcConnectptr.p->apiConnect; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - ScanRecordPtr scanptr; - scanptr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_FRAGMENT_COUNT); - if (apiConnectptr.p->apiFailState == ZTRUE) { - jam(); - releaseScanResources(scanptr, true); - handleApiFailState(signal, apiConnectptr.i); - return; - }//if - abortScanLab(signal, scanptr, errCode, true); -}//Dbtc::execDI_FCOUNTREF() - -void Dbtc::abortScanLab(Signal* signal, ScanRecordPtr scanptr, Uint32 errCode, - bool not_started) -{ - scanTabRefLab(signal, errCode); - releaseScanResources(scanptr, not_started); -}//Dbtc::abortScanLab() - -void Dbtc::releaseScanResources(ScanRecordPtr scanPtr, - bool not_started) -{ - if (apiConnectptr.p->cachePtr != RNIL) { - cachePtr.i = apiConnectptr.p->cachePtr; - ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord); - releaseKeys(); - releaseAttrinfo(); - }//if - tcConnectptr.i = scanPtr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - releaseTcCon(); - - if (not_started) - { - jam(); - ScanFragList run(c_scan_frag_pool, scanPtr.p->m_running_scan_frags); - ScanFragList queue(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags); - run.release(); - queue.release(); - } - - ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty()); - ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty()); - ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty()); - - ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i); - ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i); - - // link into free list - scanPtr.p->nextScan = cfirstfreeScanrec; - scanPtr.p->scanState = ScanRecord::IDLE; - scanPtr.p->scanTcrec = RNIL; - scanPtr.p->scanApiRec = RNIL; - cfirstfreeScanrec = scanPtr.i; - - apiConnectptr.p->apiScanRec = RNIL; - apiConnectptr.p->apiConnectstate = CS_CONNECTED; - setApiConTimer(apiConnectptr.i, 0, __LINE__); -}//Dbtc::releaseScanResources() - - -/**************************************************************** - * execDIGETPRIMCONF - * - * WE HAVE RECEIVED THE PRIMARY NODE OF THIS FRAGMENT. - * WE ARE NOW READY TO ASK FOR PERMISSION TO LOAD THIS - * SPECIFIC NODE WITH A SCAN OPERATION. 
- ****************************************************************************/ -void Dbtc::execDIGETPRIMCONF(Signal* signal) -{ - jamEntry(); - // tcConnectptr.i in theData[0] is not used - scanFragptr.i = signal->theData[1]; - c_scan_frag_pool.getPtr(scanFragptr); - - tnodeid = signal->theData[2]; - arrGuard(tnodeid, MAX_NDB_NODES); - - ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF); - scanFragptr.p->stopFragTimer(); - - ScanRecordPtr scanptr; - scanptr.i = scanFragptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - - /** - * This must be false as select count(*) otherwise - * can "pass" committing on backup fragments and - * get incorrect row count - */ - if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo)) - { - jam(); - Uint32 max = 3+signal->theData[6]; - Uint32 nodeid = getOwnNodeId(); - for(Uint32 i = 3; i<max; i++) - if(signal->theData[i] == nodeid) - { - jam(); - tnodeid = nodeid; - break; - } - } - - { - /** - * Check table - */ - TableRecordPtr tabPtr; - tabPtr.i = scanptr.p->scanTableref; - ptrAss(tabPtr, tableRecord); - Uint32 schemaVersion = scanptr.p->scanSchemaVersion; - if(tabPtr.p->checkTable(schemaVersion) == false){ - jam(); - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(scanFragptr); - scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion)); - return; - } - } - - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.i = scanptr.p->scanApiRec; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - cachePtr.i = apiConnectptr.p->cachePtr; - ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord); - switch (scanptr.p->scanState) { - case ScanRecord::CLOSING_SCAN: - jam(); - updateBuddyTimer(apiConnectptr); - { - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(scanFragptr); - } - close_scan_req_send_conf(signal, scanptr); - return; - default: - jam(); - /*empty*/; - break; - }//switch - Uint32 ref = calcLqhBlockRef(tnodeid); - scanFragptr.p->lqhBlockref = ref; - scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount; - sendScanFragReq(signal, scanptr.p, scanFragptr.p); - if(ERROR_INSERTED(8035)) - globalTransporterRegistry.performSend(); - attrbufptr.i = cachePtr.p->firstAttrbuf; - while (attrbufptr.i != RNIL) { - jam(); - ptrCheckGuard(attrbufptr, cattrbufFilesize, attrbufRecord); - sendAttrinfo(signal, - scanFragptr.i, - attrbufptr.p, - ref); - attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT]; - if(ERROR_INSERTED(8035)) - globalTransporterRegistry.performSend(); - }//while - scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE; - scanFragptr.p->startFragTimer(ctcTimer); - updateBuddyTimer(apiConnectptr); - /********************************************* - * WE HAVE NOW STARTED A FRAGMENT SCAN. NOW - * WAIT FOR THE FIRST SCANNED RECORDS - *********************************************/ -}//Dbtc::execDIGETPRIMCONF - -/*************************************************** - * execDIGETPRIMREF - * - * WE ARE NOW FORCED TO STOP THE SCAN. THIS ERROR - * IS NOT RECOVERABLE SINCE THERE IS A PROBLEM WITH - * FINDING A PRIMARY REPLICA OF A CERTAIN FRAGMENT. - ***************************************************/ -void Dbtc::execDIGETPRIMREF(Signal* signal) -{ - jamEntry(); - // tcConnectptr.i in theData[0] is not used.
- scanFragptr.i = signal->theData[1]; - const Uint32 errCode = signal->theData[2]; - c_scan_frag_pool.getPtr(scanFragptr); - ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::WAIT_GET_PRIMCONF); - - ScanRecordPtr scanptr; - scanptr.i = scanFragptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(scanFragptr); - - scanError(signal, scanptr, errCode); -}//Dbtc::execDIGETPRIMREF() - -/** - * Dbtc::execSCAN_FRAGREF - * Our attempt to scan a fragment was refused - * set error code and close all other fragment - * scan's belonging to this scan - */ -void Dbtc::execSCAN_FRAGREF(Signal* signal) -{ - const ScanFragRef * const ref = (ScanFragRef *)&signal->theData[0]; - - jamEntry(); - const Uint32 errCode = ref->errorCode; - - scanFragptr.i = ref->senderData; - c_scan_frag_pool.getPtr(scanFragptr); - - ScanRecordPtr scanptr; - scanptr.i = scanFragptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - - apiConnectptr.i = scanptr.p->scanApiRec; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - - Uint32 transid1 = apiConnectptr.p->transid[0] ^ ref->transId1; - Uint32 transid2 = apiConnectptr.p->transid[1] ^ ref->transId2; - transid1 = transid1 | transid2; - if (transid1 != 0) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - - /** - * Set errorcode, close connection to this lqh fragment, - * stop fragment timer and call scanFragError to start - * close of the other fragment scans - */ - ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE); - { - scanFragptr.p->scanFragState = ScanFragRec::COMPLETED; - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(scanFragptr); - scanFragptr.p->stopFragTimer(); - } - scanError(signal, scanptr, errCode); -}//Dbtc::execSCAN_FRAGREF() - -/** - * Dbtc::scanError - * - * Called when an error occurs during - */ -void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode) -{ - jam(); - ScanRecord* scanP = scanptr.p; - - DEBUG("scanError, errorCode = "<< errorCode << - ", scanState = " << scanptr.p->scanState); - - apiConnectptr.i = scanP->scanApiRec; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - ndbrequire(apiConnectptr.p->apiScanRec == scanptr.i); - - if(scanP->scanState == ScanRecord::CLOSING_SCAN){ - jam(); - close_scan_req_send_conf(signal, scanptr); - return; - } - - ndbrequire(scanP->scanState == ScanRecord::RUNNING); - - /** - * Close scan wo/ having received an order to do so - */ - close_scan_req(signal, scanptr, false); - - const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE); - if(apiFail){ - jam(); - return; - } - - ScanTabRef * ref = (ScanTabRef*)&signal->theData[0]; - ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect; - ref->transId1 = apiConnectptr.p->transid[0]; - ref->transId2 = apiConnectptr.p->transid[1]; - ref->errorCode = errorCode; - ref->closeNeeded = 1; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABREF, - signal, ScanTabRef::SignalLength, JBB); -}//Dbtc::scanError() - -/************************************************************ - * execSCAN_FRAGCONF - * - * A NUMBER OF OPERATIONS HAVE BEEN COMPLETED IN THIS - * FRAGMENT. TAKE CARE OF AND ISSUE FURTHER ACTIONS. 
- ************************************************************/ -void Dbtc::execSCAN_FRAGCONF(Signal* signal) -{ - Uint32 transid1, transid2, total_len; - jamEntry(); - - const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0]; - const Uint32 noCompletedOps = conf->completedOps; - const Uint32 status = conf->fragmentCompleted; - - scanFragptr.i = conf->senderData; - c_scan_frag_pool.getPtr(scanFragptr); - - ScanRecordPtr scanptr; - scanptr.i = scanFragptr.p->scanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - - apiConnectptr.i = scanptr.p->scanApiRec; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - - transid1 = apiConnectptr.p->transid[0] ^ conf->transId1; - transid2 = apiConnectptr.p->transid[1] ^ conf->transId2; - total_len= conf->total_len; - transid1 = transid1 | transid2; - if (transid1 != 0) { - jam(); - systemErrorLab(signal, __LINE__); - }//if - - ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE); - - if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){ - jam(); - if(status == 0){ - /** - * We have started closing = we sent a close -> ignore this - */ - return; - } else { - jam(); - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - - run.release(scanFragptr); - scanFragptr.p->stopFragTimer(); - scanFragptr.p->scanFragState = ScanFragRec::COMPLETED; - } - close_scan_req_send_conf(signal, scanptr); - return; - } - - if(noCompletedOps == 0 && status != 0 && - scanptr.p->scanNextFragId+scanptr.p->m_booked_fragments_count < scanptr.p->scanNoFrag){ - /** - * Start on next fragment - */ - scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF; - scanFragptr.p->startFragTimer(ctcTimer); - - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++; - signal->theData[0] = tcConnectptr.p->dihConnectptr; - signal->theData[1] = scanFragptr.i; - signal->theData[2] = scanptr.p->scanTableref; - signal->theData[3] = scanFragptr.p->scanFragId; - sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB); - return; - } - /* - Uint32 totalLen = 0; - for(Uint32 i = 0; i<noCompletedOps; i++){ - Uint32 tmp = conf->opReturnDataLen[i]; - totalLen += tmp; - } - */ - { - ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags); - ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags); - - run.remove(scanFragptr); - queued.add(scanFragptr); - scanptr.p->m_queued_count++; - } - - scanFragptr.p->m_scan_frag_conf_status = status; - scanFragptr.p->m_ops = noCompletedOps; - scanFragptr.p->m_totalLen = total_len; - scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY; - scanFragptr.p->stopFragTimer(); - - if(scanptr.p->m_queued_count > /** Min */ 0){ - jam(); - sendScanTabConf(signal, scanptr); - } -}//Dbtc::execSCAN_FRAGCONF() - -/**************************************************************************** - * execSCAN_NEXTREQ - * - * THE APPLICATION HAVE PROCESSED THE TUPLES TRANSFERRED AND IS NOW READY FOR - * MORE. THIS SIGNAL IS ALSO USED TO CLOSE THE SCAN.
- ****************************************************************************/ -void Dbtc::execSCAN_NEXTREQ(Signal* signal) -{ - const ScanNextReq * const req = (ScanNextReq *)&signal->theData[0]; - const UintR transid1 = req->transId1; - const UintR transid2 = req->transId2; - const UintR stopScan = req->stopScan; - - jamEntry(); - - apiConnectptr.i = req->apiConnectPtr; - if (apiConnectptr.i >= capiConnectFilesize) { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - ptrAss(apiConnectptr, apiConnectRecord); - - /** - * Check transid - */ - const UintR ctransid1 = apiConnectptr.p->transid[0] ^ transid1; - const UintR ctransid2 = apiConnectptr.p->transid[1] ^ transid2; - if ((ctransid1 | ctransid2) != 0){ - ScanTabRef * ref = (ScanTabRef*)&signal->theData[0]; - ref->apiConnectPtr = apiConnectptr.p->ndbapiConnect; - ref->transId1 = transid1; - ref->transId2 = transid2; - ref->errorCode = ZSTATE_ERROR; - ref->closeNeeded = 0; - sendSignal(signal->senderBlockRef(), GSN_SCAN_TABREF, - signal, ScanTabRef::SignalLength, JBB); - DEBUG("Wrong transid"); - return; - } - - /** - * Check state of API connection - */ - if (apiConnectptr.p->apiConnectstate != CS_START_SCAN) { - jam(); - if (apiConnectptr.p->apiConnectstate == CS_CONNECTED) { - jam(); - /********************************************************************* - * The application sends a SCAN_NEXTREQ after experiencing a time-out. - * We will send a SCAN_TABREF to indicate a time-out occurred. - *********************************************************************/ - DEBUG("scanTabRefLab: ZSCANTIME_OUT_ERROR2"); - ndbout_c("apiConnectptr(%d) -> abort", apiConnectptr.i); - ndbrequire(false); //B2 indication of strange things going on - scanTabRefLab(signal, ZSCANTIME_OUT_ERROR2); - return; - } - DEBUG("scanTabRefLab: ZSTATE_ERROR"); - DEBUG(" apiConnectstate="<<apiConnectptr.p->apiConnectstate); - ndbrequire(false); //B2 indication of strange things going on - scanTabRefLab(signal, ZSTATE_ERROR); - return; - }//if - - /******************************************************* - * START THE ACTUAL LOGIC OF SCAN_NEXTREQ. - ********************************************************/ - // Stop the timer that is used to check for timeout in the API - setApiConTimer(apiConnectptr.i, 0, __LINE__); - ScanRecordPtr scanptr; - scanptr.i = apiConnectptr.p->apiScanRec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - ScanRecord* scanP = scanptr.p; - - const Uint32 len = signal->getLength() - 4; - - if (stopScan == ZTRUE) { - jam(); - /********************************************************************* - * APPLICATION IS CLOSING THE SCAN. - **********************************************************************/ - close_scan_req(signal, scanptr, true); - return; - }//if - - if (scanptr.p->scanState == ScanRecord::CLOSING_SCAN){ - jam(); - /** - * The scan is closing (typically due to error) - * but the API hasn't understood it yet - * - * Wait for API close request - */ - return; - } - - // Copy op ptrs so I dont overwrite them when sending...
- memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len); - - ScanFragNextReq tmp; - tmp.closeFlag = ZFALSE; - tmp.transId1 = apiConnectptr.p->transid[0]; - tmp.transId2 = apiConnectptr.p->transid[1]; - tmp.batch_size_rows = scanP->batch_size_rows; - tmp.batch_size_bytes = scanP->batch_byte_size; - - ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags); - ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags); - for(Uint32 i = 0 ; i<len; i++){ - jam(); - scanFragptr.i = signal->theData[i+25]; - c_scan_frag_pool.getPtr(scanFragptr); - ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED); - - scanFragptr.p->startFragTimer(ctcTimer); - scanFragptr.p->m_ops = 0; - - if(scanFragptr.p->m_scan_frag_conf_status) - { - /** - * last scan was complete - */ - jam(); - ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag); - jam(); - ndbassert(scanptr.p->m_booked_fragments_count); - scanptr.p->m_booked_fragments_count--; - scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF; - - tcConnectptr.i = scanptr.p->scanTcrec; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++; - signal->theData[0] = tcConnectptr.p->dihConnectptr; - signal->theData[1] = scanFragptr.i; - signal->theData[2] = scanptr.p->scanTableref; - signal->theData[3] = scanFragptr.p->scanFragId; - sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB); - } - else - { - jam(); - scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE; - ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend(); - * req = tmp; - req->senderData = scanFragptr.i; - sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - } - delivered.remove(scanFragptr); - running.add(scanFragptr); - }//for - -}//Dbtc::execSCAN_NEXTREQ() - -void -Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){ - - ScanRecord* scanP = scanPtr.p; - ndbrequire(scanPtr.p->scanState != ScanRecord::IDLE); - ScanRecord::ScanState old = scanPtr.p->scanState; - scanPtr.p->scanState = ScanRecord::CLOSING_SCAN; - scanPtr.p->m_close_scan_req = req_received; - - if (old == ScanRecord::WAIT_FRAGMENT_COUNT) - { - jam(); - scanPtr.p->scanState = old; - return; // Will continue on execDI_FCOUNTCONF - } - - /** - * Queue : Action - * ============= : ================= - * completed : - - * running : close -> LQH - * delivered w/ : close -> LQH - * delivered wo/ : move to completed - * queued w/ : close -> LQH - * queued wo/ : move to completed - */ - - ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0]; - nextReq->closeFlag = ZTRUE; - nextReq->transId1 = apiConnectptr.p->transid[0]; - nextReq->transId2 = apiConnectptr.p->transid[1]; - - { - ScanFragRecPtr ptr; - ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags); - ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags); - ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags); - - // Close running - for(running.first(ptr); !ptr.isNull(); ){ - ScanFragRecPtr curr = ptr; // Remove while iterating...
- running.next(ptr); - - switch(curr.p->scanFragState){ - case ScanFragRec::IDLE: - jam(); // real early abort - ndbrequire(old == ScanRecord::WAIT_AI); - running.release(curr); - continue; - case ScanFragRec::WAIT_GET_PRIMCONF: - jam(); - continue; - case ScanFragRec::LQH_ACTIVE: - jam(); - break; - default: - jamLine(curr.p->scanFragState); - ndbrequire(false); - } - - curr.p->startFragTimer(ctcTimer); - curr.p->scanFragState = ScanFragRec::LQH_ACTIVE; - nextReq->senderData = curr.i; - sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - } - - // Close delivered - for(delivered.first(ptr); !ptr.isNull(); ){ - jam(); - ScanFragRecPtr curr = ptr; // Remove while iterating... - delivered.next(ptr); - - ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED); - delivered.remove(curr); - - if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){ - jam(); - running.add(curr); - curr.p->scanFragState = ScanFragRec::LQH_ACTIVE; - curr.p->startFragTimer(ctcTimer); - nextReq->senderData = curr.i; - sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - - } else { - jam(); - c_scan_frag_pool.release(curr); - curr.p->scanFragState = ScanFragRec::COMPLETED; - curr.p->stopFragTimer(); - } - }//for - - /** - * All queued with data should be closed - */ - for(queued.first(ptr); !ptr.isNull(); ){ - jam(); - ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY); - ScanFragRecPtr curr = ptr; // Remove while iterating... - queued.next(ptr); - - queued.remove(curr); - scanP->m_queued_count--; - - if(curr.p->m_ops > 0){ - jam(); - running.add(curr); - curr.p->scanFragState = ScanFragRec::LQH_ACTIVE; - curr.p->startFragTimer(ctcTimer); - nextReq->senderData = curr.i; - sendSignal(curr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - } else { - jam(); - c_scan_frag_pool.release(curr); - curr.p->scanFragState = ScanFragRec::COMPLETED; - curr.p->stopFragTimer(); - } - } - } - close_scan_req_send_conf(signal, scanPtr); -} - -void -Dbtc::close_scan_req_send_conf(Signal* signal, ScanRecordPtr scanPtr){ - - jam(); - - ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty()); - ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty()); - //ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty()); - -#if 0 - { - ScanFragList comp(c_scan_frag_pool, scanPtr.p->m_completed_scan_frags); - ScanFragRecPtr ptr; - for(comp.first(ptr); !ptr.isNull(); comp.next(ptr)){ - ndbrequire(ptr.p->scanFragTimer == 0); - ndbrequire(ptr.p->scanFragState == ScanFragRec::COMPLETED); - } - } -#endif - - if(!scanPtr.p->m_running_scan_frags.isEmpty()){ - jam(); - return; - } - - const bool apiFail = (apiConnectptr.p->apiFailState == ZTRUE); - - if(!scanPtr.p->m_close_scan_req){ - jam(); - /** - * The API hasn't order closing yet - */ - return; - } - - Uint32 ref = apiConnectptr.p->ndbapiBlockref; - if(!apiFail && ref){ - jam(); - ScanTabConf * conf = (ScanTabConf*)&signal->theData[0]; - conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect; - conf->requestInfo = ScanTabConf::EndOfData; - conf->transId1 = apiConnectptr.p->transid[0]; - conf->transId2 = apiConnectptr.p->transid[1]; - sendSignal(ref, GSN_SCAN_TABCONF, signal, ScanTabConf::SignalLength, JBB); - } - - releaseScanResources(scanPtr); - - if(apiFail){ - jam(); - /** - * API has failed - */ - handleApiFailState(signal, apiConnectptr.i); - } -} - -Dbtc::ScanRecordPtr -Dbtc::seizeScanrec(Signal* signal) { - ScanRecordPtr scanptr; - scanptr.i = 
cfirstfreeScanrec; - ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord); - cfirstfreeScanrec = scanptr.p->nextScan; - scanptr.p->nextScan = RNIL; - ndbrequire(scanptr.p->scanState == ScanRecord::IDLE); - return scanptr; -}//Dbtc::seizeScanrec() - -void Dbtc::sendScanFragReq(Signal* signal, - ScanRecord* scanP, - ScanFragRec* scanFragP) -{ - ScanFragReq * const req = (ScanFragReq *)&signal->theData[0]; - Uint32 requestInfo = scanP->scanRequestInfo; - ScanFragReq::setScanPrio(requestInfo, 1); - apiConnectptr.i = scanP->scanApiRec; - req->tableId = scanP->scanTableref; - req->schemaVersion = scanP->scanSchemaVersion; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - req->senderData = scanFragptr.i; - req->requestInfo = requestInfo; - req->fragmentNoKeyLen = scanFragP->scanFragId | (scanP->scanKeyLen << 16); - req->resultRef = apiConnectptr.p->ndbapiBlockref; - req->savePointId = apiConnectptr.p->currSavePointId; - req->transId1 = apiConnectptr.p->transid[0]; - req->transId2 = apiConnectptr.p->transid[1]; - req->clientOpPtr = scanFragP->m_apiPtr; - req->batch_size_rows= scanP->batch_size_rows; - req->batch_size_bytes= scanP->batch_byte_size; - sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal, - ScanFragReq::SignalLength, JBB); - if(scanP->scanKeyLen > 0) - { - tcConnectptr.i = scanFragptr.i; - packKeyData000Lab(signal, scanFragP->lqhBlockref, scanP->scanKeyLen); - } - updateBuddyTimer(apiConnectptr); - scanFragP->startFragTimer(ctcTimer); -}//Dbtc::sendScanFragReq() - - -void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) { - jam(); - Uint32* ops = signal->getDataPtrSend()+4; - Uint32 op_count = scanPtr.p->m_queued_count; - if(4 + 3 * op_count > 25){ - jam(); - ops += 21; - } - - int left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId; - Uint32 booked = scanPtr.p->m_booked_fragments_count; - - ScanTabConf * conf = (ScanTabConf*)&signal->theData[0]; - conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect; - conf->requestInfo = op_count; - conf->transId1 = apiConnectptr.p->transid[0]; - conf->transId2 = apiConnectptr.p->transid[1]; - ScanFragRecPtr ptr; - { - ScanFragList queued(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags); - ScanFragList delivered(c_scan_frag_pool,scanPtr.p->m_delivered_scan_frags); - for(queued.first(ptr); !ptr.isNull(); ){ - ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY); - ScanFragRecPtr curr = ptr; // Remove while iterating... - queued.next(ptr); - - bool done = curr.p->m_scan_frag_conf_status && (left <= (int)booked); - if(curr.p->m_scan_frag_conf_status) - booked++; - - * ops++ = curr.p->m_apiPtr; - * ops++ = done ? 
RNIL : curr.i; - * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops; - - queued.remove(curr); - if(!done){ - delivered.add(curr); - curr.p->scanFragState = ScanFragRec::DELIVERED; - curr.p->stopFragTimer(); - } else { - c_scan_frag_pool.release(curr); - curr.p->scanFragState = ScanFragRec::COMPLETED; - curr.p->stopFragTimer(); - } - } - } - - scanPtr.p->m_booked_fragments_count = booked; - if(scanPtr.p->m_delivered_scan_frags.isEmpty() && - scanPtr.p->m_running_scan_frags.isEmpty()) - { - conf->requestInfo = op_count | ScanTabConf::EndOfData; - releaseScanResources(scanPtr); - } - else - { - if (scanPtr.p->m_running_scan_frags.isEmpty()) - { - jam(); - /** - * All scan frags delivered...waiting for API - */ - setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__); - } - } - - if(4 + 3 * op_count > 25){ - jam(); - LinearSectionPtr ptr[3]; - ptr[0].p = signal->getDataPtrSend()+25; - ptr[0].sz = 3 * op_count; - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal, - ScanTabConf::SignalLength, JBB, ptr, 1); - } else { - jam(); - sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal, - ScanTabConf::SignalLength + 3 * op_count, JBB); - } - scanPtr.p->m_queued_count = 0; -}//Dbtc::sendScanTabConf() - - -void Dbtc::gcpTcfinished(Signal* signal) -{ - signal->theData[0] = c_gcp_ref; - signal->theData[1] = tcheckGcpId; - sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB); -}//Dbtc::gcpTcfinished() - -void Dbtc::initApiConnect(Signal* signal) -{ - Uint32 tiacTmp; - Uint32 guard4; - - tiacTmp = capiConnectFilesize / 3; - ndbrequire(tiacTmp > 0); - guard4 = tiacTmp + 1; - for (cachePtr.i = 0; cachePtr.i < guard4; cachePtr.i++) { - refresh_watch_dog(); - ptrAss(cachePtr, cacheRecord); - cachePtr.p->firstAttrbuf = RNIL; - cachePtr.p->lastAttrbuf = RNIL; - cachePtr.p->firstKeybuf = RNIL; - cachePtr.p->lastKeybuf = RNIL; - cachePtr.p->nextCacheRec = cachePtr.i + 1; - }//for - cachePtr.i = tiacTmp; - ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord); - cachePtr.p->nextCacheRec = RNIL; - cfirstfreeCacheRec = 0; - - guard4 = tiacTmp - 1; - for (apiConnectptr.i = 0; apiConnectptr.i <= guard4; apiConnectptr.i++) { - refresh_watch_dog(); - jam(); - ptrAss(apiConnectptr, apiConnectRecord); - apiConnectptr.p->apiConnectstate = CS_DISCONNECTED; - apiConnectptr.p->apiFailState = ZFALSE; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - apiConnectptr.p->takeOverRec = (Uint8)Z8NIL; - apiConnectptr.p->cachePtr = RNIL; - apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1; - apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref - apiConnectptr.p->commitAckMarker = RNIL; - apiConnectptr.p->firstTcConnect = RNIL; - apiConnectptr.p->lastTcConnect = RNIL; - apiConnectptr.p->triggerPending = false; - apiConnectptr.p->isIndexOp = false; - apiConnectptr.p->accumulatingIndexOp = RNIL; - apiConnectptr.p->executingIndexOp = RNIL; - apiConnectptr.p->buddyPtr = RNIL; - apiConnectptr.p->currSavePointId = 0; - apiConnectptr.p->m_transaction_nodes.clear(); - apiConnectptr.p->singleUserMode = 0; - }//for - apiConnectptr.i = tiacTmp - 1; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->nextApiConnect = RNIL; - cfirstfreeApiConnect = 0; - guard4 = (2 * tiacTmp) - 1; - for (apiConnectptr.i = tiacTmp; apiConnectptr.i <= guard4; apiConnectptr.i++) - { - refresh_watch_dog(); - jam(); - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->apiConnectstate = CS_RESTART; - apiConnectptr.p->apiFailState = ZFALSE; - 
setApiConTimer(apiConnectptr.i, 0, __LINE__); - apiConnectptr.p->takeOverRec = (Uint8)Z8NIL; - apiConnectptr.p->cachePtr = RNIL; - apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1; - apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref - apiConnectptr.p->commitAckMarker = RNIL; - apiConnectptr.p->firstTcConnect = RNIL; - apiConnectptr.p->lastTcConnect = RNIL; - apiConnectptr.p->triggerPending = false; - apiConnectptr.p->isIndexOp = false; - apiConnectptr.p->accumulatingIndexOp = RNIL; - apiConnectptr.p->executingIndexOp = RNIL; - apiConnectptr.p->buddyPtr = RNIL; - apiConnectptr.p->currSavePointId = 0; - apiConnectptr.p->m_transaction_nodes.clear(); - apiConnectptr.p->singleUserMode = 0; - }//for - apiConnectptr.i = (2 * tiacTmp) - 1; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->nextApiConnect = RNIL; - cfirstfreeApiConnectCopy = tiacTmp; - guard4 = (3 * tiacTmp) - 1; - for (apiConnectptr.i = 2 * tiacTmp; apiConnectptr.i <= guard4; - apiConnectptr.i++) { - refresh_watch_dog(); - jam(); - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - setApiConTimer(apiConnectptr.i, 0, __LINE__); - apiConnectptr.p->apiFailState = ZFALSE; - apiConnectptr.p->apiConnectstate = CS_RESTART; - apiConnectptr.p->takeOverRec = (Uint8)Z8NIL; - apiConnectptr.p->cachePtr = RNIL; - apiConnectptr.p->nextApiConnect = apiConnectptr.i + 1; - apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref - apiConnectptr.p->commitAckMarker = RNIL; - apiConnectptr.p->firstTcConnect = RNIL; - apiConnectptr.p->lastTcConnect = RNIL; - apiConnectptr.p->triggerPending = false; - apiConnectptr.p->isIndexOp = false; - apiConnectptr.p->accumulatingIndexOp = RNIL; - apiConnectptr.p->executingIndexOp = RNIL; - apiConnectptr.p->buddyPtr = RNIL; - apiConnectptr.p->currSavePointId = 0; - apiConnectptr.p->m_transaction_nodes.clear(); - apiConnectptr.p->singleUserMode = 0; - }//for - apiConnectptr.i = (3 * tiacTmp) - 1; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - apiConnectptr.p->nextApiConnect = RNIL; - cfirstfreeApiConnectFail = 2 * tiacTmp; -}//Dbtc::initApiConnect() - -void Dbtc::initattrbuf(Signal* signal) -{ - ndbrequire(cattrbufFilesize > 0); - for (attrbufptr.i = 0; attrbufptr.i < cattrbufFilesize; attrbufptr.i++) { - refresh_watch_dog(); - jam(); - ptrAss(attrbufptr, attrbufRecord); - attrbufptr.p->attrbuf[ZINBUF_NEXT] = attrbufptr.i + 1; /* NEXT ATTRBUF */ - }//for - attrbufptr.i = cattrbufFilesize - 1; - ptrAss(attrbufptr, attrbufRecord); - attrbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL; /* NEXT ATTRBUF */ - cfirstfreeAttrbuf = 0; -}//Dbtc::initattrbuf() - -void Dbtc::initdatabuf(Signal* signal) -{ - ndbrequire(cdatabufFilesize > 0); - for (databufptr.i = 0; databufptr.i < cdatabufFilesize; databufptr.i++) { - refresh_watch_dog(); - ptrAss(databufptr, databufRecord); - databufptr.p->nextDatabuf = databufptr.i + 1; - }//for - databufptr.i = cdatabufFilesize - 1; - ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord); - databufptr.p->nextDatabuf = RNIL; - cfirstfreeDatabuf = 0; -}//Dbtc::initdatabuf() - -void Dbtc::initgcp(Signal* signal) -{ - ndbrequire(cgcpFilesize > 0); - for (gcpPtr.i = 0; gcpPtr.i < cgcpFilesize; gcpPtr.i++) { - ptrAss(gcpPtr, gcpRecord); - gcpPtr.p->nextGcp = gcpPtr.i + 1; - }//for - gcpPtr.i = cgcpFilesize - 1; - ptrCheckGuard(gcpPtr, cgcpFilesize, gcpRecord); - gcpPtr.p->nextGcp = RNIL; - cfirstfreeGcp = 0; - cfirstgcp = RNIL; - clastgcp = RNIL; -}//Dbtc::initgcp() - -void Dbtc::inithost(Signal* 
signal) -{ - cpackedListIndex = 0; - ndbrequire(chostFilesize > 0); - for (hostptr.i = 0; hostptr.i < chostFilesize; hostptr.i++) { - jam(); - ptrAss(hostptr, hostRecord); - hostptr.p->hostStatus = HS_DEAD; - hostptr.p->inPackedList = false; - hostptr.p->lqhTransStatus = LTS_IDLE; - hostptr.p->noOfWordsTCKEYCONF = 0; - hostptr.p->noOfWordsTCINDXCONF = 0; - hostptr.p->noOfPackedWordsLqh = 0; - hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i); - hostptr.p->m_nf_bits = 0; - }//for - c_alive_nodes.clear(); -}//Dbtc::inithost() - -void Dbtc::initialiseRecordsLab(Signal* signal, UintR Tdata0, - Uint32 retRef, Uint32 retData) -{ - switch (Tdata0) { - case 0: - jam(); - initApiConnect(signal); - break; - case 1: - jam(); - initattrbuf(signal); - break; - case 2: - jam(); - initdatabuf(signal); - break; - case 3: - jam(); - initgcp(signal); - break; - case 4: - jam(); - inithost(signal); - break; - case 5: - jam(); - // UNUSED Free to initialise something - break; - case 6: - jam(); - initTable(signal); - break; - case 7: - jam(); - initialiseScanrec(signal); - break; - case 8: - jam(); - initialiseScanOprec(signal); - break; - case 9: - jam(); - initialiseScanFragrec(signal); - break; - case 10: - jam(); - initialiseTcConnect(signal); - break; - case 11: - jam(); - initTcFail(signal); - - { - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = retData; - sendSignal(retRef, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - } - return; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch - - signal->theData[0] = TcContinueB::ZINITIALISE_RECORDS; - signal->theData[1] = Tdata0 + 1; - signal->theData[2] = 0; - signal->theData[3] = retRef; - signal->theData[4] = retData; - sendSignal(DBTC_REF, GSN_CONTINUEB, signal, 5, JBB); -} - -/* ========================================================================= */ -/* ======= INITIALISE_SCANREC ======= */ -/* */ -/* ========================================================================= */ -void Dbtc::initialiseScanrec(Signal* signal) -{ - ScanRecordPtr scanptr; - ndbrequire(cscanrecFileSize > 0); - for (scanptr.i = 0; scanptr.i < cscanrecFileSize; scanptr.i++) { - refresh_watch_dog(); - jam(); - ptrAss(scanptr, scanRecord); - new (scanptr.p) ScanRecord(); - scanptr.p->scanState = ScanRecord::IDLE; - scanptr.p->scanApiRec = RNIL; - scanptr.p->nextScan = scanptr.i + 1; - }//for - scanptr.i = cscanrecFileSize - 1; - ptrAss(scanptr, scanRecord); - scanptr.p->nextScan = RNIL; - cfirstfreeScanrec = 0; -}//Dbtc::initialiseScanrec() - -void Dbtc::initialiseScanFragrec(Signal* signal) -{ -}//Dbtc::initialiseScanFragrec() - -void Dbtc::initialiseScanOprec(Signal* signal) -{ -}//Dbtc::initialiseScanOprec() - -void Dbtc::initTable(Signal* signal) -{ - - ndbrequire(ctabrecFilesize > 0); - for (tabptr.i = 0; tabptr.i < ctabrecFilesize; tabptr.i++) { - refresh_watch_dog(); - ptrAss(tabptr, tableRecord); - tabptr.p->currentSchemaVersion = 0; - tabptr.p->m_flags = 0; - tabptr.p->set_storedTable(true); - tabptr.p->tableType = 0; - tabptr.p->set_enabled(false); - tabptr.p->set_dropping(false); - tabptr.p->noOfKeyAttr = 0; - tabptr.p->hasCharAttr = 0; - tabptr.p->noOfDistrKeys = 0; - tabptr.p->hasVarKeys = 0; - }//for -}//Dbtc::initTable() - -void Dbtc::initialiseTcConnect(Signal* signal) -{ - ndbrequire(ctcConnectFilesize >= 2); - - // Place half of tcConnectptr's in cfirstfreeTcConnectFail list - Uint32 titcTmp = ctcConnectFilesize / 2; 
- for (tcConnectptr.i = 0; tcConnectptr.i < titcTmp; tcConnectptr.i++) { - refresh_watch_dog(); - jam(); - ptrAss(tcConnectptr, tcConnectRecord); - tcConnectptr.p->tcConnectstate = OS_RESTART; - tcConnectptr.p->apiConnect = RNIL; - tcConnectptr.p->noOfNodes = 0; - tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1; - }//for - tcConnectptr.i = titcTmp - 1; - ptrAss(tcConnectptr, tcConnectRecord); - tcConnectptr.p->nextTcConnect = RNIL; - cfirstfreeTcConnectFail = 0; - - // Place other half in cfirstfreeTcConnect list - for (tcConnectptr.i = titcTmp; tcConnectptr.i < ctcConnectFilesize; - tcConnectptr.i++) { - refresh_watch_dog(); - jam(); - ptrAss(tcConnectptr, tcConnectRecord); - tcConnectptr.p->tcConnectstate = OS_RESTART; - tcConnectptr.p->apiConnect = RNIL; - tcConnectptr.p->noOfNodes = 0; - tcConnectptr.p->nextTcConnect = tcConnectptr.i + 1; - }//for - tcConnectptr.i = ctcConnectFilesize - 1; - ptrAss(tcConnectptr, tcConnectRecord); - tcConnectptr.p->nextTcConnect = RNIL; - cfirstfreeTcConnect = titcTmp; - c_counters.cconcurrentOp = 0; -}//Dbtc::initialiseTcConnect() - -/* ------------------------------------------------------------------------- */ -/* ---- LINK A GLOBAL CHECKPOINT RECORD INTO THE LIST WITH TRANSACTIONS */ -/* WAITING FOR COMPLETION. */ -/* ------------------------------------------------------------------------- */ -void Dbtc::linkGciInGcilist(Signal* signal) -{ - GcpRecordPtr tmpGcpPointer; - if (cfirstgcp == RNIL) { - jam(); - cfirstgcp = gcpPtr.i; - } else { - jam(); - tmpGcpPointer.i = clastgcp; - ptrCheckGuard(tmpGcpPointer, cgcpFilesize, gcpRecord); - tmpGcpPointer.p->nextGcp = gcpPtr.i; - }//if - clastgcp = gcpPtr.i; -}//Dbtc::linkGciInGcilist() - -/* ------------------------------------------------------------------------- */ -/* ------- LINK SECONDARY KEY BUFFER IN OPERATION RECORD ------- */ -/* ------------------------------------------------------------------------- */ -void Dbtc::linkKeybuf(Signal* signal) -{ - seizeDatabuf(signal); - tmpDatabufptr.i = cachePtr.p->lastKeybuf; - cachePtr.p->lastKeybuf = databufptr.i; - if (tmpDatabufptr.i == RNIL) { - jam(); - cachePtr.p->firstKeybuf = databufptr.i; - } else { - jam(); - ptrCheckGuard(tmpDatabufptr, cdatabufFilesize, databufRecord); - tmpDatabufptr.p->nextDatabuf = databufptr.i; - }//if -}//Dbtc::linkKeybuf() - -/* ------------------------------------------------------------------------- */ -/* ------- LINK A TC CONNECT RECORD INTO THE API LIST OF TC CONNECTIONS --- */ -/* ------------------------------------------------------------------------- */ -void Dbtc::linkTcInConnectionlist(Signal* signal) -{ - /* POINTER FOR THE CONNECT_RECORD */ - TcConnectRecordPtr ltcTcConnectptr; - - tcConnectptr.p->nextTcConnect = RNIL; - ltcTcConnectptr.i = apiConnectptr.p->lastTcConnect; - ptrCheck(ltcTcConnectptr, ctcConnectFilesize, tcConnectRecord); - apiConnectptr.p->lastTcConnect = tcConnectptr.i; - if (ltcTcConnectptr.i == RNIL) { - jam(); - apiConnectptr.p->firstTcConnect = tcConnectptr.i; - } else { - jam(); - ptrGuard(ltcTcConnectptr); - ltcTcConnectptr.p->nextTcConnect = tcConnectptr.i; - }//if -}//Dbtc::linkTcInConnectionlist() - -/*---------------------------------------------------------------------------*/ -/* RELEASE_ABORT_RESOURCES */ -/* THIS CODE RELEASES ALL RESOURCES AFTER AN ABORT OF A TRANSACTION AND ALSO */ -/* SENDS THE ABORT DECISION TO THE APPLICATION. 
*/ -/*---------------------------------------------------------------------------*/ -void Dbtc::releaseAbortResources(Signal* signal) -{ - TcConnectRecordPtr rarTcConnectptr; - - c_counters.cabortCount++; - if (apiConnectptr.p->cachePtr != RNIL) { - cachePtr.i = apiConnectptr.p->cachePtr; - ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord); - releaseAttrinfo(); - releaseKeys(); - }//if - tcConnectptr.i = apiConnectptr.p->firstTcConnect; - while (tcConnectptr.i != RNIL) { - jam(); - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - // Clear any markers that were set in CS_RECEIVING state - clearCommitAckMarker(apiConnectptr.p, tcConnectptr.p); - rarTcConnectptr.i = tcConnectptr.p->nextTcConnect; - releaseTcCon(); - tcConnectptr.i = rarTcConnectptr.i; - }//while - apiConnectptr.p->firstTcConnect = RNIL; - apiConnectptr.p->lastTcConnect = RNIL; - apiConnectptr.p->m_transaction_nodes.clear(); - apiConnectptr.p->singleUserMode = 0; - - // MASV let state be CS_ABORTING until all - // signals in the "air" have been received. Reset to CS_CONNECTED - // will be done when a TCKEYREQ with start flag is recieved - // or releaseApiCon is called - // apiConnectptr.p->apiConnectstate = CS_CONNECTED; - apiConnectptr.p->apiConnectstate = CS_ABORTING; - apiConnectptr.p->abortState = AS_IDLE; - releaseAllSeizedIndexOperations(apiConnectptr.p); - if(apiConnectptr.p->m_exec_flag || apiConnectptr.p->apiFailState == ZTRUE){ - jam(); - bool ok = false; - Uint32 blockRef = apiConnectptr.p->ndbapiBlockref; - ReturnSignal ret = apiConnectptr.p->returnsignal; - apiConnectptr.p->returnsignal = RS_NO_RETURN; - apiConnectptr.p->m_exec_flag = 0; - switch(ret){ - case RS_TCROLLBACKCONF: - jam(); - ok = true; - signal->theData[0] = apiConnectptr.p->ndbapiConnect; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - sendSignal(blockRef, GSN_TCROLLBACKCONF, signal, 3, JBB); - break; - case RS_TCROLLBACKREP:{ - jam(); - ok = true; - TcRollbackRep * const tcRollbackRep = - (TcRollbackRep *) signal->getDataPtr(); - - tcRollbackRep->connectPtr = apiConnectptr.p->ndbapiConnect; - tcRollbackRep->transId[0] = apiConnectptr.p->transid[0]; - tcRollbackRep->transId[1] = apiConnectptr.p->transid[1]; - tcRollbackRep->returnCode = apiConnectptr.p->returncode; - tcRollbackRep->errorData = apiConnectptr.p->errorData; - sendSignal(blockRef, GSN_TCROLLBACKREP, signal, - TcRollbackRep::SignalLength, JBB); - } - break; - case RS_NO_RETURN: - jam(); - ok = true; - break; - case RS_TCKEYCONF: - case RS_TC_COMMITCONF: - break; - } - if(!ok){ - jam(); - ndbout_c("returnsignal = %d", apiConnectptr.p->returnsignal); - sendSystemError(signal, __LINE__); - }//if - - } - setApiConTimer(apiConnectptr.i, 0, - 100000+c_apiConTimer_line[apiConnectptr.i]); - if (apiConnectptr.p->apiFailState == ZTRUE) { - jam(); - handleApiFailState(signal, apiConnectptr.i); - return; - }//if -}//Dbtc::releaseAbortResources() - -void Dbtc::releaseApiCon(Signal* signal, UintR TapiConnectPtr) -{ - ApiConnectRecordPtr TlocalApiConnectptr; - - TlocalApiConnectptr.i = TapiConnectPtr; - ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord); - TlocalApiConnectptr.p->nextApiConnect = cfirstfreeApiConnect; - cfirstfreeApiConnect = TlocalApiConnectptr.i; - setApiConTimer(TlocalApiConnectptr.i, 0, __LINE__); - TlocalApiConnectptr.p->apiConnectstate = CS_DISCONNECTED; - ndbassert(TlocalApiConnectptr.p->m_transaction_nodes.isclear()); - ndbassert(TlocalApiConnectptr.p->apiScanRec == RNIL); - 
TlocalApiConnectptr.p->ndbapiBlockref = 0; -}//Dbtc::releaseApiCon() - -void Dbtc::releaseApiConnectFail(Signal* signal) -{ - apiConnectptr.p->apiConnectstate = CS_RESTART; - apiConnectptr.p->takeOverRec = (Uint8)Z8NIL; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - apiConnectptr.p->nextApiConnect = cfirstfreeApiConnectFail; - cfirstfreeApiConnectFail = apiConnectptr.i; -}//Dbtc::releaseApiConnectFail() - -void Dbtc::releaseGcp(Signal* signal) -{ - ptrGuard(gcpPtr); - gcpPtr.p->nextGcp = cfirstfreeGcp; - cfirstfreeGcp = gcpPtr.i; -}//Dbtc::releaseGcp() - -void Dbtc::releaseKeys() -{ - UintR Tmp; - databufptr.i = cachePtr.p->firstKeybuf; - while (databufptr.i != RNIL) { - jam(); - ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord); - Tmp = databufptr.p->nextDatabuf; - databufptr.p->nextDatabuf = cfirstfreeDatabuf; - cfirstfreeDatabuf = databufptr.i; - databufptr.i = Tmp; - }//while - cachePtr.p->firstKeybuf = RNIL; - cachePtr.p->lastKeybuf = RNIL; -}//Dbtc::releaseKeys() - -void Dbtc::releaseTcConnectFail(Signal* signal) -{ - ptrGuard(tcConnectptr); - tcConnectptr.p->nextTcConnect = cfirstfreeTcConnectFail; - cfirstfreeTcConnectFail = tcConnectptr.i; -}//Dbtc::releaseTcConnectFail() - -void Dbtc::seizeApiConnect(Signal* signal) -{ - if (cfirstfreeApiConnect != RNIL) { - jam(); - terrorCode = ZOK; - apiConnectptr.i = cfirstfreeApiConnect; /* ASSIGN A FREE RECORD FROM */ - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - cfirstfreeApiConnect = apiConnectptr.p->nextApiConnect; - apiConnectptr.p->nextApiConnect = RNIL; - setApiConTimer(apiConnectptr.i, 0, __LINE__); - apiConnectptr.p->apiConnectstate = CS_CONNECTED; /* STATE OF CONNECTION */ - apiConnectptr.p->triggerPending = false; - apiConnectptr.p->isIndexOp = false; - } else { - jam(); - terrorCode = ZNO_FREE_API_CONNECTION; - }//if -}//Dbtc::seizeApiConnect() - -void Dbtc::seizeApiConnectFail(Signal* signal) -{ - apiConnectptr.i = cfirstfreeApiConnectFail; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - cfirstfreeApiConnectFail = apiConnectptr.p->nextApiConnect; -}//Dbtc::seizeApiConnectFail() - -void Dbtc::seizeDatabuf(Signal* signal) -{ - databufptr.i = cfirstfreeDatabuf; - ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord); - cfirstfreeDatabuf = databufptr.p->nextDatabuf; - databufptr.p->nextDatabuf = RNIL; -}//Dbtc::seizeDatabuf() - -void Dbtc::seizeTcConnect(Signal* signal) -{ - tcConnectptr.i = cfirstfreeTcConnect; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - cfirstfreeTcConnect = tcConnectptr.p->nextTcConnect; - c_counters.cconcurrentOp++; - tcConnectptr.p->isIndexOp = false; -}//Dbtc::seizeTcConnect() - -void Dbtc::seizeTcConnectFail(Signal* signal) -{ - tcConnectptr.i = cfirstfreeTcConnectFail; - ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord); - cfirstfreeTcConnectFail = tcConnectptr.p->nextTcConnect; -}//Dbtc::seizeTcConnectFail() - -void Dbtc::sendAttrinfo(Signal* signal, - UintR TattrinfoPtr, - AttrbufRecord * const regAttrPtr, - UintR TBref) -{ - UintR TdataPos; - UintR sig0, sig1, sig2, sig3, sig4, sig5, sig6, sig7; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - TdataPos = regAttrPtr->attrbuf[ZINBUF_DATA_LEN]; - sig0 = TattrinfoPtr; - sig1 = regApiPtr->transid[0]; - sig2 = regApiPtr->transid[1]; - - signal->theData[0] = sig0; - signal->theData[1] = sig1; - signal->theData[2] = sig2; - - sig0 = regAttrPtr->attrbuf[0]; - sig1 = regAttrPtr->attrbuf[1]; - sig2 = regAttrPtr->attrbuf[2]; - sig3 = 
regAttrPtr->attrbuf[3]; - sig4 = regAttrPtr->attrbuf[4]; - sig5 = regAttrPtr->attrbuf[5]; - sig6 = regAttrPtr->attrbuf[6]; - sig7 = regAttrPtr->attrbuf[7]; - - signal->theData[3] = sig0; - signal->theData[4] = sig1; - signal->theData[5] = sig2; - signal->theData[6] = sig3; - signal->theData[7] = sig4; - signal->theData[8] = sig5; - signal->theData[9] = sig6; - signal->theData[10] = sig7; - - if (TdataPos > 8) { - sig0 = regAttrPtr->attrbuf[8]; - sig1 = regAttrPtr->attrbuf[9]; - sig2 = regAttrPtr->attrbuf[10]; - sig3 = regAttrPtr->attrbuf[11]; - sig4 = regAttrPtr->attrbuf[12]; - sig5 = regAttrPtr->attrbuf[13]; - sig6 = regAttrPtr->attrbuf[14]; - - jam(); - signal->theData[11] = sig0; - signal->theData[12] = sig1; - signal->theData[13] = sig2; - signal->theData[14] = sig3; - signal->theData[15] = sig4; - signal->theData[16] = sig5; - signal->theData[17] = sig6; - - if (TdataPos > 15) { - - sig0 = regAttrPtr->attrbuf[15]; - sig1 = regAttrPtr->attrbuf[16]; - sig2 = regAttrPtr->attrbuf[17]; - sig3 = regAttrPtr->attrbuf[18]; - sig4 = regAttrPtr->attrbuf[19]; - sig5 = regAttrPtr->attrbuf[20]; - sig6 = regAttrPtr->attrbuf[21]; - - jam(); - signal->theData[18] = sig0; - signal->theData[19] = sig1; - signal->theData[20] = sig2; - signal->theData[21] = sig3; - signal->theData[22] = sig4; - signal->theData[23] = sig5; - signal->theData[24] = sig6; - }//if - }//if - sendSignal(TBref, GSN_ATTRINFO, signal, TdataPos + 3, JBB); -}//Dbtc::sendAttrinfo() - -void Dbtc::sendContinueTimeOutControl(Signal* signal, Uint32 TapiConPtr) -{ - signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_CONTROL; - signal->theData[1] = TapiConPtr; - sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB); -}//Dbtc::sendContinueTimeOutControl() - -void Dbtc::sendKeyinfo(Signal* signal, BlockReference TBRef, Uint32 len) -{ - signal->theData[0] = tcConnectptr.i; - signal->theData[1] = apiConnectptr.p->transid[0]; - signal->theData[2] = apiConnectptr.p->transid[1]; - signal->theData[3] = cdata[0]; - signal->theData[4] = cdata[1]; - signal->theData[5] = cdata[2]; - signal->theData[6] = cdata[3]; - signal->theData[7] = cdata[4]; - signal->theData[8] = cdata[5]; - signal->theData[9] = cdata[6]; - signal->theData[10] = cdata[7]; - signal->theData[11] = cdata[8]; - signal->theData[12] = cdata[9]; - signal->theData[13] = cdata[10]; - signal->theData[14] = cdata[11]; - signal->theData[15] = cdata[12]; - signal->theData[16] = cdata[13]; - signal->theData[17] = cdata[14]; - signal->theData[18] = cdata[15]; - signal->theData[19] = cdata[16]; - signal->theData[20] = cdata[17]; - signal->theData[21] = cdata[18]; - signal->theData[22] = cdata[19]; - sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB); -}//Dbtc::sendKeyinfo() - -void Dbtc::sendSystemError(Signal* signal, int line) -{ - progError(line, NDBD_EXIT_NDBREQUIRE); -}//Dbtc::sendSystemError() - -/* ========================================================================= */ -/* ------- LINK ACTUAL GCP OUT OF LIST ------- */ -/* ------------------------------------------------------------------------- */ -void Dbtc::unlinkGcp(Signal* signal) -{ - if (cfirstgcp == gcpPtr.i) { - jam(); - cfirstgcp = gcpPtr.p->nextGcp; - if (gcpPtr.i == clastgcp) { - jam(); - clastgcp = RNIL; - }//if - } else { - jam(); - /* -------------------------------------------------------------------- - * WE ARE TRYING TO REMOVE A GLOBAL CHECKPOINT WHICH WAS NOT THE OLDEST. - * THIS IS A SYSTEM ERROR. 
- * ------------------------------------------------------------------- */ - sendSystemError(signal, __LINE__); - }//if - gcpPtr.p->nextGcp = cfirstfreeGcp; - cfirstfreeGcp = gcpPtr.i; -}//Dbtc::unlinkGcp() - -void -Dbtc::execDUMP_STATE_ORD(Signal* signal) -{ - DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0]; - if(signal->theData[0] == DumpStateOrd::CommitAckMarkersSize){ - infoEvent("TC: m_commitAckMarkerPool: %d free size: %d", - m_commitAckMarkerPool.getNoOfFree(), - m_commitAckMarkerPool.getSize()); - } - if(signal->theData[0] == DumpStateOrd::CommitAckMarkersDump){ - infoEvent("TC: m_commitAckMarkerPool: %d free size: %d", - m_commitAckMarkerPool.getNoOfFree(), - m_commitAckMarkerPool.getSize()); - - CommitAckMarkerIterator iter; - for(m_commitAckMarkerHash.first(iter); iter.curr.i != RNIL; - m_commitAckMarkerHash.next(iter)){ - infoEvent("CommitAckMarker: i = %d (0x%x, 0x%x)" - " Api: %d Lghs(%d): %d %d %d %d bucket = %d", - iter.curr.i, - iter.curr.p->transid1, - iter.curr.p->transid2, - iter.curr.p->apiNodeId, - iter.curr.p->noOfLqhs, - iter.curr.p->lqhNodeId[0], - iter.curr.p->lqhNodeId[1], - iter.curr.p->lqhNodeId[2], - iter.curr.p->lqhNodeId[3], - iter.bucket); - } - } - // Dump all ScanFragRecs - if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanFragRec){ - Uint32 recordNo = 0; - if (signal->getLength() == 1) - infoEvent("TC: Dump all ScanFragRec - size: %d", - cscanFragrecFileSize); - else if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - - if (recordNo < cscanFragrecFileSize-1){ - dumpState->args[0] = DumpStateOrd::TcDumpAllScanFragRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - } - - // Dump one ScanFragRec - if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanFragRec){ - Uint32 recordNo = RNIL; - if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= cscanFragrecFileSize) - return; - - ScanFragRecPtr sfp; - sfp.i = recordNo; - c_scan_frag_pool.getPtr(sfp); - infoEvent("Dbtc::ScanFragRec[%d]: state=%d fragid=%d", - sfp.i, - sfp.p->scanFragState, - sfp.p->scanFragId); - infoEvent(" nodeid=%d, timer=%d", - refToNode(sfp.p->lqhBlockref), - sfp.p->scanFragTimer); - } - - // Dump all ScanRecords - if (dumpState->args[0] == DumpStateOrd::TcDumpAllScanRec){ - Uint32 recordNo = 0; - if (signal->getLength() == 1) - infoEvent("TC: Dump all ScanRecord - size: %d", - cscanrecFileSize); - else if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - - if (recordNo < cscanrecFileSize-1){ - dumpState->args[0] = DumpStateOrd::TcDumpAllScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - } - - // Dump all active ScanRecords - if (dumpState->args[0] == DumpStateOrd::TcDumpAllActiveScanRec){ - Uint32 recordNo = 0; - if (signal->getLength() == 1) - infoEvent("TC: Dump active ScanRecord - size: %d", - cscanrecFileSize); - else if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - ScanRecordPtr sp; - sp.i = recordNo; - ptrAss(sp, scanRecord); - if (sp.p->scanState != ScanRecord::IDLE){ - dumpState->args[0] = DumpStateOrd::TcDumpOneScanRec; - dumpState->args[1] = recordNo; - 
execDUMP_STATE_ORD(signal); - } - - if (recordNo < cscanrecFileSize-1){ - dumpState->args[0] = DumpStateOrd::TcDumpAllActiveScanRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - } - - // Dump one ScanRecord - // and associated ScanFragRec and ApiConnectRecord - if (dumpState->args[0] == DumpStateOrd::TcDumpOneScanRec){ - Uint32 recordNo = RNIL; - if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= cscanrecFileSize) - return; - - ScanRecordPtr sp; - sp.i = recordNo; - ptrAss(sp, scanRecord); - infoEvent("Dbtc::ScanRecord[%d]: state=%d" - "nextfrag=%d, nofrag=%d", - sp.i, - sp.p->scanState, - sp.p->scanNextFragId, - sp.p->scanNoFrag); - infoEvent(" ailen=%d, para=%d, receivedop=%d, noOprePperFrag=%d", - sp.p->scanAiLength, - sp.p->scanParallel, - sp.p->scanReceivedOperations, - sp.p->batch_size_rows); - infoEvent(" schv=%d, tab=%d, sproc=%d", - sp.p->scanSchemaVersion, - sp.p->scanTableref, - sp.p->scanStoredProcId); - infoEvent(" apiRec=%d, next=%d", - sp.p->scanApiRec, sp.p->nextScan); - - if (sp.p->scanState != ScanRecord::IDLE){ - // Request dump of ScanFragRec - ScanFragRecPtr sfptr; -#define DUMP_SFR(x){\ - ScanFragList list(c_scan_frag_pool, x);\ - for(list.first(sfptr); !sfptr.isNull(); list.next(sfptr)){\ - dumpState->args[0] = DumpStateOrd::TcDumpOneScanFragRec; \ - dumpState->args[1] = sfptr.i;\ - execDUMP_STATE_ORD(signal);\ - }} - - DUMP_SFR(sp.p->m_running_scan_frags); - DUMP_SFR(sp.p->m_queued_scan_frags); - DUMP_SFR(sp.p->m_delivered_scan_frags); - - // Request dump of ApiConnectRecord - dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec; - dumpState->args[1] = sp.p->scanApiRec; - execDUMP_STATE_ORD(signal); - } - - } - - // Dump all ApiConnectRecord(s) - if (dumpState->args[0] == DumpStateOrd::TcDumpAllApiConnectRec){ - Uint32 recordNo = 0; - if (signal->getLength() == 1) - infoEvent("TC: Dump all ApiConnectRecord - size: %d", - capiConnectFilesize); - else if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec; - dumpState->args[1] = recordNo; - execDUMP_STATE_ORD(signal); - - if (recordNo < capiConnectFilesize-1){ - dumpState->args[0] = DumpStateOrd::TcDumpAllApiConnectRec; - dumpState->args[1] = recordNo+1; - sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); - } - } - - // Dump one ApiConnectRecord - if (dumpState->args[0] == DumpStateOrd::TcDumpOneApiConnectRec){ - Uint32 recordNo = RNIL; - if (signal->getLength() == 2) - recordNo = dumpState->args[1]; - else - return; - - if (recordNo >= capiConnectFilesize) - return; - - ApiConnectRecordPtr ap; - ap.i = recordNo; - ptrAss(ap, apiConnectRecord); - infoEvent("Dbtc::ApiConnectRecord[%d]: state=%d, abortState=%d, " - "apiFailState=%d", - ap.i, - ap.p->apiConnectstate, - ap.p->abortState, - ap.p->apiFailState); - infoEvent(" transid(0x%x, 0x%x), apiBref=0x%x, scanRec=%d", - ap.p->transid[0], - ap.p->transid[1], - ap.p->ndbapiBlockref, - ap.p->apiScanRec); - infoEvent(" ctcTimer=%d, apiTimer=%d, counter=%d, retcode=%d, " - "retsig=%d", - ctcTimer, getApiConTimer(ap.i), - ap.p->counter, - ap.p->returncode, - ap.p->returnsignal); - infoEvent(" lqhkeyconfrec=%d, lqhkeyreqrec=%d, " - "tckeyrec=%d", - ap.p->lqhkeyconfrec, - ap.p->lqhkeyreqrec, - ap.p->tckeyrec); - infoEvent(" next=%d ", - ap.p->nextApiConnect); - } - - if (dumpState->args[0] == DumpStateOrd::TcSetTransactionTimeout){ - jam(); - if(signal->getLength() > 
1){ - set_timeout_value(signal->theData[1]); - } - } - - if (dumpState->args[0] == DumpStateOrd::TcSetApplTransactionTimeout){ - jam(); - if(signal->getLength() > 1){ - set_appl_timeout_value(signal->theData[1]); - } - } - - if (dumpState->args[0] == DumpStateOrd::StartTcTimer){ - c_counters.c_trans_status = TransCounters::Started; - c_counters.reset(); - } - - if (dumpState->args[0] == DumpStateOrd::StopTcTimer){ - c_counters.c_trans_status = TransCounters::Off; - Uint32 len = c_counters.report(signal); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB); - c_counters.reset(); - } - - if (dumpState->args[0] == DumpStateOrd::StartPeriodicTcTimer){ - c_counters.c_trans_status = TransCounters::Timer; - c_counters.reset(); - signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1); - } - - if (dumpState->args[0] == DumpStateOrd::TcStartDumpIndexOpCount) - { - static int frequency = 1; - if (signal->getLength() > 1) - frequency = signal->theData[1]; - else - if (refToBlock(signal->getSendersBlockRef()) != DBTC) - frequency = 1; - - if (frequency) - { - dumpState->args[0] = DumpStateOrd::TcDumpIndexOpCount; - execDUMP_STATE_ORD(signal); - dumpState->args[0] = DumpStateOrd::TcStartDumpIndexOpCount; - - Uint32 delay = 1000 * (frequency > 25 ? 25 : frequency); - sendSignalWithDelay(cownref, GSN_DUMP_STATE_ORD, signal, delay, 1); - } - } - - if (dumpState->args[0] == DumpStateOrd::TcDumpIndexOpCount) - { - infoEvent("IndexOpCount: pool: %d free: %d", - c_theIndexOperationPool.getSize(), - c_theIndexOperationPool.getNoOfFree()); - } - - if (dumpState->args[0] == 2514) - { - if (signal->getLength() == 2) - { - dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec; - execDUMP_STATE_ORD(signal); - } - - NodeReceiverGroup rg(CMVMI, c_alive_nodes); - dumpState->args[0] = 15; - sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBB); - - signal->theData[0] = 2515; - sendSignalWithDelay(cownref, GSN_DUMP_STATE_ORD, signal, 1000, 1); - return; - } - - if (dumpState->args[0] == 2515) - { - NdbNodeBitmask mask = c_alive_nodes; - mask.clear(getOwnNodeId()); - NodeReceiverGroup rg(NDBCNTR, mask); - - sendSignal(rg, GSN_SYSTEM_ERROR, signal, 1, JBB); - sendSignalWithDelay(cownref, GSN_SYSTEM_ERROR, signal, 300, 1); - return; - } -}//Dbtc::execDUMP_STATE_ORD() - -void Dbtc::execABORT_ALL_REQ(Signal* signal) -{ - jamEntry(); - AbortAllReq * req = (AbortAllReq*)&signal->theData[0]; - AbortAllRef * ref = (AbortAllRef*)&signal->theData[0]; - - const Uint32 senderData = req->senderData; - const BlockReference senderRef = req->senderRef; - - if(getAllowStartTransaction(refToNode(senderRef), 0) == true && !getNodeState().getSingleUserMode()){ - jam(); - - ref->senderData = senderData; - ref->errorCode = AbortAllRef::InvalidState; - sendSignal(senderRef, GSN_ABORT_ALL_REF, signal, - AbortAllRef::SignalLength, JBB); - return; - } - - if(c_abortRec.clientRef != 0){ - jam(); - - ref->senderData = senderData; - ref->errorCode = AbortAllRef::AbortAlreadyInProgress; - sendSignal(senderRef, GSN_ABORT_ALL_REF, signal, - AbortAllRef::SignalLength, JBB); - return; - } - - if(refToNode(senderRef) != getOwnNodeId()){ - jam(); - - ref->senderData = senderData; - ref->errorCode = AbortAllRef::FunctionNotImplemented; - sendSignal(senderRef, GSN_ABORT_ALL_REF, signal, - AbortAllRef::SignalLength, JBB); - return; - } - - c_abortRec.clientRef = senderRef; - c_abortRec.clientData = senderData; - c_abortRec.oldTimeOutValue = ctimeOutValue; - - ctimeOutValue = 0; - const Uint32 
sleepTime = (2 * 10 * ctimeOutCheckDelay + 199) / 200; - - checkAbortAllTimeout(signal, (sleepTime == 0 ? 1 : sleepTime)); -} - -void Dbtc::checkAbortAllTimeout(Signal* signal, Uint32 sleepTime) -{ - - ndbrequire(c_abortRec.clientRef != 0); - - if(sleepTime > 0){ - jam(); - - sleepTime -= 1; - signal->theData[0] = TcContinueB::ZWAIT_ABORT_ALL; - signal->theData[1] = sleepTime; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 200, 2); - return; - } - - AbortAllConf * conf = (AbortAllConf*)&signal->theData[0]; - conf->senderData = c_abortRec.clientData; - sendSignal(c_abortRec.clientRef, GSN_ABORT_ALL_CONF, signal, - AbortAllConf::SignalLength, JBB); - - ctimeOutValue = c_abortRec.oldTimeOutValue; - c_abortRec.clientRef = 0; -} - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* ------------------ TRIGGER AND INDEX HANDLING ------------------ */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -void Dbtc::execCREATE_TRIG_REQ(Signal* signal) -{ - jamEntry(); - CreateTrigReq * const createTrigReq = - (CreateTrigReq *)&signal->theData[0]; - TcDefinedTriggerData* triggerData; - DefinedTriggerPtr triggerPtr; - BlockReference sender = signal->senderBlockRef(); - - releaseSections(signal); - - triggerPtr.i = createTrigReq->getTriggerId(); - if (ERROR_INSERTED(8033) || - !c_theDefinedTriggers.seizeId(triggerPtr, - createTrigReq->getTriggerId())) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - // Failed to allocate trigger record - CreateTrigRef * const createTrigRef = - (CreateTrigRef *)&signal->theData[0]; - - createTrigRef->setConnectionPtr(createTrigReq->getConnectionPtr()); - createTrigRef->setErrorCode(CreateTrigRef::TooManyTriggers); - sendSignal(sender, GSN_CREATE_TRIG_REF, - signal, CreateTrigRef::SignalLength, JBB); - return; - } - - triggerData = triggerPtr.p; - triggerData->triggerId = createTrigReq->getTriggerId(); - triggerData->triggerType = createTrigReq->getTriggerType(); - triggerData->triggerEvent = createTrigReq->getTriggerEvent(); - triggerData->attributeMask = createTrigReq->getAttributeMask(); - if (triggerData->triggerType == TriggerType::SECONDARY_INDEX) - triggerData->indexId = createTrigReq->getIndexId(); - CreateTrigConf * const createTrigConf = - (CreateTrigConf *)&signal->theData[0]; - - createTrigConf->setConnectionPtr(createTrigReq->getConnectionPtr()); - sendSignal(sender, GSN_CREATE_TRIG_CONF, - signal, CreateTrigConf::SignalLength, JBB); -} - - -void Dbtc::execDROP_TRIG_REQ(Signal* signal) -{ - jamEntry(); - DropTrigReq * const dropTrigReq = (DropTrigReq *)&signal->theData[0]; - BlockReference sender = signal->senderBlockRef(); - - if (ERROR_INSERTED(8035) || - (c_theDefinedTriggers.getPtr(dropTrigReq->getTriggerId())) == NULL) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - // Failed to find find trigger record - DropTrigRef * const dropTrigRef = (DropTrigRef *)&signal->theData[0]; - - dropTrigRef->setConnectionPtr(dropTrigReq->getConnectionPtr()); - dropTrigRef->setErrorCode(DropTrigRef::TriggerNotFound); - sendSignal(sender, GSN_DROP_TRIG_REF, - signal, DropTrigRef::SignalLength, JBB); - return; - } - - // Release trigger record - c_theDefinedTriggers.release(dropTrigReq->getTriggerId()); - - DropTrigConf * const dropTrigConf = (DropTrigConf *)&signal->theData[0]; - - dropTrigConf->setConnectionPtr(dropTrigReq->getConnectionPtr()); - sendSignal(sender, GSN_DROP_TRIG_CONF, - 
signal, DropTrigConf::SignalLength, JBB); -} - -void Dbtc::execCREATE_INDX_REQ(Signal* signal) -{ - jamEntry(); - CreateIndxReq * const createIndxReq = - (CreateIndxReq *)signal->getDataPtr(); - TcIndexData* indexData; - TcIndexDataPtr indexPtr; - BlockReference sender = signal->senderBlockRef(); - - if (ERROR_INSERTED(8034) || - !c_theIndexes.seizeId(indexPtr, createIndxReq->getIndexId())) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - // Failed to allocate index record - CreateIndxRef * const createIndxRef = - (CreateIndxRef *)&signal->theData[0]; - - createIndxRef->setConnectionPtr(createIndxReq->getConnectionPtr()); - createIndxRef->setErrorCode(CreateIndxRef::TooManyIndexes); - releaseSections(signal); - sendSignal(sender, GSN_CREATE_INDX_REF, - signal, CreateIndxRef::SignalLength, JBB); - return; - } - indexData = indexPtr.p; - // Indexes always start in state IS_BUILDING - // Will become IS_ONLINE in execALTER_INDX_REQ - indexData->indexState = IS_BUILDING; - indexData->indexId = indexPtr.i; - indexData->primaryTableId = createIndxReq->getTableId(); - - // So far need only attribute count - SegmentedSectionPtr ssPtr; - signal->getSection(ssPtr, CreateIndxReq::ATTRIBUTE_LIST_SECTION); - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); - r0.reset(); // undo implicit first() - if (!r0.getWord(&indexData->attributeList.sz) || - !r0.getWords(indexData->attributeList.id, indexData->attributeList.sz)) { - ndbrequire(false); - } - indexData->primaryKeyPos = indexData->attributeList.sz; - - releaseSections(signal); - - CreateIndxConf * const createIndxConf = - (CreateIndxConf *)&signal->theData[0]; - - createIndxConf->setConnectionPtr(createIndxReq->getConnectionPtr()); - createIndxConf->setTableId(createIndxReq->getTableId()); - createIndxConf->setIndexId(createIndxReq->getIndexId()); - sendSignal(sender, GSN_CREATE_INDX_CONF, - signal, CreateIndxConf::SignalLength, JBB); -} - -void Dbtc::execALTER_INDX_REQ(Signal* signal) -{ - jamEntry(); - AlterIndxReq * const alterIndxReq = (AlterIndxReq *)signal->getDataPtr(); - TcIndexData* indexData; - //BlockReference sender = signal->senderBlockRef(); - BlockReference sender = (BlockReference) alterIndxReq->getUserRef(); - Uint32 connectionPtr = alterIndxReq->getConnectionPtr(); - AlterIndxReq::RequestType requestType = alterIndxReq->getRequestType(); - Uint32 tableId = alterIndxReq->getTableId(); - Uint32 indexId = alterIndxReq->getIndexId(); - bool online = (alterIndxReq->getOnline() == 1) ? 
true : false; - - if ((indexData = c_theIndexes.getPtr(indexId)) == NULL) { - jam(); - // Failed to find index record - AlterIndxRef * const alterIndxRef = - (AlterIndxRef *)signal->getDataPtrSend(); - - alterIndxRef->setUserRef(reference()); - alterIndxRef->setConnectionPtr(connectionPtr); - alterIndxRef->setRequestType(requestType); - alterIndxRef->setTableId(tableId); - alterIndxRef->setIndexId(indexId); - alterIndxRef->setErrorCode(AlterIndxRef::IndexNotFound); - alterIndxRef->setErrorLine(__LINE__); - alterIndxRef->setErrorNode(getOwnNodeId()); - sendSignal(sender, GSN_ALTER_INDX_REF, - signal, AlterIndxRef::SignalLength, JBB); - return; - } - // Found index record, alter it's state - if (online) { - jam(); - indexData->indexState = IS_ONLINE; - } else { - jam(); - indexData->indexState = IS_BUILDING; - }//if - AlterIndxConf * const alterIndxConf = - (AlterIndxConf *)signal->getDataPtrSend(); - - alterIndxConf->setUserRef(reference()); - alterIndxConf->setConnectionPtr(connectionPtr); - alterIndxConf->setRequestType(requestType); - alterIndxConf->setTableId(tableId); - alterIndxConf->setIndexId(indexId); - sendSignal(sender, GSN_ALTER_INDX_CONF, - signal, AlterIndxConf::SignalLength, JBB); -} - -void Dbtc::execFIRE_TRIG_ORD(Signal* signal) -{ - jamEntry(); - FireTrigOrd * const fireOrd = (FireTrigOrd *)signal->getDataPtr(); - ApiConnectRecord *localApiConnectRecord = apiConnectRecord; - ApiConnectRecordPtr transPtr; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - TcConnectRecordPtr opPtr; - /** - * TODO - * Check transid, - * Fix overload i.e invalid word count - */ - TcFiredTriggerData key; - key.fireingOperation = fireOrd->getConnectionPtr(); - key.nodeId = refToNode(signal->getSendersBlockRef()); - FiredTriggerPtr trigPtr; - if(c_firedTriggerHash.find(trigPtr, key)){ - - c_firedTriggerHash.remove(trigPtr); - - trigPtr.p->fragId= fireOrd->fragId; - bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords; - ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords; - ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords; - if(ok){ - opPtr.i = key.fireingOperation; - ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); - transPtr.i = opPtr.p->apiConnect; - transPtr.p = &localApiConnectRecord[transPtr.i]; - - opPtr.p->noReceivedTriggers++; - opPtr.p->triggerExecutionCount++; - - // Insert fired trigger in execution queue - transPtr.p->theFiredTriggers.add(trigPtr); - if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) { - executeTriggers(signal, &transPtr); - } - return; - } - jam(); - c_theFiredTriggerPool.release(trigPtr); - } - jam(); - /** - * Failed to find record or invalid word counts - */ - ndbrequire(false); -} - -void Dbtc::execTRIG_ATTRINFO(Signal* signal) -{ - jamEntry(); - TrigAttrInfo * const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtr(); - Uint32 attrInfoLength = signal->getLength() - TrigAttrInfo::StaticLength; - const Uint32 *src = trigAttrInfo->getData(); - FiredTriggerPtr firedTrigPtr; - - TcFiredTriggerData key; - key.fireingOperation = trigAttrInfo->getConnectionPtr(); - key.nodeId = refToNode(signal->getSendersBlockRef()); - if(!c_firedTriggerHash.find(firedTrigPtr, key)){ - jam(); - if(!c_firedTriggerHash.seize(firedTrigPtr)){ - jam(); - /** - * Will be handled when FIRE_TRIG_ORD arrives - */ - ndbout_c("op: %d node: %d failed to seize", - key.fireingOperation, key.nodeId); - return; - } - ndbrequire(firedTrigPtr.p->keyValues.getSize() == 0 && - 
firedTrigPtr.p->beforeValues.getSize() == 0 && - firedTrigPtr.p->afterValues.getSize() == 0); - - firedTrigPtr.p->nodeId = refToNode(signal->getSendersBlockRef()); - firedTrigPtr.p->fireingOperation = key.fireingOperation; - firedTrigPtr.p->triggerId = trigAttrInfo->getTriggerId(); - c_firedTriggerHash.add(firedTrigPtr); - } - - AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool; - switch (trigAttrInfo->getAttrInfoType()) { - case(TrigAttrInfo::PRIMARY_KEY): - jam(); - { - LocalDataBuffer<11> buf(pool, firedTrigPtr.p->keyValues); - buf.append(src, attrInfoLength); - } - break; - case(TrigAttrInfo::BEFORE_VALUES): - jam(); - { - LocalDataBuffer<11> buf(pool, firedTrigPtr.p->beforeValues); - buf.append(src, attrInfoLength); - } - break; - case(TrigAttrInfo::AFTER_VALUES): - jam(); - { - LocalDataBuffer<11> buf(pool, firedTrigPtr.p->afterValues); - buf.append(src, attrInfoLength); - } - break; - default: - ndbrequire(false); - } -} - -void Dbtc::execDROP_INDX_REQ(Signal* signal) -{ - jamEntry(); - DropIndxReq * const dropIndxReq = (DropIndxReq *)signal->getDataPtr(); - TcIndexData* indexData; - BlockReference sender = signal->senderBlockRef(); - - if (ERROR_INSERTED(8036) || - (indexData = c_theIndexes.getPtr(dropIndxReq->getIndexId())) == NULL) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - // Failed to find index record - DropIndxRef * const dropIndxRef = - (DropIndxRef *)signal->getDataPtrSend(); - - dropIndxRef->setConnectionPtr(dropIndxReq->getConnectionPtr()); - dropIndxRef->setErrorCode(DropIndxRef::IndexNotFound); - sendSignal(sender, GSN_DROP_INDX_REF, - signal, DropIndxRef::SignalLength, JBB); - return; - } - // Release index record - c_theIndexes.release(dropIndxReq->getIndexId()); - - DropIndxConf * const dropIndxConf = - (DropIndxConf *)signal->getDataPtrSend(); - - dropIndxConf->setConnectionPtr(dropIndxReq->getConnectionPtr()); - sendSignal(sender, GSN_DROP_INDX_CONF, - signal, DropIndxConf::SignalLength, JBB); -} - -void Dbtc::execTCINDXREQ(Signal* signal) -{ - jamEntry(); - - TcKeyReq * const tcIndxReq = (TcKeyReq *)signal->getDataPtr(); - const UintR TapiIndex = tcIndxReq->apiConnectPtr; - Uint32 tcIndxRequestInfo = tcIndxReq->requestInfo; - Uint32 startFlag = tcIndxReq->getStartFlag(tcIndxRequestInfo); - Uint32 * dataPtr = &tcIndxReq->scanInfo; - Uint32 indexBufSize = 8; // Maximum for index in TCINDXREQ - Uint32 attrBufSize = 5; // Maximum for attrInfo in TCINDXREQ - ApiConnectRecordPtr transPtr; - transPtr.i = TapiIndex; - if (transPtr.i >= capiConnectFilesize) { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - ptrAss(transPtr, apiConnectRecord); - ApiConnectRecord * const regApiPtr = transPtr.p; - // Seize index operation - TcIndexOperationPtr indexOpPtr; - if ((startFlag == 1) && - (regApiPtr->apiConnectstate == CS_CONNECTED || - (regApiPtr->apiConnectstate == CS_STARTED && - regApiPtr->firstTcConnect == RNIL)) || - (regApiPtr->apiConnectstate == CS_ABORTING && - regApiPtr->abortState == AS_IDLE)) { - jam(); - // This is a newly started transaction, clean-up - releaseAllSeizedIndexOperations(regApiPtr); - - regApiPtr->apiConnectstate = CS_STARTED; - regApiPtr->transid[0] = tcIndxReq->transId1; - regApiPtr->transid[1] = tcIndxReq->transId2; - }//if - - if (getNodeState().startLevel == NodeState::SL_SINGLEUSER && - getNodeState().getSingleUserApi() != - refToNode(regApiPtr->ndbapiBlockref)) - { - terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE; - regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo); - apiConnectptr = transPtr; - 
abortErrorLab(signal); - return; - } - - if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) { - jam(); - // Failed to allocate index operation - terrorCode = 288; - regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo); - apiConnectptr = transPtr; - abortErrorLab(signal); - return; - } - TcIndexOperation* indexOp = indexOpPtr.p; - indexOp->indexOpId = indexOpPtr.i; - - // Save original signal - indexOp->tcIndxReq = *tcIndxReq; - indexOp->connectionIndex = TapiIndex; - regApiPtr->accumulatingIndexOp = indexOp->indexOpId; - - // If operation is readTupleExclusive or updateTuple then read index - // table with exclusive lock - Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo); - Uint32 attrLength = TcKeyReq::getAttrinfoLen(tcIndxReq->attrLen); - indexOp->expectedKeyInfo = indexLength; - Uint32 includedIndexLength = MIN(indexLength, indexBufSize); - indexOp->expectedAttrInfo = attrLength; - Uint32 includedAttrLength = MIN(attrLength, attrBufSize); - - int ret; - if ((ret = saveINDXKEYINFO(signal, - indexOp, - dataPtr, - includedIndexLength)) == 0) - { - jam(); - // We have received all we need - readIndexTable(signal, regApiPtr, indexOp); - return; - } - else if (ret == -1) - { - jam(); - return; - } - - dataPtr += includedIndexLength; - if (saveINDXATTRINFO(signal, - indexOp, - dataPtr, - includedAttrLength) == 0) { - jam(); - // We have received all we need - readIndexTable(signal, regApiPtr, indexOp); - return; - } -} - - -void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag) -{ - HostRecordPtr localHostptr; - ApiConnectRecord * const regApiPtr = apiConnectptr.p; - const UintR TopWords = (UintR)regApiPtr->tcindxrec; - localHostptr.i = refToNode(regApiPtr->ndbapiBlockref); - const Uint32 type = getNodeInfo(localHostptr.i).m_type; - const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM); - const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref); - const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 
0 : 1); - ptrAss(localHostptr, hostRecord); - UintR TcurrLen = localHostptr.p->noOfWordsTCINDXCONF; - UintR confInfo = 0; - TcIndxConf::setNoOfOperations(confInfo, (TopWords >> 1)); - TcIndxConf::setCommitFlag(confInfo, TcommitFlag == 1); - TcIndxConf::setMarkerFlag(confInfo, Tmarker); - const UintR TpacketLen = 6 + TopWords; - regApiPtr->tcindxrec = 0; - - if(TcommitFlag || (regApiPtr->lqhkeyreqrec == regApiPtr->lqhkeyconfrec)){ - jam(); - regApiPtr->m_exec_flag = 0; - } - - if ((TpacketLen > 25) || !is_api){ - TcIndxConf * const tcIndxConf = (TcIndxConf *)signal->getDataPtrSend(); - - jam(); - tcIndxConf->apiConnectPtr = regApiPtr->ndbapiConnect; - tcIndxConf->gci = regApiPtr->globalcheckpointid;; - tcIndxConf->confInfo = confInfo; - tcIndxConf->transId1 = regApiPtr->transid[0]; - tcIndxConf->transId2 = regApiPtr->transid[1]; - copyFromToLen(&regApiPtr->tcIndxSendArray[0], - (UintR*)&tcIndxConf->operations, - (UintR)ZTCOPCONF_SIZE); - sendSignal(regApiPtr->ndbapiBlockref, - GSN_TCINDXCONF, signal, (TpacketLen - 1), JBB); - return; - } else if (((TcurrLen + TpacketLen) > 25) && (TcurrLen > 0)) { - jam(); - sendPackedTCINDXCONF(signal, localHostptr.p, localHostptr.i); - TcurrLen = 0; - } else { - jam(); - updatePackedList(signal, localHostptr.p, localHostptr.i); - }//if -// ------------------------------------------------------------------------- -// The header contains the block reference of receiver plus the real signal -// length - 3, since we have the real signal length plus one additional word -// for the header we have to do - 4. -// ------------------------------------------------------------------------- - UintR Tpack0 = (TblockNum << 16) + (TpacketLen - 4); - UintR Tpack1 = regApiPtr->ndbapiConnect; - UintR Tpack2 = regApiPtr->globalcheckpointid; - UintR Tpack3 = confInfo; - UintR Tpack4 = regApiPtr->transid[0]; - UintR Tpack5 = regApiPtr->transid[1]; - - localHostptr.p->noOfWordsTCINDXCONF = TcurrLen + TpacketLen; - - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 0] = Tpack0; - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 1] = Tpack1; - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 2] = Tpack2; - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 3] = Tpack3; - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 4] = Tpack4; - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + 5] = Tpack5; - - UintR Ti; - for (Ti = 6; Ti < TpacketLen; Ti++) { - localHostptr.p->packedWordsTCINDXCONF[TcurrLen + Ti] = - regApiPtr->tcIndxSendArray[Ti - 6]; - }//for -}//Dbtc::sendTcIndxConf() - -void Dbtc::execINDXKEYINFO(Signal* signal) -{ - jamEntry(); - Uint32 keyInfoLength = signal->getLength() - IndxKeyInfo::HeaderLength; - IndxKeyInfo * const indxKeyInfo = (IndxKeyInfo *)signal->getDataPtr(); - const Uint32 *src = indxKeyInfo->getData(); - const UintR TconnectIndex = indxKeyInfo->connectPtr; - ApiConnectRecordPtr transPtr; - transPtr.i = TconnectIndex; - if (transPtr.i >= capiConnectFilesize) { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - ptrAss(transPtr, apiConnectRecord); - ApiConnectRecord * const regApiPtr = transPtr.p; - TcIndexOperationPtr indexOpPtr; - TcIndexOperation* indexOp; - - if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false) - { - TCKEY_abort(signal, 19); - return; - } - - if (regApiPtr->apiConnectstate == CS_ABORTING) - { - jam(); - return; - } - - if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) - { - indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - if (saveINDXKEYINFO(signal, - indexOp, - src, - keyInfoLength) ==
0) { - jam(); - // We have received all we need - readIndexTable(signal, regApiPtr, indexOp); - } - } -} - -void Dbtc::execINDXATTRINFO(Signal* signal) -{ - jamEntry(); - Uint32 attrInfoLength = signal->getLength() - IndxAttrInfo::HeaderLength; - IndxAttrInfo * const indxAttrInfo = (IndxAttrInfo *)signal->getDataPtr(); - const Uint32 *src = indxAttrInfo->getData(); - const UintR TconnectIndex = indxAttrInfo->connectPtr; - ApiConnectRecordPtr transPtr; - transPtr.i = TconnectIndex; - if (transPtr.i >= capiConnectFilesize) { - jam(); - warningHandlerLab(signal, __LINE__); - return; - }//if - ptrAss(transPtr, apiConnectRecord); - ApiConnectRecord * const regApiPtr = transPtr.p; - TcIndexOperationPtr indexOpPtr; - TcIndexOperation* indexOp; - - if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false) - { - TCKEY_abort(signal, 19); - return; - } - - if (regApiPtr->apiConnectstate == CS_ABORTING) - { - jam(); - return; - } - - if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL) - { - indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - if (saveINDXATTRINFO(signal, - indexOp, - src, - attrInfoLength) == 0) { - jam(); - // We have received all we need - readIndexTable(signal, regApiPtr, indexOp); - return; - } - return; - } -} - -/** - * Save signal INDXKEYINFO - * Return true if we have received all needed data - */ -int -Dbtc::saveINDXKEYINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) -{ - if (ERROR_INSERTED(8052) || !indexOp->keyInfo.append(src, len)) { - jam(); - // Failed to seize keyInfo, abort transaction -#ifdef VM_TRACE - ndbout_c("Dbtc::saveINDXKEYINFO: Failed to seize keyinfo\n"); -#endif - // Abort transaction - apiConnectptr.i = indexOp->connectionIndex; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 289; - if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) - apiConnectptr.p->m_exec_flag= 1; - abortErrorLab(signal); - return -1; - } - if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { - jam(); - return 0; - } - return 1; -} - -bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp) -{ - return (indexOp->keyInfo.getSize() == indexOp->expectedKeyInfo); -} - -/** - * Save signal INDXATTRINFO - * Return true if we have received all needed data - */ -int -Dbtc::saveINDXATTRINFO(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) -{ - if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) { - jam(); -#ifdef VM_TRACE - ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n"); -#endif - apiConnectptr.i = indexOp->connectionIndex; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 289; - if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo)) - apiConnectptr.p->m_exec_flag= 1; - abortErrorLab(signal); - return -1; - } - if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) { - jam(); - return 0; - } - return 1; -} - -bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp) -{ - return (indexOp->attrInfo.getSize() == indexOp->expectedAttrInfo); -} - -bool Dbtc::saveTRANSID_AI(Signal* signal, - TcIndexOperation* indexOp, - const Uint32 *src, - Uint32 len) -{ - Uint32 currentTransIdAILength = indexOp->transIdAI.getSize(); - - if (currentTransIdAILength == 0) { - jam(); - // Read first AttributeHeader to get expected size - // of the single key 
attribute expected - AttributeHeader* head = (AttributeHeader *) src; - indexOp->expectedTransIdAI = head->getHeaderSize() + head->getDataSize(); - } - if (!indexOp->transIdAI.append(src, len)) { - jam(); -#ifdef VM_TRACE - ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize transIdAI\n"); -#endif - apiConnectptr.i = indexOp->connectionIndex; - ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord); - releaseIndexOperation(apiConnectptr.p, indexOp); - terrorCode = 4000; - abortErrorLab(signal); - return false; - } - return true; -} - -bool Dbtc::receivedAllTRANSID_AI(TcIndexOperation* indexOp) -{ - return (indexOp->transIdAI.getSize() == indexOp->expectedTransIdAI); -} - -/** - * Receive signal TCINDXCONF - * This can be either the return of reading an index table - * or performing an index operation - */ -void Dbtc::execTCKEYCONF(Signal* signal) -{ - TcKeyConf * const tcKeyConf = (TcKeyConf *)signal->getDataPtr(); - TcIndexOperationPtr indexOpPtr; - - jamEntry(); - indexOpPtr.i = tcKeyConf->apiConnectPtr; - TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - Uint32 confInfo = tcKeyConf->confInfo; - - /** - * Check on TCKEYCONF wheater the the transaction was committed - */ - Uint32 Tcommit = TcKeyConf::getCommitFlag(confInfo); - - indexOpPtr.p = indexOp; - if (!indexOp) { - jam(); - // Missing index operation - return; - } - const UintR TconnectIndex = indexOp->connectionIndex; - ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex]; - apiConnectptr.p = regApiPtr; - apiConnectptr.i = TconnectIndex; - switch(indexOp->indexOpState) { - case(IOS_NOOP): { - jam(); - // Should never happen, abort - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - case(IOS_INDEX_ACCESS): { - jam(); - // Wait for TRANSID_AI - indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI; - break; - } - case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): { - jam(); - // Double TCKEYCONF, should never happen, abort - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): { - jam(); - // Continue with index operation - executeIndexOperation(signal, regApiPtr, indexOp); - break; - } - case(IOS_INDEX_OPERATION): { - // We are done, send TCINDXCONF - jam(); - Uint32 Ttcindxrec = regApiPtr->tcindxrec; - // Copy reply from TcKeyConf - - ndbassert(regApiPtr->noIndexOp); - regApiPtr->noIndexOp--; // Decrease count - regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData; - regApiPtr->tcIndxSendArray[Ttcindxrec + 1] = - tcKeyConf->operations[0].attrInfoLen; - regApiPtr->tcindxrec = Ttcindxrec + 2; - if (regApiPtr->noIndexOp == 0) { - jam(); - sendTcIndxConf(signal, Tcommit); - } else if (regApiPtr->tcindxrec == ZTCOPCONF_SIZE) { - jam(); - sendTcIndxConf(signal, 0); - } - releaseIndexOperation(regApiPtr, indexOp); - break; - } - } -} - -void 
Dbtc::execTCKEYREF(Signal* signal) -{ - TcKeyRef * const tcKeyRef = (TcKeyRef *)signal->getDataPtr(); - TcIndexOperationPtr indexOpPtr; - - jamEntry(); - indexOpPtr.i = tcKeyRef->connectPtr; - TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - indexOpPtr.p = indexOp; - if (!indexOp) { - jam(); - // Missing index operation - return; - } - const UintR TconnectIndex = indexOp->connectionIndex; - ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex]; - - switch(indexOp->indexOpState) { - case(IOS_NOOP): { - jam(); - // Should never happen, abort - break; - } - case(IOS_INDEX_ACCESS): - case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): - case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): { - jam(); - /** - * Increase count as it will be decreased below... - * (and the code is written to handle failing lookup on "real" table - * not lookup on index table) - */ - regApiPtr->noIndexOp++; - // else continue - } - case(IOS_INDEX_OPERATION): { - // Send TCINDXREF - - jam(); - TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq; - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - ndbassert(regApiPtr->noIndexOp); - regApiPtr->noIndexOp--; // Decrease count - tcIndxRef->connectPtr = tcIndxReq->senderData; - tcIndxRef->transId[0] = tcKeyRef->transId[0]; - tcIndxRef->transId[1] = tcKeyRef->transId[1]; - tcIndxRef->errorCode = tcKeyRef->errorCode; - tcIndxRef->errorData = 0; - - releaseIndexOperation(regApiPtr, indexOp); - - sendSignal(regApiPtr->ndbapiBlockref, - GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB); - return; - } - } -} - -void Dbtc::execTRANSID_AI_R(Signal* signal){ - TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr(); - Uint32 sigLen = signal->length(); - Uint32 dataLen = sigLen - TransIdAI::HeaderLength - 1; - Uint32 recBlockref = transIdAI->attrData[dataLen]; - - jamEntry(); - - /** - * Forward signal to final destination - * Truncate last word since that was used to hold the final dest. - */ - sendSignal(recBlockref, GSN_TRANSID_AI, - signal, sigLen - 1, JBB); -} - -void Dbtc::execKEYINFO20_R(Signal* signal){ - KeyInfo20 * const keyInfo = (KeyInfo20 *)signal->getDataPtr(); - Uint32 sigLen = signal->length(); - Uint32 dataLen = sigLen - KeyInfo20::HeaderLength - 1; - Uint32 recBlockref = keyInfo->keyData[dataLen]; - - jamEntry(); - - /** - * Forward signal to final destination - * Truncate last word since that was used to hold the final dest. 
- */ - sendSignal(recBlockref, GSN_KEYINFO20, - signal, sigLen - 1, JBB); -} - - -void Dbtc::execTRANSID_AI(Signal* signal) -{ - TransIdAI * const transIdAI = (TransIdAI *)signal->getDataPtr(); - - jamEntry(); - TcIndexOperationPtr indexOpPtr; - indexOpPtr.i = transIdAI->connectPtr; - TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - indexOpPtr.p = indexOp; - if (!indexOp) { - jam(); - // Missing index operation - } - const UintR TconnectIndex = indexOp->connectionIndex; - // ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex]; - ApiConnectRecordPtr transPtr; - - transPtr.i = TconnectIndex; - ptrCheckGuard(transPtr, capiConnectFilesize, apiConnectRecord); - ApiConnectRecord * const regApiPtr = transPtr.p; - - // Acccumulate attribute data - if (!saveTRANSID_AI(signal, - indexOp, - transIdAI->getData(), - signal->getLength() - TransIdAI::HeaderLength)) { - jam(); - // Failed to allocate space for TransIdAI - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4000; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - - switch(indexOp->indexOpState) { - case(IOS_NOOP): { - jam(); - // Should never happen, abort - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - break; - } - case(IOS_INDEX_ACCESS): { - jam(); - // Check if all TRANSID_AI have been received - if (receivedAllTRANSID_AI(indexOp)) { - jam(); - // Wait for TRANSID_AI - indexOp->indexOpState = IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF; - } - break; - } - case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): { - jam(); -#ifdef VM_TRACE - ndbout_c("Dbtc::execTRANSID_AI: Too many TRANSID_AI, ignore for now\n"); -#endif - /* - // Too many TRANSID_AI - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - */ - break; - } - case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): { - jam(); - // Check if all TRANSID_AI have been received - if (receivedAllTRANSID_AI(indexOp)) { - jam(); - // Continue with index operation - executeIndexOperation(signal, regApiPtr, indexOp); - } - // else continue waiting for more TRANSID_AI - break; - } - case(IOS_INDEX_OPERATION): { - // Should never receive TRANSID_AI in this state!! 
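Because the TCKEYCONF for the index-table read and the TRANSID_AI words carrying the fetched primary key can arrive in either order, the index operation is driven by the small IOS_* state machine visible in the switch statements above. Condensed into a stand-alone sketch of the happy-path transitions (invented names, not the block code):

    #include <cassert>

    enum class IndexOpState {
      Noop,              // IOS_NOOP
      IndexAccess,       // IOS_INDEX_ACCESS: index-table read sent, nothing back yet
      WaitForTransIdAI,  // TCKEYCONF arrived first, waiting for the key data
      WaitForTcKeyConf,  // all TRANSID_AI arrived first, waiting for TCKEYCONF
      IndexOperation     // base-table operation executing
    };

    enum class Event { TcKeyConf, AllTransIdAiReceived };

    // Condensed transition table for the happy path.
    inline IndexOpState next(IndexOpState s, Event e) {
      switch (s) {
      case IndexOpState::IndexAccess:
        // TCKEYCONF and the last TRANSID_AI can arrive in either order.
        return e == Event::TcKeyConf ? IndexOpState::WaitForTransIdAI
                                     : IndexOpState::WaitForTcKeyConf;
      case IndexOpState::WaitForTransIdAI:
        assert(e == Event::AllTransIdAiReceived);  // a duplicate TCKEYCONF would abort
        return IndexOpState::IndexOperation;       // -> executeIndexOperation()
      case IndexOpState::WaitForTcKeyConf:
        assert(e == Event::TcKeyConf);             // surplus TRANSID_AI is just ignored here
        return IndexOpState::IndexOperation;       // -> executeIndexOperation()
      case IndexOpState::IndexOperation:
        return IndexOpState::Noop;                 // base-table op confirmed: reply and release
      case IndexOpState::Noop:
        return IndexOpState::Noop;                 // unexpected: real code answers TCINDXREF 4349
      }
      return IndexOpState::Noop;
    }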
- jam(); - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = regApiPtr->errorData; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - } -} - -void Dbtc::execTCROLLBACKREP(Signal* signal) -{ - TcRollbackRep* tcRollbackRep = (TcRollbackRep *)signal->getDataPtr(); - jamEntry(); - TcIndexOperationPtr indexOpPtr; - indexOpPtr.i = tcRollbackRep->connectPtr; - TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i); - indexOpPtr.p = indexOp; - tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend(); - tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData; - sendSignal(apiConnectptr.p->ndbapiBlockref, - GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB); -} - -/** - * Read index table with the index attributes as PK - */ -void Dbtc::readIndexTable(Signal* signal, - ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp) -{ - Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ - Uint32 dataPos = 0; - TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend(); - Uint32 * dataPtr = &tcKeyReq->scanInfo; - Uint32 tcKeyLength = TcKeyReq::StaticLength; - Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo; - AttributeBuffer::DataBufferIterator keyIter; - Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo); - TcIndexData* indexData; - Uint32 transId1 = indexOp->tcIndxReq.transId1; - Uint32 transId2 = indexOp->tcIndxReq.transId2; - - const Operation_t opType = - (Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo); - - // Find index table - if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.tableId)) == NULL) { - jam(); - // Failed to find index record - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4000; - // tcIndxRef->errorData = ??; Where to find indexId - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - tcKeyReq->transId1 = transId1; - tcKeyReq->transId2 = transId2; - tcKeyReq->tableId = indexData->indexId; - tcKeyLength += MIN(keyLength, keyBufSize); - tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.tableSchemaVersion; - TcKeyReq::setOperationType(tcKeyRequestInfo, - opType == ZREAD ? 
ZREAD : ZREAD_EX); - TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo - TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0); - tcKeyReq->senderData = indexOp->indexOpId; - indexOp->indexOpState = IOS_INDEX_ACCESS; - regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp; - regApiPtr->accumulatingIndexOp = RNIL; - regApiPtr->isIndexOp = 2; - - if (ERROR_INSERTED(8037)) - { - ndbout_c("shifting index version"); - tcKeyReq->tableSchemaVersion = ~(Uint32)indexOp->tcIndxReq.tableSchemaVersion; - } - - Uint32 remainingKey = indexOp->keyInfo.getSize(); - bool moreKeyData = indexOp->keyInfo.first(keyIter); - // *********** KEYINFO in TCKEYREQ *********** - while((dataPos < keyBufSize) && - (remainingKey-- != 0)) { - *dataPtr++ = *keyIter.data; - dataPos++; - moreKeyData = indexOp->keyInfo.next(keyIter); - } - // *********** ATTRINFO in TCKEYREQ *********** - tcKeyReq->attrLen = 1; // Primary key is stored as one attribute - AttributeHeader::init(dataPtr, indexData->primaryKeyPos, 0); - tcKeyLength++; - tcKeyReq->requestInfo = tcKeyRequestInfo; - - ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0); - ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0); - EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - goto err; - } - - // *********** KEYINFO *********** - if (moreKeyData) { - jam(); - // Send KEYINFO sequence - KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend(); - - keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr; - keyInfo->transId[0] = transId1; - keyInfo->transId[1] = transId2; - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - while(remainingKey-- != 0) {// If we have not read complete key - *dataPtr++ = *keyIter.data; - dataPos++; - if (dataPos == KeyInfo::DataLength) { - // Flush KEYINFO - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength); - jamEntry(); - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - goto err; - } - dataPos = 0; - dataPtr = (Uint32 *) &keyInfo->keyData; - } - moreKeyData = indexOp->keyInfo.next(keyIter); - } - if (dataPos != 0) { - // Flush last KEYINFO - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + dataPos); - jamEntry(); - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - goto err; - } - } - } - - /** - * "Fool" TC not to start commiting transaction since it always will - * have one outstanding lqhkeyreq - * This is later decreased when the index read is complete - */ - regApiPtr->lqhkeyreqrec++; - - /** - * Remember ptr to index read operation - * (used to set correct save point id on index operation later) - */ - indexOp->indexReadTcConnect = regApiPtr->lastTcConnect; - -done: - return; - -err: - jam(); - goto done; -} - -/** - * Execute the index operation with the result from - * the index table read as PK - */ -void Dbtc::executeIndexOperation(Signal* signal, - ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp) { - - Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ - Uint32 attrBufSize = 5; - Uint32 dataPos = 0; - TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq; - TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend(); - /* - Data points to distrGroupHashValue since scanInfo is used to send - fragment id of receiving fragment - */ - Uint32 * dataPtr = &tcKeyReq->distrGroupHashValue; - Uint32 tcKeyLength = TcKeyReq::StaticLength; - Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo; - TcIndexData* 
indexData; - AttributeBuffer::DataBufferIterator attrIter; - AttributeBuffer::DataBufferIterator aiIter; - bool moreKeyData = indexOp->transIdAI.first(aiIter); - - // Find index table - if ((indexData = c_theIndexes.getPtr(tcIndxReq->tableId)) == NULL) { - jam(); - // Failed to find index record - TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend(); - - tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData; - tcIndxRef->transId[0] = regApiPtr->transid[0]; - tcIndxRef->transId[1] = regApiPtr->transid[1]; - tcIndxRef->errorCode = 4349; - tcIndxRef->errorData = 0; - sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal, - TcKeyRef::SignalLength, JBB); - return; - } - // Find schema version of primary table - TableRecordPtr tabPtr; - tabPtr.i = indexData->primaryTableId; - ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord); - - tcKeyReq->apiConnectPtr = tcIndxReq->apiConnectPtr; - tcKeyReq->attrLen = tcIndxReq->attrLen; - tcKeyReq->tableId = indexData->primaryTableId; - tcKeyReq->tableSchemaVersion = tabPtr.p->currentSchemaVersion; - tcKeyReq->transId1 = regApiPtr->transid[0]; - tcKeyReq->transId2 = regApiPtr->transid[1]; - tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API - indexOp->indexOpState = IOS_INDEX_OPERATION; - regApiPtr->isIndexOp = 1; - regApiPtr->executingIndexOp = indexOp->indexOpId;; - regApiPtr->noIndexOp++; // Increase count - - /* - Filter out AttributeHeader:s since this should not be in key. - Also filter out fragment id from primary key and handle that - separately by setting it as Distribution Key and set indicator. - */ - - AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data; - - Uint32 headerSize = attrHeader->getHeaderSize(); - Uint32 keySize = attrHeader->getDataSize() - 1; - TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize); - // Skip header - if (headerSize == 1) { - jam(); - moreKeyData = indexOp->transIdAI.next(aiIter); - } else { - jam(); - moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1); - }//if - tcKeyReq->scanInfo = *aiIter.data; //Fragment Id - moreKeyData = indexOp->transIdAI.next(aiIter); - TcKeyReq::setDistributionKeyFlag(tcKeyRequestInfo, 1U); - while(// If we have not read complete key - (keySize != 0) && - (dataPos < keyBufSize)) { - *dataPtr++ = *aiIter.data; - dataPos++; - keySize--; - moreKeyData = indexOp->transIdAI.next(aiIter); - } - tcKeyLength += dataPos; - - Uint32 attributesLength = indexOp->attrInfo.getSize(); - if (attributesLength <= attrBufSize) { - jam(); - // ATTRINFO fits in TCKEYREQ - // Pack ATTRINFO IN TCKEYREQ - TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, indexOp->attrInfo.getSize()); - // Insert IndxAttrInfo - for(bool moreAttrData = indexOp->attrInfo.first(attrIter); - moreAttrData; - moreAttrData = indexOp->attrInfo.next(attrIter)) { - *dataPtr++ = *attrIter.data; - } - tcKeyLength += attributesLength; - } else { - jam(); - // No ATTRINFO in TCKEYREQ - TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0); - } - - TcKeyReq::setCommitFlag(tcKeyRequestInfo, 0); - TcKeyReq::setExecuteFlag(tcKeyRequestInfo, 0); - TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0); - tcKeyReq->requestInfo = tcKeyRequestInfo; - - ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0); - ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0); - - /** - * Decrease lqhkeyreqrec to compensate for addition - * during read of index table - * I.e. 
let TC start committing when other operations has completed - */ - regApiPtr->lqhkeyreqrec--; - - /** - * Fix savepoint id - - * fix so that index operation has the same savepoint id - * as the read of the index table (TCINDXREQ) - */ - TcConnectRecordPtr tmp; - tmp.i = indexOp->indexReadTcConnect; - ptrCheckGuard(tmp, ctcConnectFilesize, tcConnectRecord); - const Uint32 currSavePointId = regApiPtr->currSavePointId; - regApiPtr->currSavePointId = tmp.p->savePointId; - EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - regApiPtr->currSavePointId = currSavePointId; - - // *********** KEYINFO *********** - if (moreKeyData) { - jam(); - // Send KEYINFO sequence - KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend(); - - keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr; - keyInfo->transId[0] = regApiPtr->transid[0]; - keyInfo->transId[1] = regApiPtr->transid[1]; - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - // Pack any part of a key attribute that did no fit TCKEYREQ - while(keySize-- != 0) {// If we have not read complete key - *dataPtr++ = *aiIter.data; - dataPos++; - if (dataPos == KeyInfo::DataLength) { - // Flush KEYINFO - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - dataPos = 0; - dataPtr = (Uint32 *) &keyInfo->keyData; - } - moreKeyData = indexOp->transIdAI.next(aiIter); - } - if (dataPos != 0) { - // Flush last KEYINFO - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + dataPos); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - } - } - - // *********** ATTRINFO *********** - if (attributesLength > attrBufSize) { - jam(); - // No ATTRINFO in TcKeyReq - TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0); - // Send ATTRINFO sequence - AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend(); - Uint32 attrInfoPos = 0; - - attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr; - attrInfo->transId[0] = regApiPtr->transid[0]; - attrInfo->transId[1] = regApiPtr->transid[1]; - dataPtr = (Uint32 *) &attrInfo->attrData; - - - // Insert attribute values (insert key values of primary table) - for(bool moreAttrData = indexOp->attrInfo.first(attrIter); - moreAttrData; - moreAttrData = indexOp->attrInfo.next(attrIter)) { - *dataPtr++ = *attrIter.data; - attrInfoPos++; - if (attrInfoPos == AttrInfo::DataLength) { - // Flush ATTRINFO - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - attrInfoPos = 0; - dataPtr = (Uint32 *) &attrInfo->attrData; - } - } - if (attrInfoPos != 0) { - // Send last ATTRINFO - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + attrInfoPos); - jamEntry(); - } - } -} - -bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr, - TcIndexOperationPtr& indexOpPtr) -{ - if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr)) - { - ndbassert(indexOpPtr.p->expectedKeyInfo == 0); - ndbassert(indexOpPtr.p->keyInfo.getSize() == 0); - ndbassert(indexOpPtr.p->expectedAttrInfo == 0); - ndbassert(indexOpPtr.p->attrInfo.getSize() == 0); - ndbassert(indexOpPtr.p->expectedTransIdAI == 0); - ndbassert(indexOpPtr.p->transIdAI.getSize() == 0); - return true; - 
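Taken together, readIndexTable() and executeIndexOperation() turn a unique-index access into two dependent primary-key operations: first read the index table with the index key to obtain the base table's primary key (plus fragment id), then run the original request against the base table using that key. A toy model of the control flow, with std::map standing in for the two tables (purely illustrative; the real path is built from TCKEYREQ/KEYINFO/ATTRINFO signals):

    #include <map>
    #include <optional>
    #include <string>

    using Key = std::string;
    using Row = std::string;

    // Index table: unique index value -> primary key of the base table.
    // Base table: primary key -> row.
    struct ToyTables {
      std::map<Key, Key> indexTable;
      std::map<Key, Row> baseTable;
    };

    // Phase 1 ("readIndexTable"): fetch the stored primary key.
    // Phase 2 ("executeIndexOperation"): use it against the base table.
    inline std::optional<Row> readViaUniqueIndex(const ToyTables& t, const Key& indexKey) {
      auto it = t.indexTable.find(indexKey);
      if (it == t.indexTable.end())
        return std::nullopt;          // index miss -> TCKEYREF back to the API
      auto row = t.baseTable.find(it->second);
      if (row == t.baseTable.end())
        return std::nullopt;          // index entry points at a vanished row
      return row->second;
    }

    int main() {
      ToyTables t;
      t.baseTable["pk1"] = "row data";
      t.indexTable["unique-val"] = "pk1";
      auto r = readViaUniqueIndex(t, "unique-val");
      return r ? 0 : 1;
    }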
} - - return false; -} - -void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr, - TcIndexOperation* indexOp) -{ - indexOp->indexOpState = IOS_NOOP; - indexOp->expectedKeyInfo = 0; - indexOp->keyInfo.release(); - indexOp->expectedAttrInfo = 0; - indexOp->attrInfo.release(); - indexOp->expectedTransIdAI = 0; - indexOp->transIdAI.release(); - regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId); -} - -void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr) -{ - TcIndexOperationPtr seizedIndexOpPtr; - - regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr); - while(seizedIndexOpPtr.i != RNIL) { - jam(); - TcIndexOperation* indexOp = seizedIndexOpPtr.p; - - indexOp->indexOpState = IOS_NOOP; - indexOp->expectedKeyInfo = 0; - indexOp->keyInfo.release(); - indexOp->expectedAttrInfo = 0; - indexOp->attrInfo.release(); - indexOp->expectedTransIdAI = 0; - indexOp->transIdAI.release(); - regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr); - } - regApiPtr->theSeizedIndexOperations.release(); -} - -void Dbtc::saveTriggeringOpState(Signal* signal, TcConnectRecord* trigOp) -{ - LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - copyFromToLen((UintR*)lqhKeyConf, - &trigOp->savedState[0], - LqhKeyConf::SignalLength); -} - -void Dbtc::continueTriggeringOp(Signal* signal, TcConnectRecord* trigOp) -{ - LqhKeyConf * lqhKeyConf = (LqhKeyConf *)signal->getDataPtr(); - copyFromToLen(&trigOp->savedState[0], - (UintR*)lqhKeyConf, - LqhKeyConf::SignalLength); - - lqhKeyConf->noFiredTriggers = 0; - trigOp->noReceivedTriggers = 0; - - // All triggers executed successfully, continue operation - execLQHKEYCONF(signal); -} - -void Dbtc::scheduleFiredTrigger(ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr) -{ - // Set initial values for trigger fireing operation - opPtr->p->triggerExecutionCount++; - - // Insert fired trigger in execution queue - transPtr->p->theFiredTriggers.add(opPtr->p->accumulatingTriggerData); - opPtr->p->accumulatingTriggerData.i = RNIL; - opPtr->p->accumulatingTriggerData.p = NULL; -} - -void Dbtc::executeTriggers(Signal* signal, ApiConnectRecordPtr* transPtr) -{ - ApiConnectRecord* regApiPtr = transPtr->p; - TcConnectRecord *localTcConnectRecord = tcConnectRecord; - TcConnectRecordPtr opPtr; - FiredTriggerPtr trigPtr; - - if (!regApiPtr->theFiredTriggers.isEmpty()) { - jam(); - if ((regApiPtr->apiConnectstate == CS_STARTED) || - (regApiPtr->apiConnectstate == CS_START_COMMITTING)) { - jam(); - regApiPtr->theFiredTriggers.first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - // Execute all ready triggers in parallel - opPtr.i = trigPtr.p->fireingOperation; - ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord); - FiredTriggerPtr nextTrigPtr = trigPtr; - regApiPtr->theFiredTriggers.next(nextTrigPtr); - if (opPtr.p->noReceivedTriggers == opPtr.p->noFiredTriggers) { - jam(); - // Fireing operation is ready to have a trigger executing - executeTrigger(signal, trigPtr.p, transPtr, &opPtr); - // Should allow for interleaving here by sending a CONTINUEB and - // return - // Release trigger records - AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool; - LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues); - tmp1.release(); - LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues); - tmp2.release(); - LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues); - tmp3.release(); - regApiPtr->theFiredTriggers.release(trigPtr); - } - trigPtr = nextTrigPtr; - } - return; - // No more triggers, continue transaction 
after last executed trigger has - // reurned (in execLQHKEYCONF or execLQHKEYREF) - } else { - // Wait until transaction is ready to execute a trigger - jam(); - if (!regApiPtr->triggerPending) { - jam(); - regApiPtr->triggerPending = true; - signal->theData[0] = TcContinueB::TRIGGER_PENDING; - signal->theData[1] = transPtr->i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - } - // else - // We are already waiting for a pending trigger (CONTINUEB) - } - } -} - -void Dbtc::executeTrigger(Signal* signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr) -{ - TcDefinedTriggerData* definedTriggerData; - - if ((definedTriggerData = - c_theDefinedTriggers.getPtr(firedTriggerData->triggerId)) - != NULL) { - switch(definedTriggerData->triggerType) { - case(TriggerType::SECONDARY_INDEX): - jam(); - executeIndexTrigger(signal, definedTriggerData, firedTriggerData, - transPtr, opPtr); - break; - default: - ndbrequire(false); - } - } -} - -void Dbtc::executeIndexTrigger(Signal* signal, - TcDefinedTriggerData* definedTriggerData, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr) -{ - TcIndexData* indexData; - - indexData = c_theIndexes.getPtr(definedTriggerData->indexId); - ndbassert(indexData != NULL); - - switch (definedTriggerData->triggerEvent) { - case(TriggerEvent::TE_INSERT): { - jam(); - insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData); - break; - } - case(TriggerEvent::TE_DELETE): { - jam(); - deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData); - break; - } - case(TriggerEvent::TE_UPDATE): { - jam(); - deleteFromIndexTable(signal, firedTriggerData, transPtr, opPtr, - indexData, true); // Hold the triggering operation - insertIntoIndexTable(signal, firedTriggerData, transPtr, opPtr, indexData); - break; - } - default: - ndbrequire(false); - } -} - -void Dbtc::releaseFiredTriggerData(DLFifoList* triggers) -{ - FiredTriggerPtr trigPtr; - - triggers->first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - // Release trigger records - - AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool; - LocalDataBuffer<11> tmp1(pool, trigPtr.p->keyValues); - tmp1.release(); - LocalDataBuffer<11> tmp2(pool, trigPtr.p->beforeValues); - tmp2.release(); - LocalDataBuffer<11> tmp3(pool, trigPtr.p->afterValues); - tmp3.release(); - - triggers->next(trigPtr); - } - triggers->release(); -} - -void Dbtc::insertIntoIndexTable(Signal* signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr, - TcIndexData* indexData, - bool holdOperation) -{ - ApiConnectRecord* regApiPtr = transPtr->p; - TcConnectRecord* opRecord = opPtr->p; - TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend(); - Uint32 tcKeyRequestInfo = 0; - Uint32 tcKeyLength = TcKeyReq::StaticLength; - TableRecordPtr indexTabPtr; - AttributeBuffer::DataBufferIterator iter; - Uint32 attrId = 0; - Uint32 keyLength = 0; - Uint32 totalPrimaryKeyLength = 1; // fragment length - Uint32 hops; - - indexTabPtr.i = indexData->indexId; - ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord); - tcKeyReq->apiConnectPtr = transPtr->i; - tcKeyReq->senderData = opPtr->i; - if (holdOperation) { - jam(); - opRecord->triggerExecutionCount++; - }//if - // Calculate key length and renumber attribute id:s - AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool; - LocalDataBuffer<11> afterValues(pool, firedTriggerData->afterValues); 
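executeIndexTrigger() above is what keeps a unique index in step with its base table: a base-table insert adds the (index key -> primary key) pair to the index table, a delete removes it, and an update is handled as a delete of the before-values followed by an insert of the after-values, while index entries are skipped entirely when the key contains a NULL (the skipNull handling below). A condensed sketch of that policy, with a plain map standing in for the index table (the real code builds TCKEYREQ signal trains instead):

    #include <map>
    #include <optional>
    #include <string>

    using PrimaryKey = std::string;
    // A NULL index key is modelled as std::nullopt and never stored.
    using IndexKey = std::optional<std::string>;

    using IndexTable = std::map<std::string, PrimaryKey>;

    inline void onInsert(IndexTable& ix, const IndexKey& after, const PrimaryKey& pk) {
      if (after) ix[*after] = pk;        // TE_INSERT: add the new entry
    }

    inline void onDelete(IndexTable& ix, const IndexKey& before) {
      if (before) ix.erase(*before);     // TE_DELETE: drop the old entry
    }

    inline void onUpdate(IndexTable& ix, const IndexKey& before,
                         const IndexKey& after, const PrimaryKey& pk) {
      onDelete(ix, before);              // TE_UPDATE: delete old, then insert new
      onInsert(ix, after, pk);
    }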
- bool skipNull = false; - for(bool moreKeyAttrs = afterValues.first(iter); moreKeyAttrs; attrId++) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - // Filter out NULL valued attributes - if (attrHeader->isNULL()) { - skipNull = true; - break; - } - attrHeader->setAttributeId(attrId); - keyLength += attrHeader->getDataSize(); - hops = attrHeader->getHeaderSize() + attrHeader->getDataSize(); - moreKeyAttrs = afterValues.next(iter, hops); - } - if (skipNull) { - jam(); - opRecord->triggerExecutionCount--; - if (opRecord->triggerExecutionCount == 0) { - /* - We have completed current trigger execution - Continue triggering operation - */ - jam(); - continueTriggeringOp(signal, opRecord); - }//if - return; - }//if - - // Calculate total length of primary key to be stored in index table - LocalDataBuffer<11> keyValues(pool, firedTriggerData->keyValues); - for(bool moreAttrData = keyValues.first(iter); moreAttrData; ) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - totalPrimaryKeyLength += attrHeader->getDataSize(); - hops = attrHeader->getHeaderSize() + attrHeader->getDataSize(); - moreAttrData = keyValues.next(iter, hops); - } - AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength << 2); - Uint32 attributesLength = afterValues.getSize() + - pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize(); - - TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength); - tcKeyReq->attrLen = attributesLength; - tcKeyReq->tableId = indexData->indexId; - TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT); - TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true); - tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion; - tcKeyReq->transId1 = regApiPtr->transid[0]; - tcKeyReq->transId2 = regApiPtr->transid[1]; - Uint32 * dataPtr = &tcKeyReq->scanInfo; - // Write first part of key in TCKEYREQ - Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ - Uint32 attrBufSize = 5; // Maximum for key in TCKEYREQ - Uint32 dataPos = 0; - // Filter out AttributeHeader:s since this should no be in key - bool moreKeyData = afterValues.first(iter); - Uint32 headerSize = 0, keyAttrSize = 0, dataSize = 0, headAndData = 0; - - while (moreKeyData && (dataPos < keyBufSize)) { - /* - * If we have not read complete key - * and it fits in the signal - */ - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - keyAttrSize = attrHeader->getDataSize(); - headAndData = headerSize + attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreKeyData = afterValues.next(iter); - } else { - jam(); - moreKeyData = afterValues.next(iter, headerSize - 1); - }//if - while((keyAttrSize != 0) && (dataPos < keyBufSize)) { - // If we have not read complete key - jam(); - *dataPtr++ = *iter.data; - dataPos++; - keyAttrSize--; - moreKeyData = afterValues.next(iter); - } - if (keyAttrSize != 0) { - jam(); - break; - }//if - } - - tcKeyLength += dataPos; - /* - Size of attrinfo is unique index attributes one by one, header for each - of them (all contained in the afterValues data structure), plus a header, - the primary key (compacted) and the fragment id before the primary key - */ - if (attributesLength <= attrBufSize) { - jam(); - // ATTRINFO fits in TCKEYREQ - // Pack ATTRINFO IN TCKEYREQ as one attribute - TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, attributesLength); - bool moreAttrData; - // Insert primary key attributes (insert after values of primary table) - for(moreAttrData = 
afterValues.first(iter); - moreAttrData; - moreAttrData = afterValues.next(iter)) { - *dataPtr++ = *iter.data; - } - // Insert attribute values (insert key values of primary table) - // as one attribute - pkAttrHeader.insertHeader(dataPtr); - dataPtr += pkAttrHeader.getHeaderSize(); - /* - Insert fragment id before primary key as part of reference to tuple - */ - *dataPtr++ = firedTriggerData->fragId; - moreAttrData = keyValues.first(iter); - while(moreAttrData) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - dataSize = attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreAttrData = keyValues.next(iter); - } else { - jam(); - moreAttrData = keyValues.next(iter, headerSize - 1); - }//if - // Copy attribute data - while(dataSize-- != 0) { - *dataPtr++ = *iter.data; - moreAttrData = keyValues.next(iter); - } - } - tcKeyLength += attributesLength; - } else { - jam(); - // No ATTRINFO in TCKEYREQ - TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 0); - } - tcKeyReq->requestInfo = tcKeyRequestInfo; - - /** - * Fix savepoint id - - * fix so that insert has same savepoint id as triggering operation - */ - const Uint32 currSavePointId = regApiPtr->currSavePointId; - regApiPtr->currSavePointId = opRecord->savePointId; - EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - regApiPtr->currSavePointId = currSavePointId; - tcConnectptr.p->currentIndexId = indexData->indexId; - - // *********** KEYINFO *********** - if (moreKeyData) { - jam(); - // Send KEYINFO sequence - KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend(); - - keyInfo->connectPtr = transPtr->i; - keyInfo->transId[0] = regApiPtr->transid[0]; - keyInfo->transId[1] = regApiPtr->transid[1]; - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - // Pack any part of a key attribute that did no fit TCKEYREQ - while((keyAttrSize != 0) && (dataPos < KeyInfo::DataLength)) { - // If we have not read complete key - *dataPtr++ = *iter.data; - dataPos++; - keyAttrSize--; - if (dataPos == KeyInfo::DataLength) { - jam(); - // Flush KEYINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength); - jamEntry(); -#endif - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - } - moreKeyData = afterValues.next(iter); - } - - while(moreKeyData) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - keyAttrSize = attrHeader->getDataSize(); - headAndData = headerSize + attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreKeyData = afterValues.next(iter); - } else { - jam(); - moreKeyData = afterValues.next(iter, headerSize - 1); - }//if - while (keyAttrSize-- != 0) { - *dataPtr++ = *iter.data; - dataPos++; - if (dataPos == KeyInfo::DataLength) { - jam(); - // Flush KEYINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength); - jamEntry(); -#endif - - if (unlikely(regApiPtr->apiConnectstate == 
CS_ABORTING)) - { - jam(); - return; - } - - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - } - moreKeyData = afterValues.next(iter); - } - } - if (dataPos != 0) { - jam(); - // Flush last KEYINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_KEYINFO, signal, - KeyInfo::HeaderLength + dataPos, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + dataPos); - jamEntry(); -#endif - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - } - } - - // *********** ATTRINFO *********** - if (attributesLength > attrBufSize) { - jam(); - // No ATTRINFO in TcKeyReq - TcKeyReq::setAIInTcKeyReq(tcKeyReq->requestInfo, 0); - // Send ATTRINFO sequence - AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend(); - Uint32 attrInfoPos = 0; - - attrInfo->connectPtr = transPtr->i; - attrInfo->transId[0] = regApiPtr->transid[0]; - attrInfo->transId[1] = regApiPtr->transid[1]; - dataPtr = (Uint32 *) &attrInfo->attrData; - - bool moreAttrData; - // Insert primary key attributes (insert after values of primary table) - for(moreAttrData = afterValues.first(iter); - moreAttrData; - moreAttrData = afterValues.next(iter)) { - *dataPtr++ = *iter.data; - attrInfoPos++; - if (attrInfoPos == AttrInfo::DataLength) { - jam(); - // Flush ATTRINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength); - jamEntry(); -#endif - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - dataPtr = (Uint32 *) &attrInfo->attrData; - attrInfoPos = 0; - } - } - // Insert attribute values (insert key values of primary table) - // as one attribute - pkAttrHeader.insertHeader(dataPtr); - dataPtr += pkAttrHeader.getHeaderSize(); - attrInfoPos += pkAttrHeader.getHeaderSize(); - /* - Add fragment id before primary key - TODO: This code really needs to be made into a long signal - to remove this messy code. 
- */ - if (attrInfoPos == AttrInfo::DataLength) - { - jam(); - // Flush ATTRINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength); - jamEntry(); -#endif - dataPtr = (Uint32 *) &attrInfo->attrData; - attrInfoPos = 0; - } - attrInfoPos++; - *dataPtr++ = firedTriggerData->fragId; - - moreAttrData = keyValues.first(iter); - while(moreAttrData) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - dataSize = attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreAttrData = keyValues.next(iter); - } else { - jam(); - moreAttrData = keyValues.next(iter, headerSize - 1); - }//if - while(dataSize-- != 0) { // If we have not read complete key - if (attrInfoPos == AttrInfo::DataLength) { - jam(); - // Flush ATTRINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + AttrInfo::DataLength); - jamEntry(); -#endif - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - dataPtr = (Uint32 *) &attrInfo->attrData; - attrInfoPos = 0; - } - *dataPtr++ = *iter.data; - attrInfoPos++; - moreAttrData = keyValues.next(iter); - } - } - if (attrInfoPos != 0) { - jam(); - // Flush last ATTRINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + attrInfoPos, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal, - AttrInfo::HeaderLength + attrInfoPos); - jamEntry(); -#endif - } - } -} - -void Dbtc::deleteFromIndexTable(Signal* signal, - TcFiredTriggerData* firedTriggerData, - ApiConnectRecordPtr* transPtr, - TcConnectRecordPtr* opPtr, - TcIndexData* indexData, - bool holdOperation) -{ - ApiConnectRecord* regApiPtr = transPtr->p; - TcConnectRecord* opRecord = opPtr->p; - TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend(); - Uint32 tcKeyRequestInfo = 0; - Uint32 tcKeyLength = 12; // Static length - TableRecordPtr indexTabPtr; - AttributeBuffer::DataBufferIterator iter; - Uint32 attrId = 0; - Uint32 keyLength = 0; - Uint32 hops; - - indexTabPtr.i = indexData->indexId; - ptrCheckGuard(indexTabPtr, ctabrecFilesize, tableRecord); - tcKeyReq->apiConnectPtr = transPtr->i; - tcKeyReq->senderData = opPtr->i; - if (holdOperation) { - jam(); - opRecord->triggerExecutionCount++; - }//if - // Calculate key length and renumber attribute id:s - AttributeBuffer::DataBufferPool & pool = c_theAttributeBufferPool; - LocalDataBuffer<11> beforeValues(pool, firedTriggerData->beforeValues); - bool skipNull = false; - for(bool moreKeyAttrs = beforeValues.first(iter); - (moreKeyAttrs); - attrId++) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - // Filter out NULL valued attributes - if (attrHeader->isNULL()) { - skipNull = true; - break; - } - attrHeader->setAttributeId(attrId); - keyLength += attrHeader->getDataSize(); - hops = attrHeader->getHeaderSize() + attrHeader->getDataSize(); - moreKeyAttrs = beforeValues.next(iter, hops); - } - - if (skipNull) { - jam(); - opRecord->triggerExecutionCount--; - if (opRecord->triggerExecutionCount == 0) { - /* - We have completed current trigger execution - Continue triggering operation - */ - jam(); - 
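The long KEYINFO/ATTRINFO flush loops in insertIntoIndexTable() above, and the matching ones in deleteFromIndexTable(), all follow the same chunking discipline: the first words travel in the TCKEYREQ itself, the remainder is copied into a fixed-size signal buffer that is flushed each time it fills, and a final partially filled buffer is flushed once the input is exhausted. The shape of that loop, reduced to a stand-alone helper (sizes and names are illustrative, not the real signal constants):

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Emit 'words' in chunks of at most 'chunkWords', flushing a full buffer
    // immediately and any non-empty remainder once the input is exhausted --
    // the same shape as the KEYINFO/ATTRINFO flush loops.
    inline void sendInChunks(const std::vector<uint32_t>& words,
                             std::size_t chunkWords,
                             const std::function<void(const uint32_t*, std::size_t)>& flush) {
      std::vector<uint32_t> buf;
      buf.reserve(chunkWords);
      for (uint32_t w : words) {
        buf.push_back(w);
        if (buf.size() == chunkWords) {  // buffer full: flush one KEYINFO/ATTRINFO signal
          flush(buf.data(), buf.size());
          buf.clear();
        }
      }
      if (!buf.empty())                  // flush the last, partially filled signal
        flush(buf.data(), buf.size());
    }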
continueTriggeringOp(signal, opRecord); - }//if - return; - }//if - - TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength); - tcKeyReq->attrLen = 0; - tcKeyReq->tableId = indexData->indexId; - TcKeyReq::setOperationType(tcKeyRequestInfo, ZDELETE); - TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true); - tcKeyReq->tableSchemaVersion = indexTabPtr.p->currentSchemaVersion; - tcKeyReq->transId1 = regApiPtr->transid[0]; - tcKeyReq->transId2 = regApiPtr->transid[1]; - Uint32 * dataPtr = &tcKeyReq->scanInfo; - // Write first part of key in TCKEYREQ - Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ - Uint32 dataPos = 0; - // Filter out AttributeHeader:s since this should no be in key - bool moreKeyData = beforeValues.first(iter); - Uint32 headerSize = 0, keyAttrSize = 0, headAndData = 0; - - while (moreKeyData && - (dataPos < keyBufSize)) { - /* - If we have not read complete key - and it fits in the signal - */ - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - keyAttrSize = attrHeader->getDataSize(); - headAndData = headerSize + attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreKeyData = beforeValues.next(iter); - } else { - jam(); - moreKeyData = beforeValues.next(iter, headerSize - 1); - }//if - while((keyAttrSize != 0) && - (dataPos < keyBufSize)) { - // If we have not read complete key - jam(); - *dataPtr++ = *iter.data; - dataPos++; - keyAttrSize--; - moreKeyData = beforeValues.next(iter); - } - if (keyAttrSize != 0) { - jam(); - break; - }//if - } - - tcKeyLength += dataPos; - tcKeyReq->requestInfo = tcKeyRequestInfo; - - /** - * Fix savepoint id - - * fix so that delete has same savepoint id as triggering operation - */ - const Uint32 currSavePointId = regApiPtr->currSavePointId; - regApiPtr->currSavePointId = opRecord->savePointId; - EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength); - jamEntry(); - - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - regApiPtr->currSavePointId = currSavePointId; - tcConnectptr.p->currentIndexId = indexData->indexId; - - // *********** KEYINFO *********** - if (moreKeyData) { - jam(); - // Send KEYINFO sequence - KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend(); - - keyInfo->connectPtr = transPtr->i; - keyInfo->transId[0] = regApiPtr->transid[0]; - keyInfo->transId[1] = regApiPtr->transid[1]; - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - // Pack any part of a key attribute that did no fit TCKEYREQ - while((keyAttrSize != 0) && - (dataPos < KeyInfo::DataLength)) { - // If we have not read complete key - *dataPtr++ = *iter.data; - dataPos++; - keyAttrSize--; - if (dataPos == KeyInfo::DataLength) { - jam(); - // Flush KEYINFO -#if INTERNAL_TRIGGER_TCKEYREQ_JBA - sendSignal(reference(), GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength, JBA); -#else - EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal, - KeyInfo::HeaderLength + KeyInfo::DataLength); - jamEntry(); -#endif - if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING)) - { - jam(); - return; - } - - dataPtr = (Uint32 *) &keyInfo->keyData; - dataPos = 0; - } - moreKeyData = beforeValues.next(iter); - } - - while(moreKeyData) { - jam(); - AttributeHeader* attrHeader = (AttributeHeader *) iter.data; - - headerSize = attrHeader->getHeaderSize(); - keyAttrSize = attrHeader->getDataSize(); - headAndData = headerSize + attrHeader->getDataSize(); - // Skip header - if (headerSize == 1) { - jam(); - moreKeyData = 
beforeValues.next(iter);
- } else {
- jam();
- moreKeyData = beforeValues.next(iter,
- headerSize - 1);
- }//if
- while (keyAttrSize-- != 0) {
- *dataPtr++ = *iter.data;
- dataPos++;
- if (dataPos == KeyInfo::DataLength) {
- jam();
- // Flush KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + KeyInfo::DataLength);
- jamEntry();
-#endif
- if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
- {
- jam();
- return;
- }
-
- dataPtr = (Uint32 *) &keyInfo->keyData;
- dataPos = 0;
- }
- moreKeyData = beforeValues.next(iter);
- }
- }
- if (dataPos != 0) {
- jam();
- // Flush last KEYINFO
-#if INTERNAL_TRIGGER_TCKEYREQ_JBA
- sendSignal(reference(), GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos, JBA);
-#else
- EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
- KeyInfo::HeaderLength + dataPos);
- jamEntry();
-#endif
- }
- }
-}
-
-Uint32
-Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
- if(!get_enabled())
- return ZNO_SUCH_TABLE;
- if(get_dropping())
- return ZDROP_TABLE_IN_PROGRESS;
- if(table_version_major(schemaVersion) != table_version_major(currentSchemaVersion))
- return ZWRONG_SCHEMA_VERSION_ERROR;
- ErrorReporter::handleAssert("Dbtc::TableRecord::getErrorCode",
- __FILE__, __LINE__);
- return 0;
-}
-
-void
-Dbtc::execROUTE_ORD(Signal* signal)
-{
- jamEntry();
- if(!assembleFragments(signal)){
- jam();
- return;
- }
-
- RouteOrd* ord = (RouteOrd*)signal->getDataPtr();
- Uint32 dstRef = ord->dstRef;
- Uint32 srcRef = ord->srcRef;
- Uint32 gsn = ord->gsn;
-
- if (likely(getNodeInfo(refToNode(dstRef)).m_connected))
- {
- jam();
- Uint32 secCount = signal->getNoOfSections();
- SegmentedSectionPtr ptr[3];
- ndbrequire(secCount >= 1 && secCount <= 3);
-
- jamLine(secCount);
- for (Uint32 i = 0; i < secCount; i++)
- signal->getSection(ptr[i], i);
-
- /**
- * Put section 0 in signal->theData
- */
- ndbrequire(ptr[0].sz <= 25);
- copy(signal->theData, ptr[0]);
-
- signal->header.m_noOfSections = 0;
-
- /**
- * Shift rest of sections
- */
- for(Uint32 i = 1; i < secCount; i++)
- {
- signal->setSection(ptr[i], i - 1);
- }
-
- sendSignal(dstRef, gsn, signal, ptr[0].sz, JBB);
-
- signal->header.m_noOfSections = 0;
- signal->setSection(ptr[0], 0);
- releaseSections(signal);
- return ;
- }
-
- warningEvent("Unable to route GSN: %d from %x to %x",
- gsn, srcRef, dstRef);
-}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
deleted file mode 100644
index 12be23851c8..00000000000
--- a/storage/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (c) 2003-2005 MySQL AB
- Use is subject to license terms
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef ATTRIBUTE_OFFSET_HPP -#define ATTRIBUTE_OFFSET_HPP - -class AttributeOffset { - friend class Dbtup; - -private: - static void setOffset(Uint32 & desc, Uint32 offset); - static void setCharsetPos(Uint32 & desc, Uint32 offset); - static void setNullFlagPos(Uint32 & desc, Uint32 offset); - - static Uint32 getOffset(const Uint32 &); - static bool getCharsetFlag(const Uint32 &); - static Uint32 getCharsetPos(const Uint32 &); - static Uint32 getNullFlagPos(const Uint32 &); - static Uint32 getNullFlagOffset(const Uint32 &); - static Uint32 getNullFlagBitOffset(const Uint32 &); - - Uint32 m_data; - - friend class NdbOut& operator<<(class NdbOut&, const AttributeOffset&); -}; - -/** - * Allow for 4096 attributes, all nullable, and for 128 different - * character sets. - * - * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb ) - * c = Has charset flag 1 bits 11-11 - * s = Charset pointer position - 7 bits 12-18 ( in table descriptor ) - * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits ) - * w = Null word offset - 7 bits 25-32 ( f+w addr 4096 attrs ) - * - * 1111111111222222222233 - * 01234567890123456789012345678901 - * aaaaaaaaaaacsssssss fffffwwwwwww - */ - -#define AO_ATTRIBUTE_OFFSET_SHIFT 0 -#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff - -#define AO_CHARSET_FLAG_SHIFT 11 -#define AO_CHARSET_POS_SHIFT 12 -#define AO_CHARSET_POS_MASK 127 - -#define AO_NULL_FLAG_POS_MASK 0xfff // f+w -#define AO_NULL_FLAG_POS_SHIFT 20 - -#define AO_NULL_FLAG_WORD_MASK 31 // f -#define AO_NULL_FLAG_OFFSET_SHIFT 5 - -inline -void -AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){ - ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset"); - desc &= ~(Uint32)(AO_ATTRIBUTE_OFFSET_MASK << AO_ATTRIBUTE_OFFSET_SHIFT); - desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT); -} - -inline -void -AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) { - ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos"); - desc |= (1 << AO_CHARSET_FLAG_SHIFT); - desc |= (offset << AO_CHARSET_POS_SHIFT); -} - -inline -void -AttributeOffset::setNullFlagPos(Uint32 & desc, Uint32 pos){ - ASSERT_MAX(pos, AO_NULL_FLAG_POS_MASK, "AttributeOffset::setNullFlagPos"); - desc |= (pos << AO_NULL_FLAG_POS_SHIFT); -} - -inline -Uint32 -AttributeOffset::getOffset(const Uint32 & desc) -{ - return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK; -} - -inline -bool -AttributeOffset::getCharsetFlag(const Uint32 & desc) -{ - return (desc >> AO_CHARSET_FLAG_SHIFT) & 1; -} - -inline -Uint32 -AttributeOffset::getCharsetPos(const Uint32 & desc) -{ - return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK; -} - -inline -Uint32 -AttributeOffset::getNullFlagPos(const Uint32 & desc) -{ - return ((desc >> AO_NULL_FLAG_POS_SHIFT) & AO_NULL_FLAG_POS_MASK); -} - -inline -Uint32 -AttributeOffset::getNullFlagOffset(const Uint32 & desc) -{ - return (getNullFlagPos(desc) >> AO_NULL_FLAG_OFFSET_SHIFT); -} - -inline -Uint32 -AttributeOffset::getNullFlagBitOffset(const Uint32 & desc) -{ - return (getNullFlagPos(desc) & AO_NULL_FLAG_WORD_MASK); -} - -class NdbOut& -operator<<(class NdbOut&, const AttributeOffset&); - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp deleted file mode 100644 index 
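The descriptor word documented above packs the attribute offset (11 bits), a charset flag plus charset pointer position (1 + 7 bits) and the null-flag position (12 bits, itself split into a word offset and a bit-in-word offset) into a single 32-bit value. A small self-contained round-trip using the same shifts and masks, just to make the layout concrete:

    #include <cassert>
    #include <cstdint>

    // Same constants as in the deleted AttributeOffset.hpp above.
    constexpr uint32_t AO_ATTRIBUTE_OFFSET_MASK  = 0x7ff;  // bits 0-10
    constexpr uint32_t AO_CHARSET_FLAG_SHIFT     = 11;     // bit 11
    constexpr uint32_t AO_CHARSET_POS_SHIFT      = 12;     // bits 12-18
    constexpr uint32_t AO_CHARSET_POS_MASK       = 127;
    constexpr uint32_t AO_NULL_FLAG_POS_SHIFT    = 20;     // bits 20-31
    constexpr uint32_t AO_NULL_FLAG_POS_MASK     = 0xfff;
    constexpr uint32_t AO_NULL_FLAG_WORD_MASK    = 31;     // low 5 bits: bit within word
    constexpr uint32_t AO_NULL_FLAG_OFFSET_SHIFT = 5;      // high 7 bits: null word offset

    int main() {
      uint32_t desc = 0;
      desc |= 0x123;                          // attribute offset
      desc |= 1u << AO_CHARSET_FLAG_SHIFT;    // has charset
      desc |= 5u << AO_CHARSET_POS_SHIFT;     // charset pointer position
      desc |= 77u << AO_NULL_FLAG_POS_SHIFT;  // null flag position (word + bit)

      assert((desc & AO_ATTRIBUTE_OFFSET_MASK) == 0x123);
      assert(((desc >> AO_CHARSET_FLAG_SHIFT) & 1) == 1);
      assert(((desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK) == 5);
      uint32_t nullPos = (desc >> AO_NULL_FLAG_POS_SHIFT) & AO_NULL_FLAG_POS_MASK;
      assert(nullPos == 77);
      assert((nullPos >> AO_NULL_FLAG_OFFSET_SHIFT) == 2);  // null word offset
      assert((nullPos & AO_NULL_FLAG_WORD_MASK) == 13);     // bit within that word
      return 0;
    }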
a963fe9b708..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ /dev/null @@ -1,3070 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBTUP_H -#define DBTUP_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "Undo_buffer.hpp" -#include "tuppage.hpp" -#include <../pgman.hpp> -#include <../tsman.hpp> - -// jams -#undef jam -#undef jamEntry -#ifdef DBTUP_BUFFER_CPP -#define jam() jamLine(10000 + __LINE__) -#define jamEntry() jamEntryLine(10000 + __LINE__) -#endif -#ifdef DBTUP_ROUTINES_CPP -#define jam() jamLine(15000 + __LINE__) -#define jamEntry() jamEntryLine(15000 + __LINE__) -#endif -#ifdef DBTUP_COMMIT_CPP -#define jam() jamLine(20000 + __LINE__) -#define jamEntry() jamEntryLine(20000 + __LINE__) -#endif -#ifdef DBTUP_FIXALLOC_CPP -#define jam() jamLine(25000 + __LINE__) -#define jamEntry() jamEntryLine(25000 + __LINE__) -#endif -#ifdef DBTUP_TRIGGER_CPP -#define jam() jamLine(30000 + __LINE__) -#define jamEntry() jamEntryLine(30000 + __LINE__) -#endif -#ifdef DBTUP_ABORT_CPP -#define jam() jamLine(35000 + __LINE__) -#define jamEntry() jamEntryLine(35000 + __LINE__) -#endif -#ifdef DBTUP_PAGE_MAP_CPP -#define jam() jamLine(40000 + __LINE__) -#define jamEntry() jamEntryLine(40000 + __LINE__) -#endif -#ifdef DBTUP_PAG_MAN_CPP -#define jam() jamLine(45000 + __LINE__) -#define jamEntry() jamEntryLine(45000 + __LINE__) -#endif -#ifdef DBTUP_STORE_PROC_DEF_CPP -#define jam() jamLine(50000 + __LINE__) -#define jamEntry() jamEntryLine(50000 + __LINE__) -#endif -#ifdef DBTUP_META_CPP -#define jam() jamLine(55000 + __LINE__) -#define jamEntry() jamEntryLine(55000 + __LINE__) -#endif -#ifdef DBTUP_TAB_DES_MAN_CPP -#define jam() jamLine(60000 + __LINE__) -#define jamEntry() jamEntryLine(60000 + __LINE__) -#endif -#ifdef DBTUP_GEN_CPP -#define jam() jamLine(65000 + __LINE__) -#define jamEntry() jamEntryLine(65000 + __LINE__) -#endif -#ifdef DBTUP_INDEX_CPP -#define jam() jamLine(70000 + __LINE__) -#define jamEntry() jamEntryLine(70000 + __LINE__) -#endif -#ifdef DBTUP_DEBUG_CPP -#define jam() jamLine(75000 + __LINE__) -#define jamEntry() jamEntryLine(75000 + __LINE__) -#endif -#ifdef DBTUP_VAR_ALLOC_CPP -#define jam() jamLine(80000 + __LINE__) -#define jamEntry() jamEntryLine(80000 + __LINE__) -#endif -#ifdef DBTUP_SCAN_CPP -#define jam() jamLine(85000 + __LINE__) -#define jamEntry() jamEntryLine(85000 + __LINE__) -#endif -#ifdef DBTUP_DISK_ALLOC_CPP -#define jam() jamLine(90000 + __LINE__) -#define jamEntry() jamEntryLine(90000 + __LINE__) -#endif -#ifndef jam -#define jam() jamLine(__LINE__) -#define jamEntry() jamEntryLine(__LINE__) -#endif - -#ifdef VM_TRACE -inline const char* dbgmask(const Bitmask& bm) { - static int i=0; static char buf[5][200]; - bm.getText(buf[i%5]); return buf[i++%5]; } -inline const char* 
dbgmask(const Uint32 bm[2]) { - static int i=0; static char buf[5][200]; - sprintf(buf[i%5],"%08x%08x",bm[1],bm[0]); return buf[i++%5]; } -#endif - -#define ZWORDS_ON_PAGE 8192 /* NUMBER OF WORDS ON A PAGE. */ -#define ZATTRBUF_SIZE 32 /* SIZE OF ATTRIBUTE RECORD BUFFER */ -#define ZMIN_PAGE_LIMIT_TUPKEYREQ 5 -#define ZTUP_VERSION_BITS 15 -#define ZTUP_VERSION_MASK ((1 << ZTUP_VERSION_BITS) - 1) -#define MAX_FREE_LIST 4 - -inline Uint32* ALIGN_WORD(void * ptr) -{ - return (Uint32*)(((UintPtr(ptr) + 3) >> 2) << 2); -} - -inline const Uint32* ALIGN_WORD(const void* ptr) -{ - return (Uint32*)(((UintPtr(ptr) + 3) >> 2) << 2); -} - -#ifdef DBTUP_C -//------------------------------------------------------------------ -// Jam Handling: -// -// When DBTUP reports lines through jam in the trace files it has to -// be interpreted. 4024 means as an example line 24 in DbtupCommit.cpp -// Thus 4000 is added to the line number beacuse it is located in the -// file DbtupCommit.cpp. The following is the exhaustive list of the -// added value in the various files. ndbrequire, ptrCheckGuard still -// only reports the line number in the file it currently is located in. -// -// DbtupExecQuery.cpp 0 -// DbtupBuffer.cpp 10000 -// DbtupRoutines.cpp 15000 -// DbtupCommit.cpp 20000 -// DbtupFixAlloc.cpp 25000 -// DbtupTrigger.cpp 30000 -// DbtupAbort.cpp 35000 -// DbtupPageMap.cpp 40000 -// DbtupPagMan.cpp 45000 -// DbtupStoredProcDef.cpp 50000 -// DbtupMeta.cpp 55000 -// DbtupTabDesMan.cpp 60000 -// DbtupGen.cpp 65000 -// DbtupIndex.cpp 70000 -// DbtupDebug.cpp 75000 -// DbtupVarAlloc.cpp 80000 -// DbtupScan.cpp 85000 -// DbtupDiskAlloc.cpp 90000 -//------------------------------------------------------------------ - -/* -2.2 LOCAL SYMBOLS ------------------ -*/ -/* ---------------------------------------------------------------- */ -/* S I Z E O F R E C O R D S */ -/* ---------------------------------------------------------------- */ -#define ZNO_OF_ATTRBUFREC 10000 /* SIZE OF ATTRIBUTE INFO FILE */ -#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */ -#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/ -#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */ -#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */ -#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE -#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */ -// Trigger constants -#define ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE 16 - -/* ---------------------------------------------------------------- */ -/* A ATTRIBUTE MAY BE NULL, DYNAMIC OR NORMAL. A NORMAL ATTRIBUTE */ -/* IS A ATTRIBUTE THAT IS NOT NULL OR DYNAMIC. A NULL ATTRIBUTE */ -/* MAY HAVE NO VALUE. A DYNAMIC ATTRIBUTE IS A NULL ATTRIBUTE THAT */ -/* DOES NOT HAVE TO BE A MEMBER OF EVERY TUPLE I A CERTAIN TABLE. */ -/* ---------------------------------------------------------------- */ -/** - * #defines moved into include/kernel/Interpreter.hpp - */ -#define ZINSERT_DELETE 0 -#define ZUPDATE_ALL 8 -/* ---------------------------------------------------------------- */ -/* THE MINIMUM SIZE OF AN 'EMPTY' TUPLE HEADER IN R-WORDS */ -/* ---------------------------------------------------------------- */ - /* THE TUPLE HEADER FIELD 'SIZE OF NULL ATTR. FIELD' SPECIFYES */ - /* THE SIZE OF THE TUPLE HEADER FIELD 'NULL ATTR. FIELD'. */ - /* THE TUPLE HEADER FIELD 'TYPE' SPECIFYES THE TYPE OF THE TUPLE */ - /* HEADER. */ - /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */ - /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. 
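The jam()/jamEntry() macros above tag every trace entry with the source file it came from by adding a per-file constant to __LINE__, so a raw number in a trace file has to be decoded back into a (file, line) pair using the offset table in the comment block. A small helper that performs that decoding for the offsets listed there (illustrative only, not part of the kernel):

    #include <cstdint>
    #include <string>
    #include <utility>

    // Decode a Dbtup jam trace value into (source file, line number),
    // using the per-file offsets from the comment block above.
    inline std::pair<std::string, uint32_t> decodeJam(uint32_t value) {
      static const std::pair<uint32_t, const char*> offsets[] = {
        {90000, "DbtupDiskAlloc.cpp"},     {85000, "DbtupScan.cpp"},
        {80000, "DbtupVarAlloc.cpp"},      {75000, "DbtupDebug.cpp"},
        {70000, "DbtupIndex.cpp"},         {65000, "DbtupGen.cpp"},
        {60000, "DbtupTabDesMan.cpp"},     {55000, "DbtupMeta.cpp"},
        {50000, "DbtupStoredProcDef.cpp"}, {45000, "DbtupPagMan.cpp"},
        {40000, "DbtupPageMap.cpp"},       {35000, "DbtupAbort.cpp"},
        {30000, "DbtupTrigger.cpp"},       {25000, "DbtupFixAlloc.cpp"},
        {20000, "DbtupCommit.cpp"},        {15000, "DbtupRoutines.cpp"},
        {10000, "DbtupBuffer.cpp"},        {0,     "DbtupExecQuery.cpp"},
      };
      for (const auto& o : offsets)        // highest offset that fits wins
        if (value >= o.first)
          return {o.second, value - o.first};
      return {"unknown", value};
    }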
*/ - /* IT MAY ALSO CONTAIN SHORT ATTRIBUTES AND */ - /* POINTERS TO LONG ATTRIBUTE HEADERS. */ - /* TUPLE ATTRIBUTE INDEX CLUSTERS, ATTRIBUTE */ - /* CLUSTERS AND A DYNAMIC ATTRIBUTE HEADER. */ - - /* DATA STRUCTURE TYPES */ - /* WHEN ATTRIBUTE INFO IS SENT WITH A ATTRINFO-SIGNAL THE */ - /* VARIABLE TYPE IS SPECIFYED. THIS MUST BE DONE TO BE ABLE TO */ - /* NOW HOW MUCH DATA OF A ATTRIBUTE TO READ FROM ATTRINFO. */ - - /* WHEN A REQUEST CAN NOT BE EXECUTED BECAUSE OF A ERROR THE */ - /* ERROR MUST BE IDENTIFYED BY MEANS OF A ERROR CODE AND SENT TO */ - /* THE REQUESTER. */ -#define ZGET_OPREC_ERROR 804 // TUP_SEIZEREF - -#define ZEXIST_FRAG_ERROR 816 // Add fragment -#define ZFULL_FRAGRECORD_ERROR 817 // Add fragment -#define ZNO_FREE_PAGE_RANGE_ERROR 818 // Add fragment -#define ZNOFREE_FRAGOP_ERROR 830 // Add fragment -#define ZTOO_LARGE_TUPLE_ERROR 851 // Add fragment -#define ZNO_FREE_TAB_ENTRY_ERROR 852 // Add fragment -#define ZNO_PAGES_ALLOCATED_ERROR 881 // Add fragment - -#define ZGET_REALPID_ERROR 809 -#define ZNOT_IMPLEMENTED_ERROR 812 -#define ZSEIZE_ATTRINBUFREC_ERROR 805 -#define ZTOO_MUCH_ATTRINFO_ERROR 823 -#define ZMEM_NOTABDESCR_ERROR 826 -#define ZMEM_NOMEM_ERROR 827 -#define ZAI_INCONSISTENCY_ERROR 829 -#define ZNO_ILLEGAL_NULL_ATTR 839 -#define ZNOT_NULL_ATTR 840 -#define ZNO_INSTRUCTION_ERROR 871 -#define ZOUTSIDE_OF_PROGRAM_ERROR 876 -#define ZSTORED_PROC_ID_ERROR 877 -#define ZREGISTER_INIT_ERROR 878 -#define ZATTRIBUTE_ID_ERROR 879 -#define ZTRY_TO_READ_TOO_MUCH_ERROR 880 -#define ZTOTAL_LEN_ERROR 882 -#define ZATTR_INTERPRETER_ERROR 883 -#define ZSTACK_OVERFLOW_ERROR 884 -#define ZSTACK_UNDERFLOW_ERROR 885 -#define ZTOO_MANY_INSTRUCTIONS_ERROR 886 -#define ZTRY_TO_UPDATE_ERROR 888 -#define ZCALL_ERROR 890 -#define ZTEMPORARY_RESOURCE_FAILURE 891 -#define ZUNSUPPORTED_BRANCH 892 - -#define ZSTORED_SEIZE_ATTRINBUFREC_ERROR 873 // Part of Scan -#define ZSTORED_TOO_MUCH_ATTRINFO_ERROR 874 - -#define ZREAD_ONLY_CONSTRAINT_VIOLATION 893 -#define ZVAR_SIZED_NOT_SUPPORTED 894 -#define ZINCONSISTENT_NULL_ATTRIBUTE_COUNT 895 -#define ZTUPLE_CORRUPTED_ERROR 896 -#define ZTRY_UPDATE_PRIMARY_KEY 897 -#define ZMUST_BE_ABORTED_ERROR 898 -#define ZTUPLE_DELETED_ERROR 626 -#define ZINSERT_ERROR 630 - -#define ZINVALID_CHAR_FORMAT 744 -#define ZROWID_ALLOCATED 899 - - /* SOME WORD POSITIONS OF FIELDS IN SOME HEADERS */ - -#define ZFREE_COMMON 1 /* PAGE STATE, PAGE IN COMMON AREA */ -#define ZEMPTY_MM 2 /* PAGE STATE, PAGE IN EMPTY LIST */ -#define ZTH_MM_FREE 3 /* PAGE STATE, TUPLE HEADER PAGE WITH FREE AREA */ -#define ZTH_MM_FULL 4 /* PAGE STATE, TUPLE HEADER PAGE WHICH IS FULL */ - -#define ZTD_HEADER 0 /* HEADER POSITION */ -#define ZTD_DATASIZE 1 /* SIZE OF THE DATA IN THIS CHUNK */ -#define ZTD_SIZE 2 /* TOTAL SIZE OF TABLE DESCRIPTOR */ - - /* TRAILER POSITIONS FROM END OF TABLE DESCRIPTOR RECORD */ -#define ZTD_TR_SIZE 1 /* SIZE DESCRIPTOR POS FROM END+1 */ -#define ZTD_TR_TYPE 2 -#define ZTD_TRAILER_SIZE 2 /* TOTAL SIZE OF TABLE TRAILER */ -#define ZAD_SIZE 2 /* TOTAL SIZE OF ATTR DESCRIPTOR */ -#define ZAD_LOG_SIZE 1 /* TWO LOG OF TOTAL SIZE OF ATTR DESCRIPTOR */ - - /* CONSTANTS USED TO HANDLE TABLE DESCRIPTOR AS A FREELIST */ -#define ZTD_FL_HEADER 0 /* HEADER POSITION */ -#define ZTD_FL_SIZE 1 /* TOTAL SIZE OF THIS FREELIST ENTRY */ -#define ZTD_FL_PREV 2 /* PREVIOUS RECORD IN FREELIST */ -#define ZTD_FL_NEXT 3 /* NEXT RECORD IN FREELIST */ -#define ZTD_FREE_SIZE 16 /* SIZE NEEDED TO HOLD ONE FL ENTRY */ - - /* CONSTANTS USED IN LSB OF TABLE DESCRIPTOR HEADER 
DESCRIBING USAGE */ -#define ZTD_TYPE_FREE 0 /* RECORD LINKED INTO FREELIST */ -#define ZTD_TYPE_NORMAL 1 /* RECORD USED AS TABLE DESCRIPTOR */ - /* ATTRIBUTE OPERATION CONSTANTS */ -#define ZLEAF 1 -#define ZNON_LEAF 2 - - /* ATTRINBUFREC VARIABLE POSITIONS. */ -#define ZBUF_PREV 29 /* POSITION OF 'PREV'-VARIABLE (USED BY INTERPRETED EXEC) */ -#define ZBUF_DATA_LEN 30 /* POSITION OF 'DATA LENGTH'-VARIABLE. */ -#define ZBUF_NEXT 31 /* POSITION OF 'NEXT'-VARIABLE. */ -#define ZSAVE_BUF_NEXT 28 -#define ZSAVE_BUF_DATA_LEN 27 - - /* RETURN POINTS. */ - /* RESTART PHASES */ -#define ZSTARTPHASE1 1 -#define ZSTARTPHASE2 2 -#define ZSTARTPHASE3 3 -#define ZSTARTPHASE4 4 -#define ZSTARTPHASE6 6 - -#define ZADDFRAG 0 - -//------------------------------------------------------------ -// TUP_CONTINUEB codes -//------------------------------------------------------------ -#define ZINITIALISE_RECORDS 6 -#define ZREL_FRAG 7 -#define ZREPORT_MEMORY_USAGE 8 -#define ZBUILD_INDEX 9 -#define ZTUP_SCAN 10 -#define ZFREE_EXTENT 11 -#define ZUNMAP_PAGES 12 -#define ZFREE_VAR_PAGES 13 - -#define ZSCAN_PROCEDURE 0 -#define ZCOPY_PROCEDURE 2 -#define ZSTORED_PROCEDURE_DELETE 3 -#define ZSTORED_PROCEDURE_FREE 0xffff -#define ZMIN_PAGE_LIMIT_TUP_COMMITREQ 2 - -#define ZSKIP_TUX_TRIGGERS 0x1 // flag for TUP_ABORTREQ - -#endif - -class Dbtup: public SimulatedBlock { -friend class Suma; -public: -struct KeyReqStruct; -friend struct KeyReqStruct; // CC -typedef bool (Dbtup::* ReadFunction)(Uint32*, - KeyReqStruct*, - AttributeHeader*, - Uint32); -typedef bool (Dbtup::* UpdateFunction)(Uint32*, - KeyReqStruct*, - Uint32); -private: - - typedef Tup_fixsize_page Fix_page; - typedef Tup_varsize_page Var_page; - -public: - class Dblqh *c_lqh; - Tsman* c_tsman; - Lgman* c_lgman; - Page_cache_client m_pgman; - -// State values -enum ChangeMaskState { - DELETE_CHANGES = 0, - SET_ALL_MASK = 1, - USE_SAVED_CHANGE_MASK = 2, - RECALCULATE_CHANGE_MASK = 3 -}; - -enum TransState { - TRANS_IDLE = 0, - TRANS_STARTED = 1, - TRANS_WAIT_STORED_PROCEDURE_ATTR_INFO = 2, - TRANS_ERROR_WAIT_STORED_PROCREQ = 3, - TRANS_ERROR_WAIT_TUPKEYREQ = 4, - TRANS_TOO_MUCH_AI = 5, - TRANS_DISCONNECTED = 6 -}; - -enum TupleState { - TUPLE_PREPARED = 1, - TUPLE_ALREADY_ABORTED = 2, - TUPLE_TO_BE_COMMITTED = 3 -}; - -enum State { - NOT_INITIALIZED = 0, - IDLE = 17, - ACTIVE = 18, - SYSTEM_RESTART = 19, - DEFINED = 34, - NOT_DEFINED = 37, - NORMAL_PAGE = 40, - DEFINING = 65, - DROPPING = 68 -}; - -// Records -/* ************** ATTRIBUTE INFO BUFFER RECORD ****************** */ -/* THIS RECORD IS USED AS A BUFFER FOR INCOMING AND OUTGOING DATA */ -/* ************************************************************** */ -struct Attrbufrec { - Uint32 attrbuf[ZATTRBUF_SIZE]; -}; /* p2c: size = 128 bytes */ - -typedef Ptr AttrbufrecPtr; - - - -struct Fragoperrec { - Uint64 minRows; - Uint64 maxRows; - Uint32 nextFragoprec; - Uint32 lqhPtrFrag; - Uint32 fragidFrag; - Uint32 tableidFrag; - Uint32 fragPointer; - Uint32 attributeCount; - Uint32 charsetIndex; - Uint32 m_null_bits[2]; - Uint32 m_fix_attributes_size[2]; // In words - Uint32 m_var_attributes_size[2]; // In bytes - BlockReference lqhBlockrefFrag; - bool inUse; - bool definingFragment; -}; -typedef Ptr FragoperrecPtr; - - - typedef Tup_page Page; - typedef Ptr PagePtr; - - // Scan position - struct ScanPos { - enum Get { - Get_undef = 0, - Get_next_page, - Get_page, - Get_next_page_mm, - Get_page_mm, - Get_next_page_dd, - Get_page_dd, - Get_next_tuple, - Get_tuple, - Get_next_tuple_fs, - Get_tuple_fs - }; - 
Get m_get; // entry point in scanNext - Local_key m_key; // scan position pointer MM or DD - Page* m_page; // scanned MM or DD (cache) page - Local_key m_key_mm; // MM local key returned - Uint32 m_realpid_mm; // MM real page id - Uint32 m_extent_info_ptr_i; - }; - - // Scan Lock - struct ScanLock { - Uint32 m_accLockOp; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr ScanLockPtr; - ArrayPool c_scanLockPool; - - // Tup scan, similar to Tux scan. Later some of this could - // be moved to common superclass. - struct ScanOp { - ScanOp() : - m_state(Undef), - m_bits(0), - m_userPtr(RNIL), - m_userRef(RNIL), - m_tableId(RNIL), - m_fragId(~(Uint32)0), - m_fragPtrI(RNIL), - m_transId1(0), - m_transId2(0), - m_savePointId(0), - m_accLockOp(RNIL) - {} - - enum State { - Undef = 0, - First = 1, // before first entry - Current = 2, // at current before locking - Blocked = 3, // at current waiting for ACC lock - Locked = 4, // at current and locked or no lock needed - Next = 5, // looking for next extry - Last = 6, // after last entry - Aborting = 7, // lock wait at scan close - Invalid = 9 // cannot return REF to LQH currently - }; - Uint16 m_state; - - enum Bits { - SCAN_DD = 0x01, // scan disk pages - SCAN_VS = 0x02, // page format is var size - SCAN_LCP = 0x04, // LCP mem page scan - SCAN_LOCK_SH = 0x10, // lock mode shared - SCAN_LOCK_EX = 0x20, // lock mode exclusive - SCAN_LOCK_WAIT = 0x40, // lock wait - // any lock mode - SCAN_LOCK = SCAN_LOCK_SH | SCAN_LOCK_EX, - SCAN_NR = 0x80 // Node recovery scan - }; - Uint16 m_bits; - - Uint32 m_userPtr; // scanptr.i in LQH - Uint32 m_userRef; - Uint32 m_tableId; - Uint32 m_fragId; - Uint32 m_fragPtrI; - Uint32 m_transId1; - Uint32 m_transId2; - union { - Uint32 m_savePointId; - Uint32 m_scanGCI; - }; - Uint32 m_endPage; - // lock waited for or obtained and not yet passed to LQH - Uint32 m_accLockOp; - - ScanPos m_scanPos; - - DLFifoList::Head m_accLockOps; - - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr ScanOpPtr; - ArrayPool c_scanOpPool; - - void scanReply(Signal*, ScanOpPtr scanPtr); - void scanFirst(Signal*, ScanOpPtr scanPtr); - bool scanNext(Signal*, ScanOpPtr scanPtr); - void scanCont(Signal*, ScanOpPtr scanPtr); - void disk_page_tup_scan_callback(Signal*, Uint32 scanPtrI, Uint32 page_i); - void scanClose(Signal*, ScanOpPtr scanPtr); - void addAccLockOp(ScanOp& scan, Uint32 accLockOp); - void removeAccLockOp(ScanOp& scan, Uint32 accLockOp); - void releaseScanOp(ScanOpPtr& scanPtr); - - // for md5 of key (could maybe reuse existing temp buffer) - Uint64 c_dataBuffer[ZWORDS_ON_PAGE/2 + 1]; - - struct Page_request - { - Local_key m_key; - Uint32 m_frag_ptr_i; - Uint32 m_extent_info_ptr; - Uint16 m_estimated_free_space; // in bytes/records - Uint16 m_list_index; // in Disk_alloc_info.m_page_requests - Uint16 m_ref_count; // Waiters for page - Uint16 m_uncommitted_used_space; - Uint32 nextList; - Uint32 prevList; - Uint32 m_magic; - }; // 32 bytes - - typedef RecordPool Page_request_pool; - typedef DLFifoListImpl Page_request_list; - typedef LocalDLFifoListImpl Local_page_request_list; - - STATIC_CONST( EXTENT_SEARCH_MATRIX_COLS = 4 ); // Guarantee size - STATIC_CONST( EXTENT_SEARCH_MATRIX_ROWS = 5 ); // Total size - STATIC_CONST( EXTENT_SEARCH_MATRIX_SIZE = 20 ); - - struct Extent_list_t - { - Uint32 nextList; - }; - - struct Extent_info : public Extent_list_t - { - Uint32 m_magic; - Uint32 m_first_page_no; - Local_key m_key; - Uint32 m_free_space; - Uint32 
m_free_matrix_pos; - Uint16 m_free_page_count[EXTENT_SEARCH_MATRIX_COLS]; - union { - Uint32 nextList; - Uint32 nextPool; - }; - Uint32 prevList; - Uint32 nextHash, prevHash; - - Uint32 hashValue() const { - return (m_key.m_file_no << 16) ^ m_key.m_page_idx; - } - - Extent_info() {} - bool equal(const Extent_info & rec) const { - return m_key.m_file_no == rec.m_key.m_file_no && - m_key.m_page_idx == rec.m_key.m_page_idx; - } - }; // 40 bytes - - typedef RecordPool Extent_info_pool; - typedef DLListImpl Extent_info_list; - typedef LocalDLListImpl Local_extent_info_list; - typedef DLHashTableImpl Extent_info_hash; - typedef SLListImpl Fragment_extent_list; - typedef LocalSLListImpl Local_fragment_extent_list; - struct Tablerec; - struct Disk_alloc_info - { - Disk_alloc_info() {} - Disk_alloc_info(const Tablerec* tabPtrP, - Uint32 extent_size_in_pages); - Uint32 m_extent_size; - - /** - * Disk allocation - * - * 1) Allocate space on pages that already are dirty - * (4 free lists for different requests) - * 2) Allocate space on pages waiting to maped that will be dirty - * (4 free lists for different requests) - * 3) Check if "current" extent can accommodate request - * If so, allocate page from there - * Else put "current" into free matrix - * 4) Search free matrix for extent with greatest amount of free space - * while still accommodating current request - * (20 free lists for different requests) - */ - - /** - * Free list of pages in different size - * that are dirty - */ - DLList::Head m_dirty_pages[MAX_FREE_LIST]; // In real page id's - - /** - * Requests (for update) that have sufficient space left after request - * these are currently being "mapped" - */ - Page_request_list::Head m_page_requests[MAX_FREE_LIST]; - - DLList::Head m_unmap_pages; - - /** - * Current extent - */ - Uint32 m_curr_extent_info_ptr_i; - - /** - * - */ - STATIC_CONST( SZ = EXTENT_SEARCH_MATRIX_SIZE ); - Extent_info_list::Head m_free_extents[SZ]; - Uint32 m_total_extent_free_space_thresholds[EXTENT_SEARCH_MATRIX_ROWS]; - Uint32 m_page_free_bits_map[EXTENT_SEARCH_MATRIX_COLS]; - - Uint32 find_extent(Uint32 sz) const; - Uint32 calc_extent_pos(const Extent_info*) const; - - /** - * Compute minimum free space on page given bits - */ - Uint32 calc_page_free_space(Uint32 bits) const { - return m_page_free_bits_map[bits]; - } - - /** - * Compute page free bits, given free space - */ - Uint32 calc_page_free_bits(Uint32 free) const { - for(Uint32 i = 0; i= m_page_free_bits_map[i]) - return i; - return EXTENT_SEARCH_MATRIX_COLS - 1; - } - - Fragment_extent_list::Head m_extent_list; - }; - - void dump_disk_alloc(Disk_alloc_info&); - -struct Fragrecord { - Uint32 nextStartRange; - Uint32 currentPageRange; - Uint32 rootPageRange; - Uint32 noOfPages; - Uint32 noOfVarPages; - Uint32 noOfPagesToGrow; - - DLList::Head emptyPrimPage; // allocated pages (not init) - DLFifoList::Head thFreeFirst; // pages with atleast 1 free record - SLList::Head m_empty_pages; // Empty pages not in logical/physical map - - Uint32 m_lcp_scan_op; - Uint32 m_lcp_keep_list; - - State fragStatus; - Uint32 fragTableId; - Uint32 fragmentId; - Uint32 nextfreefrag; - DLList::Head free_var_page_array[MAX_FREE_LIST]; - - DLList::Head m_scanList; - - enum { UC_LCP = 1, UC_CREATE = 2, UC_SET_LCP = 3 }; - Uint32 m_restore_lcp_id; - Uint32 m_undo_complete; - Uint32 m_tablespace_id; - Uint32 m_logfile_group_id; - Disk_alloc_info m_disk_alloc_info; - Uint32 m_var_page_chunks; -}; -typedef Ptr FragrecordPtr; - - -struct Operationrec { - /* - * To handle Attrinfo 
signals and buffer them up we need to - * a simple list with first and last and we also need to keep track - * of how much we received for security check. - * Will most likely disappear with introduction of long signals. - * These variables are used before TUPKEYREQ is received and not - * thereafter and is disposed with after calling copyAttrinfo - * which is called before putting the operation into its lists. - * Thus we can use union declarations for these variables. - */ - - /* - * Used by scans to find the Attrinfo buffers. - * This is only until returning from copyAttrinfo and - * can thus reuse the same memory as needed by the - * active operation list variables. - */ - - /* - * Doubly linked list with anchor on tuple. - * This is to handle multiple updates on the same tuple - * by the same transaction. - */ - union { - Uint32 prevActiveOp; - Uint32 storedProcedureId; //Used until copyAttrinfo - }; - union { - Uint32 nextActiveOp; - Uint32 currentAttrinbufLen; //Used until copyAttrinfo - }; - - Operationrec() {} - bool is_first_operation() const { return prevActiveOp == RNIL;} - bool is_last_operation() const { return nextActiveOp == RNIL;} - - Uint32 m_undo_buffer_space; // In words - union { - Uint32 firstAttrinbufrec; //Used until copyAttrinfo - }; - Uint32 m_any_value; - union { - Uint32 lastAttrinbufrec; //Used until copyAttrinfo - Uint32 nextPool; - }; - Uint32 attrinbufLen; //only used during STORED_PROCDEF phase - Uint32 storedProcPtr; //only used during STORED_PROCDEF phase - - /* - * From fragment i-value we can find fragment and table record - */ - Uint32 fragmentPtr; - - /* - * We need references to both the original tuple and the copy tuple. - * We keep the page's real i-value and its index and from there we - * can find out about the fragment page id and the page offset. - */ - Local_key m_tuple_location; - Local_key m_copy_tuple_location; - - /* - * We keep the record linked to the operation record in LQH. - * This is needed due to writing of REDO log must be performed - * in correct order, which is the same order as the writes - * occurred. LQH can receive the records in different order. - */ - Uint32 userpointer; - - /* - * When responding to queries in the same transaction they will see - * a result from the save point id the query was started. Again - * functionality for multi-updates of the same record in one - * transaction. - */ - union { - Uint32 savepointId; - Uint32 m_commit_disk_callback_page; - }; - - /* - * We use 64 bits to save change mask for the most common cases. - */ - Uint32 saved_change_mask[2]; - - /* - * State variables on connection. - * State variable on tuple after multi-updates - * Is operation undo logged or not - * Is operation in fragment list - * Is operation in multi-update list - * Operation type (READ, UPDATE, etc) - * Is record primary replica - * Is delete or insert performed - */ - struct OpBitFields { - unsigned int trans_state : 3; - unsigned int tuple_state : 2; - unsigned int in_active_list : 1; - - unsigned int op_type : 3; - unsigned int delete_insert_flag : 1; - unsigned int primary_replica : 1; - unsigned int change_mask_state : 2; - unsigned int m_disk_preallocated : 1; - unsigned int m_load_diskpage_on_commit : 1; - unsigned int m_wait_log_buffer : 1; - }; - union { - OpBitFields op_struct; - Uint16 op_bit_fields; - }; - - /* - * TUX needs to know the tuple version of the tuple since it - * keeps an entry for both the committed and all versions in - * a transaction currently. 
So each update will create a new - * version even if in the same transaction. - */ - Uint16 tupVersion; -}; -typedef Ptr OperationrecPtr; - - /* ****************************** PAGE RANGE RECORD ************************** */ - /* PAGE RANGES AND BASE PAGE ID. EACH RANGE HAS A CORRESPONDING BASE PAGE ID */ - /* THAT IS USED TO CALCULATE REAL PAGE ID FROM A FRAGMENT PAGE ID AND A TABLE */ - /* REFERENCE. */ - /* THE PAGE RANGES ARE ORGANISED IN A B-TREE FASHION WHERE THE VARIABLE TYPE */ - /* SPECIFIES IF A LEAF NODE HAS BEEN REACHED. IF A LEAF NODE HAS BEEN REACHED */ - /* THEN BASE_PAGE_ID IS THE BASE_PAGE_ID OF THE SET OF PAGES THAT WAS */ - /* ALLOCATED IN THAT RANGE. OTHERWISE BASE_PAGE_ID IS THE POINTER TO THE NEXT */ - /* PAGE_RANGE RECORD. */ - /* *************************************************************************** */ -struct PageRange { - Uint32 startRange[4]; /* START OF RANGE */ - Uint32 endRange[4]; /* END OF THIS RANGE */ - Uint32 basePageId[4]; /* BASE PAGE ID. */ -/*---- VARIABLE BASE_PAGE_ID2 (4) 8 DS NEEDED WHEN SUPPORTING 40 BIT PAGE ID -------*/ - Uint8 type[4]; /* TYPE OF BASE PAGE ID */ - Uint32 nextFree; /* NEXT FREE PAGE RANGE RECORD */ - Uint32 parentPtr; /* THE PARENT TO THE PAGE RANGE REC IN THE B-TREE */ - Uint8 currentIndexPos; -}; -typedef Ptr PageRangePtr; - - - /* ************* TRIGGER DATA ************* */ - /* THIS RECORD FORMS LISTS OF ACTIVE */ - /* TRIGGERS FOR EACH TABLE. */ - /* THE RECORDS ARE MANAGED BY A TRIGGER */ - /* POOL wHERE A TRIGGER RECORD IS SEIZED */ - /* WHEN A TRIGGER IS ACTIVATED AND RELEASED */ - /* WHEN THE TRIGGER IS DEACTIVATED. */ - /* **************************************** */ -struct TupTriggerData { - TupTriggerData() {} - - /** - * Trigger id, used by DICT/TRIX to identify the trigger - * - * trigger Ids are unique per block for SUBSCRIPTION triggers. - * This is so that BACKUP can use TUP triggers directly and delete them - * properly. - */ - Uint32 triggerId; - - /** - * Index id is needed for ordered index. - */ - Uint32 indexId; - - /** - * Trigger type etc, defines what the trigger is used for - */ - TriggerType::Value triggerType; - TriggerActionTime::Value triggerActionTime; - TriggerEvent::Value triggerEvent; - /** - * Receiver block - */ - Uint32 m_receiverBlock; - - /** - * Monitor all replicas, i.e. trigger will fire on all nodes where tuples - * are stored - */ - bool monitorReplicas; - - /** - * Monitor all attributes, the trigger monitors all changes to attributes - * in the table - */ - bool monitorAllAttributes; - - /** - * Send only changed attributes at trigger firing time. - */ - bool sendOnlyChangedAttributes; - - /** - * Send also before values at trigger firing time. - */ - bool sendBeforeValues; - - /** - * Attribute mask, defines what attributes are to be monitored - * Can be seen as a compact representation of SQL column name list - */ - Bitmask attributeMask; - - /** - * Next ptr (used in pool/list) - */ - union { - Uint32 nextPool; - Uint32 nextList; - }; - - /** - * Prev pointer (used in list) - */ - Uint32 prevList; - - inline void print(NdbOut & s) const { s << "[TriggerData = " << triggerId << "]"; }; -}; - -typedef Ptr TriggerPtr; - -/** - * Pool of trigger data record - */ -ArrayPool c_triggerPool; - - /* ************ TABLE RECORD ************ */ - /* THIS RECORD FORMS A LIST OF TABLE */ - /* REFERENCE INFORMATION. ONE RECORD */ - /* PER TABLE REFERENCE. 
*/ - /* ************************************** */ - STATIC_CONST( MM = 0 ); - STATIC_CONST( DD = 1 ); - - struct Tablerec { - Tablerec(ArrayPool & triggerPool) : - afterInsertTriggers(triggerPool), - afterDeleteTriggers(triggerPool), - afterUpdateTriggers(triggerPool), - subscriptionInsertTriggers(triggerPool), - subscriptionDeleteTriggers(triggerPool), - subscriptionUpdateTriggers(triggerPool), - constraintUpdateTriggers(triggerPool), - tuxCustomTriggers(triggerPool) - {} - - Bitmask notNullAttributeMask; - Bitmask blobAttributeMask; - - ReadFunction* readFunctionArray; - UpdateFunction* updateFunctionArray; - CHARSET_INFO** charsetArray; - - Uint32 readKeyArray; - Uint32 tabDescriptor; - Uint32 m_real_order_descriptor; - - enum Bits - { - TR_Checksum = 0x1, // Need to be 1 - TR_RowGCI = 0x2, - TR_ForceVarPart = 0x4 - }; - Uint16 m_bits; - Uint16 total_rec_size; // Max total size for entire tuple in words - - /** - * Aggregates - */ - Uint16 m_no_of_attributes; - Uint16 m_no_of_disk_attributes; - Uint16 noOfKeyAttr; - Uint16 noOfCharsets; - - bool need_expand() const { - return m_no_of_attributes > m_attributes[MM].m_no_of_fixsize; - } - - bool need_expand(bool disk) const { - return m_attributes[MM].m_no_of_varsize > 0 || - (disk && m_no_of_disk_attributes > 0); - } - - bool need_shrink() const { - return - m_attributes[MM].m_no_of_varsize > 0 || - m_attributes[DD].m_no_of_varsize > 0; - } - - bool need_shrink(bool disk) const { - return - m_attributes[MM].m_no_of_varsize > 0 || - (disk && m_attributes[DD].m_no_of_varsize > 0); - } - - /** - * Descriptors for MM and DD part - */ - struct Tuple_offsets { - Uint8 m_null_words; - Uint8 m_null_offset; - Uint16 m_disk_ref_offset; // In words relative m_data - Uint16 m_fix_header_size; // For fix size tuples= total rec size(part) - Uint16 m_max_var_offset; // In bytes relative m_var_data.m_data_ptr - } m_offsets[2]; - - Uint32 get_check_offset(Uint32 mm) const { - return m_offsets[mm].m_fix_header_size; - } - - struct { - Uint16 m_no_of_fixsize; - Uint16 m_no_of_varsize; - } m_attributes[2]; - - // Lists of trigger data for active triggers - DLList afterInsertTriggers; - DLList afterDeleteTriggers; - DLList afterUpdateTriggers; - DLList subscriptionInsertTriggers; - DLList subscriptionDeleteTriggers; - DLList subscriptionUpdateTriggers; - DLList constraintUpdateTriggers; - - // List of ordered indexes - DLList tuxCustomTriggers; - - Uint32 fragid[MAX_FRAG_PER_NODE]; - Uint32 fragrec[MAX_FRAG_PER_NODE]; - - struct { - Uint32 tabUserPtr; - Uint32 tabUserRef; - Uint32 m_lcpno; - Uint32 m_fragPtrI; - } m_dropTable; - State tableStatus; - }; - - struct Disk_undo - { - enum - { - UNDO_ALLOC = File_formats::Undofile::UNDO_TUP_ALLOC - ,UNDO_UPDATE = File_formats::Undofile::UNDO_TUP_UPDATE - ,UNDO_FREE = File_formats::Undofile::UNDO_TUP_FREE - ,UNDO_CREATE = File_formats::Undofile::UNDO_TUP_CREATE - ,UNDO_DROP = File_formats::Undofile::UNDO_TUP_DROP - ,UNDO_ALLOC_EXTENT = File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT - ,UNDO_FREE_EXTENT = File_formats::Undofile::UNDO_TUP_FREE_EXTENT - }; - - struct Alloc - { - Uint32 m_file_no_page_idx; // 16 bit file_no, 16 bit page_idx - Uint32 m_page_no; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - - struct Update - { - Uint32 m_file_no_page_idx; // 16 bit file_no, 16 bit page_idx - Uint32 m_page_no; - Uint32 m_gci; - Uint32 m_data[1]; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - - struct Free - { - Uint32 m_file_no_page_idx; // 16 bit file_no, 16 bit page_idx - Uint32 m_page_no; - 
Uint32 m_gci; - Uint32 m_data[1]; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - - struct Create - { - Uint32 m_table; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - - struct Drop - { - Uint32 m_table; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - - struct AllocExtent - { - Uint32 m_table; - Uint32 m_fragment; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_type_length; - }; - - struct FreeExtent - { - Uint32 m_table; - Uint32 m_fragment; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_type_length; - }; - }; - - Extent_info_pool c_extent_pool; - Extent_info_hash c_extent_hash; - Page_request_pool c_page_request_pool; - - typedef Ptr TablerecPtr; - - struct storedProc { - Uint32 storedLinkFirst; - Uint32 storedLinkLast; - Uint32 storedCounter; - Uint32 nextPool; - Uint16 storedCode; - Uint16 storedProcLength; -}; - -typedef Ptr StoredProcPtr; - -ArrayPool c_storedProcPool; - -/* **************************** TABLE_DESCRIPTOR RECORD ******************************** */ -/* THIS VARIABLE IS USED TO STORE TABLE DESCRIPTIONS. A TABLE DESCRIPTION IS STORED AS A */ -/* CONTIGUOS ARRAY IN THIS VARIABLE. WHEN A NEW TABLE IS ADDED A CHUNK IS ALLOCATED IN */ -/* THIS RECORD. WHEN ATTRIBUTES ARE ADDED TO THE TABLE, A NEW CHUNK OF PROPER SIZE IS */ -/* ALLOCATED AND ALL DATA IS COPIED TO THIS NEW CHUNK AND THEN THE OLD CHUNK IS PUT IN */ -/* THE FREE LIST. EACH TABLE IS DESCRIBED BY A NUMBER OF TABLE DESCRIPTIVE ATTRIBUTES */ -/* AND A NUMBER OF ATTRIBUTE DESCRIPTORS AS SHOWN IN FIGURE BELOW */ -/* */ -/* WHEN ALLOCATING A TABLE DESCRIPTOR THE SIZE IS ALWAYS A MULTIPLE OF 16 WORDS. */ -/* */ -/* ---------------------------------------------- */ -/* | TRAILER USED FOR ALLOC/DEALLOC | */ -/* ---------------------------------------------- */ -/* | TABLE DESCRIPTIVE ATTRIBUTES | */ -/* ---------------------------------------------- */ -/* | ATTRIBUTE DESCRIPTION 1 | */ -/* ---------------------------------------------- */ -/* | ATTRIBUTE DESCRIPTION 2 | */ -/* ---------------------------------------------- */ -/* | | */ -/* | | */ -/* | | */ -/* ---------------------------------------------- */ -/* | ATTRIBUTE DESCRIPTION N | */ -/* ---------------------------------------------- */ -/* */ -/* THE TABLE DESCRIPTIVE ATTRIBUTES CONTAINS THE FOLLOWING ATTRIBUTES: */ -/* */ -/* ---------------------------------------------- */ -/* | HEADER (TYPE OF INFO) | */ -/* ---------------------------------------------- */ -/* | SIZE OF WHOLE CHUNK (INCL. TRAILER) | */ -/* ---------------------------------------------- */ -/* | TABLE IDENTITY | */ -/* ---------------------------------------------- */ -/* | FRAGMENT IDENTITY | */ -/* ---------------------------------------------- */ -/* | NUMBER OF ATTRIBUTES | */ -/* ---------------------------------------------- */ -/* | SIZE OF FIXED ATTRIBUTES | */ -/* ---------------------------------------------- */ -/* | NUMBER OF NULL FIELDS | */ -/* ---------------------------------------------- */ -/* | NOT USED | */ -/* ---------------------------------------------- */ -/* */ -/* THESE ATTRIBUTES ARE ALL ONE R-VARIABLE IN THE RECORD. */ -/* NORMALLY ONLY ONE TABLE DESCRIPTOR IS USED. DURING SCHEMA CHANGES THERE COULD */ -/* HOWEVER EXIST MORE THAN ONE TABLE DESCRIPTION SINCE THE SCHEMA CHANGE OF VARIOUS */ -/* FRAGMENTS ARE NOT SYNCHRONISED. THIS MEANS THAT ALTHOUGH THE SCHEMA HAS CHANGED */ -/* IN ALL FRAGMENTS, BUT THE FRAGMENTS HAVE NOT REMOVED THE ATTRIBUTES IN THE SAME */ -/* TIME-FRAME. 
THEREBY SOME ATTRIBUTE INFORMATION MIGHT DIFFER BETWEEN FRAGMENTS. */ -/* EXAMPLES OF ATTRIBUTES THAT MIGHT DIFFER ARE SIZE OF FIXED ATTRIBUTES, NUMBER OF */ -/* ATTRIBUTES, FIELD START WORD, START BIT. */ -/* */ -/* AN ATTRIBUTE DESCRIPTION CONTAINS THE FOLLOWING ATTRIBUTES: */ -/* */ -/* ---------------------------------------------- */ -/* | Field Type, 4 bits (LSB Bits) | */ -/* ---------------------------------------------- */ -/* | Attribute Size, 4 bits | */ -/* ---------------------------------------------- */ -/* | NULL indicator 1 bit | */ -/* ---------------------------------------------- */ -/* | Indicator if TUP stores attr. 1 bit | */ -/* ---------------------------------------------- */ -/* | Not used 6 bits | */ -/* ---------------------------------------------- */ -/* | No. of elements in fixed array 16 bits | */ -/* ---------------------------------------------- */ -/* ---------------------------------------------- */ -/* | Field Start Word, 21 bits (LSB Bits) | */ -/* ---------------------------------------------- */ -/* | NULL Bit, 11 bits | */ -/* ---------------------------------------------- */ -/* */ -/* THE ATTRIBUTE SIZE CAN BE 1,2,4,8,16,32,64 AND 128 BITS. */ -/* */ -/* THE UNUSED PARTS OF THE RECORDS ARE PUT IN A LINKED LIST OF FREE PARTS. EACH OF */ -/* THOSE FREE PARTS HAVE THREE RECORDS ASSIGNED AS SHOWN IN THIS STRUCTURE */ -/* ALL FREE PARTS ARE SET INTO A CHUNK LIST WHERE EACH CHUNK IS AT LEAST 16 WORDS */ -/* */ -/* ---------------------------------------------- */ -/* | HEADER = RNIL | */ -/* ---------------------------------------------- */ -/* | SIZE OF FREE AREA | */ -/* ---------------------------------------------- */ -/* | POINTER TO PREVIOUS FREE AREA | */ -/* ---------------------------------------------- */ -/* | POINTER TO NEXT FREE AREA | */ -/* ---------------------------------------------- */ -/* */ -/* IF THE POINTER TO THE NEXT AREA IS RNIL THEN THIS IS THE LAST FREE AREA. */ -/* */ -/*****************************************************************************************/ -struct TableDescriptor { - Uint32 tabDescr; -}; -typedef Ptr TableDescriptorPtr; - -struct HostBuffer { - bool inPackedList; - Uint32 packetLenTA; - Uint32 noOfPacketsTA; - Uint32 packetBufferTA[30]; -}; -typedef Ptr HostBufferPtr; - - /* - * Build index operation record. 
- */ - struct BuildIndexRec { - // request cannot use signal class due to extra members - Uint32 m_request[BuildIndxReq::SignalLength]; - Uint8 m_build_vs; // varsize pages - Uint32 m_indexId; // the index - Uint32 m_fragNo; // fragment number under Tablerec - Uint32 m_pageId; // logical fragment page id - Uint32 m_tupleNo; // tuple number on page - Uint32 m_buildRef; // Where to send tuples - BuildIndxRef::ErrorCode m_errorCode; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr BuildIndexPtr; - ArrayPool c_buildIndexPool; - DLList c_buildIndexList; - Uint32 c_noOfBuildIndexRec; - - /** - * Reference to variable part when a tuple is chained - */ - struct Var_part_ref - { -#ifdef NDB_32BIT_VAR_REF - /* - In versions prior to ndb 6.1.6, 6.2.1 and mysql 5.1.17 - Running this code limits DataMemory to 16G, also online - upgrade not possible between versions - */ - Uint32 m_ref; - STATIC_CONST( SZ32 = 1 ); - - void copyout(Local_key* dst) const { - dst->m_page_no = m_ref >> MAX_TUPLES_BITS; - dst->m_page_idx = m_ref & MAX_TUPLES_PER_PAGE; - } - - void assign(const Local_key* src) { - m_ref = (src->m_page_no << MAX_TUPLES_BITS) | src->m_page_idx; - } -#else - Uint32 m_page_no; - Uint32 m_page_idx; - STATIC_CONST( SZ32 = 2 ); - - void copyout(Local_key* dst) const { - dst->m_page_no = m_page_no; - dst->m_page_idx = m_page_idx; - } - - void assign(const Local_key* src) { - m_page_no = src->m_page_no; - m_page_idx = src->m_page_idx; - } -#endif - }; - - struct Disk_part_ref - { - STATIC_CONST( SZ32 = 2 ); - }; - - struct Tuple_header - { - union { - /** - * List of prepared operations for this tuple. - * Points to most recent/last operation, ie. to walk the list must follow - * regOperPtr->prevActiveOp links. - */ - Uint32 m_operation_ptr_i; // OperationPtrI - Uint32 m_base_record_ref; // For disk tuple, ref to MM tuple - }; - Uint32 m_header_bits; // Header word - union { - Uint32 m_checksum; - Uint32 m_data[1]; - Uint32 m_null_bits[1]; - }; - - STATIC_CONST( HeaderSize = 2 ); - - /** - * header bits - */ - STATIC_CONST( TUP_VERSION_MASK = 0xFFFF ); - STATIC_CONST( CHAINED_ROW = 0x00010000 ); // Is var part on different page - STATIC_CONST( DISK_PART = 0x00020000 ); // Is there a disk part - STATIC_CONST( DISK_ALLOC = 0x00040000 ); // Is disk part allocated - STATIC_CONST( DISK_INLINE = 0x00080000 ); // Is disk inline - STATIC_CONST( ALLOC = 0x00100000 ); // Is record allocated now - STATIC_CONST( MM_SHRINK = 0x00200000 ); // Has MM part shrunk - STATIC_CONST( MM_GROWN = 0x00400000 ); // Has MM part grown - STATIC_CONST( FREED = 0x00800000 ); // Is freed - STATIC_CONST( LCP_SKIP = 0x01000000 ); // Should not be returned in LCP - STATIC_CONST( LCP_KEEP = 0x02000000 ); // Should be returned in LCP - STATIC_CONST( FREE = 0x02800000 ); // Is free - - Tuple_header() {} - Uint32 get_tuple_version() const { - return m_header_bits & TUP_VERSION_MASK; - } - void set_tuple_version(Uint32 version) { - m_header_bits= - (m_header_bits & ~(Uint32)TUP_VERSION_MASK) | - (version & TUP_VERSION_MASK); - } - - Uint32* get_null_bits(const Tablerec* tabPtrP) { - return m_null_bits+tabPtrP->m_offsets[MM].m_null_offset; - } - - Uint32* get_null_bits(const Tablerec* tabPtrP, Uint32 mm) { - return m_null_bits+tabPtrP->m_offsets[mm].m_null_offset; - } - - Var_part_ref* get_var_part_ref_ptr(const Tablerec* tabPtrP) { - return (Var_part_ref*)(get_disk_ref_ptr(tabPtrP) + Disk_part_ref::SZ32); - } - - const Var_part_ref* get_var_part_ref_ptr(const Tablerec* tabPtrP) const { - return 
(Var_part_ref*)(get_disk_ref_ptr(tabPtrP) + Disk_part_ref::SZ32); - } - - Uint32* get_end_of_fix_part_ptr(const Tablerec* tabPtrP) { - return m_data + tabPtrP->m_offsets[MM].m_fix_header_size - - Tuple_header::HeaderSize; - } - - const Uint32* get_end_of_fix_part_ptr(const Tablerec* tabPtrP) const { - return m_data + tabPtrP->m_offsets[MM].m_fix_header_size - - Tuple_header::HeaderSize; - } - - Uint32* get_disk_ref_ptr(const Tablerec* tabPtrP) { - return m_data + tabPtrP->m_offsets[MM].m_disk_ref_offset; - } - - const Uint32* get_disk_ref_ptr(const Tablerec* tabPtrP) const { - return m_data + tabPtrP->m_offsets[MM].m_disk_ref_offset; - } - - Uint32 *get_mm_gci(const Tablerec* tabPtrP){ - assert(tabPtrP->m_bits & Tablerec::TR_RowGCI); - return m_data + (tabPtrP->m_bits & Tablerec::TR_Checksum); - } - - Uint32 *get_dd_gci(const Tablerec* tabPtrP, Uint32 mm){ - assert(tabPtrP->m_bits & Tablerec::TR_RowGCI); - return m_data; - } - }; - -struct KeyReqStruct { -/** - * These variables are used as temporary storage during execution of the - * TUPKEYREQ signal. - * The first set of variables defines a number of variables needed for - * the fix part of the tuple. - * - * The second part defines a number of commonly used meta data variables. - * - * The third set of variables defines a set of variables needed for the - * variable part. - * - * The fourth part is variables needed only for updates and inserts. - * - * The fifth part is a long array of real lengths which is is put last - * for cache memory reasons. This is part of the variable part and - * contains the real allocated lengths whereas the tuple contains - * the length of attribute stored. - */ - Tuple_header *m_tuple_ptr; - - Uint32 check_offset[2]; - - TableDescriptor *attr_descr; - Uint32 max_read; - Uint32 out_buf_index; - Uint32 in_buf_index; - Uint32 in_buf_len; - Uint32 attr_descriptor; - bool xfrm_flag; - - struct Var_data { - char *m_data_ptr; - Uint16 *m_offset_array_ptr; - Uint16 m_var_len_offset; - Uint16 m_max_var_offset; - } m_var_data[2]; - - Tuple_header *m_disk_ptr; - PagePtr m_page_ptr; - PagePtr m_varpart_page_ptr; // could be same as m_page_ptr_p - PagePtr m_disk_page_ptr; // - Local_key m_row_id; - - bool dirty_op; - bool interpreted_exec; - bool last_row; - bool m_use_rowid; - - Signal* signal; - Uint32 no_fired_triggers; - Uint32 frag_page_id; - Uint32 hash_value; - Uint32 gci; - Uint32 log_size; - Uint32 read_length; - Uint32 attrinfo_len; - Uint32 tc_operation_ptr; - Uint32 trans_id1; - Uint32 trans_id2; - Uint32 TC_index; - // next 2 apply only to attrids >= 64 (zero otherwise) - Uint32 max_attr_id_updated; - Uint32 no_changed_attrs; - BlockReference TC_ref; - BlockReference rec_blockref; - bool change_mask_calculated; - /* - * A bit mask where a bit set means that the update or insert - * was updating this record. 
- */ - Bitmask changeMask; - Uint16 var_pos_array[2*MAX_ATTRIBUTES_IN_TABLE + 1]; - OperationrecPtr prevOpPtr; -}; - - friend class Undo_buffer; - Undo_buffer c_undo_buffer; - -/* - No longer used: - Implemented by shift instructions in subroutines instead - -struct TupHeadInfo { - struct BitPart { - unsigned int disk_indicator : 1; - unsigned int var_part_loc_ind : 1; - unsigned int initialised : 1; - unsigned int not_used_yet : 5; - unsigned int no_var_sized : 8; - unsigned int tuple_version : 16; - }; - union { - Uint32 all; - BitPart bit_part; - }; -}; -*/ - -// updateAttributes module - Uint32 terrorCode; - -public: - Dbtup(Block_context&, Pgman*); - virtual ~Dbtup(); - - /* - * TUX uses logical tuple address when talking to ACC and LQH. - */ - void tuxGetTupAddr(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32& tupAddr); - - /* - * TUX index in TUP has single Uint32 array attribute which stores an - * index node. TUX reads and writes the node directly via pointer. - */ - int tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node); - void tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node); - void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node); - - /* - * TUX reads primary table attributes for index keys. Tuple is - * specified by location of original tuple and version number. Input - * is attribute ids in AttributeHeader format. Output is attribute - * data with headers. Uses readAttributes with xfrm option set. - * Returns number of words or negative (-terrorCode) on error. - */ - int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut); - - /* - * TUX reads primary key without headers into an array of words. Used - * for md5 summing and when returning keyinfo. Returns number of - * words or negative (-terrorCode) on error. - */ - int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag); - - /* - * ACC reads primary key without headers into an array of words. At - * this point in ACC deconstruction, ACC still uses logical references - * to fragment and tuple. - */ - int accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag); - - /* - * TUX checks if tuple is visible to scan. 
- */ - bool tuxQueryTh(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, bool dirty, Uint32 savepointId); - - int load_diskpage(Signal*, Uint32 opRec, Uint32 fragPtrI, - Uint32 local_key, Uint32 flags); - - int load_diskpage_scan(Signal*, Uint32 opRec, Uint32 fragPtrI, - Uint32 local_key, Uint32 flags); - - int alloc_page(Tablerec*, Fragrecord*, PagePtr*,Uint32 page_no); - - void start_restore_lcp(Uint32 tableId, Uint32 fragmentId); - void complete_restore_lcp(Uint32 tableId, Uint32 fragmentId); - - int nr_read_pk(Uint32 fragPtr, const Local_key*, Uint32* dataOut, bool©); - int nr_update_gci(Uint32 fragPtr, const Local_key*, Uint32 gci); - int nr_delete(Signal*, Uint32, Uint32 fragPtr, const Local_key*, Uint32 gci); - - void nr_delete_page_callback(Signal*, Uint32 op, Uint32 page); - void nr_delete_log_buffer_callback(Signal*, Uint32 op, Uint32 page); - - bool get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage); -private: - BLOCK_DEFINES(Dbtup); - - // Transit signals - void execDEBUG_SIG(Signal* signal); - void execCONTINUEB(Signal* signal); - - // Received signals - void execLCP_FRAG_ORD(Signal*signal); - void execDUMP_STATE_ORD(Signal* signal); - void execSEND_PACKED(Signal* signal); - void execSTTOR(Signal* signal); - void execTUP_LCPREQ(Signal* signal); - void execEND_LCPREQ(Signal* signal); - void execSTART_RECREQ(Signal* signal); - void execMEMCHECKREQ(Signal* signal); - void execTUPSEIZEREQ(Signal* signal); - void execTUPRELEASEREQ(Signal* signal); - void execSTORED_PROCREQ(Signal* signal); - void execTUPFRAGREQ(Signal* signal); - void execTUP_ADD_ATTRREQ(Signal* signal); - void execTUP_COMMITREQ(Signal* signal); - void execTUP_ABORTREQ(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - void execALTER_TAB_REQ(Signal* signal); - void execTUP_DEALLOCREQ(Signal* signal); - void execTUP_WRITELOG_REQ(Signal* signal); - - // Ordered index related - void execBUILDINDXREQ(Signal* signal); - void buildIndex(Signal* signal, Uint32 buildPtrI); - void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec); - - // Tup scan - void execACC_SCANREQ(Signal* signal); - void execNEXT_SCANREQ(Signal* signal); - void execACC_CHECK_SCAN(Signal* signal); - void execACCKEYCONF(Signal* signal); - void execACCKEYREF(Signal* signal); - void execACC_ABORTCONF(Signal* signal); - - - // Drop table - void execFSREMOVEREF(Signal*); - void execFSREMOVECONF(Signal*); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ -// Methods to handle execution of TUPKEYREQ + ATTRINFO. -// -// Module Execution Manager -// -// The TUPKEYREQ signal is central to this block. This signal is used -// by everybody that needs to read data residing in DBTUP. The data is -// read using an interpreter approach. -// -// Operations only needing to read execute a simplified version of the -// interpreter where the only instruction is read Attribute to send. -// Operations only needing to update the record (insert or update) -// execute a simplified version of the interpreter where the only -// instruction is write Attribute. -// -// Currently TUPKEYREQ is used in the following situations. -// 1) Normal transaction execution. Can be any of the types described -// below. -// 2) Execution of fragment redo log during system restart. 
-// In this situation there will only be normal updates, inserts -// and deletes performed. -// 3) A special type of normal transaction execution is to write the -// records arriving from the primary replica in the node restart -// processing. This will always be normal write operations which -// are translated to inserts or updates before arriving to TUP. -// 4) Scan processing. The scan processing will use normal reads or -// interpreted reads in their execution. There will be one TUPKEYREQ -// signal for each record processed. -// 5) Copy fragment processing. This is a special type of scan used in the -// primary replica at system restart. It reads the entire reads and -// converts those to writes to the starting node. In this special case -// LQH acts as an API node and receives also the ATTRINFO sent in the -// TRANSID_AI signals. -// -// Signal Diagram: -// -// In Signals: -// ----------- -// -// Logically there is one request TUPKEYREQ which requests to read/write data -// of one tuple in the database. Since the definition of what to read and write -// can be bigger than the maximum signal size we segment the signal. The definition -// of what to read/write/interpreted program is sent before the TUPKEYREQ signal. -// -// ---> ATTRINFO -// ... -// ---> ATTRINFO -// ---> TUPKEYREQ -// The number of ATTRINFO signals can be anything between 0 and upwards. -// The total size of the ATTRINFO is not allowed to be more than 16384 words. -// There is always one and only one TUPKEYREQ. -// -// Response Signals (successful case): -// -// Simple/Dirty Read Operation -// --------------------------- -// -// <---- TRANSID_AI (to API) -// ... -// <---- TRANSID_AI (to API) -// <---- READCONF (to API) -// <---- TUPKEYCONF (to LQH) -// There is always exactly one READCONF25 sent last. The number of -// TRANSID_AI is dependent on how much that was read. The maximum size -// of the ATTRINFO sent back is 16384 words. The signals are sent -// directly to the application with an address provided by the -// TUPKEYREQ signal. -// A positive response signal is also sent to LQH. -// -// Normal Read Operation -// --------------------- -// -// <---- TRANSID_AI (to API) -// ... -// <---- TRANSID_AI (to API) -// <---- TUPKEYCONF (to LQH) -// The number of TRANSID_AI is dependent on how much that was read. -// The maximum size of the ATTRINFO sent back is 16384 words. The -// signals are sent directly to the application with an address -// provided by the TUPKEYREQ signal. -// A positive response signal is also sent to LQH. -// -// Normal update/insert/delete operation -// ------------------------------------- -// -// <---- TUPKEYCONF -// After successful updating of the tuple LQH is informed of this. -// -// Delete with read -// ---------------- -// -// Will behave as a normal read although it also prepares the -// deletion of the tuple. -// -// Interpreted Update -// ------------------ -// -// <---- TRANSID_AI (to API) -// ... -// <---- TRANSID_AI (to API) -// <---- TUP_ATTRINFO (to LQH) -// ... -// <---- TUP_ATTRINFO (to LQH) -// <---- TUPKEYCONF (to LQH) -// -// The interpreted Update contains five sections: -// The first section performs read Attribute operations -// that send results back to the API. -// -// The second section executes the interpreted program -// where data from attributes can be updated and it -// can also read attribute values into the registers. -// -// The third section performs unconditional updates of -// attributes. 
-// -// The fourth section can read the attributes to be sent to the -// API after updating the record. -// -// The fifth section contains subroutines used by the interpreter -// in the second section. -// -// All types of interpreted programs contains the same five sections. -// The only difference is that only interpreted updates can update -// attributes. Interpreted inserts are not allowed. -// -// Interpreted Updates have to send back the information about the -// attributes they have updated. This information will be shipped to -// the log and also to any other replicas. Thus interpreted updates -// are only performed in the primary replica. The fragment redo log -// in LQH will contain information so that normal update/inserts/deletes -// can be performed using TUPKEYREQ. -// -// Interpreted Read -// ---------------- -// -// From a signalling point of view the Interpreted Read behaves as -// as a Normal Read. The interpreted Read is often used by Scan's. -// -// Interpreted Delete -// ------------------ -// -// <---- TUPKEYCONF -// After successful prepartion to delete the tuple LQH is informed -// of this. -// -// Interpreted Delete with Read -// ---------------------------- -// -// From a signalling point of view an interpreted delete with read -// behaves as a normal read. -// -// Continuation after successful case: -// -// After a read of any kind the operation record is ready to be used -// again by a new operation. -// -// Any updates, inserts or deletes waits for either of two messages. -// A commit specifying that the operation is to be performed for real -// or an abort specifying that the operation is to be rolled back and -// the record to be restored in its original format. -// -// This is handled by the module Transaction Manager. -// -// Response Signals (unsuccessful case): -// -// <---- TUPKEYREF (to LQH) -// A signal is sent back to LQH informing about the unsuccessful -// operation. In this case TUP waits for an abort signal to arrive -// before the operation record is ready for the next operation. -// This is handled by the Transaction Manager. -//------------------------------------------------------------------ -//------------------------------------------------------------------ - -// ***************************************************************** -// Signal Reception methods. -// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void execTUPKEYREQ(Signal* signal); - void disk_page_load_callback(Signal*, Uint32 op, Uint32 page); - void disk_page_load_scan_callback(Signal*, Uint32 op, Uint32 page); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void execATTRINFO(Signal* signal); -public: - void receive_attrinfo(Signal*, Uint32 op, const Uint32* data, Uint32 len); -private: - -// Trigger signals -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void execCREATE_TRIG_REQ(Signal* signal); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void execDROP_TRIG_REQ(Signal* signal); - -// ***************************************************************** -// Support methods for ATTRINFO. 
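To make the Execution Manager comment above more concrete: the definition of what to read or write arrives as zero or more ATTRINFO signals (at most 16384 words in total) followed by exactly one TUPKEYREQ, and an interpreted update carries five sections (initial reads, the interpreted body, unconditional updates, final reads, subroutines). The sketch below only illustrates that five-section layout flattened into one word buffer; the type and field names are assumptions for the example, not the real NDB signal encoding.

#include <cassert>
#include <cstdint>
#include <vector>

using Uint32 = std::uint32_t;

// Illustrative model of the five sections of an interpreted program.
struct InterpretedProgramSketch {
  std::vector<Uint32> initialReads;    // 1: reads answered before the update
  std::vector<Uint32> interpretedBody; // 2: the interpreted instructions
  std::vector<Uint32> finalUpdates;    // 3: unconditional attribute updates
  std::vector<Uint32> finalReads;      // 4: reads answered after the update
  std::vector<Uint32> subroutines;     // 5: subroutines used by section 2

  // Flatten to one buffer: five length words followed by the five sections,
  // mimicking a definition shipped ahead of the single TUPKEYREQ.
  std::vector<Uint32> flatten() const {
    const std::vector<Uint32>* s[5] = {&initialReads, &interpretedBody,
                                       &finalUpdates, &finalReads, &subroutines};
    std::vector<Uint32> buf;
    for (const auto* part : s) buf.push_back(static_cast<Uint32>(part->size()));
    for (const auto* part : s) buf.insert(buf.end(), part->begin(), part->end());
    assert(buf.size() <= 16384 && "total ATTRINFO may not exceed 16384 words");
    return buf;
  }
};

In this layout only sections 2 and 3 write attribute values, which matches the rule above that attribute updates are confined to interpreted updates and that interpreted inserts are not allowed.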
-// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void handleATTRINFOforTUPKEYREQ(Signal* signal, - const Uint32* data, - Uint32 length, - Operationrec * regOperPtr); - -// ***************************************************************** -// Setting up the environment for reads, inserts, updates and deletes. -// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int handleReadReq(Signal* signal, - Operationrec* regOperPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int handleUpdateReq(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct, - bool disk); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int handleInsertReq(Signal* signal, - Ptr regOperPtr, - Ptr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int handleDeleteReq(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct, - bool disk); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int updateStartLab(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct); - -// ***************************************************************** -// Interpreter Handling methods. -// ***************************************************************** - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int interpreterStartLab(Signal* signal, - KeyReqStruct *req_struct); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int interpreterNextLab(Signal* signal, - KeyReqStruct *req_struct, - Uint32* logMemory, - Uint32* mainProgram, - Uint32 TmainProgLen, - Uint32* subroutineProg, - Uint32 TsubroutineLen, - Uint32 * tmpArea, - Uint32 tmpAreaSz); - -// ***************************************************************** -// Signal Sending methods. 
-// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void sendReadAttrinfo(Signal* signal, - KeyReqStruct *req_struct, - Uint32 TnoOfData, - const Operationrec * regOperPtr); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void sendLogAttrinfo(Signal* signal, - Uint32 TlogSize, - Operationrec * regOperPtr); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void sendTUPKEYCONF(Signal* signal, - KeyReqStruct *req_struct, - Operationrec * regOperPtr); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ -// ***************************************************************** -// The methods that perform the actual read and update of attributes -// in the tuple. -// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int readAttributes(KeyReqStruct* req_struct, - const Uint32* inBuffer, - Uint32 inBufLen, - Uint32* outBuffer, - Uint32 TmaxRead, - bool xfrmFlag); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int updateAttributes(KeyReqStruct *req_struct, - Uint32* inBuffer, - Uint32 inBufLen); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHOneWordNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHOneWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - 
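The readAttributes()/updateAttributes() pair above drives the per-attribute readers and writers declared in this block: the in-buffer lists the requested attribute ids (in AttributeHeader format, per the tuxReadAttrs comment earlier), the out-buffer receives each value behind a header word, and TmaxRead caps how much may be produced. Below is a heavily simplified, self-contained sketch of that loop; the 16-bit id / 16-bit length packing is an assumption for the example, not the real AttributeHeader layout, and the plain -1 return stands in for error codes such as ZTRY_TO_READ_TOO_MUCH_ERROR.

#include <cstdint>
#include <vector>

using Uint32 = std::uint32_t;

// One stored attribute value of a row, already unpacked for the example.
struct AttrValue { Uint32 id; std::vector<Uint32> words; };

// Walk the requested attribute ids and emit (header, data) pairs.
// Returns words written, or -1 if maxRead would be exceeded.
int readAttributesSketch(const std::vector<AttrValue>& row,
                         const Uint32* inBuffer, Uint32 inBufLen,
                         Uint32* outBuffer, Uint32 maxRead)
{
  Uint32 out = 0;
  for (Uint32 i = 0; i < inBufLen; i++) {
    const Uint32 attrId = inBuffer[i] >> 16;             // assumed packing
    for (const AttrValue& attr : row) {
      if (attr.id != attrId) continue;
      const Uint32 len = static_cast<Uint32>(attr.words.size());
      if (out + 1 + len > maxRead) return -1;             // would overflow caller's area
      outBuffer[out++] = (attrId << 16) | len;            // header word for this value
      for (Uint32 w : attr.words) outBuffer[out++] = w;   // attribute data words
      break;
    }
  }
  return static_cast<int>(out);
}

The real implementation does not scan a vector per attribute; it dispatches through per-table arrays of reader/updater function pointers, which is what the many readFixedSize*/readVarSize* variants above exist for.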
-//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHOneWordNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHTwoWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHTwoWordNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHManyWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readFixedSizeTHZeroWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readVarSizeNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateVarSizeNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readVarSizeNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateVarSizeNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readDynFixedSize(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateDynFixedSize(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool readDynVarSize(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool updateDynVarSize(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2); - - bool 
readCharNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - - bool readCharNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - - bool readBitsNULLable(Uint32* outBuffer, KeyReqStruct *req_struct, AttributeHeader*, Uint32); - bool updateBitsNULLable(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - bool readBitsNotNULL(Uint32* outBuffer, KeyReqStruct *req_struct, AttributeHeader*, Uint32); - bool updateBitsNotNULL(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - - bool updateFixedNULLable(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - bool updateFixedNotNull(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - - bool updateVarNULLable(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - bool updateVarNotNull(Uint32* inBuffer, KeyReqStruct *req_struct, Uint32); - - - bool readDiskFixedSizeNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - - bool readDiskFixedSizeNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2); - bool readDiskVarSizeNULLable(Uint32* outBuffer, KeyReqStruct *req_struct, AttributeHeader*, Uint32); - bool readDiskVarSizeNotNULL(Uint32* outBuffer, KeyReqStruct *req_struct, AttributeHeader*, Uint32); - - bool updateDiskFixedSizeNULLable(Uint32*, KeyReqStruct*, Uint32); - bool updateDiskFixedSizeNotNULL(Uint32*, KeyReqStruct*, Uint32); - - bool updateDiskVarSizeNULLable(Uint32*, KeyReqStruct *, Uint32); - bool updateDiskVarSizeNotNULL(Uint32*, KeyReqStruct *, Uint32); - - bool readDiskBitsNULLable(Uint32*, KeyReqStruct*, AttributeHeader*, Uint32); - bool readDiskBitsNotNULL(Uint32*, KeyReqStruct*, AttributeHeader*, Uint32); - bool updateDiskBitsNULLable(Uint32*, KeyReqStruct*, Uint32); - bool updateDiskBitsNotNULL(Uint32*, KeyReqStruct*, Uint32); - - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool nullFlagCheck(KeyReqStruct *req_struct, Uint32 attrDes2); - bool disk_nullFlagCheck(KeyReqStruct *req_struct, Uint32 attrDes2); - Uint32 read_pseudo(Uint32 attrId, - KeyReqStruct *req_struct, - Uint32* outBuffer); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void setUpQueryRoutines(Tablerec* regTabPtr); - -// ***************************************************************** -// Service methods. 
-// ***************************************************************** - TransState get_trans_state(Operationrec * const); - void set_trans_state(Operationrec * const, TransState); - TupleState get_tuple_state(Operationrec * const); - void set_tuple_state(Operationrec * const, TupleState); - Uint32 get_frag_page_id(Uint32 real_page_id); - Uint32 get_fix_page_offset(Uint32 page_index, Uint32 tuple_size); - - Uint32 decr_tup_version(Uint32 tuple_version); - void set_change_mask_state(Operationrec * const, ChangeMaskState); - ChangeMaskState get_change_mask_state(Operationrec * const); - void update_change_mask_info(KeyReqStruct * const, Operationrec * const); - void set_change_mask_info(KeyReqStruct * const, Operationrec * const); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void copyAttrinfo(Operationrec * regOperPtr, Uint32* inBuffer); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void initOpConnection(Operationrec* regOperPtr); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void initOperationrec(Signal* signal); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int initStoredOperationrec(Operationrec* regOperPtr, - KeyReqStruct* req_struct, - Uint32 storedId); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - bool insertActiveOpList(OperationrecPtr, KeyReqStruct* req_struct); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void bufferTRANSID_AI(Signal* signal, BlockReference aRef, Uint32 Tlen); - -//------------------------------------------------------------------ -// Trigger handling routines -//------------------------------------------------------------------ - DLList* - findTriggerList(Tablerec* table, - TriggerType::Value ttype, - TriggerActionTime::Value ttime, - TriggerEvent::Value tevent); - - bool createTrigger(Tablerec* table, const CreateTrigReq* req); - - Uint32 dropTrigger(Tablerec* table, - const DropTrigReq* req, - BlockNumber sender); - - void - checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* tablePtr, - bool disk); - - void - checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* tablePtr, - bool disk); - - void - checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* tablePtr, - bool disk); - -#if 0 - void checkDeferredTriggers(Signal* signal, - Operationrec* regOperPtr, - Tablerec* regTablePtr); -#endif - void checkDetachedTriggers(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* regTablePtr, - bool disk); - - void fireImmediateTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* regOperPtr, - bool disk); - - void fireDeferredTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* regOperPtr); - - void fireDetachedTriggers(KeyReqStruct *req_struct, - DLList& 
triggerList, - Operationrec* regOperPtr, - bool disk); - - void executeTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* regOperPtr, - bool disk); - - void executeTrigger(KeyReqStruct *req_struct, - TupTriggerData* trigPtr, - Operationrec* regOperPtr, - bool disk); - - bool readTriggerInfo(TupTriggerData* trigPtr, - Operationrec* regOperPtr, - KeyReqStruct * req_struct, - Fragrecord* regFragPtr, - Uint32* keyBuffer, - Uint32& noPrimKey, - Uint32* afterBuffer, - Uint32& noAfterWords, - Uint32* beforeBuffer, - Uint32& noBeforeWords, - bool disk); - - void sendTrigAttrInfo(Signal* signal, - Uint32* data, - Uint32 dataLen, - bool executeDirect, - BlockReference receiverReference); - - Uint32 setAttrIds(Bitmask& attributeMask, - Uint32 noOfAttributes, - Uint32* inBuffer); - - void sendFireTrigOrd(Signal* signal, - KeyReqStruct *req_struct, - Operationrec * regOperPtr, - TupTriggerData* trigPtr, - Uint32 fragmentId, - Uint32 noPrimKeySignals, - Uint32 noBeforeSignals, - Uint32 noAfterSignals); - - bool primaryKey(Tablerec* const, Uint32); - - // these set terrorCode and return non-zero on error - - int executeTuxInsertTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - int executeTuxUpdateTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - int executeTuxDeleteTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - int addTuxEntries(Signal* signal, - Operationrec* regOperPtr, - Tablerec* regTabPtr); - - // these crash the node on error - - void executeTuxCommitTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - void executeTuxAbortTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - void removeTuxEntries(Signal* signal, - Tablerec* regTabPtr); - -// ***************************************************************** -// Error Handling routines. -// ***************************************************************** -//------------------------------------------------------------------ -//------------------------------------------------------------------ - int TUPKEY_abort(Signal* signal, int error_type); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ - void tupkeyErrorLab(Signal* signal); - void do_tup_abortreq(Signal*, Uint32 flags); - -//------------------------------------------------------------------ -//------------------------------------------------------------------ -// Methods to handle execution of TUP_COMMITREQ + TUP_ABORTREQ. -// -// Module Transaction Manager -// -// The Transaction Manager module is responsible for the commit -// and abort of operations started by the Execution Manager. -// -// Commit Operation: -// ---------------- -// -// Failures in commit processing is not allowed since that would -// leave the database in an unreliable state. Thus the only way -// to handle failures in commit processing is to crash the node. -// -// TUP_COMMITREQ can only be received in the wait state after a -// successful TUPKEYREQ which was not a read operation. -// -// Commit of Delete: -// ----------------- -// -// This will actually perform the deletion of the record unless -// other operations also are connected to the record. 
In this case -// we will set the delete state on the record that becomes the ownerd -// of the record. -// -// Commit of Update: -// ---------------- -// -// We will release the copy record where the original record was kept. -// Also here we will take special care if more operations are updating -// the record simultaneously. -// -// Commit of Insert: -// ----------------- -// -// Will simply reset the state of the operation record. -// -// Signal Diagram: -// ---> TUP_COMMITREQ (from LQH) -// <---- TUP_COMMITCONF (to LQH) -// -// -// Abort Operation: -// ---------------- -// -// Signal Diagram: -// ---> TUP_ABORTREQ (from LQH) -// <---- TUP_ABORTCONF (to LQH) -// -// Failures in abort processing is not allowed since that would -// leave the database in an unreliable state. Thus the only way -// to handle failures in abort processing is to crash the node. -// -// Abort messages can arrive at any time. It can arrive even before -// anything at all have arrived of the operation. It can arrive after -// receiving a number of ATTRINFO but before TUPKEYREQ has been received. -// It must arrive after that we sent TUPKEYREF in response to TUPKEYREQ -// and finally it can arrive after successfully performing the TUPKEYREQ -// in all cases including the read case. -//------------------------------------------------------------------ -//------------------------------------------------------------------ - -#if 0 - void checkPages(Fragrecord* regFragPtr); -#endif - Uint32 convert_byte_to_word_size(Uint32 byte_size) - { - return ((byte_size + 3) >> 2); - } - Uint32 convert_bit_to_word_size(Uint32 bit_size) - { - return ((bit_size + 31) >> 5); - } - - void prepare_initial_insert(KeyReqStruct*, Operationrec*, Tablerec*); - void fix_disk_insert_no_mem_insert(KeyReqStruct*, Operationrec*, Tablerec*); - void setup_fixed_part(KeyReqStruct* req_struct, - Operationrec* regOperPtr, - Tablerec* regTabPtr); - - void send_TUPKEYREF(Signal* signal, - Operationrec* regOperPtr); - void early_tupkey_error(Signal* signal); - - void printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit); - - bool checkUpdateOfPrimaryKey(KeyReqStruct *req_struct, - Uint32* updateBuffer, - Tablerec* regTabPtr); - - void setNullBits(Uint32*, Tablerec* regTabPtr); - bool checkNullAttributes(KeyReqStruct * const, Tablerec* const); - bool find_savepoint(OperationrecPtr& loopOpPtr, Uint32 savepointId); - bool setup_read(KeyReqStruct* req_struct, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - bool disk); - - Uint32 calculateChecksum(Tuple_header*, Tablerec* regTabPtr); - void setChecksum(Tuple_header*, Tablerec* regTabPtr); - - void complexTrigger(Signal* signal, - KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - void setTupleStatesSetOpType(Operationrec* regOperPtr, - KeyReqStruct *req_struct, - Page* pagePtr, - Uint32& opType, - OperationrecPtr& firstOpPtr); - - void findBeforeValueOperation(OperationrecPtr& befOpPtr, - OperationrecPtr firstOpPtr); - - void calculateChangeMask(Page* PagePtr, - Tablerec* regTabPtr, - KeyReqStruct * req_struct); - - void updateGcpId(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr); - - void setTupleStateOnPreviousOps(Uint32 prevOpIndex); - void copyMem(Signal* signal, Uint32 sourceIndex, Uint32 destIndex); - - void freeAllAttrBuffers(Operationrec* const regOperPtr); - void freeAttrinbufrec(Uint32 anAttrBufRec); - void removeActiveOpList(Operationrec* const 
regOperPtr, Tuple_header*); - - void updatePackedList(Signal* signal, Uint16 ahostIndex); - - void setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* regTabPtr, - const Uint32* offset); - void setUpKeyArray(Tablerec* regTabPtr); - bool addfragtotab(Tablerec* regTabPtr, Uint32 fragId, Uint32 fragIndex); - void deleteFragTab(Tablerec* regTabPtr, Uint32 fragId); - void abortAddFragOp(Signal* signal); - void releaseTabDescr(Tablerec* regTabPtr); - void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* regTabPtr); - - void initialiseRecordsLab(Signal* signal, Uint32 switchData, Uint32, Uint32); - void initializeAttrbufrec(); - void initializeCheckpointInfoRec(); - void initializeDiskBufferSegmentRecord(); - void initializeFragoperrec(); - void initializeFragrecord(); - void initializeHostBuffer(); - void initializeLocalLogInfo(); - void initializeOperationrec(); - void initializePendingFileOpenInfoRecord(); - void initializeRestartInfoRec(); - void initializeTablerec(); - void initializeTabDescr(); - void initializeUndoPage(); - - void initTab(Tablerec* regTabPtr); - - void startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2); - - void fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr); - void fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr); - void fragrefuse2Lab(Signal* signal, FragoperrecPtr fragOperPtr, FragrecordPtr regFragPtr); - void fragrefuse3Lab(Signal* signal, - FragoperrecPtr fragOperPtr, - FragrecordPtr regFragPtr, - Tablerec* regTabPtr, - Uint32 fragId); - void fragrefuse4Lab(Signal* signal, - FragoperrecPtr fragOperPtr, - FragrecordPtr regFragPtr, - Tablerec* regTabPtr, - Uint32 fragId); - void addattrrefuseLab(Signal* signal, - FragrecordPtr regFragPtr, - FragoperrecPtr fragOperPtr, - Tablerec* regTabPtr, - Uint32 fragId); - - - void releaseFragment(Signal* signal, Uint32 tableId, Uint32); - void drop_fragment_free_var_pages(Signal*); - void drop_fragment_free_extent(Signal*, TablerecPtr, FragrecordPtr, Uint32); - void drop_fragment_free_extent_log_buffer_callback(Signal*, Uint32, Uint32); - void drop_fragment_unmap_pages(Signal*, TablerecPtr, FragrecordPtr, Uint32); - void drop_fragment_unmap_page_callback(Signal* signal, Uint32, Uint32); - void drop_fragment_fsremove(Signal*, TablerecPtr, FragrecordPtr); - void drop_fragment_fsremove_done(Signal*, TablerecPtr, FragrecordPtr); - - // Initialisation - void initData(); - void initRecords(); - - void deleteScanProcedure(Signal* signal, Operationrec* regOperPtr); - void copyProcedure(Signal* signal, - TablerecPtr regTabPtr, - Operationrec* regOperPtr); - void scanProcedure(Signal* signal, - Operationrec* regOperPtr, - Uint32 lenAttrInfo); - void storedSeizeAttrinbufrecErrorLab(Signal* signal, - Operationrec* regOperPtr, - Uint32 errorCode); - bool storedProcedureAttrInfo(Signal* signal, - Operationrec* regOperPtr, - const Uint32* data, - Uint32 length, - bool copyProc); - -//----------------------------------------------------------------------------- -// Table Descriptor Memory Manager -//----------------------------------------------------------------------------- - -// Public methods - Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset); - Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset); - void freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal = true); - Uint32 getTabDescrWord(Uint32 index); - void setTabDescrWord(Uint32 index, Uint32 word); - -// Private methods - Uint32 sizeOfReadFunction(); - void removeTdArea(Uint32 tabDesRef, 
Uint32 list); - void insertTdArea(Uint32 tabDesRef, Uint32 list); - void itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal); -#ifdef VM_TRACE - void verifytabdes(); -#endif - - void seizeOpRec(OperationrecPtr& regOperPtr); - void seizeFragrecord(FragrecordPtr& regFragPtr); - void seizeFragoperrec(FragoperrecPtr& fragOperPtr); - void releaseFragoperrec(FragoperrecPtr fragOperPtr); - void releaseFragrec(FragrecordPtr); -//---------------------------------------------------------------------------- -// Page Memory Manager -//---------------------------------------------------------------------------- - -// Public methods - void allocConsPages(Uint32 noOfPagesToAllocate, - Uint32& noOfPagesAllocated, - Uint32& allocPageRef); - void returnCommonArea(Uint32 retPageRef, Uint32 retNo); - void initializePage(); - -// Private methods - void removeCommonArea(Uint32 remPageRef, Uint32 list); - void insertCommonArea(Uint32 insPageRef, Uint32 list); - void findFreeLeftNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate); - void findFreeRightNeighbours(Uint32& allocPageRef, Uint32& noPagesAllocated, Uint32 noPagesToAllocate); - Uint32 nextHigherTwoLog(Uint32 input); - -// Private data - Uint32 cfreepageList[16]; - -//------------------------------------------------------------------------------------------------------ -// Page Mapper, convert logical page id's to physical page id's -// The page mapper also handles the pages allocated to the fragment. -//------------------------------------------------------------------------------------------------------ -// -// Public methods - Uint32 getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId); - Uint32 getNoOfPages(Fragrecord* regFragPtr); - void initPageRangeSize(Uint32 size); - bool insertPageRangeTab(Fragrecord* regFragPtr, - Uint32 startPageId, - Uint32 noPages); - void releaseFragPages(Fragrecord* regFragPtr); - void initFragRange(Fragrecord* regFragPtr); - void initializePageRange(); - Uint32 getEmptyPage(Fragrecord* regFragPtr); - Uint32 allocFragPages(Fragrecord* regFragPtr, Uint32 noOfPagesAllocated); - Uint32 get_empty_var_page(Fragrecord* frag_ptr); - -// Private methods - Uint32 leafPageRangeFull(Fragrecord* regFragPtr, PageRangePtr currPageRangePtr); - void releasePagerange(PageRangePtr regPRPtr); - void seizePagerange(PageRangePtr& regPageRangePtr); - void errorHandler(Uint32 errorCode); - void allocMoreFragPages(Fragrecord* regFragPtr); - -// Private data - Uint32 cfirstfreerange; - PageRange *pageRange; - Uint32 c_noOfFreePageRanges; - Uint32 cnoOfPageRangeRec; - -//--------------------------------------------------------------- -// Variable Allocator -// Allocates and deallocates tuples of fixed size on a fragment. -//--------------------------------------------------------------- -// -// Public methods - - void init_list_sizes(void); - -// Private methods - - Uint32 get_alloc_page(Fragrecord* const, Uint32); - void update_free_page_list(Fragrecord* const, Ptr); - -#if 0 - Uint32 calc_free_list(const Tablerec* regTabPtr, Uint32 sz) const { - return regTabPtr->m_disk_alloc_info.calc_page_free_bits(sz); - } -#endif - - Uint32 calculate_free_list_impl(Uint32) const ; - void remove_free_page(Fragrecord*, Var_page*, Uint32); - void insert_free_page(Fragrecord*, Var_page*, Uint32); - -//--------------------------------------------------------------- -// Fixed Allocator -// Allocates and deallocates tuples of fixed size on a fragment. 
-//--------------------------------------------------------------- -// -// Public methods - Uint32* alloc_var_rec(Fragrecord*, Tablerec*, Uint32, Local_key*, Uint32*); - void free_var_rec(Fragrecord*, Tablerec*, Local_key*, Ptr); - Uint32* alloc_var_part(Fragrecord*, Tablerec*, Uint32, Local_key*); - int realloc_var_part(Fragrecord*, Tablerec*, - PagePtr, Var_part_ref*, Uint32, Uint32); - - void validate_page(Tablerec*, Var_page* page); - - Uint32* alloc_fix_rec(Fragrecord*const, Tablerec*const, Local_key*, - Uint32*); - void free_fix_rec(Fragrecord*, Tablerec*, Local_key*, Fix_page*); - - Uint32* alloc_fix_rowid(Fragrecord*, Tablerec*, Local_key*, Uint32 *); - Uint32* alloc_var_rowid(Fragrecord*, Tablerec*, Uint32, Local_key*, Uint32*); -// Private methods - void convertThPage(Fix_page* regPagePtr, - Tablerec*, - Uint32 mm); - - /** - * Return offset - */ - Uint32 alloc_tuple_from_page(Fragrecord* regFragPtr, - Fix_page* regPagePtr); - -//--------------------------------------------------------------- -// Temporary variables used for storing commonly used variables -// in certain modules -//--------------------------------------------------------------- - - Uint32 c_lcp_scan_op; - FragrecordPtr fragptr; - OperationrecPtr operPtr; - TablerecPtr tabptr; - -// readAttributes and updateAttributes module -//------------------------------------------------------------------------------------------------------ -// Common stored variables. Variables that have a valid value always. -//------------------------------------------------------------------------------------------------------ - Attrbufrec *attrbufrec; - Uint32 cfirstfreeAttrbufrec; - Uint32 cnoOfAttrbufrec; - Uint32 cnoFreeAttrbufrec; - - Fragoperrec *fragoperrec; - Uint32 cfirstfreeFragopr; - Uint32 cnoOfFragoprec; - - Fragrecord *fragrecord; - Uint32 cfirstfreefrag; - Uint32 cnoOfFragrec; - - HostBuffer *hostBuffer; - - ArrayPool c_operation_pool; - - ArrayPool c_page_pool; - Uint32 cnoOfAllocatedPages; - Uint32 m_max_allocate_pages; - - /* read ahead in pages during disk order scan */ - Uint32 m_max_page_read_ahead; - - Tablerec *tablerec; - Uint32 cnoOfTablerec; - - TableDescriptor *tableDescriptor; - Uint32 cnoOfTabDescrRec; - - Uint32 cdata[32]; - Uint32 cdataPages[16]; - Uint32 cpackedListIndex; - Uint32 cpackedList[MAX_NODES]; - Uint32 cfreeTdList[16]; - Uint32 clastBitMask; - Uint32 clblPageCounter; - Uint32 clblPagesPerTick; - Uint32 clblPagesPerTickAfterSr; - BlockReference clqhBlockref; - Uint32 clqhUserpointer; - Uint32 cminusOne; - BlockReference cndbcntrRef; - BlockReference cownref; - Uint32 cownNodeId; - Uint32 czero; - - // A little bit bigger to cover overwrites in copy algorithms (16384 real size). 
-#define ZATTR_BUFFER_SIZE 16384 - Uint32 clogMemBuffer[ZATTR_BUFFER_SIZE + 16]; - Uint32 coutBuffer[ZATTR_BUFFER_SIZE + 16]; - Uint32 cinBuffer[ZATTR_BUFFER_SIZE + 16]; - Uint32 ctemp_page[ZWORDS_ON_PAGE]; - Uint32 ctemp_var_record[ZWORDS_ON_PAGE]; - Uint32 totNoOfPagesAllocated; - - // Trigger variables - Uint32 c_maxTriggersPerTable; - Uint32 c_memusage_report_frequency; - - Uint32 c_errorInsert4000TableId; - Uint32 c_min_list_size[MAX_FREE_LIST + 1]; - Uint32 c_max_list_size[MAX_FREE_LIST + 1]; - - void initGlobalTemporaryVars(); - void reportMemoryUsage(Signal* signal, int incDec); - - -#ifdef VM_TRACE - struct Th { - Uint32 data[1]; - }; - friend class NdbOut& operator<<(NdbOut&, const Operationrec&); - friend class NdbOut& operator<<(NdbOut&, const Th&); -#endif - - void expand_tuple(KeyReqStruct*, Uint32 sizes[4], Tuple_header*org, - const Tablerec*, bool disk); - void shrink_tuple(KeyReqStruct*, Uint32 sizes[2], const Tablerec*, - bool disk); - - Uint32* get_ptr(Var_part_ref); - Uint32* get_ptr(PagePtr*, Var_part_ref); - Uint32* get_ptr(PagePtr*, const Local_key*, const Tablerec*); - Uint32* get_dd_ptr(PagePtr*, const Local_key*, const Tablerec*); - - /** - * prealloc space from disk - * key.m_file_no contains file no - * key.m_page_no contains disk page - * key.m_page_idx contains byte preallocated - */ - int disk_page_prealloc(Signal*, Ptr, Local_key*, Uint32); - void disk_page_prealloc_dirty_page(Disk_alloc_info&, - Ptr, Uint32, Uint32); - void disk_page_prealloc_transit_page(Disk_alloc_info&, - Ptr, Uint32, Uint32); - - void disk_page_abort_prealloc(Signal*, Fragrecord*,Local_key*, Uint32); - void disk_page_abort_prealloc_callback(Signal*, Uint32, Uint32); - void disk_page_abort_prealloc_callback_1(Signal*, Fragrecord*, - PagePtr, Uint32); - - void disk_page_prealloc_callback(Signal*, Uint32, Uint32); - void disk_page_prealloc_initial_callback(Signal*, Uint32, Uint32); - void disk_page_prealloc_callback_common(Signal*, - Ptr, - Ptr, - Ptr); - - void disk_page_alloc(Signal*, - Tablerec*, Fragrecord*, Local_key*, PagePtr, Uint32); - void disk_page_free(Signal*, - Tablerec*, Fragrecord*, Local_key*, PagePtr, Uint32); - - void disk_page_commit_callback(Signal*, Uint32 opPtrI, Uint32 page_id); - - void disk_page_log_buffer_callback(Signal*, Uint32 opPtrI, Uint32); - - void disk_page_alloc_extent_log_buffer_callback(Signal*, Uint32, Uint32); - void disk_page_free_extent_log_buffer_callback(Signal*, Uint32, Uint32); - - Uint64 disk_page_undo_alloc(Page*, const Local_key*, - Uint32 sz, Uint32 gci, Uint32 logfile_group_id); - - Uint64 disk_page_undo_update(Page*, const Local_key*, - const Uint32*, Uint32, - Uint32 gci, Uint32 logfile_group_id); - - Uint64 disk_page_undo_free(Page*, const Local_key*, - const Uint32*, Uint32 sz, - Uint32 gci, Uint32 logfile_group_id); - - void undo_createtable_callback(Signal* signal, Uint32 opPtrI, Uint32 unused); - void undo_createtable_logsync_callback(Signal* signal, Uint32, Uint32); - - void drop_table_log_buffer_callback(Signal*, Uint32, Uint32); - void drop_table_logsync_callback(Signal*, Uint32, Uint32); - - void disk_page_set_dirty(Ptr); - void restart_setup_page(Disk_alloc_info&, Ptr); - void update_extent_pos(Disk_alloc_info&, Ptr); - - /** - * Disk restart code - */ -public: - int disk_page_load_hook(Uint32 page_id); - - void disk_page_unmap_callback(Uint32 when, Uint32 page, Uint32 dirty_count); - - int disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId, - const Local_key* key, Uint32 pages); - void disk_restart_page_bits(Uint32 
tableId, Uint32 fragId, - const Local_key*, Uint32 bits); - void disk_restart_undo(Signal* signal, Uint64 lsn, - Uint32 type, const Uint32 * ptr, Uint32 len); - - struct Apply_undo - { - Uint32 m_type, m_len; - const Uint32* m_ptr; - Uint64 m_lsn; - Ptr m_table_ptr; - Ptr m_fragment_ptr; - Ptr m_page_ptr; - Ptr m_extent_ptr; - Local_key m_key; - }; - - void disk_restart_lcp_id(Uint32 table, Uint32 frag, Uint32 lcpId); - -private: - void disk_restart_undo_next(Signal*); - void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag, Uint32 lcpId); - void disk_restart_undo_callback(Signal* signal, Uint32, Uint32); - void disk_restart_undo_alloc(Apply_undo*); - void disk_restart_undo_update(Apply_undo*); - void disk_restart_undo_free(Apply_undo*); - void disk_restart_undo_page_bits(Signal*, Apply_undo*); - -#ifdef VM_TRACE - void verify_page_lists(Disk_alloc_info&); -#else - void verify_page_lists(Disk_alloc_info&) {} -#endif - - void findFirstOp(OperationrecPtr&); - void commit_operation(Signal*, Uint32, Tuple_header*, PagePtr, - Operationrec*, Fragrecord*, Tablerec*); - - void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*, - Operationrec*, Fragrecord*, Tablerec*); - - int handle_size_change_after_update(KeyReqStruct* req_struct, - Tuple_header* org, - Operationrec*, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - Uint32 sizes[4]); - - /** - * Setup all pointer on keyreqstruct to prepare for read - * req_struct->m_tuple_ptr is set to tuple to read - */ - void prepare_read(KeyReqStruct*, Tablerec* const, bool disk); -}; - -#if 0 -inline -Uint32 -Dbtup::get_frag_page_id(Uint32 real_page_id) -{ - PagePtr real_page_ptr; - real_page_ptr.i= real_page_id; - ptrCheckGuard(real_page_ptr, cnoOfPage, cpage); - return real_page_ptr.p->frag_page_id; -} -#endif - -inline -Dbtup::TransState -Dbtup::get_trans_state(Operationrec * regOperPtr) -{ - return (Dbtup::TransState)regOperPtr->op_struct.trans_state; -} - -inline -void -Dbtup::set_trans_state(Operationrec* regOperPtr, - Dbtup::TransState trans_state) -{ - regOperPtr->op_struct.trans_state= (Uint32)trans_state; -} - -inline -Dbtup::TupleState -Dbtup::get_tuple_state(Operationrec * regOperPtr) -{ - return (Dbtup::TupleState)regOperPtr->op_struct.tuple_state; -} - -inline -void -Dbtup::set_tuple_state(Operationrec* regOperPtr, - Dbtup::TupleState tuple_state) -{ - regOperPtr->op_struct.tuple_state= (Uint32)tuple_state; -} - - -inline -Uint32 -Dbtup::decr_tup_version(Uint32 tup_version) -{ - return (tup_version - 1) & ZTUP_VERSION_MASK; -} - -inline -Dbtup::ChangeMaskState -Dbtup::get_change_mask_state(Operationrec * regOperPtr) -{ - return (Dbtup::ChangeMaskState)regOperPtr->op_struct.change_mask_state; -} - -inline -void -Dbtup::set_change_mask_state(Operationrec * regOperPtr, - ChangeMaskState new_state) -{ - regOperPtr->op_struct.change_mask_state= (Uint32)new_state; -} - -inline -void -Dbtup::update_change_mask_info(KeyReqStruct * req_struct, - Operationrec * regOperPtr) -{ - if (req_struct->max_attr_id_updated == 0) { - if (get_change_mask_state(regOperPtr) == USE_SAVED_CHANGE_MASK) { - // add new changes - regOperPtr->saved_change_mask[0] |= req_struct->changeMask.getWord(0); - regOperPtr->saved_change_mask[1] |= req_struct->changeMask.getWord(1); - } - } else { - if (req_struct->no_changed_attrs < 16) { - set_change_mask_state(regOperPtr, RECALCULATE_CHANGE_MASK); - } else { - set_change_mask_state(regOperPtr, SET_ALL_MASK); - } - } -} - -inline -Uint32* -Dbtup::get_ptr(Var_part_ref ref) -{ - Ptr tmp; - return get_ptr(&tmp, ref); -} - 
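The inline accessors in the hunk above (get_trans_state(), set_trans_state(), get_tuple_state(), set_tuple_state(), decr_tup_version()) convert between enum state values and bit-packed fields of Operationrec::op_struct, and decrement the tuple version modulo ZTUP_VERSION_MASK. The following self-contained sketch shows both patterns under explicit assumptions: the bit-field widths, the numeric enum values and the 0x7fff mask are illustrative stand-ins, not the values used by Dbtup.

// Illustrative sketch of enum <-> bit-field state accessors and a masked
// version decrement; all widths and constants below are assumptions.
#include <cassert>
#include <cstdint>

enum TransState { TRANS_IDLE = 0, TRANS_STARTED = 1,
                  TRANS_TOO_MUCH_AI = 2, TRANS_ERROR_WAIT_TUPKEYREQ = 3 };
enum TupleState { TUPLE_PREPARED = 0, TUPLE_ALREADY_ABORTED = 1 };

struct OpStruct {            // stand-in for the packed Operationrec::op_struct word
  uint32_t trans_state : 4;
  uint32_t tuple_state : 4;
  uint32_t in_active_list : 1;
};

static TransState get_trans_state(const OpStruct& op) {
  return static_cast<TransState>(op.trans_state);
}
static void set_trans_state(OpStruct& op, TransState s) {
  op.trans_state = static_cast<uint32_t>(s);
}
static TupleState get_tuple_state(const OpStruct& op) {
  return static_cast<TupleState>(op.tuple_state);
}
static void set_tuple_state(OpStruct& op, TupleState s) {
  op.tuple_state = static_cast<uint32_t>(s);
}

static const uint32_t TUP_VERSION_MASK = 0x7fff;   // illustrative width only
static uint32_t decr_tup_version(uint32_t v) {
  return (v - 1) & TUP_VERSION_MASK;               // 0 wraps to the mask value
}

int main() {
  OpStruct op{};
  set_trans_state(op, TRANS_STARTED);
  set_tuple_state(op, TUPLE_PREPARED);
  assert(get_trans_state(op) == TRANS_STARTED);
  assert(get_tuple_state(op) == TUPLE_PREPARED);
  assert(decr_tup_version(0) == TUP_VERSION_MASK); // wrap-around behaviour
  return 0;
}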
-inline -Uint32* -Dbtup::get_ptr(Ptr* pagePtr, Var_part_ref ref) -{ - PagePtr tmp; - Local_key key; - ref.copyout(&key); - tmp.i = key.m_page_no; - - c_page_pool.getPtr(tmp); - memcpy(pagePtr, &tmp, sizeof(tmp)); - return ((Var_page*)tmp.p)->get_ptr(key.m_page_idx); -} - -inline -Uint32* -Dbtup::get_ptr(PagePtr* pagePtr, - const Local_key* key, const Tablerec* regTabPtr) -{ - PagePtr tmp; - tmp.i= key->m_page_no; - c_page_pool.getPtr(tmp); - memcpy(pagePtr, &tmp, sizeof(tmp)); - - return ((Fix_page*)tmp.p)-> - get_ptr(key->m_page_idx, regTabPtr->m_offsets[MM].m_fix_header_size); -} - -inline -Uint32* -Dbtup::get_dd_ptr(PagePtr* pagePtr, - const Local_key* key, const Tablerec* regTabPtr) -{ - PagePtr tmp; - tmp.i= key->m_page_no; - tmp.p= (Page*)m_global_page_pool.getPtr(tmp.i); - memcpy(pagePtr, &tmp, sizeof(tmp)); - - if(regTabPtr->m_attributes[DD].m_no_of_varsize) - return ((Var_page*)tmp.p)->get_ptr(key->m_page_idx); - else - return ((Fix_page*)tmp.p)-> - get_ptr(key->m_page_idx, regTabPtr->m_offsets[DD].m_fix_header_size); -} - -NdbOut& -operator<<(NdbOut&, const Dbtup::Tablerec&); - -inline -bool Dbtup::find_savepoint(OperationrecPtr& loopOpPtr, Uint32 savepointId) -{ - while (true) { - if (savepointId > loopOpPtr.p->savepointId) { - jam(); - return true; - } - loopOpPtr.i = loopOpPtr.p->prevActiveOp; - if (loopOpPtr.i == RNIL) { - break; - } - c_operation_pool.getPtr(loopOpPtr); - } - return false; -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp deleted file mode 100644 index 7a000eb5042..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp +++ /dev/null @@ -1,423 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_ABORT_CPP -#include "Dbtup.hpp" -#include -#include -#include - -void Dbtup::freeAllAttrBuffers(Operationrec* const regOperPtr) -{ - if (regOperPtr->storedProcedureId == RNIL) { - jam(); - freeAttrinbufrec(regOperPtr->firstAttrinbufrec); - } else { - jam(); - StoredProcPtr storedPtr; - c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId); - ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); - storedPtr.p->storedCounter--; - regOperPtr->storedProcedureId = ZNIL; - }//if - regOperPtr->firstAttrinbufrec = RNIL; - regOperPtr->lastAttrinbufrec = RNIL; - regOperPtr->m_any_value = 0; -}//Dbtup::freeAllAttrBuffers() - -void Dbtup::freeAttrinbufrec(Uint32 anAttrBuf) -{ - Uint32 Ttemp; - AttrbufrecPtr localAttrBufPtr; - Uint32 RnoFree = cnoFreeAttrbufrec; - localAttrBufPtr.i = anAttrBuf; - while (localAttrBufPtr.i != RNIL) { - jam(); - ptrCheckGuard(localAttrBufPtr, cnoOfAttrbufrec, attrbufrec); - Ttemp = localAttrBufPtr.p->attrbuf[ZBUF_NEXT]; - localAttrBufPtr.p->attrbuf[ZBUF_NEXT] = cfirstfreeAttrbufrec; - cfirstfreeAttrbufrec = localAttrBufPtr.i; - localAttrBufPtr.i = Ttemp; - RnoFree++; - }//if - cnoFreeAttrbufrec = RnoFree; -}//Dbtup::freeAttrinbufrec() - -/** - * Abort abort this operation and all after (nextActiveOp's) - */ -void Dbtup::execTUP_ABORTREQ(Signal* signal) -{ - jamEntry(); - do_tup_abortreq(signal, 0); -} - -void Dbtup::do_tup_abortreq(Signal* signal, Uint32 flags) -{ - OperationrecPtr regOperPtr; - FragrecordPtr regFragPtr; - TablerecPtr regTabPtr; - - regOperPtr.i = signal->theData[0]; - c_operation_pool.getPtr(regOperPtr); - TransState trans_state= get_trans_state(regOperPtr.p); - ndbrequire((trans_state == TRANS_STARTED) || - (trans_state == TRANS_TOO_MUCH_AI) || - (trans_state == TRANS_ERROR_WAIT_TUPKEYREQ) || - (trans_state == TRANS_IDLE)); - if (regOperPtr.p->op_struct.op_type == ZREAD) { - jam(); - freeAllAttrBuffers(regOperPtr.p); - initOpConnection(regOperPtr.p); - return; - }//if - - regFragPtr.i = regOperPtr.p->fragmentPtr; - ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); - - regTabPtr.i = regFragPtr.p->fragTableId; - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - - if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED) - { - jam(); - if (!regTabPtr.p->tuxCustomTriggers.isEmpty() && - (flags & ZSKIP_TUX_TRIGGERS) == 0) - executeTuxAbortTriggers(signal, - regOperPtr.p, - regFragPtr.p, - regTabPtr.p); - - OperationrecPtr loopOpPtr; - loopOpPtr.i = regOperPtr.p->nextActiveOp; - while (loopOpPtr.i != RNIL) { - jam(); - c_operation_pool.getPtr(loopOpPtr); - if (get_tuple_state(loopOpPtr.p) != TUPLE_ALREADY_ABORTED && - !regTabPtr.p->tuxCustomTriggers.isEmpty() && - (flags & ZSKIP_TUX_TRIGGERS) == 0) { - jam(); - executeTuxAbortTriggers(signal, - loopOpPtr.p, - regFragPtr.p, - regTabPtr.p); - } - set_tuple_state(loopOpPtr.p, TUPLE_ALREADY_ABORTED); - loopOpPtr.i = loopOpPtr.p->nextActiveOp; - } - } - - PagePtr page; - Tuple_header *tuple_ptr= (Tuple_header*) - get_ptr(&page, ®OperPtr.p->m_tuple_location, regTabPtr.p); - - Uint32 bits= tuple_ptr->m_header_bits; - if(regOperPtr.p->op_struct.op_type != ZDELETE) - { - Tuple_header *copy= (Tuple_header*) - c_undo_buffer.get_ptr(®OperPtr.p->m_copy_tuple_location); - - if(regOperPtr.p->op_struct.m_disk_preallocated) - { - jam(); - Local_key key; - 
memcpy(&key, copy->get_disk_ref_ptr(regTabPtr.p), sizeof(key)); - disk_page_abort_prealloc(signal, regFragPtr.p, &key, key.m_page_idx); - } - - - Uint32 copy_bits= copy->m_header_bits; - if(! (bits & Tuple_header::ALLOC)) - { - if(copy_bits & Tuple_header::MM_GROWN) - { - if (0) ndbout_c("abort grow"); - Ptr vpage; - Uint32 idx= regOperPtr.p->m_tuple_location.m_page_idx; - Uint32 mm_vars= regTabPtr.p->m_attributes[MM].m_no_of_varsize; - Uint32 *var_part; - - ndbassert(tuple_ptr->m_header_bits & Tuple_header::CHAINED_ROW); - - Var_part_ref *ref = tuple_ptr->get_var_part_ref_ptr(regTabPtr.p); - - Local_key tmp; - ref->copyout(&tmp); - - idx= tmp.m_page_idx; - var_part= get_ptr(&vpage, *ref); - Var_page* pageP = (Var_page*)vpage.p; - Uint32 len= pageP->get_entry_len(idx) & ~Var_page::CHAIN; - Uint32 sz = ((((mm_vars + 1) << 1) + (((Uint16*)var_part)[mm_vars]) + 3)>> 2); - ndbassert(sz <= len); - pageP->shrink_entry(idx, sz); - update_free_page_list(regFragPtr.p, vpage); - } - else if(bits & Tuple_header::MM_SHRINK) - { - if (0) ndbout_c("abort shrink"); - } - } - else if (regOperPtr.p->is_first_operation() && - regOperPtr.p->is_last_operation()) - { - /** - * Aborting last operation that performed ALLOC - */ - tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC; - tuple_ptr->m_header_bits |= Tuple_header::FREED; - } - } - else if (regOperPtr.p->is_first_operation() && - regOperPtr.p->is_last_operation()) - { - if (bits & Tuple_header::ALLOC) - { - tuple_ptr->m_header_bits &= ~(Uint32)Tuple_header::ALLOC; - tuple_ptr->m_header_bits |= Tuple_header::FREED; - } - } - - if(regOperPtr.p->is_first_operation() && regOperPtr.p->is_last_operation()) - { - if (regOperPtr.p->m_undo_buffer_space) - c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id, - regOperPtr.p->m_undo_buffer_space); - } - - removeActiveOpList(regOperPtr.p, tuple_ptr); - initOpConnection(regOperPtr.p); -} - -/* **************************************************************** */ -/* ********************** TRANSACTION ERROR MODULE **************** */ -/* **************************************************************** */ -int Dbtup::TUPKEY_abort(Signal* signal, int error_type) -{ - switch(error_type) { - case 1: -//tmupdate_alloc_error: - terrorCode= ZMEM_NOMEM_ERROR; - jam(); - break; - - case 15: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 16: - jam(); - terrorCode = ZTRY_TO_UPDATE_ERROR; - break; - - case 17: - jam(); - terrorCode = ZNO_ILLEGAL_NULL_ATTR; - break; - - case 19: - jam(); - terrorCode = ZTRY_TO_UPDATE_ERROR; - break; - - case 20: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 22: - jam(); - terrorCode = ZTOTAL_LEN_ERROR; - break; - - case 23: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 24: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 26: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 27: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 28: - jam(); - terrorCode = ZREGISTER_INIT_ERROR; - break; - - case 29: - jam(); - break; - - case 30: - jam(); - terrorCode = ZCALL_ERROR; - break; - - case 31: - jam(); - terrorCode = ZSTACK_OVERFLOW_ERROR; - break; - - case 32: - jam(); - terrorCode = ZSTACK_UNDERFLOW_ERROR; - break; - - case 33: - jam(); - terrorCode = ZNO_INSTRUCTION_ERROR; - break; - - case 34: - jam(); - terrorCode = ZOUTSIDE_OF_PROGRAM_ERROR; - break; - - case 35: - jam(); - terrorCode = ZTOO_MANY_INSTRUCTIONS_ERROR; - break; - - case 38: - jam(); - terrorCode = ZTEMPORARY_RESOURCE_FAILURE; - 
break; - - case 39: - if (get_trans_state(operPtr.p) == TRANS_TOO_MUCH_AI) { - jam(); - terrorCode = ZTOO_MUCH_ATTRINFO_ERROR; - } else if (get_trans_state(operPtr.p) == TRANS_ERROR_WAIT_TUPKEYREQ) { - jam(); - terrorCode = ZSEIZE_ATTRINBUFREC_ERROR; - } else { - ndbrequire(false); - }//if - break; - case 40: - jam(); - terrorCode = ZUNSUPPORTED_BRANCH; - break; - default: - ndbrequire(false); - break; - }//switch - tupkeyErrorLab(signal); - return -1; -} - -void Dbtup::early_tupkey_error(Signal* signal) -{ - Operationrec * const regOperPtr = operPtr.p; - ndbrequire(!regOperPtr->op_struct.in_active_list); - set_trans_state(regOperPtr, TRANS_IDLE); - set_tuple_state(regOperPtr, TUPLE_PREPARED); - initOpConnection(regOperPtr); - send_TUPKEYREF(signal, regOperPtr); -} - -void Dbtup::tupkeyErrorLab(Signal* signal) -{ - Operationrec * const regOperPtr = operPtr.p; - set_trans_state(regOperPtr, TRANS_IDLE); - set_tuple_state(regOperPtr, TUPLE_PREPARED); - - FragrecordPtr fragPtr; - fragPtr.i= regOperPtr->fragmentPtr; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - TablerecPtr tabPtr; - tabPtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - if (regOperPtr->m_undo_buffer_space && - (regOperPtr->is_first_operation() && regOperPtr->is_last_operation())) - { - c_lgman->free_log_space(fragPtr.p->m_logfile_group_id, - regOperPtr->m_undo_buffer_space); - } - - Uint32 *ptr = 0; - if (!regOperPtr->m_tuple_location.isNull()) - { - PagePtr tmp; - ptr= get_ptr(&tmp, ®OperPtr->m_tuple_location, tabPtr.p); - } - - - removeActiveOpList(regOperPtr, (Tuple_header*)ptr); - initOpConnection(regOperPtr); - send_TUPKEYREF(signal, regOperPtr); -} - -void Dbtup::send_TUPKEYREF(Signal* signal, - Operationrec* const regOperPtr) -{ - TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend(); - tupKeyRef->userRef = regOperPtr->userpointer; - tupKeyRef->errorCode = terrorCode; - sendSignal(DBLQH_REF, GSN_TUPKEYREF, signal, - TupKeyRef::SignalLength, JBB); -} - -/** - * Unlink one operation from the m_operation_ptr_i list in the tuple. - */ -void Dbtup::removeActiveOpList(Operationrec* const regOperPtr, - Tuple_header *tuple_ptr) -{ - OperationrecPtr raoOperPtr; - - if(!regOperPtr->m_copy_tuple_location.isNull()) - { - jam(); - c_undo_buffer.free_copy_tuple(®OperPtr->m_copy_tuple_location); - } - - if (regOperPtr->op_struct.in_active_list) { - regOperPtr->op_struct.in_active_list= false; - if (regOperPtr->nextActiveOp != RNIL) { - jam(); - raoOperPtr.i= regOperPtr->nextActiveOp; - c_operation_pool.getPtr(raoOperPtr); - raoOperPtr.p->prevActiveOp= regOperPtr->prevActiveOp; - } else { - jam(); - tuple_ptr->m_operation_ptr_i = regOperPtr->prevActiveOp; - } - if (regOperPtr->prevActiveOp != RNIL) { - jam(); - raoOperPtr.i= regOperPtr->prevActiveOp; - c_operation_pool.getPtr(raoOperPtr); - raoOperPtr.p->nextActiveOp= regOperPtr->nextActiveOp; - } - regOperPtr->prevActiveOp= RNIL; - regOperPtr->nextActiveOp= RNIL; - } -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp deleted file mode 100644 index 2f8151acca1..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp +++ /dev/null @@ -1,275 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_BUFFER_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include - -void Dbtup::execSEND_PACKED(Signal* signal) -{ - Uint16 hostId; - Uint32 i; - Uint32 TpackedListIndex= cpackedListIndex; - jamEntry(); - for (i= 0; i < TpackedListIndex; i++) { - jam(); - hostId= cpackedList[i]; - ndbrequire((hostId - 1) < (MAX_NODES - 1)); // Also check not zero - Uint32 TpacketTA= hostBuffer[hostId].noOfPacketsTA; - if (TpacketTA != 0) { - jam(); - BlockReference TBref= numberToRef(API_PACKED, hostId); - Uint32 TpacketLen= hostBuffer[hostId].packetLenTA; - MEMCOPY_NO_WORDS(&signal->theData[0], - &hostBuffer[hostId].packetBufferTA[0], - TpacketLen); - sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB); - hostBuffer[hostId].noOfPacketsTA= 0; - hostBuffer[hostId].packetLenTA= 0; - } - hostBuffer[hostId].inPackedList= false; - }//for - cpackedListIndex= 0; -} - -void Dbtup::bufferTRANSID_AI(Signal* signal, BlockReference aRef, - Uint32 Tlen) -{ - if (Tlen == 3) - return; - - Uint32 hostId= refToNode(aRef); - Uint32 Theader= ((refToBlock(aRef) << 16)+(Tlen-3)); - - ndbrequire(hostId < MAX_NODES); - Uint32 TpacketLen= hostBuffer[hostId].packetLenTA; - Uint32 TnoOfPackets= hostBuffer[hostId].noOfPacketsTA; - Uint32 sig0= signal->theData[0]; - Uint32 sig1= signal->theData[1]; - Uint32 sig2= signal->theData[2]; - - BlockReference TBref= numberToRef(API_PACKED, hostId); - - if ((Tlen + TpacketLen + 1) <= 25) { -// ---------------------------------------------------------------- -// There is still space in the buffer. We will copy it into the -// buffer. -// ---------------------------------------------------------------- - jam(); - updatePackedList(signal, hostId); - } else if (false && TnoOfPackets == 1) { -// ---------------------------------------------------------------- -// The buffer is full and there was only one packet buffered. We -// will send this as a normal signal. -// ---------------------------------------------------------------- - Uint32 TnewRef= numberToRef((hostBuffer[hostId].packetBufferTA[0] >> 16), - hostId); - MEMCOPY_NO_WORDS(&signal->theData[0], - &hostBuffer[hostId].packetBufferTA[1], - TpacketLen - 1); - sendSignal(TnewRef, GSN_TRANSID_AI, signal, (TpacketLen - 1), JBB); - TpacketLen= 0; - TnoOfPackets= 0; - } else { -// ---------------------------------------------------------------- -// The buffer is full but at least two packets. Send those in -// packed form. 
-// ---------------------------------------------------------------- - MEMCOPY_NO_WORDS(&signal->theData[0], - &hostBuffer[hostId].packetBufferTA[0], - TpacketLen); - sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB); - TpacketLen= 0; - TnoOfPackets= 0; - } -// ---------------------------------------------------------------- -// Copy the signal into the buffer -// ---------------------------------------------------------------- - hostBuffer[hostId].packetBufferTA[TpacketLen + 0]= Theader; - hostBuffer[hostId].packetBufferTA[TpacketLen + 1]= sig0; - hostBuffer[hostId].packetBufferTA[TpacketLen + 2]= sig1; - hostBuffer[hostId].packetBufferTA[TpacketLen + 3]= sig2; - hostBuffer[hostId].noOfPacketsTA= TnoOfPackets + 1; - hostBuffer[hostId].packetLenTA= Tlen + TpacketLen + 1; - MEMCOPY_NO_WORDS(&hostBuffer[hostId].packetBufferTA[TpacketLen + 4], - &signal->theData[25], - Tlen - 3); -} - -void Dbtup::updatePackedList(Signal* signal, Uint16 hostId) -{ - if (hostBuffer[hostId].inPackedList == false) { - Uint32 TpackedListIndex= cpackedListIndex; - jam(); - hostBuffer[hostId].inPackedList= true; - cpackedList[TpackedListIndex]= hostId; - cpackedListIndex= TpackedListIndex + 1; - } -} - -/* ---------------------------------------------------------------- */ -/* ----------------------- SEND READ ATTRINFO --------------------- */ -/* ---------------------------------------------------------------- */ -void Dbtup::sendReadAttrinfo(Signal* signal, - KeyReqStruct *req_struct, - Uint32 ToutBufIndex, - const Operationrec *regOperPtr) -{ - if(ToutBufIndex == 0) - return; - - const BlockReference recBlockref= req_struct->rec_blockref; - const Uint32 block= refToBlock(recBlockref); - const Uint32 nodeId= refToNode(recBlockref); - - bool connectedToNode= getNodeInfo(nodeId).m_connected; - const Uint32 type= getNodeInfo(nodeId).m_type; - bool is_api= (type >= NodeInfo::API && type <= NodeInfo::MGM); - bool old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); - Uint32 TpacketLen= hostBuffer[nodeId].packetLenTA; - Uint32 TpacketTA= hostBuffer[nodeId].noOfPacketsTA; - - if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){ - // Use error insert to turn routing on - jam(); - connectedToNode= false; - } - - Uint32 sig0= req_struct->tc_operation_ptr; - Uint32 sig1= req_struct->trans_id1; - Uint32 sig2= req_struct->trans_id2; - - TransIdAI * transIdAI= (TransIdAI *)signal->getDataPtrSend(); - transIdAI->connectPtr= sig0; - transIdAI->transId[0]= sig1; - transIdAI->transId[1]= sig2; - - if (connectedToNode){ - /** - * Own node -> execute direct - */ - if(nodeId != getOwnNodeId()){ - jam(); - - /** - * Send long sig - */ - if (ToutBufIndex >= 22 && is_api && !old_dest) { - jam(); - /** - * Flush buffer so that order is maintained - */ - if (TpacketTA != 0) { - jam(); - BlockReference TBref = numberToRef(API_PACKED, nodeId); - MEMCOPY_NO_WORDS(&signal->theData[0], - &hostBuffer[nodeId].packetBufferTA[0], - TpacketLen); - sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB); - hostBuffer[nodeId].noOfPacketsTA = 0; - hostBuffer[nodeId].packetLenTA = 0; - transIdAI->connectPtr = sig0; - transIdAI->transId[0] = sig1; - transIdAI->transId[1] = sig2; - }//if - LinearSectionPtr ptr[3]; - ptr[0].p= &signal->theData[25]; - ptr[0].sz= ToutBufIndex; - sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3, JBB, ptr, 1); - return; - } - - /** - * short sig + api -> buffer - */ -#ifndef NDB_NO_DROPPED_SIGNAL - if (ToutBufIndex < 22 && is_api){ - jam(); - bufferTRANSID_AI(signal, recBlockref, 3+ToutBufIndex); 
- return; - } -#endif - - /** - * rest -> old send sig - */ - Uint32 * src= signal->theData+25; - if (ToutBufIndex >= 22){ - do { - jam(); - MEMCOPY_NO_WORDS(&signal->theData[3], src, 22); - sendSignal(recBlockref, GSN_TRANSID_AI, signal, 25, JBB); - ToutBufIndex -= 22; - src += 22; - } while(ToutBufIndex >= 22); - } - - if (ToutBufIndex > 0){ - jam(); - MEMCOPY_NO_WORDS(&signal->theData[3], src, ToutBufIndex); - sendSignal(recBlockref, GSN_TRANSID_AI, signal, 3+ToutBufIndex, JBB); - } - return; - } - EXECUTE_DIRECT(block, GSN_TRANSID_AI, signal, 3 + ToutBufIndex); - jamEntry(); - return; - } - - /** - * If this node does not have a direct connection - * to the receiving node we want to send the signals - * routed via the node that controls this read - */ - Uint32 routeBlockref= req_struct->TC_ref; - - if (true){ // TODO is_api && !old_dest){ - jam(); - transIdAI->attrData[0]= recBlockref; - LinearSectionPtr ptr[3]; - ptr[0].p= &signal->theData[25]; - ptr[0].sz= ToutBufIndex; - sendSignal(routeBlockref, GSN_TRANSID_AI_R, signal, 4, JBB, ptr, 1); - return; - } - - /** - * Fill in a TRANSID_AI signal, use last word to store - * final destination and send it to route node - * as signal TRANSID_AI_R (R as in Routed) - */ - Uint32 tot= ToutBufIndex; - Uint32 sent= 0; - Uint32 maxLen= TransIdAI::DataLength - 1; - while (sent < tot) { - jam(); - Uint32 dataLen= (tot - sent > maxLen) ? maxLen : tot - sent; - Uint32 sigLen= dataLen + TransIdAI::HeaderLength + 1; - MEMCOPY_NO_WORDS(&transIdAI->attrData, - &signal->theData[25+sent], - dataLen); - // Set final destination in last word - transIdAI->attrData[dataLen]= recBlockref; - - sendSignal(routeBlockref, GSN_TRANSID_AI_R, - signal, sigLen, JBB); - sent += dataLen; - } -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp deleted file mode 100644 index d65ebf26eee..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp +++ /dev/null @@ -1,749 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_COMMIT_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "../dblqh/Dblqh.hpp" - -void Dbtup::execTUP_DEALLOCREQ(Signal* signal) -{ - TablerecPtr regTabPtr; - FragrecordPtr regFragPtr; - Uint32 frag_page_id, frag_id; - - jamEntry(); - - frag_id= signal->theData[0]; - regTabPtr.i= signal->theData[1]; - frag_page_id= signal->theData[2]; - Uint32 page_index= signal->theData[3]; - - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - - getFragmentrec(regFragPtr, frag_id, regTabPtr.p); - ndbassert(regFragPtr.p != NULL); - - if (! 
(((frag_page_id << MAX_TUPLES_BITS) + page_index) == ~ (Uint32) 0)) - { - Local_key tmp; - tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id); - tmp.m_page_idx= page_index; - - PagePtr pagePtr; - Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p); - - ndbassert(ptr->m_header_bits & Tuple_header::FREE); - - if (ptr->m_header_bits & Tuple_header::LCP_KEEP) - { - ndbassert(! (ptr->m_header_bits & Tuple_header::FREED)); - ptr->m_header_bits |= Tuple_header::FREED; - return; - } - - if (regTabPtr.p->m_attributes[MM].m_no_of_varsize) - { - jam(); - free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr); - } else { - free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p); - } - } -} - -void Dbtup::execTUP_WRITELOG_REQ(Signal* signal) -{ - jamEntry(); - OperationrecPtr loopOpPtr; - loopOpPtr.i= signal->theData[0]; - Uint32 gci= signal->theData[1]; - c_operation_pool.getPtr(loopOpPtr); - while (loopOpPtr.p->prevActiveOp != RNIL) { - jam(); - loopOpPtr.i= loopOpPtr.p->prevActiveOp; - c_operation_pool.getPtr(loopOpPtr); - } - do { - ndbrequire(get_trans_state(loopOpPtr.p) == TRANS_STARTED); - signal->theData[0]= loopOpPtr.p->userpointer; - signal->theData[1]= gci; - if (loopOpPtr.p->nextActiveOp == RNIL) { - jam(); - EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); - return; - } - jam(); - EXECUTE_DIRECT(DBLQH, GSN_LQH_WRITELOG_REQ, signal, 2); - jamEntry(); - loopOpPtr.i= loopOpPtr.p->nextActiveOp; - c_operation_pool.getPtr(loopOpPtr); - } while (true); -} - -/* ---------------------------------------------------------------- */ -/* INITIALIZATION OF ONE CONNECTION RECORD TO PREPARE FOR NEXT OP. */ -/* ---------------------------------------------------------------- */ -void Dbtup::initOpConnection(Operationrec* regOperPtr) -{ - set_tuple_state(regOperPtr, TUPLE_ALREADY_ABORTED); - set_trans_state(regOperPtr, TRANS_IDLE); - regOperPtr->currentAttrinbufLen= 0; - regOperPtr->op_struct.op_type= ZREAD; - regOperPtr->op_struct.m_disk_preallocated= 0; - regOperPtr->op_struct.m_load_diskpage_on_commit= 0; - regOperPtr->op_struct.m_wait_log_buffer= 0; - regOperPtr->op_struct.in_active_list = false; - regOperPtr->m_undo_buffer_space= 0; -} - -static -inline -bool -operator>(const Local_key& key1, const Local_key& key2) -{ - return key1.m_page_no > key2.m_page_no || - (key1.m_page_no == key2.m_page_no && key1.m_page_idx > key2.m_page_idx); -} - -void -Dbtup::dealloc_tuple(Signal* signal, - Uint32 gci, - Page* page, - Tuple_header* ptr, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op; - Uint32 lcp_keep_list = regFragPtr->m_lcp_keep_list; - - Uint32 bits = ptr->m_header_bits; - Uint32 extra_bits = Tuple_header::FREED; - if (bits & Tuple_header::DISK_PART) - { - jam(); - Local_key disk; - memcpy(&disk, ptr->get_disk_ref_ptr(regTabPtr), sizeof(disk)); - PagePtr tmpptr; - tmpptr.i = m_pgman.m_ptr.i; - tmpptr.p = reinterpret_cast(m_pgman.m_ptr.p); - disk_page_free(signal, regTabPtr, regFragPtr, - &disk, tmpptr, gci); - } - - if (! 
(bits & (Tuple_header::LCP_SKIP | Tuple_header::ALLOC)) && - lcpScan_ptr_i != RNIL) - { - jam(); - ScanOpPtr scanOp; - c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i); - Local_key rowid = regOperPtr->m_tuple_location; - Local_key scanpos = scanOp.p->m_scanPos.m_key; - rowid.m_page_no = page->frag_page_id; - if (rowid > scanpos) - { - jam(); - extra_bits = Tuple_header::LCP_KEEP; // Note REMOVE FREE - ptr->m_operation_ptr_i = lcp_keep_list; - regFragPtr->m_lcp_keep_list = rowid.ref(); - } - } - - ptr->m_header_bits = bits | extra_bits; - - if (regTabPtr->m_bits & Tablerec::TR_RowGCI) - { - jam(); - * ptr->get_mm_gci(regTabPtr) = gci; - } -} - -void -Dbtup::commit_operation(Signal* signal, - Uint32 gci, - Tuple_header* tuple_ptr, - PagePtr pagePtr, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - ndbassert(regOperPtr->op_struct.op_type != ZDELETE); - - Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op; - Uint32 save= tuple_ptr->m_operation_ptr_i; - Uint32 bits= tuple_ptr->m_header_bits; - - Tuple_header *disk_ptr= 0; - Tuple_header *copy= (Tuple_header*) - c_undo_buffer.get_ptr(®OperPtr->m_copy_tuple_location); - - Uint32 copy_bits= copy->m_header_bits; - - Uint32 fixsize= regTabPtr->m_offsets[MM].m_fix_header_size; - Uint32 mm_vars= regTabPtr->m_attributes[MM].m_no_of_varsize; - if(mm_vars == 0) - { - jam(); - memcpy(tuple_ptr, copy, 4*fixsize); - disk_ptr= (Tuple_header*)(((Uint32*)copy)+fixsize); - } - else - { - jam(); - /** - * Var_part_ref is only stored in *allocated* tuple - * so memcpy from copy, will over write it... - * hence subtle copyout/assign... - */ - Local_key tmp; - Var_part_ref *ref= tuple_ptr->get_var_part_ref_ptr(regTabPtr); - ref->copyout(&tmp); - - memcpy(tuple_ptr, copy, 4*fixsize); - ref->assign(&tmp); - - PagePtr vpagePtr; - Uint32 *dst= get_ptr(&vpagePtr, *ref); - Var_page* vpagePtrP = (Var_page*)vpagePtr.p; - Uint32 *src= copy->get_end_of_fix_part_ptr(regTabPtr); - Uint32 sz= ((mm_vars + 1) << 1) + (((Uint16*)src)[mm_vars]); - ndbassert(4*vpagePtrP->get_entry_len(tmp.m_page_idx) >= sz); - memcpy(dst, src, sz); - - copy_bits |= Tuple_header::CHAINED_ROW; - - if(copy_bits & Tuple_header::MM_SHRINK) - { - jam(); - vpagePtrP->shrink_entry(tmp.m_page_idx, (sz + 3) >> 2); - update_free_page_list(regFragPtr, vpagePtr); - } - - disk_ptr = (Tuple_header*)(((Uint32*)copy)+fixsize+((sz + 3) >> 2)); - } - - if (regTabPtr->m_no_of_disk_attributes && - (copy_bits & Tuple_header::DISK_INLINE)) - { - jam(); - Local_key key; - memcpy(&key, copy->get_disk_ref_ptr(regTabPtr), sizeof(Local_key)); - Uint32 logfile_group_id= regFragPtr->m_logfile_group_id; - - PagePtr diskPagePtr = *(PagePtr*)&m_pgman.m_ptr; - ndbassert(diskPagePtr.p->m_page_no == key.m_page_no); - ndbassert(diskPagePtr.p->m_file_no == key.m_file_no); - Uint32 sz, *dst; - if(copy_bits & Tuple_header::DISK_ALLOC) - { - jam(); - disk_page_alloc(signal, regTabPtr, regFragPtr, &key, diskPagePtr, gci); - } - - if(regTabPtr->m_attributes[DD].m_no_of_varsize == 0) - { - jam(); - sz= regTabPtr->m_offsets[DD].m_fix_header_size; - dst= ((Fix_page*)diskPagePtr.p)->get_ptr(key.m_page_idx, sz); - } - else - { - jam(); - dst= ((Var_page*)diskPagePtr.p)->get_ptr(key.m_page_idx); - sz= ((Var_page*)diskPagePtr.p)->get_entry_len(key.m_page_idx); - } - - if(! 
(copy_bits & Tuple_header::DISK_ALLOC)) - { - jam(); - disk_page_undo_update(diskPagePtr.p, - &key, dst, sz, gci, logfile_group_id); - } - - memcpy(dst, disk_ptr, 4*sz); - memcpy(tuple_ptr->get_disk_ref_ptr(regTabPtr), &key, sizeof(Local_key)); - - ndbassert(! (disk_ptr->m_header_bits & Tuple_header::FREE)); - copy_bits |= Tuple_header::DISK_PART; - } - - if(lcpScan_ptr_i != RNIL && (bits & Tuple_header::ALLOC)) - { - jam(); - ScanOpPtr scanOp; - c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i); - Local_key rowid = regOperPtr->m_tuple_location; - Local_key scanpos = scanOp.p->m_scanPos.m_key; - rowid.m_page_no = pagePtr.p->frag_page_id; - if(rowid > scanpos) - { - jam(); - copy_bits |= Tuple_header::LCP_SKIP; - } - } - - Uint32 clear= - Tuple_header::ALLOC | Tuple_header::FREE | - Tuple_header::DISK_ALLOC | Tuple_header::DISK_INLINE | - Tuple_header::MM_SHRINK | Tuple_header::MM_GROWN; - copy_bits &= ~(Uint32)clear; - - tuple_ptr->m_header_bits= copy_bits; - tuple_ptr->m_operation_ptr_i= save; - - if (regTabPtr->m_bits & Tablerec::TR_RowGCI) - { - jam(); - * tuple_ptr->get_mm_gci(regTabPtr) = gci; - } - - if (regTabPtr->m_bits & Tablerec::TR_Checksum) { - jam(); - setChecksum(tuple_ptr, regTabPtr); - } -} - -void -Dbtup::disk_page_commit_callback(Signal* signal, - Uint32 opPtrI, Uint32 page_id) -{ - Uint32 hash_value; - Uint32 gci; - OperationrecPtr regOperPtr; - - jamEntry(); - - c_operation_pool.getPtr(regOperPtr, opPtrI); - c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); - - TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr(); - - tupCommitReq->opPtr= opPtrI; - tupCommitReq->hashValue= hash_value; - tupCommitReq->gci= gci; - tupCommitReq->diskpage = page_id; - - regOperPtr.p->op_struct.m_load_diskpage_on_commit= 0; - regOperPtr.p->m_commit_disk_callback_page= page_id; - m_global_page_pool.getPtr(m_pgman.m_ptr, page_id); - - { - PagePtr tmp; - tmp.i = m_pgman.m_ptr.i; - tmp.p = reinterpret_cast(m_pgman.m_ptr.p); - disk_page_set_dirty(tmp); - } - - execTUP_COMMITREQ(signal); - if(signal->theData[0] == 0) - { - jam(); - c_lqh->tupcommit_conf_callback(signal, regOperPtr.p->userpointer); - } -} - -void -Dbtup::disk_page_log_buffer_callback(Signal* signal, - Uint32 opPtrI, - Uint32 unused) -{ - Uint32 hash_value; - Uint32 gci; - OperationrecPtr regOperPtr; - - jamEntry(); - - c_operation_pool.getPtr(regOperPtr, opPtrI); - c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci); - Uint32 page= regOperPtr.p->m_commit_disk_callback_page; - - TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr(); - - tupCommitReq->opPtr= opPtrI; - tupCommitReq->hashValue= hash_value; - tupCommitReq->gci= gci; - tupCommitReq->diskpage = page; - - ndbassert(regOperPtr.p->op_struct.m_load_diskpage_on_commit == 0); - regOperPtr.p->op_struct.m_wait_log_buffer= 0; - m_global_page_pool.getPtr(m_pgman.m_ptr, page); - - execTUP_COMMITREQ(signal); - ndbassert(signal->theData[0] == 0); - - c_lqh->tupcommit_conf_callback(signal, regOperPtr.p->userpointer); -} - -/** - * Move to the first operation performed on this tuple - */ -void -Dbtup::findFirstOp(OperationrecPtr & firstPtr) -{ - jam(); - printf("Detect out-of-order commit(%u) -> ", firstPtr.i); - ndbassert(!firstPtr.p->is_first_operation()); - while(firstPtr.p->prevActiveOp != RNIL) - { - firstPtr.i = firstPtr.p->prevActiveOp; - c_operation_pool.getPtr(firstPtr); - } - ndbout_c("%u", firstPtr.i); -} - -/* ----------------------------------------------------------------- */ -/* --------------- COMMIT THIS PART OF 
A TRANSACTION --------------- */ -/* ----------------------------------------------------------------- */ -void Dbtup::execTUP_COMMITREQ(Signal* signal) -{ - FragrecordPtr regFragPtr; - OperationrecPtr regOperPtr; - TablerecPtr regTabPtr; - KeyReqStruct req_struct; - TransState trans_state; - Uint32 no_of_fragrec, no_of_tablerec; - - TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr(); - - regOperPtr.i= tupCommitReq->opPtr; - Uint32 hash_value= tupCommitReq->hashValue; - Uint32 gci = tupCommitReq->gci; - - jamEntry(); - - c_operation_pool.getPtr(regOperPtr); - - regFragPtr.i= regOperPtr.p->fragmentPtr; - trans_state= get_trans_state(regOperPtr.p); - - no_of_fragrec= cnoOfFragrec; - - ndbrequire(trans_state == TRANS_STARTED); - ptrCheckGuard(regFragPtr, no_of_fragrec, fragrecord); - - no_of_tablerec= cnoOfTablerec; - regTabPtr.i= regFragPtr.p->fragTableId; - hash_value= tupCommitReq->hashValue; - gci= tupCommitReq->gci; - - req_struct.signal= signal; - req_struct.hash_value= hash_value; - req_struct.gci= gci; - regOperPtr.p->m_commit_disk_callback_page = tupCommitReq->diskpage; - -#ifdef VM_TRACE - if (tupCommitReq->diskpage == RNIL) - { - m_pgman.m_ptr.i = RNIL; - m_pgman.m_ptr.p = 0; - req_struct.m_disk_page_ptr.i = RNIL; - req_struct.m_disk_page_ptr.p = 0; - } -#endif - - ptrCheckGuard(regTabPtr, no_of_tablerec, tablerec); - - PagePtr page; - Tuple_header* tuple_ptr= (Tuple_header*) - get_ptr(&page, ®OperPtr.p->m_tuple_location, regTabPtr.p); - - /** - * NOTE: This has to be run before potential time-slice when - * waiting for disk, as otherwise the "other-ops" in a multi-op - * commit might run while we're waiting for disk - * - */ - if (!regTabPtr.p->tuxCustomTriggers.isEmpty()) - { - if(get_tuple_state(regOperPtr.p) == TUPLE_PREPARED) - { - jam(); - - OperationrecPtr loopPtr = regOperPtr; - if (unlikely(!regOperPtr.p->is_first_operation())) - { - findFirstOp(loopPtr); - } - - /** - * Execute all tux triggers at first commit - * since previous tuple is otherwise removed... 
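The commit code below first rewinds to the first operation performed on the tuple (findFirstOp follows prevActiveOp back to the head) and then visits every operation in order through nextActiveOp. A minimal stand-alone sketch of that list discipline, using simplified stand-in types rather than the real Dbtup structures:

#include <cassert>
#include <cstdio>
#include <vector>

static const unsigned NIL = ~0u;            // "no record", like RNIL

struct Op {
  unsigned prevActiveOp;
  unsigned nextActiveOp;
  const char* what;
};

// Rewind to the head of the chain, as findFirstOp() does via prevActiveOp.
static unsigned find_first(const std::vector<Op>& pool, unsigned i) {
  while (pool[i].prevActiveOp != NIL)
    i = pool[i].prevActiveOp;
  return i;
}

int main() {
  // Three operations on the same tuple: an insert followed by two updates.
  std::vector<Op> pool(3);
  pool[0] = { NIL, 1, "insert"   };
  pool[1] = { 0,   2, "update#1" };
  pool[2] = { 1, NIL, "update#2" };

  // Commit may arrive pointing at any operation in the chain; rewind first,
  // then walk forward, which is what the trigger loop does before marking
  // each operation TUPLE_TO_BE_COMMITTED.
  unsigned i = find_first(pool, 2);
  assert(i == 0);
  for (; i != NIL; i = pool[i].nextActiveOp)
    std::printf("commit-visit %s\n", pool[i].what);
  return 0;
}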
- */ - jam(); - goto first; - while(loopPtr.i != RNIL) - { - c_operation_pool.getPtr(loopPtr); - first: - executeTuxCommitTriggers(signal, - loopPtr.p, - regFragPtr.p, - regTabPtr.p); - set_tuple_state(loopPtr.p, TUPLE_TO_BE_COMMITTED); - loopPtr.i = loopPtr.p->nextActiveOp; - } - } - } - - bool get_page = false; - if(regOperPtr.p->op_struct.m_load_diskpage_on_commit) - { - jam(); - Page_cache_client::Request req; - - /** - * Only last op on tuple needs "real" commit, - * hence only this one should have m_load_diskpage_on_commit - */ - ndbassert(tuple_ptr->m_operation_ptr_i == regOperPtr.i); - - /** - * Check for page - */ - if(!regOperPtr.p->m_copy_tuple_location.isNull()) - { - jam(); - Tuple_header* tmp= (Tuple_header*) - c_undo_buffer.get_ptr(®OperPtr.p->m_copy_tuple_location); - - memcpy(&req.m_page, - tmp->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key)); - - if (unlikely(regOperPtr.p->op_struct.op_type == ZDELETE && - tmp->m_header_bits & Tuple_header::DISK_ALLOC)) - { - jam(); - /** - * Insert+Delete - */ - regOperPtr.p->op_struct.m_load_diskpage_on_commit = 0; - regOperPtr.p->op_struct.m_wait_log_buffer = 0; - disk_page_abort_prealloc(signal, regFragPtr.p, - &req.m_page, req.m_page.m_page_idx); - - c_lgman->free_log_space(regFragPtr.p->m_logfile_group_id, - regOperPtr.p->m_undo_buffer_space); - goto skip_disk; - if (0) ndbout_c("insert+delete"); - jamEntry(); - goto skip_disk; - } - } - else - { - jam(); - // initial delete - ndbassert(regOperPtr.p->op_struct.op_type == ZDELETE); - memcpy(&req.m_page, - tuple_ptr->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key)); - - ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART); - } - req.m_callback.m_callbackData= regOperPtr.i; - req.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_page_commit_callback); - - /* - * Consider commit to be correlated. Otherwise pk op + commit makes - * the page hot. XXX move to TUP which knows better. 
- */ - int flags= regOperPtr.p->op_struct.op_type | - Page_cache_client::COMMIT_REQ | Page_cache_client::CORR_REQ; - int res= m_pgman.get_page(signal, req, flags); - switch(res){ - case 0: - /** - * Timeslice - */ - jam(); - signal->theData[0] = 1; - return; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - default: - jam(); - } - get_page = true; - - { - PagePtr tmpptr; - tmpptr.i = m_pgman.m_ptr.i; - tmpptr.p = reinterpret_cast(m_pgman.m_ptr.p); - disk_page_set_dirty(tmpptr); - } - - regOperPtr.p->m_commit_disk_callback_page= res; - regOperPtr.p->op_struct.m_load_diskpage_on_commit= 0; - } - - if(regOperPtr.p->op_struct.m_wait_log_buffer) - { - jam(); - /** - * Only last op on tuple needs "real" commit, - * hence only this one should have m_wait_log_buffer - */ - ndbassert(tuple_ptr->m_operation_ptr_i == regOperPtr.i); - - Callback cb; - cb.m_callbackData= regOperPtr.i; - cb.m_callbackFunction = - safe_cast(&Dbtup::disk_page_log_buffer_callback); - Uint32 sz= regOperPtr.p->m_undo_buffer_space; - - Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id); - int res= lgman.get_log_buffer(signal, sz, &cb); - jamEntry(); - switch(res){ - case 0: - jam(); - signal->theData[0] = 1; - return; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - default: - jam(); - } - } - - assert(tuple_ptr); -skip_disk: - req_struct.m_tuple_ptr = tuple_ptr; - - Uint32 nextOp = regOperPtr.p->nextActiveOp; - Uint32 prevOp = regOperPtr.p->prevActiveOp; - /** - * The trigger code (which is shared between detached/imediate) - * check op-list to check were to read before values from - * detached triggers should always read from original tuple value - * from before transaction start, not from any intermediate update - * - * Setting the op-list has this effect - */ - regOperPtr.p->nextActiveOp = RNIL; - regOperPtr.p->prevActiveOp = RNIL; - if(tuple_ptr->m_operation_ptr_i == regOperPtr.i) - { - jam(); - /** - * Perform "real" commit - */ - Uint32 disk = regOperPtr.p->m_commit_disk_callback_page; - set_change_mask_info(&req_struct, regOperPtr.p); - checkDetachedTriggers(&req_struct, regOperPtr.p, regTabPtr.p, - disk != RNIL); - - tuple_ptr->m_operation_ptr_i = RNIL; - - if(regOperPtr.p->op_struct.op_type != ZDELETE) - { - jam(); - commit_operation(signal, gci, tuple_ptr, page, - regOperPtr.p, regFragPtr.p, regTabPtr.p); - } - else - { - jam(); - if (get_page) - ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART); - dealloc_tuple(signal, gci, page.p, tuple_ptr, - regOperPtr.p, regFragPtr.p, regTabPtr.p); - } - } - - if (nextOp != RNIL) - { - c_operation_pool.getPtr(nextOp)->prevActiveOp = prevOp; - } - - if (prevOp != RNIL) - { - c_operation_pool.getPtr(prevOp)->nextActiveOp = nextOp; - } - - if(!regOperPtr.p->m_copy_tuple_location.isNull()) - { - jam(); - c_undo_buffer.free_copy_tuple(®OperPtr.p->m_copy_tuple_location); - } - - initOpConnection(regOperPtr.p); - signal->theData[0] = 0; -} - -void -Dbtup::set_change_mask_info(KeyReqStruct * const req_struct, - Operationrec * const regOperPtr) -{ - ChangeMaskState state = get_change_mask_state(regOperPtr); - if (state == USE_SAVED_CHANGE_MASK) { - jam(); - req_struct->changeMask.setWord(0, regOperPtr->saved_change_mask[0]); - req_struct->changeMask.setWord(1, regOperPtr->saved_change_mask[1]); - } else if (state == RECALCULATE_CHANGE_MASK) { - jam(); - // Recompute change mask, for now set all bits - req_struct->changeMask.set(); - } else if (state == SET_ALL_MASK) { - jam(); - req_struct->changeMask.set(); - } else { - 
jam(); - ndbrequire(state == DELETE_CHANGES); - req_struct->changeMask.set(); - } -} - -void -Dbtup::calculateChangeMask(Page* const pagePtr, - Tablerec* const regTabPtr, - KeyReqStruct * const req_struct) -{ - OperationrecPtr loopOpPtr; - Uint32 saved_word1= 0; - Uint32 saved_word2= 0; - loopOpPtr.i= req_struct->m_tuple_ptr->m_operation_ptr_i; - do { - c_operation_pool.getPtr(loopOpPtr); - ndbrequire(loopOpPtr.p->op_struct.op_type == ZUPDATE); - ChangeMaskState change_mask= get_change_mask_state(loopOpPtr.p); - if (change_mask == USE_SAVED_CHANGE_MASK) { - jam(); - saved_word1|= loopOpPtr.p->saved_change_mask[0]; - saved_word2|= loopOpPtr.p->saved_change_mask[1]; - } else if (change_mask == RECALCULATE_CHANGE_MASK) { - jam(); - //Recompute change mask, for now set all bits - req_struct->changeMask.set(); - return; - } else { - ndbrequire(change_mask == SET_ALL_MASK); - jam(); - req_struct->changeMask.set(); - return; - } - loopOpPtr.i= loopOpPtr.p->prevActiveOp; - } while (loopOpPtr.i != RNIL); - req_struct->changeMask.setWord(0, saved_word1); - req_struct->changeMask.setWord(1, saved_word2); -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp deleted file mode 100644 index 7d88587cb35..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp +++ /dev/null @@ -1,450 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_DEBUG_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include -#include -#include - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* ------------------------ DEBUG MODULE -------------------------- */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ -void Dbtup::execDEBUG_SIG(Signal* signal) -{ - PagePtr regPagePtr; - jamEntry(); - regPagePtr.i = signal->theData[0]; - c_page_pool.getPtr(regPagePtr); -}//Dbtup::execDEBUG_SIG() - -#ifdef TEST_MR -#include - -void startTimer(struct timespec *tp) -{ - clock_gettime(CLOCK_REALTIME, tp); -}//startTimer() - -int stopTimer(struct timespec *tp) -{ - double timer_count; - struct timespec theStopTime; - clock_gettime(CLOCK_REALTIME, &theStopTime); - timer_count = (double)(1000000*((double)theStopTime.tv_sec - (double)tp->tv_sec)) + - (double)((double)((double)theStopTime.tv_nsec - (double)tp->tv_nsec)/(double)1000); - return (int)timer_count; -}//stopTimer() - -#endif // end TEST_MR - -struct Chunk { - Uint32 pageId; - Uint32 pageCount; -}; - -void -Dbtup::reportMemoryUsage(Signal* signal, int incDec){ - signal->theData[0] = NDB_LE_MemoryUsage; - signal->theData[1] = incDec; - signal->theData[2] = sizeof(Page); - signal->theData[3] = cnoOfAllocatedPages; - signal->theData[4] = c_page_pool.getSize(); - signal->theData[5] = DBTUP; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 6, JBB); -} - -#ifdef VM_TRACE -extern Uint32 fc_left, fc_right, fc_remove; -#endif - -void -Dbtup::execDUMP_STATE_ORD(Signal* signal) -{ - Uint32 type = signal->theData[0]; - if(type == DumpStateOrd::DumpPageMemory && signal->getLength() == 1){ - reportMemoryUsage(signal, 0); - return; - } - DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0]; - -#if 0 - if (type == 100) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 2; - req->secondaryTableId = RNIL; - req->userPtr = 2; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 101) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 4; - req->secondaryTableId = 5; - req->userPtr = 4; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 102) { - RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); - req->primaryTableId = 6; - req->secondaryTableId = 8; - req->userPtr = 6; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_REL_TABMEMREQ, signal, - RelTabMemReq::SignalLength, JBB); - return; - }//if - if (type == 103) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 2; - req->secondaryTableId = RNIL; - req->userPtr = 2; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if - if (type == 104) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 4; - req->secondaryTableId = 5; - req->userPtr 
= 4; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if - if (type == 105) { - DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); - req->primaryTableId = 6; - req->secondaryTableId = 8; - req->userPtr = 6; - req->userRef = DBDICT_REF; - sendSignal(cownref, GSN_DROP_TABFILEREQ, signal, - DropTabFileReq::SignalLength, JBB); - return; - }//if -#endif -#ifdef ERROR_INSERT - if (type == DumpStateOrd::EnableUndoDelayDataWrite) { - ndbout << "Dbtup:: delay write of datapages for table = " - << dumpState->args[1]<< endl; - c_errorInsert4000TableId = dumpState->args[1]; - SET_ERROR_INSERT_VALUE(4000); - return; - }//if -#endif -#if defined VM_TRACE - if (type == 1211 || type == 1212 || type == 1213){ - Uint32 seed = time(0); - if (signal->getLength() > 1) - seed = signal->theData[1]; - ndbout_c("Startar modul test av Page Manager (seed: 0x%x)", seed); - srand(seed); - - Vector chunks; - const Uint32 LOOPS = 1000; - Uint32 sum_req = 0; - Uint32 sum_conf = 0; - Uint32 sum_loop = 0; - Uint32 max_loop = 0; - for(Uint32 i = 0; i> 3) + (sum_conf >> 4); - } - switch(c){ - case 0:{ // Release - const int ch = rand() % chunks.size(); - Chunk chunk = chunks[ch]; - chunks.erase(ch); - returnCommonArea(chunk.pageId, chunk.pageCount); - } - break; - case 2: { // Seize(n) - fail - alloc += free; - // Fall through - sum_req += free; - goto doalloc; - } - case 1: { // Seize(n) (success) - sum_req += alloc; - doalloc: - Chunk chunk; - allocConsPages(alloc, chunk.pageCount, chunk.pageId); - ndbrequire(chunk.pageCount <= alloc); - if(chunk.pageCount != 0){ - chunks.push_back(chunk); - if(chunk.pageCount != alloc) { - if (type == 1211) - ndbout_c(" Tried to allocate %d - only allocated %d - free: %d", - alloc, chunk.pageCount, free); - } - } else { - ndbout_c(" Failed to alloc %d pages with %d pages free", - alloc, free); - } - - sum_conf += chunk.pageCount; - Uint32 tot = fc_left + fc_right + fc_remove; - sum_loop += tot; - if (tot > max_loop) - max_loop = tot; - - for(Uint32 i = 0; ipage_state = ~ZFREE_COMMON; - } - - if(alloc == 1 && free > 0) - ndbrequire(chunk.pageCount == alloc); - } - break; - } - } - while(chunks.size() > 0){ - Chunk chunk = chunks.back(); - returnCommonArea(chunk.pageId, chunk.pageCount); - chunks.erase(chunks.size() - 1); - } - - ndbout_c("Got %u%% of requested allocs, loops : %u 100*avg: %u max: %u", - (100 * sum_conf) / sum_req, sum_loop, 100*sum_loop / LOOPS, - max_loop); - } -#endif -}//Dbtup::execDUMP_STATE_ORD() - -/* ---------------------------------------------------------------- */ -/* --------- MEMORY CHECK ----------------------- */ -/* ---------------------------------------------------------------- */ -void Dbtup::execMEMCHECKREQ(Signal* signal) -{ - TablerecPtr regTabPtr; - regTabPtr.i = 2; - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - if(tablerec && regTabPtr.p->tableStatus == DEFINED) - validate_page(regTabPtr.p, 0); - -#if 0 - const Dbtup::Tablerec& tab = *tup->tabptr.p; - - PagePtr regPagePtr; - Uint32* data = &signal->theData[0]; - - jamEntry(); - BlockReference blockref = signal->theData[0]; - Uint32 i; - for (i = 0; i < 25; i++) { - jam(); - data[i] = 0; - }//for - for (i = 0; i < 16; i++) { - regPagePtr.i = cfreepageList[i]; - jam(); - while (regPagePtr.i != RNIL) { - jam(); - ptrCheckGuard(regPagePtr, cnoOfPage, cpage); - regPagePtr.i = regPagePtr.p->next_page; - data[0]++; - }//while - }//for - sendSignal(blockref, GSN_MEMCHECKCONF, signal, 25, JBB); 
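The DUMP_STATE_ORD handler above (dump codes 1211-1213) stresses the page allocator by randomly seizing and releasing chunks of consecutive pages and finally reporting how much of the requested space was actually granted. The following is only a rough stand-alone illustration of that seize/release pattern against a trivial fake allocator; the allocator and all constants here are made up, not the removed code:

#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <vector>

struct Chunk { unsigned pageId; unsigned pageCount; };

// Fake allocator: hands out up to 'free_pages' consecutive page ids.
static unsigned free_pages = 128;
static unsigned next_id = 0;

static Chunk alloc_pages(unsigned want) {
  unsigned got = want <= free_pages ? want : free_pages;
  Chunk c = { next_id, got };
  next_id += got;
  free_pages -= got;
  return c;
}
static void release_pages(const Chunk& c) { free_pages += c.pageCount; }

int main() {
  unsigned seed = (unsigned)time(0);
  srand(seed);
  std::printf("page allocator stress test (seed: 0x%x)\n", seed);

  std::vector<Chunk> chunks;
  unsigned sum_req = 0, sum_conf = 0;
  for (unsigned i = 0; i < 1000; i++) {
    if (!chunks.empty() && (rand() % 3) == 0) {        // release a random chunk
      unsigned ch = rand() % chunks.size();
      release_pages(chunks[ch]);
      chunks.erase(chunks.begin() + ch);
    } else {                                           // seize a random-sized chunk
      unsigned want = 1 + rand() % 16;
      sum_req += want;
      Chunk c = alloc_pages(want);
      sum_conf += c.pageCount;
      if (c.pageCount) chunks.push_back(c);
    }
  }
  for (const Chunk& c : chunks) release_pages(c);      // drain at the end
  std::printf("got %u%% of requested pages\n",
              100 * sum_conf / (sum_req ? sum_req : 1));
  return 0;
}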
-#endif -}//Dbtup::memCheck() - -// ------------------------------------------------------------------------ -// Help function to be used when debugging. Prints out a tuple page. -// printLimit is the number of bytes that is printed out from the page. A -// page is of size 32768 bytes as of March 2003. -// ------------------------------------------------------------------------ -void Dbtup::printoutTuplePage(Uint32 fragid, Uint32 pageid, Uint32 printLimit) -{ - PagePtr tmpPageP; - FragrecordPtr tmpFragP; - TablerecPtr tmpTableP; - - c_page_pool.getPtr(tmpPageP, pageid); - - tmpFragP.i = fragid; - ptrCheckGuard(tmpFragP, cnoOfFragrec, fragrecord); - - tmpTableP.i = tmpFragP.p->fragTableId; - ptrCheckGuard(tmpTableP, cnoOfTablerec, tablerec); - - ndbout << "Fragid: " << fragid << " Pageid: " << pageid << endl - << "----------------------------------------" << endl; - - ndbout << "PageHead : "; - ndbout << endl; -}//Dbtup::printoutTuplePage - -#ifdef VM_TRACE -NdbOut& -operator<<(NdbOut& out, const Dbtup::Operationrec& op) -{ - out << "[Operationrec " << hex << &op; - // table - out << " [fragmentPtr " << hex << op.fragmentPtr << "]"; - // type - out << " [op_type " << dec << op.op_struct.op_type << "]"; - out << " [delete_insert_flag " << dec; - out << op.op_struct.delete_insert_flag << "]"; - // state - out << " [tuple_state " << dec << op.op_struct.tuple_state << "]"; - out << " [trans_state " << dec << op.op_struct.trans_state << "]"; - out << " [in_active_list " << dec << op.op_struct.in_active_list << "]"; - // links - out << " [prevActiveOp " << hex << op.prevActiveOp << "]"; - out << " [nextActiveOp " << hex << op.nextActiveOp << "]"; - // tuples - out << " [tupVersion " << hex << op.tupVersion << "]"; - out << " [m_tuple_location " << op.m_tuple_location << "]"; - out << " [m_copy_tuple_location " << op.m_copy_tuple_location << "]"; - out << "]"; - return out; -} - -// uses global tabptr -NdbOut& -operator<<(NdbOut& out, const Dbtup::Th& th) -{ - // ugly - Dbtup* tup = (Dbtup*)globalData.getBlock(DBTUP); - const Dbtup::Tablerec& tab = *tup->tabptr.p; - unsigned i = 0; - out << "[Th " << hex << &th; - out << " [op " << hex << th.data[i++] << "]"; - out << " [version " << hex << (Uint16)th.data[i++] << "]"; - if (tab.m_bits & Dbtup::Tablerec::TR_Checksum) - out << " [checksum " << hex << th.data[i++] << "]"; - out << " [nullbits"; - for (unsigned j = 0; j < tab.m_offsets[Dbtup::MM].m_null_words; j++) - out << " " << hex << th.data[i++]; - out << "]"; - out << " [data"; - while (i < tab.m_offsets[Dbtup::MM].m_fix_header_size) - out << " " << hex << th.data[i++]; - out << "]"; - out << "]"; - return out; -} -#endif - -#ifdef VM_TRACE -template class Vector; -#endif -// uses global tabptr - -NdbOut& -operator<<(NdbOut& out, const Local_key & key) -{ - out << "[ m_page_no: " << dec << key.m_page_no - << " m_file_no: " << dec << key.m_file_no - << " m_page_idx: " << dec << key.m_page_idx << "]"; - return out; -} - -static -NdbOut& -operator<<(NdbOut& out, const Dbtup::Tablerec::Tuple_offsets& off) -{ - out << "[ null_words: " << (Uint32)off.m_null_words - << " null off: " << (Uint32)off.m_null_offset - << " disk_off: " << off.m_disk_ref_offset - << " fixheadsz: " << off.m_fix_header_size - << " max_var_off: " << off.m_max_var_offset - << " ]"; - - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtup::Tablerec& tab) -{ - out << "[ total_rec_size: " << tab.total_rec_size - << " checksum: " << !!(tab.m_bits & Dbtup::Tablerec::TR_Checksum) - << " attr: " << tab.m_no_of_attributes - << 
" disk: " << tab.m_no_of_disk_attributes - << " mm: " << tab.m_offsets[Dbtup::MM] - << " [ fix: " << tab.m_attributes[Dbtup::MM].m_no_of_fixsize - << " var: " << tab.m_attributes[Dbtup::MM].m_no_of_varsize << "]" - - << " dd: " << tab.m_offsets[Dbtup::DD] - << " [ fix: " << tab.m_attributes[Dbtup::DD].m_no_of_fixsize - << " var: " << tab.m_attributes[Dbtup::DD].m_no_of_varsize << "]" - << " ]" << endl; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const AttributeDescriptor& off) -{ - Uint32 word; - memcpy(&word, &off, 4); - return out; -} - -#include "AttributeOffset.hpp" - -NdbOut& -operator<<(NdbOut& out, const AttributeOffset& off) -{ - Uint32 word; - memcpy(&word, &off, 4); - out << "[ offset: " << AttributeOffset::getOffset(word) - << " nullpos: " << AttributeOffset::getNullFlagPos(word); - if(AttributeOffset::getCharsetFlag(word)) - out << " charset: %d" << AttributeOffset::getCharsetPos(word); - out << " ]"; - return out; -} - diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp deleted file mode 100644 index e426213162a..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp +++ /dev/null @@ -1,1893 +0,0 @@ -/* Copyright (c) 2004-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_DISK_ALLOC_CPP -#include "Dbtup.hpp" - -static bool f_undo_done = true; - -static -NdbOut& -operator<<(NdbOut& out, const Ptr & ptr) -{ - out << "[ Page: ptr.i: " << ptr.i - << " [ m_file_no: " << ptr.p->m_file_no - << " m_page_no: " << ptr.p->m_page_no << "]" - << " list_index: " << ptr.p->list_index - << " free_space: " << ptr.p->free_space - << " uncommitted_used_space: " << ptr.p->uncommitted_used_space - << " ]"; - return out; -} - -static -NdbOut& -operator<<(NdbOut& out, const Ptr & ptr) -{ - out << "[ Page_request: ptr.i: " << ptr.i - << " " << ptr.p->m_key - << " m_estimated_free_space: " << ptr.p->m_estimated_free_space - << " m_list_index: " << ptr.p->m_list_index - << " m_frag_ptr_i: " << ptr.p->m_frag_ptr_i - << " m_extent_info_ptr: " << ptr.p->m_extent_info_ptr - << " m_ref_count: " << ptr.p->m_ref_count - << " m_uncommitted_used_space: " << ptr.p->m_uncommitted_used_space - << " ]"; - - return out; -} - -static -NdbOut& -operator<<(NdbOut& out, const Ptr & ptr) -{ - out << "[ Extent_info: ptr.i " << ptr.i - << " " << ptr.p->m_key - << " m_first_page_no: " << ptr.p->m_first_page_no - << " m_free_space: " << ptr.p->m_free_space - << " m_free_matrix_pos: " << ptr.p->m_free_matrix_pos - << " m_free_page_count: ["; - - for(Uint32 i = 0; im_free_page_count[i]; - out << " ] ]"; - - return out; -} - -#if NOT_YET_FREE_EXTENT -static -inline -bool -check_free(const Dbtup::Extent_info* extP) -{ - Uint32 res = 0; - for (Uint32 i = 1; im_free_page_count[i]; - return res; -} -#error "Code for deallocting extents when they get empty" 
-#error "This code is not yet complete" -#endif - -#if NOT_YET_UNDO_ALLOC_EXTENT -#error "This is needed for deallocting extents when they get empty" -#error "This code is not complete yet" -#endif - -void -Dbtup::dump_disk_alloc(Dbtup::Disk_alloc_info & alloc) -{ - ndbout_c("dirty pages"); - for(Uint32 i = 0; i *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList list(*pool, alloc.m_dirty_pages[i]); - for(list.first(ptr); !ptr.isNull(); list.next(ptr)) - { - ndbout << ptr << " "; - } - ndbout_c(" "); - } - ndbout_c("page requests"); - for(Uint32 i = 0; i ptr; - Local_page_request_list list(c_page_request_pool, - alloc.m_page_requests[i]); - for(list.first(ptr); !ptr.isNull(); list.next(ptr)) - { - ndbout << ptr << " "; - } - ndbout_c(" "); - } - - ndbout_c("Extent matrix"); - for(Uint32 i = 0; i ptr; - Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[i]); - for(list.first(ptr); !ptr.isNull(); list.next(ptr)) - { - ndbout << ptr << " "; - } - ndbout_c(" "); - } - - if (alloc.m_curr_extent_info_ptr_i != RNIL) - { - Ptr ptr; - c_extent_pool.getPtr(ptr, alloc.m_curr_extent_info_ptr_i); - ndbout << "current extent: " << ptr << endl; - } -} - -#if defined VM_TRACE || true -#define ddassert(x) do { if(unlikely(!(x))) { dump_disk_alloc(alloc); ndbrequire(false); } } while(0) -#else -#define ddassert(x) -#endif - -Dbtup::Disk_alloc_info::Disk_alloc_info(const Tablerec* tabPtrP, - Uint32 extent_size) -{ - m_extent_size = extent_size; - m_curr_extent_info_ptr_i = RNIL; - if (tabPtrP->m_no_of_disk_attributes == 0) - return; - - Uint32 min_size= 4*tabPtrP->m_offsets[DD].m_fix_header_size; - - if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0) - { - Uint32 recs_per_page= (4*Tup_fixsize_page::DATA_WORDS)/min_size; - m_page_free_bits_map[0] = recs_per_page; // 100% free - m_page_free_bits_map[1] = 1; - m_page_free_bits_map[2] = 0; - m_page_free_bits_map[3] = 0; - - Uint32 max= recs_per_page * extent_size; - for(Uint32 i = 0; i= col) - { - i = (i & ~mask) + mask; - } - } - - return RNIL; -} - -Uint32 -Dbtup::Disk_alloc_info::calc_extent_pos(const Extent_info* extP) const -{ - Uint32 free= extP->m_free_space; - Uint32 mask= EXTENT_SEARCH_MATRIX_COLS - 1; - - Uint32 col= 0, row=0; - - /** - * Find correct row based on total free space - * if zero (or very small free space) put - * absolutly last - */ - { - const Uint32 *arr= m_total_extent_free_space_thresholds; - for(; free < * arr++; row++) - assert(row < EXTENT_SEARCH_MATRIX_ROWS); - } - - /** - * Find correct col based on largest available chunk - */ - { - const Uint16 *arr= extP->m_free_page_count; - for(; col < EXTENT_SEARCH_MATRIX_COLS && * arr++ == 0; col++); - } - - /** - * NOTE - * - * If free space on extent is small or zero, - * col will be = EXTENT_SEARCH_MATRIX_COLS - * row will be = EXTENT_SEARCH_MATRIX_ROWS - * in that case pos will be col * row = max pos - * (as fixed by + 1 in declaration) - */ - Uint32 pos= (row * (mask + 1)) + (col & mask); - - assert(pos < EXTENT_SEARCH_MATRIX_SIZE); - return pos; -} - -void -Dbtup::update_extent_pos(Disk_alloc_info& alloc, - Ptr extentPtr) -{ -#ifdef VM_TRACE - Uint32 min_free = 0; - for(Uint32 i = 0; im_free_page_count[i]; - } - ddassert(extentPtr.p->m_free_space >= min_free); -#endif - - Uint32 old = extentPtr.p->m_free_matrix_pos; - if (old != RNIL) - { - Uint32 pos = alloc.calc_extent_pos(extentPtr.p); - if (old != pos) - { - jam(); - Local_extent_info_list old_list(c_extent_pool, alloc.m_free_extents[old]); - Local_extent_info_list new_list(c_extent_pool, 
alloc.m_free_extents[pos]); - old_list.remove(extentPtr); - new_list.add(extentPtr); - extentPtr.p->m_free_matrix_pos= pos; - } - } - else - { - ddassert(alloc.m_curr_extent_info_ptr_i == extentPtr.i); - } -} - -void -Dbtup::restart_setup_page(Disk_alloc_info& alloc, PagePtr pagePtr) -{ - jam(); - /** - * Link to extent, clear uncommitted_used_space - */ - pagePtr.p->uncommitted_used_space = 0; - pagePtr.p->m_restart_seq = globalData.m_restart_seq; - - Extent_info key; - key.m_key.m_file_no = pagePtr.p->m_file_no; - key.m_key.m_page_idx = pagePtr.p->m_extent_no; - Ptr extentPtr; - ndbrequire(c_extent_hash.find(extentPtr, key)); - pagePtr.p->m_extent_info_ptr = extentPtr.i; - - Uint32 idx = pagePtr.p->list_index & ~0x8000; - Uint32 estimated = alloc.calc_page_free_space(idx); - Uint32 real_free = pagePtr.p->free_space; - - ddassert(real_free >= estimated); - if (real_free != estimated) - { - jam(); - extentPtr.p->m_free_space += (real_free - estimated); - update_extent_pos(alloc, extentPtr); - } - -#ifdef VM_TRACE - { - Local_key page; - page.m_file_no = pagePtr.p->m_file_no; - page.m_page_no = pagePtr.p->m_page_no; - - Tablespace_client tsman(0, c_tsman, - 0, 0, 0); - unsigned uncommitted, committed; - uncommitted = committed = ~(unsigned)0; - (void) tsman.get_page_free_bits(&page, &uncommitted, &committed); - jamEntry(); - - idx = alloc.calc_page_free_bits(real_free); - ddassert(idx == committed); - } -#endif -} - -/** - * - Page free bits - - * 0 = 00 - free - 100% free - * 1 = 01 - atleast 70% free, 70= pct_free + 2 * (100 - pct_free) / 3 - * 2 = 10 - atleast 40% free, 40= pct_free + (100 - pct_free) / 3 - * 3 = 11 - full - less than pct_free% free, pct_free=10% - * - */ - -#define DBG_DISK 0 - -int -Dbtup::disk_page_prealloc(Signal* signal, - Ptr fragPtr, - Local_key* key, Uint32 sz) -{ - int err; - Uint32 i, ptrI; - Ptr req; - Fragrecord* fragPtrP = fragPtr.p; - Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; - Uint32 idx= alloc.calc_page_free_bits(sz); - Tablespace_client tsman(signal, c_tsman, - fragPtrP->fragTableId, - fragPtrP->fragmentId, - fragPtrP->m_tablespace_id); - - if (DBG_DISK) - ndbout << "disk_page_prealloc"; - - /** - * 1) search current dirty pages - */ - for(i= 0; i <= idx; i++) - { - if (!alloc.m_dirty_pages[i].isEmpty()) - { - ptrI= alloc.m_dirty_pages[i].firstItem; - Ptr gpage; - m_global_page_pool.getPtr(gpage, ptrI); - - PagePtr tmp; - tmp.i = gpage.i; - tmp.p = reinterpret_cast(gpage.p); - disk_page_prealloc_dirty_page(alloc, tmp, i, sz); - key->m_page_no= tmp.p->m_page_no; - key->m_file_no= tmp.p->m_file_no; - if (DBG_DISK) - ndbout << " found dirty page " << *key << endl; - jam(); - return 0; // Page in memory - } - } - - /** - * Search outanding page requests - * callback does not need to access page request again - * as it's not the first request to this page - */ - for(i= 0; i <= idx; i++) - { - if (!alloc.m_page_requests[i].isEmpty()) - { - ptrI= alloc.m_page_requests[i].firstItem; - Ptr req; - c_page_request_pool.getPtr(req, ptrI); - - disk_page_prealloc_transit_page(alloc, req, i, sz); - * key = req.p->m_key; - if (DBG_DISK) - ndbout << " found transit page " << *key << endl; - jam(); - return 0; - } - } - - /** - * We need to request a page... 
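disk_page_prealloc() above works in terms of the two-bit page "free bits" described in the comment (0 = completely free, 3 = effectively full) and scans the dirty-page and page-request lists bucket by bucket. A stand-alone sketch of such a classifier, using the percentages from that comment as assumed thresholds; the real code derives its thresholds from the table's disk record size:

#include <cassert>
#include <cstdio>

// Map "free words on the page" to a free-bits bucket; page_words is the
// page's total data capacity.
unsigned calc_free_bits(unsigned free_words, unsigned page_words) {
  unsigned pct = 100 * free_words / page_words;
  if (pct == 100) return 0;   // completely free
  if (pct >= 70)  return 1;   // at least ~70% free
  if (pct >= 40)  return 2;   // at least ~40% free
  return 3;                   // (nearly) full
}

int main() {
  const unsigned PAGE_WORDS = 8192;   // example capacity, not Tup_*_page::DATA_WORDS
  assert(calc_free_bits(PAGE_WORDS, PAGE_WORDS) == 0);
  assert(calc_free_bits(6000, PAGE_WORDS) == 1);
  assert(calc_free_bits(4000, PAGE_WORDS) == 2);
  assert(calc_free_bits(100,  PAGE_WORDS) == 3);
  std::printf("free-bits buckets ok\n");
  return 0;
}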
- */ - if (!c_page_request_pool.seize(req)) - { - jam(); - err= 1; - //XXX set error code - ndbout_c("no free request"); - return -err; - } - - req.p->m_ref_count= 1; - req.p->m_frag_ptr_i= fragPtr.i; - req.p->m_uncommitted_used_space= sz; - - int pageBits; // received - Ptr ext; - const Uint32 bits= alloc.calc_page_free_bits(sz); // required - bool found= false; - - /** - * Do we have a current extent - */ - if ((ext.i= alloc.m_curr_extent_info_ptr_i) != RNIL) - { - jam(); - c_extent_pool.getPtr(ext); - if ((pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits)) >= 0) - { - jamEntry(); - found= true; - } - else - { - jamEntry(); - /** - * The current extent is not in a free list - * and since it couldn't accomadate the request - * we put it on the free list - */ - alloc.m_curr_extent_info_ptr_i = RNIL; - Uint32 pos= alloc.calc_extent_pos(ext.p); - ext.p->m_free_matrix_pos = pos; - Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[pos]); - list.add(ext); - } - } - - if (!found) - { - Uint32 pos; - if ((pos= alloc.find_extent(sz)) != RNIL) - { - jam(); - Local_extent_info_list list(c_extent_pool, alloc.m_free_extents[pos]); - list.first(ext); - list.remove(ext); - } - else - { - jam(); - /** - * We need to alloc an extent - */ -#if NOT_YET_UNDO_ALLOC_EXTENT - Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id; - - err = c_lgman->alloc_log_space(logfile_group_id, - sizeof(Disk_undo::AllocExtent)>>2); - jamEntry(); - if(unlikely(err)) - { - return -err; - } -#endif - - if (!c_extent_pool.seize(ext)) - { - jam(); - //XXX - err= 2; -#if NOT_YET_UNDO_ALLOC_EXTENT - c_lgman->free_log_space(logfile_group_id, - sizeof(Disk_undo::AllocExtent)>>2); -#endif - c_page_request_pool.release(req); - ndbout_c("no free extent info"); - return -err; - } - - if ((err= tsman.alloc_extent(&ext.p->m_key)) < 0) - { - jamEntry(); -#if NOT_YET_UNDO_ALLOC_EXTENT - c_lgman->free_log_space(logfile_group_id, - sizeof(Disk_undo::AllocExtent)>>2); -#endif - c_extent_pool.release(ext); - c_page_request_pool.release(req); - return err; - } - - int pages= err; -#if NOT_YET_UNDO_ALLOC_EXTENT - { - /** - * Do something here - */ - { - Callback cb; - cb.m_callbackData= ext.i; - cb.m_callbackFunction = - safe_cast(&Dbtup::disk_page_alloc_extent_log_buffer_callback); - Uint32 sz= sizeof(Disk_undo::AllocExtent)>>2; - - Logfile_client lgman(this, c_lgman, logfile_group_id); - int res= lgman.get_log_buffer(signal, sz, &cb); - switch(res){ - case 0: - break; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - default: - execute(signal, cb, res); - } - } - } -#endif - - ndbout << "allocated " << pages << " pages: " << ext.p->m_key << endl; - ext.p->m_first_page_no = ext.p->m_key.m_page_no; - bzero(ext.p->m_free_page_count, sizeof(ext.p->m_free_page_count)); - ext.p->m_free_space= alloc.m_page_free_bits_map[0] * pages; - ext.p->m_free_page_count[0]= pages; // All pages are "free"-est - c_extent_hash.add(ext); - - Local_fragment_extent_list list1(c_extent_pool, alloc.m_extent_list); - list1.add(ext); - } - - alloc.m_curr_extent_info_ptr_i= ext.i; - ext.p->m_free_matrix_pos= RNIL; - pageBits= tsman.alloc_page_from_extent(&ext.p->m_key, bits); - jamEntry(); - ddassert(pageBits >= 0); - } - - /** - * We have a page from an extent - */ - *key= req.p->m_key= ext.p->m_key; - - if (DBG_DISK) - ndbout << " allocated page " << *key << endl; - - /** - * We don't know exact free space of page - * but we know what page free bits it has. 
- * compute free space based on them - */ - Uint32 size= alloc.calc_page_free_space((Uint32)pageBits); - - ddassert(size >= sz); - Uint32 new_size = size - sz; // Subtract alloc rec - req.p->m_estimated_free_space= new_size; // Store on page request - - Uint32 newPageBits= alloc.calc_page_free_bits(new_size); - if (newPageBits != (Uint32)pageBits) - { - jam(); - ddassert(ext.p->m_free_page_count[pageBits] > 0); - ext.p->m_free_page_count[pageBits]--; - ext.p->m_free_page_count[newPageBits]++; - } - ddassert(ext.p->m_free_space >= sz); - ext.p->m_free_space -= sz; - - // And put page request in correct free list - idx= alloc.calc_page_free_bits(new_size); - { - Local_page_request_list list(c_page_request_pool, - alloc.m_page_requests[idx]); - - list.add(req); - } - req.p->m_list_index= idx; - req.p->m_extent_info_ptr= ext.i; - - Page_cache_client::Request preq; - preq.m_page = *key; - preq.m_callback.m_callbackData= req.i; - preq.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_page_prealloc_callback); - - int flags= Page_cache_client::ALLOC_REQ; - if (pageBits == 0) - { - jam(); - //XXX empty page -> fast to map - flags |= Page_cache_client::EMPTY_PAGE; - preq.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_page_prealloc_initial_callback); - } - - int res= m_pgman.get_page(signal, preq, flags); - jamEntry(); - switch(res) - { - case 0: - jam(); - break; - case -1: - ndbassert(false); - break; - default: - jam(); - execute(signal, preq.m_callback, res); // run callback - } - - return res; -} - -void -Dbtup::disk_page_prealloc_dirty_page(Disk_alloc_info & alloc, - PagePtr pagePtr, - Uint32 old_idx, Uint32 sz) -{ - jam(); - ddassert(pagePtr.p->list_index == old_idx); - - Uint32 free= pagePtr.p->free_space; - Uint32 used= pagePtr.p->uncommitted_used_space + sz; - Uint32 ext= pagePtr.p->m_extent_info_ptr; - - ddassert(free >= used); - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, ext); - - Uint32 new_idx= alloc.calc_page_free_bits(free - used); - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - - if (old_idx != new_idx) - { - jam(); - LocalDLList old_list(*pool, alloc.m_dirty_pages[old_idx]); - LocalDLList new_list(*pool, alloc.m_dirty_pages[new_idx]); - old_list.remove(pagePtr); - new_list.add(pagePtr); - - ddassert(extentPtr.p->m_free_page_count[old_idx]); - extentPtr.p->m_free_page_count[old_idx]--; - extentPtr.p->m_free_page_count[new_idx]++; - pagePtr.p->list_index= new_idx; - } - - pagePtr.p->uncommitted_used_space = used; - ddassert(extentPtr.p->m_free_space >= sz); - extentPtr.p->m_free_space -= sz; - update_extent_pos(alloc, extentPtr); -} - - -void -Dbtup::disk_page_prealloc_transit_page(Disk_alloc_info& alloc, - Ptr req, - Uint32 old_idx, Uint32 sz) -{ - jam(); - ddassert(req.p->m_list_index == old_idx); - - Uint32 free= req.p->m_estimated_free_space; - Uint32 used= req.p->m_uncommitted_used_space + sz; - Uint32 ext= req.p->m_extent_info_ptr; - - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, ext); - - ddassert(free >= sz); - Uint32 new_idx= alloc.calc_page_free_bits(free - sz); - - if (old_idx != new_idx) - { - jam(); - Page_request_list::Head *lists = alloc.m_page_requests; - Local_page_request_list old_list(c_page_request_pool, lists[old_idx]); - Local_page_request_list new_list(c_page_request_pool, lists[new_idx]); - old_list.remove(req); - new_list.add(req); - - ddassert(extentPtr.p->m_free_page_count[old_idx]); - extentPtr.p->m_free_page_count[old_idx]--; - extentPtr.p->m_free_page_count[new_idx]++; - req.p->m_list_index= new_idx; - } - - 
req.p->m_uncommitted_used_space = used; - req.p->m_estimated_free_space = free - sz; - ddassert(extentPtr.p->m_free_space >= sz); - extentPtr.p->m_free_space -= sz; - update_extent_pos(alloc, extentPtr); -} - - -void -Dbtup::disk_page_prealloc_callback(Signal* signal, - Uint32 page_request, Uint32 page_id) -{ - jamEntry(); - //ndbout_c("disk_alloc_page_callback id: %d", page_id); - - Ptr req; - c_page_request_pool.getPtr(req, page_request); - - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - - Ptr fragPtr; - fragPtr.i= req.p->m_frag_ptr_i; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - if (unlikely(pagePtr.p->m_restart_seq != globalData.m_restart_seq)) - { - restart_setup_page(fragPtr.p->m_disk_alloc_info, pagePtr); - } - - disk_page_prealloc_callback_common(signal, req, fragPtr, pagePtr); -} - -void -Dbtup::disk_page_prealloc_initial_callback(Signal*signal, - Uint32 page_request, - Uint32 page_id) -{ - jamEntry(); - //ndbout_c("disk_alloc_page_callback_initial id: %d", page_id); - /** - * 1) lookup page request - * 2) lookup page - * 3) lookup table - * 4) init page (according to page type) - * 5) call ordinary callback - */ - Ptr req; - c_page_request_pool.getPtr(req, page_request); - - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - Ptr fragPtr; - fragPtr.i= req.p->m_frag_ptr_i; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - Ptr tabPtr; - tabPtr.i = fragPtr.p->fragTableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, req.p->m_extent_info_ptr); - - pagePtr.p->m_page_no= req.p->m_key.m_page_no; - pagePtr.p->m_file_no= req.p->m_key.m_file_no; - pagePtr.p->m_table_id= fragPtr.p->fragTableId; - pagePtr.p->m_fragment_id = fragPtr.p->fragmentId; - pagePtr.p->m_extent_no = extentPtr.p->m_key.m_page_idx; // logical extent no - pagePtr.p->m_extent_info_ptr= req.p->m_extent_info_ptr; - pagePtr.p->m_restart_seq = globalData.m_restart_seq; - pagePtr.p->list_index = 0x8000; - pagePtr.p->uncommitted_used_space = 0; - pagePtr.p->nextList = pagePtr.p->prevList = RNIL; - - if (tabPtr.p->m_attributes[DD].m_no_of_varsize == 0) - { - convertThPage((Fix_page*)pagePtr.p, tabPtr.p, DD); - } - else - { - abort(); - } - disk_page_prealloc_callback_common(signal, req, fragPtr, pagePtr); -} - -void -Dbtup::disk_page_prealloc_callback_common(Signal* signal, - Ptr req, - Ptr fragPtr, - PagePtr pagePtr) -{ - /** - * 1) remove page request from Disk_alloc_info.m_page_requests - * 2) Add page to Disk_alloc_info.m_dirty_pages - * 3) register callback in pgman (unmap callback) - * 4) inform pgman about current users - */ - Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info; - ddassert((pagePtr.p->list_index & 0x8000) == 0x8000); - ddassert(pagePtr.p->m_extent_info_ptr == req.p->m_extent_info_ptr); - ddassert(pagePtr.p->m_page_no == req.p->m_key.m_page_no); - ddassert(pagePtr.p->m_file_no == req.p->m_key.m_file_no); - - Uint32 old_idx = req.p->m_list_index; - Uint32 free= req.p->m_estimated_free_space; - Uint32 ext = req.p->m_extent_info_ptr; - Uint32 used= req.p->m_uncommitted_used_space; - Uint32 real_free = pagePtr.p->free_space; - Uint32 real_used = used + pagePtr.p->uncommitted_used_space; - - ddassert(real_free >= free); - ddassert(real_free >= real_used); - ddassert(alloc.calc_page_free_bits(free) == old_idx); - Uint32 new_idx= 
alloc.calc_page_free_bits(real_free - real_used); - - /** - * Add to dirty pages - */ - ArrayPool *cheat_pool= (ArrayPool*)&m_global_page_pool; - LocalDLList list(* cheat_pool, alloc.m_dirty_pages[new_idx]); - list.add(pagePtr); - pagePtr.p->uncommitted_used_space = real_used; - pagePtr.p->list_index = new_idx; - - if (old_idx != new_idx || free != real_free) - { - jam(); - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, ext); - - extentPtr.p->m_free_space += (real_free - free); - - if (old_idx != new_idx) - { - jam(); - ddassert(extentPtr.p->m_free_page_count[old_idx]); - extentPtr.p->m_free_page_count[old_idx]--; - extentPtr.p->m_free_page_count[new_idx]++; - } - - update_extent_pos(alloc, extentPtr); - } - - { - Local_page_request_list list(c_page_request_pool, - alloc.m_page_requests[old_idx]); - list.release(req); - } -} - -void -Dbtup::disk_page_set_dirty(PagePtr pagePtr) -{ - jam(); - Uint32 idx = pagePtr.p->list_index; - if ((idx & 0x8000) == 0) - { - jam(); - /** - * Already in dirty list - */ - return ; - } - - Local_key key; - key.m_page_no = pagePtr.p->m_page_no; - key.m_file_no = pagePtr.p->m_file_no; - - pagePtr.p->nextList = pagePtr.p->prevList = RNIL; - - if (DBG_DISK) - ndbout << " disk_page_set_dirty " << key << endl; - - Ptr tabPtr; - tabPtr.i= pagePtr.p->m_table_id; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - Ptr fragPtr; - getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p); - - Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info; - - Uint32 free = pagePtr.p->free_space; - Uint32 used = pagePtr.p->uncommitted_used_space; - if (unlikely(pagePtr.p->m_restart_seq != globalData.m_restart_seq)) - { - restart_setup_page(alloc, pagePtr); - idx = alloc.calc_page_free_bits(free); - used = 0; - } - else - { - idx &= ~0x8000; - ddassert(idx == alloc.calc_page_free_bits(free - used)); - } - - ddassert(free >= used); - - Tablespace_client tsman(0, c_tsman, - fragPtr.p->fragTableId, - fragPtr.p->fragmentId, - fragPtr.p->m_tablespace_id); - - pagePtr.p->list_index = idx; - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList list(*pool, alloc.m_dirty_pages[idx]); - list.add(pagePtr); - - // Make sure no one will allocate it... 
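In the page bookkeeping above, the 0x8000 bit of list_index means "page is not currently on a dirty list", while the low bits give the free-space bucket the page is (or would be) filed under; disk_page_set_dirty() clears the flag and inserts the page into m_dirty_pages[idx]. A small illustration of that encoding with plain stand-in types:

#include <cassert>
#include <cstdio>

static const unsigned NOT_ON_DIRTY_LIST = 0x8000;

struct FakePage {
  unsigned list_index;        // bucket | optional NOT_ON_DIRTY_LIST flag
};

// Same flag check as disk_page_set_dirty(): if the flag is clear the page is
// already on a dirty list; otherwise clear it and file the page under the
// given bucket.  The real code recomputes the bucket from free_space itself.
unsigned make_dirty(FakePage& p, unsigned new_bucket) {
  if ((p.list_index & NOT_ON_DIRTY_LIST) == 0)
    return p.list_index;                      // already dirty, nothing to do
  p.list_index = new_bucket;                  // flag cleared, bucket stored
  return p.list_index;
}

int main() {
  FakePage p = { 2 | NOT_ON_DIRTY_LIST };     // clean page, would-be bucket 2
  unsigned idx = make_dirty(p, 1);            // becomes dirty, filed in bucket 1
  assert(idx == 1 && (p.list_index & NOT_ON_DIRTY_LIST) == 0);
  std::printf("page filed on dirty list %u\n", idx);
  return 0;
}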
- tsman.unmap_page(&key, MAX_FREE_LIST - 1); - jamEntry(); -} - -void -Dbtup::disk_page_unmap_callback(Uint32 when, - Uint32 page_id, Uint32 dirty_count) -{ - jamEntry(); - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - Uint32 type = pagePtr.p->m_page_header.m_page_type; - if (unlikely((type != File_formats::PT_Tup_fixsize_page && - type != File_formats::PT_Tup_varsize_page) || - f_undo_done == false)) - { - jam(); - return ; - } - - Uint32 idx = pagePtr.p->list_index; - - Ptr tabPtr; - tabPtr.i= pagePtr.p->m_table_id; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - Ptr fragPtr; - getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p); - - Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info; - - if (when == 0) - { - /** - * Before pageout - */ - jam(); - - if (DBG_DISK) - { - Local_key key; - key.m_page_no = pagePtr.p->m_page_no; - key.m_file_no = pagePtr.p->m_file_no; - ndbout << "disk_page_unmap_callback(before) " << key - << " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl; - } - - ndbassert((idx & 0x8000) == 0); - - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList list(*pool, alloc.m_dirty_pages[idx]); - LocalDLList list2(*pool, alloc.m_unmap_pages); - list.remove(pagePtr); - list2.add(pagePtr); - - if (dirty_count == 0) - { - jam(); - pagePtr.p->list_index = idx | 0x8000; - - Local_key key; - key.m_page_no = pagePtr.p->m_page_no; - key.m_file_no = pagePtr.p->m_file_no; - - Uint32 free = pagePtr.p->free_space; - Uint32 used = pagePtr.p->uncommitted_used_space; - ddassert(free >= used); - ddassert(alloc.calc_page_free_bits(free - used) == idx); - - Tablespace_client tsman(0, c_tsman, - fragPtr.p->fragTableId, - fragPtr.p->fragmentId, - fragPtr.p->m_tablespace_id); - - tsman.unmap_page(&key, idx); - jamEntry(); - } - } - else if (when == 1) - { - /** - * After page out - */ - jam(); - - Local_key key; - key.m_page_no = pagePtr.p->m_page_no; - key.m_file_no = pagePtr.p->m_file_no; - Uint32 real_free = pagePtr.p->free_space; - - if (DBG_DISK) - { - ndbout << "disk_page_unmap_callback(after) " << key - << " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl; - } - - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList list(*pool, alloc.m_unmap_pages); - list.remove(pagePtr); - - Tablespace_client tsman(0, c_tsman, - fragPtr.p->fragTableId, - fragPtr.p->fragmentId, - fragPtr.p->m_tablespace_id); - - if (DBG_DISK && alloc.calc_page_free_bits(real_free) != (idx & ~0x8000)) - { - ndbout << key - << " calc: " << alloc.calc_page_free_bits(real_free) - << " idx: " << (idx & ~0x8000) - << endl; - } - tsman.update_page_free_bits(&key, alloc.calc_page_free_bits(real_free)); - jamEntry(); - } -} - -void -Dbtup::disk_page_alloc(Signal* signal, - Tablerec* tabPtrP, Fragrecord* fragPtrP, - Local_key* key, PagePtr pagePtr, Uint32 gci) -{ - jam(); - Uint32 logfile_group_id= fragPtrP->m_logfile_group_id; - Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; - - Uint64 lsn; - if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0) - { - ddassert(pagePtr.p->uncommitted_used_space > 0); - pagePtr.p->uncommitted_used_space--; - key->m_page_idx= ((Fix_page*)pagePtr.p)->alloc_record(); - lsn= disk_page_undo_alloc(pagePtr.p, key, 1, gci, logfile_group_id); - } - else - { - Uint32 sz= key->m_page_idx; - ddassert(pagePtr.p->uncommitted_used_space >= sz); - pagePtr.p->uncommitted_used_space -= sz; - key->m_page_idx= ((Var_page*)pagePtr.p)-> - alloc_record(sz, 
(Var_page*)ctemp_page, 0); - - lsn= disk_page_undo_alloc(pagePtr.p, key, sz, gci, logfile_group_id); - } -} - -void -Dbtup::disk_page_free(Signal *signal, - Tablerec *tabPtrP, Fragrecord * fragPtrP, - Local_key* key, PagePtr pagePtr, Uint32 gci) -{ - jam(); - if (DBG_DISK) - ndbout << " disk_page_free " << *key << endl; - - Uint32 page_idx= key->m_page_idx; - Uint32 logfile_group_id= fragPtrP->m_logfile_group_id; - Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; - Uint32 old_free= pagePtr.p->free_space; - - Uint32 sz; - Uint64 lsn; - if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0) - { - sz = 1; - const Uint32 *src= ((Fix_page*)pagePtr.p)->get_ptr(page_idx, 0); - ndbassert(* (src + 1) != Tup_fixsize_page::FREE_RECORD); - lsn= disk_page_undo_free(pagePtr.p, key, - src, tabPtrP->m_offsets[DD].m_fix_header_size, - gci, logfile_group_id); - - ((Fix_page*)pagePtr.p)->free_record(page_idx); - } - else - { - const Uint32 *src= ((Var_page*)pagePtr.p)->get_ptr(page_idx); - sz= ((Var_page*)pagePtr.p)->get_entry_len(page_idx); - lsn= disk_page_undo_free(pagePtr.p, key, - src, sz, - gci, logfile_group_id); - - ((Var_page*)pagePtr.p)->free_record(page_idx, 0); - } - - Uint32 new_free = pagePtr.p->free_space; - - Uint32 ext = pagePtr.p->m_extent_info_ptr; - Uint32 used = pagePtr.p->uncommitted_used_space; - Uint32 old_idx = pagePtr.p->list_index; - ddassert(old_free >= used); - ddassert(new_free >= used); - ddassert(new_free >= old_free); - ddassert((old_idx & 0x8000) == 0); - - Uint32 new_idx = alloc.calc_page_free_bits(new_free - used); - ddassert(alloc.calc_page_free_bits(old_free - used) == old_idx); - - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, ext); - - if (old_idx != new_idx) - { - jam(); - ddassert(extentPtr.p->m_free_page_count[old_idx]); - extentPtr.p->m_free_page_count[old_idx]--; - extentPtr.p->m_free_page_count[new_idx]++; - - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList new_list(*pool, alloc.m_dirty_pages[new_idx]); - LocalDLList old_list(*pool, alloc.m_dirty_pages[old_idx]); - old_list.remove(pagePtr); - new_list.add(pagePtr); - pagePtr.p->list_index = new_idx; - } - - extentPtr.p->m_free_space += sz; - update_extent_pos(alloc, extentPtr); -#if NOT_YET_FREE_EXTENT - if (check_free(extentPtr.p) == 0) - { - ndbout_c("free: extent is free"); - } -#endif -} - -void -Dbtup::disk_page_abort_prealloc(Signal *signal, Fragrecord* fragPtrP, - Local_key* key, Uint32 sz) -{ - jam(); - Page_cache_client::Request req; - req.m_callback.m_callbackData= sz; - req.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_page_abort_prealloc_callback); - - int flags= Page_cache_client::DIRTY_REQ; - memcpy(&req.m_page, key, sizeof(Local_key)); - - int res= m_pgman.get_page(signal, req, flags); - jamEntry(); - switch(res) - { - case 0: - jam(); - break; - case -1: - ndbrequire(false); - break; - default: - jam(); - Ptr gpage; - m_global_page_pool.getPtr(gpage, (Uint32)res); - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - disk_page_abort_prealloc_callback_1(signal, fragPtrP, pagePtr, sz); - } -} - -void -Dbtup::disk_page_abort_prealloc_callback(Signal* signal, - Uint32 sz, Uint32 page_id) -{ - //ndbout_c("disk_alloc_page_callback id: %d", page_id); - jamEntry(); - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - Ptr tabPtr; - tabPtr.i= pagePtr.p->m_table_id; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - Ptr fragPtr; - 
getFragmentrec(fragPtr, pagePtr.p->m_fragment_id, tabPtr.p); - - disk_page_abort_prealloc_callback_1(signal, fragPtr.p, pagePtr, sz); -} - -void -Dbtup::disk_page_abort_prealloc_callback_1(Signal* signal, - Fragrecord* fragPtrP, - PagePtr pagePtr, - Uint32 sz) -{ - jam(); - disk_page_set_dirty(pagePtr); - - Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; - Uint32 page_idx = pagePtr.p->list_index; - Uint32 used = pagePtr.p->uncommitted_used_space; - Uint32 free = pagePtr.p->free_space; - Uint32 ext = pagePtr.p->m_extent_info_ptr; - - Uint32 old_idx = page_idx & 0x7FFF; - ddassert(free >= used); - ddassert(used >= sz); - ddassert(alloc.calc_page_free_bits(free - used) == old_idx); - Uint32 new_idx = alloc.calc_page_free_bits(free - used + sz); - - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, ext); - if (old_idx != new_idx) - { - jam(); - ddassert(extentPtr.p->m_free_page_count[old_idx]); - extentPtr.p->m_free_page_count[old_idx]--; - extentPtr.p->m_free_page_count[new_idx]++; - - if (old_idx == page_idx) - { - jam(); - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - LocalDLList old_list(*pool, alloc.m_dirty_pages[old_idx]); - LocalDLList new_list(*pool, alloc.m_dirty_pages[new_idx]); - old_list.remove(pagePtr); - new_list.add(pagePtr); - pagePtr.p->list_index = new_idx; - } - else - { - jam(); - pagePtr.p->list_index = new_idx | 0x8000; - } - } - - pagePtr.p->uncommitted_used_space = used - sz; - - extentPtr.p->m_free_space += sz; - update_extent_pos(alloc, extentPtr); -#if NOT_YET_FREE_EXTENT - if (check_free(extentPtr.p) == 0) - { - ndbout_c("abort: extent is free"); - } -#endif -} - -#if NOT_YET_UNDO_ALLOC_EXTENT -void -Dbtup::disk_page_alloc_extent_log_buffer_callback(Signal* signal, - Uint32 extentPtrI, - Uint32 unused) -{ - Ptr extentPtr; - c_extent_pool.getPtr(extentPtr, extentPtrI); - - Local_key key = extentPtr.p->m_key; - Tablespace_client2 tsman(signal, c_tsman, &key); - - Ptr tabPtr; - tabPtr.i= tsman.m_table_id; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - Ptr fragPtr; - getFragmentrec(fragPtr, tsman.m_fragment_id, tabPtr.p); - - Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); - - Disk_undo::AllocExtent alloc; - alloc.m_table = tabPtr.i; - alloc.m_fragment = tsman.m_fragment_id; - alloc.m_page_no = key.m_page_no; - alloc.m_file_no = key.m_file_no; - alloc.m_type_length = (Disk_undo::UNDO_ALLOC_EXTENT<<16)|(sizeof(alloc)>> 2); - - Logfile_client::Change c[1] = {{ &alloc, sizeof(alloc) >> 2 } }; - - Uint64 lsn= lgman.add_entry(c, 1); - - tsman.update_lsn(&key, lsn); - jamEntry(); -} -#endif - -Uint64 -Dbtup::disk_page_undo_alloc(Page* page, const Local_key* key, - Uint32 sz, Uint32 gci, Uint32 logfile_group_id) -{ - jam(); - Logfile_client lgman(this, c_lgman, logfile_group_id); - - Disk_undo::Alloc alloc; - alloc.m_type_length= (Disk_undo::UNDO_ALLOC << 16) | (sizeof(alloc) >> 2); - alloc.m_page_no = key->m_page_no; - alloc.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx; - - Logfile_client::Change c[1] = {{ &alloc, sizeof(alloc) >> 2 } }; - - Uint64 lsn= lgman.add_entry(c, 1); - m_pgman.update_lsn(* key, lsn); - jamEntry(); - - return lsn; -} - -Uint64 -Dbtup::disk_page_undo_update(Page* page, const Local_key* key, - const Uint32* src, Uint32 sz, - Uint32 gci, Uint32 logfile_group_id) -{ - jam(); - Logfile_client lgman(this, c_lgman, logfile_group_id); - - Disk_undo::Update update; - update.m_page_no = key->m_page_no; - update.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx; - update.m_gci= gci; - - 
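The undo records built here pack two 16-bit values into single 32-bit words: m_type_length = (undo type << 16) | record length in words, and m_file_no_page_idx = (file_no << 16) | page_idx; the restart code later splits them apart the same way. A minimal stand-alone check of that packing (the constants are examples, not the real type codes or lengths):

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t pack16(uint32_t hi, uint32_t lo) {
  assert(hi <= 0xFFFF && lo <= 0xFFFF);
  return (hi << 16) | lo;
}

int main() {
  const uint32_t UNDO_UPDATE = 2;   // example code, not the real File_formats value
  uint32_t header_words  = 3;       // page_no, file_no_page_idx, gci
  uint32_t payload_words = 17;      // words of row data appended after the header

  uint32_t type_length      = pack16(UNDO_UPDATE, header_words + payload_words);
  uint32_t file_no_page_idx = pack16(/*file_no*/ 5, /*page_idx*/ 42);

  assert((type_length >> 16) == UNDO_UPDATE);
  assert((type_length & 0xFFFF) == header_words + payload_words);
  assert((file_no_page_idx >> 16) == 5 && (file_no_page_idx & 0xFFFF) == 42);
  std::printf("type_length=0x%08x file_no_page_idx=0x%08x\n",
              type_length, file_no_page_idx);
  return 0;
}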
update.m_type_length= - (Disk_undo::UNDO_UPDATE << 16) | (sz + (sizeof(update) >> 2) - 1); - - Logfile_client::Change c[3] = { - { &update, 3 }, - { src, sz }, - { &update.m_type_length, 1 } - }; - - ndbassert(4*(3 + sz + 1) == (sizeof(update) + 4*sz - 4)); - - Uint64 lsn= lgman.add_entry(c, 3); - m_pgman.update_lsn(* key, lsn); - jamEntry(); - - return lsn; -} - -Uint64 -Dbtup::disk_page_undo_free(Page* page, const Local_key* key, - const Uint32* src, Uint32 sz, - Uint32 gci, Uint32 logfile_group_id) -{ - jam(); - Logfile_client lgman(this, c_lgman, logfile_group_id); - - Disk_undo::Free free; - free.m_page_no = key->m_page_no; - free.m_file_no_page_idx= key->m_file_no << 16 | key->m_page_idx; - free.m_gci= gci; - - free.m_type_length= - (Disk_undo::UNDO_FREE << 16) | (sz + (sizeof(free) >> 2) - 1); - - Logfile_client::Change c[3] = { - { &free, 3 }, - { src, sz }, - { &free.m_type_length, 1 } - }; - - ndbassert(4*(3 + sz + 1) == (sizeof(free) + 4*sz - 4)); - - Uint64 lsn= lgman.add_entry(c, 3); - m_pgman.update_lsn(* key, lsn); - jamEntry(); - - return lsn; -} - -#include - -static Dbtup::Apply_undo f_undo; - -#define DBG_UNDO 0 - -void -Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn, - Uint32 type, const Uint32 * ptr, Uint32 len) -{ - f_undo_done = false; - f_undo.m_lsn= lsn; - f_undo.m_ptr= ptr; - f_undo.m_len= len; - f_undo.m_type = type; - - Page_cache_client::Request preq; - switch(f_undo.m_type){ - case File_formats::Undofile::UNDO_LCP_FIRST: - case File_formats::Undofile::UNDO_LCP: - { - jam(); - ndbrequire(len == 3); - Uint32 lcp = ptr[0]; - Uint32 tableId = ptr[1] >> 16; - Uint32 fragId = ptr[1] & 0xFFFF; - disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP, lcp); - disk_restart_undo_next(signal); - - if (DBG_UNDO) - { - ndbout_c("UNDO LCP %u (%u, %u)", lcp, tableId, fragId); - } - return; - } - case File_formats::Undofile::UNDO_TUP_ALLOC: - { - jam(); - Disk_undo::Alloc* rec= (Disk_undo::Alloc*)ptr; - preq.m_page.m_page_no = rec->m_page_no; - preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16; - preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF; - break; - } - case File_formats::Undofile::UNDO_TUP_UPDATE: - { - jam(); - Disk_undo::Update* rec= (Disk_undo::Update*)ptr; - preq.m_page.m_page_no = rec->m_page_no; - preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16; - preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF; - break; - } - case File_formats::Undofile::UNDO_TUP_FREE: - { - jam(); - Disk_undo::Free* rec= (Disk_undo::Free*)ptr; - preq.m_page.m_page_no = rec->m_page_no; - preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16; - preq.m_page.m_page_idx = rec->m_file_no_page_idx & 0xFFFF; - break; - } - case File_formats::Undofile::UNDO_TUP_CREATE: - /** - * - */ - { - jam(); - Disk_undo::Create* rec= (Disk_undo::Create*)ptr; - Ptr tabPtr; - tabPtr.i= rec->m_table; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - for(Uint32 i = 0; ifragrec[i] != RNIL) - disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i], - Fragrecord::UC_CREATE, 0); - disk_restart_undo_next(signal); - - if (DBG_UNDO) - { - ndbout_c("UNDO CREATE (%u)", tabPtr.i); - } - return; - } - case File_formats::Undofile::UNDO_TUP_DROP: - { - jam(); - Disk_undo::Drop* rec = (Disk_undo::Drop*)ptr; - Ptr tabPtr; - tabPtr.i= rec->m_table; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - for(Uint32 i = 0; ifragrec[i] != RNIL) - disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i], - Fragrecord::UC_CREATE, 0); - disk_restart_undo_next(signal); - - if (DBG_UNDO) - { - ndbout_c("UNDO DROP 
(%u)", tabPtr.i); - } - return; - } - case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT: - jam(); - case File_formats::Undofile::UNDO_TUP_FREE_EXTENT: - jam(); - disk_restart_undo_next(signal); - return; - - case File_formats::Undofile::UNDO_END: - jam(); - f_undo_done = true; - return; - default: - ndbrequire(false); - } - - f_undo.m_key = preq.m_page; - preq.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_restart_undo_callback); - - int flags = 0; - int res= m_pgman.get_page(signal, preq, flags); - jamEntry(); - switch(res) - { - case 0: - break; // Wait for callback - case -1: - ndbrequire(false); - break; - default: - execute(signal, preq.m_callback, res); // run callback - } -} - -void -Dbtup::disk_restart_undo_next(Signal* signal) -{ - signal->theData[0] = LgmanContinueB::EXECUTE_UNDO_RECORD; - sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB); -} - -void -Dbtup::disk_restart_lcp_id(Uint32 tableId, Uint32 fragId, Uint32 lcpId) -{ - jamEntry(); - - if (lcpId == RNIL) - { - disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE, 0); - if (DBG_UNDO) - { - ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId); - } - } - else - { - disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_SET_LCP, lcpId); - if (DBG_UNDO) - { - ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId); - } - - } -} - -void -Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag, - Uint32 lcpId) -{ - Ptr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - if (tabPtr.p->tableStatus == DEFINED) - { - jam(); - FragrecordPtr fragPtr; - getFragmentrec(fragPtr, fragId, tabPtr.p); - if (!fragPtr.isNull()) - { - jam(); - switch(flag){ - case Fragrecord::UC_CREATE: - jam(); - fragPtr.p->m_undo_complete |= flag; - return; - case Fragrecord::UC_LCP: - jam(); - if (fragPtr.p->m_undo_complete == 0 && - fragPtr.p->m_restore_lcp_id == lcpId) - { - jam(); - fragPtr.p->m_undo_complete |= flag; - if (DBG_UNDO) - ndbout_c("table: %u fragment: %u lcp: %u -> done", - tableId, fragId, lcpId); - } - return; - case Fragrecord::UC_SET_LCP: - { - jam(); - if (DBG_UNDO) - ndbout_c("table: %u fragment: %u restore to lcp: %u", - tableId, fragId, lcpId); - ndbrequire(fragPtr.p->m_undo_complete == 0); - ndbrequire(fragPtr.p->m_restore_lcp_id == RNIL); - fragPtr.p->m_restore_lcp_id = lcpId; - return; - } - } - jamLine(flag); - ndbrequire(false); - } - } -} - -void -Dbtup::disk_restart_undo_callback(Signal* signal, - Uint32 id, - Uint32 page_id) -{ - jamEntry(); - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - PagePtr pagePtr; - pagePtr.i = gpage.i; - pagePtr.p = reinterpret_cast(gpage.p); - - Apply_undo* undo = &f_undo; - - bool update = false; - if (! 
(pagePtr.p->list_index & 0x8000) || - pagePtr.p->nextList != RNIL || - pagePtr.p->prevList != RNIL) - { - jam(); - update = true; - pagePtr.p->list_index |= 0x8000; - pagePtr.p->nextList = pagePtr.p->prevList = RNIL; - } - - Uint32 tableId= pagePtr.p->m_table_id; - Uint32 fragId = pagePtr.p->m_fragment_id; - - if (tableId >= cnoOfTablerec) - { - jam(); - if (DBG_UNDO) - ndbout_c("UNDO table> %u", tableId); - disk_restart_undo_next(signal); - return; - } - undo->m_table_ptr.i = tableId; - ptrCheckGuard(undo->m_table_ptr, cnoOfTablerec, tablerec); - - if (undo->m_table_ptr.p->tableStatus != DEFINED) - { - jam(); - if (DBG_UNDO) - ndbout_c("UNDO !defined (%u) ", tableId); - disk_restart_undo_next(signal); - return; - } - - getFragmentrec(undo->m_fragment_ptr, fragId, undo->m_table_ptr.p); - if(undo->m_fragment_ptr.isNull()) - { - jam(); - if (DBG_UNDO) - ndbout_c("UNDO fragment null %u/%u", tableId, fragId); - disk_restart_undo_next(signal); - return; - } - - if (undo->m_fragment_ptr.p->m_undo_complete) - { - jam(); - if (DBG_UNDO) - ndbout_c("UNDO undo complete %u/%u", tableId, fragId); - disk_restart_undo_next(signal); - return; - } - - Local_key key = undo->m_key; -// key.m_page_no = pagePtr.p->m_page_no; -// key.m_file_no = pagePtr.p->m_file_no; - - Uint64 lsn = 0; - lsn += pagePtr.p->m_page_header.m_page_lsn_hi; lsn <<= 32; - lsn += pagePtr.p->m_page_header.m_page_lsn_lo; - - undo->m_page_ptr = pagePtr; - - if (undo->m_lsn <= lsn) - { - jam(); - if (DBG_UNDO) - { - ndbout << "apply: " << undo->m_lsn << "(" << lsn << " )" - << key << " type: " << undo->m_type << endl; - } - - update = true; - if (DBG_UNDO) - ndbout_c("applying %lld", undo->m_lsn); - /** - * Apply undo record - */ - switch(undo->m_type){ - case File_formats::Undofile::UNDO_TUP_ALLOC: - jam(); - disk_restart_undo_alloc(undo); - break; - case File_formats::Undofile::UNDO_TUP_UPDATE: - jam(); - disk_restart_undo_update(undo); - break; - case File_formats::Undofile::UNDO_TUP_FREE: - jam(); - disk_restart_undo_free(undo); - break; - default: - ndbrequire(false); - } - - if (DBG_UNDO) - ndbout << "disk_restart_undo: " << undo->m_type << " " - << undo->m_key << endl; - - lsn = undo->m_lsn - 1; // make sure undo isn't run again... 
- - m_pgman.update_lsn(undo->m_key, lsn); - jamEntry(); - - disk_restart_undo_page_bits(signal, undo); - } - else if (DBG_UNDO) - { - jam(); - ndbout << "ignore: " << undo->m_lsn << "(" << lsn << " )" - << key << " type: " << undo->m_type - << " tab: " << tableId << endl; - } - - disk_restart_undo_next(signal); -} - -void -Dbtup::disk_restart_undo_alloc(Apply_undo* undo) -{ - ndbassert(undo->m_page_ptr.p->m_file_no == undo->m_key.m_file_no); - ndbassert(undo->m_page_ptr.p->m_page_no == undo->m_key.m_page_no); - if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0) - { - ((Fix_page*)undo->m_page_ptr.p)->free_record(undo->m_key.m_page_idx); - } - else - ((Var_page*)undo->m_page_ptr.p)->free_record(undo->m_key.m_page_idx, 0); -} - -void -Dbtup::disk_restart_undo_update(Apply_undo* undo) -{ - Uint32* ptr; - Uint32 len= undo->m_len - 4; - if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0) - { - ptr= ((Fix_page*)undo->m_page_ptr.p)->get_ptr(undo->m_key.m_page_idx, len); - ndbrequire(len == undo->m_table_ptr.p->m_offsets[DD].m_fix_header_size); - } - else - { - ptr= ((Var_page*)undo->m_page_ptr.p)->get_ptr(undo->m_key.m_page_idx); - abort(); - } - - const Disk_undo::Update *update = (const Disk_undo::Update*)undo->m_ptr; - const Uint32* src= update->m_data; - memcpy(ptr, src, 4 * len); -} - -void -Dbtup::disk_restart_undo_free(Apply_undo* undo) -{ - Uint32* ptr, idx = undo->m_key.m_page_idx; - Uint32 len= undo->m_len - 4; - if (undo->m_table_ptr.p->m_attributes[DD].m_no_of_varsize == 0) - { - ndbrequire(len == undo->m_table_ptr.p->m_offsets[DD].m_fix_header_size); - idx= ((Fix_page*)undo->m_page_ptr.p)->alloc_record(idx); - ptr= ((Fix_page*)undo->m_page_ptr.p)->get_ptr(idx, len); - } - else - { - abort(); - } - - ndbrequire(idx == undo->m_key.m_page_idx); - const Disk_undo::Free *free = (const Disk_undo::Free*)undo->m_ptr; - const Uint32* src= free->m_data; - memcpy(ptr, src, 4 * len); -} - -void -Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo) -{ - Fragrecord* fragPtrP = undo->m_fragment_ptr.p; - Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info; - - /** - * Set alloc.m_curr_extent_info_ptr_i to - * current this extent (and move old extend into free matrix) - */ - Page* pageP = undo->m_page_ptr.p; - Uint32 free = pageP->free_space; - Uint32 new_bits = alloc.calc_page_free_bits(free); - pageP->list_index = 0x8000 | new_bits; - - Tablespace_client tsman(signal, c_tsman, - fragPtrP->fragTableId, - fragPtrP->fragmentId, - fragPtrP->m_tablespace_id); - - tsman.restart_undo_page_free_bits(&undo->m_key, new_bits); - jamEntry(); -} - -int -Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId, - const Local_key* key, Uint32 pages) -{ - TablerecPtr tabPtr; - FragrecordPtr fragPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - if (tabPtr.p->tableStatus == DEFINED) - { - getFragmentrec(fragPtr, fragId, tabPtr.p); - if (fragPtr.p->m_undo_complete & Fragrecord::UC_CREATE) - { - jam(); - return -1; - } - - if (!fragPtr.isNull()) - { - Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info; - - Ptr ext; - ndbrequire(c_extent_pool.seize(ext)); - - ndbout << "allocated " << pages << " pages: " << *key << endl; - - ext.p->m_key = *key; - ext.p->m_first_page_no = ext.p->m_key.m_page_no; - ext.p->m_free_space= 0; - bzero(ext.p->m_free_page_count, sizeof(ext.p->m_free_page_count)); - - if (alloc.m_curr_extent_info_ptr_i != RNIL) - { - jam(); - Ptr old; - c_extent_pool.getPtr(old, alloc.m_curr_extent_info_ptr_i); - 
ndbassert(old.p->m_free_matrix_pos == RNIL); - Uint32 pos= alloc.calc_extent_pos(old.p); - Local_extent_info_list new_list(c_extent_pool, alloc.m_free_extents[pos]); - new_list.add(old); - old.p->m_free_matrix_pos= pos; - } - - alloc.m_curr_extent_info_ptr_i = ext.i; - ext.p->m_free_matrix_pos = RNIL; - c_extent_hash.add(ext); - - Local_fragment_extent_list list1(c_extent_pool, alloc.m_extent_list); - list1.add(ext); - return 0; - } - } - - return -1; -} - -void -Dbtup::disk_restart_page_bits(Uint32 tableId, Uint32 fragId, - const Local_key*, Uint32 bits) -{ - jam(); - TablerecPtr tabPtr; - FragrecordPtr fragPtr; - tabPtr.i = tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - getFragmentrec(fragPtr, fragId, tabPtr.p); - Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info; - - Ptr ext; - c_extent_pool.getPtr(ext, alloc.m_curr_extent_info_ptr_i); - - Uint32 size= alloc.calc_page_free_space(bits); - - ext.p->m_free_space += size; - ext.p->m_free_page_count[bits]++; - ndbassert(ext.p->m_free_matrix_pos == RNIL); -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp deleted file mode 100644 index d7d7f20bbc7..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ /dev/null @@ -1,3205 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#include -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include -#include -#include -#include -#include - -/* ----------------------------------------------------------------- */ -/* ----------- INIT_STORED_OPERATIONREC -------------- */ -/* ----------------------------------------------------------------- */ -int Dbtup::initStoredOperationrec(Operationrec* regOperPtr, - KeyReqStruct* req_struct, - Uint32 storedId) -{ - jam(); - StoredProcPtr storedPtr; - c_storedProcPool.getPtr(storedPtr, storedId); - if (storedPtr.i != RNIL) { - if (storedPtr.p->storedCode == ZSCAN_PROCEDURE) { - storedPtr.p->storedCounter++; - regOperPtr->firstAttrinbufrec= storedPtr.p->storedLinkFirst; - regOperPtr->lastAttrinbufrec= storedPtr.p->storedLinkLast; - regOperPtr->currentAttrinbufLen= storedPtr.p->storedProcLength; - req_struct->attrinfo_len= storedPtr.p->storedProcLength; - return ZOK; - } - } - terrorCode= ZSTORED_PROC_ID_ERROR; - return terrorCode; -} - -void Dbtup::copyAttrinfo(Operationrec * regOperPtr, - Uint32* inBuffer) -{ - AttrbufrecPtr copyAttrBufPtr; - Uint32 RnoOfAttrBufrec= cnoOfAttrbufrec; - int RbufLen; - Uint32 RinBufIndex= 0; - Uint32 Rnext; - Uint32 Rfirst; - Uint32 TstoredProcedure= (regOperPtr->storedProcedureId != ZNIL); - Uint32 RnoFree= cnoFreeAttrbufrec; - -//------------------------------------------------------------------------- -// As a prelude to the execution of the TUPKEYREQ we will copy the program -// into the inBuffer to enable easy execution without any complex jumping -// between the buffers. In particular this will make the interpreter less -// complex. Hopefully it does also improve performance. -//------------------------------------------------------------------------- - copyAttrBufPtr.i= regOperPtr->firstAttrinbufrec; - while (copyAttrBufPtr.i != RNIL) { - jam(); - ndbrequire(copyAttrBufPtr.i < RnoOfAttrBufrec); - ptrAss(copyAttrBufPtr, attrbufrec); - RbufLen = copyAttrBufPtr.p->attrbuf[ZBUF_DATA_LEN]; - Rnext = copyAttrBufPtr.p->attrbuf[ZBUF_NEXT]; - Rfirst = cfirstfreeAttrbufrec; - /* - * ATTRINFO comes from 2 mutually exclusive places: - * 1) TUPKEYREQ (also interpreted part) - * 2) STORED_PROCREQ before scan start - * Assert here that both have a check for overflow. - * The "<" instead of "<=" is intentional. - */ - ndbrequire(RinBufIndex + RbufLen < ZATTR_BUFFER_SIZE); - MEMCOPY_NO_WORDS(&inBuffer[RinBufIndex], - ©AttrBufPtr.p->attrbuf[0], - RbufLen); - RinBufIndex += RbufLen; - if (!TstoredProcedure) { - copyAttrBufPtr.p->attrbuf[ZBUF_NEXT]= Rfirst; - cfirstfreeAttrbufrec= copyAttrBufPtr.i; - RnoFree++; - } - copyAttrBufPtr.i= Rnext; - } - cnoFreeAttrbufrec= RnoFree; - if (TstoredProcedure) { - jam(); - StoredProcPtr storedPtr; - c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcedureId); - ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); - storedPtr.p->storedCounter--; - } - // Release the ATTRINFO buffers - regOperPtr->storedProcedureId= RNIL; - regOperPtr->firstAttrinbufrec= RNIL; - regOperPtr->lastAttrinbufrec= RNIL; - regOperPtr->m_any_value= 0; -} - -void Dbtup::handleATTRINFOforTUPKEYREQ(Signal* signal, - const Uint32 *data, - Uint32 len, - Operationrec * regOperPtr) -{ - while(len) - { - Uint32 length = len > AttrInfo::DataLength ? 
AttrInfo::DataLength : len; - - AttrbufrecPtr TAttrinbufptr; - TAttrinbufptr.i= cfirstfreeAttrbufrec; - if ((cfirstfreeAttrbufrec < cnoOfAttrbufrec) && - (cnoFreeAttrbufrec > MIN_ATTRBUF)) { - ptrAss(TAttrinbufptr, attrbufrec); - MEMCOPY_NO_WORDS(&TAttrinbufptr.p->attrbuf[0], - data, - length); - Uint32 RnoFree= cnoFreeAttrbufrec; - Uint32 Rnext= TAttrinbufptr.p->attrbuf[ZBUF_NEXT]; - TAttrinbufptr.p->attrbuf[ZBUF_DATA_LEN]= length; - TAttrinbufptr.p->attrbuf[ZBUF_NEXT]= RNIL; - - AttrbufrecPtr locAttrinbufptr; - Uint32 RnewLen= regOperPtr->currentAttrinbufLen; - - locAttrinbufptr.i= regOperPtr->lastAttrinbufrec; - cfirstfreeAttrbufrec= Rnext; - cnoFreeAttrbufrec= RnoFree - 1; - RnewLen += length; - regOperPtr->lastAttrinbufrec= TAttrinbufptr.i; - regOperPtr->currentAttrinbufLen= RnewLen; - if (locAttrinbufptr.i == RNIL) { - regOperPtr->firstAttrinbufrec= TAttrinbufptr.i; - } else { - jam(); - ptrCheckGuard(locAttrinbufptr, cnoOfAttrbufrec, attrbufrec); - locAttrinbufptr.p->attrbuf[ZBUF_NEXT]= TAttrinbufptr.i; - } - if (RnewLen < ZATTR_BUFFER_SIZE) { - } else { - jam(); - set_trans_state(regOperPtr, TRANS_TOO_MUCH_AI); - return; - } - } else if (cnoFreeAttrbufrec <= MIN_ATTRBUF) { - jam(); - set_trans_state(regOperPtr, TRANS_ERROR_WAIT_TUPKEYREQ); - } else { - ndbrequire(false); - } - - len -= length; - data += length; - } -} - -void Dbtup::execATTRINFO(Signal* signal) -{ - Uint32 Rsig0= signal->theData[0]; - Uint32 Rlen= signal->length(); - jamEntry(); - - receive_attrinfo(signal, Rsig0, signal->theData+3, Rlen-3); -} - -void -Dbtup::receive_attrinfo(Signal* signal, Uint32 op, - const Uint32* data, Uint32 Rlen) -{ - OperationrecPtr regOpPtr; - regOpPtr.i= op; - c_operation_pool.getPtr(regOpPtr, op); - TransState trans_state= get_trans_state(regOpPtr.p); - if (trans_state == TRANS_IDLE) { - handleATTRINFOforTUPKEYREQ(signal, data, Rlen, regOpPtr.p); - return; - } else if (trans_state == TRANS_WAIT_STORED_PROCEDURE_ATTR_INFO) { - storedProcedureAttrInfo(signal, regOpPtr.p, data, Rlen, false); - return; - } - switch (trans_state) { - case TRANS_ERROR_WAIT_STORED_PROCREQ: - jam(); - case TRANS_TOO_MUCH_AI: - jam(); - case TRANS_ERROR_WAIT_TUPKEYREQ: - jam(); - return; /* IGNORE ATTRINFO IN THOSE STATES, WAITING FOR ABORT SIGNAL */ - case TRANS_DISCONNECTED: - jam(); - case TRANS_STARTED: - jam(); - default: - ndbrequire(false); - } -} - -void -Dbtup::setChecksum(Tuple_header* tuple_ptr, - Tablerec* regTabPtr) -{ - tuple_ptr->m_checksum= 0; - tuple_ptr->m_checksum= calculateChecksum(tuple_ptr, regTabPtr); -} - -Uint32 -Dbtup::calculateChecksum(Tuple_header* tuple_ptr, - Tablerec* regTabPtr) -{ - Uint32 checksum; - Uint32 i, rec_size, *tuple_header; - rec_size= regTabPtr->m_offsets[MM].m_fix_header_size; - tuple_header= tuple_ptr->m_data; - checksum= 0; - // includes tupVersion - //printf("%p - ", tuple_ptr); - - for (i= 0; i < rec_size-Tuple_header::HeaderSize; i++) { - checksum ^= tuple_header[i]; - //printf("%.8x ", tuple_header[i]); - } - - //printf("-> %.8x\n", checksum); - -#if 0 - if (var_sized) { - /* - if (! 
req_struct->fix_var_together) { - jam(); - checksum ^= tuple_header[rec_size]; - } - */ - jam(); - var_data_part= req_struct->var_data_start; - vsize_words= calculate_total_var_size(req_struct->var_len_array, - regTabPtr->no_var_attr); - ndbassert(req_struct->var_data_end >= &var_data_part[vsize_words]); - for (i= 0; i < vsize_words; i++) { - checksum ^= var_data_part[i]; - } - } -#endif - return checksum; -} - -/* ----------------------------------------------------------------- */ -/* ----------- INSERT_ACTIVE_OP_LIST -------------- */ -/* ----------------------------------------------------------------- */ -bool -Dbtup::insertActiveOpList(OperationrecPtr regOperPtr, - KeyReqStruct* req_struct) -{ - OperationrecPtr prevOpPtr; - ndbrequire(!regOperPtr.p->op_struct.in_active_list); - regOperPtr.p->op_struct.in_active_list= true; - req_struct->prevOpPtr.i= - prevOpPtr.i= req_struct->m_tuple_ptr->m_operation_ptr_i; - regOperPtr.p->prevActiveOp= prevOpPtr.i; - regOperPtr.p->nextActiveOp= RNIL; - regOperPtr.p->m_undo_buffer_space= 0; - req_struct->m_tuple_ptr->m_operation_ptr_i= regOperPtr.i; - if (prevOpPtr.i == RNIL) { - set_change_mask_state(regOperPtr.p, USE_SAVED_CHANGE_MASK); - regOperPtr.p->saved_change_mask[0] = 0; - regOperPtr.p->saved_change_mask[1] = 0; - return true; - } else { - req_struct->prevOpPtr.p= prevOpPtr.p= c_operation_pool.getPtr(prevOpPtr.i); - prevOpPtr.p->nextActiveOp= regOperPtr.i; - - regOperPtr.p->op_struct.m_wait_log_buffer= - prevOpPtr.p->op_struct.m_wait_log_buffer; - regOperPtr.p->op_struct.m_load_diskpage_on_commit= - prevOpPtr.p->op_struct.m_load_diskpage_on_commit; - regOperPtr.p->m_undo_buffer_space= prevOpPtr.p->m_undo_buffer_space; - // start with prev mask (matters only for UPD o UPD) - set_change_mask_state(regOperPtr.p, get_change_mask_state(prevOpPtr.p)); - regOperPtr.p->saved_change_mask[0] = prevOpPtr.p->saved_change_mask[0]; - regOperPtr.p->saved_change_mask[1] = prevOpPtr.p->saved_change_mask[1]; - - regOperPtr.p->m_any_value = prevOpPtr.p->m_any_value; - - prevOpPtr.p->op_struct.m_wait_log_buffer= 0; - prevOpPtr.p->op_struct.m_load_diskpage_on_commit= 0; - - if(prevOpPtr.p->op_struct.tuple_state == TUPLE_PREPARED) - { - Uint32 op= regOperPtr.p->op_struct.op_type; - Uint32 prevOp= prevOpPtr.p->op_struct.op_type; - if (prevOp == ZDELETE) - { - if(op == ZINSERT) - { - // mark both - prevOpPtr.p->op_struct.delete_insert_flag= true; - regOperPtr.p->op_struct.delete_insert_flag= true; - return true; - } else { - terrorCode= ZTUPLE_DELETED_ERROR; - return false; - } - } - else if(op == ZINSERT && prevOp != ZDELETE) - { - terrorCode= ZINSERT_ERROR; - return false; - } - return true; - } - else - { - terrorCode= ZMUST_BE_ABORTED_ERROR; - return false; - } - } -} - -bool -Dbtup::setup_read(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - bool disk) -{ - OperationrecPtr currOpPtr; - currOpPtr.i= req_struct->m_tuple_ptr->m_operation_ptr_i; - if (currOpPtr.i == RNIL) - { - if (regTabPtr->need_expand(disk)) - prepare_read(req_struct, regTabPtr, disk); - return true; - } - - do { - Uint32 savepointId= regOperPtr->savepointId; - bool dirty= req_struct->dirty_op; - - c_operation_pool.getPtr(currOpPtr); - bool sameTrans= c_lqh->is_same_trans(currOpPtr.p->userpointer, - req_struct->trans_id1, - req_struct->trans_id2); - /** - * Read committed in same trans reads latest copy - */ - if(dirty && !sameTrans) - { - savepointId= 0; - } - else if(sameTrans) - { - // Use savepoint even in read committed mode - dirty= 
false; - } - - bool found= find_savepoint(currOpPtr, savepointId); - - Uint32 currOp= currOpPtr.p->op_struct.op_type; - - if((found && currOp == ZDELETE) || - ((dirty || !found) && currOp == ZINSERT)) - { - terrorCode= ZTUPLE_DELETED_ERROR; - break; - } - - if(dirty || !found) - { - - } - else - { - req_struct->m_tuple_ptr= (Tuple_header*) - c_undo_buffer.get_ptr(&currOpPtr.p->m_copy_tuple_location); - } - - if (regTabPtr->need_expand(disk)) - prepare_read(req_struct, regTabPtr, disk); - -#if 0 - ndbout_c("reading copy"); - Uint32 *var_ptr = fixed_ptr+regTabPtr->var_offset; - req_struct->m_tuple_ptr= fixed_ptr; - req_struct->fix_var_together= true; - req_struct->var_len_array= (Uint16*)var_ptr; - req_struct->var_data_start= var_ptr+regTabPtr->var_array_wsize; - Uint32 var_sz32= init_var_pos_array((Uint16*)var_ptr, - req_struct->var_pos_array, - regTabPtr->no_var_attr); - req_struct->var_data_end= var_ptr+regTabPtr->var_array_wsize + var_sz32; -#endif - return true; - } while(0); - - return false; -} - -int -Dbtup::load_diskpage(Signal* signal, - Uint32 opRec, Uint32 fragPtrI, - Uint32 local_key, Uint32 flags) -{ - c_operation_pool.getPtr(operPtr, opRec); - fragptr.i= fragPtrI; - ptrCheckGuard(fragptr, cnoOfFragrec, fragrecord); - - Operationrec * regOperPtr= operPtr.p; - Fragrecord * regFragPtr= fragptr.p; - - tabptr.i = regFragPtr->fragTableId; - ptrCheckGuard(tabptr, cnoOfTablerec, tablerec); - Tablerec* regTabPtr = tabptr.p; - - if(local_key == ~(Uint32)0) - { - jam(); - regOperPtr->op_struct.m_wait_log_buffer= 1; - regOperPtr->op_struct.m_load_diskpage_on_commit= 1; - return 1; - } - - jam(); - Uint32 page_idx= local_key & MAX_TUPLES_PER_PAGE; - Uint32 frag_page_id= local_key >> MAX_TUPLES_BITS; - regOperPtr->m_tuple_location.m_page_no= getRealpid(regFragPtr, - frag_page_id); - regOperPtr->m_tuple_location.m_page_idx= page_idx; - - PagePtr page_ptr; - Uint32* tmp= get_ptr(&page_ptr, ®OperPtr->m_tuple_location, regTabPtr); - Tuple_header* ptr= (Tuple_header*)tmp; - - int res= 1; - if(ptr->m_header_bits & Tuple_header::DISK_PART) - { - Page_cache_client::Request req; - memcpy(&req.m_page, ptr->get_disk_ref_ptr(regTabPtr), sizeof(Local_key)); - req.m_callback.m_callbackData= opRec; - req.m_callback.m_callbackFunction= - safe_cast(&Dbtup::disk_page_load_callback); - -#ifdef ERROR_INSERT - if (ERROR_INSERTED(4022)) - { - flags |= Page_cache_client::DELAY_REQ; - req.m_delay_until_time = NdbTick_CurrentMillisecond()+(Uint64)3000; - } -#endif - - if((res= m_pgman.get_page(signal, req, flags)) > 0) - { - //ndbout_c("in cache"); - // In cache - } - else if(res == 0) - { - //ndbout_c("waiting for callback"); - // set state - } - else - { - // Error - } - } - - switch(flags & 7) - { - case ZREAD: - case ZREAD_EX: - break; - case ZDELETE: - case ZUPDATE: - case ZINSERT: - case ZWRITE: - regOperPtr->op_struct.m_wait_log_buffer= 1; - regOperPtr->op_struct.m_load_diskpage_on_commit= 1; - } - return res; -} - -void -Dbtup::disk_page_load_callback(Signal* signal, Uint32 opRec, Uint32 page_id) -{ - c_operation_pool.getPtr(operPtr, opRec); - c_lqh->acckeyconf_load_diskpage_callback(signal, - operPtr.p->userpointer, page_id); -} - -int -Dbtup::load_diskpage_scan(Signal* signal, - Uint32 opRec, Uint32 fragPtrI, - Uint32 local_key, Uint32 flags) -{ - c_operation_pool.getPtr(operPtr, opRec); - fragptr.i= fragPtrI; - ptrCheckGuard(fragptr, cnoOfFragrec, fragrecord); - - Operationrec * regOperPtr= operPtr.p; - Fragrecord * regFragPtr= fragptr.p; - - tabptr.i = regFragPtr->fragTableId; - ptrCheckGuard(tabptr, 
cnoOfTablerec, tablerec); - Tablerec* regTabPtr = tabptr.p; - - jam(); - Uint32 page_idx= local_key & MAX_TUPLES_PER_PAGE; - Uint32 frag_page_id= local_key >> MAX_TUPLES_BITS; - regOperPtr->m_tuple_location.m_page_no= getRealpid(regFragPtr, - frag_page_id); - regOperPtr->m_tuple_location.m_page_idx= page_idx; - regOperPtr->op_struct.m_load_diskpage_on_commit= 0; - - PagePtr page_ptr; - Uint32* tmp= get_ptr(&page_ptr, ®OperPtr->m_tuple_location, regTabPtr); - Tuple_header* ptr= (Tuple_header*)tmp; - - int res= 1; - if(ptr->m_header_bits & Tuple_header::DISK_PART) - { - Page_cache_client::Request req; - memcpy(&req.m_page, ptr->get_disk_ref_ptr(regTabPtr), sizeof(Local_key)); - req.m_callback.m_callbackData= opRec; - req.m_callback.m_callbackFunction= - safe_cast(&Dbtup::disk_page_load_scan_callback); - - if((res= m_pgman.get_page(signal, req, flags)) > 0) - { - // ndbout_c("in cache"); - // In cache - } - else if(res == 0) - { - //ndbout_c("waiting for callback"); - // set state - } - else - { - // Error - } - } - return res; -} - -void -Dbtup::disk_page_load_scan_callback(Signal* signal, - Uint32 opRec, Uint32 page_id) -{ - c_operation_pool.getPtr(operPtr, opRec); - c_lqh->next_scanconf_load_diskpage_callback(signal, - operPtr.p->userpointer, page_id); -} - -void Dbtup::execTUPKEYREQ(Signal* signal) -{ - TupKeyReq * tupKeyReq= (TupKeyReq *)signal->getDataPtr(); - KeyReqStruct req_struct; - Uint32 sig1, sig2, sig3, sig4; - - Uint32 RoperPtr= tupKeyReq->connectPtr; - Uint32 Rfragptr= tupKeyReq->fragPtr; - - Uint32 RnoOfFragrec= cnoOfFragrec; - Uint32 RnoOfTablerec= cnoOfTablerec; - - jamEntry(); - fragptr.i= Rfragptr; - - ndbrequire(Rfragptr < RnoOfFragrec); - - c_operation_pool.getPtr(operPtr, RoperPtr); - ptrAss(fragptr, fragrecord); - - Uint32 TrequestInfo= tupKeyReq->request; - - Operationrec * regOperPtr= operPtr.p; - Fragrecord * regFragPtr= fragptr.p; - - tabptr.i = regFragPtr->fragTableId; - ptrCheckGuard(tabptr, RnoOfTablerec, tablerec); - Tablerec* regTabPtr = tabptr.p; - - req_struct.signal= signal; - req_struct.dirty_op= TrequestInfo & 1; - req_struct.interpreted_exec= (TrequestInfo >> 10) & 1; - req_struct.no_fired_triggers= 0; - req_struct.read_length= 0; - req_struct.max_attr_id_updated= 0; - req_struct.no_changed_attrs= 0; - req_struct.last_row= false; - req_struct.changeMask.clear(); - - if (unlikely(get_trans_state(regOperPtr) != TRANS_IDLE)) - { - TUPKEY_abort(signal, 39); - return; - } - - /* ----------------------------------------------------------------- */ - // Operation is ZREAD when we arrive here so no need to worry about the - // abort process. 
- /* ----------------------------------------------------------------- */ - /* ----------- INITIATE THE OPERATION RECORD -------------- */ - /* ----------------------------------------------------------------- */ - Uint32 Rstoredid= tupKeyReq->storedProcedure; - - regOperPtr->fragmentPtr= Rfragptr; - regOperPtr->op_struct.op_type= (TrequestInfo >> 6) & 0xf; - regOperPtr->op_struct.delete_insert_flag = false; - regOperPtr->storedProcedureId= Rstoredid; - - regOperPtr->m_copy_tuple_location.setNull(); - regOperPtr->tupVersion= ZNIL; - - sig1= tupKeyReq->savePointId; - sig2= tupKeyReq->primaryReplica; - sig3= tupKeyReq->keyRef2; - - regOperPtr->savepointId= sig1; - regOperPtr->op_struct.primary_replica= sig2; - Uint32 pageidx = regOperPtr->m_tuple_location.m_page_idx= sig3; - - sig1= tupKeyReq->opRef; - sig2= tupKeyReq->tcOpIndex; - sig3= tupKeyReq->coordinatorTC; - sig4= tupKeyReq->keyRef1; - - req_struct.tc_operation_ptr= sig1; - req_struct.TC_index= sig2; - req_struct.TC_ref= sig3; - Uint32 pageid = req_struct.frag_page_id= sig4; - req_struct.m_use_rowid = (TrequestInfo >> 11) & 1; - - sig1= tupKeyReq->attrBufLen; - sig2= tupKeyReq->applRef; - sig3= tupKeyReq->transId1; - sig4= tupKeyReq->transId2; - - Uint32 disk_page= tupKeyReq->disk_page; - - req_struct.log_size= sig1; - req_struct.attrinfo_len= sig1; - req_struct.rec_blockref= sig2; - req_struct.trans_id1= sig3; - req_struct.trans_id2= sig4; - req_struct.m_disk_page_ptr.i= disk_page; - - sig1 = tupKeyReq->m_row_id_page_no; - sig2 = tupKeyReq->m_row_id_page_idx; - - req_struct.m_row_id.m_page_no = sig1; - req_struct.m_row_id.m_page_idx = sig2; - - Uint32 Roptype = regOperPtr->op_struct.op_type; - - if (Rstoredid != ZNIL) { - ndbrequire(initStoredOperationrec(regOperPtr, - &req_struct, - Rstoredid) == ZOK); - } - - copyAttrinfo(regOperPtr, &cinBuffer[0]); - - Uint32 localkey = (pageid << MAX_TUPLES_BITS) + pageidx; - if (Roptype == ZINSERT && localkey == ~ (Uint32) 0) - { - // No tuple allocatated yet - goto do_insert; - } - - /** - * Get pointer to tuple - */ - regOperPtr->m_tuple_location.m_page_no= getRealpid(regFragPtr, - req_struct.frag_page_id); - - setup_fixed_part(&req_struct, regOperPtr, regTabPtr); - - /** - * Check operation - */ - if (Roptype == ZREAD) { - jam(); - - if (setup_read(&req_struct, regOperPtr, regFragPtr, regTabPtr, - disk_page != RNIL)) - { - if(handleReadReq(signal, regOperPtr, regTabPtr, &req_struct) != -1) - { - req_struct.log_size= 0; - sendTUPKEYCONF(signal, &req_struct, regOperPtr); - /* ---------------------------------------------------------------- */ - // Read Operations need not to be taken out of any lists. - // We also do not need to wait for commit since there is no changes - // to commit. Thus we - // prepare the operation record already now for the next operation. - // Write operations have set the state to STARTED above indicating - // that they are waiting for the Commit or Abort decision. 
- /* ---------------------------------------------------------------- */ - set_trans_state(regOperPtr, TRANS_IDLE); - regOperPtr->currentAttrinbufLen= 0; - } - return; - } - tupkeyErrorLab(signal); - return; - } - - if(insertActiveOpList(operPtr, &req_struct)) - { - if(Roptype == ZINSERT) - { - jam(); - do_insert: - if (handleInsertReq(signal, operPtr, - fragptr, regTabPtr, &req_struct) == -1) - { - return; - } - if (!regTabPtr->tuxCustomTriggers.isEmpty()) - { - jam(); - if (executeTuxInsertTriggers(signal, - regOperPtr, - regFragPtr, - regTabPtr) != 0) { - jam(); - /* - * TUP insert succeeded but add of TUX entries failed. All - * TUX changes have been rolled back at this point. - * - * We will abort via tupkeyErrorLab() as usual. This routine - * however resets the operation to ZREAD. The TUP_ABORTREQ - * arriving later cannot then undo the insert. - * - * Therefore we call TUP_ABORTREQ already now. Diskdata etc - * should be in memory and timeslicing cannot occur. We must - * skip TUX abort triggers since TUX is already aborted. - */ - signal->theData[0] = operPtr.i; - do_tup_abortreq(signal, ZSKIP_TUX_TRIGGERS); - tupkeyErrorLab(signal); - return; - } - } - checkImmediateTriggersAfterInsert(&req_struct, - regOperPtr, - regTabPtr, - disk_page != RNIL); - set_change_mask_state(regOperPtr, SET_ALL_MASK); - sendTUPKEYCONF(signal, &req_struct, regOperPtr); - return; - } - - if (Roptype == ZUPDATE) { - jam(); - if (handleUpdateReq(signal, regOperPtr, - regFragPtr, regTabPtr, &req_struct, disk_page != RNIL) == -1) { - return; - } - // If update operation is done on primary, - // check any after op triggers - terrorCode= 0; - if (!regTabPtr->tuxCustomTriggers.isEmpty()) { - jam(); - if (executeTuxUpdateTriggers(signal, - regOperPtr, - regFragPtr, - regTabPtr) != 0) { - jam(); - /* - * See insert case. - */ - signal->theData[0] = operPtr.i; - do_tup_abortreq(signal, ZSKIP_TUX_TRIGGERS); - tupkeyErrorLab(signal); - return; - } - } - checkImmediateTriggersAfterUpdate(&req_struct, - regOperPtr, - regTabPtr, - disk_page != RNIL); - // XXX use terrorCode for now since all methods are void - if (terrorCode != 0) - { - tupkeyErrorLab(signal); - return; - } - update_change_mask_info(&req_struct, regOperPtr); - sendTUPKEYCONF(signal, &req_struct, regOperPtr); - return; - } - else if(Roptype == ZDELETE) - { - jam(); - req_struct.log_size= 0; - if (handleDeleteReq(signal, regOperPtr, - regFragPtr, regTabPtr, - &req_struct, - disk_page != RNIL) == -1) { - return; - } - /* - * TUX doesn't need to check for triggers at delete since entries in - * the index are kept until commit time. - */ - - /* - * Secondary index triggers fire on the primary after a delete. - */ - checkImmediateTriggersAfterDelete(&req_struct, - regOperPtr, - regTabPtr, - disk_page != RNIL); - set_change_mask_state(regOperPtr, DELETE_CHANGES); - sendTUPKEYCONF(signal, &req_struct, regOperPtr); - return; - } - else - { - ndbrequire(false); // Invalid op type - } - } - - tupkeyErrorLab(signal); - } - -void -Dbtup::setup_fixed_part(KeyReqStruct* req_struct, - Operationrec* regOperPtr, - Tablerec* regTabPtr) -{ - PagePtr page_ptr; - Uint32* ptr= get_ptr(&page_ptr, ®OperPtr->m_tuple_location, regTabPtr); - req_struct->m_page_ptr = page_ptr; - req_struct->m_tuple_ptr = (Tuple_header*)ptr; - - ndbassert(regOperPtr->op_struct.op_type == ZINSERT || (! 
(req_struct->m_tuple_ptr->m_header_bits & Tuple_header::FREE))); - - req_struct->check_offset[MM]= regTabPtr->get_check_offset(MM); - req_struct->check_offset[DD]= regTabPtr->get_check_offset(DD); - - Uint32 num_attr= regTabPtr->m_no_of_attributes; - Uint32 descr_start= regTabPtr->tabDescriptor; - TableDescriptor *tab_descr= &tableDescriptor[descr_start]; - ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); - req_struct->attr_descr= tab_descr; -} - - /* ---------------------------------------------------------------- */ - /* ------------------------ CONFIRM REQUEST ----------------------- */ - /* ---------------------------------------------------------------- */ - void Dbtup::sendTUPKEYCONF(Signal* signal, - KeyReqStruct *req_struct, - Operationrec * regOperPtr) -{ - TupKeyConf * tupKeyConf= (TupKeyConf *)signal->getDataPtrSend(); - - Uint32 Rcreate_rowid = req_struct->m_use_rowid; - Uint32 RuserPointer= regOperPtr->userpointer; - Uint32 RnoFiredTriggers= req_struct->no_fired_triggers; - Uint32 log_size= req_struct->log_size; - Uint32 read_length= req_struct->read_length; - Uint32 last_row= req_struct->last_row; - - set_trans_state(regOperPtr, TRANS_STARTED); - set_tuple_state(regOperPtr, TUPLE_PREPARED); - tupKeyConf->userPtr= RuserPointer; - tupKeyConf->readLength= read_length; - tupKeyConf->writeLength= log_size; - tupKeyConf->noFiredTriggers= RnoFiredTriggers; - tupKeyConf->lastRow= last_row; - tupKeyConf->rowid = Rcreate_rowid; - - EXECUTE_DIRECT(DBLQH, GSN_TUPKEYCONF, signal, - TupKeyConf::SignalLength); - -} - - -#define MAX_READ (sizeof(signal->theData) > MAX_MESSAGE_SIZE ? MAX_MESSAGE_SIZE : sizeof(signal->theData)) - -/* ---------------------------------------------------------------- */ -/* ----------------------------- READ ---------------------------- */ -/* ---------------------------------------------------------------- */ -int Dbtup::handleReadReq(Signal* signal, - Operationrec* regOperPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct) -{ - Uint32 *dst; - Uint32 dstLen, start_index; - const BlockReference sendBref= req_struct->rec_blockref; - if ((regTabPtr->m_bits & Tablerec::TR_Checksum) && - (calculateChecksum(req_struct->m_tuple_ptr, regTabPtr) != 0)) { - jam(); - ndbout_c("here2"); - terrorCode= ZTUPLE_CORRUPTED_ERROR; - tupkeyErrorLab(signal); - return -1; - } - - const Uint32 node = refToNode(sendBref); - if(node != 0 && node != getOwnNodeId()) { - start_index= 25; - } else { - jam(); - /** - * execute direct - */ - start_index= 3; - } - dst= &signal->theData[start_index]; - dstLen= (MAX_READ / 4) - start_index; - if (!req_struct->interpreted_exec) { - jam(); - int ret = readAttributes(req_struct, - &cinBuffer[0], - req_struct->attrinfo_len, - dst, - dstLen, - false); - if (likely(ret != -1)) { -/* ------------------------------------------------------------------------- */ -// We have read all data into coutBuffer. Now send it to the API. 
-/* ------------------------------------------------------------------------- */ - jam(); - Uint32 TnoOfDataRead= (Uint32) ret; - req_struct->read_length= TnoOfDataRead; - sendReadAttrinfo(signal, req_struct, TnoOfDataRead, regOperPtr); - return 0; - } - } else { - jam(); - if (likely(interpreterStartLab(signal, req_struct) != -1)) { - return 0; - } - return -1; - } - - jam(); - tupkeyErrorLab(signal); - return -1; -} - -/* ---------------------------------------------------------------- */ -/* ---------------------------- UPDATE ---------------------------- */ -/* ---------------------------------------------------------------- */ -int Dbtup::handleUpdateReq(Signal* signal, - Operationrec* operPtrP, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - KeyReqStruct* req_struct, - bool disk) -{ - Uint32 *dst; - Tuple_header *base= req_struct->m_tuple_ptr, *org; - if ((dst= c_undo_buffer.alloc_copy_tuple(&operPtrP->m_copy_tuple_location, - regTabPtr->total_rec_size)) == 0) - { - terrorCode= ZMEM_NOMEM_ERROR; - goto error; - } - - Uint32 tup_version; - if(operPtrP->is_first_operation()) - { - org= req_struct->m_tuple_ptr; - tup_version= org->get_tuple_version(); - } - else - { - Operationrec* prevOp= req_struct->prevOpPtr.p; - tup_version= prevOp->tupVersion; - org= (Tuple_header*)c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location); - } - - /** - * Check consistency before update/delete - */ - req_struct->m_tuple_ptr= org; - if ((regTabPtr->m_bits & Tablerec::TR_Checksum) && - (calculateChecksum(req_struct->m_tuple_ptr, regTabPtr) != 0)) - { - terrorCode= ZTUPLE_CORRUPTED_ERROR; - goto error; - } - - req_struct->m_tuple_ptr= (Tuple_header*)dst; - - union { - Uint32 sizes[4]; - Uint64 cmp[2]; - }; - - disk = disk || (org->m_header_bits & Tuple_header::DISK_INLINE); - if (regTabPtr->need_expand(disk)) - { - expand_tuple(req_struct, sizes, org, regTabPtr, disk); - if(disk && operPtrP->m_undo_buffer_space == 0) - { - operPtrP->op_struct.m_wait_log_buffer = 1; - operPtrP->op_struct.m_load_diskpage_on_commit = 1; - Uint32 sz= operPtrP->m_undo_buffer_space= - (sizeof(Dbtup::Disk_undo::Update) >> 2) + sizes[DD] - 1; - - terrorCode= c_lgman->alloc_log_space(regFragPtr->m_logfile_group_id, - sz); - if(unlikely(terrorCode)) - { - operPtrP->m_undo_buffer_space= 0; - goto error; - } - } - } - else - { - memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size); - } - - tup_version= (tup_version + 1) & ZTUP_VERSION_MASK; - operPtrP->tupVersion= tup_version; - - if (!req_struct->interpreted_exec) { - jam(); - int retValue = updateAttributes(req_struct, - &cinBuffer[0], - req_struct->attrinfo_len); - if (unlikely(retValue == -1)) - goto error; - } else { - jam(); - if (unlikely(interpreterStartLab(signal, req_struct) == -1)) - return -1; - } - - if (regTabPtr->need_shrink()) - { - shrink_tuple(req_struct, sizes+2, regTabPtr, disk); - if (cmp[0] != cmp[1] && handle_size_change_after_update(req_struct, - base, - operPtrP, - regFragPtr, - regTabPtr, - sizes)) { - goto error; - } - } - - req_struct->m_tuple_ptr->set_tuple_version(tup_version); - if (regTabPtr->m_bits & Tablerec::TR_Checksum) { - jam(); - setChecksum(req_struct->m_tuple_ptr, regTabPtr); - } - return 0; - -error: - tupkeyErrorLab(signal); - return -1; -} - -/* ---------------------------------------------------------------- */ -/* ----------------------------- INSERT --------------------------- */ -/* ---------------------------------------------------------------- */ -void -Dbtup::prepare_initial_insert(KeyReqStruct *req_struct, - 
Operationrec* regOperPtr, - Tablerec* regTabPtr) -{ - Uint32 disk_undo = regTabPtr->m_no_of_disk_attributes ? - sizeof(Dbtup::Disk_undo::Alloc) >> 2 : 0; - regOperPtr->nextActiveOp= RNIL; - regOperPtr->prevActiveOp= RNIL; - regOperPtr->op_struct.in_active_list= true; - regOperPtr->m_undo_buffer_space= disk_undo; - - req_struct->check_offset[MM]= regTabPtr->get_check_offset(MM); - req_struct->check_offset[DD]= regTabPtr->get_check_offset(DD); - - Uint32 num_attr= regTabPtr->m_no_of_attributes; - Uint32 descr_start= regTabPtr->tabDescriptor; - Uint32 order_desc= regTabPtr->m_real_order_descriptor; - TableDescriptor *tab_descr= &tableDescriptor[descr_start]; - ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); - req_struct->attr_descr= tab_descr; - Uint16* order= (Uint16*)&tableDescriptor[order_desc]; - - const Uint32 cnt1= regTabPtr->m_attributes[MM].m_no_of_varsize; - const Uint32 cnt2= regTabPtr->m_attributes[DD].m_no_of_varsize; - Uint32 *ptr= req_struct->m_tuple_ptr->get_end_of_fix_part_ptr(regTabPtr); - Var_part_ref* ref = req_struct->m_tuple_ptr->get_var_part_ref_ptr(regTabPtr); - - if (regTabPtr->m_bits & Tablerec::TR_ForceVarPart) - { - ref->m_page_no = RNIL; - ref->m_page_idx = Tup_varsize_page::END_OF_FREE_LIST; - } - - if(cnt1) - { - KeyReqStruct::Var_data* dst= &req_struct->m_var_data[MM]; - dst->m_data_ptr= (char*)(((Uint16*)ptr)+cnt1+1); - dst->m_offset_array_ptr= req_struct->var_pos_array; - dst->m_var_len_offset= cnt1; - dst->m_max_var_offset= regTabPtr->m_offsets[MM].m_max_var_offset; - // Disk part is 32-bit aligned - ptr= ALIGN_WORD(dst->m_data_ptr+regTabPtr->m_offsets[MM].m_max_var_offset); - order += regTabPtr->m_attributes[MM].m_no_of_fixsize; - Uint32 pos= 0; - Uint16 *pos_ptr = req_struct->var_pos_array; - Uint16 *len_ptr = pos_ptr + cnt1; - for(Uint32 i= 0; im_disk_ptr= (Tuple_header*)ptr; - - ndbrequire(cnt2 == 0); - - // Set all null bits - memset(req_struct->m_tuple_ptr->m_null_bits+ - regTabPtr->m_offsets[MM].m_null_offset, 0xFF, - 4*regTabPtr->m_offsets[MM].m_null_words); - memset(req_struct->m_disk_ptr->m_null_bits+ - regTabPtr->m_offsets[DD].m_null_offset, 0xFF, - 4*regTabPtr->m_offsets[DD].m_null_words); - req_struct->m_tuple_ptr->m_header_bits= - disk_undo ? 
(Tuple_header::DISK_ALLOC | Tuple_header::DISK_INLINE) : 0; -} - -int Dbtup::handleInsertReq(Signal* signal, - Ptr regOperPtr, - Ptr fragPtr, - Tablerec* regTabPtr, - KeyReqStruct *req_struct) -{ - Uint32 tup_version = 1; - Fragrecord* regFragPtr = fragPtr.p; - Uint32 *dst, *ptr= 0; - Tuple_header *base= req_struct->m_tuple_ptr, *org= base; - Tuple_header *tuple_ptr; - - bool disk = regTabPtr->m_no_of_disk_attributes > 0; - bool mem_insert = regOperPtr.p->is_first_operation(); - bool disk_insert = mem_insert && disk; - bool varsize = regTabPtr->m_attributes[MM].m_no_of_varsize; - bool rowid = req_struct->m_use_rowid; - Uint32 real_page_id = regOperPtr.p->m_tuple_location.m_page_no; - Uint32 frag_page_id = req_struct->frag_page_id; - - union { - Uint32 sizes[4]; - Uint64 cmp[2]; - }; - - if (ERROR_INSERTED(4014)) - { - dst = 0; - goto undo_buffer_error; - } - - dst= c_undo_buffer.alloc_copy_tuple(®OperPtr.p->m_copy_tuple_location, - regTabPtr->total_rec_size); - if (unlikely(dst == 0)) - { - goto undo_buffer_error; - } - tuple_ptr= req_struct->m_tuple_ptr= (Tuple_header*)dst; - - if(mem_insert) - { - jam(); - prepare_initial_insert(req_struct, regOperPtr.p, regTabPtr); - } - else - { - Operationrec* prevOp= req_struct->prevOpPtr.p; - ndbassert(prevOp->op_struct.op_type == ZDELETE); - tup_version= prevOp->tupVersion + 1; - - if(!prevOp->is_first_operation()) - org= (Tuple_header*)c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location); - if (regTabPtr->need_expand()) - { - expand_tuple(req_struct, sizes, org, regTabPtr, !disk_insert); - memset(req_struct->m_disk_ptr->m_null_bits+ - regTabPtr->m_offsets[DD].m_null_offset, 0xFF, - 4*regTabPtr->m_offsets[DD].m_null_words); - } - else - { - memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size); - } - memset(tuple_ptr->m_null_bits+ - regTabPtr->m_offsets[MM].m_null_offset, 0xFF, - 4*regTabPtr->m_offsets[MM].m_null_words); - } - - if (disk_insert) - { - int res; - - if (ERROR_INSERTED(4015)) - { - terrorCode = 1501; - goto log_space_error; - } - - res= c_lgman->alloc_log_space(regFragPtr->m_logfile_group_id, - regOperPtr.p->m_undo_buffer_space); - if(unlikely(res)) - { - terrorCode= res; - goto log_space_error; - } - } - - regOperPtr.p->tupVersion= tup_version & ZTUP_VERSION_MASK; - tuple_ptr->set_tuple_version(tup_version); - - if (ERROR_INSERTED(4016)) - { - terrorCode = ZAI_INCONSISTENCY_ERROR; - goto update_error; - } - - if(unlikely(updateAttributes(req_struct, &cinBuffer[0], - req_struct->attrinfo_len) == -1)) - { - goto update_error; - } - - if (ERROR_INSERTED(4017)) - { - goto null_check_error; - } - if (unlikely(checkNullAttributes(req_struct, regTabPtr) == false)) - { - goto null_check_error; - } - - if (regTabPtr->need_shrink()) - { - shrink_tuple(req_struct, sizes+2, regTabPtr, true); - } - - if (ERROR_INSERTED(4025)) - { - goto mem_error; - } - - if (ERROR_INSERTED(4026)) - { - CLEAR_ERROR_INSERT_VALUE; - goto mem_error; - } - - if (ERROR_INSERTED(4027) && (rand() % 100) > 25) - { - goto mem_error; - } - - if (ERROR_INSERTED(4028) && (rand() % 100) > 25) - { - CLEAR_ERROR_INSERT_VALUE; - goto mem_error; - } - - /** - * Alloc memory - */ - if(mem_insert) - { - if (!rowid) - { - if (ERROR_INSERTED(4018)) - { - goto mem_error; - } - - if (!varsize) - { - jam(); - ptr= alloc_fix_rec(regFragPtr, - regTabPtr, - ®OperPtr.p->m_tuple_location, - &frag_page_id); - } - else - { - jam(); - regOperPtr.p->m_tuple_location.m_file_no= sizes[2+MM]; - ptr= alloc_var_rec(regFragPtr, regTabPtr, - sizes[2+MM], - ®OperPtr.p->m_tuple_location, - 
&frag_page_id); - } - if (unlikely(ptr == 0)) - { - goto mem_error; - } - req_struct->m_use_rowid = true; - } - else - { - regOperPtr.p->m_tuple_location = req_struct->m_row_id; - if (ERROR_INSERTED(4019)) - { - terrorCode = ZROWID_ALLOCATED; - goto alloc_rowid_error; - } - - if (!varsize) - { - jam(); - ptr= alloc_fix_rowid(regFragPtr, - regTabPtr, - ®OperPtr.p->m_tuple_location, - &frag_page_id); - } - else - { - jam(); - regOperPtr.p->m_tuple_location.m_file_no= sizes[2+MM]; - ptr= alloc_var_rowid(regFragPtr, regTabPtr, - sizes[2+MM], - ®OperPtr.p->m_tuple_location, - &frag_page_id); - } - if (unlikely(ptr == 0)) - { - jam(); - goto alloc_rowid_error; - } - } - real_page_id = regOperPtr.p->m_tuple_location.m_page_no; - regOperPtr.p->m_tuple_location.m_page_no= frag_page_id; - c_lqh->accminupdate(signal, - regOperPtr.p->userpointer, - ®OperPtr.p->m_tuple_location); - - base = (Tuple_header*)ptr; - base->m_operation_ptr_i= regOperPtr.i; - base->m_header_bits= Tuple_header::ALLOC | - (varsize ? Tuple_header::CHAINED_ROW : 0); - regOperPtr.p->m_tuple_location.m_page_no = real_page_id; - } - else - { - int ret; - if (ERROR_INSERTED(4020)) - { - goto size_change_error; - } - - if (regTabPtr->need_shrink() && cmp[0] != cmp[1] && - unlikely(ret = handle_size_change_after_update(req_struct, - base, - regOperPtr.p, - regFragPtr, - regTabPtr, - sizes))) - { - goto size_change_error; - } - req_struct->m_use_rowid = false; - base->m_header_bits &= ~(Uint32)Tuple_header::FREE; - } - - base->m_header_bits |= Tuple_header::ALLOC & - (regOperPtr.p->is_first_operation() ? ~0 : 1); - - if (disk_insert) - { - Local_key tmp; - Uint32 size= regTabPtr->m_attributes[DD].m_no_of_varsize == 0 ? - 1 : sizes[2+DD]; - - if (ERROR_INSERTED(4021)) - { - terrorCode = 1601; - goto disk_prealloc_error; - } - - int ret= disk_page_prealloc(signal, fragPtr, &tmp, size); - if (unlikely(ret < 0)) - { - terrorCode = -ret; - goto disk_prealloc_error; - } - - regOperPtr.p->op_struct.m_disk_preallocated= 1; - tmp.m_page_idx= size; - memcpy(tuple_ptr->get_disk_ref_ptr(regTabPtr), &tmp, sizeof(tmp)); - - /** - * Set ref from disk to mm - */ - Local_key ref = regOperPtr.p->m_tuple_location; - ref.m_page_no = frag_page_id; - - Tuple_header* disk_ptr= req_struct->m_disk_ptr; - disk_ptr->m_header_bits = 0; - disk_ptr->m_base_record_ref= ref.ref(); - } - - if (regTabPtr->m_bits & Tablerec::TR_Checksum) - { - jam(); - setChecksum(req_struct->m_tuple_ptr, regTabPtr); - } - return 0; - -size_change_error: - jam(); - terrorCode = ZMEM_NOMEM_ERROR; - goto exit_error; - -undo_buffer_error: - jam(); - terrorCode= ZMEM_NOMEM_ERROR; - regOperPtr.p->m_undo_buffer_space = 0; - if (mem_insert) - regOperPtr.p->m_tuple_location.setNull(); - regOperPtr.p->m_copy_tuple_location.setNull(); - tupkeyErrorLab(signal); - return -1; - -null_check_error: - jam(); - terrorCode= ZNO_ILLEGAL_NULL_ATTR; - goto update_error; - -mem_error: - jam(); - terrorCode= ZMEM_NOMEM_ERROR; - goto update_error; - -log_space_error: - jam(); - regOperPtr.p->m_undo_buffer_space = 0; -alloc_rowid_error: - jam(); -update_error: - jam(); - if (mem_insert) - { - regOperPtr.p->op_struct.in_active_list = false; - regOperPtr.p->m_tuple_location.setNull(); - } -exit_error: - tupkeyErrorLab(signal); - return -1; - -disk_prealloc_error: - base->m_header_bits |= Tuple_header::FREED; - goto exit_error; -} - -/* ---------------------------------------------------------------- */ -/* ---------------------------- DELETE ---------------------------- */ -/* 
---------------------------------------------------------------- */ -int Dbtup::handleDeleteReq(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - KeyReqStruct *req_struct, - bool disk) -{ - // delete must set but not increment tupVersion - if (!regOperPtr->is_first_operation()) - { - Operationrec* prevOp= req_struct->prevOpPtr.p; - regOperPtr->tupVersion= prevOp->tupVersion; - // make copy since previous op is committed before this one - const Uint32* org = c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location); - Uint32* dst = c_undo_buffer.alloc_copy_tuple( - ®OperPtr->m_copy_tuple_location, regTabPtr->total_rec_size); - if (dst == 0) { - terrorCode = ZMEM_NOMEM_ERROR; - goto error; - } - memcpy(dst, org, regTabPtr->total_rec_size << 2); - req_struct->m_tuple_ptr = (Tuple_header*)dst; - } - else - { - regOperPtr->tupVersion= req_struct->m_tuple_ptr->get_tuple_version(); - } - - if(disk && regOperPtr->m_undo_buffer_space == 0) - { - regOperPtr->op_struct.m_wait_log_buffer = 1; - regOperPtr->op_struct.m_load_diskpage_on_commit = 1; - Uint32 sz= regOperPtr->m_undo_buffer_space= - (sizeof(Dbtup::Disk_undo::Free) >> 2) + - regTabPtr->m_offsets[DD].m_fix_header_size - 1; - - terrorCode= c_lgman->alloc_log_space(regFragPtr->m_logfile_group_id, - sz); - if(unlikely(terrorCode)) - { - regOperPtr->m_undo_buffer_space= 0; - goto error; - } - } - if (req_struct->attrinfo_len == 0) - { - return 0; - } - - if (regTabPtr->need_expand(disk)) - { - prepare_read(req_struct, regTabPtr, disk); - } - - { - Uint32 RlogSize; - int ret= handleReadReq(signal, regOperPtr, regTabPtr, req_struct); - if (ret == 0 && (RlogSize= req_struct->log_size)) - { - jam(); - sendLogAttrinfo(signal, RlogSize, regOperPtr); - } - return ret; - } - -error: - tupkeyErrorLab(signal); - return -1; -} - -bool -Dbtup::checkNullAttributes(KeyReqStruct * req_struct, - Tablerec* regTabPtr) -{ -// Implement checking of updating all not null attributes in an insert here. - Bitmask attributeMask; - /* - * The idea here is maybe that changeMask is not-null attributes - * and must contain notNullAttributeMask. But: - * - * 1. changeMask has all bits set on insert - * 2. not-null is checked in each UpdateFunction - * 3. the code below does not work except trivially due to 1. - * - * XXX remove or fix - */ - attributeMask.clear(); - attributeMask.bitOR(req_struct->changeMask); - attributeMask.bitAND(regTabPtr->notNullAttributeMask); - attributeMask.bitXOR(regTabPtr->notNullAttributeMask); - if (!attributeMask.isclear()) { - return false; - } - return true; -} - -/* ---------------------------------------------------------------- */ -/* THIS IS THE START OF THE INTERPRETED EXECUTION OF UPDATES. WE */ -/* START BY LINKING ALL ATTRINFO'S IN A DOUBLY LINKED LIST (THEY ARE*/ -/* ALREADY IN A LINKED LIST). WE ALLOCATE A REGISTER MEMORY (EQUAL */ -/* TO AN ATTRINFO RECORD). THE INTERPRETER GOES THROUGH FOUR PHASES*/ -/* DURING THE FIRST PHASE IT IS ONLY ALLOWED TO READ ATTRIBUTES THAT*/ -/* ARE SENT TO THE CLIENT APPLICATION. DURING THE SECOND PHASE IT IS*/ -/* ALLOWED TO READ FROM ATTRIBUTES INTO REGISTERS, TO UPDATE */ -/* ATTRIBUTES BASED ON EITHER A CONSTANT VALUE OR A REGISTER VALUE, */ -/* A DIVERSE SET OF OPERATIONS ON REGISTERS ARE AVAILABLE AS WELL. */ -/* IT IS ALSO POSSIBLE TO PERFORM JUMPS WITHIN THE INSTRUCTIONS THAT*/ -/* BELONGS TO THE SECOND PHASE. ALSO SUBROUTINES CAN BE CALLED IN */ -/* THIS PHASE. 
THE THIRD PHASE IS TO AGAIN READ ATTRIBUTES AND */ -/* FINALLY THE FOURTH PHASE READS SELECTED REGISTERS AND SEND THEM */ -/* TO THE CLIENT APPLICATION. */ -/* THERE IS A FIFTH REGION WHICH CONTAINS SUBROUTINES CALLABLE FROM */ -/* THE INTERPRETER EXECUTION REGION. */ -/* THE FIRST FIVE WORDS WILL GIVE THE LENGTH OF THE FIVEE REGIONS */ -/* */ -/* THIS MEANS THAT FROM THE APPLICATIONS POINT OF VIEW THE DATABASE */ -/* CAN HANDLE SUBROUTINE CALLS WHERE THE CODE IS SENT IN THE REQUEST*/ -/* THE RETURN PARAMETERS ARE FIXED AND CAN EITHER BE GENERATED */ -/* BEFORE THE EXECUTION OF THE ROUTINE OR AFTER. */ -/* */ -/* IN LATER VERSIONS WE WILL ADD MORE THINGS LIKE THE POSSIBILITY */ -/* TO ALLOCATE MEMORY AND USE THIS AS LOCAL STORAGE. IT IS ALSO */ -/* IMAGINABLE TO HAVE SPECIAL ROUTINES THAT CAN PERFORM CERTAIN */ -/* OPERATIONS ON BLOB'S DEPENDENT ON WHAT THE BLOB REPRESENTS. */ -/* */ -/* */ -/* ----------------------------------------- */ -/* + INITIAL READ REGION + */ -/* ----------------------------------------- */ -/* + INTERPRETED EXECUTE REGION + */ -/* ----------------------------------------- */ -/* + FINAL UPDATE REGION + */ -/* ----------------------------------------- */ -/* + FINAL READ REGION + */ -/* ----------------------------------------- */ -/* + SUBROUTINE REGION + */ -/* ----------------------------------------- */ -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -/* ----------------- INTERPRETED EXECUTION ----------------------- */ -/* ---------------------------------------------------------------- */ -int Dbtup::interpreterStartLab(Signal* signal, - KeyReqStruct *req_struct) -{ - Operationrec * const regOperPtr= operPtr.p; - int TnoDataRW; - Uint32 RtotalLen, start_index, dstLen; - Uint32 *dst; - - Uint32 RinitReadLen= cinBuffer[0]; - Uint32 RexecRegionLen= cinBuffer[1]; - Uint32 RfinalUpdateLen= cinBuffer[2]; - Uint32 RfinalRLen= cinBuffer[3]; - Uint32 RsubLen= cinBuffer[4]; - - Uint32 RattrinbufLen= req_struct->attrinfo_len; - const BlockReference sendBref= req_struct->rec_blockref; - - const Uint32 node = refToNode(sendBref); - if(node != 0 && node != getOwnNodeId()) { - start_index= 25; - } else { - jam(); - /** - * execute direct - */ - start_index= 3; - } - dst= &signal->theData[start_index]; - dstLen= (MAX_READ / 4) - start_index; - - RtotalLen= RinitReadLen; - RtotalLen += RexecRegionLen; - RtotalLen += RfinalUpdateLen; - RtotalLen += RfinalRLen; - RtotalLen += RsubLen; - - Uint32 RattroutCounter= 0; - Uint32 RinstructionCounter= 5; - Uint32 RlogSize= 0; - if (((RtotalLen + 5) == RattrinbufLen) && - (RattrinbufLen >= 5) && - (RattrinbufLen < ZATTR_BUFFER_SIZE)) { - /* ---------------------------------------------------------------- */ - // We start by checking consistency. We must have the first five - // words of the ATTRINFO to give us the length of the regions. The - // size of these regions must be the same as the total ATTRINFO - // length and finally the total length must be within the limits. - /* ---------------------------------------------------------------- */ - - if (RinitReadLen > 0) { - jam(); - /* ---------------------------------------------------------------- */ - // The first step that can be taken in the interpreter is to read - // data of the tuple before any updates have been applied. 
- /* ---------------------------------------------------------------- */ - TnoDataRW= readAttributes(req_struct, - &cinBuffer[5], - RinitReadLen, - &dst[0], - dstLen, - false); - if (TnoDataRW != -1) { - RattroutCounter= TnoDataRW; - RinstructionCounter += RinitReadLen; - } else { - jam(); - tupkeyErrorLab(signal); - return -1; - } - } - if (RexecRegionLen > 0) { - jam(); - /* ---------------------------------------------------------------- */ - // The next step is the actual interpreted execution. This executes - // a register-based virtual machine which can read and write attributes - // to and from registers. - /* ---------------------------------------------------------------- */ - Uint32 RsubPC= RinstructionCounter + RfinalUpdateLen + RfinalRLen; - TnoDataRW= interpreterNextLab(signal, - req_struct, - &clogMemBuffer[0], - &cinBuffer[RinstructionCounter], - RexecRegionLen, - &cinBuffer[RsubPC], - RsubLen, - &coutBuffer[0], - sizeof(coutBuffer) / 4); - if (TnoDataRW != -1) { - RinstructionCounter += RexecRegionLen; - RlogSize= TnoDataRW; - } else { - jam(); - /** - * TUPKEY REF is sent from within interpreter - */ - return -1; - } - } - if (RfinalUpdateLen > 0) { - jam(); - /* ---------------------------------------------------------------- */ - // We can also apply a set of updates without any conditions as part - // of the interpreted execution. - /* ---------------------------------------------------------------- */ - if (regOperPtr->op_struct.op_type == ZUPDATE) { - TnoDataRW= updateAttributes(req_struct, - &cinBuffer[RinstructionCounter], - RfinalUpdateLen); - if (TnoDataRW != -1) { - MEMCOPY_NO_WORDS(&clogMemBuffer[RlogSize], - &cinBuffer[RinstructionCounter], - RfinalUpdateLen); - RinstructionCounter += RfinalUpdateLen; - RlogSize += RfinalUpdateLen; - } else { - jam(); - tupkeyErrorLab(signal); - return -1; - } - } else { - return TUPKEY_abort(signal, 19); - } - } - if (RfinalRLen > 0) { - jam(); - /* ---------------------------------------------------------------- */ - // The final action is that we can also read the tuple after it has - // been updated. - /* ---------------------------------------------------------------- */ - TnoDataRW= readAttributes(req_struct, - &cinBuffer[RinstructionCounter], - RfinalRLen, - &dst[RattroutCounter], - (dstLen - RattroutCounter), - false); - if (TnoDataRW != -1) { - RattroutCounter += TnoDataRW; - } else { - jam(); - tupkeyErrorLab(signal); - return -1; - } - } - req_struct->log_size= RlogSize; - req_struct->read_length= RattroutCounter; - sendReadAttrinfo(signal, req_struct, RattroutCounter, regOperPtr); - if (RlogSize > 0) { - sendLogAttrinfo(signal, RlogSize, regOperPtr); - } - return 0; - } else { - return TUPKEY_abort(signal, 22); - } -} - -/* ---------------------------------------------------------------- */ -/* WHEN EXECUTION IS INTERPRETED WE NEED TO SEND SOME ATTRINFO*/ -/* BACK TO LQH FOR LOGGING AND SENDING TO BACKUP AND STANDBY */ -/* NODES. 
*/ -/* INPUT: LOG_ATTRINFOPTR WHERE TO FETCH DATA FROM */ -/* TLOG_START FIRST INDEX TO LOG */ -/* TLOG_END LAST INDEX + 1 TO LOG */ -/* ---------------------------------------------------------------- */ -void Dbtup::sendLogAttrinfo(Signal* signal, - Uint32 TlogSize, - Operationrec * const regOperPtr) - -{ - Uint32 TbufferIndex= 0; - signal->theData[0]= regOperPtr->userpointer; - while (TlogSize > 22) { - MEMCOPY_NO_WORDS(&signal->theData[3], - &clogMemBuffer[TbufferIndex], - 22); - EXECUTE_DIRECT(DBLQH, GSN_TUP_ATTRINFO, signal, 25); - TbufferIndex += 22; - TlogSize -= 22; - } - MEMCOPY_NO_WORDS(&signal->theData[3], - &clogMemBuffer[TbufferIndex], - TlogSize); - EXECUTE_DIRECT(DBLQH, GSN_TUP_ATTRINFO, signal, 3 + TlogSize); -} - -inline -Uint32 -brancher(Uint32 TheInstruction, Uint32 TprogramCounter) -{ - Uint32 TbranchDirection= TheInstruction >> 31; - Uint32 TbranchLength= (TheInstruction >> 16) & 0x7fff; - TprogramCounter--; - if (TbranchDirection == 1) { - jam(); - /* ---------------------------------------------------------------- */ - /* WE JUMP BACKWARDS. */ - /* ---------------------------------------------------------------- */ - return (TprogramCounter - TbranchLength); - } else { - jam(); - /* ---------------------------------------------------------------- */ - /* WE JUMP FORWARD. */ - /* ---------------------------------------------------------------- */ - return (TprogramCounter + TbranchLength); - } -} - -int Dbtup::interpreterNextLab(Signal* signal, - KeyReqStruct* req_struct, - Uint32* logMemory, - Uint32* mainProgram, - Uint32 TmainProgLen, - Uint32* subroutineProg, - Uint32 TsubroutineLen, - Uint32 * tmpArea, - Uint32 tmpAreaSz) -{ - register Uint32* TcurrentProgram= mainProgram; - register Uint32 TcurrentSize= TmainProgLen; - register Uint32 RnoOfInstructions= 0; - register Uint32 TprogramCounter= 0; - register Uint32 theInstruction; - register Uint32 theRegister; - Uint32 TdataWritten= 0; - Uint32 RstackPtr= 0; - union { - Uint32 TregMemBuffer[32]; - Uint64 align[16]; - }; - Uint32 TstackMemBuffer[32]; - - /* ---------------------------------------------------------------- */ - // Initialise all 8 registers to contain the NULL value. - // In this version we can handle 32 and 64 bit unsigned integers. - // They are handled as 64 bit values. Thus the 32 most significant - // bits are zeroed for 32 bit values. - /* ---------------------------------------------------------------- */ - TregMemBuffer[0]= 0; - TregMemBuffer[4]= 0; - TregMemBuffer[8]= 0; - TregMemBuffer[12]= 0; - TregMemBuffer[16]= 0; - TregMemBuffer[20]= 0; - TregMemBuffer[24]= 0; - TregMemBuffer[28]= 0; - Uint32 tmpHabitant= ~0; - - while (RnoOfInstructions < 8000) { - /* ---------------------------------------------------------------- */ - /* EXECUTE THE NEXT INTERPRETER INSTRUCTION. */ - /* ---------------------------------------------------------------- */ - RnoOfInstructions++; - theInstruction= TcurrentProgram[TprogramCounter]; - theRegister= Interpreter::getReg1(theInstruction) << 2; - if (TprogramCounter < TcurrentSize) { - TprogramCounter++; - switch (Interpreter::getOpCode(theInstruction)) { - case Interpreter::READ_ATTR_INTO_REG: - jam(); - /* ---------------------------------------------------------------- */ - // Read an attribute from the tuple into a register. - // While reading an attribute we allow the attribute to be an array - // as long as it fits in the 64 bits of the register. 
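Each interpreter register occupies four words of TregMemBuffer: a type tag in word 0 (0 for NULL, 0x50 for a value loaded from a 32-bit attribute, 0x60 for a 64-bit one) and the value itself in words 2..3, as the read below and the LOAD_CONST* cases further down show. A small self-contained sketch of that layout, with illustrative helper names:

#include <cstdint>
#include <cstring>
#include <cassert>

// Sketch of the deleted interpreter's register file: 8 registers in a
// 32-word array, 4 words per register, word 0 holding the type tag
// (0 = NULL, 0x50 = 32-bit value, 0x60 = 64-bit value) and words 2..3
// holding the value widened to 64 bits. Helper names are illustrative.
static void load_const32(uint32_t* regMem, unsigned reg, uint32_t value)
{
  uint32_t* slot = regMem + (reg << 2);
  slot[0] = 0x50;                       // tag: loaded from a 32-bit value
  int64_t wide = value;                 // upper half zeroed on load
  std::memcpy(slot + 2, &wide, sizeof(wide));
}

static void load_null(uint32_t* regMem, unsigned reg)
{
  regMem[reg << 2] = 0;                 // tag 0 marks a NULL register
}

int main()
{
  uint32_t regMem[32] = { 0 };
  load_const32(regMem, 1, 7);
  load_null(regMem, 2);
  int64_t v;
  std::memcpy(&v, regMem + (1 << 2) + 2, sizeof(v));
  assert(regMem[4] == 0x50 && v == 7);  // register 1: tagged, value readable
  assert(regMem[8] == 0);               // register 2: NULL
  return 0;
}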
- /* ---------------------------------------------------------------- */ - { - Uint32 theAttrinfo= theInstruction; - int TnoDataRW= readAttributes(req_struct, - &theAttrinfo, - (Uint32)1, - &TregMemBuffer[theRegister], - (Uint32)3, - false); - if (TnoDataRW == 2) { - /* ------------------------------------------------------------- */ - // Two words read means that we get the instruction plus one 32 - // word read. Thus we set the register to be a 32 bit register. - /* ------------------------------------------------------------- */ - TregMemBuffer[theRegister]= 0x50; - // arithmetic conversion if big-endian - * (Int64*)(TregMemBuffer+theRegister+2)= TregMemBuffer[theRegister+1]; - } else if (TnoDataRW == 3) { - /* ------------------------------------------------------------- */ - // Three words read means that we get the instruction plus two - // 32 words read. Thus we set the register to be a 64 bit register. - /* ------------------------------------------------------------- */ - TregMemBuffer[theRegister]= 0x60; - TregMemBuffer[theRegister+3]= TregMemBuffer[theRegister+2]; - TregMemBuffer[theRegister+2]= TregMemBuffer[theRegister+1]; - } else if (TnoDataRW == 1) { - /* ------------------------------------------------------------- */ - // One word read means that we must have read a NULL value. We set - // the register to indicate a NULL value. - /* ------------------------------------------------------------- */ - TregMemBuffer[theRegister]= 0; - TregMemBuffer[theRegister + 2]= 0; - TregMemBuffer[theRegister + 3]= 0; - } else if (TnoDataRW == -1) { - jam(); - tupkeyErrorLab(signal); - return -1; - } else { - /* ------------------------------------------------------------- */ - // Any other return value from the read attribute here is not - // allowed and will lead to a system crash. - /* ------------------------------------------------------------- */ - ndbrequire(false); - } - break; - } - - case Interpreter::WRITE_ATTR_FROM_REG: - jam(); - { - Uint32 TattrId= theInstruction >> 16; - Uint32 TattrDescrIndex= tabptr.p->tabDescriptor + - (TattrId << ZAD_LOG_SIZE); - Uint32 TattrDesc1= tableDescriptor[TattrDescrIndex].tabDescr; - Uint32 TregType= TregMemBuffer[theRegister]; - - /* --------------------------------------------------------------- */ - // Calculate the number of words of this attribute. - // We allow writes into arrays as long as they fit into the 64 bit - // register size. 
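The update request assembled below is one attribute-header word followed by the attribute's value words taken from the register, or the header word alone when the register tag is 0 (NULL). A rough stand-alone sketch of that shape; pack_header() is an illustrative stand-in, not NDB's AttributeHeader encoding:

#include <cstdint>
#include <vector>

// Illustrative header packing, not the real AttributeHeader layout.
static uint32_t pack_header(uint32_t attrId, uint32_t byteSize, bool isNull)
{
  return (attrId << 16) | (isNull ? 0x8000u : byteSize);
}

// Shape of the buffer handed to updateAttributes() by the deleted
// WRITE_ATTR_FROM_REG case: header word, then attrWords value words
// taken from the register, or the header alone for a NULL register.
static std::vector<uint32_t> build_update(uint32_t attrId,
                                          uint32_t attrWords, // 1 or 2, from the table descriptor
                                          uint32_t regTag,    // 0 = NULL register
                                          uint64_t regValue)
{
  std::vector<uint32_t> out;
  if (regTag == 0) {                        // NULL register: header word only
    out.push_back(pack_header(attrId, 0, true));
    return out;
  }
  out.push_back(pack_header(attrId, attrWords * 4, false));
  out.push_back(static_cast<uint32_t>(regValue));          // low word
  if (attrWords == 2)
    out.push_back(static_cast<uint32_t>(regValue >> 32));  // high word
  return out;
}

int main()
{
  // 64-bit attribute 7 written from a non-NULL register.
  std::vector<uint32_t> words = build_update(7, 2, 0x60, 0x1122334455667788ULL);
  return words.size() == 3 ? 0 : 1;
}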
- /* --------------------------------------------------------------- */ - Uint32 TattrNoOfWords = AttributeDescriptor::getSizeInWords(TattrDesc1); - Uint32 Toptype = operPtr.p->op_struct.op_type; - Uint32 TdataForUpdate[3]; - Uint32 Tlen; - - AttributeHeader ah(TattrId, TattrNoOfWords << 2); - TdataForUpdate[0]= ah.m_value; - TdataForUpdate[1]= TregMemBuffer[theRegister + 2]; - TdataForUpdate[2]= TregMemBuffer[theRegister + 3]; - Tlen= TattrNoOfWords + 1; - if (Toptype == ZUPDATE) { - if (TattrNoOfWords <= 2) { - if (TattrNoOfWords == 1) { - // arithmetic conversion if big-endian - TdataForUpdate[1] = *(Int64*)&TregMemBuffer[theRegister + 2]; - TdataForUpdate[2] = 0; - } - if (TregType == 0) { - /* --------------------------------------------------------- */ - // Write a NULL value into the attribute - /* --------------------------------------------------------- */ - ah.setNULL(); - TdataForUpdate[0]= ah.m_value; - Tlen= 1; - } - int TnoDataRW= updateAttributes(req_struct, - &TdataForUpdate[0], - Tlen); - if (TnoDataRW != -1) { - /* --------------------------------------------------------- */ - // Write the written data also into the log buffer so that it - // will be logged. - /* --------------------------------------------------------- */ - logMemory[TdataWritten + 0]= TdataForUpdate[0]; - logMemory[TdataWritten + 1]= TdataForUpdate[1]; - logMemory[TdataWritten + 2]= TdataForUpdate[2]; - TdataWritten += Tlen; - } else { - tupkeyErrorLab(signal); - return -1; - } - } else { - return TUPKEY_abort(signal, 15); - } - } else { - return TUPKEY_abort(signal, 16); - } - break; - } - - case Interpreter::LOAD_CONST_NULL: - jam(); - TregMemBuffer[theRegister]= 0; /* NULL INDICATOR */ - break; - - case Interpreter::LOAD_CONST16: - jam(); - TregMemBuffer[theRegister]= 0x50; /* 32 BIT UNSIGNED CONSTANT */ - * (Int64*)(TregMemBuffer+theRegister+2)= theInstruction >> 16; - break; - - case Interpreter::LOAD_CONST32: - jam(); - TregMemBuffer[theRegister]= 0x50; /* 32 BIT UNSIGNED CONSTANT */ - * (Int64*)(TregMemBuffer+theRegister+2)= * - (TcurrentProgram+TprogramCounter); - TprogramCounter++; - break; - - case Interpreter::LOAD_CONST64: - jam(); - TregMemBuffer[theRegister]= 0x60; /* 64 BIT UNSIGNED CONSTANT */ - TregMemBuffer[theRegister + 2 ]= * (TcurrentProgram + - TprogramCounter++); - TregMemBuffer[theRegister + 3 ]= * (TcurrentProgram + - TprogramCounter++); - break; - - case Interpreter::ADD_REG_REG: - jam(); - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - Uint32 TdestRegister= Interpreter::getReg3(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - if ((TleftType | TrightType) != 0) { - Uint64 Tdest0= Tleft0 + Tright0; - * (Int64*)(TregMemBuffer+TdestRegister+2)= Tdest0; - TregMemBuffer[TdestRegister]= 0x60; - } else { - return TUPKEY_abort(signal, 20); - } - break; - } - - case Interpreter::SUB_REG_REG: - jam(); - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - Uint32 TdestRegister= Interpreter::getReg3(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - if ((TleftType | TrightType) != 0) { - Int64 Tdest0= Tleft0 - Tright0; - * 
(Int64*)(TregMemBuffer+TdestRegister+2)= Tdest0; - TregMemBuffer[TdestRegister]= 0x60; - } else { - return TUPKEY_abort(signal, 20); - } - break; - } - - case Interpreter::BRANCH: - TprogramCounter= brancher(theInstruction, TprogramCounter); - break; - - case Interpreter::BRANCH_REG_EQ_NULL: - if (TregMemBuffer[theRegister] != 0) { - jam(); - continue; - } else { - jam(); - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - break; - - case Interpreter::BRANCH_REG_NE_NULL: - if (TregMemBuffer[theRegister] == 0) { - jam(); - continue; - } else { - jam(); - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - break; - - - case Interpreter::BRANCH_EQ_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TleftType= TregMemBuffer[theRegister]; - Uint32 Tleft0= TregMemBuffer[theRegister + 2]; - Uint32 Tleft1= TregMemBuffer[theRegister + 3]; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Uint32 Tright0= TregMemBuffer[TrightRegister + 2]; - Uint32 Tright1= TregMemBuffer[TrightRegister + 3]; - if ((TrightType | TleftType) != 0) { - jam(); - if ((Tleft0 == Tright0) && (Tleft1 == Tright1)) { - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - return TUPKEY_abort(signal, 23); - } - break; - } - - case Interpreter::BRANCH_NE_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TleftType= TregMemBuffer[theRegister]; - Uint32 Tleft0= TregMemBuffer[theRegister + 2]; - Uint32 Tleft1= TregMemBuffer[theRegister + 3]; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Uint32 Tright0= TregMemBuffer[TrightRegister + 2]; - Uint32 Tright1= TregMemBuffer[TrightRegister + 3]; - if ((TrightType | TleftType) != 0) { - jam(); - if ((Tleft0 != Tright0) || (Tleft1 != Tright1)) { - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - return TUPKEY_abort(signal, 24); - } - break; - } - - case Interpreter::BRANCH_LT_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - - if ((TrightType | TleftType) != 0) { - jam(); - if (Tleft0 < Tright0) { - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - return TUPKEY_abort(signal, 24); - } - break; - } - - case Interpreter::BRANCH_LE_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - - if ((TrightType | TleftType) != 0) { - jam(); - if (Tleft0 <= Tright0) { - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - return TUPKEY_abort(signal, 26); - } - break; - } - - case Interpreter::BRANCH_GT_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - - if ((TrightType | TleftType) != 0) { - jam(); - if (Tleft0 > Tright0){ - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - 
return TUPKEY_abort(signal, 27); - } - break; - } - - case Interpreter::BRANCH_GE_REG_REG: - { - Uint32 TrightRegister= Interpreter::getReg2(theInstruction) << 2; - - Uint32 TrightType= TregMemBuffer[TrightRegister]; - Int64 Tright0= * (Int64*)(TregMemBuffer + TrightRegister + 2); - - Uint32 TleftType= TregMemBuffer[theRegister]; - Int64 Tleft0= * (Int64*)(TregMemBuffer + theRegister + 2); - - - if ((TrightType | TleftType) != 0) { - jam(); - if (Tleft0 >= Tright0){ - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - } else { - return TUPKEY_abort(signal, 28); - } - break; - } - - case Interpreter::BRANCH_ATTR_OP_ARG:{ - jam(); - Uint32 cond = Interpreter::getBinaryCondition(theInstruction); - Uint32 ins2 = TcurrentProgram[TprogramCounter]; - Uint32 attrId = Interpreter::getBranchCol_AttrId(ins2) << 16; - Uint32 argLen = Interpreter::getBranchCol_Len(ins2); - - if(tmpHabitant != attrId){ - Int32 TnoDataR = readAttributes(req_struct, - &attrId, 1, - tmpArea, tmpAreaSz, - false); - - if (TnoDataR == -1) { - jam(); - tupkeyErrorLab(signal); - return -1; - } - tmpHabitant= attrId; - } - - // get type - attrId >>= 16; - Uint32 TattrDescrIndex = tabptr.p->tabDescriptor + - (attrId << ZAD_LOG_SIZE); - Uint32 TattrDesc1 = tableDescriptor[TattrDescrIndex].tabDescr; - Uint32 TattrDesc2 = tableDescriptor[TattrDescrIndex+1].tabDescr; - Uint32 typeId = AttributeDescriptor::getType(TattrDesc1); - const void * cs = 0; - if(AttributeOffset::getCharsetFlag(TattrDesc2)) - { - Uint32 pos = AttributeOffset::getCharsetPos(TattrDesc2); - cs = (void*) tabptr.p->charsetArray[pos]; - } - const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(typeId); - - // get data - AttributeHeader ah(tmpArea[0]); - const char* s1 = (char*)&tmpArea[1]; - const char* s2 = (char*)&TcurrentProgram[TprogramCounter+1]; - // fixed length in 5.0 - Uint32 attrLen = AttributeDescriptor::getSizeInBytes(TattrDesc1); - - bool r1_null = ah.isNULL(); - bool r2_null = argLen == 0; - int res1; - if (cond != Interpreter::LIKE && - cond != Interpreter::NOT_LIKE) { - if (r1_null || r2_null) { - // NULL==NULL and NULL 0); - break; - case Interpreter::LE: - res = (res1 >= 0); - break; - case Interpreter::GT: - res = (res1 < 0); - break; - case Interpreter::GE: - res = (res1 <= 0); - break; - case Interpreter::LIKE: - res = (res1 == 0); - break; - case Interpreter::NOT_LIKE: - res = (res1 == 1); - break; - // XXX handle invalid value - } -#ifdef TRACE_INTERPRETER - ndbout_c("cond=%u attr(%d)='%.*s'(%d) str='%.*s'(%d) res1=%d res=%d", - cond, attrId >> 16, - attrLen, s1, attrLen, argLen, s2, argLen, res1, res); -#endif - if (res) - TprogramCounter = brancher(theInstruction, TprogramCounter); - else - { - Uint32 tmp = ((argLen + 3) >> 2) + 1; - TprogramCounter += tmp; - } - break; - } - - case Interpreter::BRANCH_ATTR_EQ_NULL:{ - jam(); - Uint32 ins2= TcurrentProgram[TprogramCounter]; - Uint32 attrId= Interpreter::getBranchCol_AttrId(ins2) << 16; - - if (tmpHabitant != attrId){ - Int32 TnoDataR= readAttributes(req_struct, - &attrId, 1, - tmpArea, tmpAreaSz, - false); - - if (TnoDataR == -1) { - jam(); - tupkeyErrorLab(signal); - return -1; - } - tmpHabitant= attrId; - } - - AttributeHeader ah(tmpArea[0]); - if (ah.isNULL()){ - TprogramCounter= brancher(theInstruction, TprogramCounter); - } else { - TprogramCounter ++; - } - break; - } - - case Interpreter::BRANCH_ATTR_NE_NULL:{ - jam(); - Uint32 ins2= TcurrentProgram[TprogramCounter]; - Uint32 attrId= Interpreter::getBranchCol_AttrId(ins2) << 16; - - if (tmpHabitant != attrId){ - Int32 
TnoDataR= readAttributes(req_struct, - &attrId, 1, - tmpArea, tmpAreaSz, - false); - - if (TnoDataR == -1) { - jam(); - tupkeyErrorLab(signal); - return -1; - } - tmpHabitant= attrId; - } - - AttributeHeader ah(tmpArea[0]); - if (ah.isNULL()){ - TprogramCounter ++; - } else { - TprogramCounter= brancher(theInstruction, TprogramCounter); - } - break; - } - - case Interpreter::EXIT_OK: - jam(); -#ifdef TRACE_INTERPRETER - ndbout_c(" - exit_ok"); -#endif - return TdataWritten; - - case Interpreter::EXIT_OK_LAST: - jam(); -#ifdef TRACE_INTERPRETER - ndbout_c(" - exit_ok_last"); -#endif - req_struct->last_row= true; - return TdataWritten; - - case Interpreter::EXIT_REFUSE: - jam(); -#ifdef TRACE_INTERPRETER - ndbout_c(" - exit_nok"); -#endif - terrorCode= theInstruction >> 16; - return TUPKEY_abort(signal, 29); - - case Interpreter::CALL: - jam(); - RstackPtr++; - if (RstackPtr < 32) { - TstackMemBuffer[RstackPtr]= TprogramCounter + 1; - TprogramCounter= theInstruction >> 16; - if (TprogramCounter < TsubroutineLen) { - TcurrentProgram= subroutineProg; - TcurrentSize= TsubroutineLen; - } else { - return TUPKEY_abort(signal, 30); - } - } else { - return TUPKEY_abort(signal, 31); - } - break; - - case Interpreter::RETURN: - jam(); - if (RstackPtr > 0) { - TprogramCounter= TstackMemBuffer[RstackPtr]; - RstackPtr--; - if (RstackPtr == 0) { - jam(); - /* ------------------------------------------------------------- */ - // We are back to the main program. - /* ------------------------------------------------------------- */ - TcurrentProgram= mainProgram; - TcurrentSize= TmainProgLen; - } - } else { - return TUPKEY_abort(signal, 32); - } - break; - - default: - return TUPKEY_abort(signal, 33); - } - } else { - return TUPKEY_abort(signal, 34); - } - } - return TUPKEY_abort(signal, 35); -} - -/** - * expand_var_part - copy packed variable attributes to fully expanded size - * - * dst: where to start writing attribute data - * dst_off_ptr where to write attribute offsets - * src pointer to packed attributes - * tabDesc array of attribute descriptors (used for getting max size) - * no_of_attr no of atributes to expand - */ -Uint32* -expand_var_part(Dbtup::KeyReqStruct::Var_data *dst, - const Uint32* src, - const Uint32 * tabDesc, - const Uint16* order) -{ - char* dst_ptr= dst->m_data_ptr; - Uint32 no_attr= dst->m_var_len_offset; - Uint16* dst_off_ptr= dst->m_offset_array_ptr; - Uint16* dst_len_ptr= dst_off_ptr + no_attr; - const Uint16* src_off_ptr= (const Uint16*)src; - const char* src_ptr= (const char*)(src_off_ptr + no_attr + 1); - - Uint16 tmp= *src_off_ptr++, next_pos, len, max_len, dst_off= 0; - for(Uint32 i = 0; im_header_bits; - Tuple_header* ptr= req_struct->m_tuple_ptr; - - Uint16 dd_tot= tabPtrP->m_no_of_disk_attributes; - Uint16 mm_vars= tabPtrP->m_attributes[MM].m_no_of_varsize; - Uint32 fix_size= tabPtrP->m_offsets[MM].m_fix_header_size; - Uint32 order_desc= tabPtrP->m_real_order_descriptor; - - Uint32 *dst_ptr= ptr->get_end_of_fix_part_ptr(tabPtrP); - const Uint32 *disk_ref= src->get_disk_ref_ptr(tabPtrP); - const Uint32 *src_ptr= src->get_end_of_fix_part_ptr(tabPtrP); - const Var_part_ref* var_ref = src->get_var_part_ref_ptr(tabPtrP); - const Uint32 *desc= (Uint32*)req_struct->attr_descr; - const Uint16 *order = (Uint16*)(&tableDescriptor[order_desc]); - order += tabPtrP->m_attributes[MM].m_no_of_fixsize; - - if(mm_vars) - { - - Uint32 step; // in bytes - const Uint32 *src_data= src_ptr; - KeyReqStruct::Var_data* dst= &req_struct->m_var_data[MM]; - if(bits & Tuple_header::CHAINED_ROW) - { 
- Ptr var_page; - src_data= get_ptr(&var_page, *var_ref); - step= 4; - sizes[MM]= (2 + (mm_vars << 1) + ((Uint16*)src_data)[mm_vars] + 3) >> 2; - req_struct->m_varpart_page_ptr = var_page; - } - else - { - step= (2 + (mm_vars << 1) + ((Uint16*)src_ptr)[mm_vars]); - sizes[MM]= (step + 3) >> 2; - req_struct->m_varpart_page_ptr = req_struct->m_page_ptr; - } - dst->m_data_ptr= (char*)(((Uint16*)dst_ptr)+mm_vars+1); - dst->m_offset_array_ptr= req_struct->var_pos_array; - dst->m_var_len_offset= mm_vars; - dst->m_max_var_offset= tabPtrP->m_offsets[MM].m_max_var_offset; - - dst_ptr= expand_var_part(dst, src_data, desc, order); - ndbassert(dst_ptr == ALIGN_WORD(dst->m_data_ptr + dst->m_max_var_offset)); - ndbassert((UintPtr(src_ptr) & 3) == 0); - src_ptr = ALIGN_WORD(((char*)src_ptr)+step); - - sizes[MM] += fix_size; - memcpy(ptr, src, 4*fix_size); - } - else - { - sizes[MM]= 1; - memcpy(ptr, src, 4*fix_size); - } - - src->m_header_bits= bits & - ~(Uint32)(Tuple_header::MM_SHRINK | Tuple_header::MM_GROWN); - - sizes[DD]= 0; - if(disk && dd_tot) - { - const Uint16 dd_vars= tabPtrP->m_attributes[DD].m_no_of_varsize; - order += mm_vars; - - if(bits & Tuple_header::DISK_INLINE) - { - // Only on copy tuple - ndbassert((bits & Tuple_header::CHAINED_ROW) == 0); - } - else - { - Local_key key; - memcpy(&key, disk_ref, sizeof(key)); - key.m_page_no= req_struct->m_disk_page_ptr.i; - src_ptr= get_dd_ptr(&req_struct->m_disk_page_ptr, &key, tabPtrP); - } - bits |= Tuple_header::DISK_INLINE; - - // Fix diskpart - req_struct->m_disk_ptr= (Tuple_header*)dst_ptr; - memcpy(dst_ptr, src_ptr, 4*tabPtrP->m_offsets[DD].m_fix_header_size); - sizes[DD] = tabPtrP->m_offsets[DD].m_fix_header_size; - - ndbassert(! (req_struct->m_disk_ptr->m_header_bits & Tuple_header::FREE)); - - ndbrequire(dd_vars == 0); - } - - ptr->m_header_bits= (bits & ~(Uint32)(Tuple_header::CHAINED_ROW)); -} - -void -Dbtup::prepare_read(KeyReqStruct* req_struct, - Tablerec* tabPtrP, bool disk) -{ - Tuple_header* ptr= req_struct->m_tuple_ptr; - - Uint32 bits= ptr->m_header_bits; - Uint16 dd_tot= tabPtrP->m_no_of_disk_attributes; - Uint16 mm_vars= tabPtrP->m_attributes[MM].m_no_of_varsize; - - const Uint32 *src_ptr= ptr->get_end_of_fix_part_ptr(tabPtrP); - const Uint32 *disk_ref= ptr->get_disk_ref_ptr(tabPtrP); - const Var_part_ref* var_ref = ptr->get_var_part_ref_ptr(tabPtrP); - if(mm_vars) - { - const Uint32 *src_data= src_ptr; - KeyReqStruct::Var_data* dst= &req_struct->m_var_data[MM]; - if(bits & Tuple_header::CHAINED_ROW) - { -#if VM_TRACE - -#endif - src_data= get_ptr(* var_ref); - } - dst->m_data_ptr= (char*)(((Uint16*)src_data)+mm_vars+1); - dst->m_offset_array_ptr= (Uint16*)src_data; - dst->m_var_len_offset= 1; - dst->m_max_var_offset= ((Uint16*)src_data)[mm_vars]; - - // disk part start after varsize (aligned) - src_ptr = ALIGN_WORD(dst->m_data_ptr + dst->m_max_var_offset); - } - - if(disk && dd_tot) - { - const Uint16 dd_vars= tabPtrP->m_attributes[DD].m_no_of_varsize; - - if(bits & Tuple_header::DISK_INLINE) - { - // Only on copy tuple - ndbassert((bits & Tuple_header::CHAINED_ROW) == 0); - } - else - { - // XXX - Local_key key; - memcpy(&key, disk_ref, sizeof(key)); - key.m_page_no= req_struct->m_disk_page_ptr.i; - src_ptr= get_dd_ptr(&req_struct->m_disk_page_ptr, &key, tabPtrP); - } - // Fix diskpart - req_struct->m_disk_ptr= (Tuple_header*)src_ptr; - ndbassert(! 
(req_struct->m_disk_ptr->m_header_bits & Tuple_header::FREE)); - ndbrequire(dd_vars == 0); - } -} - -void -Dbtup::shrink_tuple(KeyReqStruct* req_struct, Uint32 sizes[2], - const Tablerec* tabPtrP, bool disk) -{ - ndbassert(tabPtrP->need_shrink()); - Tuple_header* ptr= req_struct->m_tuple_ptr; - - Uint16 dd_tot= tabPtrP->m_no_of_disk_attributes; - Uint16 mm_vars= tabPtrP->m_attributes[MM].m_no_of_varsize; - Uint16 dd_vars= tabPtrP->m_attributes[DD].m_no_of_varsize; - - Uint32 *dst_ptr= ptr->get_end_of_fix_part_ptr(tabPtrP); - Uint16* src_off_ptr= req_struct->var_pos_array; - - sizes[MM] = 1; - sizes[DD] = 0; - if(mm_vars) - { - Uint16* dst_off_ptr= (Uint16*)dst_ptr; - char* dst_data_ptr= (char*)(dst_off_ptr + mm_vars + 1); - char* src_data_ptr= dst_data_ptr; - Uint32 off= 0; - for(Uint32 i= 0; i> 2; - - dst_ptr = ALIGN_WORD(dst_data_ptr); - } - - if(disk && dd_tot) - { - Uint32 * src_ptr = (Uint32*)req_struct->m_disk_ptr; - req_struct->m_disk_ptr = (Tuple_header*)dst_ptr; - ndbrequire(dd_vars == 0); - sizes[DD] = tabPtrP->m_offsets[DD].m_fix_header_size; - memmove(dst_ptr, src_ptr, 4*tabPtrP->m_offsets[DD].m_fix_header_size); - } -} - -void -Dbtup::validate_page(Tablerec* regTabPtr, Var_page* p) -{ - Uint32 mm_vars= regTabPtr->m_attributes[MM].m_no_of_varsize; - Uint32 fix_sz= regTabPtr->m_offsets[MM].m_fix_header_size + - Tuple_header::HeaderSize; - - if(mm_vars == 0) - return; - - for(Uint32 F= 0; Ffragrec[F]) == RNIL) - continue; - - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - for(Uint32 P= 0; PnoOfPages; P++) - { - Uint32 real= getRealpid(fragPtr.p, P); - Var_page* page= (Var_page*)c_page_pool.getPtr(real); - - for(Uint32 i=1; ihigh_index; i++) - { - Uint32 idx= page->get_index_word(i); - Uint32 len = (idx & Var_page::LEN_MASK) >> Var_page::LEN_SHIFT; - if(!(idx & Var_page::FREE) && !(idx & Var_page::CHAIN)) - { - Tuple_header *ptr= (Tuple_header*)page->get_ptr(i); - Uint32 *part= ptr->get_end_of_fix_part_ptr(regTabPtr); - if(ptr->m_header_bits & Tuple_header::CHAINED_ROW) - { - ndbassert(len == fix_sz + 1); - Local_key tmp; tmp.assref(*part); - Ptr tmpPage; - part= get_ptr(&tmpPage, *(Var_part_ref*)part); - len= ((Var_page*)tmpPage.p)->get_entry_len(tmp.m_page_idx); - Uint32 sz= ((mm_vars + 1) << 1) + (((Uint16*)part)[mm_vars]); - ndbassert(len >= ((sz + 3) >> 2)); - } - else - { - Uint32 sz= ((mm_vars + 1) << 1) + (((Uint16*)part)[mm_vars]); - ndbassert(len >= ((sz+3)>>2)+fix_sz); - } - if(ptr->m_operation_ptr_i != RNIL) - { - c_operation_pool.getPtr(ptr->m_operation_ptr_i); - } - } - else if(!(idx & Var_page::FREE)) - { - /** - * Chain - */ - Uint32 *part= page->get_ptr(i); - Uint32 sz= ((mm_vars + 1) << 1) + (((Uint16*)part)[mm_vars]); - ndbassert(len >= ((sz + 3) >> 2)); - } - else - { - - } - } - if(p == 0 && page->high_index > 1) - page->reorg((Var_page*)ctemp_page); - } - } - - if(p == 0) - { - validate_page(regTabPtr, (Var_page*)1); - } -} - -int -Dbtup::handle_size_change_after_update(KeyReqStruct* req_struct, - Tuple_header* org, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr, - Uint32 sizes[4]) -{ - ndbrequire(sizes[1] == sizes[3]); - //ndbout_c("%d %d %d %d", sizes[0], sizes[1], sizes[2], sizes[3]); - if(0) - printf("%p %d %d - handle_size_change_after_update ", - req_struct->m_tuple_ptr, - regOperPtr->m_tuple_location.m_page_no, - regOperPtr->m_tuple_location.m_page_idx); - - Uint32 bits= org->m_header_bits; - Uint32 copy_bits= req_struct->m_tuple_ptr->m_header_bits; - Uint32 fix_sz = regTabPtr->m_offsets[MM].m_fix_header_size; - - 
if(sizes[MM] == sizes[2+MM]) - ; - else if(sizes[MM] > sizes[2+MM]) - { - if(0) ndbout_c("shrink"); - copy_bits |= Tuple_header::MM_SHRINK; - } - else - { - if(0) printf("grow - "); - Ptr pagePtr = req_struct->m_varpart_page_ptr; - Var_page* pageP= (Var_page*)pagePtr.p; - Uint32 idx, alloc, needed; - Var_part_ref *refptr = org->get_var_part_ref_ptr(regTabPtr); - ndbassert(bits & Tuple_header::CHAINED_ROW); - - Local_key ref; - refptr->copyout(&ref); - idx= ref.m_page_idx; - if (! (copy_bits & Tuple_header::CHAINED_ROW)) - { - c_page_pool.getPtr(pagePtr, ref.m_page_no); - pageP = (Var_page*)pagePtr.p; - } - alloc= pageP->get_entry_len(idx); -#ifdef VM_TRACE - if(!pageP->get_entry_chain(idx)) - ndbout << *pageP << endl; -#endif - ndbassert(pageP->get_entry_chain(idx)); - needed= sizes[2+MM] - fix_sz; - - if(needed <= alloc) - { - //ndbassert(!regOperPtr->is_first_operation()); - if (0) ndbout_c(" no grow"); - return 0; - } - copy_bits |= Tuple_header::MM_GROWN; - if (unlikely(realloc_var_part(regFragPtr, regTabPtr, pagePtr, - refptr, alloc, needed))) - return -1; - - if (regTabPtr->m_bits & Tablerec::TR_Checksum) - { - jam(); - setChecksum(org, regTabPtr); - } - } - req_struct->m_tuple_ptr->m_header_bits = copy_bits; - return 0; -} - -int -Dbtup::nr_update_gci(Uint32 fragPtrI, const Local_key* key, Uint32 gci) -{ - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - if (tablePtr.p->m_bits & Tablerec::TR_RowGCI) - { - Local_key tmp = *key; - PagePtr page_ptr; - - int ret = alloc_page(tablePtr.p, fragPtr.p, &page_ptr, tmp.m_page_no); - - if (ret) - return -1; - - Tuple_header* ptr = (Tuple_header*) - ((Fix_page*)page_ptr.p)->get_ptr(tmp.m_page_idx, 0); - - ndbrequire(ptr->m_header_bits & Tuple_header::FREE); - *ptr->get_mm_gci(tablePtr.p) = gci; - } - return 0; -} - -int -Dbtup::nr_read_pk(Uint32 fragPtrI, - const Local_key* key, Uint32* dst, bool& copy) -{ - - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - Local_key tmp = *key; - - - PagePtr page_ptr; - int ret = alloc_page(tablePtr.p, fragPtr.p, &page_ptr, tmp.m_page_no); - if (ret) - return -1; - - KeyReqStruct req_struct; - Uint32* ptr= ((Fix_page*)page_ptr.p)->get_ptr(key->m_page_idx, 0); - - req_struct.m_page_ptr = page_ptr; - req_struct.m_tuple_ptr = (Tuple_header*)ptr; - Uint32 bits = req_struct.m_tuple_ptr->m_header_bits; - - ret = 0; - copy = false; - if (! 
(bits & Tuple_header::FREE)) - { - if (bits & Tuple_header::ALLOC) - { - Uint32 opPtrI= req_struct.m_tuple_ptr->m_operation_ptr_i; - Operationrec* opPtrP= c_operation_pool.getPtr(opPtrI); - ndbassert(!opPtrP->m_copy_tuple_location.isNull()); - req_struct.m_tuple_ptr= (Tuple_header*) - c_undo_buffer.get_ptr(&opPtrP->m_copy_tuple_location); - copy = true; - } - req_struct.check_offset[MM]= tablePtr.p->get_check_offset(MM); - req_struct.check_offset[DD]= tablePtr.p->get_check_offset(DD); - - Uint32 num_attr= tablePtr.p->m_no_of_attributes; - Uint32 descr_start= tablePtr.p->tabDescriptor; - TableDescriptor *tab_descr= &tableDescriptor[descr_start]; - ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); - req_struct.attr_descr= tab_descr; - - if (tablePtr.p->need_expand()) - prepare_read(&req_struct, tablePtr.p, false); - - const Uint32* attrIds= &tableDescriptor[tablePtr.p->readKeyArray].tabDescr; - const Uint32 numAttrs= tablePtr.p->noOfKeyAttr; - // read pk attributes from original tuple - - // new globals - tabptr= tablePtr; - fragptr= fragPtr; - operPtr.i= RNIL; - operPtr.p= NULL; - - // do it - ret = readAttributes(&req_struct, - attrIds, - numAttrs, - dst, - ZNIL, false); - - // done - if (likely(ret != -1)) { - // remove headers - Uint32 n= 0; - Uint32 i= 0; - while (n < numAttrs) { - const AttributeHeader ah(dst[i]); - Uint32 size= ah.getDataSize(); - ndbrequire(size != 0); - for (Uint32 j= 0; j < size; j++) { - dst[i + j - n]= dst[i + j + 1]; - } - n+= 1; - i+= 1 + size; - } - ndbrequire((int)i == ret); - ret -= numAttrs; - } else { - return terrorCode ? (-(int)terrorCode) : -1; - } - } - - if (tablePtr.p->m_bits & Tablerec::TR_RowGCI) - { - dst[ret] = *req_struct.m_tuple_ptr->get_mm_gci(tablePtr.p); - } - else - { - dst[ret] = 0; - } - return ret; -} - -#include - -int -Dbtup::nr_delete(Signal* signal, Uint32 senderData, - Uint32 fragPtrI, const Local_key* key, Uint32 gci) -{ - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - Local_key tmp = * key; - tmp.m_page_no= getRealpid(fragPtr.p, tmp.m_page_no); - - PagePtr pagePtr; - Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, tablePtr.p); - - if (!tablePtr.p->tuxCustomTriggers.isEmpty()) - { - jam(); - TuxMaintReq* req = (TuxMaintReq*)signal->getDataPtrSend(); - req->tableId = fragPtr.p->fragTableId; - req->fragId = fragPtr.p->fragmentId; - req->pageId = tmp.m_page_no; - req->pageIndex = tmp.m_page_idx; - req->tupVersion = ptr->get_tuple_version(); - req->opInfo = TuxMaintReq::OpRemove; - removeTuxEntries(signal, tablePtr.p); - } - - Local_key disk; - memcpy(&disk, ptr->get_disk_ref_ptr(tablePtr.p), sizeof(disk)); - - if (tablePtr.p->m_attributes[MM].m_no_of_varsize) - { - jam(); - free_var_rec(fragPtr.p, tablePtr.p, &tmp, pagePtr); - } else { - jam(); - free_fix_rec(fragPtr.p, tablePtr.p, &tmp, (Fix_page*)pagePtr.p); - } - - if (tablePtr.p->m_no_of_disk_attributes) - { - jam(); - - Uint32 sz = (sizeof(Dbtup::Disk_undo::Free) >> 2) + - tablePtr.p->m_offsets[DD].m_fix_header_size - 1; - - int res = c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz); - ndbrequire(res == 0); - - /** - * 1) alloc log buffer - * 2) get page - * 3) get log buffer - * 4) delete tuple - */ - Page_cache_client::Request preq; - preq.m_page = disk; - preq.m_callback.m_callbackData = senderData; - preq.m_callback.m_callbackFunction = - 
safe_cast(&Dbtup::nr_delete_page_callback); - int flags = Page_cache_client::COMMIT_REQ; - -#ifdef ERROR_INSERT - if (ERROR_INSERTED(4023) || ERROR_INSERTED(4024)) - { - int rnd = rand() % 100; - int slp = 0; - if (ERROR_INSERTED(4024)) - { - slp = 3000; - } - else if (rnd > 90) - { - slp = 3000; - } - else if (rnd > 70) - { - slp = 100; - } - - ndbout_c("rnd: %d slp: %d", rnd, slp); - - if (slp) - { - flags |= Page_cache_client::DELAY_REQ; - preq.m_delay_until_time = NdbTick_CurrentMillisecond()+(Uint64)slp; - } - } -#endif - - res = m_pgman.get_page(signal, preq, flags); - if (res == 0) - { - goto timeslice; - } - else if (unlikely(res == -1)) - { - return -1; - } - - PagePtr disk_page = *(PagePtr*)&m_pgman.m_ptr; - disk_page_set_dirty(disk_page); - - preq.m_callback.m_callbackFunction = - safe_cast(&Dbtup::nr_delete_log_buffer_callback); - Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); - res= lgman.get_log_buffer(signal, sz, &preq.m_callback); - switch(res){ - case 0: - signal->theData[2] = disk_page.i; - goto timeslice; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - } - - if (0) ndbout << "DIRECT DISK DELETE: " << disk << endl; - disk_page_free(signal, tablePtr.p, fragPtr.p, - &disk, *(PagePtr*)&disk_page, gci); - return 0; - } - - return 0; - -timeslice: - memcpy(signal->theData, &disk, sizeof(disk)); - return 1; -} - -void -Dbtup::nr_delete_page_callback(Signal* signal, - Uint32 userpointer, Uint32 page_id) -{ - Ptr gpage; - m_global_page_pool.getPtr(gpage, page_id); - PagePtr pagePtr= *(PagePtr*)&gpage; - disk_page_set_dirty(pagePtr); - Dblqh::Nr_op_info op; - op.m_ptr_i = userpointer; - op.m_disk_ref.m_page_no = pagePtr.p->m_page_no; - op.m_disk_ref.m_file_no = pagePtr.p->m_file_no; - c_lqh->get_nr_op_info(&op, page_id); - - Ptr fragPtr; - fragPtr.i= op.m_tup_frag_ptr_i; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - Ptr tablePtr; - tablePtr.i = fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - Uint32 sz = (sizeof(Dbtup::Disk_undo::Free) >> 2) + - tablePtr.p->m_offsets[DD].m_fix_header_size - 1; - - Callback cb; - cb.m_callbackData = userpointer; - cb.m_callbackFunction = - safe_cast(&Dbtup::nr_delete_log_buffer_callback); - Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); - int res= lgman.get_log_buffer(signal, sz, &cb); - switch(res){ - case 0: - return; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - } - - if (0) ndbout << "PAGE CALLBACK DISK DELETE: " << op.m_disk_ref << endl; - disk_page_free(signal, tablePtr.p, fragPtr.p, - &op.m_disk_ref, pagePtr, op.m_gci); - - c_lqh->nr_delete_complete(signal, &op); - return; -} - -void -Dbtup::nr_delete_log_buffer_callback(Signal* signal, - Uint32 userpointer, - Uint32 unused) -{ - Dblqh::Nr_op_info op; - op.m_ptr_i = userpointer; - c_lqh->get_nr_op_info(&op, RNIL); - - Ptr fragPtr; - fragPtr.i= op.m_tup_frag_ptr_i; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - Ptr tablePtr; - tablePtr.i = fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - Ptr gpage; - m_global_page_pool.getPtr(gpage, op.m_page_id); - PagePtr pagePtr= *(PagePtr*)&gpage; - - /** - * reset page no - */ - if (0) ndbout << "LOGBUFFER CALLBACK DISK DELETE: " << op.m_disk_ref << endl; - - disk_page_free(signal, tablePtr.p, fragPtr.p, - &op.m_disk_ref, pagePtr, op.m_gci); - - c_lqh->nr_delete_complete(signal, &op); -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp 
b/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp deleted file mode 100644 index 6822deb8b19..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp +++ /dev/null @@ -1,285 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_FIXALLOC_CPP -#include "Dbtup.hpp" -#include -#include -#include - -// -// Fixed Allocator -// This module is used to allocate and free fixed size tuples from the -// set of pages attached to a fragment. The fixed size is preset per -// fragment and their can only be one such value per fragment in the -// current implementation. -// -// Public methods -// bool -// alloc_fix_rec(Fragrecord* const regFragPtr, # In -// Tablerec* const regTabPtr, # In -// Uint32 pageType, # In -// Signal* signal, # In -// Uint32& pageOffset, # Out -// PagePtr& pagePtr) # In/Out -// This method allocates a fixed size and the pagePtr is a reference -// to the page and pageOffset is the offset in the page of the tuple. -// -// freeTh() -// This method is used to free a tuple header in normal transaction -// handling. -// -// getThAtPageSr() -// This method is used to allocate a tuple on a set page as part of -// undo log execution. -// -// -// Private methods -// getThAtPage() -// This method gets a tuple from a page with free tuples. -// -// convertThPage() -// Convert an empty page into a page of free tuples in a linked list. -// -// getEmptyPageTh() -// A page recently taken from the set of empty pages on the fragment is -// is made part of the set of free pages with fixed size tuples in the -// fragment. -// -Uint32* -Dbtup::alloc_fix_rec(Fragrecord* const regFragPtr, - Tablerec* const regTabPtr, - Local_key* key, - Uint32 * out_frag_page_id) -{ -/* ---------------------------------------------------------------- */ -/* EITHER NORMAL PAGE REQUESTED OR ALLOCATION FROM COPY PAGE */ -/* FAILED. TRY ALLOCATING FROM NORMAL PAGE. */ -/* ---------------------------------------------------------------- */ - PagePtr pagePtr; - pagePtr.i = regFragPtr->thFreeFirst.firstItem; - if (pagePtr.i == RNIL) { -/* ---------------------------------------------------------------- */ -// No prepared tuple header page with free entries exists. -/* ---------------------------------------------------------------- */ - pagePtr.i = getEmptyPage(regFragPtr); - if (pagePtr.i != RNIL) { - jam(); -/* ---------------------------------------------------------------- */ -// We found empty pages on the fragment. Allocate an empty page and -// convert it into a tuple header page and put it in thFreeFirst-list. 
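convertThPage(), called just below and defined further down, performs that conversion by chaining every fixed-size slot of the empty page through its first word, (prev << 16) | next, with 0xFFFF as the end marker, while the page header keeps the first free index and a free count. A simplified stand-alone model of that scheme (the real page additionally tags slots with FREE_RECORD, keeps a per-record GCI word, and tracks page-state transitions):

#include <cstdint>
#include <cassert>
#include <vector>

// Simplified model of the intra-page free list built by the deleted
// convertThPage(). Like the original, it assumes at least one record
// fits in the page.
struct FixPageModel {
  std::vector<uint32_t> data;
  uint32_t firstFree = 0;
  uint32_t freeCount = 0;

  FixPageModel(uint32_t words, uint32_t recSize) : data(words, 0)
  {
    uint32_t prev = 0xFFFF, pos = 0;
    while (pos + recSize <= words) {
      data[pos] = (prev << 16) | (pos + recSize);   // chain to the next slot
      prev = pos;
      pos += recSize;
      freeCount++;
    }
    data[prev] |= 0xFFFF;                           // last slot: no "next"
  }

  // Pop the head of the free list; the deleted alloc_tuple_from_page()
  // additionally moves a page that becomes full off the thFreeFirst list.
  uint32_t allocSlot()
  {
    assert(freeCount > 0);
    uint32_t idx = firstFree;
    firstFree = data[idx] & 0xFFFF;
    freeCount--;
    return idx;
  }
};

int main()
{
  FixPageModel page(32, 8);        // 32-word page, 8-word records: 4 slots
  assert(page.freeCount == 4);
  assert(page.allocSlot() == 0 && page.allocSlot() == 8);
  return 0;
}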
-/* ---------------------------------------------------------------- */ - c_page_pool.getPtr(pagePtr); - - ndbassert(pagePtr.p->page_state == ZEMPTY_MM); - - convertThPage((Fix_page*)pagePtr.p, regTabPtr, MM); - - pagePtr.p->page_state = ZTH_MM_FREE; - - LocalDLFifoList free_pages(c_page_pool, regFragPtr->thFreeFirst); - free_pages.addFirst(pagePtr); - } else { - jam(); -/* ---------------------------------------------------------------- */ -/* THERE ARE NO EMPTY PAGES. MEMORY CAN NOT BE ALLOCATED. */ -/* ---------------------------------------------------------------- */ - return 0; - } - } else { - jam(); -/* ---------------------------------------------------------------- */ -/* THIS SHOULD BE THE COMMON PATH THROUGH THE CODE, FREE */ -/* COPY PAGE EXISTED. */ -/* ---------------------------------------------------------------- */ - c_page_pool.getPtr(pagePtr); - } - - Uint32 page_offset= alloc_tuple_from_page(regFragPtr, (Fix_page*)pagePtr.p); - - *out_frag_page_id= pagePtr.p->frag_page_id; - key->m_page_no = pagePtr.i; - key->m_page_idx = page_offset; - return pagePtr.p->m_data + page_offset; -} - -void Dbtup::convertThPage(Fix_page* regPagePtr, - Tablerec* regTabPtr, - Uint32 mm) -{ - Uint32 nextTuple = regTabPtr->m_offsets[mm].m_fix_header_size; - /* - ASSUMES AT LEAST ONE TUPLE HEADER FITS AND THEREFORE NO HANDLING - OF ZERO AS EXTREME CASE - */ - Uint32 cnt= 0; - Uint32 pos= 0; - Uint32 prev = 0xFFFF; -#ifdef VM_TRACE - memset(regPagePtr->m_data, 0xF1, 4*Fix_page::DATA_WORDS); -#endif - Uint32 gci_pos = 2; - Uint32 gci_val = 0xF1F1F1F1; - if (regTabPtr->m_bits & Tablerec::TR_RowGCI) - { - Tuple_header* ptr = 0; - gci_pos = ptr->get_mm_gci(regTabPtr) - (Uint32*)ptr; - gci_val = 0; - } - while (pos + nextTuple <= Fix_page::DATA_WORDS) - { - regPagePtr->m_data[pos] = (prev << 16) | (pos + nextTuple); - regPagePtr->m_data[pos + 1] = Fix_page::FREE_RECORD; - regPagePtr->m_data[pos + gci_pos] = gci_val; - prev = pos; - pos += nextTuple; - cnt ++; - } - - regPagePtr->m_data[prev] |= 0xFFFF; - regPagePtr->next_free_index= 0; - regPagePtr->free_space= cnt; - regPagePtr->m_page_header.m_page_type = File_formats::PT_Tup_fixsize_page; -}//Dbtup::convertThPage() - -Uint32 -Dbtup::alloc_tuple_from_page(Fragrecord* const regFragPtr, - Fix_page* const regPagePtr) -{ - ndbassert(regPagePtr->free_space); - Uint32 idx= regPagePtr->alloc_record(); - if(regPagePtr->free_space == 0) - { - jam(); -/* ---------------------------------------------------------------- */ -/* THIS WAS THE LAST TUPLE HEADER IN THIS PAGE. REMOVE IT FROM*/ -/* THE TUPLE HEADER FREE LIST OR TH COPY FREE LIST. ALSO SET */ -/* A PROPER PAGE STATE. */ -/* */ -/* WE ALSO HAVE TO INSERT AN UNDO LOG ENTRY TO ENSURE PAGE */ -/* ARE MAINTAINED EVEN AFTER A SYSTEM CRASH. 
*/ -/* ---------------------------------------------------------------- */ - ndbrequire(regPagePtr->page_state == ZTH_MM_FREE); - LocalDLFifoList free_pages(c_page_pool, regFragPtr->thFreeFirst); - free_pages.remove((Page*)regPagePtr); - regPagePtr->page_state = ZTH_MM_FULL; - } - - return idx; -}//Dbtup::getThAtPage() - - -void Dbtup::free_fix_rec(Fragrecord* regFragPtr, - Tablerec* regTabPtr, - Local_key* key, - Fix_page* regPagePtr) -{ - Uint32 free= regPagePtr->free_record(key->m_page_idx); - - if(free == 1) - { - jam(); - PagePtr pagePtr = { (Page*)regPagePtr, key->m_page_no }; - LocalDLFifoList free_pages(c_page_pool, regFragPtr->thFreeFirst); - ndbrequire(regPagePtr->page_state == ZTH_MM_FULL); - regPagePtr->page_state = ZTH_MM_FREE; - free_pages.addLast(pagePtr); - } -}//Dbtup::freeTh() - - -int -Dbtup::alloc_page(Tablerec* tabPtrP, Fragrecord* fragPtrP, - PagePtr * ret, Uint32 page_no) -{ - Uint32 pages = fragPtrP->noOfPages; - - if (page_no >= pages) - { - Uint32 start = pages; - while(page_no >= pages) - pages += (pages >> 3) + (pages >> 4) + 2; - allocFragPages(fragPtrP, pages - start); - if (page_no >= (pages = fragPtrP->noOfPages)) - { - terrorCode = ZMEM_NOMEM_ERROR; - return 1; - } - } - - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, getRealpid(fragPtrP, page_no)); - - LocalDLList alloc_pages(c_page_pool, fragPtrP->emptyPrimPage); - LocalDLFifoList free_pages(c_page_pool, fragPtrP->thFreeFirst); - if (pagePtr.p->page_state == ZEMPTY_MM) - { - convertThPage((Fix_page*)pagePtr.p, tabPtrP, MM); - pagePtr.p->page_state = ZTH_MM_FREE; - alloc_pages.remove(pagePtr); - free_pages.addFirst(pagePtr); - } - - *ret = pagePtr; - return 0; -} - -Uint32* -Dbtup::alloc_fix_rowid(Fragrecord* regFragPtr, - Tablerec* regTabPtr, - Local_key* key, - Uint32 * out_frag_page_id) -{ - Uint32 page_no = key->m_page_no; - Uint32 idx= key->m_page_idx; - - PagePtr pagePtr; - if (alloc_page(regTabPtr, regFragPtr, &pagePtr, page_no)) - { - terrorCode = ZMEM_NOMEM_ERROR; - return 0; - } - - Uint32 state = pagePtr.p->page_state; - LocalDLFifoList free_pages(c_page_pool, regFragPtr->thFreeFirst); - switch(state){ - case ZTH_MM_FREE: - if (((Fix_page*)pagePtr.p)->alloc_record(idx) != idx) - { - terrorCode = ZROWID_ALLOCATED; - return 0; - } - - if(pagePtr.p->free_space == 0) - { - jam(); - pagePtr.p->page_state = ZTH_MM_FULL; - free_pages.remove(pagePtr); - } - - *out_frag_page_id= page_no; - key->m_page_no = pagePtr.i; - key->m_page_idx = idx; - return pagePtr.p->m_data + idx; - case ZTH_MM_FULL: - terrorCode = ZROWID_ALLOCATED; - return 0; - case ZEMPTY_MM: - ndbrequire(false); - } - return 0; /* purify: deadcode */ -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp deleted file mode 100644 index 262701b3d87..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp +++ /dev/null @@ -1,749 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_GEN_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DEBUG(x) { ndbout << "TUP::" << x << endl; } - -void Dbtup::initData() -{ - cnoOfAttrbufrec = ZNO_OF_ATTRBUFREC; - cnoOfFragrec = MAX_FRAG_PER_NODE; - cnoOfFragoprec = MAX_FRAG_PER_NODE; - cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC; - c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE; - c_noOfBuildIndexRec = 32; - - // Records with constant sizes - init_list_sizes(); -}//Dbtup::initData() - -Dbtup::Dbtup(Block_context& ctx, Pgman* pgman) - : SimulatedBlock(DBTUP, ctx), - c_lqh(0), - m_pgman(this, pgman), - c_extent_hash(c_extent_pool), - c_storedProcPool(), - c_buildIndexList(c_buildIndexPool), - c_undo_buffer(this) -{ - BLOCK_CONSTRUCTOR(Dbtup); - - addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG); - addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB); - addRecSignal(GSN_LCP_FRAG_ORD, &Dbtup::execLCP_FRAG_ORD); - - addRecSignal(GSN_DUMP_STATE_ORD, &Dbtup::execDUMP_STATE_ORD); - addRecSignal(GSN_SEND_PACKED, &Dbtup::execSEND_PACKED); - addRecSignal(GSN_ATTRINFO, &Dbtup::execATTRINFO); - addRecSignal(GSN_STTOR, &Dbtup::execSTTOR); - addRecSignal(GSN_MEMCHECKREQ, &Dbtup::execMEMCHECKREQ); - addRecSignal(GSN_TUPKEYREQ, &Dbtup::execTUPKEYREQ); - addRecSignal(GSN_TUPSEIZEREQ, &Dbtup::execTUPSEIZEREQ); - addRecSignal(GSN_TUPRELEASEREQ, &Dbtup::execTUPRELEASEREQ); - addRecSignal(GSN_STORED_PROCREQ, &Dbtup::execSTORED_PROCREQ); - addRecSignal(GSN_TUPFRAGREQ, &Dbtup::execTUPFRAGREQ); - addRecSignal(GSN_TUP_ADD_ATTRREQ, &Dbtup::execTUP_ADD_ATTRREQ); - addRecSignal(GSN_TUP_COMMITREQ, &Dbtup::execTUP_COMMITREQ); - addRecSignal(GSN_TUP_ABORTREQ, &Dbtup::execTUP_ABORTREQ); - addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR); - addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true); - - // Trigger Signals - addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ); - addRecSignal(GSN_DROP_TRIG_REQ, &Dbtup::execDROP_TRIG_REQ); - - addRecSignal(GSN_DROP_TAB_REQ, &Dbtup::execDROP_TAB_REQ); - - addRecSignal(GSN_TUP_DEALLOCREQ, &Dbtup::execTUP_DEALLOCREQ); - addRecSignal(GSN_TUP_WRITELOG_REQ, &Dbtup::execTUP_WRITELOG_REQ); - - // Ordered index related - addRecSignal(GSN_BUILDINDXREQ, &Dbtup::execBUILDINDXREQ); - - // Tup scan - addRecSignal(GSN_ACC_SCANREQ, &Dbtup::execACC_SCANREQ); - addRecSignal(GSN_NEXT_SCANREQ, &Dbtup::execNEXT_SCANREQ); - addRecSignal(GSN_ACC_CHECK_SCAN, &Dbtup::execACC_CHECK_SCAN); - addRecSignal(GSN_ACCKEYCONF, &Dbtup::execACCKEYCONF); - addRecSignal(GSN_ACCKEYREF, &Dbtup::execACCKEYREF); - addRecSignal(GSN_ACC_ABORTCONF, &Dbtup::execACC_ABORTCONF); - - // Drop table - addRecSignal(GSN_FSREMOVEREF, &Dbtup::execFSREMOVEREF, true); - addRecSignal(GSN_FSREMOVECONF, &Dbtup::execFSREMOVECONF, true); - - attrbufrec = 0; - fragoperrec = 0; - fragrecord = 0; - hostBuffer = 0; - pageRange = 0; - tablerec = 0; - tableDescriptor = 0; - totNoOfPagesAllocated = 0; - cnoOfAllocatedPages = 0; - - initData(); - CLEAR_ERROR_INSERT_VALUE; -}//Dbtup::Dbtup() - -Dbtup::~Dbtup() -{ - // Records with dynamic sizes - deallocRecord((void **)&attrbufrec,"Attrbufrec", - sizeof(Attrbufrec), - cnoOfAttrbufrec); - - deallocRecord((void 
**)&fragoperrec,"Fragoperrec", - sizeof(Fragoperrec), - cnoOfFragoprec); - - deallocRecord((void **)&fragrecord,"Fragrecord", - sizeof(Fragrecord), - cnoOfFragrec); - - deallocRecord((void **)&hostBuffer,"HostBuffer", - sizeof(HostBuffer), - MAX_NODES); - - deallocRecord((void **)&pageRange,"PageRange", - sizeof(PageRange), - cnoOfPageRangeRec); - - deallocRecord((void **)&tablerec,"Tablerec", - sizeof(Tablerec), - cnoOfTablerec); - - deallocRecord((void **)&tableDescriptor, "TableDescriptor", - sizeof(TableDescriptor), - cnoOfTabDescrRec); - -}//Dbtup::~Dbtup() - -BLOCK_FUNCTIONS(Dbtup) - -void Dbtup::execCONTINUEB(Signal* signal) -{ - jamEntry(); - Uint32 actionType = signal->theData[0]; - Uint32 dataPtr = signal->theData[1]; - switch (actionType) { - case ZINITIALISE_RECORDS: - jam(); - initialiseRecordsLab(signal, dataPtr, - signal->theData[2], signal->theData[3]); - break; - case ZREL_FRAG: - jam(); - releaseFragment(signal, dataPtr, signal->theData[2]); - break; - case ZREPORT_MEMORY_USAGE:{ - jam(); - static int c_currentMemUsed = 0; - Uint32 cnt = signal->theData[1]; - Uint32 tmp = c_page_pool.getSize(); - int now = tmp ? (cnoOfAllocatedPages * 100)/tmp : 0; - const int thresholds[] = { 100, 90, 80, 0 }; - - Uint32 i = 0; - const Uint32 sz = sizeof(thresholds)/sizeof(thresholds[0]); - for(i = 0; i= thresholds[i]){ - now = thresholds[i]; - break; - } - } - - if(now != c_currentMemUsed || - (c_memusage_report_frequency && cnt + 1 == c_memusage_report_frequency)) - { - reportMemoryUsage(signal, - now > c_currentMemUsed ? 1 : - now < c_currentMemUsed ? -1 : 0); - cnt = 0; - c_currentMemUsed = now; - } - else - { - cnt++; - } - signal->theData[0] = ZREPORT_MEMORY_USAGE; - signal->theData[1] = cnt; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); - return; - } - case ZBUILD_INDEX: - jam(); - buildIndex(signal, dataPtr); - break; - case ZTUP_SCAN: - jam(); - { - ScanOpPtr scanPtr; - c_scanOpPool.getPtr(scanPtr, dataPtr); - scanCont(signal, scanPtr); - } - return; - case ZFREE_EXTENT: - { - jam(); - - TablerecPtr tabPtr; - tabPtr.i= dataPtr; - FragrecordPtr fragPtr; - fragPtr.i= signal->theData[2]; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - drop_fragment_free_extent(signal, tabPtr, fragPtr, signal->theData[3]); - return; - } - case ZUNMAP_PAGES: - { - jam(); - - TablerecPtr tabPtr; - tabPtr.i= dataPtr; - FragrecordPtr fragPtr; - fragPtr.i= signal->theData[2]; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - drop_fragment_unmap_pages(signal, tabPtr, fragPtr, signal->theData[3]); - return; - } - case ZFREE_VAR_PAGES: - { - jam(); - drop_fragment_free_var_pages(signal); - return; - } - default: - ndbrequire(false); - break; - }//switch -}//Dbtup::execTUP_CONTINUEB() - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* ------------------- SYSTEM RESTART MODULE ---------------------- */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ -void Dbtup::execSTTOR(Signal* signal) -{ - jamEntry(); - Uint32 startPhase = signal->theData[1]; - Uint32 sigKey = signal->theData[6]; - switch (startPhase) { - case ZSTARTPHASE1: - jam(); - ndbrequire((c_lqh= (Dblqh*)globalData.getBlock(DBLQH)) != 0); - ndbrequire((c_tsman= (Tsman*)globalData.getBlock(TSMAN)) != 0); - ndbrequire((c_lgman= 
(Lgman*)globalData.getBlock(LGMAN)) != 0); - cownref = calcTupBlockRef(0); - break; - default: - jam(); - break; - }//switch - signal->theData[0] = sigKey; - signal->theData[1] = 3; - signal->theData[2] = 2; - signal->theData[3] = ZSTARTPHASE1; - signal->theData[4] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 5, JBB); - return; -}//Dbtup::execSTTOR() - -/************************************************************************************************/ -// SIZE_ALTREP INITIALIZE DATA STRUCTURES, FILES AND DS VARIABLES, GET READY FOR EXTERNAL -// CONNECTIONS. -/************************************************************************************************/ -void Dbtup::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - jamEntry(); - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_FRAG, &cnoOfFragrec)); - - Uint32 noOfTriggers= 0; - - Uint32 tmp= 0; - - if (ndb_mgm_get_int_parameter(p, CFG_DB_MAX_ALLOCATE, &tmp)) - tmp = 32 * 1024 * 1024; - m_max_allocate_pages = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; - - tmp = 0; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp)); - initPageRangeSize(tmp); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE_DESC, - &cnoOfTabDescrRec)); - Uint32 noOfStoredProc; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_STORED_PROC, - &noOfStoredProc)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS, - &noOfTriggers)); - - cnoOfTabDescrRec = (cnoOfTabDescrRec & 0xFFFFFFF0) + 16; - - initRecords(); - - c_storedProcPool.setSize(noOfStoredProc); - c_buildIndexPool.setSize(c_noOfBuildIndexRec); - c_triggerPool.setSize(noOfTriggers, false, true, true, CFG_DB_NO_TRIGGERS); - - c_extent_hash.setSize(1024); // 4k - - Pool_context pc; - pc.m_block = this; - c_page_request_pool.wo_pool_init(RT_DBTUP_PAGE_REQUEST, pc); - c_extent_pool.init(RT_DBTUP_EXTENT_INFO, pc); - - Uint32 nScanOp; // use TUX config for now - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp)); - c_scanOpPool.setSize(nScanOp + 1); - Uint32 nScanBatch; - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch)); - c_scanLockPool.setSize(nScanOp * nScanBatch); - - - /* read ahead for disk scan can not be more that disk page buffer */ - { - Uint64 tmp = 64*1024*1024; - ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY, &tmp); - m_max_page_read_ahead = (tmp + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE; // in pages - // never read ahead more than 32 pages - if (m_max_page_read_ahead > 32) - m_max_page_read_ahead = 32; - } - - - ScanOpPtr lcp; - ndbrequire(c_scanOpPool.seize(lcp)); - new (lcp.p) ScanOp(); - c_lcp_scan_op= lcp.i; - - czero = 0; - cminusOne = czero - 1; - clastBitMask = 1; - clastBitMask = clastBitMask << 31; - - c_memusage_report_frequency = 0; - ndb_mgm_get_int_parameter(p, CFG_DB_MEMREPORT_FREQUENCY, - &c_memusage_report_frequency); - - initialiseRecordsLab(signal, 0, ref, senderData); -}//Dbtup::execSIZEALT_REP() - -void Dbtup::initRecords() -{ - unsigned i; - Uint32 tmp; - Uint32 tmp1 = 0; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ndbrequire(!ndb_mgm_get_int_parameter(p, 
CFG_TUP_PAGE, &tmp)); - - // Records with dynamic sizes - Page* ptr =(Page*)allocRecord("Page", sizeof(Page), tmp, false, CFG_DB_DATA_MEM); - c_page_pool.set(ptr, tmp); - - attrbufrec = (Attrbufrec*)allocRecord("Attrbufrec", - sizeof(Attrbufrec), - cnoOfAttrbufrec); - - fragoperrec = (Fragoperrec*)allocRecord("Fragoperrec", - sizeof(Fragoperrec), - cnoOfFragoprec); - - fragrecord = (Fragrecord*)allocRecord("Fragrecord", - sizeof(Fragrecord), - cnoOfFragrec); - - hostBuffer = (HostBuffer*)allocRecord("HostBuffer", - sizeof(HostBuffer), - MAX_NODES); - - tableDescriptor = (TableDescriptor*)allocRecord("TableDescriptor", - sizeof(TableDescriptor), - cnoOfTabDescrRec); - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_OP_RECS, &tmp)); - ndb_mgm_get_int_parameter(p, CFG_DB_NO_LOCAL_OPS, &tmp1); - c_operation_pool.setSize(tmp, false, true, true, - tmp1 == 0 ? CFG_DB_NO_OPS : CFG_DB_NO_LOCAL_OPS); - - pageRange = (PageRange*)allocRecord("PageRange", - sizeof(PageRange), - cnoOfPageRangeRec); - - tablerec = (Tablerec*)allocRecord("Tablerec", - sizeof(Tablerec), - cnoOfTablerec); - - for (i = 0; igetDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = retData; - sendSignal(retRef, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); - } - return; - default: - ndbrequire(false); - break; - }//switch - signal->theData[0] = ZINITIALISE_RECORDS; - signal->theData[1] = switchData + 1; - signal->theData[2] = retRef; - signal->theData[3] = retData; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); - return; -}//Dbtup::initialiseRecordsLab() - -void Dbtup::execNDB_STTOR(Signal* signal) -{ - jamEntry(); - cndbcntrRef = signal->theData[0]; - Uint32 ownNodeId = signal->theData[1]; - Uint32 startPhase = signal->theData[2]; - switch (startPhase) { - case ZSTARTPHASE1: - jam(); - cownNodeId = ownNodeId; - cownref = calcTupBlockRef(ownNodeId); - break; - case ZSTARTPHASE2: - jam(); - break; - case ZSTARTPHASE3: - jam(); - startphase3Lab(signal, ~0, ~0); - break; - case ZSTARTPHASE4: - jam(); - break; - case ZSTARTPHASE6: - jam(); -/*****************************************/ -/* NOW SET THE DISK WRITE SPEED TO */ -/* PAGES PER TICK AFTER SYSTEM */ -/* RESTART. 
*/ -/*****************************************/ - signal->theData[0] = ZREPORT_MEMORY_USAGE; - signal->theData[1] = 0; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 2); - break; - default: - jam(); - break; - }//switch - signal->theData[0] = cownref; - sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB); -}//Dbtup::execNDB_STTOR() - -void Dbtup::startphase3Lab(Signal* signal, Uint32 config1, Uint32 config2) -{ -}//Dbtup::startphase3Lab() - -void Dbtup::initializeAttrbufrec() -{ - AttrbufrecPtr attrBufPtr; - for (attrBufPtr.i = 0; - attrBufPtr.i < cnoOfAttrbufrec; attrBufPtr.i++) { - refresh_watch_dog(); - ptrAss(attrBufPtr, attrbufrec); - attrBufPtr.p->attrbuf[ZBUF_NEXT] = attrBufPtr.i + 1; - }//for - attrBufPtr.i = cnoOfAttrbufrec - 1; - ptrAss(attrBufPtr, attrbufrec); - attrBufPtr.p->attrbuf[ZBUF_NEXT] = RNIL; - cfirstfreeAttrbufrec = 0; - cnoFreeAttrbufrec = cnoOfAttrbufrec; -}//Dbtup::initializeAttrbufrec() - -void Dbtup::initializeFragoperrec() -{ - FragoperrecPtr fragoperPtr; - for (fragoperPtr.i = 0; fragoperPtr.i < cnoOfFragoprec; fragoperPtr.i++) { - ptrAss(fragoperPtr, fragoperrec); - fragoperPtr.p->nextFragoprec = fragoperPtr.i + 1; - }//for - fragoperPtr.i = cnoOfFragoprec - 1; - ptrAss(fragoperPtr, fragoperrec); - fragoperPtr.p->nextFragoprec = RNIL; - cfirstfreeFragopr = 0; -}//Dbtup::initializeFragoperrec() - -void Dbtup::initializeFragrecord() -{ - FragrecordPtr regFragPtr; - for (regFragPtr.i = 0; regFragPtr.i < cnoOfFragrec; regFragPtr.i++) { - refresh_watch_dog(); - ptrAss(regFragPtr, fragrecord); - new (regFragPtr.p) Fragrecord(); - regFragPtr.p->nextfreefrag = regFragPtr.i + 1; - regFragPtr.p->fragStatus = IDLE; - }//for - regFragPtr.i = cnoOfFragrec - 1; - ptrAss(regFragPtr, fragrecord); - regFragPtr.p->nextfreefrag = RNIL; - cfirstfreefrag = 0; -}//Dbtup::initializeFragrecord() - -void Dbtup::initializeHostBuffer() -{ - Uint32 hostId; - cpackedListIndex = 0; - for (hostId = 0; hostId < MAX_NODES; hostId++) { - hostBuffer[hostId].inPackedList = false; - hostBuffer[hostId].noOfPacketsTA = 0; - hostBuffer[hostId].packetLenTA = 0; - }//for -}//Dbtup::initializeHostBuffer() - - -void Dbtup::initializeOperationrec() -{ - refresh_watch_dog(); -}//Dbtup::initializeOperationrec() - -void Dbtup::initializeTablerec() -{ - TablerecPtr regTabPtr; - for (regTabPtr.i = 0; regTabPtr.i < cnoOfTablerec; regTabPtr.i++) { - jam(); - refresh_watch_dog(); - ptrAss(regTabPtr, tablerec); - initTab(regTabPtr.p); - }//for -}//Dbtup::initializeTablerec() - -void -Dbtup::initTab(Tablerec* const regTabPtr) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - regTabPtr->fragid[i] = RNIL; - regTabPtr->fragrec[i] = RNIL; - }//for - regTabPtr->readFunctionArray = NULL; - regTabPtr->updateFunctionArray = NULL; - regTabPtr->charsetArray = NULL; - - regTabPtr->tabDescriptor = RNIL; - regTabPtr->readKeyArray = RNIL; - - regTabPtr->m_bits = 0; - - regTabPtr->m_no_of_attributes = 0; - regTabPtr->noOfKeyAttr = 0; - - regTabPtr->m_dropTable.tabUserPtr = RNIL; - regTabPtr->m_dropTable.tabUserRef = 0; - regTabPtr->tableStatus = NOT_DEFINED; - - // Clear trigger data - if (!regTabPtr->afterInsertTriggers.isEmpty()) - regTabPtr->afterInsertTriggers.release(); - if (!regTabPtr->afterDeleteTriggers.isEmpty()) - regTabPtr->afterDeleteTriggers.release(); - if (!regTabPtr->afterUpdateTriggers.isEmpty()) - regTabPtr->afterUpdateTriggers.release(); - if (!regTabPtr->subscriptionInsertTriggers.isEmpty()) - regTabPtr->subscriptionInsertTriggers.release(); - if 
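initializeAttrbufrec, initializeFragoperrec and initializeFragrecord above all follow the same free-list idiom: record i links to record i+1, the last record is terminated with RNIL, and the head of the list starts at 0. A standalone sketch of that pattern, with the sentinel value standing in for the kernel's RNIL:

#include <cstdint>
#include <vector>
#include <cassert>

static const uint32_t SENTINEL = 0xFFFFFFFF;   // stand-in for RNIL in this sketch

int main()
{
  std::vector<uint32_t> next(8);
  for (uint32_t i = 0; i < next.size(); i++)
    next[i] = i + 1;                 // e.g. attrBufPtr.p->attrbuf[ZBUF_NEXT] = attrBufPtr.i + 1
  next.back() = SENTINEL;            // last record terminates the list
  uint32_t firstFree = 0;            // cfirstfreeAttrbufrec / cfirstfreeFragopr / cfirstfreefrag

  // seizing a record is then just popping the head of the list
  uint32_t seized = firstFree;
  firstFree = next[seized];
  assert(seized == 0 && firstFree == 1);
  return 0;
}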
(!regTabPtr->subscriptionDeleteTriggers.isEmpty()) - regTabPtr->subscriptionDeleteTriggers.release(); - if (!regTabPtr->subscriptionUpdateTriggers.isEmpty()) - regTabPtr->subscriptionUpdateTriggers.release(); - if (!regTabPtr->constraintUpdateTriggers.isEmpty()) - regTabPtr->constraintUpdateTriggers.release(); - if (!regTabPtr->tuxCustomTriggers.isEmpty()) - regTabPtr->tuxCustomTriggers.release(); -}//Dbtup::initTab() - -void Dbtup::initializeTabDescr() -{ - TableDescriptorPtr regTabDesPtr; - for (Uint32 i = 0; i < 16; i++) { - cfreeTdList[i] = RNIL; - }//for - for (regTabDesPtr.i = 0; regTabDesPtr.i < cnoOfTabDescrRec; regTabDesPtr.i++) { - refresh_watch_dog(); - ptrAss(regTabDesPtr, tableDescriptor); - regTabDesPtr.p->tabDescr = RNIL; - }//for - freeTabDescr(0, cnoOfTabDescrRec); -}//Dbtup::initializeTabDescr() - -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -/* --------------- CONNECT/DISCONNECT MODULE ---------------------- */ -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -void Dbtup::execTUPSEIZEREQ(Signal* signal) -{ - OperationrecPtr regOperPtr; - jamEntry(); - Uint32 userPtr = signal->theData[0]; - BlockReference userRef = signal->theData[1]; - if (!c_operation_pool.seize(regOperPtr)) - { - jam(); - signal->theData[0] = userPtr; - signal->theData[1] = ZGET_OPREC_ERROR; - sendSignal(userRef, GSN_TUPSEIZEREF, signal, 2, JBB); - return; - }//if - - new (regOperPtr.p) Operationrec(); - regOperPtr.p->firstAttrinbufrec = RNIL; - regOperPtr.p->lastAttrinbufrec = RNIL; - regOperPtr.p->m_any_value = 0; - regOperPtr.p->op_struct.op_type = ZREAD; - regOperPtr.p->op_struct.in_active_list = false; - set_trans_state(regOperPtr.p, TRANS_DISCONNECTED); - regOperPtr.p->storedProcedureId = ZNIL; - regOperPtr.p->prevActiveOp = RNIL; - regOperPtr.p->nextActiveOp = RNIL; - regOperPtr.p->tupVersion = ZNIL; - regOperPtr.p->op_struct.delete_insert_flag = false; - - initOpConnection(regOperPtr.p); - regOperPtr.p->userpointer = userPtr; - signal->theData[0] = regOperPtr.p->userpointer; - signal->theData[1] = regOperPtr.i; - sendSignal(userRef, GSN_TUPSEIZECONF, signal, 2, JBB); - return; -}//Dbtup::execTUPSEIZEREQ() - -#define printFragment(t){ for(Uint32 i = 0; i < MAX_FRAG_PER_NODE;i++){\ - ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \ - t.i, t.p->fragid[i], i, t.p->fragrec[i]); }} - -void Dbtup::execTUPRELEASEREQ(Signal* signal) -{ - OperationrecPtr regOperPtr; - jamEntry(); - regOperPtr.i = signal->theData[0]; - c_operation_pool.getPtr(regOperPtr); - set_trans_state(regOperPtr.p, TRANS_DISCONNECTED); - c_operation_pool.release(regOperPtr); - - signal->theData[0] = regOperPtr.p->userpointer; - sendSignal(DBLQH_REF, GSN_TUPRELEASECONF, signal, 1, JBB); - return; -}//Dbtup::execTUPRELEASEREQ() - -void Dbtup::releaseFragrec(FragrecordPtr regFragPtr) -{ - regFragPtr.p->nextfreefrag = cfirstfreefrag; - cfirstfreefrag = regFragPtr.i; -}//Dbtup::releaseFragrec() - - - diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp deleted file mode 100644 index 51235a30939..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp +++ /dev/null @@ -1,737 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General 
Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_INDEX_CPP -#include -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include -#include - -// methods used by ordered index - -void -Dbtup::tuxGetTupAddr(Uint32 fragPtrI, - Uint32 pageId, - Uint32 pageIndex, - Uint32& tupAddr) -{ - jamEntry(); - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, pageId); - Uint32 fragPageId= pagePtr.p->frag_page_id; - tupAddr= (fragPageId << MAX_TUPLES_BITS) | pageIndex; -} - -int -Dbtup::tuxAllocNode(Signal* signal, - Uint32 fragPtrI, - Uint32& pageId, - Uint32& pageOffset, - Uint32*& node) -{ - jamEntry(); - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - terrorCode= 0; - - Local_key key; - Uint32* ptr, frag_page_id; - if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0) - { - jam(); - terrorCode = ZMEM_NOMEM_ERROR; // caller sets error - return terrorCode; - } - pageId= key.m_page_no; - pageOffset= key.m_page_idx; - Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE); - Uint32 attrDataOffset= AttributeOffset::getOffset( - tableDescriptor[attrDescIndex + 1].tabDescr); - node= ptr + attrDataOffset; - return 0; -} - -#if 0 -void -Dbtup::tuxFreeNode(Signal* signal, - Uint32 fragPtrI, - Uint32 pageId, - Uint32 pageOffset, - Uint32* node) -{ - jamEntry(); - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - PagePtr pagePtr; - pagePtr.i= pageId; - ptrCheckGuard(pagePtr, cnoOfPage, cpage); - Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE); - Uint32 attrDataOffset= AttributeOffset::getOffset(tableDescriptor[attrDescIndex + 1].tabDescr); - ndbrequire(node == &pagePtr.p->pageWord[pageOffset] + attrDataOffset); - freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, pageOffset); -} -#endif - -void -Dbtup::tuxGetNode(Uint32 fragPtrI, - Uint32 pageId, - Uint32 pageOffset, - Uint32*& node) -{ - jamEntry(); - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, pageId); - Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE); - Uint32 attrDataOffset= AttributeOffset::getOffset( - tableDescriptor[attrDescIndex + 1].tabDescr); - node= ((Fix_page*)pagePtr.p)-> - get_ptr(pageOffset, tablePtr.p->m_offsets[MM].m_fix_header_size) + - attrDataOffset; -} -int -Dbtup::tuxReadAttrs(Uint32 fragPtrI, - Uint32 pageId, - Uint32 pageIndex, - Uint32 tupVersion, - const Uint32* attrIds, - Uint32 numAttrs, - Uint32* dataOut) -{ - jamEntry(); - // use own variables instead of 
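tuxGetTupAddr above packs the fragment page id and the index within the page into a single 32-bit tuple address. The field width comes from MAX_TUPLES_BITS; the 13-bit value below is only an assumption for the example.

#include <cstdint>
#include <cassert>

static const uint32_t TUPLES_BITS = 13;                      // assumed stand-in for MAX_TUPLES_BITS

static uint32_t pack_tup_addr(uint32_t fragPageId, uint32_t pageIndex)
{
  return (fragPageId << TUPLES_BITS) | pageIndex;            // as in tuxGetTupAddr
}

int main()
{
  uint32_t addr = pack_tup_addr(42, 7);
  assert((addr >> TUPLES_BITS) == 42);                       // page id recovered
  assert((addr & ((1u << TUPLES_BITS) - 1)) == 7);           // page index recovered
  return 0;
}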
globals - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - // search for tuple version if not original - - Operationrec tmpOp; - KeyReqStruct req_struct; - tmpOp.m_tuple_location.m_page_no= pageId; - tmpOp.m_tuple_location.m_page_idx= pageIndex; - - setup_fixed_part(&req_struct, &tmpOp, tablePtr.p); - Tuple_header *tuple_ptr= req_struct.m_tuple_ptr; - if (tuple_ptr->get_tuple_version() != tupVersion) - { - jam(); - OperationrecPtr opPtr; - opPtr.i= tuple_ptr->m_operation_ptr_i; - Uint32 loopGuard= 0; - while (opPtr.i != RNIL) { - c_operation_pool.getPtr(opPtr); - if (opPtr.p->tupVersion == tupVersion) { - jam(); - if (!opPtr.p->m_copy_tuple_location.isNull()) { - req_struct.m_tuple_ptr= (Tuple_header*) - c_undo_buffer.get_ptr(&opPtr.p->m_copy_tuple_location); - } - break; - } - jam(); - opPtr.i= opPtr.p->prevActiveOp; - ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS)); - } - } - // read key attributes from found tuple version - // save globals - TablerecPtr tabptr_old= tabptr; - FragrecordPtr fragptr_old= fragptr; - OperationrecPtr operPtr_old= operPtr; - // new globals - tabptr= tablePtr; - fragptr= fragPtr; - operPtr.i= RNIL; - operPtr.p= NULL; - prepare_read(&req_struct, tablePtr.p, false); - - // do it - int ret = readAttributes(&req_struct, - attrIds, - numAttrs, - dataOut, - ZNIL, - true); - - // restore globals - tabptr= tabptr_old; - fragptr= fragptr_old; - operPtr= operPtr_old; - // done - if (ret == -1) { - ret = terrorCode ? (-(int)terrorCode) : -1; - } - return ret; -} -int -Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) -{ - jamEntry(); - // use own variables instead of globals - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - Operationrec tmpOp; - tmpOp.m_tuple_location.m_page_no= pageId; - tmpOp.m_tuple_location.m_page_idx= pageIndex; - - KeyReqStruct req_struct; - - PagePtr page_ptr; - Uint32* ptr= get_ptr(&page_ptr, &tmpOp.m_tuple_location, tablePtr.p); - req_struct.m_page_ptr = page_ptr; - req_struct.m_tuple_ptr = (Tuple_header*)ptr; - - int ret = 0; - if (! 
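When the stored row version does not match the requested one, tuxReadAttrs above walks the tuple's operation chain via prevActiveOp until it finds the operation that produced that version, with a loop guard against cycles. A simplified standalone sketch; the types and the 15-bit guard width are assumptions for the example.

#include <cstdint>
#include <vector>
#include <cassert>

struct Op { uint32_t tupVersion; int prev; };                // prev == -1 ends the chain

static int find_version(const std::vector<Op>& ops, int head, uint32_t wanted)
{
  uint32_t guard = 0;
  for (int i = head; i != -1; i = ops[i].prev) {
    if (ops[i].tupVersion == wanted)
      return i;                                              // read from this op's copy tuple
    guard++;
    assert(guard < (1u << 15));                              // ndbrequire(++loopGuard < ...)
  }
  return -1;                                                 // fall back to the stored row
}

int main()
{
  std::vector<Op> ops = { {3, -1}, {4, 0}, {5, 1} };         // newest operation last in the chain
  assert(find_version(ops, 2, 4) == 1);
  assert(find_version(ops, 2, 9) == -1);
  return 0;
}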
(req_struct.m_tuple_ptr->m_header_bits & Tuple_header::FREE)) - { - req_struct.check_offset[MM]= tablePtr.p->get_check_offset(MM); - req_struct.check_offset[DD]= tablePtr.p->get_check_offset(DD); - - Uint32 num_attr= tablePtr.p->m_no_of_attributes; - Uint32 descr_start= tablePtr.p->tabDescriptor; - TableDescriptor *tab_descr= &tableDescriptor[descr_start]; - ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); - req_struct.attr_descr= tab_descr; - - if(req_struct.m_tuple_ptr->m_header_bits & Tuple_header::ALLOC) - { - Uint32 opPtrI= req_struct.m_tuple_ptr->m_operation_ptr_i; - Operationrec* opPtrP= c_operation_pool.getPtr(opPtrI); - ndbassert(!opPtrP->m_copy_tuple_location.isNull()); - req_struct.m_tuple_ptr= (Tuple_header*) - c_undo_buffer.get_ptr(&opPtrP->m_copy_tuple_location); - } - prepare_read(&req_struct, tablePtr.p, false); - - const Uint32* attrIds= &tableDescriptor[tablePtr.p->readKeyArray].tabDescr; - const Uint32 numAttrs= tablePtr.p->noOfKeyAttr; - // read pk attributes from original tuple - - // save globals - TablerecPtr tabptr_old= tabptr; - FragrecordPtr fragptr_old= fragptr; - OperationrecPtr operPtr_old= operPtr; - - // new globals - tabptr= tablePtr; - fragptr= fragPtr; - operPtr.i= RNIL; - operPtr.p= NULL; - - // do it - ret = readAttributes(&req_struct, - attrIds, - numAttrs, - dataOut, - ZNIL, - xfrmFlag); - // restore globals - tabptr= tabptr_old; - fragptr= fragptr_old; - operPtr= operPtr_old; - // done - if (ret != -1) { - // remove headers - Uint32 n= 0; - Uint32 i= 0; - while (n < numAttrs) { - const AttributeHeader ah(dataOut[i]); - Uint32 size= ah.getDataSize(); - ndbrequire(size != 0); - for (Uint32 j= 0; j < size; j++) { - dataOut[i + j - n]= dataOut[i + j + 1]; - } - n+= 1; - i+= 1 + size; - } - ndbrequire((int)i == ret); - ret -= numAttrs; - } else { - ret= terrorCode ? (-(int)terrorCode) : -1; - } - } - if (tablePtr.p->m_bits & Tablerec::TR_RowGCI) - { - dataOut[ret] = *req_struct.m_tuple_ptr->get_mm_gci(tablePtr.p); - } - else - { - dataOut[ret] = 0; - } - return ret; -} - -int -Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag) -{ - jamEntry(); - // get table - TablerecPtr tablePtr; - tablePtr.i = tableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - // get fragment - FragrecordPtr fragPtr; - getFragmentrec(fragPtr, fragId, tablePtr.p); - // get real page id and tuple offset - - Uint32 pageId = getRealpid(fragPtr.p, fragPageId); - // use TUX routine - optimize later - int ret = tuxReadPk(fragPtr.i, pageId, pageIndex, dataOut, xfrmFlag); - return ret; -} - -/* - * TUX index contains all tuple versions. A scan in TUX has scanned - * one of them and asks if it can be returned as scan result. This - * depends on trans id, dirty read flag, and savepoint within trans. - * - * Previously this faked a ZREAD operation and used getPage(). - * In TUP getPage() is run after ACC locking, but TUX comes here - * before ACC access. Instead of modifying getPage() it is more - * clear to do the full check here. 
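The "remove headers" loop at the end of tuxReadPk above compacts the read result in place: readAttributes produces one header word followed by the data words for each key attribute, and the loop shifts the data left over the headers it removes. A standalone sketch where the header word simply stores the data length directly (an assumption standing in for AttributeHeader):

#include <cstdint>
#include <vector>
#include <cassert>

static uint32_t strip_headers(std::vector<uint32_t>& out, uint32_t numAttrs)
{
  uint32_t n = 0, i = 0;
  while (n < numAttrs) {
    uint32_t size = out[i];                    // AttributeHeader(dataOut[i]).getDataSize()
    for (uint32_t j = 0; j < size; j++)
      out[i + j - n] = out[i + j + 1];         // shift data left over the removed headers
    n += 1;
    i += 1 + size;
  }
  return i - numAttrs;                         // words remaining, as in ret -= numAttrs
}

int main()
{
  std::vector<uint32_t> buf = { 2, 10, 11, 1, 20 };          // [hdr=2][10,11][hdr=1][20]
  uint32_t len = strip_headers(buf, 2);
  assert(len == 3 && buf[0] == 10 && buf[1] == 11 && buf[2] == 20);
  return 0;
}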
- */ -bool -Dbtup::tuxQueryTh(Uint32 fragPtrI, - Uint32 pageId, - Uint32 pageIndex, - Uint32 tupVersion, - Uint32 transId1, - Uint32 transId2, - bool dirty, - Uint32 savepointId) -{ - jamEntry(); - FragrecordPtr fragPtr; - fragPtr.i= fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - TablerecPtr tablePtr; - tablePtr.i= fragPtr.p->fragTableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - PagePtr pagePtr; - pagePtr.i = pageId; - c_page_pool.getPtr(pagePtr); - - KeyReqStruct req_struct; - - { - Operationrec tmpOp; - tmpOp.m_tuple_location.m_page_no = pageId; - tmpOp.m_tuple_location.m_page_idx = pageIndex; - setup_fixed_part(&req_struct, &tmpOp, tablePtr.p); - } - - Tuple_header* tuple_ptr = req_struct.m_tuple_ptr; - - OperationrecPtr currOpPtr; - currOpPtr.i = tuple_ptr->m_operation_ptr_i; - if (currOpPtr.i == RNIL) { - jam(); - // tuple has no operation, any scan can see it - return true; - } - c_operation_pool.getPtr(currOpPtr); - - const bool sameTrans = - c_lqh->is_same_trans(currOpPtr.p->userpointer, transId1, transId2); - - bool res = false; - OperationrecPtr loopOpPtr = currOpPtr; - - if (!sameTrans) { - jam(); - if (!dirty) { - jam(); - if (currOpPtr.p->nextActiveOp == RNIL) { - jam(); - // last op - TUX makes ACC lock request in same timeslice - res = true; - } - } - else { - // loop to first op (returns false) - find_savepoint(loopOpPtr, 0); - const Uint32 op_type = loopOpPtr.p->op_struct.op_type; - - if (op_type != ZINSERT) { - jam(); - // read committed version - const Uint32 origVersion = tuple_ptr->get_tuple_version(); - if (origVersion == tupVersion) { - jam(); - res = true; - } - } - } - } - else { - jam(); - // for own trans, ignore dirty flag - - if (find_savepoint(loopOpPtr, savepointId)) { - jam(); - const Uint32 op_type = loopOpPtr.p->op_struct.op_type; - - if (op_type != ZDELETE) { - jam(); - // check if this op has produced the scanned version - Uint32 loopVersion = loopOpPtr.p->tupVersion; - if (loopVersion == tupVersion) { - jam(); - res = true; - } - } - } - } - - return res; -} - -// ordered index build - -//#define TIME_MEASUREMENT -#ifdef TIME_MEASUREMENT - static Uint32 time_events; - NDB_TICKS tot_time_passed; - Uint32 number_events; -#endif -void -Dbtup::execBUILDINDXREQ(Signal* signal) -{ - jamEntry(); -#ifdef TIME_MEASUREMENT - time_events= 0; - tot_time_passed= 0; - number_events= 1; -#endif - // get new operation - BuildIndexPtr buildPtr; - if (! 
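For the case where another transaction owns the scanned tuple, the visibility rules in tuxQueryTh above reduce to a small decision: a locking scan may only return the tuple when the current operation is the last one (ACC will take the lock in the same timeslice), while a dirty read returns the committed version only, and nothing at all if the first operation is an insert. A deliberately simplified sketch of that shape, with the operation chain reduced to a few booleans:

#include <cassert>

static bool other_trans_can_see(bool dirtyRead,
                                bool currOpIsLast,          // nextActiveOp == RNIL
                                bool firstOpIsInsert,
                                bool scannedIsCommittedVersion)
{
  if (!dirtyRead)
    return currOpIsLast;                       // ACC lock request follows in the same timeslice
  if (firstOpIsInsert)
    return false;                              // no committed version exists yet
  return scannedIsCommittedVersion;            // dirty read sees only the committed version
}

int main()
{
  assert(other_trans_can_see(false, true,  false, false) == true);
  assert(other_trans_can_see(true,  false, false, true)  == true);
  assert(other_trans_can_see(true,  false, true,  true)  == false);
  return 0;
}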
c_buildIndexList.seize(buildPtr)) { - jam(); - BuildIndexRec buildRec; - memcpy(buildRec.m_request, signal->theData, sizeof(buildRec.m_request)); - buildRec.m_errorCode= BuildIndxRef::Busy; - buildIndexReply(signal, &buildRec); - return; - } - memcpy(buildPtr.p->m_request, - signal->theData, - sizeof(buildPtr.p->m_request)); - // check - buildPtr.p->m_errorCode= BuildIndxRef::NoError; - do { - const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request; - if (buildReq->getTableId() >= cnoOfTablerec) { - jam(); - buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; - break; - } - TablerecPtr tablePtr; - tablePtr.i= buildReq->getTableId(); - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - if (tablePtr.p->tableStatus != DEFINED) { - jam(); - buildPtr.p->m_errorCode= BuildIndxRef::InvalidPrimaryTable; - break; - } - // memory page format - buildPtr.p->m_build_vs = - tablePtr.p->m_attributes[MM].m_no_of_varsize > 0; - if (DictTabInfo::isOrderedIndex(buildReq->getIndexType())) { - jam(); - const DLList& triggerList = - tablePtr.p->tuxCustomTriggers; - - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - if (triggerPtr.p->indexId == buildReq->getIndexId()) { - jam(); - break; - } - triggerList.next(triggerPtr); - } - if (triggerPtr.i == RNIL) { - jam(); - // trigger was not created - buildPtr.p->m_errorCode = BuildIndxRef::InternalError; - break; - } - buildPtr.p->m_indexId = buildReq->getIndexId(); - buildPtr.p->m_buildRef = DBTUX; - } else if(buildReq->getIndexId() == RNIL) { - jam(); - // REBUILD of acc - buildPtr.p->m_indexId = RNIL; - buildPtr.p->m_buildRef = DBACC; - } else { - jam(); - buildPtr.p->m_errorCode = BuildIndxRef::InvalidIndexType; - break; - } - - // set to first tuple position - const Uint32 firstTupleNo = 0; - buildPtr.p->m_fragNo= 0; - buildPtr.p->m_pageId= 0; - buildPtr.p->m_tupleNo= firstTupleNo; - // start build - buildIndex(signal, buildPtr.i); - return; - } while (0); - // check failed - buildIndexReply(signal, buildPtr.p); - c_buildIndexList.release(buildPtr); -} - -void -Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI) -{ - // get build record - BuildIndexPtr buildPtr; - buildPtr.i= buildPtrI; - c_buildIndexList.getPtr(buildPtr); - const BuildIndxReq* buildReq= (const BuildIndxReq*)buildPtr.p->m_request; - // get table - TablerecPtr tablePtr; - tablePtr.i= buildReq->getTableId(); - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - const Uint32 firstTupleNo = 0; - const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size; - -#ifdef TIME_MEASUREMENT - MicroSecondTimer start; - MicroSecondTimer stop; - NDB_TICKS time_passed; -#endif - do { - // get fragment - FragrecordPtr fragPtr; - if (buildPtr.p->m_fragNo == MAX_FRAG_PER_NODE) { - jam(); - // build ready - buildIndexReply(signal, buildPtr.p); - c_buildIndexList.release(buildPtr); - return; - } - ndbrequire(buildPtr.p->m_fragNo < MAX_FRAG_PER_NODE); - fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo]; - if (fragPtr.i == RNIL) { - jam(); - buildPtr.p->m_fragNo++; - buildPtr.p->m_pageId= 0; - buildPtr.p->m_tupleNo= firstTupleNo; - break; - } - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - // get page - PagePtr pagePtr; - if (buildPtr.p->m_pageId >= fragPtr.p->noOfPages) { - jam(); - buildPtr.p->m_fragNo++; - buildPtr.p->m_pageId= 0; - buildPtr.p->m_tupleNo= firstTupleNo; - break; - } - Uint32 realPageId= getRealpid(fragPtr.p, buildPtr.p->m_pageId); - c_page_pool.getPtr(pagePtr, realPageId); - Uint32 pageState= 
pagePtr.p->page_state; - // skip empty page - if (pageState == ZEMPTY_MM) { - jam(); - buildPtr.p->m_pageId++; - buildPtr.p->m_tupleNo= firstTupleNo; - break; - } - // get tuple - Uint32 pageIndex = ~0; - const Tuple_header* tuple_ptr = 0; - pageIndex = buildPtr.p->m_tupleNo * tupheadsize; - if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) { - jam(); - buildPtr.p->m_pageId++; - buildPtr.p->m_tupleNo= firstTupleNo; - break; - } - tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex]; - // skip over free tuple - if (tuple_ptr->m_header_bits & Tuple_header::FREE) { - jam(); - buildPtr.p->m_tupleNo++; - break; - } - Uint32 tupVersion= tuple_ptr->get_tuple_version(); - OperationrecPtr pageOperPtr; - pageOperPtr.i= tuple_ptr->m_operation_ptr_i; -#ifdef TIME_MEASUREMENT - NdbTick_getMicroTimer(&start); -#endif - // add to index - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - req->errorCode = RNIL; - req->tableId = tablePtr.i; - req->indexId = buildPtr.p->m_indexId; - req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo]; - req->pageId = realPageId; - req->tupVersion = tupVersion; - req->opInfo = TuxMaintReq::OpAdd; - req->tupFragPtrI = fragPtr.i; - req->fragPageId = buildPtr.p->m_pageId; - req->pageIndex = pageIndex; - - if (pageOperPtr.i == RNIL) - { - EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength+2); - } - else - { - /* - If there is an ongoing operation on the tuple then it is either a - copy tuple or an original tuple with an ongoing transaction. In - both cases realPageId and pageOffset refer to the original tuple. - The tuple address stored in TUX will always be the original tuple - but with the tuple version of the tuple we found. - - This is necessary to avoid having to update TUX at abort of - update. If an update aborts then the copy tuple is copied to - the original tuple. The build will however have found that - tuple as a copy tuple. The original tuple is stable and is thus - preferrable to store in TUX. - */ - jam(); - - /** - * Since copy tuples now can't be found on real pages. 
- * we will here build all copies of the tuple - * - * Note only "real" tupVersion's should be added - * i.e delete's shouldnt be added - * (unless it's the first op, when "original" should be added) - */ - do - { - c_operation_pool.getPtr(pageOperPtr); - if(pageOperPtr.p->op_struct.op_type != ZDELETE || - pageOperPtr.p->is_first_operation()) - { - req->errorCode = RNIL; - req->tupVersion= pageOperPtr.p->tupVersion; - EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength+2); - } - else - { - req->errorCode= 0; - } - pageOperPtr.i= pageOperPtr.p->prevActiveOp; - } while(req->errorCode == 0 && pageOperPtr.i != RNIL); - } - - jamEntry(); - if (req->errorCode != 0) { - switch (req->errorCode) { - case TuxMaintReq::NoMemError: - jam(); - buildPtr.p->m_errorCode= BuildIndxRef::AllocationFailure; - break; - default: - ndbrequire(false); - break; - } - buildIndexReply(signal, buildPtr.p); - c_buildIndexList.release(buildPtr); - return; - } -#ifdef TIME_MEASUREMENT - NdbTick_getMicroTimer(&stop); - time_passed= NdbTick_getMicrosPassed(start, stop); - if (time_passed < 1000) { - time_events++; - tot_time_passed += time_passed; - if (time_events == number_events) { - NDB_TICKS mean_time_passed= tot_time_passed / - (NDB_TICKS)number_events; - ndbout << "Number of events= " << number_events; - ndbout << " Mean time passed= " << mean_time_passed << endl; - number_events <<= 1; - tot_time_passed= (NDB_TICKS)0; - time_events= 0; - } - } -#endif - // next tuple - buildPtr.p->m_tupleNo++; - break; - } while (0); - signal->theData[0]= ZBUILD_INDEX; - signal->theData[1]= buildPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); -} - -void -Dbtup::buildIndexReply(Signal* signal, const BuildIndexRec* buildPtrP) -{ - const BuildIndxReq* const buildReq= - (const BuildIndxReq*)buildPtrP->m_request; - // conf is subset of ref - BuildIndxRef* rep= (BuildIndxRef*)signal->getDataPtr(); - rep->setUserRef(buildReq->getUserRef()); - rep->setConnectionPtr(buildReq->getConnectionPtr()); - rep->setRequestType(buildReq->getRequestType()); - rep->setTableId(buildReq->getTableId()); - rep->setIndexType(buildReq->getIndexType()); - rep->setIndexId(buildReq->getIndexId()); - // conf - if (buildPtrP->m_errorCode == BuildIndxRef::NoError) { - jam(); - sendSignal(rep->getUserRef(), GSN_BUILDINDXCONF, - signal, BuildIndxConf::SignalLength, JBB); - return; - } - // ref - rep->setErrorCode(buildPtrP->m_errorCode); - sendSignal(rep->getUserRef(), GSN_BUILDINDXREF, - signal, BuildIndxRef::SignalLength, JBB); -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp deleted file mode 100644 index dffc966f875..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ /dev/null @@ -1,1486 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_META_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include - -void Dbtup::execTUPFRAGREQ(Signal* signal) -{ - jamEntry(); - - TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); - if (tupFragReq->userPtr == (Uint32)-1) { - jam(); - abortAddFragOp(signal); - return; - } - - FragoperrecPtr fragOperPtr; - FragrecordPtr regFragPtr; - TablerecPtr regTabPtr; - - Uint32 userptr = tupFragReq->userPtr; - Uint32 userblockref = tupFragReq->userRef; - Uint32 reqinfo = tupFragReq->reqInfo; - regTabPtr.i = tupFragReq->tableId; - Uint32 noOfAttributes = tupFragReq->noOfAttr; - Uint32 fragId = tupFragReq->fragId; - /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/ - Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr; - Uint32 noOfCharsets = tupFragReq->noOfCharsets; - - Uint32 checksumIndicator = tupFragReq->checksumIndicator; - Uint32 gcpIndicator = tupFragReq->globalCheckpointIdIndicator; - Uint32 tablespace_id= tupFragReq->tablespaceid; - Uint32 forceVarPart = tupFragReq->forceVarPartFlag; - - Uint64 maxRows = - (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow; - Uint64 minRows = - (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow; - -#ifndef VM_TRACE - // config mismatch - do not crash if release compiled - if (regTabPtr.i >= cnoOfTablerec) { - jam(); - tupFragReq->userPtr = userptr; - tupFragReq->userRef = 800; - sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); - return; - } -#endif - - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - if (cfirstfreeFragopr == RNIL) { - jam(); - tupFragReq->userPtr = userptr; - tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; - sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); - return; - } - seizeFragoperrec(fragOperPtr); - - fragOperPtr.p->nextFragoprec = RNIL; - fragOperPtr.p->lqhBlockrefFrag = userblockref; - fragOperPtr.p->lqhPtrFrag = userptr; - fragOperPtr.p->fragidFrag = fragId; - fragOperPtr.p->tableidFrag = regTabPtr.i; - fragOperPtr.p->attributeCount = noOfAttributes; - - memset(fragOperPtr.p->m_null_bits, 0, sizeof(fragOperPtr.p->m_null_bits)); - memset(fragOperPtr.p->m_fix_attributes_size, 0, - sizeof(fragOperPtr.p->m_fix_attributes_size)); - memset(fragOperPtr.p->m_var_attributes_size, 0, - sizeof(fragOperPtr.p->m_var_attributes_size)); - - fragOperPtr.p->charsetIndex = 0; - fragOperPtr.p->minRows = minRows; - fragOperPtr.p->maxRows = maxRows; - - ndbrequire(reqinfo == ZADDFRAG); - - getFragmentrec(regFragPtr, fragId, regTabPtr.p); - if (regFragPtr.i != RNIL) { - jam(); - terrorCode= ZEXIST_FRAG_ERROR; - fragrefuse1Lab(signal, fragOperPtr); - return; - } - if (cfirstfreefrag != RNIL) { - jam(); - seizeFragrecord(regFragPtr); - } else { - jam(); - terrorCode= ZFULL_FRAGRECORD_ERROR; - fragrefuse1Lab(signal, fragOperPtr); - return; - } - initFragRange(regFragPtr.p); - if (!addfragtotab(regTabPtr.p, fragId, regFragPtr.i)) { - jam(); - terrorCode= ZNO_FREE_TAB_ENTRY_ERROR; - fragrefuse2Lab(signal, fragOperPtr, regFragPtr); - return; - } - if (cfirstfreerange == RNIL) { - jam(); - terrorCode= ZNO_FREE_PAGE_RANGE_ERROR; - fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - return; - } - - regFragPtr.p->fragTableId= 
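Signal payloads are 32-bit words, so execTUPFRAGREQ above rebuilds the 64-bit minRows/maxRows values from separate high and low words. A minimal sketch of that combination; the cast before the shift is the important part.

#include <cstdint>
#include <cassert>

static uint64_t from_hi_lo(uint32_t hi, uint32_t lo)
{
  return (static_cast<uint64_t>(hi) << 32) + lo;   // cast first, or the shift discards the high bits
}

int main()
{
  assert(from_hi_lo(0, 1000) == 1000ULL);
  assert(from_hi_lo(2, 5)    == (2ULL << 32) + 5);
  return 0;
}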
regTabPtr.i; - regFragPtr.p->fragmentId= fragId; - regFragPtr.p->m_tablespace_id= tablespace_id; - regFragPtr.p->m_undo_complete= false; - regFragPtr.p->m_lcp_scan_op = RNIL; - regFragPtr.p->m_lcp_keep_list = RNIL; - regFragPtr.p->m_var_page_chunks = RNIL; - regFragPtr.p->m_restore_lcp_id = RNIL; - - if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || - ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) { - jam(); - terrorCode = 1; - fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - CLEAR_ERROR_INSERT_VALUE; - return; - } - - if (regTabPtr.p->tableStatus == NOT_DEFINED) { - jam(); -//----------------------------------------------------------------------------- -// We are setting up references to the header of the tuple. -// Active operation This word contains a reference to the operation active -// on the tuple at the moment. RNIL means no one active at -// all. Not optional. -// Tuple version Uses only low 16 bits. Not optional. -// Checksum The third header word is optional and contains a checksum -// of the tuple header. -// Null-bits A number of words to contain null bits for all -// non-dynamic attributes. Each word contains upto 32 null -// bits. Each time a new word is needed we allocate the -// complete word. Zero nullable attributes means that there -// is no word at all -//----------------------------------------------------------------------------- - fragOperPtr.p->definingFragment= true; - regTabPtr.p->tableStatus= DEFINING; - regTabPtr.p->m_bits = 0; - regTabPtr.p->m_bits |= (checksumIndicator ? Tablerec::TR_Checksum : 0); - regTabPtr.p->m_bits |= (gcpIndicator ? Tablerec::TR_RowGCI : 0); - regTabPtr.p->m_bits |= (forceVarPart ? Tablerec::TR_ForceVarPart : 0); - - regTabPtr.p->m_offsets[MM].m_disk_ref_offset= 0; - regTabPtr.p->m_offsets[MM].m_null_words= 0; - regTabPtr.p->m_offsets[MM].m_fix_header_size= 0; - regTabPtr.p->m_offsets[MM].m_max_var_offset= 0; - - regTabPtr.p->m_offsets[DD].m_disk_ref_offset= 0; - regTabPtr.p->m_offsets[DD].m_null_words= 0; - regTabPtr.p->m_offsets[DD].m_fix_header_size= 0; - regTabPtr.p->m_offsets[DD].m_max_var_offset= 0; - - regTabPtr.p->m_attributes[MM].m_no_of_fixsize= 0; - regTabPtr.p->m_attributes[MM].m_no_of_varsize= 0; - regTabPtr.p->m_attributes[DD].m_no_of_fixsize= 0; - regTabPtr.p->m_attributes[DD].m_no_of_varsize= 0; - - regTabPtr.p->noOfKeyAttr= noOfKeyAttr; - regTabPtr.p->noOfCharsets= noOfCharsets; - regTabPtr.p->m_no_of_attributes= noOfAttributes; - - regTabPtr.p->notNullAttributeMask.clear(); - regTabPtr.p->blobAttributeMask.clear(); - - Uint32 offset[10]; - Uint32 tableDescriptorRef= allocTabDescr(regTabPtr.p, offset); - if (tableDescriptorRef == RNIL) { - jam(); - fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - return; - } - setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset); - } else { - jam(); - fragOperPtr.p->definingFragment= false; - } - signal->theData[0]= fragOperPtr.p->lqhPtrFrag; - signal->theData[1]= fragOperPtr.i; - signal->theData[2]= regFragPtr.i; - signal->theData[3]= fragId; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUPFRAGCONF, signal, 4, JBB); - return; -} - -bool Dbtup::addfragtotab(Tablerec* const regTabPtr, - Uint32 fragId, - Uint32 fragIndex) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (regTabPtr->fragid[i] == RNIL) { - jam(); - regTabPtr->fragid[i]= fragId; - regTabPtr->fragrec[i]= fragIndex; - return true; - } - } - return false; -} - -void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, - 
Uint32 fragId, - Tablerec* const regTabPtr) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (regTabPtr->fragid[i] == fragId) { - jam(); - regFragPtr.i= regTabPtr->fragrec[i]; - ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); - return; - } - } - regFragPtr.i= RNIL; - ptrNull(regFragPtr); -} - -void Dbtup::seizeFragrecord(FragrecordPtr& regFragPtr) -{ - regFragPtr.i= cfirstfreefrag; - ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); - cfirstfreefrag= regFragPtr.p->nextfreefrag; - regFragPtr.p->nextfreefrag= RNIL; -} - -void Dbtup::seizeFragoperrec(FragoperrecPtr& fragOperPtr) -{ - fragOperPtr.i= cfirstfreeFragopr; - ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); - cfirstfreeFragopr = fragOperPtr.p->nextFragoprec; - fragOperPtr.p->nextFragoprec = RNIL; - fragOperPtr.p->inUse = true; -}//Dbtup::seizeFragoperrec() - -/* **************************************************************** */ -/* ************** TUP_ADD_ATTRREQ ****************** */ -/* **************************************************************** */ -void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) -{ - FragrecordPtr regFragPtr; - FragoperrecPtr fragOperPtr; - TablerecPtr regTabPtr; - - jamEntry(); - fragOperPtr.i= signal->theData[0]; - ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); - Uint32 attrId = signal->theData[2]; - Uint32 attrDescriptor = signal->theData[3]; - Uint32 extType = AttributeDescriptor::getType(attrDescriptor); - // DICT sends charset number in upper half - Uint32 csNumber = (signal->theData[4] >> 16); - - regTabPtr.i= fragOperPtr.p->tableidFrag; - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - - Uint32 fragId= fragOperPtr.p->fragidFrag; - - getFragmentrec(regFragPtr, fragId, regTabPtr.p); - ndbrequire(regFragPtr.i != RNIL); - - ndbrequire(fragOperPtr.p->attributeCount > 0); - fragOperPtr.p->attributeCount--; - const bool lastAttr = (fragOperPtr.p->attributeCount == 0); - - if (regTabPtr.p->tableStatus != DEFINING) - { - ndbrequire(regTabPtr.p->tableStatus == DEFINED); - signal->theData[0] = fragOperPtr.p->lqhPtrFrag; - signal->theData[1] = lastAttr; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, - signal, 2, JBB); - - if(lastAttr) - { - jam(); - /** - * Init Disk_alloc_info - */ - CreateFilegroupImplReq rep; - if(regTabPtr.p->m_no_of_disk_attributes) - { - Tablespace_client tsman(0, c_tsman, 0, 0, - regFragPtr.p->m_tablespace_id); - ndbrequire(tsman.get_tablespace_info(&rep) == 0); - regFragPtr.p->m_logfile_group_id= rep.tablespace.logfile_group_id; - } - else - { - jam(); - regFragPtr.p->m_logfile_group_id = RNIL; - } - new (®FragPtr.p->m_disk_alloc_info) - Disk_alloc_info(regTabPtr.p, rep.tablespace.extent_size); - releaseFragoperrec(fragOperPtr); - } - return; - } - - Uint32 firstTabDesIndex= regTabPtr.p->tabDescriptor + (attrId * ZAD_SIZE); - setTabDescrWord(firstTabDesIndex, attrDescriptor); - Uint32 attrLen = AttributeDescriptor::getSize(attrDescriptor); - - Uint32 attrDes2= 0; - if (!AttributeDescriptor::getDynamic(attrDescriptor)) { - jam(); - Uint32 pos= 0, null_pos; - Uint32 bytes= AttributeDescriptor::getSizeInBytes(attrDescriptor); - Uint32 words= (bytes + 3) / 4; - Uint32 ind= AttributeDescriptor::getDiskBased(attrDescriptor); - ndbrequire(ind <= 1); - null_pos= fragOperPtr.p->m_null_bits[ind]; - - if (AttributeDescriptor::getNullable(attrDescriptor)) - { - jam(); - fragOperPtr.p->m_null_bits[ind]++; - } - else - { - regTabPtr.p->notNullAttributeMask.set(attrId); - } - - if (extType == NDB_TYPE_BLOB || extType == NDB_TYPE_TEXT) 
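As the comment in execTUP_ADD_ATTRREQ above notes, DICT packs the charset number into the upper half of theData[4], so the receiver only has to shift it down. A tiny sketch of that word split (the lower-half contents here are illustrative):

#include <cstdint>
#include <cassert>

int main()
{
  uint32_t word      = (8u /* charset no */ << 16) | 0x002Au;  // as packed by the sender
  uint32_t csNumber  = word >> 16;                             // csNumber = theData[4] >> 16
  uint32_t lowerHalf = word & 0xFFFF;
  assert(csNumber == 8 && lowerHalf == 0x2A);
  return 0;
}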
{ - regTabPtr.p->blobAttributeMask.set(attrId); - } - - switch (AttributeDescriptor::getArrayType(attrDescriptor)) { - case NDB_ARRAYTYPE_FIXED: - { - jam(); - regTabPtr.p->m_attributes[ind].m_no_of_fixsize++; - if(attrLen != 0) - { - jam(); - pos= fragOperPtr.p->m_fix_attributes_size[ind]; - fragOperPtr.p->m_fix_attributes_size[ind] += words; - } - else - { - jam(); - Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor); - fragOperPtr.p->m_null_bits[ind] += bitCount; - } - break; - } - default: - { - jam(); - fragOperPtr.p->m_var_attributes_size[ind] += bytes; - pos= regTabPtr.p->m_attributes[ind].m_no_of_varsize++; - break; - } - }//switch - - AttributeOffset::setOffset(attrDes2, pos); - AttributeOffset::setNullFlagPos(attrDes2, null_pos); - } else { - ndbrequire(false); - } - if (csNumber != 0) { - CHARSET_INFO* cs = all_charsets[csNumber]; - ndbrequire(cs != NULL); - Uint32 i = 0; - while (i < fragOperPtr.p->charsetIndex) { - jam(); - if (regTabPtr.p->charsetArray[i] == cs) - break; - i++; - } - if (i == fragOperPtr.p->charsetIndex) { - jam(); - fragOperPtr.p->charsetIndex++; - } - ndbrequire(i < regTabPtr.p->noOfCharsets); - regTabPtr.p->charsetArray[i]= cs; - AttributeOffset::setCharsetPos(attrDes2, i); - } - setTabDescrWord(firstTabDesIndex + 1, attrDes2); - - if (ERROR_INSERTED(4009) && regTabPtr.p->fragid[0] == fragId && attrId == 0|| - ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr || - ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0|| - ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) { - jam(); - terrorCode = 1; - addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); - CLEAR_ERROR_INSERT_VALUE; - return; - } - -/* **************************************************************** */ -/* ************** TUP_ADD_ATTCONF ****************** */ -/* **************************************************************** */ - if (! 
lastAttr) { - jam(); - signal->theData[0] = fragOperPtr.p->lqhPtrFrag; - signal->theData[1] = lastAttr; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, - signal, 2, JBB); - return; - } - - ndbrequire(regTabPtr.p->tableStatus == DEFINING); - regTabPtr.p->tableStatus= DEFINED; - regFragPtr.p->fragStatus= ACTIVE; - -#define BTW(x) ((x+31) >> 5) - regTabPtr.p->m_offsets[MM].m_null_words= BTW(fragOperPtr.p->m_null_bits[MM]); - regTabPtr.p->m_offsets[DD].m_null_words= BTW(fragOperPtr.p->m_null_bits[DD]); - - /** - * Fix offsets - */ - Uint32 pos[2] = { 0, 0 }; - if (regTabPtr.p->m_bits & Tablerec::TR_Checksum) - { - pos[0]= 1; - } - - if (regTabPtr.p->m_bits & Tablerec::TR_RowGCI) - { - pos[MM]++; - pos[DD]++; - } - - regTabPtr.p->m_no_of_disk_attributes= - regTabPtr.p->m_attributes[DD].m_no_of_fixsize + - regTabPtr.p->m_attributes[DD].m_no_of_varsize; - - if(regTabPtr.p->m_no_of_disk_attributes > 0) - { - regTabPtr.p->m_offsets[MM].m_disk_ref_offset= pos[MM]; - pos[MM] += Disk_part_ref::SZ32; // 8 bytes - } - else - { - /** - * var part ref is stored at m_disk_ref_offset + Disk_part_ref::SZ32 - */ - regTabPtr.p->m_offsets[MM].m_disk_ref_offset= pos[MM]-Disk_part_ref::SZ32; - } - - if (regTabPtr.p->m_attributes[MM].m_no_of_varsize) - { - pos[MM] += Var_part_ref::SZ32; - regTabPtr.p->m_bits &= ~(Uint32)Tablerec::TR_ForceVarPart; - } - else if (regTabPtr.p->m_bits & Tablerec::TR_ForceVarPart) - { - pos[MM] += Var_part_ref::SZ32; - } - - regTabPtr.p->m_offsets[MM].m_null_offset= pos[MM]; - regTabPtr.p->m_offsets[DD].m_null_offset= pos[DD]; - - pos[MM]+= regTabPtr.p->m_offsets[MM].m_null_words; - pos[DD]+= regTabPtr.p->m_offsets[DD].m_null_words; - - Uint32 *tabDesc = (Uint32*)(tableDescriptor+regTabPtr.p->tabDescriptor); - for(Uint32 i= 0; im_no_of_attributes; i++) - { - Uint32 ind= AttributeDescriptor::getDiskBased(* tabDesc); - Uint32 arr= AttributeDescriptor::getArrayType(* tabDesc++); - - if(arr == NDB_ARRAYTYPE_FIXED) - { - Uint32 desc= * tabDesc; - Uint32 off= AttributeOffset::getOffset(desc) + pos[ind]; - AttributeOffset::setOffset(desc, off); - * tabDesc= desc; - } - tabDesc++; - } - - regTabPtr.p->m_offsets[MM].m_fix_header_size= - Tuple_header::HeaderSize + - fragOperPtr.p->m_fix_attributes_size[MM] + - pos[MM]; - - regTabPtr.p->m_offsets[DD].m_fix_header_size= - fragOperPtr.p->m_fix_attributes_size[DD] + - pos[DD]; - - if(regTabPtr.p->m_attributes[DD].m_no_of_varsize == 0 && - regTabPtr.p->m_attributes[DD].m_no_of_fixsize > 0) - regTabPtr.p->m_offsets[DD].m_fix_header_size += Tuple_header::HeaderSize; - - regTabPtr.p->m_offsets[MM].m_max_var_offset= - fragOperPtr.p->m_var_attributes_size[MM]; - - regTabPtr.p->m_offsets[DD].m_max_var_offset= - fragOperPtr.p->m_var_attributes_size[DD]; - - regTabPtr.p->total_rec_size= - pos[MM] + fragOperPtr.p->m_fix_attributes_size[MM] + - pos[DD] + fragOperPtr.p->m_fix_attributes_size[DD] + - ((fragOperPtr.p->m_var_attributes_size[MM] + 3) >> 2) + - ((fragOperPtr.p->m_var_attributes_size[DD] + 3) >> 2) + - (regTabPtr.p->m_attributes[MM].m_no_of_varsize ? - (regTabPtr.p->m_attributes[MM].m_no_of_varsize + 2) >> 1 : 0) + - (regTabPtr.p->m_attributes[DD].m_no_of_varsize ? - (regTabPtr.p->m_attributes[DD].m_no_of_varsize + 2) >> 1 : 0) + - Tuple_header::HeaderSize + - (regTabPtr.p->m_no_of_disk_attributes ? 
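The BTW macro defined above ("bits to words") rounds the collected null-bit count of each part up to whole 32-bit words for the tuple header. A one-function sketch with a few boundary cases:

#include <cstdint>
#include <cassert>

static uint32_t bits_to_words(uint32_t bits) { return (bits + 31) >> 5; }   // BTW(x)

int main()
{
  assert(bits_to_words(0)  == 0);    // no nullable or bit columns: no null words at all
  assert(bits_to_words(1)  == 1);
  assert(bits_to_words(32) == 1);
  assert(bits_to_words(33) == 2);
  return 0;
}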
Tuple_header::HeaderSize : 0); - - setUpQueryRoutines(regTabPtr.p); - setUpKeyArray(regTabPtr.p); - -#if 0 - ndbout << *regTabPtr.p << endl; - Uint32 idx= regTabPtr.p->tabDescriptor; - for(Uint32 i = 0; im_no_of_attributes; i++) - { - ndbout << i << ": " << endl; - ndbout << *(AttributeDescriptor*)(tableDescriptor+idx) << endl; - ndbout << *(AttributeOffset*)(tableDescriptor+idx+1) << endl; - idx += 2; - } -#endif - - { - Uint32 fix_tupheader = regTabPtr.p->m_offsets[MM].m_fix_header_size; - ndbassert(fix_tupheader > 0); - Uint32 noRowsPerPage = ZWORDS_ON_PAGE / fix_tupheader; - Uint32 noAllocatedPages = - (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage; - if (fragOperPtr.p->minRows == 0) - noAllocatedPages = 2; - else if (noAllocatedPages == 0) - noAllocatedPages = 2; - noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); - - if (noAllocatedPages == 0) { - jam(); - terrorCode = ZNO_PAGES_ALLOCATED_ERROR; - addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); - return; - }//if - } - - CreateFilegroupImplReq rep; - if(regTabPtr.p->m_no_of_disk_attributes) - { - jam(); - Tablespace_client tsman(0, c_tsman, 0, 0, - regFragPtr.p->m_tablespace_id); - ndbrequire(tsman.get_tablespace_info(&rep) == 0); - regFragPtr.p->m_logfile_group_id= rep.tablespace.logfile_group_id; - } - else - { - jam(); - regFragPtr.p->m_logfile_group_id = RNIL; - } - - new (®FragPtr.p->m_disk_alloc_info) - Disk_alloc_info(regTabPtr.p, rep.tablespace.extent_size); - - if (regTabPtr.p->m_no_of_disk_attributes) - { - jam(); - if(!(getNodeState().startLevel == NodeState::SL_STARTING && - getNodeState().starting.startPhase <= 4)) - { - Callback cb; - jam(); - - cb.m_callbackData= fragOperPtr.i; - cb.m_callbackFunction = - safe_cast(&Dbtup::undo_createtable_callback); - Uint32 sz= sizeof(Disk_undo::Create) >> 2; - - Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id); - if((terrorCode = - c_lgman->alloc_log_space(regFragPtr.p->m_logfile_group_id, sz))) - { - addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); - return; - } - - int res= lgman.get_log_buffer(signal, sz, &cb); - switch(res){ - case 0: - jam(); - signal->theData[0] = 1; - return; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - } - execute(signal, cb, regFragPtr.p->m_logfile_group_id); - return; - } - } - - signal->theData[0] = fragOperPtr.p->lqhPtrFrag; - signal->theData[1] = lastAttr; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, - signal, 2, JBB); - - releaseFragoperrec(fragOperPtr); - - return; -} - -void -Dbtup::undo_createtable_callback(Signal* signal, Uint32 opPtrI, Uint32 unused) -{ - FragrecordPtr regFragPtr; - FragoperrecPtr fragOperPtr; - TablerecPtr regTabPtr; - - fragOperPtr.i= opPtrI; - ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); - - regTabPtr.i= fragOperPtr.p->tableidFrag; - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - - getFragmentrec(regFragPtr, fragOperPtr.p->fragidFrag, regTabPtr.p); - ndbrequire(regFragPtr.i != RNIL); - - Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id); - - Disk_undo::Create create; - create.m_type_length= Disk_undo::UNDO_CREATE << 16 | (sizeof(create) >> 2); - create.m_table = regTabPtr.i; - - Logfile_client::Change c[1] = {{ &create, sizeof(create) >> 2 } }; - - Uint64 lsn= lgman.add_entry(c, 1); - - Logfile_client::Request req; - req.m_callback.m_callbackData= fragOperPtr.i; - req.m_callback.m_callbackFunction = - safe_cast(&Dbtup::undo_createtable_logsync_callback); 
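The initial page allocation above derives rows per page from the page size in words and the fixed tuple size, converts minRows to pages with a ceiling division, and falls back to two pages when the result would be zero. A standalone sketch; ZWORDS_ON_PAGE is given an assumed value for the example.

#include <cstdint>
#include <cassert>

static uint32_t initial_pages(uint32_t minRows, uint32_t fixTupheader)
{
  const uint32_t ZWORDS_ON_PAGE = 8192;                   // assumption for the sketch
  uint32_t rowsPerPage = ZWORDS_ON_PAGE / fixTupheader;
  uint32_t pages = (minRows + rowsPerPage - 1) / rowsPerPage;   // ceiling division
  return pages == 0 ? 2 : pages;                          // minRows == 0 also yields 2
}

int main()
{
  assert(initial_pages(0, 100)    == 2);
  assert(initial_pages(1000, 100) == 13);                 // 81 rows fit per page
  return 0;
}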
- - int ret = lgman.sync_lsn(signal, lsn, &req, 0); - switch(ret){ - case 0: - return; - case -1: - warningEvent("Failed to sync log for create of table: %u", regTabPtr.i); - default: - execute(signal, req.m_callback, regFragPtr.p->m_logfile_group_id); - } -} - -void -Dbtup::undo_createtable_logsync_callback(Signal* signal, Uint32 ptrI, - Uint32 res) -{ - jamEntry(); - FragoperrecPtr fragOperPtr; - fragOperPtr.i= ptrI; - ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); - - signal->theData[0] = fragOperPtr.p->lqhPtrFrag; - signal->theData[1] = 1; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, - signal, 2, JBB); - - releaseFragoperrec(fragOperPtr); -} - -/* - * Descriptor has these parts: - * - * 0 readFunctionArray ( one for each attribute ) - * 1 updateFunctionArray ( ditto ) - * 2 charsetArray ( pointers to distinct CHARSET_INFO ) - * 3 readKeyArray ( attribute ids of keys ) - * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE ) - */ -void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference, - Tablerec* const regTabPtr, - const Uint32* offset) -{ - Uint32* desc= &tableDescriptor[descriptorReference].tabDescr; - regTabPtr->readFunctionArray= (ReadFunction*)(desc + offset[0]); - regTabPtr->updateFunctionArray= (UpdateFunction*)(desc + offset[1]); - regTabPtr->charsetArray= (CHARSET_INFO**)(desc + offset[2]); - regTabPtr->readKeyArray= descriptorReference + offset[3]; - regTabPtr->tabDescriptor= descriptorReference + offset[4]; - regTabPtr->m_real_order_descriptor = descriptorReference + offset[5]; -} - -Uint32 -Dbtup::sizeOfReadFunction() -{ - ReadFunction* tmp= (ReadFunction*)&tableDescriptor[0]; - TableDescriptor* start= &tableDescriptor[0]; - TableDescriptor * end= (TableDescriptor*)(tmp + 1); - return (Uint32)(end - start); -} - -void Dbtup::setUpKeyArray(Tablerec* const regTabPtr) -{ - ndbrequire((regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr) < - cnoOfTabDescrRec); - Uint32* keyArray= &tableDescriptor[regTabPtr->readKeyArray].tabDescr; - Uint32 countKeyAttr= 0; - for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - jam(); - Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); - Uint32 attrDescriptor= getTabDescrWord(refAttr); - if (AttributeDescriptor::getPrimaryKey(attrDescriptor)) { - jam(); - AttributeHeader::init(&keyArray[countKeyAttr], i, 0); - countKeyAttr++; - } - } - ndbrequire(countKeyAttr == regTabPtr->noOfKeyAttr); - - /** - * Setup real order array (16 bit per column) - */ - const Uint32 off= regTabPtr->m_real_order_descriptor; - const Uint32 sz= (regTabPtr->m_no_of_attributes + 1) >> 1; - ndbrequire((off + sz) < cnoOfTabDescrRec); - - Uint32 cnt= 0; - Uint16* order= (Uint16*)&tableDescriptor[off].tabDescr; - for (Uint32 type = 0; type < 4; type++) - { - for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) - { - jam(); - Uint32 refAttr= regTabPtr->tabDescriptor + (i * ZAD_SIZE); - Uint32 desc = getTabDescrWord(refAttr); - Uint32 t = 0; - - if (AttributeDescriptor::getArrayType(desc) != NDB_ARRAYTYPE_FIXED) - { - t += 1; - } - if (AttributeDescriptor::getDiskBased(desc)) - { - t += 2; - } - ndbrequire(t < 4); - if(t == type) - { - * order++ = i << ZAD_LOG_SIZE; - cnt++; - } - } - } - ndbrequire(cnt == regTabPtr->m_no_of_attributes); -} - -void Dbtup::addattrrefuseLab(Signal* signal, - FragrecordPtr regFragPtr, - FragoperrecPtr fragOperPtr, - Tablerec* const regTabPtr, - Uint32 fragId) -{ - releaseFragPages(regFragPtr.p); - deleteFragTab(regTabPtr, fragId); - releaseFragrec(regFragPtr); - 
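The "real order" array built by setUpKeyArray above classifies every column by two bits, variable-size and disk-based, and then lists all columns of group 0 before group 1, 2 and 3 so that fixed main-memory attributes come first and variable-size disk attributes come last. A standalone sketch of that grouping:

#include <cstdint>
#include <vector>
#include <cstdio>

struct Attr { bool varsize; bool disk; };

int main()
{
  std::vector<Attr> attrs = { {false,false}, {true,false}, {false,true}, {true,true}, {false,false} };
  std::vector<uint32_t> order;
  for (uint32_t type = 0; type < 4; type++)
    for (uint32_t i = 0; i < attrs.size(); i++) {
      uint32_t t = (attrs[i].varsize ? 1 : 0) + (attrs[i].disk ? 2 : 0);
      if (t == type)
        order.push_back(i);                    // fixed main-memory first, var-size disk last
    }
  for (uint32_t i : order)
    std::printf("%u ", (unsigned)i);           // prints: 0 4 1 2 3
  std::printf("\n");
  return 0;
}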
releaseTabDescr(regTabPtr); - initTab(regTabPtr); - - signal->theData[0]= fragOperPtr.p->lqhPtrFrag; - signal->theData[1]= terrorCode; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, - GSN_TUP_ADD_ATTRREF, signal, 2, JBB); - releaseFragoperrec(fragOperPtr); -} - -void Dbtup::fragrefuse4Lab(Signal* signal, - FragoperrecPtr fragOperPtr, - FragrecordPtr regFragPtr, - Tablerec* const regTabPtr, - Uint32 fragId) -{ - releaseFragPages(regFragPtr.p); - fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr, fragId); - initTab(regTabPtr); -} - -void Dbtup::fragrefuse3Lab(Signal* signal, - FragoperrecPtr fragOperPtr, - FragrecordPtr regFragPtr, - Tablerec* const regTabPtr, - Uint32 fragId) -{ - fragrefuse2Lab(signal, fragOperPtr, regFragPtr); - deleteFragTab(regTabPtr, fragId); -} - -void Dbtup::fragrefuse2Lab(Signal* signal, - FragoperrecPtr fragOperPtr, - FragrecordPtr regFragPtr) -{ - fragrefuse1Lab(signal, fragOperPtr); - releaseFragrec(regFragPtr); -} - -void Dbtup::fragrefuse1Lab(Signal* signal, FragoperrecPtr fragOperPtr) -{ - fragrefuseLab(signal, fragOperPtr); - releaseFragoperrec(fragOperPtr); -} - -void Dbtup::fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr) -{ - signal->theData[0]= fragOperPtr.p->lqhPtrFrag; - signal->theData[1]= terrorCode; - sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUPFRAGREF, signal, 2, JBB); -} - -void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr) -{ - fragOperPtr.p->inUse = false; - fragOperPtr.p->nextFragoprec = cfirstfreeFragopr; - cfirstfreeFragopr = fragOperPtr.i; -}//Dbtup::releaseFragoperrec() - -void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId) -{ - for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (regTabPtr->fragid[i] == fragId) { - jam(); - regTabPtr->fragid[i]= RNIL; - regTabPtr->fragrec[i]= RNIL; - return; - } - } - ndbrequire(false); -} - -/* - * LQH aborts on-going create table operation. The table is later - * dropped by DICT. 
- */ -void Dbtup::abortAddFragOp(Signal* signal) -{ - FragoperrecPtr fragOperPtr; - - fragOperPtr.i = signal->theData[1]; - ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec); - ndbrequire(fragOperPtr.p->inUse); - releaseFragoperrec(fragOperPtr); -} - -void -Dbtup::execDROP_TAB_REQ(Signal* signal) -{ - jamEntry(); - if (ERROR_INSERTED(4013)) { -#ifdef VM_TRACE - verifytabdes(); -#endif - } - DropTabReq* req= (DropTabReq*)signal->getDataPtr(); - - TablerecPtr tabPtr; - tabPtr.i= req->tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - tabPtr.p->m_dropTable.tabUserRef = req->senderRef; - tabPtr.p->m_dropTable.tabUserPtr = req->senderData; - tabPtr.p->tableStatus = DROPPING; - - signal->theData[0]= ZREL_FRAG; - signal->theData[1]= tabPtr.i; - signal->theData[2]= RNIL; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); -} - -void Dbtup::releaseTabDescr(Tablerec* const regTabPtr) -{ - Uint32 descriptor= regTabPtr->readKeyArray; - if (descriptor != RNIL) { - jam(); - Uint32 offset[10]; - getTabDescrOffsets(regTabPtr, offset); - - regTabPtr->tabDescriptor= RNIL; - regTabPtr->readKeyArray= RNIL; - regTabPtr->readFunctionArray= NULL; - regTabPtr->updateFunctionArray= NULL; - regTabPtr->charsetArray= NULL; - - // move to start of descriptor - descriptor -= offset[3]; - Uint32 retNo= getTabDescrWord(descriptor + ZTD_DATASIZE); - ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL); - ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE)); - ndbrequire(ZTD_TYPE_NORMAL == - getTabDescrWord((descriptor + retNo) - ZTD_TR_TYPE)); - freeTabDescr(descriptor, retNo); - } -} - -void Dbtup::releaseFragment(Signal* signal, Uint32 tableId, - Uint32 logfile_group_id) -{ - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - Uint32 fragIndex = RNIL; - Uint32 fragId = RNIL; - Uint32 i = 0; - for (i = 0; i < MAX_FRAG_PER_NODE; i++) { - jam(); - if (tabPtr.p->fragid[i] != RNIL) { - jam(); - fragIndex= tabPtr.p->fragrec[i]; - fragId= tabPtr.p->fragid[i]; - break; - } - } - if (fragIndex != RNIL) { - jam(); - - signal->theData[0] = ZUNMAP_PAGES; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragIndex; - signal->theData[3] = 0; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; - } - - if (logfile_group_id != RNIL) - { - Callback cb; - cb.m_callbackData= tabPtr.i; - cb.m_callbackFunction = - safe_cast(&Dbtup::drop_table_log_buffer_callback); - Uint32 sz= sizeof(Disk_undo::Drop) >> 2; - int r0 = c_lgman->alloc_log_space(logfile_group_id, sz); - if (r0) - { - jam(); - warningEvent("Failed to alloc log space for drop table: %u", - tabPtr.i); - goto done; - } - - Logfile_client lgman(this, c_lgman, logfile_group_id); - int res= lgman.get_log_buffer(signal, sz, &cb); - switch(res){ - case 0: - jam(); - return; - case -1: - warningEvent("Failed to get log buffer for drop table: %u", - tabPtr.i); - c_lgman->free_log_space(logfile_group_id, sz); - goto done; - break; - default: - execute(signal, cb, logfile_group_id); - return; - } - } - -done: - drop_table_logsync_callback(signal, tabPtr.i, RNIL); -} - -void -Dbtup::drop_fragment_unmap_pages(Signal *signal, - TablerecPtr tabPtr, - FragrecordPtr fragPtr, - Uint32 pos) -{ - if (tabPtr.p->m_no_of_disk_attributes) - { - jam(); - Disk_alloc_info& alloc_info= fragPtr.p->m_disk_alloc_info; - - if (!alloc_info.m_unmap_pages.isEmpty()) - { - jam(); - ndbout_c("waiting for unmape pages"); - signal->theData[0] = ZUNMAP_PAGES; - signal->theData[1] = tabPtr.i; - 
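releaseFragment above never frees a fragment in one go: it posts a ZUNMAP_PAGES CONTINUEB signal, whose handler later posts ZFREE_EXTENT, which in turn leads to ZFREE_VAR_PAGES, so each stage does a bounded amount of work per timeslice. A simplified sketch of that staged, self-signalling drop (the stage list and queue are illustrative only):

#include <cstdio>
#include <queue>

enum Stage { UNMAP_PAGES, FREE_EXTENT, FREE_VAR_PAGES, DONE };

int main()
{
  std::queue<Stage> continueb;                 // stands in for GSN_CONTINUEB sent to self
  continueb.push(UNMAP_PAGES);
  while (!continueb.empty()) {
    Stage s = continueb.front(); continueb.pop();
    switch (s) {
      case UNMAP_PAGES:    std::printf("unmap dirty disk pages\n");      continueb.push(FREE_EXTENT);    break;
      case FREE_EXTENT:    std::printf("free tablespace extents\n");     continueb.push(FREE_VAR_PAGES); break;
      case FREE_VAR_PAGES: std::printf("return var-part page chunks\n"); continueb.push(DONE);           break;
      case DONE:           std::printf("send DROP_TAB_CONF\n");                                          break;
    }
  }
  return 0;
}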
signal->theData[2] = fragPtr.i; - signal->theData[3] = pos; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; - } - while(alloc_info.m_dirty_pages[pos].isEmpty() && pos < MAX_FREE_LIST) - pos++; - - if (pos == MAX_FREE_LIST) - { - if(alloc_info.m_curr_extent_info_ptr_i != RNIL) - { - Local_extent_info_list - list(c_extent_pool, alloc_info.m_free_extents[0]); - Ptr ext_ptr; - c_extent_pool.getPtr(ext_ptr, alloc_info.m_curr_extent_info_ptr_i); - list.add(ext_ptr); - alloc_info.m_curr_extent_info_ptr_i= RNIL; - } - - drop_fragment_free_extent(signal, tabPtr, fragPtr, 0); - return; - } - - Ptr pagePtr; - ArrayPool *pool= (ArrayPool*)&m_global_page_pool; - { - LocalDLList list(*pool, alloc_info.m_dirty_pages[pos]); - list.first(pagePtr); - list.remove(pagePtr); - } - - Page_cache_client::Request req; - req.m_page.m_page_no = pagePtr.p->m_page_no; - req.m_page.m_file_no = pagePtr.p->m_file_no; - - req.m_callback.m_callbackData= pos; - req.m_callback.m_callbackFunction = - safe_cast(&Dbtup::drop_fragment_unmap_page_callback); - - int flags= Page_cache_client::COMMIT_REQ; - int res= m_pgman.get_page(signal, req, flags); - switch(res) - { - case 0: - case -1: - break; - default: - ndbrequire((Uint32)res == pagePtr.i); - drop_fragment_unmap_page_callback(signal, pos, res); - } - return; - } - drop_fragment_free_extent(signal, tabPtr, fragPtr, 0); -} - -void -Dbtup::drop_fragment_unmap_page_callback(Signal* signal, - Uint32 pos, Uint32 page_id) -{ - Ptr page; - m_global_page_pool.getPtr(page, page_id); - - Local_key key; - key.m_page_no = ((Page*)page.p)->m_page_no; - key.m_file_no = ((Page*)page.p)->m_file_no; - - Uint32 fragId = ((Page*)page.p)->m_fragment_id; - Uint32 tableId = ((Page*)page.p)->m_table_id; - m_pgman.drop_page(key, page_id); - - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - FragrecordPtr fragPtr; - getFragmentrec(fragPtr, fragId, tabPtr.p); - - signal->theData[0] = ZUNMAP_PAGES; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragPtr.i; - signal->theData[3] = pos; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); -} - -void -Dbtup::drop_fragment_free_extent(Signal *signal, - TablerecPtr tabPtr, - FragrecordPtr fragPtr, - Uint32 pos) -{ - if (tabPtr.p->m_no_of_disk_attributes) - { - Disk_alloc_info& alloc_info= fragPtr.p->m_disk_alloc_info; - for(; pos> 2; - (void) c_lgman->alloc_log_space(fragPtr.p->m_logfile_group_id, sz); - - Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); - - int res= lgman.get_log_buffer(signal, sz, &cb); - switch(res){ - case 0: - jam(); - return; - case -1: - ndbrequire("NOT YET IMPLEMENTED" == 0); - break; - default: - execute(signal, cb, fragPtr.p->m_logfile_group_id); - return; - } -#else - execute(signal, cb, fragPtr.p->m_logfile_group_id); - return; -#endif - } - } - - ArrayPool *cheat_pool= (ArrayPool*)&m_global_page_pool; - for(pos= 0; pos list(* cheat_pool, alloc_info.m_dirty_pages[pos]); - list.remove(); - } - } - - signal->theData[0] = ZFREE_VAR_PAGES; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); -} - -void -Dbtup::drop_table_log_buffer_callback(Signal* signal, Uint32 tablePtrI, - Uint32 logfile_group_id) -{ - TablerecPtr tabPtr; - tabPtr.i = tablePtrI; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - ndbrequire(tabPtr.p->m_no_of_disk_attributes); - - Disk_undo::Drop drop; - drop.m_table = tabPtr.i; - drop.m_type_length = - (Disk_undo::UNDO_DROP << 16) | (sizeof(drop) >> 
2); - Logfile_client lgman(this, c_lgman, logfile_group_id); - - Logfile_client::Change c[1] = {{ &drop, sizeof(drop) >> 2 } }; - Uint64 lsn = lgman.add_entry(c, 1); - - Logfile_client::Request req; - req.m_callback.m_callbackData= tablePtrI; - req.m_callback.m_callbackFunction = - safe_cast(&Dbtup::drop_table_logsync_callback); - - int ret = lgman.sync_lsn(signal, lsn, &req, 0); - switch(ret){ - case 0: - return; - case -1: - warningEvent("Failed to syn log for drop of table: %u", tablePtrI); - default: - execute(signal, req.m_callback, logfile_group_id); - } -} - -void -Dbtup::drop_table_logsync_callback(Signal* signal, - Uint32 tabPtrI, - Uint32 logfile_group_id) -{ - TablerecPtr tabPtr; - tabPtr.i = tabPtrI; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - DropTabConf * const dropConf= (DropTabConf *)signal->getDataPtrSend(); - dropConf->senderRef= reference(); - dropConf->senderData= tabPtr.p->m_dropTable.tabUserPtr; - dropConf->tableId= tabPtr.i; - sendSignal(tabPtr.p->m_dropTable.tabUserRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); - - releaseTabDescr(tabPtr.p); - initTab(tabPtr.p); -} - -void -Dbtup::drop_fragment_free_extent_log_buffer_callback(Signal* signal, - Uint32 fragPtrI, - Uint32 unused) -{ - FragrecordPtr fragPtr; - fragPtr.i = fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - TablerecPtr tabPtr; - tabPtr.i = fragPtr.p->fragTableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - ndbrequire(tabPtr.p->m_no_of_disk_attributes); - Disk_alloc_info& alloc_info= fragPtr.p->m_disk_alloc_info; - - for(Uint32 pos = 0; pos ext_ptr; - list.first(ext_ptr); - -#if NOT_YET_UNDO_FREE_EXTENT -#error "This code is complete" -#error "but not needed until we do dealloc of empty extents" - Disk_undo::FreeExtent free; - free.m_table = tabPtr.i; - free.m_fragment = fragPtr.p->fragmentId; - free.m_file_no = ext_ptr.p->m_key.m_file_no; - free.m_page_no = ext_ptr.p->m_key.m_page_no; - free.m_type_length = - (Disk_undo::UNDO_FREE_EXTENT << 16) | (sizeof(free) >> 2); - Logfile_client lgman(this, c_lgman, fragPtr.p->m_logfile_group_id); - - Logfile_client::Change c[1] = {{ &free, sizeof(free) >> 2 } }; - Uint64 lsn = lgman.add_entry(c, 1); -#else - Uint64 lsn = 0; -#endif - - Tablespace_client tsman(signal, c_tsman, tabPtr.i, - fragPtr.p->fragmentId, - fragPtr.p->m_tablespace_id); - - tsman.free_extent(&ext_ptr.p->m_key, lsn); - c_extent_hash.remove(ext_ptr); - list.release(ext_ptr); - - signal->theData[0] = ZFREE_EXTENT; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragPtr.i; - signal->theData[3] = pos; - sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB); - return; - } - } - ndbrequire(false); -} - -void -Dbtup::drop_fragment_free_var_pages(Signal* signal) -{ - jam(); - Uint32 tableId = signal->theData[1]; - Uint32 fragPtrI = signal->theData[2]; - - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - FragrecordPtr fragPtr; - fragPtr.i = fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - PagePtr pagePtr; - if ((pagePtr.i = fragPtr.p->m_var_page_chunks) != RNIL) - { - c_page_pool.getPtr(pagePtr); - Var_page* page = (Var_page*)pagePtr.p; - fragPtr.p->m_var_page_chunks = page->next_chunk; - - Uint32 sz = page->chunk_size; - returnCommonArea(pagePtr.i, sz); - - signal->theData[0] = ZFREE_VAR_PAGES; - signal->theData[1] = tabPtr.i; - signal->theData[2] = fragPtr.i; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; - } - - /** - * Remove LCP's for fragment - */ - 
tabPtr.p->m_dropTable.m_lcpno = 0; - tabPtr.p->m_dropTable.m_fragPtrI = fragPtr.i; - drop_fragment_fsremove(signal, tabPtr, fragPtr); -} - -void -Dbtup::drop_fragment_fsremove_done(Signal* signal, - TablerecPtr tabPtr, - FragrecordPtr fragPtr) -{ - /** - * LCP's removed... - * now continue with "next" - */ - Uint32 logfile_group_id = fragPtr.p->m_logfile_group_id ; - releaseFragPages(fragPtr.p); - Uint32 i; - for(i= 0; ifragrec[i] == fragPtr.i) - break; - - ndbrequire(i != MAX_FRAG_PER_NODE); - tabPtr.p->fragid[i]= RNIL; - tabPtr.p->fragrec[i]= RNIL; - releaseFragrec(fragPtr); - - signal->theData[0]= ZREL_FRAG; - signal->theData[1]= tabPtr.i; - signal->theData[2]= logfile_group_id; - sendSignal(cownref, GSN_CONTINUEB, signal, 3, JBB); - return; -} - -// Remove LCP - -void -Dbtup::drop_fragment_fsremove(Signal* signal, - TablerecPtr tabPtr, - FragrecordPtr fragPtr) -{ - FsRemoveReq* req = (FsRemoveReq*)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = tabPtr.i; - req->directory = 0; - req->ownDirectory = 0; - - Uint32 lcpno = tabPtr.p->m_dropTable.m_lcpno; - Uint32 fragId = fragPtr.p->fragmentId; - Uint32 tableId = fragPtr.p->fragTableId; - - FsOpenReq::setVersion(req->fileNumber, 5); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); - FsOpenReq::v5_setLcpNo(req->fileNumber, lcpno); - FsOpenReq::v5_setTableId(req->fileNumber, tableId); - FsOpenReq::v5_setFragmentId(req->fileNumber, fragId); - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBB); -} - -void -Dbtup::execFSREMOVEREF(Signal* signal) -{ - jamEntry(); - FsRef* ref = (FsRef*)signal->getDataPtr(); - Uint32 userPointer = ref->userPointer; - FsConf* conf = (FsConf*)signal->getDataPtrSend(); - conf->userPointer = userPointer; - execFSREMOVECONF(signal); -} - -void -Dbtup::execFSREMOVECONF(Signal* signal) -{ - jamEntry(); - FsConf* conf = (FsConf*)signal->getDataPtrSend(); - - TablerecPtr tabPtr; - FragrecordPtr fragPtr; - - tabPtr.i = conf->userPointer; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - ndbrequire(tabPtr.p->tableStatus == DROPPING); - - fragPtr.i = tabPtr.p->m_dropTable.m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - tabPtr.p->m_dropTable.m_lcpno++; - if (tabPtr.p->m_dropTable.m_lcpno < 3) - { - jam(); - drop_fragment_fsremove(signal, tabPtr, fragPtr); - } - else - { - jam(); - drop_fragment_fsremove_done(signal, tabPtr, fragPtr); - } -} -// End remove LCP - -void -Dbtup::start_restore_lcp(Uint32 tableId, Uint32 fragId) -{ - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - tabPtr.p->m_dropTable.tabUserPtr= tabPtr.p->m_attributes[DD].m_no_of_fixsize; - tabPtr.p->m_dropTable.tabUserRef= tabPtr.p->m_attributes[DD].m_no_of_varsize; - - Uint32 *tabDesc = (Uint32*)(tableDescriptor+tabPtr.p->tabDescriptor); - for(Uint32 i= 0; im_no_of_attributes; i++) - { - Uint32 disk= AttributeDescriptor::getDiskBased(* tabDesc); - Uint32 null= AttributeDescriptor::getNullable(* tabDesc); - - ndbrequire(tabPtr.p->notNullAttributeMask.get(i) != null); - if(disk) - tabPtr.p->notNullAttributeMask.clear(i); - tabDesc += 2; - } - - tabPtr.p->m_no_of_disk_attributes = 0; - tabPtr.p->m_attributes[DD].m_no_of_fixsize = 0; - tabPtr.p->m_attributes[DD].m_no_of_varsize = 0; -} -void -Dbtup::complete_restore_lcp(Uint32 tableId, Uint32 fragId) -{ - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - tabPtr.p->m_attributes[DD].m_no_of_fixsize= 
tabPtr.p->m_dropTable.tabUserPtr; - tabPtr.p->m_attributes[DD].m_no_of_varsize= tabPtr.p->m_dropTable.tabUserRef; - - tabPtr.p->m_no_of_disk_attributes = - tabPtr.p->m_attributes[DD].m_no_of_fixsize + - tabPtr.p->m_attributes[DD].m_no_of_varsize; - - Uint32 *tabDesc = (Uint32*)(tableDescriptor+tabPtr.p->tabDescriptor); - for(Uint32 i= 0; im_no_of_attributes; i++) - { - Uint32 disk= AttributeDescriptor::getDiskBased(* tabDesc); - Uint32 null= AttributeDescriptor::getNullable(* tabDesc); - - if(disk && !null) - tabPtr.p->notNullAttributeMask.set(i); - - tabDesc += 2; - } -} - -bool -Dbtup::get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage) -{ - jamEntry(); - TablerecPtr tabPtr; - tabPtr.i= tableId; - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - FragrecordPtr fragPtr; - getFragmentrec(fragPtr, fragId, tabPtr.p); - - if (maxPage) - { - * maxPage = fragPtr.p->noOfPages; - } - - return true; -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp deleted file mode 100644 index 47447bc3755..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp +++ /dev/null @@ -1,390 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_PAG_MAN_CPP -#include "Dbtup.hpp" -#include -#include -#include - -/* ---------------------------------------------------------------- */ -// 4) Page Memory Manager (buddy algorithm) -// -// The following data structures in Dbtup is used by the Page Memory -// Manager. -// -// cfreepageList[16] -// Pages with a header -// -// The cfreepageList is 16 free lists. Free list 0 contains chunks of -// pages with 2^0 (=1) pages in each chunk. Free list 1 chunks of 2^1 -// (=2) pages in each chunk and so forth upto free list 15 which -// contains chunks of 2^15 (=32768) pages in each chunk. -// The cfreepageList array contains the pointer to the first chunk -// in each of those lists. The lists are doubly linked where the -// first page in each chunk contains the next and previous references -// in position ZPAGE_NEXT_CLUST_POS and ZPAGE_PREV_CLUST_POS in the -// page header. -// In addition the leading page and the last page in each chunk is marked -// with a state (=ZFREE_COMMON) in position ZPAGE_STATE_POS in page -// header. This state indicates that the page is the leading or last page -// in a chunk of free pages. Furthermore the leading and last page is -// also marked with a reference to the leading (=ZPAGE_FIRST_CLUST_POS) -// and the last page (=ZPAGE_LAST_CLUST_POS) in the chunk. -// -// The aim of these data structures is to enable a free area handling of -// free pages based on a buddy algorithm. When allocating pages it is -// performed in chunks of pages and the algorithm tries to make the -// chunks as large as possible. 
-// This manager is invoked when fragments lack internal page space to -// accomodate all the data they are requested to store. It is also -// invoked when fragments deallocate page space back to the free area. -// -// The following routines are part of the external interface: -// void -// allocConsPages(Uint32 noOfPagesToAllocate, #In -// Uint32& noOfPagesAllocated, #Out -// Uint32& retPageRef) #Out -// void -// returnCommonArea(Uint32 retPageRef, #In -// Uint32 retNoPages) #In -// -// allocConsPages tries to allocate noOfPagesToAllocate pages in one chunk. -// If this fails it delivers a chunk as large as possible. It returns the -// i-value of the first page in the chunk delivered, if zero pages returned -// this i-value is undefined. It also returns the size of the chunk actually -// delivered. -// -// returnCommonArea is used when somebody is returning pages to the free area. -// It is used both from internal routines and external routines. -// -// The following routines are private routines used to support the -// above external interface: -// removeCommonArea() -// insertCommonArea() -// findFreeLeftNeighbours() -// findFreeRightNeighbours() -// Uint32 -// nextHigherTwoLog(Uint32 input) -// -// nextHigherTwoLog is a support routine which is a mathematical function with -// an integer as input and an integer as output. It calculates the 2-log of -// (input + 1). If the 2-log of (input + 1) is larger than 15 then the routine -// will return 15. It is part of the external interface since it is also used -// by other similar memory management algorithms. -// -// External dependencies: -// None. -// -// Side Effects: -// Apart from the above mentioned data structures there are no more -// side effects other than through the subroutine parameters in the -// external interface. 
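
The header comment above describes the scheme the deleted DbtupPagMan.cpp implements: sixteen free lists where list i holds chunks of 2^i pages, allocation rounds the request up to the next power of two via nextHigherTwoLog, splits the chunk it finds, and returns the surplus to the free lists. The standalone sketch below shows only that list indexing and the split-and-return step; the PageAllocator name, the std::vector free lists and the simple loop implementation of nextHigherTwoLog are illustrative assumptions, not the original data structures.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative sketch of a power-of-two free-list ("buddy style") page allocator.
// freeList[i] holds the first page id of every free chunk of exactly 2^i pages.
class PageAllocator {
public:
  explicit PageAllocator(uint32_t totalPages) {
    returnArea(0, totalPages);           // seed the lists with the whole area
  }

  // ceil(log2(input + 1)), the contract of Dbtup::nextHigherTwoLog.
  // Example: input 4 -> 3, so a request for 5 pages (input = 5 - 1) is first
  // tried against list 3, i.e. chunks of 8 pages.
  static uint32_t nextHigherTwoLog(uint32_t input) {
    uint32_t log = 0;
    while (input) { input >>= 1; ++log; }
    return log;
  }

  // Try to allocate `wanted` consecutive pages; on success `firstPage` is the
  // first page id of the chunk and `got` the number of pages handed out.
  bool allocate(uint32_t wanted, uint32_t& firstPage, uint32_t& got) {
    if (wanted == 0) { got = 0; return false; }
    uint32_t list = nextHigherTwoLog(wanted - 1);   // smallest list big enough
    for (uint32_t i = list; i < kLists; i++) {
      if (!freeList[i].empty()) {
        firstPage = freeList[i].back();
        freeList[i].pop_back();
        got = wanted;
        // Give the unused tail of the 2^i chunk back, split into smaller chunks.
        returnArea(firstPage + wanted, (1u << i) - wanted);
        return true;
      }
    }
    got = 0;                                        // nothing large enough free
    return false;
  }

  // Return `count` pages starting at `firstPage`, carving them into
  // power-of-two chunks the way returnCommonArea does.
  void returnArea(uint32_t firstPage, uint32_t count) {
    while (count > 0) {
      uint32_t list = nextHigherTwoLog(count) - 1;  // largest 2^list <= count
      freeList[list].push_back(firstPage);
      firstPage += (1u << list);
      count -= (1u << list);
    }
  }

private:
  static const uint32_t kLists = 16;
  std::vector<uint32_t> freeList[kLists];
};

int main() {
  PageAllocator alloc(100);              // 100 pages = chunks of 64 + 32 + 4
  uint32_t first = 0, got = 0;
  assert(alloc.allocate(5, first, got) && got == 5);
  std::cout << "allocated " << got << " pages starting at " << first << "\n";
  return 0;
}

Two simplifications compared with the deleted block: when no chunk is large enough this sketch simply fails instead of handing out the largest area available, and it never merges an allocation with free neighbouring chunks, which is what findFreeLeftNeighbours and findFreeRightNeighbours do in the original.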
-// -/* ---------------------------------------------------------------- */ - -/* ---------------------------------------------------------------- */ -/* CALCULATE THE 2-LOG + 1 OF TMP AND PUT RESULT INTO TBITS */ -/* ---------------------------------------------------------------- */ -Uint32 Dbtup::nextHigherTwoLog(Uint32 input) -{ - input = input | (input >> 8); - input = input | (input >> 4); - input = input | (input >> 2); - input = input | (input >> 1); - Uint32 output = (input & 0x5555) + ((input >> 1) & 0x5555); - output = (output & 0x3333) + ((output >> 2) & 0x3333); - output = output + (output >> 4); - output = (output & 0xf) + ((output >> 8) & 0xf); - return output; -}//nextHigherTwoLog() - -void Dbtup::initializePage() -{ - for (Uint32 i = 0; i < 16; i++) { - cfreepageList[i] = RNIL; - }//for - PagePtr pagePtr; - for (pagePtr.i = 0; pagePtr.i < c_page_pool.getSize(); pagePtr.i++) { - jam(); - refresh_watch_dog(); - c_page_pool.getPtr(pagePtr); - pagePtr.p->physical_page_id= RNIL; - pagePtr.p->next_page = pagePtr.i + 1; - pagePtr.p->first_cluster_page = RNIL; - pagePtr.p->next_cluster_page = RNIL; - pagePtr.p->last_cluster_page = RNIL; - pagePtr.p->prev_page = RNIL; - pagePtr.p->page_state = ZFREE_COMMON; - }//for - pagePtr.p->next_page = RNIL; - - /** - * Page 0 cant be part of buddy as - * it will scan left right when searching for bigger blocks, - * if 0 is part of arrat, it can search to -1...which is not good - */ - pagePtr.i = 0; - c_page_pool.getPtr(pagePtr); - pagePtr.p->page_state = ~ZFREE_COMMON; - - Uint32 tmp = 1; - returnCommonArea(tmp, c_page_pool.getSize() - tmp); - cnoOfAllocatedPages = tmp; // Is updated by returnCommonArea -}//Dbtup::initializePage() - -#ifdef VM_TRACE -Uint32 fc_left, fc_right, fc_remove; -#endif - -void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate, - Uint32& noOfPagesAllocated, - Uint32& allocPageRef) -{ -#ifdef VM_TRACE - fc_left = fc_right = fc_remove = 0; -#endif - if (noOfPagesToAllocate == 0){ - jam(); - noOfPagesAllocated = 0; - return; - }//if - - Uint32 firstListToCheck = nextHigherTwoLog(noOfPagesToAllocate - 1); - for (Uint32 i = firstListToCheck; i < 16; i++) { - jam(); - if (cfreepageList[i] != RNIL) { - jam(); -/* ---------------------------------------------------------------- */ -/* PROPER AMOUNT OF PAGES WERE FOUND. NOW SPLIT THE FOUND */ -/* AREA AND RETURN THE PART NOT NEEDED. */ -/* ---------------------------------------------------------------- */ - noOfPagesAllocated = noOfPagesToAllocate; - allocPageRef = cfreepageList[i]; - removeCommonArea(allocPageRef, i); - Uint32 retNo = (1 << i) - noOfPagesToAllocate; - Uint32 retPageRef = allocPageRef + noOfPagesToAllocate; - returnCommonArea(retPageRef, retNo); - return; - }//if - }//for -/* ---------------------------------------------------------------- */ -/* PROPER AMOUNT OF PAGES WERE NOT FOUND. FIND AS MUCH AS */ -/* POSSIBLE. */ -/* ---------------------------------------------------------------- */ - if (firstListToCheck) - { - jam(); - for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) { - jam(); - if (cfreepageList[j] != RNIL) { - jam(); -/* ---------------------------------------------------------------- */ -/* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. 
*/ -/* ---------------------------------------------------------------- */ - allocPageRef = cfreepageList[j]; - removeCommonArea(allocPageRef, j); - noOfPagesAllocated = 1 << j; - findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated, - noOfPagesToAllocate); - findFreeRightNeighbours(allocPageRef, noOfPagesAllocated, - noOfPagesToAllocate); - - return; - }//if - }//for - } -/* ---------------------------------------------------------------- */ -/* NO FREE AREA AT ALL EXISTED. RETURN ZERO PAGES */ -/* ---------------------------------------------------------------- */ - noOfPagesAllocated = 0; - return; -}//allocConsPages() - -void Dbtup::returnCommonArea(Uint32 retPageRef, Uint32 retNo) -{ - do { - jam(); - if (retNo == 0) { - jam(); - return; - }//if - Uint32 list = nextHigherTwoLog(retNo) - 1; - retNo -= (1 << list); - insertCommonArea(retPageRef, list); - retPageRef += (1 << list); - } while (1); -}//Dbtup::returnCommonArea() - -void Dbtup::findFreeLeftNeighbours(Uint32& allocPageRef, - Uint32& noPagesAllocated, - Uint32 noOfPagesToAllocate) -{ - PagePtr pageFirstPtr, pageLastPtr; - Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated; - Uint32 loop = 0; - while (allocPageRef > 0 && - ++loop < 16) - { - jam(); - pageLastPtr.i = allocPageRef - 1; - c_page_pool.getPtr(pageLastPtr); - if (pageLastPtr.p->page_state != ZFREE_COMMON) { - jam(); - return; - } else { - jam(); - pageFirstPtr.i = pageLastPtr.p->first_cluster_page; - ndbrequire(pageFirstPtr.i != RNIL); - Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); - removeCommonArea(pageFirstPtr.i, list); - Uint32 listSize = 1 << list; - if (listSize > remainAllocate) { - jam(); - Uint32 retNo = listSize - remainAllocate; - returnCommonArea(pageFirstPtr.i, retNo); - allocPageRef = pageFirstPtr.i + retNo; - noPagesAllocated = noOfPagesToAllocate; - return; - } else { - jam(); - allocPageRef = pageFirstPtr.i; - noPagesAllocated += listSize; - remainAllocate -= listSize; - }//if - }//if -#ifdef VM_TRACE - fc_left++; -#endif - }//while -}//Dbtup::findFreeLeftNeighbours() - -void Dbtup::findFreeRightNeighbours(Uint32& allocPageRef, - Uint32& noPagesAllocated, - Uint32 noOfPagesToAllocate) -{ - PagePtr pageFirstPtr, pageLastPtr; - Uint32 remainAllocate = noOfPagesToAllocate - noPagesAllocated; - if (remainAllocate == 0) { - jam(); - return; - }//if - Uint32 loop = 0; - while ((allocPageRef + noPagesAllocated) < c_page_pool.getSize() && - ++loop < 16) - { - jam(); - pageFirstPtr.i = allocPageRef + noPagesAllocated; - c_page_pool.getPtr(pageFirstPtr); - if (pageFirstPtr.p->page_state != ZFREE_COMMON) { - jam(); - return; - } else { - jam(); - pageLastPtr.i = pageFirstPtr.p->last_cluster_page; - ndbrequire(pageLastPtr.i != RNIL); - Uint32 list = nextHigherTwoLog(pageLastPtr.i - pageFirstPtr.i); - removeCommonArea(pageFirstPtr.i, list); - Uint32 listSize = 1 << list; - if (listSize > remainAllocate) { - jam(); - Uint32 retPageRef = pageFirstPtr.i + remainAllocate; - Uint32 retNo = listSize - remainAllocate; - returnCommonArea(retPageRef, retNo); - noPagesAllocated += remainAllocate; - return; - } else { - jam(); - noPagesAllocated += listSize; - remainAllocate -= listSize; - }//if - }//if -#ifdef VM_TRACE - fc_right++; -#endif - }//while -}//Dbtup::findFreeRightNeighbours() - -void Dbtup::insertCommonArea(Uint32 insPageRef, Uint32 insList) -{ - cnoOfAllocatedPages -= (1 << insList); - PagePtr pageLastPtr, pageInsPtr, pageHeadPtr; - - pageHeadPtr.i = cfreepageList[insList]; - c_page_pool.getPtr(pageInsPtr, insPageRef); - 
ndbrequire(insList < 16); - pageLastPtr.i = (pageInsPtr.i + (1 << insList)) - 1; - - pageInsPtr.p->page_state = ZFREE_COMMON; - pageInsPtr.p->next_cluster_page = pageHeadPtr.i; - pageInsPtr.p->prev_cluster_page = RNIL; - pageInsPtr.p->last_cluster_page = pageLastPtr.i; - cfreepageList[insList] = pageInsPtr.i; - - if (pageHeadPtr.i != RNIL) - { - jam(); - c_page_pool.getPtr(pageHeadPtr); - pageHeadPtr.p->prev_cluster_page = pageInsPtr.i; - } - - c_page_pool.getPtr(pageLastPtr); - pageLastPtr.p->page_state = ZFREE_COMMON; - pageLastPtr.p->first_cluster_page = pageInsPtr.i; - pageLastPtr.p->next_page = RNIL; -}//Dbtup::insertCommonArea() - -void Dbtup::removeCommonArea(Uint32 remPageRef, Uint32 list) -{ - cnoOfAllocatedPages += (1 << list); - PagePtr pagePrevPtr, pageNextPtr, pageLastPtr, remPagePtr; - - c_page_pool.getPtr(remPagePtr, remPageRef); - ndbrequire(list < 16); - if (cfreepageList[list] == remPagePtr.i) { - jam(); - ndbassert(remPagePtr.p->prev_cluster_page == RNIL); - cfreepageList[list] = remPagePtr.p->next_cluster_page; - pageNextPtr.i = cfreepageList[list]; - if (pageNextPtr.i != RNIL) { - jam(); - c_page_pool.getPtr(pageNextPtr); - pageNextPtr.p->prev_cluster_page = RNIL; - }//if - } else { - pagePrevPtr.i = remPagePtr.p->prev_cluster_page; - pageNextPtr.i = remPagePtr.p->next_cluster_page; - c_page_pool.getPtr(pagePrevPtr); - pagePrevPtr.p->next_cluster_page = pageNextPtr.i; - if (pageNextPtr.i != RNIL) - { - jam(); - c_page_pool.getPtr(pageNextPtr); - pageNextPtr.p->prev_cluster_page = pagePrevPtr.i; - } - }//if - remPagePtr.p->next_cluster_page= RNIL; - remPagePtr.p->last_cluster_page= RNIL; - remPagePtr.p->prev_cluster_page= RNIL; - remPagePtr.p->page_state = ~ZFREE_COMMON; - - pageLastPtr.i = (remPagePtr.i + (1 << list)) - 1; - c_page_pool.getPtr(pageLastPtr); - pageLastPtr.p->first_cluster_page= RNIL; - pageLastPtr.p->page_state = ~ZFREE_COMMON; - -}//Dbtup::removeCommonArea() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp deleted file mode 100644 index bf079627990..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ /dev/null @@ -1,602 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_PAGE_MAP_CPP -#include "Dbtup.hpp" -#include -#include -#include - -// -// PageMap is a service used by Dbtup to map logical page id's to physical -// page id's. The mapping is needs the fragment and the logical page id to -// provide the physical id. -// -// This is a part of Dbtup which is the exclusive user of a certain set of -// variables on the fragment record and it is the exclusive user of the -// struct for page ranges. -// -// -// The following methods operate on the data handled by the page map class. 
-// -// Public methods -// insertPageRange(Uint32 startPageId, # In -// Uint32 noPages) # In -// Inserts a range of pages into the mapping structure. -// -// void releaseFragPages() -// Releases all pages and their mappings belonging to a fragment. -// -// Uint32 allocFragPages(Uint32 tafpNoAllocRequested) -// Allocate a set of pages to the fragment from the page manager -// -// Uint32 getEmptyPage() -// Get an empty page from the pool of empty pages on the fragment. -// It returns the physical page id of the empty page. -// Returns RNIL if no empty page is available. -// -// Uint32 getRealpid(Uint32 logicalPageId) -// Return the physical page id provided the logical page id -// -// void initializePageRange() -// Initialise free list of page ranges and initialise the page raneg records. -// -// void initFragRange() -// Initialise the fragment variables when allocating a fragment to a table. -// -// void initPageRangeSize(Uint32 size) -// Initialise the number of page ranges. -// -// Uint32 getNoOfPages() -// Get the number of pages on the fragment currently. -// -// -// Private methods -// Uint32 leafPageRangeFull(PageRangePtr currPageRangePtr) -// -// void errorHandler() -// Method to crash NDB kernel in case of weird data set-up -// -// void allocMoreFragPages() -// When no more empty pages are attached to the fragment and we need more -// we allocate more pages from the page manager using this method. -// -// Private data -// On the fragment record -// currentPageRange # The current page range where to insert the next range -// rootPageRange # The root of the page ranges owned -// nextStartRange # The next page id to assign when expanding the -// # page map -// noOfPages # The number of pages in the fragment -// emptyPrimPage # The first page of the empty pages in the fragment -// -// The full page range struct - -Uint32 Dbtup::getEmptyPage(Fragrecord* regFragPtr) -{ - Uint32 pageId = regFragPtr->emptyPrimPage.firstItem; - if (pageId == RNIL) { - jam(); - allocMoreFragPages(regFragPtr); - pageId = regFragPtr->emptyPrimPage.firstItem; - if (pageId == RNIL) { - jam(); - return RNIL; - }//if - }//if - PagePtr pagePtr; - LocalDLList alloc_pages(c_page_pool, regFragPtr->emptyPrimPage); - alloc_pages.getPtr(pagePtr, pageId); - alloc_pages.remove(pagePtr); - return pageId; -}//Dbtup::getEmptyPage() - -Uint32 Dbtup::getRealpid(Fragrecord* regFragPtr, Uint32 logicalPageId) -{ - PageRangePtr grpPageRangePtr; - Uint32 loopLimit; - Uint32 loopCount = 0; - Uint32 pageRangeLimit = cnoOfPageRangeRec; - ndbassert(logicalPageId < getNoOfPages(regFragPtr)); - grpPageRangePtr.i = regFragPtr->rootPageRange; - while (true) { - ndbassert(loopCount++ < 100); - ndbrequire(grpPageRangePtr.i < pageRangeLimit); - ptrAss(grpPageRangePtr, pageRange); - loopLimit = grpPageRangePtr.p->currentIndexPos; - ndbrequire(loopLimit <= 3); - for (Uint32 i = 0; i <= loopLimit; i++) { - jam(); - if (grpPageRangePtr.p->startRange[i] <= logicalPageId) { - if (grpPageRangePtr.p->endRange[i] >= logicalPageId) { - if (grpPageRangePtr.p->type[i] == ZLEAF) { - jam(); - Uint32 realPageId = (logicalPageId - grpPageRangePtr.p->startRange[i]) + - grpPageRangePtr.p->basePageId[i]; - return realPageId; - } else { - ndbrequire(grpPageRangePtr.p->type[i] == ZNON_LEAF); - grpPageRangePtr.i = grpPageRangePtr.p->basePageId[i]; - }//if - }//if - }//if - }//for - }//while - return 0; -}//Dbtup::getRealpid() - -Uint32 Dbtup::getNoOfPages(Fragrecord* const regFragPtr) -{ - return regFragPtr->noOfPages; -}//Dbtup::getNoOfPages() - -void 
Dbtup::initPageRangeSize(Uint32 size) -{ - cnoOfPageRangeRec = size; -}//Dbtup::initPageRangeSize() - -/* ---------------------------------------------------------------- */ -/* ----------------------- INSERT_PAGE_RANGE_TAB ------------------ */ -/* ---------------------------------------------------------------- */ -/* INSERT A PAGE RANGE INTO THE FRAGMENT */ -/* */ -/* NOTE: THE METHOD IS ATOMIC. EITHER THE ACTION IS */ -/* PERFORMED FULLY OR NO ACTION IS PERFORMED AT ALL. */ -/* TO SUPPORT THIS THE CODE HAS A CLEANUP PART AFTER */ -/* ERRORS. */ -/* ---------------------------------------------------------------- */ -bool Dbtup::insertPageRangeTab(Fragrecord* const regFragPtr, - Uint32 startPageId, - Uint32 noPages) -{ - PageRangePtr currPageRangePtr; - if (cfirstfreerange == RNIL) { - jam(); - return false; - }//if - currPageRangePtr.i = regFragPtr->currentPageRange; - if (currPageRangePtr.i == RNIL) { - jam(); -/* ---------------------------------------------------------------- */ -/* THE FIRST PAGE RANGE IS HANDLED WITH SPECIAL CODE */ -/* ---------------------------------------------------------------- */ - seizePagerange(currPageRangePtr); - regFragPtr->rootPageRange = currPageRangePtr.i; - currPageRangePtr.p->currentIndexPos = 0; - currPageRangePtr.p->parentPtr = RNIL; - } else { - jam(); - ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange); - if (currPageRangePtr.p->currentIndexPos < 3) { - jam(); -/* ---------------------------------------------------------------- */ -/* THE SIMPLE CASE WHEN IT IS ONLY NECESSARY TO FILL IN THE */ -/* NEXT EMPTY POSITION IN THE PAGE RANGE RECORD IS TREATED */ -/* BY COMMON CODE AT THE END OF THE SUBROUTINE. */ -/* ---------------------------------------------------------------- */ - currPageRangePtr.p->currentIndexPos++; - } else { - jam(); - ndbrequire(currPageRangePtr.p->currentIndexPos == 3); - currPageRangePtr.i = leafPageRangeFull(regFragPtr, currPageRangePtr); - if (currPageRangePtr.i == RNIL) { - return false; - }//if - ptrCheckGuard(currPageRangePtr, cnoOfPageRangeRec, pageRange); - }//if - }//if - currPageRangePtr.p->startRange[currPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange; -/* ---------------------------------------------------------------- */ -/* NOW SET THE LEAF LEVEL PAGE RANGE RECORD PROPERLY */ -/* PAGE_RANGE_PTR REFERS TO LEAF RECORD WHEN ARRIVING HERE */ -/* ---------------------------------------------------------------- */ - currPageRangePtr.p->endRange[currPageRangePtr.p->currentIndexPos] = - (regFragPtr->nextStartRange + noPages) - 1; - currPageRangePtr.p->basePageId[currPageRangePtr.p->currentIndexPos] = startPageId; - currPageRangePtr.p->type[currPageRangePtr.p->currentIndexPos] = ZLEAF; -/* ---------------------------------------------------------------- */ -/* WE NEED TO UPDATE THE CURRENT PAGE RANGE IN CASE IT HAS */ -/* CHANGED. WE ALSO NEED TO UPDATE THE NEXT START RANGE */ -/* ---------------------------------------------------------------- */ - regFragPtr->currentPageRange = currPageRangePtr.i; - regFragPtr->nextStartRange += noPages; -/* ---------------------------------------------------------------- */ -/* WE NEED TO UPDATE THE END RANGE IN ALL PAGE RANGE RECORDS */ -/* UP TO THE ROOT. 
*/ -/* ---------------------------------------------------------------- */ - PageRangePtr loopPageRangePtr; - loopPageRangePtr = currPageRangePtr; - while (true) { - jam(); - loopPageRangePtr.i = loopPageRangePtr.p->parentPtr; - if (loopPageRangePtr.i != RNIL) { - jam(); - ptrCheckGuard(loopPageRangePtr, cnoOfPageRangeRec, pageRange); - ndbrequire(loopPageRangePtr.p->currentIndexPos < 4); - loopPageRangePtr.p->endRange[loopPageRangePtr.p->currentIndexPos] += noPages; - } else { - jam(); - break; - }//if - }//while - regFragPtr->noOfPages += noPages; - return true; -}//Dbtup::insertPageRangeTab() - - -void Dbtup::releaseFragPages(Fragrecord* regFragPtr) -{ - if (regFragPtr->rootPageRange == RNIL) { - jam(); - return; - }//if - PageRangePtr regPRPtr; - regPRPtr.i = regFragPtr->rootPageRange; - ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); - while (true) { - jam(); - const Uint32 indexPos = regPRPtr.p->currentIndexPos; - ndbrequire(indexPos < 4); - - const Uint32 basePageId = regPRPtr.p->basePageId[indexPos]; - regPRPtr.p->basePageId[indexPos] = RNIL; - if (basePageId == RNIL) { - jam(); - /** - * Finished with indexPos continue with next - */ - if (indexPos > 0) { - jam(); - regPRPtr.p->currentIndexPos--; - continue; - }//if - - /* ---------------------------------------------------------------- */ - /* THE PAGE RANGE REC IS EMPTY. RELEASE IT. */ - /*----------------------------------------------------------------- */ - Uint32 parentPtr = regPRPtr.p->parentPtr; - releasePagerange(regPRPtr); - - if (parentPtr != RNIL) { - jam(); - regPRPtr.i = parentPtr; - ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); - continue; - }//if - - jam(); - ndbrequire(regPRPtr.i == regFragPtr->rootPageRange); - initFragRange(regFragPtr); - for (Uint32 i = 0; i tmp(c_page_pool, regFragPtr->free_var_page_array[i]); - tmp.remove(); - } - - { - LocalDLList tmp(c_page_pool, regFragPtr->emptyPrimPage); - tmp.remove(); - } - - { - LocalDLFifoList tmp(c_page_pool, regFragPtr->thFreeFirst); - tmp.remove(); - } - - { - LocalSLList tmp(c_page_pool, regFragPtr->m_empty_pages); - tmp.remove(); - } - - return; - } else { - if (regPRPtr.p->type[indexPos] == ZNON_LEAF) { - jam(); - /* ---------------------------------------------------------------- */ - // A non-leaf node, we must release everything below it before we - // release this node. - /* ---------------------------------------------------------------- */ - regPRPtr.i = basePageId; - ptrCheckGuard(regPRPtr, cnoOfPageRangeRec, pageRange); - } else { - jam(); - ndbrequire(regPRPtr.p->type[indexPos] == ZLEAF); - /* ---------------------------------------------------------------- */ - /* PAGE_RANGE_PTR /= RNIL AND THE CURRENT POS IS NOT A CHLED. 
*/ - /*----------------------------------------------------------------- */ - const Uint32 start = regPRPtr.p->startRange[indexPos]; - const Uint32 stop = regPRPtr.p->endRange[indexPos]; - ndbrequire(stop >= start); - const Uint32 retNo = (stop - start + 1); - returnCommonArea(basePageId, retNo); - }//if - }//if - }//while -}//Dbtup::releaseFragPages() - -void Dbtup::initializePageRange() -{ - PageRangePtr regPTRPtr; - for (regPTRPtr.i = 0; - regPTRPtr.i < cnoOfPageRangeRec; regPTRPtr.i++) { - ptrAss(regPTRPtr, pageRange); - regPTRPtr.p->nextFree = regPTRPtr.i + 1; - }//for - regPTRPtr.i = cnoOfPageRangeRec - 1; - ptrAss(regPTRPtr, pageRange); - regPTRPtr.p->nextFree = RNIL; - cfirstfreerange = 0; - c_noOfFreePageRanges = cnoOfPageRangeRec; -}//Dbtup::initializePageRange() - -void Dbtup::initFragRange(Fragrecord* const regFragPtr) -{ - regFragPtr->rootPageRange = RNIL; - regFragPtr->currentPageRange = RNIL; - regFragPtr->noOfPages = 0; - regFragPtr->noOfVarPages = 0; - regFragPtr->noOfPagesToGrow = 2; - regFragPtr->nextStartRange = 0; -}//initFragRange() - -Uint32 Dbtup::allocFragPages(Fragrecord* regFragPtr, Uint32 tafpNoAllocRequested) -{ - Uint32 tafpPagesAllocated = 0; - while (true) { - Uint32 noOfPagesAllocated = 0; - Uint32 noPagesToAllocate = tafpNoAllocRequested - tafpPagesAllocated; - Uint32 retPageRef = RNIL; - allocConsPages(noPagesToAllocate, noOfPagesAllocated, retPageRef); - if (noOfPagesAllocated == 0) { - jam(); - return tafpPagesAllocated; - }//if -/* ---------------------------------------------------------------- */ -/* IT IS NOW TIME TO PUT THE ALLOCATED AREA INTO THE PAGE */ -/* RANGE TABLE. */ -/* ---------------------------------------------------------------- */ - Uint32 startRange = regFragPtr->nextStartRange; - if (!insertPageRangeTab(regFragPtr, retPageRef, noOfPagesAllocated)) { - jam(); - returnCommonArea(retPageRef, noOfPagesAllocated); - return tafpPagesAllocated; - }//if - tafpPagesAllocated += noOfPagesAllocated; - Uint32 loopLimit = retPageRef + noOfPagesAllocated; - PagePtr loopPagePtr; -/* ---------------------------------------------------------------- */ -/* SINCE A NUMBER OF PAGES WERE ALLOCATED FROM COMMON AREA */ -/* WITH SUCCESS IT IS NOW TIME TO CHANGE THE STATE OF */ -/* THOSE PAGES TO EMPTY_MM AND LINK THEM INTO THE EMPTY */ -/* PAGE LIST OF THE FRAGMENT. */ -/* ---------------------------------------------------------------- */ - Uint32 prev = RNIL; - for (loopPagePtr.i = retPageRef; loopPagePtr.i < loopLimit; loopPagePtr.i++) { - jam(); - c_page_pool.getPtr(loopPagePtr); - loopPagePtr.p->page_state = ZEMPTY_MM; - loopPagePtr.p->frag_page_id = startRange + - (loopPagePtr.i - retPageRef); - loopPagePtr.p->physical_page_id = loopPagePtr.i; - loopPagePtr.p->nextList = loopPagePtr.i + 1; - loopPagePtr.p->prevList = prev; - prev = loopPagePtr.i; - }//for - loopPagePtr.i--; - ndbassert(loopPagePtr.p == c_page_pool.getPtr(loopPagePtr.i)); - loopPagePtr.p->nextList = RNIL; - - LocalDLList alloc(c_page_pool, regFragPtr->emptyPrimPage); - if (noOfPagesAllocated > 1) - { - alloc.add(retPageRef, loopPagePtr); - } - else - { - alloc.add(loopPagePtr); - } - -/* ---------------------------------------------------------------- */ -/* WAS ENOUGH PAGES ALLOCATED OR ARE MORE NEEDED. 
*/ -/* ---------------------------------------------------------------- */ - if (tafpPagesAllocated < tafpNoAllocRequested) { - jam(); - } else { - ndbrequire(tafpPagesAllocated == tafpNoAllocRequested); - jam(); - return tafpNoAllocRequested; - }//if - }//while -}//Dbtup::allocFragPages() - -void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) -{ - Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5% - noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25% - noAllocPages += 2; -/* -----------------------------------------------------------------*/ -// We will grow by 18.75% plus two more additional pages to grow -// a little bit quicker in the beginning. -/* -----------------------------------------------------------------*/ - - if (noAllocPages > m_max_allocate_pages) - { - noAllocPages = m_max_allocate_pages; - } - Uint32 allocated = allocFragPages(regFragPtr, noAllocPages); - regFragPtr->noOfPagesToGrow += allocated; -}//Dbtup::allocMoreFragPages() - -Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr) -{ -/* ---------------------------------------------------------------- */ -/* THE COMPLEX CASE WHEN THE LEAF NODE IS FULL. GO UP THE TREE*/ -/* TO FIND THE FIRST RECORD WITH A FREE ENTRY. ALLOCATE NEW */ -/* PAGE RANGE RECORDS THEN ALL THE WAY DOWN TO THE LEAF LEVEL */ -/* AGAIN. THE TREE SHOULD ALWAYS REMAIN BALANCED. */ -/* ---------------------------------------------------------------- */ - PageRangePtr parentPageRangePtr; - PageRangePtr foundPageRangePtr; - parentPageRangePtr = currPageRangePtr; - Uint32 tiprNoLevels = 1; - while (true) { - jam(); - parentPageRangePtr.i = parentPageRangePtr.p->parentPtr; - if (parentPageRangePtr.i == RNIL) { - jam(); -/* ---------------------------------------------------------------- */ -/* WE HAVE REACHED THE ROOT. A NEW ROOT MUST BE ALLOCATED. */ -/* ---------------------------------------------------------------- */ - if (c_noOfFreePageRanges < tiprNoLevels) { - jam(); - return RNIL; - }//if - PageRangePtr oldRootPRPtr; - PageRangePtr newRootPRPtr; - oldRootPRPtr.i = regFragPtr->rootPageRange; - ptrCheckGuard(oldRootPRPtr, cnoOfPageRangeRec, pageRange); - seizePagerange(newRootPRPtr); - regFragPtr->rootPageRange = newRootPRPtr.i; - oldRootPRPtr.p->parentPtr = newRootPRPtr.i; - - newRootPRPtr.p->basePageId[0] = oldRootPRPtr.i; - newRootPRPtr.p->parentPtr = RNIL; - newRootPRPtr.p->startRange[0] = 0; - newRootPRPtr.p->endRange[0] = regFragPtr->nextStartRange - 1; - newRootPRPtr.p->type[0] = ZNON_LEAF; - newRootPRPtr.p->startRange[1] = regFragPtr->nextStartRange; - newRootPRPtr.p->endRange[1] = regFragPtr->nextStartRange - 1; - newRootPRPtr.p->type[1] = ZNON_LEAF; - newRootPRPtr.p->currentIndexPos = 1; - foundPageRangePtr = newRootPRPtr; - break; - } else { - jam(); - ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange); - if (parentPageRangePtr.p->currentIndexPos < 3) { - jam(); - - if (c_noOfFreePageRanges < tiprNoLevels) - { - jam(); - return RNIL; - }//if - -/* ---------------------------------------------------------------- */ -/* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. 
*/ -/* ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE, */ -/* ALLOCATE A NEW PAGE RANGE RECORD AND UPDATE THE POINTERS */ -/* ---------------------------------------------------------------- */ - parentPageRangePtr.p->currentIndexPos++; - parentPageRangePtr.p->startRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange; - parentPageRangePtr.p->endRange[parentPageRangePtr.p->currentIndexPos] = regFragPtr->nextStartRange - 1; - parentPageRangePtr.p->type[parentPageRangePtr.p->currentIndexPos] = ZNON_LEAF; - foundPageRangePtr = parentPageRangePtr; - break; - } else { - jam(); - ndbrequire(parentPageRangePtr.p->currentIndexPos == 3); -/* ---------------------------------------------------------------- */ -/* THE PAGE RANGE RECORD WAS FULL. FIND THE PARENT RECORD */ -/* AND INCREASE THE NUMBER OF LEVELS WE HAVE TRAVERSED */ -/* GOING UP THE TREE. */ -/* ---------------------------------------------------------------- */ - tiprNoLevels++; - }//if - }//if - }//while -/* ---------------------------------------------------------------- */ -/* REMEMBER THE ERROR LEVEL IN CASE OF ALLOCATION ERRORS */ -/* ---------------------------------------------------------------- */ - PageRangePtr newPageRangePtr; - PageRangePtr prevPageRangePtr; - prevPageRangePtr = foundPageRangePtr; - if (c_noOfFreePageRanges < tiprNoLevels) { - jam(); - return RNIL; - }//if -/* ---------------------------------------------------------------- */ -/* NOW WE HAVE PERFORMED THE SEARCH UPWARDS AND FILLED IN THE */ -/* PROPER FIELDS IN THE PAGE RANGE RECORD WHERE SOME SPACE */ -/* WAS FOUND. THE NEXT STEP IS TO ALLOCATE PAGE RANGES SO */ -/* THAT WE KEEP THE B-TREE BALANCED. THE NEW PAGE RANGE */ -/* ARE ALSO PROPERLY UPDATED ON THE PATH TO THE LEAF LEVEL. */ -/* ---------------------------------------------------------------- */ - while (true) { - jam(); - seizePagerange(newPageRangePtr); - tiprNoLevels--; - ndbrequire(prevPageRangePtr.p->currentIndexPos < 4); - prevPageRangePtr.p->basePageId[prevPageRangePtr.p->currentIndexPos] = newPageRangePtr.i; - newPageRangePtr.p->parentPtr = prevPageRangePtr.i; - newPageRangePtr.p->currentIndexPos = 0; - if (tiprNoLevels > 0) { - jam(); - newPageRangePtr.p->startRange[0] = regFragPtr->nextStartRange; - newPageRangePtr.p->endRange[0] = regFragPtr->nextStartRange - 1; - newPageRangePtr.p->type[0] = ZNON_LEAF; - prevPageRangePtr = newPageRangePtr; - } else { - jam(); - break; - }//if - }//while - return newPageRangePtr.i; -}//Dbtup::leafPageRangeFull() - -void Dbtup::releasePagerange(PageRangePtr regPRPtr) -{ - regPRPtr.p->nextFree = cfirstfreerange; - cfirstfreerange = regPRPtr.i; - c_noOfFreePageRanges++; -}//Dbtup::releasePagerange() - -void Dbtup::seizePagerange(PageRangePtr& regPageRangePtr) -{ - regPageRangePtr.i = cfirstfreerange; - ptrCheckGuard(regPageRangePtr, cnoOfPageRangeRec, pageRange); - cfirstfreerange = regPageRangePtr.p->nextFree; - regPageRangePtr.p->nextFree = RNIL; - regPageRangePtr.p->currentIndexPos = 0; - regPageRangePtr.p->parentPtr = RNIL; - for (Uint32 i = 0; i < 4; i++) { - regPageRangePtr.p->startRange[i] = 1; - regPageRangePtr.p->endRange[i] = 0; - regPageRangePtr.p->type[i] = ZNON_LEAF; - regPageRangePtr.p->basePageId[i] = (Uint32)-1; - }//for - c_noOfFreePageRanges--; -}//Dbtup::seizePagerange() - -void Dbtup::errorHandler(Uint32 errorCode) -{ - switch (errorCode) { - case 0: - jam(); - break; - case 1: - jam(); - break; - case 2: - jam(); - break; - default: - jam(); - } - ndbrequire(false); -}//Dbtup::errorHandler() diff --git 
a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp deleted file mode 100644 index 1a027315060..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ /dev/null @@ -1,1687 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_ROUTINES_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include - -void -Dbtup::setUpQueryRoutines(Tablerec *regTabPtr) -{ - Uint32 startDescriptor= regTabPtr->tabDescriptor; - ndbrequire((startDescriptor + (regTabPtr->m_no_of_attributes << ZAD_LOG_SIZE)) - <= cnoOfTabDescrRec); - for (Uint32 i= 0; i < regTabPtr->m_no_of_attributes; i++) { - Uint32 attrDescrStart= startDescriptor + (i << ZAD_LOG_SIZE); - Uint32 attrDescr= tableDescriptor[attrDescrStart].tabDescr; - Uint32 attrOffset= tableDescriptor[attrDescrStart + 1].tabDescr; - if (!AttributeDescriptor::getDynamic(attrDescr)) { - if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ - if (!AttributeDescriptor::getNullable(attrDescr)) { - if (AttributeDescriptor::getSize(attrDescr) == 0){ - jam(); - regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL; - regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL; - } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4) { - jam(); - regTabPtr->readFunctionArray[i]= - &Dbtup::readFixedSizeTHOneWordNotNULL; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateFixedSizeTHOneWordNotNULL; - } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - jam(); - regTabPtr->readFunctionArray[i]= - &Dbtup::readFixedSizeTHTwoWordNotNULL; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateFixedSizeTHTwoWordNotNULL; - } else { - jam(); - regTabPtr->readFunctionArray[i]= - &Dbtup::readFixedSizeTHManyWordNotNULL; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateFixedSizeTHManyWordNotNULL; - } - // replace functions for char attribute - if (AttributeOffset::getCharsetFlag(attrOffset)) { - jam(); - regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL; - regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL; - } - } else { - if (AttributeDescriptor::getSize(attrDescr) == 0){ - jam(); - regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable; - regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable; - } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 4){ - jam(); - regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable; - regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; - } else if (AttributeDescriptor::getSizeInBytes(attrDescr) == 8) { - jam(); - regTabPtr->readFunctionArray[i]= - &Dbtup::readFixedSizeTHTwoWordNULLable; - regTabPtr->updateFunctionArray[i]= - 
&Dbtup::updateFixedSizeTHManyWordNULLable; - } else { - jam(); - regTabPtr->readFunctionArray[i]= - &Dbtup::readFixedSizeTHManyWordNULLable; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateFixedSizeTHManyWordNULLable; - } - // replace functions for char attribute - if (AttributeOffset::getCharsetFlag(attrOffset)) { - jam(); - regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable; - regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable; - } - } - } else { - if (!AttributeDescriptor::getNullable(attrDescr)) { - regTabPtr->readFunctionArray[i]= - &Dbtup::readVarSizeNotNULL; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateVarSizeNotNULL; - } else { - regTabPtr->readFunctionArray[i]= - &Dbtup::readVarSizeNULLable; - regTabPtr->updateFunctionArray[i]= - &Dbtup::updateVarSizeNULLable; - } - } - if(AttributeDescriptor::getDiskBased(attrDescr)) - { - // array initializer crashes gcc-2.95.3 - ReadFunction r[6]; - { - r[0] = &Dbtup::readDiskBitsNotNULL; - r[1] = &Dbtup::readDiskBitsNULLable; - r[2] = &Dbtup::readDiskFixedSizeNotNULL; - r[3] = &Dbtup::readDiskFixedSizeNULLable; - r[4] = &Dbtup::readDiskVarSizeNULLable; - r[5] = &Dbtup::readDiskVarSizeNotNULL; - } - UpdateFunction u[6]; - { - u[0] = &Dbtup::updateDiskBitsNotNULL; - u[1] = &Dbtup::updateDiskBitsNULLable; - u[2] = &Dbtup::updateDiskFixedSizeNotNULL; - u[3] = &Dbtup::updateDiskFixedSizeNULLable; - u[4] = &Dbtup::updateDiskVarSizeNULLable; - u[5] = &Dbtup::updateDiskVarSizeNotNULL; - } - Uint32 a= - AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED ? 2 : 4; - - if(AttributeDescriptor::getSize(attrDescr) == 0) - a= 0; - - Uint32 b= - AttributeDescriptor::getNullable(attrDescr)? 1 : 0; - regTabPtr->readFunctionArray[i]= r[a+b]; - regTabPtr->updateFunctionArray[i]= u[a+b]; - } - } else { - if (AttributeDescriptor::getArrayType(attrDescr) == NDB_ARRAYTYPE_FIXED){ - jam(); - regTabPtr->readFunctionArray[i]= &Dbtup::readDynFixedSize; - regTabPtr->updateFunctionArray[i]= &Dbtup::updateDynFixedSize; - } else { - regTabPtr->readFunctionArray[i]= &Dbtup::readDynVarSize; - regTabPtr->updateFunctionArray[i]= &Dbtup::updateDynVarSize; - } - } - } -} - -/* ---------------------------------------------------------------- */ -/* THIS ROUTINE IS USED TO READ A NUMBER OF ATTRIBUTES IN THE */ -/* DATABASE AND PLACE THE RESULT IN ATTRINFO RECORDS. */ -// -// In addition to the parameters used in the call it also relies on the -// following variables set-up properly. 
-// -// operPtr.p Operation record pointer -// fragptr.p Fragment record pointer -// tabptr.p Table record pointer - -// It requires the following fields in KeyReqStruct to be properly -// filled in: -// tuple_header Reference to the tuple -// check_offset Record size -// attr_descr Reference to the Table Descriptor for the table -// -// The read functions in addition expects that the following fields in -// KeyReqStruct is set up: -// out_buf_index Index for output buffer -// max_read Size of output buffer -// attr_descriptor Attribute Descriptor from where attribute size -// can be read -/* ---------------------------------------------------------------- */ -int Dbtup::readAttributes(KeyReqStruct *req_struct, - const Uint32* inBuffer, - Uint32 inBufLen, - Uint32* outBuffer, - Uint32 maxRead, - bool xfrm_flag) -{ - Uint32 attributeId, descr_index, tmpAttrBufIndex, inBufIndex; - Uint32 attributeOffset; - TableDescriptor* attr_descr; - AttributeHeader* ahOut; - - Tablerec* const regTabPtr= tabptr.p; - Uint32 numAttributes= regTabPtr->m_no_of_attributes; - - inBufIndex= 0; - req_struct->out_buf_index= 0; - req_struct->max_read= maxRead; - req_struct->xfrm_flag= xfrm_flag; - while (inBufIndex < inBufLen) { - tmpAttrBufIndex= req_struct->out_buf_index; - AttributeHeader ahIn(inBuffer[inBufIndex]); - inBufIndex++; - attributeId= ahIn.getAttributeId(); - descr_index= attributeId << ZAD_LOG_SIZE; - jam(); - - AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, 0); - ahOut= (AttributeHeader*)&outBuffer[tmpAttrBufIndex]; - req_struct->out_buf_index= tmpAttrBufIndex + 1; - attr_descr= req_struct->attr_descr; - if (attributeId < numAttributes) { - attributeOffset= attr_descr[descr_index + 1].tabDescr; - ReadFunction f= regTabPtr->readFunctionArray[attributeId]; - req_struct->attr_descriptor= attr_descr[descr_index].tabDescr; - if ((this->*f)(outBuffer, - req_struct, - ahOut, - attributeOffset)) { - continue; - } else { - return -1; - } - } else if(attributeId & AttributeHeader::PSEUDO) { - if (attributeId == AttributeHeader::ANY_VALUE) - { - jam(); - Uint32 RlogSize = req_struct->log_size; - operPtr.p->m_any_value = inBuffer[inBufIndex]; - * (clogMemBuffer + RlogSize) = inBuffer[inBufIndex - 1]; - * (clogMemBuffer + RlogSize + 1) = inBuffer[inBufIndex]; - inBufIndex++; - req_struct->out_buf_index = tmpAttrBufIndex; - req_struct->log_size = RlogSize + 2; - continue; - } - jam(); - Uint32 sz= read_pseudo(attributeId, - req_struct, - outBuffer+tmpAttrBufIndex+1); - AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz << 2); - req_struct->out_buf_index= tmpAttrBufIndex + 1 + sz; - } else { - terrorCode = ZATTRIBUTE_ID_ERROR; - return -1; - }//if - }//while - return req_struct->out_buf_index; -} - -bool -Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - Uint32 indexBuf= req_struct->out_buf_index; - Uint32 readOffset= AttributeOffset::getOffset(attrDes2); - Uint32 const wordRead= tuple_header[readOffset]; - Uint32 newIndexBuf= indexBuf + 1; - Uint32 maxRead= req_struct->max_read; - - ndbrequire(readOffset < req_struct->check_offset[MM]); - if (newIndexBuf <= maxRead) { - jam(); - outBuffer[indexBuf]= wordRead; - ahOut->setDataSize(1); - req_struct->out_buf_index= newIndexBuf; - return true; - } else { - jam(); - terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - } -} - -bool -Dbtup::readFixedSizeTHTwoWordNotNULL(Uint32* outBuffer, - 
KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - Uint32 indexBuf= req_struct->out_buf_index; - Uint32 readOffset= AttributeOffset::getOffset(attrDes2); - Uint32 const wordReadFirst= tuple_header[readOffset]; - Uint32 const wordReadSecond= tuple_header[readOffset + 1]; - Uint32 newIndexBuf= indexBuf + 2; - Uint32 maxRead= req_struct->max_read; - - ndbrequire(readOffset + 1 < req_struct->check_offset[MM]); - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setDataSize(2); - outBuffer[indexBuf]= wordReadFirst; - outBuffer[indexBuf + 1]= wordReadSecond; - req_struct->out_buf_index= newIndexBuf; - return true; - } else { - jam(); - terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - } -} - -bool -Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Uint32 attrDescriptor= req_struct->attr_descriptor; - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - Uint32 indexBuf= req_struct->out_buf_index; - Uint32 readOffset= AttributeOffset::getOffset(attrDes2); - Uint32 attrNoOfWords= AttributeDescriptor::getSizeInWords(attrDescriptor); - Uint32 maxRead= req_struct->max_read; - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2); - - ndbrequire((readOffset + attrNoOfWords - 1) < req_struct->check_offset[MM]); - if (! charsetFlag || ! req_struct->xfrm_flag) { - Uint32 newIndexBuf = indexBuf + attrNoOfWords; - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); - MEMCOPY_NO_WORDS(&outBuffer[indexBuf], - &tuple_header[readOffset], - attrNoOfWords); - req_struct->out_buf_index = newIndexBuf; - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - }//if - } else { - jam(); - Tablerec* regTabPtr = tabptr.p; - Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); - uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; - const uchar* srcPtr = (uchar*)&tuple_header[readOffset]; - Uint32 i = AttributeOffset::getCharsetPos(attrDes2); - ndbrequire(i < regTabPtr->noOfCharsets); - CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); - Uint32 lb, len; - bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); - Uint32 xmul = cs->strxfrm_multiply; - if (xmul == 0) - xmul = 1; - Uint32 dstLen = xmul * (srcBytes - lb); - Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); - if (maxIndexBuf <= maxRead && ok) { - jam(); - int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); - ndbrequire(n != -1); - int m = n; - while ((m & 3) != 0) { - dstPtr[m++] = 0; - } - ahOut->setByteSize(n); - Uint32 newIndexBuf = indexBuf + (m >> 2); - ndbrequire(newIndexBuf <= maxRead); - req_struct->out_buf_index = newIndexBuf; - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - } - } - return false; -}//Dbtup::readFixedSizeTHManyWordNotNULL() - -bool -Dbtup::readFixedSizeTHOneWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if (!nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readFixedSizeTHOneWordNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - -bool -Dbtup::readFixedSizeTHTwoWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if 
(!nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readFixedSizeTHTwoWordNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - -bool -Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if (!nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readFixedSizeTHManyWordNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - -bool -Dbtup::readFixedSizeTHZeroWordNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - jam(); - if (nullFlagCheck(req_struct, attrDes2)) { - jam(); - ahOut->setNULL(); - } - return true; -} - -bool -Dbtup::nullFlagCheck(KeyReqStruct *req_struct, Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - - return BitmaskImpl::get(regTabPtr->m_offsets[MM].m_null_words, bits, pos); -} - -bool -Dbtup::disk_nullFlagCheck(KeyReqStruct *req_struct, Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - - return BitmaskImpl::get(regTabPtr->m_offsets[DD].m_null_words, bits, pos); -} - -bool -Dbtup::readVarSizeNotNULL(Uint32* out_buffer, - KeyReqStruct *req_struct, - AttributeHeader* ah_out, - Uint32 attr_des2) -{ - Uint32 attr_descriptor, index_buf, var_index; - Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size; - Uint32 var_attr_pos, max_read; - - Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset; - var_index= AttributeOffset::getOffset(attr_des2); - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attr_des2); - var_attr_pos= req_struct->m_var_data[MM].m_offset_array_ptr[var_index]; - vsize_in_bytes= req_struct->m_var_data[MM].m_offset_array_ptr[var_index+idx] - var_attr_pos; - attr_descriptor= req_struct->attr_descriptor; - index_buf= req_struct->out_buf_index; - max_var_size= AttributeDescriptor::getSizeInWords(attr_descriptor); - max_read= req_struct->max_read; - vsize_in_words= convert_byte_to_word_size(vsize_in_bytes); - new_index= index_buf + vsize_in_words; - - ndbrequire(vsize_in_words <= max_var_size); - if (! charsetFlag || ! 
req_struct->xfrm_flag) - { - if (new_index <= max_read) { - jam(); - ah_out->setByteSize(vsize_in_bytes); - out_buffer[index_buf + (vsize_in_bytes >> 2)] = 0; - memcpy(out_buffer+index_buf, - req_struct->m_var_data[MM].m_data_ptr+var_attr_pos, - vsize_in_bytes); - req_struct->out_buf_index= new_index; - return true; - } - } - else - { - jam(); - Tablerec* regTabPtr = tabptr.p; - Uint32 maxBytes = AttributeDescriptor::getSizeInBytes(attr_descriptor); - Uint32 srcBytes = vsize_in_bytes; - uchar* dstPtr = (uchar*)(out_buffer+index_buf); - const uchar* srcPtr = (uchar*)(req_struct->m_var_data[MM].m_data_ptr+var_attr_pos); - Uint32 i = AttributeOffset::getCharsetPos(attr_des2); - ndbrequire(i < regTabPtr->noOfCharsets); - CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - Uint32 typeId = AttributeDescriptor::getType(attr_descriptor); - Uint32 lb, len; - bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); - Uint32 xmul = cs->strxfrm_multiply; - if (xmul == 0) - xmul = 1; - // see comment in DbtcMain.cpp - Uint32 dstLen = xmul * (maxBytes - lb); - Uint32 maxIndexBuf = index_buf + (dstLen >> 2); - if (maxIndexBuf <= max_read && ok) { - jam(); - int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); - ndbrequire(n != -1); - int m = n; - while ((m & 3) != 0) { - dstPtr[m++] = 0; - } - ah_out->setByteSize(n); - Uint32 newIndexBuf = index_buf + (m >> 2); - ndbrequire(newIndexBuf <= max_read); - req_struct->out_buf_index = newIndexBuf; - return true; - } - } - jam(); - terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; -} - -bool -Dbtup::readVarSizeNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if (!nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readVarSizeNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - -bool -Dbtup::readDynFixedSize(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - jam(); - terrorCode= ZVAR_SIZED_NOT_SUPPORTED; - return false; -} - -bool -Dbtup::readDynVarSize(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - jam(); - terrorCode= ZVAR_SIZED_NOT_SUPPORTED; - return false; -}//Dbtup::readDynBigVarSize() - -bool -Dbtup::readDiskFixedSizeNotNULL(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Uint32 attrDescriptor= req_struct->attr_descriptor; - Uint32 *tuple_header= req_struct->m_disk_ptr->m_data; - Uint32 indexBuf= req_struct->out_buf_index; - Uint32 readOffset= AttributeOffset::getOffset(attrDes2); - Uint32 attrNoOfWords= AttributeDescriptor::getSizeInWords(attrDescriptor); - Uint32 maxRead= req_struct->max_read; - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2); - - ndbrequire((readOffset + attrNoOfWords - 1) < req_struct->check_offset[DD]); - if (! charsetFlag || ! 
req_struct->xfrm_flag) { - Uint32 newIndexBuf = indexBuf + attrNoOfWords; - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setByteSize(AttributeDescriptor::getSizeInBytes(attrDescriptor)); - MEMCOPY_NO_WORDS(&outBuffer[indexBuf], - &tuple_header[readOffset], - attrNoOfWords); - req_struct->out_buf_index = newIndexBuf; - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - }//if - } else { - jam(); - Tablerec* regTabPtr = tabptr.p; - Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); - uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; - const uchar* srcPtr = (uchar*)&tuple_header[readOffset]; - Uint32 i = AttributeOffset::getCharsetPos(attrDes2); - ndbrequire(i < regTabPtr->noOfCharsets); - CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); - Uint32 lb, len; - bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); - Uint32 xmul = cs->strxfrm_multiply; - if (xmul == 0) - xmul = 1; - Uint32 dstLen = xmul * (srcBytes - lb); - Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); - if (maxIndexBuf <= maxRead && ok) { - jam(); - int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); - ndbrequire(n != -1); - int m = n; - while ((m & 3) != 0) { - dstPtr[m++] = 0; - } - ahOut->setByteSize(n); - Uint32 newIndexBuf = indexBuf + (m >> 2); - ndbrequire(newIndexBuf <= maxRead); - req_struct->out_buf_index = newIndexBuf; - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - } - } - return false; -} - -bool -Dbtup::readDiskFixedSizeNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if (!disk_nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readDiskFixedSizeNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - -bool -Dbtup::readDiskVarSizeNotNULL(Uint32* out_buffer, - KeyReqStruct *req_struct, - AttributeHeader* ah_out, - Uint32 attr_des2) -{ - Uint32 attr_descriptor, index_buf, var_index; - Uint32 vsize_in_bytes, vsize_in_words, new_index, max_var_size; - Uint32 var_attr_pos, max_read; - - Uint32 idx= req_struct->m_var_data[DD].m_var_len_offset; - var_index= AttributeOffset::getOffset(attr_des2); - var_attr_pos= req_struct->m_var_data[DD].m_offset_array_ptr[var_index]; - vsize_in_bytes= req_struct->m_var_data[DD].m_offset_array_ptr[var_index+idx] - var_attr_pos; - attr_descriptor= req_struct->attr_descriptor; - index_buf= req_struct->out_buf_index; - max_var_size= AttributeDescriptor::getSizeInWords(attr_descriptor); - max_read= req_struct->max_read; - vsize_in_words= convert_byte_to_word_size(vsize_in_bytes); - new_index= index_buf + vsize_in_words; - - ndbrequire(vsize_in_words <= max_var_size); - if (new_index <= max_read) { - jam(); - ah_out->setByteSize(vsize_in_bytes); - memcpy(out_buffer+index_buf, - req_struct->m_var_data[DD].m_data_ptr+var_attr_pos, - vsize_in_bytes); - req_struct->out_buf_index= new_index; - return true; - } else { - jam(); - terrorCode= ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - } -} - -bool -Dbtup::readDiskVarSizeNULLable(Uint32* outBuffer, - KeyReqStruct *req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - if (!disk_nullFlagCheck(req_struct, attrDes2)) { - jam(); - return readDiskVarSizeNotNULL(outBuffer, - req_struct, - ahOut, - attrDes2); - } else { - jam(); - ahOut->setNULL(); - return true; - } -} - - -/* 
---------------------------------------------------------------------- */ -/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */ -/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */ -/* CALLED SEVERAL TIMES FROM THE INTERPRETER. */ -// In addition to the parameters used in the call it also relies on the -// following variables set-up properly. -// -// operPtr.p Operation record pointer -// tabptr.p Table record pointer -/* ---------------------------------------------------------------------- */ -int Dbtup::updateAttributes(KeyReqStruct *req_struct, - Uint32* inBuffer, - Uint32 inBufLen) -{ - Tablerec* const regTabPtr= tabptr.p; - Operationrec* const regOperPtr= operPtr.p; - Uint32 numAttributes= regTabPtr->m_no_of_attributes; - TableDescriptor *attr_descr= req_struct->attr_descr; - - Uint32 inBufIndex= 0; - req_struct->in_buf_index= 0; - req_struct->in_buf_len= inBufLen; - - while (inBufIndex < inBufLen) { - AttributeHeader ahIn(inBuffer[inBufIndex]); - Uint32 attributeId= ahIn.getAttributeId(); - Uint32 attrDescriptorIndex= attributeId << ZAD_LOG_SIZE; - if (likely(attributeId < numAttributes)) { - Uint32 attrDescriptor= attr_descr[attrDescriptorIndex].tabDescr; - Uint32 attributeOffset= attr_descr[attrDescriptorIndex + 1].tabDescr; - if ((AttributeDescriptor::getPrimaryKey(attrDescriptor)) && - (regOperPtr->op_struct.op_type != ZINSERT)) { - if (checkUpdateOfPrimaryKey(req_struct, - &inBuffer[inBufIndex], - regTabPtr)) { - jam(); - terrorCode= ZTRY_UPDATE_PRIMARY_KEY; - return -1; - } - } - UpdateFunction f= regTabPtr->updateFunctionArray[attributeId]; - jam(); - req_struct->attr_descriptor= attrDescriptor; - req_struct->changeMask.set(attributeId); - if (attributeId >= 64) { - if (req_struct->max_attr_id_updated < attributeId) { - Uint32 no_changed_attrs= req_struct->no_changed_attrs; - req_struct->max_attr_id_updated= attributeId; - req_struct->no_changed_attrs= no_changed_attrs + 1; - } - } - if ((this->*f)(inBuffer, - req_struct, - attributeOffset)) { - inBufIndex= req_struct->in_buf_index; - continue; - } else { - jam(); - return -1; - } - } - else if(attributeId == AttributeHeader::DISK_REF) - { - jam(); - Uint32 sz= ahIn.getDataSize(); - ndbrequire(sz == 2); - req_struct->m_tuple_ptr->m_header_bits |= Tuple_header::DISK_PART; - memcpy(req_struct->m_tuple_ptr->get_disk_ref_ptr(regTabPtr), - inBuffer+inBufIndex+1, sz << 2); - inBufIndex += 1 + sz; - req_struct->in_buf_index = inBufIndex; - } - else if(attributeId == AttributeHeader::ANY_VALUE) - { - jam(); - Uint32 sz= ahIn.getDataSize(); - ndbrequire(sz == 1); - regOperPtr->m_any_value = * (inBuffer + inBufIndex + 1); - inBufIndex += 1 + sz; - req_struct->in_buf_index = inBufIndex; - } - else - { - jam(); - terrorCode= ZATTRIBUTE_ID_ERROR; - return -1; - } - } - return 0; -} - -bool -Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct, - Uint32* updateBuffer, - Tablerec* const regTabPtr) -{ - Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS]; - TableDescriptor* attr_descr = req_struct->attr_descr; - AttributeHeader ahIn(*updateBuffer); - Uint32 attributeId = ahIn.getAttributeId(); - Uint32 attrDescriptorIndex = attributeId << ZAD_LOG_SIZE; - Uint32 attrDescriptor = attr_descr[attrDescriptorIndex].tabDescr; - Uint32 attributeOffset = attr_descr[attrDescriptorIndex + 1].tabDescr; - - Uint32 xfrmBuffer[1 + MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY]; - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attributeOffset); - if (charsetFlag) { - Uint32 csIndex = AttributeOffset::getCharsetPos(attributeOffset); 
- CHARSET_INFO* cs = regTabPtr->charsetArray[csIndex]; - Uint32 srcPos = 0; - Uint32 dstPos = 0; - xfrm_attr(attrDescriptor, cs, &updateBuffer[1], srcPos, - &xfrmBuffer[1], dstPos, MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY); - ahIn.setDataSize(dstPos); - xfrmBuffer[0] = ahIn.m_value; - updateBuffer = xfrmBuffer; - } - - ReadFunction f = regTabPtr->readFunctionArray[attributeId]; - - AttributeHeader attributeHeader(attributeId, 0); - req_struct->out_buf_index = 0; - req_struct->max_read = MAX_KEY_SIZE_IN_WORDS; - req_struct->attr_descriptor = attrDescriptor; - - bool tmp = req_struct->xfrm_flag; - req_struct->xfrm_flag = true; - ndbrequire((this->*f)(&keyReadBuffer[0], - req_struct, - &attributeHeader, - attributeOffset)); - req_struct->xfrm_flag = tmp; - - ndbrequire(req_struct->out_buf_index == attributeHeader.getDataSize()); - if (ahIn.getDataSize() != attributeHeader.getDataSize()) { - jam(); - return true; - } - if (memcmp(&keyReadBuffer[0], - &updateBuffer[1], - req_struct->out_buf_index << 2) != 0) { - jam(); - return true; - } - return false; -} - -bool -Dbtup::updateFixedSizeTHOneWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Uint32 indexBuf= req_struct->in_buf_index; - Uint32 inBufLen= req_struct->in_buf_len; - Uint32 updateOffset= AttributeOffset::getOffset(attrDes2); - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 newIndex= indexBuf + 2; - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - ndbrequire(updateOffset < req_struct->check_offset[MM]); - - if (newIndex <= inBufLen) { - Uint32 updateWord= inBuffer[indexBuf + 1]; - if (!nullIndicator) { - jam(); - req_struct->in_buf_index= newIndex; - tuple_header[updateOffset]= updateWord; - return true; - } else { - jam(); - terrorCode= ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - return true; -} - -bool -Dbtup::updateFixedSizeTHTwoWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Uint32 indexBuf= req_struct->in_buf_index; - Uint32 inBufLen= req_struct->in_buf_len; - Uint32 updateOffset= AttributeOffset::getOffset(attrDes2); - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 newIndex= indexBuf + 3; - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - ndbrequire((updateOffset + 1) < req_struct->check_offset[MM]); - - if (newIndex <= inBufLen) { - Uint32 updateWord1= inBuffer[indexBuf + 1]; - Uint32 updateWord2= inBuffer[indexBuf + 2]; - if (!nullIndicator) { - jam(); - req_struct->in_buf_index= newIndex; - tuple_header[updateOffset]= updateWord1; - tuple_header[updateOffset + 1]= updateWord2; - return true; - } else { - jam(); - terrorCode= ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } -} - -bool -Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Uint32 attrDescriptor= req_struct->attr_descriptor; - Uint32 indexBuf= req_struct->in_buf_index; - Uint32 inBufLen= req_struct->in_buf_len; - Uint32 updateOffset= AttributeOffset::getOffset(attrDes2); - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2); - - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 noOfWords= AttributeDescriptor::getSizeInWords(attrDescriptor); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 newIndex= indexBuf + noOfWords + 1; - Uint32 *tuple_header= req_struct->m_tuple_ptr->m_data; - 
ndbrequire((updateOffset + noOfWords - 1) < req_struct->check_offset[MM]); - - if (newIndex <= inBufLen) { - if (!nullIndicator) { - jam(); - if (charsetFlag) { - jam(); - Tablerec* regTabPtr = tabptr.p; - Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); - Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); - Uint32 i = AttributeOffset::getCharsetPos(attrDes2); - ndbrequire(i < regTabPtr->noOfCharsets); - // not const in MySQL - CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - int not_used; - const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; - Uint32 lb, len; - if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - jam(); - terrorCode = ZINVALID_CHAR_FORMAT; - return false; - } - // fast fix bug#7340 - if (typeId != NDB_TYPE_TEXT && - (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - jam(); - terrorCode = ZINVALID_CHAR_FORMAT; - return false; - } - } - req_struct->in_buf_index= newIndex; - MEMCOPY_NO_WORDS(&tuple_header[updateOffset], - &inBuffer[indexBuf + 1], - noOfWords); - - return true; - } else { - jam(); - terrorCode= ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } -} - -bool -Dbtup::updateFixedSizeTHManyWordNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - AttributeHeader ahIn(inBuffer[req_struct->in_buf_index]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - - if (!nullIndicator) { - jam(); - BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - return updateFixedSizeTHManyWordNotNULL(inBuffer, - req_struct, - attrDes2); - } else { - Uint32 newIndex= req_struct->in_buf_index + 1; - if (newIndex <= req_struct->in_buf_len) { - BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - jam(); - req_struct->in_buf_index= newIndex; - return true; - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - } -} - -bool -Dbtup::updateVarSizeNotNULL(Uint32* in_buffer, - KeyReqStruct *req_struct, - Uint32 attr_des2) -{ - Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind; - Uint32 vsize_in_words, new_index, max_var_size; - Uint32 var_attr_pos; - char *var_data_start; - Uint16 *vpos_array; - - attr_descriptor= req_struct->attr_descriptor; - index_buf= req_struct->in_buf_index; - in_buf_len= req_struct->in_buf_len; - var_index= AttributeOffset::getOffset(attr_des2); - AttributeHeader ahIn(in_buffer[index_buf]); - null_ind= ahIn.isNULL(); - Uint32 size_in_bytes = ahIn.getByteSize(); - vsize_in_words= (size_in_bytes + 3) >> 2; - max_var_size= AttributeDescriptor::getSizeInBytes(attr_descriptor); - new_index= index_buf + vsize_in_words + 1; - vpos_array= req_struct->m_var_data[MM].m_offset_array_ptr; - Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset; - Uint32 check_offset= req_struct->m_var_data[MM].m_max_var_offset; - - if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { - if (!null_ind) { - jam(); - var_attr_pos= vpos_array[var_index]; - var_data_start= req_struct->m_var_data[MM].m_data_ptr; - vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; - req_struct->in_buf_index= new_index; - - ndbrequire(var_attr_pos+size_in_bytes <= check_offset); - memcpy(var_data_start+var_attr_pos, &in_buffer[index_buf + 1], - size_in_bytes); - return true; - } else { - jam(); - terrorCode= 
ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - return false; -} - -bool -Dbtup::updateVarSizeNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - AttributeHeader ahIn(inBuffer[req_struct->in_buf_index]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - Uint32 idx= req_struct->m_var_data[MM].m_var_len_offset; - - if (!nullIndicator) { - jam(); - BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - return updateVarSizeNotNULL(inBuffer, - req_struct, - attrDes2); - } else { - Uint32 newIndex= req_struct->in_buf_index + 1; - Uint32 var_index= AttributeOffset::getOffset(attrDes2); - Uint32 var_pos= req_struct->var_pos_array[var_index]; - if (newIndex <= req_struct->in_buf_len) { - jam(); - BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - req_struct->var_pos_array[var_index+idx]= var_pos; - req_struct->in_buf_index= newIndex; - return true; - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - } -} - -bool -Dbtup::updateDynFixedSize(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - jam(); - terrorCode= ZVAR_SIZED_NOT_SUPPORTED; - return false; -} - -bool -Dbtup::updateDynVarSize(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - jam(); - terrorCode= ZVAR_SIZED_NOT_SUPPORTED; - return false; -} - -Uint32 -Dbtup::read_pseudo(Uint32 attrId, - KeyReqStruct *req_struct, - Uint32* outBuffer) -{ - Uint32 tmp[sizeof(SignalHeader)+25]; - Signal * signal = (Signal*)&tmp; - switch(attrId){ - case AttributeHeader::FRAGMENT: - * outBuffer = fragptr.p->fragmentId; - return 1; - case AttributeHeader::FRAGMENT_FIXED_MEMORY: - { - Uint64 tmp= fragptr.p->noOfPages; - tmp*= 32768; - memcpy(outBuffer,&tmp,8); - } - return 2; - case AttributeHeader::FRAGMENT_VARSIZED_MEMORY: - { - Uint64 tmp= fragptr.p->noOfVarPages; - tmp*= 32768; - memcpy(outBuffer,&tmp,8); - } - return 2; - case AttributeHeader::ROW_SIZE: - * outBuffer = tabptr.p->m_offsets[MM].m_fix_header_size << 2; - return 1; - case AttributeHeader::ROW_COUNT: - case AttributeHeader::COMMIT_COUNT: - signal->theData[0] = operPtr.p->userpointer; - signal->theData[1] = attrId; - - EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2); - outBuffer[0] = signal->theData[0]; - outBuffer[1] = signal->theData[1]; - return 2; - case AttributeHeader::RANGE_NO: - signal->theData[0] = operPtr.p->userpointer; - signal->theData[1] = attrId; - - EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2); - outBuffer[0] = signal->theData[0]; - return 1; - case AttributeHeader::DISK_REF: - { - Uint32 *ref= req_struct->m_tuple_ptr->get_disk_ref_ptr(tabptr.p); - outBuffer[0] = ref[0]; - outBuffer[1] = ref[1]; - return 2; - } - case AttributeHeader::RECORDS_IN_RANGE: - signal->theData[0] = operPtr.p->userpointer; - signal->theData[1] = attrId; - - EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2); - outBuffer[0] = signal->theData[0]; - outBuffer[1] = signal->theData[1]; - outBuffer[2] = signal->theData[2]; - outBuffer[3] = signal->theData[3]; - return 4; - case AttributeHeader::ROWID: - outBuffer[0] = req_struct->frag_page_id; - outBuffer[1] = operPtr.p->m_tuple_location.m_page_idx; - return 2; - case AttributeHeader::ROW_GCI: - if (tabptr.p->m_bits & Tablerec::TR_RowGCI) - { - Uint64 tmp = * 
req_struct->m_tuple_ptr->get_mm_gci(tabptr.p); - memcpy(outBuffer, &tmp, sizeof(tmp)); - return 2; - } - return 0; - case AttributeHeader::COPY_ROWID: - outBuffer[0] = operPtr.p->m_copy_tuple_location.m_page_no; - outBuffer[1] = operPtr.p->m_copy_tuple_location.m_page_idx; - return 2; - default: - return 0; - } -} - -bool -Dbtup::readBitsNotNULL(Uint32* outBuffer, - KeyReqStruct* req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 indexBuf = req_struct->out_buf_index; - Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5); - Uint32 maxRead = req_struct->max_read; - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setDataSize((bitCount + 31) >> 5); - req_struct->out_buf_index = newIndexBuf; - - BitmaskImpl::getField(regTabPtr->m_offsets[MM].m_null_words, bits, pos, - bitCount, outBuffer+indexBuf); - - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - }//if -} - -bool -Dbtup::readBitsNULLable(Uint32* outBuffer, - KeyReqStruct* req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - - Uint32 indexBuf = req_struct->out_buf_index; - Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5); - Uint32 maxRead = req_struct->max_read; - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - - if(BitmaskImpl::get(regTabPtr->m_offsets[MM].m_null_words, bits, pos)) - { - jam(); - ahOut->setNULL(); - return true; - } - - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setDataSize((bitCount + 31) >> 5); - req_struct->out_buf_index = newIndexBuf; - BitmaskImpl::getField(regTabPtr->m_offsets[MM].m_null_words, bits, pos+1, - bitCount, outBuffer+indexBuf); - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - }//if -} - -bool -Dbtup::updateBitsNotNULL(Uint32* inBuffer, - KeyReqStruct* req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 indexBuf = req_struct->in_buf_index; - Uint32 inBufLen = req_struct->in_buf_len; - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator = ahIn.isNULL(); - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5); - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - - if (newIndex <= inBufLen) { - if (!nullIndicator) { - BitmaskImpl::setField(regTabPtr->m_offsets[MM].m_null_words, bits, pos, - bitCount, inBuffer+indexBuf+1); - req_struct->in_buf_index = newIndex; - return true; - } else { - jam(); - terrorCode = ZNOT_NULL_ATTR; - return false; - }//if - } else { - jam(); - terrorCode = ZAI_INCONSISTENCY_ERROR; - return false; - }//if - return true; -} - -bool -Dbtup::updateBitsNULLable(Uint32* inBuffer, - KeyReqStruct* req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 indexBuf = req_struct->in_buf_index; - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator = ahIn.isNULL(); - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - 
AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 *bits= req_struct->m_tuple_ptr->get_null_bits(regTabPtr); - - if (!nullIndicator) { - BitmaskImpl::clear(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - BitmaskImpl::setField(regTabPtr->m_offsets[MM].m_null_words, bits, pos+1, - bitCount, inBuffer+indexBuf+1); - - Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5); - req_struct->in_buf_index = newIndex; - return true; - } else { - Uint32 newIndex = indexBuf + 1; - if (newIndex <= req_struct->in_buf_len) - { - jam(); - BitmaskImpl::set(regTabPtr->m_offsets[MM].m_null_words, bits, pos); - - req_struct->in_buf_index = newIndex; - return true; - } else { - jam(); - terrorCode = ZAI_INCONSISTENCY_ERROR; - return false; - }//if - }//if -} - -bool -Dbtup::updateDiskFixedSizeNotNULL(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Uint32 attrDescriptor= req_struct->attr_descriptor; - Uint32 indexBuf= req_struct->in_buf_index; - Uint32 inBufLen= req_struct->in_buf_len; - Uint32 updateOffset= AttributeOffset::getOffset(attrDes2); - Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2); - - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 noOfWords= AttributeDescriptor::getSizeInWords(attrDescriptor); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 newIndex= indexBuf + noOfWords + 1; - Uint32 *tuple_header= req_struct->m_disk_ptr->m_data; - ndbrequire((updateOffset + noOfWords - 1) < req_struct->check_offset[DD]); - - if (newIndex <= inBufLen) { - if (!nullIndicator) { - jam(); - if (charsetFlag) { - jam(); - Tablerec* regTabPtr = tabptr.p; - Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); - Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); - Uint32 i = AttributeOffset::getCharsetPos(attrDes2); - ndbrequire(i < regTabPtr->noOfCharsets); - // not const in MySQL - CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - int not_used; - const char* ssrc = (const char*)&inBuffer[indexBuf + 1]; - Uint32 lb, len; - if (! 
NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { - jam(); - terrorCode = ZINVALID_CHAR_FORMAT; - return false; - } - // fast fix bug#7340 - if (typeId != NDB_TYPE_TEXT && - (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, ¬_used) != len) { - jam(); - terrorCode = ZINVALID_CHAR_FORMAT; - return false; - } - } - req_struct->in_buf_index= newIndex; - MEMCOPY_NO_WORDS(&tuple_header[updateOffset], - &inBuffer[indexBuf + 1], - noOfWords); - return true; - } else { - jam(); - terrorCode= ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } -} - -bool -Dbtup::updateDiskFixedSizeNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - AttributeHeader ahIn(inBuffer[req_struct->in_buf_index]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - - if (!nullIndicator) { - jam(); - BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - return updateDiskFixedSizeNotNULL(inBuffer, - req_struct, - attrDes2); - } else { - Uint32 newIndex= req_struct->in_buf_index + 1; - if (newIndex <= req_struct->in_buf_len) { - BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - jam(); - req_struct->in_buf_index= newIndex; - return true; - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - } -} - -bool -Dbtup::updateDiskVarSizeNotNULL(Uint32* in_buffer, - KeyReqStruct *req_struct, - Uint32 attr_des2) -{ - Uint32 attr_descriptor, index_buf, in_buf_len, var_index, null_ind; - Uint32 vsize_in_words, new_index, max_var_size; - Uint32 var_attr_pos; - char *var_data_start; - Uint16 *vpos_array; - - attr_descriptor= req_struct->attr_descriptor; - index_buf= req_struct->in_buf_index; - in_buf_len= req_struct->in_buf_len; - var_index= AttributeOffset::getOffset(attr_des2); - AttributeHeader ahIn(in_buffer[index_buf]); - null_ind= ahIn.isNULL(); - Uint32 size_in_bytes = ahIn.getByteSize(); - vsize_in_words= (size_in_bytes + 3) >> 2; - max_var_size= AttributeDescriptor::getSizeInBytes(attr_descriptor); - new_index= index_buf + vsize_in_words + 1; - vpos_array= req_struct->m_var_data[DD].m_offset_array_ptr; - Uint32 idx= req_struct->m_var_data[DD].m_var_len_offset; - Uint32 check_offset= req_struct->m_var_data[DD].m_max_var_offset; - - if (new_index <= in_buf_len && vsize_in_words <= max_var_size) { - if (!null_ind) { - jam(); - var_attr_pos= vpos_array[var_index]; - var_data_start= req_struct->m_var_data[DD].m_data_ptr; - vpos_array[var_index+idx]= var_attr_pos+size_in_bytes; - req_struct->in_buf_index= new_index; - - ndbrequire(var_attr_pos+size_in_bytes <= check_offset); - memcpy(var_data_start+var_attr_pos, &in_buffer[index_buf + 1], - size_in_bytes); - return true; - } else { - jam(); - terrorCode= ZNOT_NULL_ATTR; - return false; - } - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - return false; -} - -bool -Dbtup::updateDiskVarSizeNULLable(Uint32* inBuffer, - KeyReqStruct *req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr= tabptr.p; - AttributeHeader ahIn(inBuffer[req_struct->in_buf_index]); - Uint32 nullIndicator= ahIn.isNULL(); - Uint32 pos= AttributeOffset::getNullFlagPos(attrDes2); - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - Uint32 idx= req_struct->m_var_data[DD].m_var_len_offset; - - if (!nullIndicator) { - jam(); - 
BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - return updateDiskVarSizeNotNULL(inBuffer, - req_struct, - attrDes2); - } else { - Uint32 newIndex= req_struct->in_buf_index + 1; - Uint32 var_index= AttributeOffset::getOffset(attrDes2); - Uint32 var_pos= req_struct->var_pos_array[var_index]; - if (newIndex <= req_struct->in_buf_len) { - jam(); - BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - req_struct->var_pos_array[var_index+idx]= var_pos; - req_struct->in_buf_index= newIndex; - return true; - } else { - jam(); - terrorCode= ZAI_INCONSISTENCY_ERROR; - return false; - } - } -} - -bool -Dbtup::readDiskBitsNotNULL(Uint32* outBuffer, - KeyReqStruct* req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 indexBuf = req_struct->out_buf_index; - Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5); - Uint32 maxRead = req_struct->max_read; - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setDataSize((bitCount + 31) >> 5); - req_struct->out_buf_index = newIndexBuf; - - BitmaskImpl::getField(regTabPtr->m_offsets[DD].m_null_words, bits, pos, - bitCount, outBuffer+indexBuf); - - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - }//if -} - -bool -Dbtup::readDiskBitsNULLable(Uint32* outBuffer, - KeyReqStruct* req_struct, - AttributeHeader* ahOut, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - - Uint32 indexBuf = req_struct->out_buf_index; - Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5); - Uint32 maxRead = req_struct->max_read; - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - - if(BitmaskImpl::get(regTabPtr->m_offsets[DD].m_null_words, bits, pos)) - { - jam(); - ahOut->setNULL(); - return true; - } - - if (newIndexBuf <= maxRead) { - jam(); - ahOut->setDataSize((bitCount + 31) >> 5); - req_struct->out_buf_index = newIndexBuf; - BitmaskImpl::getField(regTabPtr->m_offsets[DD].m_null_words, bits, pos+1, - bitCount, outBuffer+indexBuf); - return true; - } else { - jam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; - return false; - }//if -} - -bool -Dbtup::updateDiskBitsNotNULL(Uint32* inBuffer, - KeyReqStruct* req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 indexBuf = req_struct->in_buf_index; - Uint32 inBufLen = req_struct->in_buf_len; - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator = ahIn.isNULL(); - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5); - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - - if (newIndex <= inBufLen) { - if (!nullIndicator) { - BitmaskImpl::setField(regTabPtr->m_offsets[DD].m_null_words, bits, pos, - bitCount, inBuffer+indexBuf+1); - req_struct->in_buf_index = newIndex; - return true; - } else { - jam(); - terrorCode = ZNOT_NULL_ATTR; - return false; - }//if - } else { - jam(); - terrorCode = ZAI_INCONSISTENCY_ERROR; - return false; - }//if - return true; -} - -bool -Dbtup::updateDiskBitsNULLable(Uint32* 
inBuffer, - KeyReqStruct* req_struct, - Uint32 attrDes2) -{ - Tablerec* const regTabPtr = tabptr.p; - Uint32 indexBuf = req_struct->in_buf_index; - AttributeHeader ahIn(inBuffer[indexBuf]); - Uint32 nullIndicator = ahIn.isNULL(); - Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2); - Uint32 bitCount = - AttributeDescriptor::getArraySize(req_struct->attr_descriptor); - Uint32 *bits= req_struct->m_disk_ptr->get_null_bits(regTabPtr, DD); - - if (!nullIndicator) { - BitmaskImpl::clear(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - BitmaskImpl::setField(regTabPtr->m_offsets[DD].m_null_words, bits, pos+1, - bitCount, inBuffer+indexBuf+1); - - Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5); - req_struct->in_buf_index = newIndex; - return true; - } else { - Uint32 newIndex = indexBuf + 1; - if (newIndex <= req_struct->in_buf_len) - { - jam(); - BitmaskImpl::set(regTabPtr->m_offsets[DD].m_null_words, bits, pos); - - req_struct->in_buf_index = newIndex; - return true; - } else { - jam(); - terrorCode = ZAI_INCONSISTENCY_ERROR; - return false; - }//if - }//if -} - diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp deleted file mode 100644 index 572be897a13..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp +++ /dev/null @@ -1,1209 +0,0 @@ -/* Copyright (c) 2003, 2005-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_SCAN_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include - -#undef jam -#undef jamEntry -#define jam() { jamLine(32000 + __LINE__); } -#define jamEntry() { jamEntryLine(32000 + __LINE__); } - -#ifdef VM_TRACE -#define dbg(x) globalSignalLoggers.log x -#else -#define dbg(x) -#endif - -void -Dbtup::execACC_SCANREQ(Signal* signal) -{ - jamEntry(); - const AccScanReq reqCopy = *(const AccScanReq*)signal->getDataPtr(); - const AccScanReq* const req = &reqCopy; - ScanOpPtr scanPtr; - scanPtr.i = RNIL; - do { - // find table and fragment - TablerecPtr tablePtr; - tablePtr.i = req->tableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - FragrecordPtr fragPtr; - Uint32 fragId = req->fragmentNo; - fragPtr.i = RNIL; - getFragmentrec(fragPtr, fragId, tablePtr.p); - ndbrequire(fragPtr.i != RNIL); - Fragrecord& frag = *fragPtr.p; - // flags - Uint32 bits = 0; - - - if (AccScanReq::getLcpScanFlag(req->requestInfo)) - { - jam(); - bits |= ScanOp::SCAN_LCP; - c_scanOpPool.getPtr(scanPtr, c_lcp_scan_op); - } - else - { - // seize from pool and link to per-fragment list - LocalDLList list(c_scanOpPool, frag.m_scanList); - if (! 
list.seize(scanPtr)) { - jam(); - break; - } - } - - if (!AccScanReq::getNoDiskScanFlag(req->requestInfo) - && tablePtr.p->m_no_of_disk_attributes) - { - bits |= ScanOp::SCAN_DD; - } - - bool mm = (bits & ScanOp::SCAN_DD); - if (tablePtr.p->m_attributes[mm].m_no_of_varsize > 0) { - bits |= ScanOp::SCAN_VS; - - // disk pages have fixed page format - ndbrequire(! (bits & ScanOp::SCAN_DD)); - } - if (! AccScanReq::getReadCommittedFlag(req->requestInfo)) { - if (AccScanReq::getLockMode(req->requestInfo) == 0) - bits |= ScanOp::SCAN_LOCK_SH; - else - bits |= ScanOp::SCAN_LOCK_EX; - } - - if (AccScanReq::getNRScanFlag(req->requestInfo)) - { - jam(); - bits |= ScanOp::SCAN_NR; - scanPtr.p->m_endPage = req->maxPage; - if (req->maxPage != RNIL && req->maxPage > frag.noOfPages) - { - ndbout_c("%u %u endPage: %u (noOfPages: %u)", - tablePtr.i, fragId, - req->maxPage, fragPtr.p->noOfPages); - } - } - else - { - jam(); - scanPtr.p->m_endPage = RNIL; - } - - if (AccScanReq::getLcpScanFlag(req->requestInfo)) - { - jam(); - ndbrequire((bits & ScanOp::SCAN_DD) == 0); - ndbrequire((bits & ScanOp::SCAN_LOCK) == 0); - } - - // set up scan op - new (scanPtr.p) ScanOp(); - ScanOp& scan = *scanPtr.p; - scan.m_state = ScanOp::First; - scan.m_bits = bits; - scan.m_userPtr = req->senderData; - scan.m_userRef = req->senderRef; - scan.m_tableId = tablePtr.i; - scan.m_fragId = frag.fragmentId; - scan.m_fragPtrI = fragPtr.i; - scan.m_transId1 = req->transId1; - scan.m_transId2 = req->transId2; - scan.m_savePointId = req->savePointId; - - // conf - AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend(); - conf->scanPtr = req->senderData; - conf->accPtr = scanPtr.i; - conf->flag = AccScanConf::ZNOT_EMPTY_FRAGMENT; - sendSignal(req->senderRef, GSN_ACC_SCANCONF, - signal, AccScanConf::SignalLength, JBB); - return; - } while (0); - if (scanPtr.i != RNIL) { - jam(); - releaseScanOp(scanPtr); - } - // LQH does not handle REF - signal->theData[0] = 0x313; - sendSignal(req->senderRef, GSN_ACC_SCANREF, signal, 1, JBB); -} - -void -Dbtup::execNEXT_SCANREQ(Signal* signal) -{ - jamEntry(); - const NextScanReq reqCopy = *(const NextScanReq*)signal->getDataPtr(); - const NextScanReq* const req = &reqCopy; - ScanOpPtr scanPtr; - c_scanOpPool.getPtr(scanPtr, req->accPtr); - ScanOp& scan = *scanPtr.p; - switch (req->scanFlag) { - case NextScanReq::ZSCAN_NEXT: - jam(); - break; - case NextScanReq::ZSCAN_NEXT_COMMIT: - jam(); - case NextScanReq::ZSCAN_COMMIT: - jam(); - if ((scan.m_bits & ScanOp::SCAN_LOCK) != 0) { - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Unlock; - lockReq->accOpPtr = req->accOperationPtr; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, - signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - removeAccLockOp(scan, req->accOperationPtr); - } - if (req->scanFlag == NextScanReq::ZSCAN_COMMIT) { - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - unsigned signalLength = 1; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; - } - break; - case NextScanReq::ZSCAN_CLOSE: - jam(); - if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) { - jam(); - ndbrequire(scan.m_accLockOp != RNIL); - // use ACC_ABORTCONF to flush out any reply in job buffer - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = 
AccLockReq::AbortWithConf; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, - signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_state = ScanOp::Aborting; - return; - } - if (scan.m_state == ScanOp::Locked) { - jam(); - ndbrequire(scan.m_accLockOp != RNIL); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, - signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - } - scan.m_state = ScanOp::Aborting; - scanClose(signal, scanPtr); - return; - case NextScanReq::ZSCAN_NEXT_ABORT: - jam(); - default: - jam(); - ndbrequire(false); - break; - } - // start looking for next scan result - AccCheckScan* checkReq = (AccCheckScan*)signal->getDataPtrSend(); - checkReq->accPtr = scanPtr.i; - checkReq->checkLcpStop = AccCheckScan::ZNOT_CHECK_LCP_STOP; - EXECUTE_DIRECT(DBTUP, GSN_ACC_CHECK_SCAN, signal, AccCheckScan::SignalLength); - jamEntry(); -} - -void -Dbtup::execACC_CHECK_SCAN(Signal* signal) -{ - jamEntry(); - const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr(); - const AccCheckScan* const req = &reqCopy; - ScanOpPtr scanPtr; - c_scanOpPool.getPtr(scanPtr, req->accPtr); - ScanOp& scan = *scanPtr.p; - // fragment - FragrecordPtr fragPtr; - fragPtr.i = scan.m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - Fragrecord& frag = *fragPtr.p; - if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) { - jam(); - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; - } - if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) { - jam(); - // LQH asks if we are waiting for lock and we tell it to ask again - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = RNIL; // no tuple returned - conf->fragId = frag.fragmentId; - unsigned signalLength = 3; - // if TC has ordered scan close, it will be detected here - sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; // stop - } - if (scan.m_state == ScanOp::First) { - jam(); - scanFirst(signal, scanPtr); - } - if (scan.m_state == ScanOp::Next) { - jam(); - bool immediate = scanNext(signal, scanPtr); - if (! 
immediate) { - jam(); - // time-slicing via TUP or PGMAN - return; - } - } - scanReply(signal, scanPtr); -} - -void -Dbtup::scanReply(Signal* signal, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - FragrecordPtr fragPtr; - fragPtr.i = scan.m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - Fragrecord& frag = *fragPtr.p; - // for reading tuple key in Current state - Uint32* pkData = (Uint32*)c_dataBuffer; - unsigned pkSize = 0; - if (scan.m_state == ScanOp::Current) { - // found an entry to return - jam(); - ndbrequire(scan.m_accLockOp == RNIL); - if (scan.m_bits & ScanOp::SCAN_LOCK) { - jam(); - // read tuple key - use TUX routine - const ScanPos& pos = scan.m_scanPos; - const Local_key& key_mm = pos.m_key_mm; - int ret = tuxReadPk(fragPtr.i, pos.m_realpid_mm, key_mm.m_page_idx, - pkData, true); - ndbrequire(ret > 0); - pkSize = ret; - dbg((DBTUP, "PK size=%d data=%08x", pkSize, pkData[0])); - // get read lock or exclusive lock - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = (scan.m_bits & ScanOp::SCAN_LOCK_SH) ? - AccLockReq::LockShared : AccLockReq::LockExclusive; - lockReq->accOpPtr = RNIL; - lockReq->userPtr = scanPtr.i; - lockReq->userRef = reference(); - lockReq->tableId = scan.m_tableId; - lockReq->fragId = frag.fragmentId; - lockReq->fragPtrI = RNIL; // no cached frag ptr yet - lockReq->hashValue = md5_hash((Uint64*)pkData, pkSize); - lockReq->tupAddr = key_mm.ref(); - lockReq->transId1 = scan.m_transId1; - lockReq->transId2 = scan.m_transId2; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, - signal, AccLockReq::LockSignalLength); - jamEntry(); - switch (lockReq->returnCode) { - case AccLockReq::Success: - jam(); - scan.m_state = ScanOp::Locked; - scan.m_accLockOp = lockReq->accOpPtr; - break; - case AccLockReq::IsBlocked: - jam(); - // normal lock wait - scan.m_state = ScanOp::Blocked; - scan.m_bits |= ScanOp::SCAN_LOCK_WAIT; - scan.m_accLockOp = lockReq->accOpPtr; - // LQH will wake us up - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; - break; - case AccLockReq::Refused: - jam(); - // we cannot see deleted tuple (assert only) - ndbassert(false); - // skip it - scan.m_state = ScanOp::Next; - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; - break; - case AccLockReq::NoFreeOp: - jam(); - // max ops should depend on max scans (assert only) - ndbassert(false); - // stay in Current state - scan.m_state = ScanOp::Current; - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; - break; - default: - ndbrequire(false); - break; - } - } else { - scan.m_state = ScanOp::Locked; - } - } - - if (scan.m_state == ScanOp::Locked) { - // we have lock or do not need one - jam(); - // conf signal - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - // the lock is passed to LQH - Uint32 accLockOp = scan.m_accLockOp; - if (accLockOp != RNIL) { - scan.m_accLockOp = RNIL; - // remember it until LQH unlocks it - addAccLockOp(scan, accLockOp); - } else { - ndbrequire(! 
(scan.m_bits & ScanOp::SCAN_LOCK)); - // operation RNIL in LQH would signal no tuple returned - accLockOp = (Uint32)-1; - } - const ScanPos& pos = scan.m_scanPos; - conf->accOperationPtr = accLockOp; - conf->fragId = frag.fragmentId; - conf->localKey[0] = pos.m_key_mm.ref(); - conf->localKey[1] = 0; - conf->localKeyLength = 1; - unsigned signalLength = 6; - if (scan.m_bits & ScanOp::SCAN_LOCK) { - sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - } else { - Uint32 blockNo = refToBlock(scan.m_userRef); - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength); - jamEntry(); - } - // next time look for next entry - scan.m_state = ScanOp::Next; - return; - } - if (scan.m_state == ScanOp::Last || - scan.m_state == ScanOp::Invalid) { - jam(); - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = RNIL; - conf->fragId = RNIL; - unsigned signalLength = 3; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; - } - ndbrequire(false); -} - -/* - * Lock succeeded (after delay) in ACC. If the lock is for current - * entry, set state to Locked. If the lock is for an entry we were - * moved away from, simply unlock it. Finally, if we are closing the - * scan, do nothing since we have already sent an abort request. - */ -void -Dbtup::execACCKEYCONF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; - ndbrequire(scan.m_bits & ScanOp::SCAN_LOCK_WAIT && scan.m_accLockOp != RNIL); - scan.m_bits &= ~ ScanOp::SCAN_LOCK_WAIT; - if (scan.m_state == ScanOp::Blocked) { - // the lock wait was for current entry - jam(); - scan.m_state = ScanOp::Locked; - // LQH has the ball - return; - } - if (scan.m_state != ScanOp::Aborting) { - // we were moved, release lock - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - // LQH has the ball - return; - } - // lose the lock - scan.m_accLockOp = RNIL; - // continue at ACC_ABORTCONF -} - -/* - * Lock failed (after delay) in ACC. Probably means somebody ahead of - * us in lock queue deleted the tuple. 
- */ -void -Dbtup::execACCKEYREF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; - ndbrequire(scan.m_bits & ScanOp::SCAN_LOCK_WAIT && scan.m_accLockOp != RNIL); - scan.m_bits &= ~ ScanOp::SCAN_LOCK_WAIT; - if (scan.m_state != ScanOp::Aborting) { - jam(); - // release the operation - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - // scan position should already have been moved (assert only) - if (scan.m_state == ScanOp::Blocked) { - jam(); - //ndbassert(false); - if (scan.m_bits & ScanOp::SCAN_NR) - { - jam(); - scan.m_state = ScanOp::Next; - scan.m_scanPos.m_get = ScanPos::Get_tuple; - ndbout_c("Ignoring scan.m_state == ScanOp::Blocked, refetch"); - } - else - { - jam(); - scan.m_state = ScanOp::Next; - ndbout_c("Ignoring scan.m_state == ScanOp::Blocked"); - } - } - // LQH has the ball - return; - } - // lose the lock - scan.m_accLockOp = RNIL; - // continue at ACC_ABORTCONF -} - -/* - * Received when scan is closing. This signal arrives after any - * ACCKEYCON or ACCKEYREF which may have been in job buffer. - */ -void -Dbtup::execACC_ABORTCONF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; - ndbrequire(scan.m_state == ScanOp::Aborting); - // most likely we are still in lock wait - if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) { - jam(); - scan.m_bits &= ~ ScanOp::SCAN_LOCK_WAIT; - scan.m_accLockOp = RNIL; - } - scanClose(signal, scanPtr); -} - -void -Dbtup::scanFirst(Signal*, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - ScanPos& pos = scan.m_scanPos; - Local_key& key = pos.m_key; - const Uint32 bits = scan.m_bits; - // fragment - FragrecordPtr fragPtr; - fragPtr.i = scan.m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - Fragrecord& frag = *fragPtr.p; - // in the future should not pre-allocate pages - if (frag.noOfPages == 0 && ((bits & ScanOp::SCAN_NR) == 0)) { - jam(); - scan.m_state = ScanOp::Last; - return; - } - if (! 
(bits & ScanOp::SCAN_DD)) { - key.m_file_no = ZNIL; - key.m_page_no = 0; - pos.m_get = ScanPos::Get_page_mm; - // for MM scan real page id is cached for efficiency - pos.m_realpid_mm = RNIL; - } else { - Disk_alloc_info& alloc = frag.m_disk_alloc_info; - // for now must check disk part explicitly - if (alloc.m_extent_list.firstItem == RNIL) { - jam(); - scan.m_state = ScanOp::Last; - return; - } - pos.m_extent_info_ptr_i = alloc.m_extent_list.firstItem; - Extent_info* ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i); - key.m_file_no = ext->m_key.m_file_no; - key.m_page_no = ext->m_first_page_no; - pos.m_get = ScanPos::Get_page_dd; - } - key.m_page_idx = 0; - // let scanNext() do the work - scan.m_state = ScanOp::Next; -} - -bool -Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - ScanPos& pos = scan.m_scanPos; - Local_key& key = pos.m_key; - const Uint32 bits = scan.m_bits; - // table - TablerecPtr tablePtr; - tablePtr.i = scan.m_tableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - Tablerec& table = *tablePtr.p; - // fragment - FragrecordPtr fragPtr; - fragPtr.i = scan.m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - Fragrecord& frag = *fragPtr.p; - // tuple found - Tuple_header* th = 0; - Uint32 thbits = 0; - Uint32 loop_count = 0; - Uint32 scanGCI = scanPtr.p->m_scanGCI; - Uint32 foundGCI; - - const bool mm = (bits & ScanOp::SCAN_DD); - const bool lcp = (bits & ScanOp::SCAN_LCP); - - Uint32 lcp_list = fragPtr.p->m_lcp_keep_list; - Uint32 size = table.m_offsets[mm].m_fix_header_size; - - if (lcp && lcp_list != RNIL) - goto found_lcp_keep; - - switch(pos.m_get){ - case ScanPos::Get_next_tuple: - case ScanPos::Get_next_tuple_fs: - jam(); - key.m_page_idx += size; - // fall through - case ScanPos::Get_tuple: - case ScanPos::Get_tuple_fs: - jam(); - /** - * We need to refetch page after timeslice - */ - pos.m_get = ScanPos::Get_page; - break; - default: - break; - } - - while (true) { - switch (pos.m_get) { - case ScanPos::Get_next_page: - // move to next page - jam(); - { - if (! (bits & ScanOp::SCAN_DD)) - pos.m_get = ScanPos::Get_next_page_mm; - else - pos.m_get = ScanPos::Get_next_page_dd; - } - continue; - case ScanPos::Get_page: - // get real page - jam(); - { - if (! (bits & ScanOp::SCAN_DD)) - pos.m_get = ScanPos::Get_page_mm; - else - pos.m_get = ScanPos::Get_page_dd; - } - continue; - case ScanPos::Get_next_page_mm: - // move to next logical TUP page - jam(); - { - key.m_page_no++; - if (key.m_page_no >= frag.noOfPages) { - jam(); - - if ((bits & ScanOp::SCAN_NR) && (scan.m_endPage != RNIL)) - { - jam(); - if (key.m_page_no < scan.m_endPage) - { - jam(); - ndbout_c("scanning page %u", key.m_page_no); - goto cont; - } - } - // no more pages, scan ends - pos.m_get = ScanPos::Get_undef; - scan.m_state = ScanOp::Last; - return true; - } - cont: - key.m_page_idx = 0; - pos.m_get = ScanPos::Get_page_mm; - // clear cached value - pos.m_realpid_mm = RNIL; - } - /*FALLTHRU*/ - case ScanPos::Get_page_mm: - // get TUP real page - jam(); - { - if (pos.m_realpid_mm == RNIL) { - jam(); - if (key.m_page_no < frag.noOfPages) - pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no); - else - { - ndbassert(bits & ScanOp::SCAN_NR); - goto nopage; - } - } - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, pos.m_realpid_mm); - - if (pagePtr.p->page_state == ZEMPTY_MM) { - // skip empty page - jam(); - if (! 
(bits & ScanOp::SCAN_NR)) - { - pos.m_get = ScanPos::Get_next_page_mm; - break; // incr loop count - } - else - { - jam(); - pos.m_realpid_mm = RNIL; - } - } - nopage: - pos.m_page = pagePtr.p; - pos.m_get = ScanPos::Get_tuple; - } - continue; - case ScanPos::Get_next_page_dd: - // move to next disk page - jam(); - { - Disk_alloc_info& alloc = frag.m_disk_alloc_info; - Local_fragment_extent_list list(c_extent_pool, alloc.m_extent_list); - Ptr ext_ptr; - c_extent_pool.getPtr(ext_ptr, pos.m_extent_info_ptr_i); - Extent_info* ext = ext_ptr.p; - key.m_page_no++; - if (key.m_page_no >= ext->m_first_page_no + alloc.m_extent_size) { - // no more pages in this extent - jam(); - if (! list.next(ext_ptr)) { - // no more extents, scan ends - jam(); - pos.m_get = ScanPos::Get_undef; - scan.m_state = ScanOp::Last; - return true; - } else { - // move to next extent - jam(); - pos.m_extent_info_ptr_i = ext_ptr.i; - ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i); - key.m_file_no = ext->m_key.m_file_no; - key.m_page_no = ext->m_first_page_no; - } - } - key.m_page_idx = 0; - pos.m_get = ScanPos::Get_page_dd; - /* - read ahead for scan in disk order - do read ahead every 8:th page - */ - if ((bits & ScanOp::SCAN_DD) && - (((key.m_page_no - ext->m_first_page_no) & 7) == 0)) - { - jam(); - // initialize PGMAN request - Page_cache_client::Request preq; - preq.m_page = pos.m_key; - preq.m_callback = TheNULLCallback; - - // set maximum read ahead - Uint32 read_ahead = m_max_page_read_ahead; - - while (true) - { - // prepare page read ahead in current extent - Uint32 page_no = preq.m_page.m_page_no; - Uint32 page_no_limit = page_no + read_ahead; - Uint32 limit = ext->m_first_page_no + alloc.m_extent_size; - if (page_no_limit > limit) - { - jam(); - // read ahead crosses extent, set limit for this extent - read_ahead = page_no_limit - limit; - page_no_limit = limit; - // and make sure we only read one extra extent next time around - if (read_ahead > alloc.m_extent_size) - read_ahead = alloc.m_extent_size; - } - else - { - jam(); - read_ahead = 0; // no more to read ahead after this - } - // do read ahead pages for this extent - while (page_no < page_no_limit) - { - // page request to PGMAN - jam(); - preq.m_page.m_page_no = page_no; - int flags = 0; - // ignore result - m_pgman.get_page(signal, preq, flags); - jamEntry(); - page_no++; - } - if (!read_ahead || !list.next(ext_ptr)) - { - // no more extents after this or read ahead done - jam(); - break; - } - // move to next extent and initialize PGMAN request accordingly - Extent_info* ext = c_extent_pool.getPtr(ext_ptr.i); - preq.m_page.m_file_no = ext->m_key.m_file_no; - preq.m_page.m_page_no = ext->m_first_page_no; - } - } // if ScanOp::SCAN_DD read ahead - } - /*FALLTHRU*/ - case ScanPos::Get_page_dd: - // get global page in PGMAN cache - jam(); - { - // check if page is un-allocated or empty - if (likely(! 
(bits & ScanOp::SCAN_NR))) - { - Tablespace_client tsman(signal, c_tsman, - frag.fragTableId, - frag.fragmentId, - frag.m_tablespace_id); - unsigned uncommitted, committed; - uncommitted = committed = ~(unsigned)0; - int ret = tsman.get_page_free_bits(&key, &uncommitted, &committed); - ndbrequire(ret == 0); - if (committed == 0 && uncommitted == 0) { - // skip empty page - jam(); - pos.m_get = ScanPos::Get_next_page_dd; - break; // incr loop count - } - } - // page request to PGMAN - Page_cache_client::Request preq; - preq.m_page = pos.m_key; - preq.m_callback.m_callbackData = scanPtr.i; - preq.m_callback.m_callbackFunction = - safe_cast(&Dbtup::disk_page_tup_scan_callback); - int flags = 0; - int res = m_pgman.get_page(signal, preq, flags); - jamEntry(); - if (res == 0) { - jam(); - // request queued - pos.m_get = ScanPos::Get_tuple; - return false; - } - ndbrequire(res > 0); - pos.m_page = (Page*)m_pgman.m_ptr.p; - } - pos.m_get = ScanPos::Get_tuple; - continue; - // get tuple - // move to next tuple - case ScanPos::Get_next_tuple: - case ScanPos::Get_next_tuple_fs: - // move to next fixed size tuple - jam(); - { - key.m_page_idx += size; - pos.m_get = ScanPos::Get_tuple_fs; - } - /*FALLTHRU*/ - case ScanPos::Get_tuple: - case ScanPos::Get_tuple_fs: - // get fixed size tuple - jam(); - { - Fix_page* page = (Fix_page*)pos.m_page; - if (key.m_page_idx + size <= Fix_page::DATA_WORDS) - { - pos.m_get = ScanPos::Get_next_tuple_fs; - th = (Tuple_header*)&page->m_data[key.m_page_idx]; - - if (likely(! (bits & ScanOp::SCAN_NR))) - { - jam(); - thbits = th->m_header_bits; - if (! (thbits & Tuple_header::FREE)) - { - goto found_tuple; - } - } - else - { - if (pos.m_realpid_mm == RNIL) - { - jam(); - foundGCI = 0; - goto found_deleted_rowid; - } - thbits = th->m_header_bits; - if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI || - foundGCI == 0) - { - if (! (thbits & Tuple_header::FREE)) - { - jam(); - goto found_tuple; - } - else - { - goto found_deleted_rowid; - } - } - else if (thbits != Fix_page::FREE_RECORD && - th->m_operation_ptr_i != RNIL) - { - jam(); - goto found_tuple; // Locked tuple... - // skip free tuple - } - } - } else { - jam(); - // no more tuples on this page - pos.m_get = ScanPos::Get_next_page; - } - } - break; // incr loop count - found_tuple: - // found possible tuple to return - jam(); - { - // caller has already set pos.m_get to next tuple - if (! (bits & ScanOp::SCAN_LCP && thbits & Tuple_header::LCP_SKIP)) { - Local_key& key_mm = pos.m_key_mm; - if (! (bits & ScanOp::SCAN_DD)) { - key_mm = pos.m_key; - // real page id is already set - } else { - key_mm.assref(th->m_base_record_ref); - // recompute for each disk tuple - pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no); - } - // TUPKEYREQ handles savepoint stuff - scan.m_state = ScanOp::Current; - return true; - } else { - jam(); - // clear it so that it will show up in next LCP - th->m_header_bits = thbits & ~(Uint32)Tuple_header::LCP_SKIP; - if (tablePtr.p->m_bits & Tablerec::TR_Checksum) { - jam(); - setChecksum(th, tablePtr.p); - } - } - } - break; - found_deleted_rowid: - jam(); - { - ndbassert(bits & ScanOp::SCAN_NR); - Local_key& key_mm = pos.m_key_mm; - if (! 
(bits & ScanOp::SCAN_DD)) { - key_mm = pos.m_key; - // caller has already set pos.m_get to next tuple - // real page id is already set - } else { - key_mm.assref(th->m_base_record_ref); - // recompute for each disk tuple - pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no); - - Fix_page *mmpage = (Fix_page*)c_page_pool.getPtr(pos.m_realpid_mm); - th = (Tuple_header*)(mmpage->m_data + key_mm.m_page_idx); - if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI || - foundGCI == 0) - { - if (! (thbits & Tuple_header::FREE)) - break; - } - } - - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = RNIL; - conf->fragId = frag.fragmentId; - conf->localKey[0] = pos.m_key_mm.ref(); - conf->localKey[1] = 0; - conf->localKeyLength = 1; - conf->gci = foundGCI; - Uint32 blockNo = refToBlock(scan.m_userRef); - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7); - jamEntry(); - - // TUPKEYREQ handles savepoint stuff - loop_count = 32; - scan.m_state = ScanOp::Next; - return false; - } - break; // incr loop count - default: - ndbrequire(false); - break; - } - if (++loop_count >= 32) - break; - } - // TODO: at drop table we have to flush and terminate these - jam(); - signal->theData[0] = ZTUP_SCAN; - signal->theData[1] = scanPtr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return false; - -found_lcp_keep: - Local_key tmp; - tmp.assref(lcp_list); - tmp.m_page_no = getRealpid(fragPtr.p, tmp.m_page_no); - - Ptr pagePtr; - c_page_pool.getPtr(pagePtr, tmp.m_page_no); - Tuple_header* ptr = (Tuple_header*) - ((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0); - Uint32 headerbits = ptr->m_header_bits; - ndbrequire(headerbits & Tuple_header::LCP_KEEP); - - Uint32 next = ptr->m_operation_ptr_i; - ptr->m_operation_ptr_i = RNIL; - ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE; - - if (tablePtr.p->m_bits & Tablerec::TR_Checksum) { - jam(); - setChecksum(ptr, tablePtr.p); - } - - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = (Uint32)-1; - conf->fragId = frag.fragmentId; - conf->localKey[0] = lcp_list; - conf->localKey[1] = 0; - conf->localKeyLength = 1; - conf->gci = 0; - Uint32 blockNo = refToBlock(scan.m_userRef); - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7); - - fragPtr.p->m_lcp_keep_list = next; - ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag - if (headerbits & Tuple_header::FREED) - { - if (tablePtr.p->m_attributes[MM].m_no_of_varsize) - { - jam(); - free_var_rec(fragPtr.p, tablePtr.p, &tmp, pagePtr); - } else { - jam(); - free_fix_rec(fragPtr.p, tablePtr.p, &tmp, (Fix_page*)pagePtr.p); - } - } - return false; -} - -void -Dbtup::scanCont(Signal* signal, ScanOpPtr scanPtr) -{ - bool immediate = scanNext(signal, scanPtr); - if (! immediate) { - jam(); - // time-slicing again - return; - } - scanReply(signal, scanPtr); -} - -void -Dbtup::disk_page_tup_scan_callback(Signal* signal, Uint32 scanPtrI, Uint32 page_i) -{ - ScanOpPtr scanPtr; - c_scanOpPool.getPtr(scanPtr, scanPtrI); - ScanOp& scan = *scanPtr.p; - ScanPos& pos = scan.m_scanPos; - // get cache page - Ptr gptr; - m_global_page_pool.getPtr(gptr, page_i); - pos.m_page = (Page*)gptr.p; - // continue - scanCont(signal, scanPtr); -} - -void -Dbtup::scanClose(Signal* signal, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - ndbrequire(! 
(scan.m_bits & ScanOp::SCAN_LOCK_WAIT) && scan.m_accLockOp == RNIL); - // unlock all not unlocked by LQH - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; - while (list.first(lockPtr)) { - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = lockPtr.p->m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - list.release(lockPtr); - } - // send conf - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scanPtr.p->m_userPtr; - conf->accOperationPtr = RNIL; - conf->fragId = RNIL; - unsigned signalLength = 3; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - releaseScanOp(scanPtr); -} - -void -Dbtup::addAccLockOp(ScanOp& scan, Uint32 accLockOp) -{ - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; -#ifdef VM_TRACE - list.first(lockPtr); - while (lockPtr.i != RNIL) { - ndbrequire(lockPtr.p->m_accLockOp != accLockOp); - list.next(lockPtr); - } -#endif - bool ok = list.seize(lockPtr); - ndbrequire(ok); - lockPtr.p->m_accLockOp = accLockOp; -} - -void -Dbtup::removeAccLockOp(ScanOp& scan, Uint32 accLockOp) -{ - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; - list.first(lockPtr); - while (lockPtr.i != RNIL) { - if (lockPtr.p->m_accLockOp == accLockOp) { - jam(); - break; - } - list.next(lockPtr); - } - ndbrequire(lockPtr.i != RNIL); - list.release(lockPtr); -} - -void -Dbtup::releaseScanOp(ScanOpPtr& scanPtr) -{ - FragrecordPtr fragPtr; - fragPtr.i = scanPtr.p->m_fragPtrI; - ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord); - - if(scanPtr.p->m_bits & ScanOp::SCAN_LCP) - { - jam(); - fragPtr.p->m_lcp_scan_op = RNIL; - scanPtr.p->m_fragPtrI = RNIL; - } - else - { - jam(); - LocalDLList list(c_scanOpPool, fragPtr.p->m_scanList); - list.release(scanPtr); - } -} - -void -Dbtup::execLCP_FRAG_ORD(Signal* signal) -{ - LcpFragOrd* req= (LcpFragOrd*)signal->getDataPtr(); - - TablerecPtr tablePtr; - tablePtr.i = req->tableId; - ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec); - - if (tablePtr.p->m_no_of_disk_attributes) - { - jam(); - FragrecordPtr fragPtr; - Uint32 fragId = req->fragmentId; - fragPtr.i = RNIL; - getFragmentrec(fragPtr, fragId, tablePtr.p); - ndbrequire(fragPtr.i != RNIL); - Fragrecord& frag = *fragPtr.p; - - ndbrequire(frag.m_lcp_scan_op == RNIL && c_lcp_scan_op != RNIL); - frag.m_lcp_scan_op = c_lcp_scan_op; - ScanOpPtr scanPtr; - c_scanOpPool.getPtr(scanPtr, frag.m_lcp_scan_op); - ndbrequire(scanPtr.p->m_fragPtrI == RNIL); - scanPtr.p->m_fragPtrI = fragPtr.i; - - scanFirst(signal, scanPtr); - scanPtr.p->m_state = ScanOp::First; - } -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp deleted file mode 100644 index 63f73593ff7..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupStoredProcDef.cpp +++ /dev/null @@ -1,238 +0,0 @@ -/* Copyright (c) 2003, 2005, 2007, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_STORE_PROC_DEF_CPP -#include "Dbtup.hpp" -#include -#include -#include - -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -/* ------------ADD/DROP STORED PROCEDURE MODULE ------------------- */ -/* ---------------------------------------------------------------- */ -/* ---------------------------------------------------------------- */ -void Dbtup::execSTORED_PROCREQ(Signal* signal) -{ - OperationrecPtr regOperPtr; - TablerecPtr regTabPtr; - jamEntry(); - regOperPtr.i = signal->theData[0]; - c_operation_pool.getPtr(regOperPtr); - regTabPtr.i = signal->theData[1]; - ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); - - Uint32 requestInfo = signal->theData[3]; - TransState trans_state= get_trans_state(regOperPtr.p); - ndbrequire(trans_state == TRANS_IDLE || - ((trans_state == TRANS_ERROR_WAIT_STORED_PROCREQ) && - (requestInfo == ZSTORED_PROCEDURE_DELETE))); - ndbrequire(regTabPtr.p->tableStatus == DEFINED); - switch (requestInfo) { - case ZSCAN_PROCEDURE: - jam(); - scanProcedure(signal, - regOperPtr.p, - signal->theData[4]); - break; - case ZCOPY_PROCEDURE: - jam(); - copyProcedure(signal, regTabPtr, regOperPtr.p); - break; - case ZSTORED_PROCEDURE_DELETE: - jam(); - deleteScanProcedure(signal, regOperPtr.p); - break; - default: - ndbrequire(false); - }//switch -}//Dbtup::execSTORED_PROCREQ() - -void Dbtup::deleteScanProcedure(Signal* signal, - Operationrec* regOperPtr) -{ - StoredProcPtr storedPtr; - Uint32 storedProcId = signal->theData[4]; - c_storedProcPool.getPtr(storedPtr, storedProcId); - ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); - ndbrequire(storedPtr.p->storedCounter == 0); - Uint32 firstAttrinbuf = storedPtr.p->storedLinkFirst; - storedPtr.p->storedCode = ZSTORED_PROCEDURE_FREE; - storedPtr.p->storedLinkFirst = RNIL; - storedPtr.p->storedLinkLast = RNIL; - storedPtr.p->storedProcLength = 0; - c_storedProcPool.release(storedPtr); - freeAttrinbufrec(firstAttrinbuf); - regOperPtr->currentAttrinbufLen = 0; - set_trans_state(regOperPtr, TRANS_IDLE); - signal->theData[0] = regOperPtr->userpointer; - signal->theData[1] = storedProcId; - sendSignal(DBLQH_REF, GSN_STORED_PROCCONF, signal, 2, JBB); -}//Dbtup::deleteScanProcedure() - -void Dbtup::scanProcedure(Signal* signal, - Operationrec* regOperPtr, - Uint32 lenAttrInfo) -{ -//-------------------------------------------------------- -// We introduce the maxCheck so that there is always one -// stored procedure entry free for copy procedures. Thus -// no amount of scanning can cause problems for the node -// recovery functionality. 
-//-------------------------------------------------------- - StoredProcPtr storedPtr; - c_storedProcPool.seize(storedPtr); - ndbrequire(storedPtr.i != RNIL); - storedPtr.p->storedCode = ZSCAN_PROCEDURE; - storedPtr.p->storedCounter = 0; - storedPtr.p->storedProcLength = lenAttrInfo; - storedPtr.p->storedLinkFirst = RNIL; - storedPtr.p->storedLinkLast = RNIL; - set_trans_state(regOperPtr, TRANS_WAIT_STORED_PROCEDURE_ATTR_INFO); - regOperPtr->attrinbufLen = lenAttrInfo; - regOperPtr->currentAttrinbufLen = 0; - regOperPtr->storedProcPtr = storedPtr.i; - if (lenAttrInfo >= ZATTR_BUFFER_SIZE) { // yes ">=" - jam(); - // send REF and change state to ignore the ATTRINFO to come - storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_TOO_MUCH_ATTRINFO_ERROR); - } -}//Dbtup::scanProcedure() - -void Dbtup::copyProcedure(Signal* signal, - TablerecPtr regTabPtr, - Operationrec* regOperPtr) -{ - Uint32 TnoOfAttributes = regTabPtr.p->m_no_of_attributes; - scanProcedure(signal, - regOperPtr, - TnoOfAttributes); - - Uint32 length = 0; - for (Uint32 Ti = 0; Ti < TnoOfAttributes; Ti++) { - AttributeHeader::init(&signal->theData[length + 1], Ti, 0); - length++; - if (length == 24) { - jam(); - ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, - signal->theData+1, length, true)); - length = 0; - }//if - }//for - if (length != 0) { - jam(); - ndbrequire(storedProcedureAttrInfo(signal, regOperPtr, - signal->theData+1, length, true)); - }//if - ndbrequire(regOperPtr->currentAttrinbufLen == 0); -}//Dbtup::copyProcedure() - -bool Dbtup::storedProcedureAttrInfo(Signal* signal, - Operationrec* regOperPtr, - const Uint32 *data, - Uint32 length, - bool copyProcedure) -{ - AttrbufrecPtr regAttrPtr; - Uint32 RnoFree = cnoFreeAttrbufrec; - if (ERROR_INSERTED(4004) && !copyProcedure) { - CLEAR_ERROR_INSERT_VALUE; - storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR); - return false; - }//if - regOperPtr->currentAttrinbufLen += length; - ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen); - if ((RnoFree > MIN_ATTRBUF) || - (copyProcedure)) { - jam(); - regAttrPtr.i = cfirstfreeAttrbufrec; - ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec); - regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0; - cfirstfreeAttrbufrec = regAttrPtr.p->attrbuf[ZBUF_NEXT]; - cnoFreeAttrbufrec = RnoFree - 1; - regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; - } else { - jam(); - storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR); - return false; - }//if - if (regOperPtr->firstAttrinbufrec == RNIL) { - jam(); - regOperPtr->firstAttrinbufrec = regAttrPtr.i; - }//if - regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL; - if (regOperPtr->lastAttrinbufrec != RNIL) { - AttrbufrecPtr tempAttrinbufptr; - jam(); - tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec; - ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec); - tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i; - }//if - regOperPtr->lastAttrinbufrec = regAttrPtr.i; - - regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = length; - MEMCOPY_NO_WORDS(®AttrPtr.p->attrbuf[0], - data, - length); - - if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) { - jam(); - return true; - }//if - if (ERROR_INSERTED(4005) && !copyProcedure) { - CLEAR_ERROR_INSERT_VALUE; - storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR); - return false; - }//if - - StoredProcPtr storedPtr; - c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcPtr); - ndbrequire(storedPtr.p->storedCode == 
ZSCAN_PROCEDURE); - - regOperPtr->currentAttrinbufLen = 0; - storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec; - storedPtr.p->storedLinkLast = regOperPtr->lastAttrinbufrec; - regOperPtr->firstAttrinbufrec = RNIL; - regOperPtr->lastAttrinbufrec = RNIL; - regOperPtr->m_any_value = 0; - set_trans_state(regOperPtr, TRANS_IDLE); - signal->theData[0] = regOperPtr->userpointer; - signal->theData[1] = storedPtr.i; - sendSignal(DBLQH_REF, GSN_STORED_PROCCONF, signal, 2, JBB); - return true; -}//Dbtup::storedProcedureAttrInfo() - -void Dbtup::storedSeizeAttrinbufrecErrorLab(Signal* signal, - Operationrec* regOperPtr, - Uint32 errorCode) -{ - StoredProcPtr storedPtr; - c_storedProcPool.getPtr(storedPtr, regOperPtr->storedProcPtr); - ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE); - - storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec; - regOperPtr->firstAttrinbufrec = RNIL; - regOperPtr->lastAttrinbufrec = RNIL; - regOperPtr->m_any_value = 0; - set_trans_state(regOperPtr, TRANS_ERROR_WAIT_STORED_PROCREQ); - signal->theData[0] = regOperPtr->userpointer; - signal->theData[1] = errorCode; - signal->theData[2] = regOperPtr->storedProcPtr; - sendSignal(DBLQH_REF, GSN_STORED_PROCREF, signal, 3, JBB); -}//Dbtup::storedSeizeAttrinbufrecErrorLab() - diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp deleted file mode 100644 index 71a19c1840e..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp +++ /dev/null @@ -1,317 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_TAB_DES_MAN_CPP -#include "Dbtup.hpp" -#include -#include -#include - -/* - * TABLE DESCRIPTOR MEMORY MANAGER - * - * Each table has a descriptor which is a contiguous array of words. - * The descriptor is allocated from a global array using a buddy - * algorithm. Free lists exist for each power of 2 words. Freeing - * a piece first merges with free right and left neighbours and then - * divides itself up into free list chunks. 
- */ - -Uint32 -Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset) -{ - // belongs to configure.in - unsigned sizeOfPointer = sizeof(CHARSET_INFO*); - ndbrequire((sizeOfPointer & 0x3) == 0); - sizeOfPointer = (sizeOfPointer >> 2); - // do in layout order and return offsets (see DbtupMeta.cpp) - Uint32 allocSize = 0; - // magically aligned to 8 bytes - offset[0] = allocSize += ZTD_SIZE; - offset[1] = allocSize += regTabPtr->m_no_of_attributes* sizeOfReadFunction(); - offset[2] = allocSize += regTabPtr->m_no_of_attributes* sizeOfReadFunction(); - offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer; - offset[4] = allocSize += regTabPtr->noOfKeyAttr; - offset[5] = allocSize += regTabPtr->m_no_of_attributes * ZAD_SIZE; - offset[6] = allocSize += (regTabPtr->m_no_of_attributes + 1) >> 1; // real order - allocSize += ZTD_TRAILER_SIZE; - // return number of words - return allocSize; -} - -Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset) -{ - Uint32 reference = RNIL; - Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset); -/* ---------------------------------------------------------------- */ -/* ALWAYS ALLOCATE A MULTIPLE OF 16 WORDS */ -/* ---------------------------------------------------------------- */ - allocSize = (((allocSize - 1) >> 4) + 1) << 4; - Uint32 list = nextHigherTwoLog(allocSize - 1); /* CALCULATE WHICH LIST IT BELONGS TO */ - for (Uint32 i = list; i < 16; i++) { - jam(); - if (cfreeTdList[i] != RNIL) { - jam(); - reference = cfreeTdList[i]; - removeTdArea(reference, i); /* REMOVE THE AREA FROM THE FREELIST */ - Uint32 retNo = (1 << i) - allocSize; /* CALCULATE THE DIFFERENCE */ - if (retNo >= ZTD_FREE_SIZE) { - jam(); - // return unused words, of course without attempting left merge - Uint32 retRef = reference + allocSize; - freeTabDescr(retRef, retNo, false); - } else { - jam(); - allocSize = 1 << i; - }//if - break; - }//if - }//for - if (reference == RNIL) { - jam(); - terrorCode = ZMEM_NOTABDESCR_ERROR; - return RNIL; - } else { - jam(); - setTabDescrWord((reference + allocSize) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); - setTabDescrWord(reference + ZTD_DATASIZE, allocSize); - - /* INITIALIZE THE TRAILER RECORD WITH TYPE AND SIZE */ - /* THE TRAILER IS USED TO SIMPLIFY MERGE OF FREE AREAS */ - - setTabDescrWord(reference + ZTD_HEADER, ZTD_TYPE_NORMAL); - setTabDescrWord((reference + allocSize) - ZTD_TR_SIZE, allocSize); - return reference; - }//if -}//Dbtup::allocTabDescr() - -void Dbtup::freeTabDescr(Uint32 retRef, Uint32 retNo, bool normal) -{ - itdaMergeTabDescr(retRef, retNo, normal); /* MERGE WITH POSSIBLE NEIGHBOURS */ - while (retNo >= ZTD_FREE_SIZE) { - jam(); - Uint32 list = nextHigherTwoLog(retNo); - list--; /* RETURN TO NEXT LOWER LIST */ - Uint32 sizeOfChunk = 1 << list; - insertTdArea(retRef, list); - retRef += sizeOfChunk; - retNo -= sizeOfChunk; - }//while - ndbassert(retNo == 0); -}//Dbtup::freeTabDescr() - -Uint32 -Dbtup::getTabDescrWord(Uint32 index) -{ - ndbrequire(index < cnoOfTabDescrRec); - return tableDescriptor[index].tabDescr; -}//Dbtup::getTabDescrWord() - -void -Dbtup::setTabDescrWord(Uint32 index, Uint32 word) -{ - ndbrequire(index < cnoOfTabDescrRec); - tableDescriptor[index].tabDescr = word; -}//Dbtup::setTabDescrWord() - -void Dbtup::insertTdArea(Uint32 tabDesRef, Uint32 list) -{ - ndbrequire(list < 16); - setTabDescrWord(tabDesRef + ZTD_FL_HEADER, ZTD_TYPE_FREE); - setTabDescrWord(tabDesRef + ZTD_FL_NEXT, cfreeTdList[list]); - if (cfreeTdList[list] != RNIL) { - jam(); /* PREVIOUSLY EMPTY SLOT */ 
- setTabDescrWord(cfreeTdList[list] + ZTD_FL_PREV, tabDesRef); - }//if - cfreeTdList[list] = tabDesRef; /* RELINK THE LIST */ - - setTabDescrWord(tabDesRef + ZTD_FL_PREV, RNIL); - setTabDescrWord(tabDesRef + ZTD_FL_SIZE, 1 << list); - setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_FREE); - setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_SIZE, 1 << list); -}//Dbtup::insertTdArea() - -/* - * Merge to-be-removed chunk (which need not be initialized with header - * and trailer) with left and right buddies. The start point retRef - * moves to left and the size retNo increases to match the new chunk. - */ -void Dbtup::itdaMergeTabDescr(Uint32& retRef, Uint32& retNo, bool normal) -{ - // merge right - while ((retRef + retNo) < cnoOfTabDescrRec) { - jam(); - Uint32 tabDesRef = retRef + retNo; - Uint32 headerWord = getTabDescrWord(tabDesRef + ZTD_FL_HEADER); - if (headerWord == ZTD_TYPE_FREE) { - jam(); - Uint32 sizeOfMergedPart = getTabDescrWord(tabDesRef + ZTD_FL_SIZE); - - retNo += sizeOfMergedPart; - Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); - removeTdArea(tabDesRef, list); - } else { - jam(); - break; - } - } - // merge left - const bool mergeLeft = normal; - while (mergeLeft && retRef > 0) { - jam(); - Uint32 trailerWord = getTabDescrWord(retRef - ZTD_TR_TYPE); - if (trailerWord == ZTD_TYPE_FREE) { - jam(); - Uint32 sizeOfMergedPart = getTabDescrWord(retRef - ZTD_TR_SIZE); - ndbrequire(retRef >= sizeOfMergedPart); - retRef -= sizeOfMergedPart; - retNo += sizeOfMergedPart; - Uint32 list = nextHigherTwoLog(sizeOfMergedPart - 1); - removeTdArea(retRef, list); - } else { - jam(); - break; - } - } - ndbrequire((retRef + retNo) <= cnoOfTabDescrRec); -}//Dbtup::itdaMergeTabDescr() - -/* ---------------------------------------------------------------- */ -/* ------------------------ REMOVE_TD_AREA ------------------------ */ -/* ---------------------------------------------------------------- */ -/* */ -/* THIS ROUTINE REMOVES A TD CHUNK FROM THE POOL OF TD RECORDS */ -/* */ -/* INPUT: TLIST LIST TO USE */ -/* TAB_DESCR_PTR POINTS TO THE CHUNK TO BE REMOVED */ -/* */ -/* SHORTNAME: RMTA */ -/* -----------------------------------------------------------------*/ -void Dbtup::removeTdArea(Uint32 tabDesRef, Uint32 list) -{ - ndbrequire(list < 16); - Uint32 tabDescrNextPtr = getTabDescrWord(tabDesRef + ZTD_FL_NEXT); - Uint32 tabDescrPrevPtr = getTabDescrWord(tabDesRef + ZTD_FL_PREV); - - setTabDescrWord(tabDesRef + ZTD_HEADER, ZTD_TYPE_NORMAL); - setTabDescrWord((tabDesRef + (1 << list)) - ZTD_TR_TYPE, ZTD_TYPE_NORMAL); - - if (tabDesRef == cfreeTdList[list]) { - jam(); - cfreeTdList[list] = tabDescrNextPtr; /* RELINK THE LIST */ - }//if - if (tabDescrNextPtr != RNIL) { - jam(); - setTabDescrWord(tabDescrNextPtr + ZTD_FL_PREV, tabDescrPrevPtr); - }//if - if (tabDescrPrevPtr != RNIL) { - jam(); - setTabDescrWord(tabDescrPrevPtr + ZTD_FL_NEXT, tabDescrNextPtr); - }//if -}//Dbtup::removeTdArea() - -#ifdef VM_TRACE -void -Dbtup::verifytabdes() -{ - struct WordType { - short fl; // free list 0-15 - short ti; // table id - WordType() : fl(-1), ti(-1) {} - }; - WordType* wt = new WordType [cnoOfTabDescrRec]; - uint free_frags = 0; - // free lists - { - for (uint i = 0; i < 16; i++) { - Uint32 desc2 = RNIL; - Uint32 desc = cfreeTdList[i]; - while (desc != RNIL) { - const Uint32 size = (1 << i); - ndbrequire(size >= ZTD_FREE_SIZE); - ndbrequire(desc + size <= cnoOfTabDescrRec); - { Uint32 index = desc + ZTD_FL_HEADER; - ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_FREE); 
- } - { Uint32 index = desc + ZTD_FL_SIZE; - ndbrequire(tableDescriptor[index].tabDescr == size); - } - { Uint32 index = desc + size - ZTD_TR_TYPE; - ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_FREE); - } - { Uint32 index = desc + size - ZTD_TR_SIZE; - ndbrequire(tableDescriptor[index].tabDescr == size); - } - { Uint32 index = desc + ZTD_FL_PREV; - ndbrequire(tableDescriptor[index].tabDescr == desc2); - } - for (uint j = 0; j < size; j++) { - ndbrequire(wt[desc + j].fl == -1); - wt[desc + j].fl = i; - } - desc2 = desc; - desc = tableDescriptor[desc + ZTD_FL_NEXT].tabDescr; - free_frags++; - } - } - } - // tables - { - for (uint i = 0; i < cnoOfTablerec; i++) { - TablerecPtr ptr; - ptr.i = i; - ptrAss(ptr, tablerec); - if (ptr.p->tableStatus == DEFINED) { - Uint32 offset[10]; - const Uint32 alloc = getTabDescrOffsets(ptr.p, offset); - const Uint32 desc = ptr.p->readKeyArray - offset[3]; - Uint32 size = alloc; - if (size % ZTD_FREE_SIZE != 0) - size += ZTD_FREE_SIZE - size % ZTD_FREE_SIZE; - ndbrequire(desc + size <= cnoOfTabDescrRec); - { Uint32 index = desc + ZTD_FL_HEADER; - ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_NORMAL); - } - { Uint32 index = desc + ZTD_FL_SIZE; - ndbrequire(tableDescriptor[index].tabDescr == size); - } - { Uint32 index = desc + size - ZTD_TR_TYPE; - ndbrequire(tableDescriptor[index].tabDescr == ZTD_TYPE_NORMAL); - } - { Uint32 index = desc + size - ZTD_TR_SIZE; - ndbrequire(tableDescriptor[index].tabDescr == size); - } - for (uint j = 0; j < size; j++) { - ndbrequire(wt[desc + j].ti == -1); - wt[desc + j].ti = i; - } - } - } - } - // all words - { - for (uint i = 0; i < cnoOfTabDescrRec; i++) { - bool is_fl = wt[i].fl != -1; - bool is_ti = wt[i].ti != -1; - ndbrequire(is_fl != is_ti); - } - } - delete [] wt; - ndbout << "verifytabdes: frags=" << free_frags << endl; -} -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp deleted file mode 100644 index f202959b4da..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ /dev/null @@ -1,1291 +0,0 @@ -/* Copyright (c) 2003-2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - -#define DBTUP_C -#define DBTUP_TRIGGER_CPP -#include "Dbtup.hpp" -#include -#include -#include -#include -#include "AttributeOffset.hpp" -#include -#include -#include -#include - -/* **************************************************************** */ -/* ---------------------------------------------------------------- */ -/* ----------------------- TRIGGER HANDLING ----------------------- */ -/* ---------------------------------------------------------------- */ -/* **************************************************************** */ - -DLList* -Dbtup::findTriggerList(Tablerec* table, - TriggerType::Value ttype, - TriggerActionTime::Value ttime, - TriggerEvent::Value tevent) -{ - DLList* tlist = NULL; - switch (ttype) { - case TriggerType::SUBSCRIPTION: - case TriggerType::SUBSCRIPTION_BEFORE: - switch (tevent) { - case TriggerEvent::TE_INSERT: - jam(); - if (ttime == TriggerActionTime::TA_DETACHED) - tlist = &table->subscriptionInsertTriggers; - break; - case TriggerEvent::TE_UPDATE: - jam(); - if (ttime == TriggerActionTime::TA_DETACHED) - tlist = &table->subscriptionUpdateTriggers; - break; - case TriggerEvent::TE_DELETE: - jam(); - if (ttime == TriggerActionTime::TA_DETACHED) - tlist = &table->subscriptionDeleteTriggers; - break; - default: - break; - } - break; - case TriggerType::SECONDARY_INDEX: - switch (tevent) { - case TriggerEvent::TE_INSERT: - jam(); - if (ttime == TriggerActionTime::TA_AFTER) - tlist = &table->afterInsertTriggers; - break; - case TriggerEvent::TE_UPDATE: - jam(); - if (ttime == TriggerActionTime::TA_AFTER) - tlist = &table->afterUpdateTriggers; - break; - case TriggerEvent::TE_DELETE: - jam(); - if (ttime == TriggerActionTime::TA_AFTER) - tlist = &table->afterDeleteTriggers; - break; - default: - break; - } - break; - case TriggerType::ORDERED_INDEX: - switch (tevent) { - case TriggerEvent::TE_CUSTOM: - jam(); - if (ttime == TriggerActionTime::TA_CUSTOM) - tlist = &table->tuxCustomTriggers; - break; - default: - break; - } - break; - case TriggerType::READ_ONLY_CONSTRAINT: - switch (tevent) { - case TriggerEvent::TE_UPDATE: - jam(); - if (ttime == TriggerActionTime::TA_AFTER) - tlist = &table->constraintUpdateTriggers; - break; - default: - break; - } - break; - default: - break; - } - return tlist; -} - -// Trigger signals -void -Dbtup::execCREATE_TRIG_REQ(Signal* signal) -{ - jamEntry(); - BlockReference senderRef = signal->getSendersBlockRef(); - const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr(); - const CreateTrigReq* const req = &reqCopy; - CreateTrigRef::ErrorCode error= CreateTrigRef::NoError; - - // Find table - TablerecPtr tabPtr; - tabPtr.i = req->getTableId(); - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - if (tabPtr.p->tableStatus != DEFINED ) - { - jam(); - error= CreateTrigRef::InvalidTable; - } - // Create trigger and associate it with the table - else if (createTrigger(tabPtr.p, req)) - { - jam(); - // Send conf - CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend(); - conf->setUserRef(reference()); - conf->setConnectionPtr(req->getConnectionPtr()); - conf->setRequestType(req->getRequestType()); - conf->setTableId(req->getTableId()); - conf->setIndexId(req->getIndexId()); - conf->setTriggerId(req->getTriggerId()); - conf->setTriggerInfo(req->getTriggerInfo()); - 
sendSignal(senderRef, GSN_CREATE_TRIG_CONF, - signal, CreateTrigConf::SignalLength, JBB); - return; - } - else - { - jam(); - error= CreateTrigRef::TooManyTriggers; - } - ndbassert(error != CreateTrigRef::NoError); - // Send ref - CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend(); - ref->setUserRef(reference()); - ref->setConnectionPtr(req->getConnectionPtr()); - ref->setRequestType(req->getRequestType()); - ref->setTableId(req->getTableId()); - ref->setIndexId(req->getIndexId()); - ref->setTriggerId(req->getTriggerId()); - ref->setTriggerInfo(req->getTriggerInfo()); - ref->setErrorCode(error); - sendSignal(senderRef, GSN_CREATE_TRIG_REF, - signal, CreateTrigRef::SignalLength, JBB); -}//Dbtup::execCREATE_TRIG_REQ() - -void -Dbtup::execDROP_TRIG_REQ(Signal* signal) -{ - jamEntry(); - BlockReference senderRef = signal->getSendersBlockRef(); - const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr(); - const DropTrigReq* const req = &reqCopy; - - // Find table - TablerecPtr tabPtr; - tabPtr.i = req->getTableId(); - ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); - - // Drop trigger - Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef)); - if (r == 0){ - // Send conf - DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend(); - conf->setUserRef(senderRef); - conf->setConnectionPtr(req->getConnectionPtr()); - conf->setRequestType(req->getRequestType()); - conf->setTableId(req->getTableId()); - conf->setIndexId(req->getIndexId()); - conf->setTriggerId(req->getTriggerId()); - sendSignal(senderRef, GSN_DROP_TRIG_CONF, - signal, DropTrigConf::SignalLength, JBB); - } else { - // Send ref - DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend(); - ref->setUserRef(senderRef); - ref->setConnectionPtr(req->getConnectionPtr()); - ref->setRequestType(req->getRequestType()); - ref->setTableId(req->getTableId()); - ref->setIndexId(req->getIndexId()); - ref->setTriggerId(req->getTriggerId()); - ref->setErrorCode((DropTrigRef::ErrorCode)r); - ref->setErrorLine(__LINE__); - ref->setErrorNode(refToNode(reference())); - sendSignal(senderRef, GSN_DROP_TRIG_REF, - signal, DropTrigRef::SignalLength, JBB); - } -}//Dbtup::DROP_TRIG_REQ() - -/* ---------------------------------------------------------------- */ -/* ------------------------- createTrigger ------------------------ */ -/* */ -/* Creates a new trigger record by fetching one from the trigger */ -/* pool and associates it with the given table. */ -/* Trigger type can be one of secondary_index, subscription, */ -/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */ -/* api_trigger(NYI) or sql_trigger(NYI). */ -/* Note that this method only checks for total number of allowed */ -/* triggers. Checking the number of allowed triggers per table is */ -/* done by TRIX. 
*/ -/* */ -/* ---------------------------------------------------------------- */ -bool -Dbtup::createTrigger(Tablerec* table, const CreateTrigReq* req) -{ - if (ERROR_INSERTED(4003)) { - CLEAR_ERROR_INSERT_VALUE; - return false; - } - TriggerType::Value ttype = req->getTriggerType(); - TriggerActionTime::Value ttime = req->getTriggerActionTime(); - TriggerEvent::Value tevent = req->getTriggerEvent(); - - DLList* tlist = findTriggerList(table, ttype, ttime, tevent); - ndbrequire(tlist != NULL); - - TriggerPtr tptr; - if (!tlist->seize(tptr)) - return false; - - // Set trigger id - tptr.p->triggerId = req->getTriggerId(); - - // ndbout_c("Create TupTrigger %u = %u %u %u %u", tptr.p->triggerId, table, ttype, ttime, tevent); - - // Set index id - tptr.p->indexId = req->getIndexId(); - - // Set trigger type etc - tptr.p->triggerType = ttype; - tptr.p->triggerActionTime = ttime; - tptr.p->triggerEvent = tevent; - - tptr.p->sendBeforeValues = true; - if ((tptr.p->triggerType == TriggerType::SUBSCRIPTION) && - ((tptr.p->triggerEvent == TriggerEvent::TE_UPDATE) || - (tptr.p->triggerEvent == TriggerEvent::TE_DELETE))) { - jam(); - tptr.p->sendBeforeValues = false; - } - /* - tptr.p->sendOnlyChangedAttributes = false; - if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) || - (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) && - (tptr.p->triggerEvent == TriggerEvent::TE_UPDATE)) { - jam(); - tptr.p->sendOnlyChangedAttributes = true; - } - */ - tptr.p->sendOnlyChangedAttributes = !req->getReportAllMonitoredAttributes(); - // Set monitor all - tptr.p->monitorAllAttributes = req->getMonitorAllAttributes(); - tptr.p->monitorReplicas = req->getMonitorReplicas(); - tptr.p->m_receiverBlock = refToBlock(req->getReceiverRef()); - - tptr.p->attributeMask.clear(); - if (tptr.p->monitorAllAttributes) { - jam(); - for(Uint32 i = 0; i < table->m_no_of_attributes; i++) { - if (!primaryKey(table, i)) { - jam(); - tptr.p->attributeMask.set(i); - } - } - } else { - // Set attribute mask - jam(); - tptr.p->attributeMask = req->getAttributeMask(); - } - return true; -}//Dbtup::createTrigger() - -bool -Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId) -{ - Uint32 attrDescriptorStart = regTabPtr->tabDescriptor; - Uint32 attrDescriptor = getTabDescrWord(attrDescriptorStart + - (attrId * ZAD_SIZE)); - return (bool)AttributeDescriptor::getPrimaryKey(attrDescriptor); -}//Dbtup::primaryKey() - -/* ---------------------------------------------------------------- */ -/* -------------------------- dropTrigger ------------------------- */ -/* */ -/* Deletes a trigger record by disassociating it with the given */ -/* table and returning it to the trigger pool. */ -/* Trigger type can be one of secondary_index, subscription, */ -/* constraint(NYI), foreign_key(NYI), schema_upgrade(NYI), */ -/* api_trigger(NYI) or sql_trigger(NYI). 
*/ -/* */ -/* ---------------------------------------------------------------- */ -Uint32 -Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) -{ - if (ERROR_INSERTED(4004)) { - CLEAR_ERROR_INSERT_VALUE; - return 9999; - } - Uint32 triggerId = req->getTriggerId(); - - TriggerType::Value ttype = req->getTriggerType(); - TriggerActionTime::Value ttime = req->getTriggerActionTime(); - TriggerEvent::Value tevent = req->getTriggerEvent(); - - // ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender); - - DLList* tlist = findTriggerList(table, ttype, ttime, tevent); - ndbrequire(tlist != NULL); - - Ptr ptr; - for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) { - jam(); - if (ptr.p->triggerId == triggerId) { - if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock) - { - /** - * You can only drop your own triggers for subscription triggers. - * Trigger IDs are private for each block. - * - * SUMA encodes information in the triggerId - * - * Backup doesn't really care about the Ids though. - */ - jam(); - continue; - } - jam(); - tlist->release(ptr.i); - return 0; - } - } - return DropTrigRef::TriggerNotFound; -}//Dbtup::dropTrigger() - -/* ---------------------------------------------------------------- */ -/* -------------- checkImmediateTriggersAfterOp ------------------ */ -/* */ -/* Called after an insert, delete, or update operation takes */ -/* place. Fetches before tuple for deletes and updates and */ -/* after tuple for inserts and updates. */ -/* Executes immediate triggers by sending FIRETRIGORD */ -/* */ -/* ---------------------------------------------------------------- */ -void -Dbtup::checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct, - Operationrec *regOperPtr, - Tablerec *regTablePtr, - bool disk) -{ - if(refToBlock(req_struct->TC_ref) != DBTC) { - return; - } - - if ((regOperPtr->op_struct.primary_replica) && - (!(regTablePtr->afterInsertTriggers.isEmpty()))) { - jam(); - fireImmediateTriggers(req_struct, - regTablePtr->afterInsertTriggers, - regOperPtr, - disk); - } -} - -void -Dbtup::checkImmediateTriggersAfterUpdate(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* regTablePtr, - bool disk) -{ - if(refToBlock(req_struct->TC_ref) != DBTC) { - return; - } - - if ((regOperPtr->op_struct.primary_replica) && - (!(regTablePtr->afterUpdateTriggers.isEmpty()))) { - jam(); - fireImmediateTriggers(req_struct, - regTablePtr->afterUpdateTriggers, - regOperPtr, - disk); - } - if ((regOperPtr->op_struct.primary_replica) && - (!(regTablePtr->constraintUpdateTriggers.isEmpty()))) { - jam(); - fireImmediateTriggers(req_struct, - regTablePtr->constraintUpdateTriggers, - regOperPtr, - disk); - } -} - -void -Dbtup::checkImmediateTriggersAfterDelete(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* regTablePtr, - bool disk) -{ - if(refToBlock(req_struct->TC_ref) != DBTC) { - return; - } - - if ((regOperPtr->op_struct.primary_replica) && - (!(regTablePtr->afterDeleteTriggers.isEmpty()))) { - jam(); - executeTriggers(req_struct, - regTablePtr->afterDeleteTriggers, - regOperPtr, - disk); - } -} - -#if 0 -/* ---------------------------------------------------------------- */ -/* --------------------- checkDeferredTriggers -------------------- */ -/* */ -/* Called before commit after an insert, delete, or update */ -/* operation. Fetches before tuple for deletes and updates and */ -/* after tuple for inserts and updates. 
*/ -/* Executes deferred triggers by sending FIRETRIGORD */ -/* */ -/* ---------------------------------------------------------------- */ -void Dbtup::checkDeferredTriggers(Signal* signal, - Operationrec* const regOperPtr, - Tablerec* const regTablePtr) -{ - jam(); - // NYI -}//Dbtup::checkDeferredTriggers() -#endif - -/* ---------------------------------------------------------------- */ -/* --------------------- checkDetachedTriggers -------------------- */ -/* */ -/* Called at commit after an insert, delete, or update operation. */ -/* Fetches before tuple for deletes and updates and */ -/* after tuple for inserts and updates. */ -/* Executes detached triggers by sending FIRETRIGORD */ -/* */ -/* ---------------------------------------------------------------- */ -void Dbtup::checkDetachedTriggers(KeyReqStruct *req_struct, - Operationrec* regOperPtr, - Tablerec* regTablePtr, - bool disk) -{ - Uint32 save_type = regOperPtr->op_struct.op_type; - Tuple_header *save_ptr = req_struct->m_tuple_ptr; - - switch (save_type) { - case ZUPDATE: - case ZINSERT: - req_struct->m_tuple_ptr = (Tuple_header*) - c_undo_buffer.get_ptr(®OperPtr->m_copy_tuple_location); - break; - } - - /** - * Set correct operation type and fix change mask - * Note ALLOC is set in "orig" tuple - */ - if (save_ptr->m_header_bits & Tuple_header::ALLOC) { - if (save_type == ZDELETE) { - // insert + delete = nothing - jam(); - return; - goto end; - } - regOperPtr->op_struct.op_type = ZINSERT; - } - else if (save_type == ZINSERT) { - /** - * Tuple was not created but last op is INSERT. - * This is possible only on DELETE + INSERT - */ - regOperPtr->op_struct.op_type = ZUPDATE; - } - - switch(regOperPtr->op_struct.op_type) { - case(ZINSERT): - jam(); - if (regTablePtr->subscriptionInsertTriggers.isEmpty()) { - // Table has no active triggers monitoring inserts at commit - jam(); - goto end; - } - - // If any fired immediate insert trigger then fetch after tuple - fireDetachedTriggers(req_struct, - regTablePtr->subscriptionInsertTriggers, - regOperPtr, disk); - break; - case(ZDELETE): - jam(); - if (regTablePtr->subscriptionDeleteTriggers.isEmpty()) { - // Table has no active triggers monitoring deletes at commit - jam(); - goto end; - } - - // Execute any after delete triggers by sending - // FIRETRIGORD with the before tuple - fireDetachedTriggers(req_struct, - regTablePtr->subscriptionDeleteTriggers, - regOperPtr, disk); - break; - case(ZUPDATE): - jam(); - if (regTablePtr->subscriptionUpdateTriggers.isEmpty()) { - // Table has no active triggers monitoring updates at commit - jam(); - goto end; - } - - // If any fired immediate update trigger then fetch after tuple - // and send two FIRETRIGORD one with before tuple and one with after tuple - fireDetachedTriggers(req_struct, - regTablePtr->subscriptionUpdateTriggers, - regOperPtr, disk); - break; - default: - ndbrequire(false); - break; - } - -end: - regOperPtr->op_struct.op_type = save_type; - req_struct->m_tuple_ptr = save_ptr; -} - -void -Dbtup::fireImmediateTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* const regOperPtr, - bool disk) -{ - TriggerPtr trigPtr; - triggerList.first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - if (trigPtr.p->monitorAllAttributes || - trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - jam(); - executeTrigger(req_struct, - trigPtr.p, - regOperPtr, - disk); - }//if - triggerList.next(trigPtr); - }//while -}//Dbtup::fireImmediateTriggers() - -#if 0 -void -Dbtup::fireDeferredTriggers(Signal* signal, - 
KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* const regOperPtr) -{ - TriggerPtr trigPtr; - triggerList.first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - if (trigPtr.p->monitorAllAttributes || - trigPtr.p->attributeMask.overlaps(req_struct->changeMask)) { - jam(); - executeTrigger(req_struct, - trigPtr, - regOperPtr); - }//if - triggerList.next(trigPtr); - }//while -}//Dbtup::fireDeferredTriggers() -#endif - -void -Dbtup::fireDetachedTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* const regOperPtr, - bool disk) -{ - - TriggerPtr trigPtr; - - /** - * Set disk page - */ - req_struct->m_disk_page_ptr.i = m_pgman.m_ptr.i; - - ndbrequire(regOperPtr->is_first_operation()); - triggerList.first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - if ((trigPtr.p->monitorReplicas || - regOperPtr->op_struct.primary_replica) && - (trigPtr.p->monitorAllAttributes || - trigPtr.p->attributeMask.overlaps(req_struct->changeMask))) { - jam(); - executeTrigger(req_struct, - trigPtr.p, - regOperPtr, - disk); - } - triggerList.next(trigPtr); - } -} - -void Dbtup::executeTriggers(KeyReqStruct *req_struct, - DLList& triggerList, - Operationrec* regOperPtr, - bool disk) -{ - TriggerPtr trigPtr; - triggerList.first(trigPtr); - while (trigPtr.i != RNIL) { - jam(); - executeTrigger(req_struct, - trigPtr.p, - regOperPtr, - disk); - triggerList.next(trigPtr); - - } -} - -void Dbtup::executeTrigger(KeyReqStruct *req_struct, - TupTriggerData* const trigPtr, - Operationrec* const regOperPtr, - bool disk) -{ - /** - * The block below does not work together with GREP. - * I have 2 db nodes (2 replicas) -> one node group. - * I want to have FIRETRIG_ORD sent to all SumaParticipants, - * from all nodes in the node group described above. However, - * only one of the nodes in the node group actually sends the - * FIRE_TRIG_ORD, and the other node enters this "hack" below. - * I don't really know what the code snippet below does, but it - * does not work with GREP the way Lars and I want it. - * We need to have triggers fired from both the primary and the - * backup replica, not only the primary as it is now. - * - * Note: In Suma, I have changed triggers to be created with - * setMonitorReplicas(true). - * /Johan - * - * See RT 709 - */ - // XXX quick fix to NR, should fix in LQHKEYREQ instead - /* - if (refToBlock(req_struct->TC_ref) == DBLQH) { - jam(); - return; - } - */ - Signal* signal= req_struct->signal; - BlockReference ref = trigPtr->m_receiverBlock; - Uint32* const keyBuffer = &cinBuffer[0]; - Uint32* const afterBuffer = &coutBuffer[0]; - Uint32* const beforeBuffer = &clogMemBuffer[0]; - - Uint32 noPrimKey, noAfterWords, noBeforeWords; - FragrecordPtr regFragPtr; - regFragPtr.i= regOperPtr->fragmentPtr; - ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord); - - if (ref == BACKUP) { - jam(); - /* - In order for the implementation of BACKUP to work even when changing - primaries in the middle of the backup we need to set the trigger on - all replicas. This check checks whether this is the node where this - trigger should be fired. The check should preferably have been put - completely in the BACKUP block but it was about five times simpler - to put it here and also much faster for the backup (small overhead - for everybody else. 
- */ - signal->theData[0] = trigPtr->triggerId; - signal->theData[1] = regFragPtr.p->fragmentId; - EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2); - jamEntry(); - if (signal->theData[0] == 0) { - jam(); - return; - } - } - if (!readTriggerInfo(trigPtr, - regOperPtr, - req_struct, - regFragPtr.p, - keyBuffer, - noPrimKey, - afterBuffer, - noAfterWords, - beforeBuffer, - noBeforeWords, - disk)) { - jam(); - return; - } -//-------------------------------------------------------------------- -// Now all data for this trigger has been read. It is now time to send -// the trigger information consisting of two or three sets of TRIG_ -// ATTRINFO signals and one FIRE_TRIG_ORD signal. -// We start by setting common header info for all TRIG_ATTRINFO signals. -//-------------------------------------------------------------------- - bool executeDirect; - TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend(); - trigAttrInfo->setConnectionPtr(req_struct->TC_index); - trigAttrInfo->setTriggerId(trigPtr->triggerId); - - switch(trigPtr->triggerType) { - case (TriggerType::SECONDARY_INDEX): - jam(); - ref = req_struct->TC_ref; - executeDirect = false; - break; - case (TriggerType::SUBSCRIPTION): - case (TriggerType::SUBSCRIPTION_BEFORE): - jam(); - // Since only backup uses subscription triggers we send to backup directly for now - ref = trigPtr->m_receiverBlock; - executeDirect = true; - break; - case (TriggerType::READ_ONLY_CONSTRAINT): - terrorCode = ZREAD_ONLY_CONSTRAINT_VIOLATION; - // XXX should return status and abort the rest - return; - default: - ndbrequire(false); - executeDirect= false; // remove warning - }//switch - - req_struct->no_fired_triggers++; - - trigAttrInfo->setAttrInfoType(TrigAttrInfo::PRIMARY_KEY); - sendTrigAttrInfo(signal, keyBuffer, noPrimKey, executeDirect, ref); - - switch(regOperPtr->op_struct.op_type) { - case(ZINSERT): - jam(); - // Send AttrInfo signals with new attribute values - trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES); - sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref); - break; - case(ZDELETE): - if (trigPtr->sendBeforeValues) { - jam(); - trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); - sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); - } - break; - case(ZUPDATE): - jam(); - if (trigPtr->sendBeforeValues) { - jam(); - trigAttrInfo->setAttrInfoType(TrigAttrInfo::BEFORE_VALUES); - sendTrigAttrInfo(signal, beforeBuffer, noBeforeWords, executeDirect,ref); - } - trigAttrInfo->setAttrInfoType(TrigAttrInfo::AFTER_VALUES); - sendTrigAttrInfo(signal, afterBuffer, noAfterWords, executeDirect, ref); - break; - default: - ndbrequire(false); - } - sendFireTrigOrd(signal, - req_struct, - regOperPtr, - trigPtr, - regFragPtr.p->fragmentId, - noPrimKey, - noBeforeWords, - noAfterWords); -} - -Uint32 Dbtup::setAttrIds(Bitmask& attributeMask, - Uint32 m_no_of_attributesibutes, - Uint32* inBuffer) -{ - Uint32 bufIndx = 0; - for (Uint32 i = 0; i < m_no_of_attributesibutes; i++) { - jam(); - if (attributeMask.get(i)) { - jam(); - AttributeHeader::init(&inBuffer[bufIndx++], i, 0); - } - } - return bufIndx; -} - -bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, - Operationrec* const regOperPtr, - KeyReqStruct *req_struct, - Fragrecord* const regFragPtr, - Uint32* const keyBuffer, - Uint32& noPrimKey, - Uint32* const afterBuffer, - Uint32& noAfterWords, - Uint32* const beforeBuffer, - Uint32& noBeforeWords, - bool disk) -{ - noAfterWords = 0; - noBeforeWords = 0; - Uint32 
readBuffer[MAX_ATTRIBUTES_IN_TABLE]; - -//--------------------------------------------------------------------------- -// Set-up variables needed by readAttributes operPtr.p, tabptr.p -//--------------------------------------------------------------------------- - operPtr.p = regOperPtr; - tabptr.i = regFragPtr->fragTableId; - ptrCheckGuard(tabptr, cnoOfTablerec, tablerec); - - Tablerec* const regTabPtr = tabptr.p; - Uint32 num_attr= regTabPtr->m_no_of_attributes; - Uint32 descr_start= regTabPtr->tabDescriptor; - ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec); - - req_struct->check_offset[MM]= regTabPtr->get_check_offset(MM); - req_struct->check_offset[DD]= regTabPtr->get_check_offset(DD); - req_struct->attr_descr= &tableDescriptor[descr_start]; - -//-------------------------------------------------------------------- -// Read Primary Key Values -//-------------------------------------------------------------------- - Tuple_header *save0= req_struct->m_tuple_ptr; - if (regOperPtr->op_struct.op_type == ZDELETE && - !regOperPtr->is_first_operation()) - { - jam(); - req_struct->m_tuple_ptr= (Tuple_header*) - c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location); - } - - if (regTabPtr->need_expand(disk)) - prepare_read(req_struct, regTabPtr, disk); - - int ret = readAttributes(req_struct, - &tableDescriptor[regTabPtr->readKeyArray].tabDescr, - regTabPtr->noOfKeyAttr, - keyBuffer, - ZATTR_BUFFER_SIZE, - false); - ndbrequire(ret != -1); - noPrimKey= ret; - - req_struct->m_tuple_ptr = save0; - - Uint32 numAttrsToRead; - if ((regOperPtr->op_struct.op_type == ZUPDATE) && - (trigPtr->sendOnlyChangedAttributes)) { - jam(); -//-------------------------------------------------------------------- -// Update that sends only changed information -//-------------------------------------------------------------------- - Bitmask attributeMask; - attributeMask = trigPtr->attributeMask; - attributeMask.bitAND(req_struct->changeMask); - numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes, - &readBuffer[0]); - - } else if ((regOperPtr->op_struct.op_type == ZDELETE) && - (!trigPtr->sendBeforeValues)) { - jam(); -//-------------------------------------------------------------------- -// Delete without sending before values only read Primary Key -//-------------------------------------------------------------------- - return true; - } else { - jam(); -//-------------------------------------------------------------------- -// All others send all attributes that are monitored, except: -// Omit unchanged blob inlines on update i.e. 
-// attributeMask & ~ (blobAttributeMask & ~ changeMask) -//-------------------------------------------------------------------- - Bitmask attributeMask; - attributeMask = trigPtr->attributeMask; - if (regOperPtr->op_struct.op_type == ZUPDATE) { - Bitmask tmpMask = regTabPtr->blobAttributeMask; - tmpMask.bitANDC(req_struct->changeMask); - attributeMask.bitANDC(tmpMask); - } - numAttrsToRead = setAttrIds(attributeMask, regTabPtr->m_no_of_attributes, - &readBuffer[0]); - } - ndbrequire(numAttrsToRead < MAX_ATTRIBUTES_IN_TABLE); -//-------------------------------------------------------------------- -// Read Main tuple values -//-------------------------------------------------------------------- - if (regOperPtr->op_struct.op_type != ZDELETE) - { - jam(); - int ret = readAttributes(req_struct, - &readBuffer[0], - numAttrsToRead, - afterBuffer, - ZATTR_BUFFER_SIZE, - false); - ndbrequire(ret != -1); - noAfterWords= ret; - } else { - jam(); - noAfterWords = 0; - } - -//-------------------------------------------------------------------- -// Read Copy tuple values for UPDATE's -//-------------------------------------------------------------------- -// Initialise pagep and tuple offset for read of copy tuple -//-------------------------------------------------------------------- - if ((regOperPtr->op_struct.op_type == ZUPDATE || - regOperPtr->op_struct.op_type == ZDELETE) && - (trigPtr->sendBeforeValues)) { - jam(); - - Tuple_header *save= req_struct->m_tuple_ptr; - PagePtr tmp; - if(regOperPtr->is_first_operation()) - { - Uint32 *ptr= get_ptr(&tmp, ®OperPtr->m_tuple_location, regTabPtr); - req_struct->m_tuple_ptr= (Tuple_header*)ptr; - } - else - { - Uint32 *ptr= - c_undo_buffer.get_ptr(&req_struct->prevOpPtr.p->m_copy_tuple_location); - - req_struct->m_tuple_ptr= (Tuple_header*)ptr; - } - - if (regTabPtr->need_expand(disk)) - prepare_read(req_struct, regTabPtr, disk); - - int ret = readAttributes(req_struct, - &readBuffer[0], - numAttrsToRead, - beforeBuffer, - ZATTR_BUFFER_SIZE, - false); - req_struct->m_tuple_ptr= save; - ndbrequire(ret != -1); - noBeforeWords = ret; - if (trigPtr->m_receiverBlock != SUMA && - (noAfterWords == noBeforeWords) && - (memcmp(afterBuffer, beforeBuffer, noAfterWords << 2) == 0)) { -//-------------------------------------------------------------------- -// Although a trigger was fired it was not necessary since the old -// value and the new value was exactly the same -//-------------------------------------------------------------------- - jam(); - //XXX does this work with collations? 
- return false; - } - } - return true; -} - -void Dbtup::sendTrigAttrInfo(Signal* signal, - Uint32* data, - Uint32 dataLen, - bool executeDirect, - BlockReference receiverReference) -{ - TrigAttrInfo* const trigAttrInfo = (TrigAttrInfo *)signal->getDataPtrSend(); - Uint32 sigLen; - Uint32 dataIndex = 0; - do { - sigLen = dataLen - dataIndex; - if (sigLen > TrigAttrInfo::DataLength) { - jam(); - sigLen = TrigAttrInfo::DataLength; - } - MEMCOPY_NO_WORDS(trigAttrInfo->getData(), - data + dataIndex, - sigLen); - if (executeDirect) { - jam(); - EXECUTE_DIRECT(receiverReference, - GSN_TRIG_ATTRINFO, - signal, - TrigAttrInfo::StaticLength + sigLen); - jamEntry(); - } else { - jam(); - sendSignal(receiverReference, - GSN_TRIG_ATTRINFO, - signal, - TrigAttrInfo::StaticLength + sigLen, - JBB); - } - dataIndex += sigLen; - } while (dataLen != dataIndex); -} - -void Dbtup::sendFireTrigOrd(Signal* signal, - KeyReqStruct *req_struct, - Operationrec * const regOperPtr, - TupTriggerData* const trigPtr, - Uint32 fragmentId, - Uint32 noPrimKeyWords, - Uint32 noBeforeValueWords, - Uint32 noAfterValueWords) -{ - FireTrigOrd* const fireTrigOrd = (FireTrigOrd *)signal->getDataPtrSend(); - - fireTrigOrd->setConnectionPtr(req_struct->TC_index); - fireTrigOrd->setTriggerId(trigPtr->triggerId); - fireTrigOrd->fragId= fragmentId; - - switch(regOperPtr->op_struct.op_type) { - case(ZINSERT): - jam(); - fireTrigOrd->setTriggerEvent(TriggerEvent::TE_INSERT); - break; - case(ZDELETE): - jam(); - fireTrigOrd->setTriggerEvent(TriggerEvent::TE_DELETE); - break; - case(ZUPDATE): - jam(); - fireTrigOrd->setTriggerEvent(TriggerEvent::TE_UPDATE); - break; - default: - ndbrequire(false); - break; - } - - fireTrigOrd->setNoOfPrimaryKeyWords(noPrimKeyWords); - fireTrigOrd->setNoOfBeforeValueWords(noBeforeValueWords); - fireTrigOrd->setNoOfAfterValueWords(noAfterValueWords); - - switch(trigPtr->triggerType) { - case (TriggerType::SECONDARY_INDEX): - jam(); - sendSignal(req_struct->TC_ref, GSN_FIRE_TRIG_ORD, - signal, FireTrigOrd::SignalLength, JBB); - break; - case (TriggerType::SUBSCRIPTION_BEFORE): // Only Suma - jam(); - // Since only backup uses subscription triggers we - // send to backup directly for now - fireTrigOrd->setGCI(req_struct->gci); - fireTrigOrd->setHashValue(req_struct->hash_value); - fireTrigOrd->m_any_value = regOperPtr->m_any_value; - EXECUTE_DIRECT(trigPtr->m_receiverBlock, - GSN_FIRE_TRIG_ORD, - signal, - FireTrigOrd::SignalLengthSuma); - break; - case (TriggerType::SUBSCRIPTION): - jam(); - // Since only backup uses subscription triggers we - // send to backup directly for now - fireTrigOrd->setGCI(req_struct->gci); - EXECUTE_DIRECT(trigPtr->m_receiverBlock, - GSN_FIRE_TRIG_ORD, - signal, - FireTrigOrd::SignalWithGCILength); - break; - default: - ndbrequire(false); - break; - } -} - -/* - * Ordered index triggers. - * - * Insert: add entry to index - * Update: add entry to index, de|ay remove until commit - * Delete: do nothing, delay remove until commit - * Commit: remove entry delayed from update and delete - * Abort : remove entry added by insert and update - * - * See Notes.txt for the details. 
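A minimal sketch (not part of the original source) of how the lifecycle listed above maps an operation type and phase to TUX index maintenance work; the enum names are simplified stand-ins, while the real code drives TuxMaintReq::OpAdd and TuxMaintReq::OpRemove through the executeTux*Triggers routines that follow:

enum class Op { Insert, Update, Delete };
enum class Phase { Prepare, Commit, Abort };
enum class TuxAction { None, Add, Remove };

// Illustrative mapping only; the real logic also consults flags such as
// delete_insert_flag before deciding what to remove.
static TuxAction tuxAction(Op op, Phase phase) {
  switch (phase) {
  case Phase::Prepare: // entries are added while each operation executes
    return op == Op::Delete ? TuxAction::None : TuxAction::Add;
  case Phase::Commit:  // removes delayed by update and delete happen here
    return op == Op::Insert ? TuxAction::None : TuxAction::Remove;
  case Phase::Abort:   // entries added by insert and update are taken out
    return op == Op::Delete ? TuxAction::None : TuxAction::Remove;
  }
  return TuxAction::None;
}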
- */ - -int -Dbtup::executeTuxInsertTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - // fill in constant part - req->tableId = regFragPtr->fragTableId; - req->fragId = regFragPtr->fragmentId; - req->pageId = regOperPtr->m_tuple_location.m_page_no; - req->pageIndex = regOperPtr->m_tuple_location.m_page_idx; - req->tupVersion = regOperPtr->tupVersion; - req->opInfo = TuxMaintReq::OpAdd; - return addTuxEntries(signal, regOperPtr, regTabPtr); -} - -int -Dbtup::executeTuxUpdateTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - // fill in constant part - req->tableId = regFragPtr->fragTableId; - req->fragId = regFragPtr->fragmentId; - req->pageId = regOperPtr->m_tuple_location.m_page_no; - req->pageIndex = regOperPtr->m_tuple_location.m_page_idx; - req->tupVersion = regOperPtr->tupVersion; - req->opInfo = TuxMaintReq::OpAdd; - return addTuxEntries(signal, regOperPtr, regTabPtr); -} - -int -Dbtup::addTuxEntries(Signal* signal, - Operationrec* regOperPtr, - Tablerec* regTabPtr) -{ - if (ERROR_INSERTED(4022)) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - terrorCode = 9999; - return -1; - } - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - const DLList& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - Uint32 failPtrI; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - jam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - if (ERROR_INSERTED(4023) && - ! triggerList.hasNext(triggerPtr)) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - terrorCode = 9999; - failPtrI = triggerPtr.i; - goto fail; - } - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - jamEntry(); - if (req->errorCode != 0) { - jam(); - terrorCode = req->errorCode; - failPtrI = triggerPtr.i; - goto fail; - } - triggerList.next(triggerPtr); - } - return 0; -fail: - req->opInfo = TuxMaintReq::OpRemove; - triggerList.first(triggerPtr); - while (triggerPtr.i != failPtrI) { - jam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL; - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - jamEntry(); - ndbrequire(req->errorCode == 0); - triggerList.next(triggerPtr); - } -#ifdef VM_TRACE - ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl; -#endif - return -1; -} - -int -Dbtup::executeTuxDeleteTriggers(Signal* signal, - Operationrec* const regOperPtr, - Fragrecord* const regFragPtr, - Tablerec* const regTabPtr) -{ - // do nothing - return 0; -} - -void -Dbtup::executeTuxCommitTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - Uint32 tupVersion; - if (regOperPtr->op_struct.op_type == ZINSERT) { - if (! 
regOperPtr->op_struct.delete_insert_flag) - return; - jam(); - tupVersion= decr_tup_version(regOperPtr->tupVersion); - } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - jam(); - tupVersion= decr_tup_version(regOperPtr->tupVersion); - } else if (regOperPtr->op_struct.op_type == ZDELETE) { - if (regOperPtr->op_struct.delete_insert_flag) - return; - jam(); - tupVersion= regOperPtr->tupVersion; - } else { - ndbrequire(false); - tupVersion= 0; // remove warning - } - // fill in constant part - req->tableId = regFragPtr->fragTableId; - req->fragId = regFragPtr->fragmentId; - req->pageId = regOperPtr->m_tuple_location.m_page_no; - req->pageIndex = regOperPtr->m_tuple_location.m_page_idx; - req->tupVersion = tupVersion; - req->opInfo = TuxMaintReq::OpRemove; - removeTuxEntries(signal, regTabPtr); -} - -void -Dbtup::executeTuxAbortTriggers(Signal* signal, - Operationrec* regOperPtr, - Fragrecord* regFragPtr, - Tablerec* regTabPtr) -{ - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - // get version - Uint32 tupVersion; - if (regOperPtr->op_struct.op_type == ZINSERT) { - jam(); - tupVersion = regOperPtr->tupVersion; - } else if (regOperPtr->op_struct.op_type == ZUPDATE) { - jam(); - tupVersion = regOperPtr->tupVersion; - } else if (regOperPtr->op_struct.op_type == ZDELETE) { - jam(); - return; - } else { - ndbrequire(false); - tupVersion= 0; // remove warning - } - // fill in constant part - req->tableId = regFragPtr->fragTableId; - req->fragId = regFragPtr->fragmentId; - req->pageId = regOperPtr->m_tuple_location.m_page_no; - req->pageIndex = regOperPtr->m_tuple_location.m_page_idx; - req->tupVersion = tupVersion; - req->opInfo = TuxMaintReq::OpRemove; - removeTuxEntries(signal, regTabPtr); -} - -void -Dbtup::removeTuxEntries(Signal* signal, - Tablerec* regTabPtr) -{ - TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend(); - const DLList& triggerList = regTabPtr->tuxCustomTriggers; - TriggerPtr triggerPtr; - triggerList.first(triggerPtr); - while (triggerPtr.i != RNIL) { - jam(); - req->indexId = triggerPtr.p->indexId; - req->errorCode = RNIL, - EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ, - signal, TuxMaintReq::SignalLength); - jamEntry(); - // must succeed - ndbrequire(req->errorCode == 0); - triggerList.next(triggerPtr); - } -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp deleted file mode 100644 index 499a05a40f6..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ /dev/null @@ -1,420 +0,0 @@ -/* Copyright (c) 2004-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUP_C -#define DBTUP_VAR_ALLOC_CPP -#include "Dbtup.hpp" - -void Dbtup::init_list_sizes(void) -{ - c_min_list_size[0]= 200; - c_max_list_size[0]= 499; - - c_min_list_size[1]= 500; - c_max_list_size[1]= 999; - - c_min_list_size[2]= 1000; - c_max_list_size[2]= 4079; - - c_min_list_size[3]= 4080; - c_max_list_size[3]= 8159; - - c_min_list_size[4]= 0; - c_max_list_size[4]= 199; -} - -/* - Allocator for variable sized segments - Part of the external interface for variable sized segments - - This method is used to allocate and free variable sized tuples and - parts of tuples. This part can be used to implement variable sized - attributes without wasting memory. It can be used to support small - BLOB's attached to the record. It can also be used to support adding - and dropping attributes without the need to copy the entire table. - - SYNOPSIS - fragPtr A pointer to the fragment description - tabPtr A pointer to the table description - alloc_size Size of the allocated record - signal The signal object to be used if a signal needs to - be sent - RETURN VALUES - Returns true if allocation was successful otherwise false - - page_offset Page offset of allocated record - page_index Page index of allocated record - page_ptr The i and p value of the page where the record was - allocated -*/ -Uint32* Dbtup::alloc_var_rec(Fragrecord* fragPtr, - Tablerec* tabPtr, - Uint32 alloc_size, - Local_key* key, - Uint32 * out_frag_page_id) -{ - /** - * TODO alloc fix+var part - */ - Uint32 *ptr = alloc_fix_rec(fragPtr, tabPtr, key, out_frag_page_id); - if (unlikely(ptr == 0)) - { - return 0; - } - - ndbassert(alloc_size >= tabPtr->m_offsets[MM].m_fix_header_size); - - alloc_size -= tabPtr->m_offsets[MM].m_fix_header_size; - - - Local_key varref; - if (likely(alloc_var_part(fragPtr, tabPtr, alloc_size, &varref) != 0)) - { - Tuple_header* tuple = (Tuple_header*)ptr; - Var_part_ref* dst = tuple->get_var_part_ref_ptr(tabPtr); - dst->assign(&varref); - return ptr; - } - - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, key->m_page_no); - free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p); - return 0; -} - -Uint32* -Dbtup::alloc_var_part(Fragrecord* fragPtr, - Tablerec* tabPtr, - Uint32 alloc_size, - Local_key* key) -{ - PagePtr pagePtr; - pagePtr.i= get_alloc_page(fragPtr, (alloc_size + 1)); - if (pagePtr.i == RNIL) { - jam(); - if ((pagePtr.i= get_empty_var_page(fragPtr)) == RNIL) { - jam(); - return 0; - } - c_page_pool.getPtr(pagePtr); - ((Var_page*)pagePtr.p)->init(); - pagePtr.p->list_index = MAX_FREE_LIST - 1; - LocalDLList list(c_page_pool, - fragPtr->free_var_page_array[MAX_FREE_LIST-1]); - list.add(pagePtr); - /* - * Tup scan and index build check ZEMPTY_MM to skip un-init()ed - * page. Change state here. For varsize it means "page in use". 
- */ - pagePtr.p->page_state = ZTH_MM_FREE; - } else { - c_page_pool.getPtr(pagePtr); - jam(); - } - Uint32 idx= ((Var_page*)pagePtr.p) - ->alloc_record(alloc_size, (Var_page*)ctemp_page, Var_page::CHAIN); - - key->m_page_no = pagePtr.i; - key->m_page_idx = idx; - - update_free_page_list(fragPtr, pagePtr); - return ((Var_page*)pagePtr.p)->get_ptr(idx); -} - -/* - Deallocator for variable sized segments - Part of the external interface for variable sized segments - - SYNOPSIS - fragPtr A pointer to the fragment description - tabPtr A pointer to the table description - signal The signal object to be used if a signal needs to - be sent - page_ptr A reference to the page of the variable sized - segment - free_page_index Page index on page of variable sized segment - which is freed - RETURN VALUES - Returns true if deallocation was successful otherwise false -*/ -void Dbtup::free_var_rec(Fragrecord* fragPtr, - Tablerec* tabPtr, - Local_key* key, - Ptr pagePtr) -{ - /** - * TODO free fix + var part - */ - Uint32 *ptr = ((Fix_page*)pagePtr.p)->get_ptr(key->m_page_idx, 0); - Tuple_header* tuple = (Tuple_header*)ptr; - - Local_key ref; - Var_part_ref * varref = tuple->get_var_part_ref_ptr(tabPtr); - varref->copyout(&ref); - - free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p); - - c_page_pool.getPtr(pagePtr, ref.m_page_no); - ((Var_page*)pagePtr.p)->free_record(ref.m_page_idx, Var_page::CHAIN); - - ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS); - if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1) - { - jam(); - /* - This code could be used when we release pages. - remove_free_page(signal,fragPtr,page_header,page_header->list_index); - return_empty_page(fragPtr, page_header); - */ - update_free_page_list(fragPtr, pagePtr); - } else { - jam(); - update_free_page_list(fragPtr, pagePtr); - } - return; -} - -int -Dbtup::realloc_var_part(Fragrecord* fragPtr, Tablerec* tabPtr, PagePtr pagePtr, - Var_part_ref* refptr, Uint32 oldsz, Uint32 newsz) -{ - Uint32 add = newsz - oldsz; - Var_page* pageP = (Var_page*)pagePtr.p; - Local_key oldref; - refptr->copyout(&oldref); - - if (pageP->free_space >= add) - { - jam(); - if(!pageP->is_space_behind_entry(oldref.m_page_idx, add)) - { - if(0) printf("extra reorg"); - jam(); - /** - * In this case we need to reorganise the page to fit. To ensure we - * don't complicate matters we make a little trick here where we - * fool the reorg_page to avoid copying the entry at hand and copy - * that separately at the end. This means we need to copy it out of - * the page before reorg_page to save the entry contents. 
- */ - Uint32* copyBuffer= cinBuffer; - memcpy(copyBuffer, pageP->get_ptr(oldref.m_page_idx), 4*oldsz); - pageP->set_entry_len(oldref.m_page_idx, 0); - pageP->free_space += oldsz; - pageP->reorg((Var_page*)ctemp_page); - memcpy(pageP->get_free_space_ptr(), copyBuffer, 4*oldsz); - pageP->set_entry_offset(oldref.m_page_idx, pageP->insert_pos); - add += oldsz; - } - pageP->grow_entry(oldref.m_page_idx, add); - update_free_page_list(fragPtr, pagePtr); - } - else - { - Local_key newref; - Uint32 *src = pageP->get_ptr(oldref.m_page_idx); - Uint32 *dst = alloc_var_part(fragPtr, tabPtr, newsz, &newref); - if (unlikely(dst == 0)) - return -1; - - ndbassert(oldref.m_page_no != newref.m_page_no); - ndbassert(pageP->get_entry_len(oldref.m_page_idx) == oldsz); - memcpy(dst, src, 4*oldsz); - refptr->assign(&newref); - - pageP->free_record(oldref.m_page_idx, Var_page::CHAIN); - update_free_page_list(fragPtr, pagePtr); - } - - return 0; -} - - -/* ------------------------------------------------------------------------ */ -// Get a page from one of free lists. If the desired free list is empty we -// try with the next until we have tried all possible lists. -/* ------------------------------------------------------------------------ */ -Uint32 -Dbtup::get_alloc_page(Fragrecord* fragPtr, Uint32 alloc_size) -{ - Uint32 i, start_index, loop= 0; - PagePtr pagePtr; - - start_index= calculate_free_list_impl(alloc_size); - if (start_index == (MAX_FREE_LIST - 1)) { - jam(); - } else { - jam(); - ndbrequire(start_index < (MAX_FREE_LIST - 1)); - start_index++; - } - for (i= start_index; i < MAX_FREE_LIST; i++) { - jam(); - if (!fragPtr->free_var_page_array[i].isEmpty()) { - jam(); - return fragPtr->free_var_page_array[i].firstItem; - } - } - ndbrequire(start_index > 0); - i= start_index - 1; - LocalDLList list(c_page_pool, fragPtr->free_var_page_array[i]); - for(list.first(pagePtr); !pagePtr.isNull() && loop < 16; ) - { - jam(); - if (pagePtr.p->free_space >= alloc_size) { - jam(); - return pagePtr.i; - } - loop++; - list.next(pagePtr); - } - return RNIL; -} - -Uint32 -Dbtup::get_empty_var_page(Fragrecord* fragPtr) -{ - PagePtr ptr; - LocalSLList list(c_page_pool, fragPtr->m_empty_pages); - if (list.remove_front(ptr)) - { - return ptr.i; - } - - Uint32 cnt; - allocConsPages(10, cnt, ptr.i); - fragPtr->noOfVarPages+= cnt; - if (unlikely(cnt == 0)) - { - return RNIL; - } - - PagePtr ret = ptr; - for (Uint32 i = 0; iphysical_page_id = ptr.i; - ptr.p->page_state = ZEMPTY_MM; - ptr.p->nextList = ptr.i + 1; - ptr.p->prevList = RNIL; - ptr.p->frag_page_id = RNIL; - } - - if (cnt > 1) - { - ptr.p->nextList = RNIL; - list.add(ret.i + 1, ptr); - } - - c_page_pool.getPtr(ret); - - Var_page* page = (Var_page*)ret.p; - page->chunk_size = cnt; - page->next_chunk = fragPtr->m_var_page_chunks; - fragPtr->m_var_page_chunks = ret.i; - - return ret.i; -} - -/* ------------------------------------------------------------------------ */ -// Check if the page needs to go to a new free page list. 
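Before the routine below, a self-contained sketch of the size-class selection it depends on, using the upper bounds set in init_list_sizes() earlier in this file; the array length and the names kMaxListSize, kNumLists and calc_free_list are assumptions for illustration, while the real code iterates up to MAX_FREE_LIST inside calculate_free_list_impl() and fails with ndbrequire() rather than assert():

#include <cstdint>
#include <cassert>

static const uint32_t kMaxListSize[] = { 499, 999, 4079, 8159 };
static const uint32_t kNumLists = 4;

// First list whose guaranteed upper bound can hold the free space
// (first-fit by c_max_list_size, mirroring calculate_free_list_impl()).
static uint32_t calc_free_list(uint32_t free_space) {
  for (uint32_t i = 0; i < kNumLists; i++)
    if (free_space <= kMaxListSize[i])
      return i;
  assert(false); // a page can never have more free space than the largest class
  return 0;
}

// calc_free_list(300) == 0, calc_free_list(600) == 1, calc_free_list(5000) == 3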
-/* ------------------------------------------------------------------------ */ -void Dbtup::update_free_page_list(Fragrecord* fragPtr, - Ptr pagePtr) -{ - Uint32 free_space, list_index; - free_space= pagePtr.p->free_space; - list_index= pagePtr.p->list_index; - if ((free_space < c_min_list_size[list_index]) || - (free_space > c_max_list_size[list_index])) { - Uint32 new_list_index= calculate_free_list_impl(free_space); - if (list_index != MAX_FREE_LIST) { - jam(); - /* - * Only remove it from its list if it is in a list - */ - LocalDLList - list(c_page_pool, fragPtr->free_var_page_array[list_index]); - list.remove(pagePtr); - } - if (free_space < c_min_list_size[new_list_index]) { - /* - We have not sufficient amount of free space to put it into any - free list. Thus the page will not be available for new inserts. - This can only happen for the free list with least guaranteed - free space. - */ - jam(); - ndbrequire(new_list_index == 0); - pagePtr.p->list_index= MAX_FREE_LIST; - } else { - jam(); - LocalDLList list(c_page_pool, - fragPtr->free_var_page_array[new_list_index]); - list.add(pagePtr); - pagePtr.p->list_index = new_list_index; - } - } -} - -/* ------------------------------------------------------------------------ */ -// Given size of free space, calculate the free list to put it into -/* ------------------------------------------------------------------------ */ -Uint32 Dbtup::calculate_free_list_impl(Uint32 free_space_size) const -{ - Uint32 i; - for (i = 0; i < MAX_FREE_LIST; i++) { - jam(); - if (free_space_size <= c_max_list_size[i]) { - jam(); - return i; - } - } - ndbrequire(false); - return 0; -} - -Uint32* -Dbtup::alloc_var_rowid(Fragrecord* fragPtr, - Tablerec* tabPtr, - Uint32 alloc_size, - Local_key* key, - Uint32 * out_frag_page_id) -{ - Uint32 *ptr = alloc_fix_rowid(fragPtr, tabPtr, key, out_frag_page_id); - if (unlikely(ptr == 0)) - { - return 0; - } - - ndbassert(alloc_size >= tabPtr->m_offsets[MM].m_fix_header_size); - - alloc_size -= tabPtr->m_offsets[MM].m_fix_header_size; - - Local_key varref; - if (likely(alloc_var_part(fragPtr, tabPtr, alloc_size, &varref) != 0)) - { - Tuple_header* tuple = (Tuple_header*)ptr; - Var_part_ref* dst = (Var_part_ref*)tuple->get_var_part_ref_ptr(tabPtr); - dst->assign(&varref); - return ptr; - } - - PagePtr pagePtr; - c_page_pool.getPtr(pagePtr, key->m_page_no); - free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p); - return 0; -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/Makefile.am b/storage/ndb/src/kernel/blocks/dbtup/Makefile.am deleted file mode 100644 index 3c424449dc5..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2004-2005 MySQL AB -# Use is subject to license terms -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -include $(top_srcdir)/storage/ndb/config/common.mk.am -include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am - -EXTRA_PROGRAMS = test_varpage -test_varpage_SOURCES = test_varpage.cpp tuppage.cpp -test_varpage_LDFLAGS = @ndb_bin_am_ldflags@ \ - $(top_builddir)/storage/ndb/src/libndbclient.la \ - $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/dbug/libdbug.a \ - $(top_builddir)/strings/libmystrings.a diff --git a/storage/ndb/src/kernel/blocks/dbtup/Notes.txt b/storage/ndb/src/kernel/blocks/dbtup/Notes.txt deleted file mode 100644 index c2973bb0a76..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/Notes.txt +++ /dev/null @@ -1,198 +0,0 @@ -Operations, tuples, versions -============================ - -Operation types. - -INSERT insert new original tuple, or insert after delete -UPDATE update -DELETE delete - -Following need not be considered here. - -READ does not change tuples or versions -WRITE turns into INSERT or UPDATE in LQH - -We use more specific names in some cases: - -first/INSERT initial insert of new tuple -delete/INSERT INSERT preceded by DELETE -DELETE/last DELETE as last operation -DELETE/insert DELETE followed by INSERT - -Tuple + op Can be followed by --------------- ------------------ -does not exist first/INSERT -tuple exists UPDATE DELETE -INSERT UPDATE DELETE -UPDATE UPDATE DELETE -DELETE delete/INSERT - -Operations on same tuple are kept in doubly linked list until -commit or abort. The links at both ends are RNIL i.e. the list -is not circular. The links are: - -nextActiveOp the operation BEFORE this one, in event order -prevActiveOp the operation AFTER this one, in event order - -Operations are done on the "original tuple" i.e. the tuple is -modified in place. If an operation is about to write over data -in original tuple, it first copies the tuple to a "copy tuple". - -Operation Copy tuple ---------- ---------- -first/INSERT no -delete/INSERT yes (this is in effect an update) -UPDATE yes -DELETE no - -The operation points to the tuples via: - -realPageId page i-value of original tuple -pageOffset word offset of original tuple on the page -realPageIdC page i-value of copy tuple or RNIL is no copy exists -pageOffsetC word offset of copy tuple on the page - -The original tuple and the copy tuple (if any) point back to -the operation via word 0. In copy tuple this pointer is never -changed. In original tuple however it always points to the LATEST -existing operation i.e. the one with prevActiveOp == RNIL. -Thus word 0 of original tuple is changed on 2 occasions: - -- when a new operation is added to the list -- when commit or abort removes the latest operation - -Note that commit/abort of operations occurs in random order. -The list is adjusted accordingly. - -Versions --------- - -Tuple version is stored in tuple word 1. A new original tuple -gets version 0. The version is incremented by each new operation -which makes a copy tuple. Version number wraps around at 15 bits. - -When a copy tuple is made, the version in original tuple is copied -to copy tuple as part of tuple data. This takes place before -the version in original tuple is updated. - -Each operation record contains tuple version called tupVersion. 
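Before the per-operation rules below, a minimal sketch of the 15-bit wrap-around just described; the mask value is an assumption taken from "wraps around at 15 bits", while the kernel code expresses it through ZTUP_VERSION_BITS and helpers such as decr_tup_version():

#include <cstdint>

static const uint32_t TUP_VERSION_MASK = 0x7FFF; // 15 bits

static uint32_t incr_tup_version(uint32_t v) { return (v + 1) & TUP_VERSION_MASK; }
static uint32_t decr_tup_version(uint32_t v) { return (v - 1) & TUP_VERSION_MASK; }

// incr_tup_version(0x7FFF) == 0 and decr_tup_version(0) == 0x7FFF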
- -- at insert of new original tuple, tupVersion is set to 0 - -- if tuple already exists, the FIRST operation (in event order) - reads tupVersion from tuple word 1. If the operation is - not DELETE, the version is incremented - -- subsequent operation reads tupVersion from the operation - BEFORE it (nextActiveOp). If this subsequent operation is - not DELETE, the version is incremented - -When the operation writes the tuple it sets word 1 to tupVersion. -In detail, per operation type, where INSERT is divided into -insert of new original tuple and insert after delete: - -Operation Copy Increment Set version in original ---------- ---- --------- ----------------------- -first/INSERT no no yes, to 0 -delete/INSERT yes yes yes -UPDATE yes yes yes -DELETE no no no - -Thus an existing version is incremented if and only if -a copy tuple is made. - -Ordered index maintenance -------------------------- - -Each index entry has logical tuple address and tuple version. -Index entries are added during prepare phase (when each operation -is executed) and removed during commit or abort phase. - -Access to correct tuple version (original or copy) is required -in TUX which reads index key values 1) to check that at least one -is not null 2) to do tree search 3) to set min/max prefixes. -See "Read attributes" below. - -An additional complication is that commit/abort of operations -arrives in random order. So we cannot check for, for example, -DELETE/insert by looking at prevActiveOp. - -Phase Op Action Version in ------ -- ------ ---------- -prepare INSERT add op and original -prepare UPDATE add op and original -prepare DELETE none - - -commit first/INSERT none - -commit delete/INSERT remove copy tuple 1) -commit UPDATE remove copy tuple 1) -commit DELETE/last remove op and original -commit DELETE/insert none - - -abort INSERT remove op -abort UPDATE remove op -abort DELETE none - - -1) alternatively, store prevTupVersion in operation record. - -Abort from ordered index error ------------------------------- - -Obviously, index update failure causes operation failure. -The operation is then aborted later by TC. - -The problem here is with multiple indexes. Some may have been -updated successfully before the one that failed. Therefore -the trigger code aborts the successful ones already in -the prepare phase. - -In other words, multiple indexes are treated as one. - -Abort from any cause --------------------- - -[ hairy stuff ] - -Read attributes, query status ------------------------------ - -TUP_READ_ATTRS signal (or equivalent direct call) reads attribute -values. Input is logical address of original tuple and tuple -version. The steps are: - -- Translate logical address to physical address of original tuple. - -- If version of original tuple in word 1 is right, stop. - -- Otherwise word 0 points to LATEST not yet deleted operation. - Walk through operation list via nextActiveOp. - -- If an operation on the list has realPageIdC == RNIL, skip it. - -- Otherwise find copy tuple via realPageIdC, pageOffsetC. - If the version of the copy tuple in word 1 is right, stop. - -- Call readAttributes() on the tuple found (original or copy). - -In short, the version must exist in some not yet deleted tuple, -either in original or in some copy. - -Note that this must work during all phases since index code -needs to read index key attributes from correct tuple version in -each add/remove operation. - -TUP_QUERY_TH signal (or equivalent direct call) does same search -for tuple version. 
It is called from index scan and returns info -used to decide if the scan can see the tuple. - -This signal may also be called during any phase since commit/abort -of all operations is not done in one time-slice. - -Problems --------- - -Current abort code can destroy a tuple version too early. This -happens in test case "ticuur" (insert-commit-update-update-rollback), -if abort of first update arrives before abort of second update. - -vim: set textwidth=68: diff --git a/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.cpp deleted file mode 100644 index bb45f4e0e0e..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.cpp +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "Undo_buffer.hpp" -#define DBTUP_C -#include "Dbtup.hpp" - -#if ZPAGE_STATE_POS != 0 -#error "PROBLEM!" -#endif - -struct UndoPage -{ - File_formats::Page_header m_page_header; - Uint32 _tupdata1; - Uint32 m_state; // Used by buddy alg - Uint32 m_words_used; - Uint32 m_ref_count; - Uint32 m_data[GLOBAL_PAGE_SIZE_WORDS-4-(sizeof(File_formats::Page_header)>>2)]; - - STATIC_CONST( DATA_WORDS = GLOBAL_PAGE_SIZE_WORDS-4-(sizeof(File_formats::Page_header)>>2) ); -}; - -Undo_buffer::Undo_buffer(Dbtup* tup) -{ - m_tup= tup; - m_first_free= RNIL; -} - -Uint32 * -Undo_buffer::alloc_copy_tuple(Local_key* dst, Uint32 words) -{ - UndoPage* page; - assert(words); - if(m_first_free == RNIL) - { - Uint32 count; - m_tup->allocConsPages(1, count, m_first_free); - if(count == 0) - return 0; - page= (UndoPage*)m_tup->c_page_pool.getPtr(m_first_free); - page->m_state= ~ZFREE_COMMON; - page->m_words_used= 0; - page->m_ref_count= 0; - } - - page= (UndoPage*)m_tup->c_page_pool.getPtr(m_first_free); - - Uint32 pos= page->m_words_used; - if(words + pos > UndoPage::DATA_WORDS) - { - m_first_free= RNIL; - return alloc_copy_tuple(dst, words); - } - - dst->m_page_no = m_first_free; - dst->m_page_idx = pos; - - page->m_ref_count++; - page->m_words_used = pos + words; - return page->m_data + pos; -} - -void -Undo_buffer::shrink_copy_tuple(Local_key* key, Uint32 words) -{ - assert(key->m_page_no == m_first_free); - UndoPage* page= (UndoPage*)m_tup->c_page_pool.getPtr(key->m_page_no); - assert(page->m_words_used >= words); - page->m_words_used -= words; -} - -void -Undo_buffer::free_copy_tuple(Local_key* key) -{ - UndoPage* page= (UndoPage*)m_tup->c_page_pool.getPtr(key->m_page_no); - Uint32 cnt= page->m_ref_count; - assert(cnt); - - page->m_ref_count= cnt - 1; - - if(cnt - 1 == 0) - { - page->m_words_used= 0; - if(m_first_free == key->m_page_no) - { - //ndbout_c("resetting page"); - } - else - { - //ndbout_c("returning page"); - m_tup->returnCommonArea(key->m_page_no, 1); - } - } - key->setNull(); -} - -Uint32 * -Undo_buffer::get_ptr(Local_key* key) -{ - return 
((UndoPage*)(m_tup->c_page_pool.getPtr(key->m_page_no)))->m_data+key->m_page_idx; -} - diff --git a/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.hpp b/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.hpp deleted file mode 100644 index 67ddefd0480..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/Undo_buffer.hpp +++ /dev/null @@ -1,57 +0,0 @@ -/* Copyright (c) 2003, 2005, 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __UNDO_BUFFER_HPP -#define __UNDO_BUFFER_HPP - -#include -#include - -struct Undo_buffer -{ - Undo_buffer(class Dbtup*); - - /** - * Alloc space for a copy tuple of size words - * store address to copy in dst - * supply pointer to original in curr - * - * @return 0 if unable to alloc space - */ - Uint32 * alloc_copy_tuple(Local_key* dst, Uint32 words); - - /** - * Shrink size of copy tuple - * note: Only shrink latest allocated tuple - */ - void shrink_copy_tuple(Local_key* dst, Uint32 words); - - /** - * Free space for copy tuple at key - */ - void free_copy_tuple(Local_key* key); - - /** - * Get pointer to copy tuple - */ - Uint32 * get_ptr(Local_key* key); - -private: - class Dbtup* m_tup; - Uint32 m_first_free; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtup/test_varpage.cpp b/storage/ndb/src/kernel/blocks/dbtup/test_varpage.cpp deleted file mode 100644 index 2e52fe04949..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/test_varpage.cpp +++ /dev/null @@ -1,297 +0,0 @@ -/* Copyright (C) 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include "tuppage.hpp" -#include - -struct Record -{ - Uint32 idx; - Uint32 size; - Uint32* data; -}; - -NdbOut& -operator <<(NdbOut& out, const Record& rec) -{ - out << "[ idx: " << rec.idx << " sz: " << rec.size << " ]"; - return out; -} - -#define TRACE(x) x - -static -bool -cmp(const Uint32 *p1, const Uint32 *p2, Uint32 words) -{ - if(memcmp(p1, p2, 4*words) == 0) - return true; - - for(Uint32 i = 0; i free; - for(Uint32 i = page.high_index - 1; i > 0; i--) - { - if (page.get_index_word(i) & page.FREE) - { - free.push_back(i); - if (free.size() > 100) - break; - } - } - if (free.size()) - { - rec.idx = free[rand() % free.size()]; - if (page.alloc_record(rec.idx, rec.size, &tmp) != rec.idx) - { - abort(); - } - } - else - { - rec.idx = page.high_index; - if (page.alloc_record(rec.idx, rec.size, &tmp) != rec.idx) - { - if (rec.size + 1 != page.free_space) - abort(); - delete [] rec.data; - ndbout_c(" FAIL"); - break; - } - } - } - else if(rnd < 80) - { - // Alloc with id, outside of directory - rec.idx = page.high_index + (rand() % (page.free_space - rec.size)); - if (page.alloc_record(rec.idx, rec.size, &tmp) != rec.idx) - { - abort(); - } - } - else - { - rec.idx = page.high_index + (page.free_space - rec.size) + 1; - if (page.alloc_record(rec.idx, rec.size, &tmp) == rec.idx) - { - abort(); - } - delete [] rec.data; - ndbout_c(" FAIL"); - break; - } - - Uint32 pos = page.get_ptr(rec.idx) - page.m_data; - ndbout << " -> " << rec.idx - << " pos: " << pos << endl; - Uint32* ptr= page.get_ptr(rec.idx); - memcpy(ptr, rec.data, 4*rec.size); - records[allocated++] = rec; - break; - } - case 1: // Free - { - int no= rand() % allocated; - Record rec= records[no]; - Uint32 pos = page.get_ptr(rec.idx) - page.m_data; - ndbout << "Free hi: " << page.high_index << " no: " << no << " idx: " << rec.idx << " pos: " << pos << endl; - Uint32* ptr= page.get_ptr(rec.idx); - assert(page.get_entry_len(rec.idx) == rec.size); - cmp(ptr, rec.data, rec.size); - delete[] rec.data; - page.free_record(rec.idx, 0); - - for (unsigned k = no; k + 1 < allocated; k++) - records[k] = records[k+1]; - allocated--; - - break; - } - case 2: // Reorg - ndbout << "Reorg" << endl; - page.reorg(&tmp); - break; - case 3: - { - Uint32 free = page.free_space; - if (free <= 2) - { - goto shrink; - } - free /= 2; - int no = rand() % allocated; - Record rec= records[no]; - ndbout << "Expand no: " << no << " idx: " << rec.idx - << " add: " << free << " reorg: " - << !page.is_space_behind_entry(rec.idx, free) - << endl; - if (!page.is_space_behind_entry(rec.idx, free)) - { - Uint32 buffer[8192]; - Uint32 len = page.get_entry_len(rec.idx); - memcpy(buffer, page.get_ptr(rec.idx), 4*len); - page.set_entry_len(rec.idx, 0); - page.free_space += len; - page.reorg(&tmp); - memcpy(page.get_free_space_ptr(), buffer, 4*len); - page.set_entry_offset(rec.idx, page.insert_pos); - free += len; - records[no].size = 0; - } - - page.grow_entry(rec.idx, free); - records[no].size += free; - Uint32 *ptr = page.get_ptr(rec.idx); - Uint32 *new_data = new Uint32[records[no].size]; - for(Uint32 i= 0; i 1) - { - time_t seed = time(0); - srand(seed); - fprintf(stderr, "srand(%d)\n", seed); - } - // alloc, free, reorg, grow, shrink - - int t1[] = { 10, 60, 70, 85, 100 }; - int t2[] = { 30, 60, 70, 85, 100 }; - int t3[] = { 50, 60, 70, 85, 100 
}; - - do_test(10000, t1); - do_test(10000, t2); - do_test(10000, t3); - - return 0; -} - -template class Vector; - -// hp3750 -struct Signal { Signal(); int foo; }; -Signal::Signal(){} diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp deleted file mode 100644 index 524592b99ee..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.cpp +++ /dev/null @@ -1,480 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include "tuppage.hpp" -#include "Dbtup.hpp" - -/** - * Fix pages maintain a double linked list of free entries - * - * Var pages has a directory where each entry is - * [ C(1), F(1), L(15), P(15) ] - * C is chain bit, (is it a full tuple or just chain) - * F is free bit - * If true, L is prev free entry (in directory) - * P is next free entry (in directory) - * else - * L is len of entry - * P is pos of entry - */ - -Uint32 -Tup_fixsize_page::alloc_record() -{ - assert(free_space); - Uint32 page_idx = next_free_index; - assert(page_idx + 1 < DATA_WORDS); - - Uint32 prev = m_data[page_idx] >> 16; - Uint32 next = m_data[page_idx] & 0xFFFF; - - assert(prev == 0xFFFF); - assert(m_data[page_idx + 1] == FREE_RECORD); - - m_data[page_idx + 1] = 0; - if (next != 0xFFFF) - { - assert(free_space > 1); - Uint32 nextP = m_data[next]; - assert((nextP >> 16) == page_idx); - m_data[next] = 0xFFFF0000 | (nextP & 0xFFFF); - } - else - { - assert(free_space == 1); - } - - next_free_index = next; - free_space--; - return page_idx; -} - -Uint32 -Tup_fixsize_page::alloc_record(Uint32 page_idx) -{ - assert(page_idx + 1 < DATA_WORDS); - if (likely(free_space && m_data[page_idx + 1] == FREE_RECORD)) - { - Uint32 prev = m_data[page_idx] >> 16; - Uint32 next = m_data[page_idx] & 0xFFFF; - - assert(prev != 0xFFFF || (next_free_index == page_idx)); - if (prev == 0xFFFF) - { - next_free_index = next; - } - else - { - Uint32 prevP = m_data[prev]; - m_data[prev] = (prevP & 0xFFFF0000) | next; - } - - if (next != 0xFFFF) - { - Uint32 nextP = m_data[next]; - m_data[next] = (prev << 16) | (nextP & 0xFFFF); - } - free_space --; - m_data[page_idx + 1] = 0; - return page_idx; - } - return ~0; -} - -Uint32 -Tup_fixsize_page::free_record(Uint32 page_idx) -{ - Uint32 next = next_free_index; - - assert(page_idx + 1 < DATA_WORDS); - assert(m_data[page_idx + 1] != FREE_RECORD); - - if (next == 0xFFFF) - { - assert(free_space == 0); - } - else - { - assert(free_space); - assert(next + 1 < DATA_WORDS); - Uint32 nextP = m_data[next]; - assert((nextP >> 16) == 0xFFFF); - m_data[next] = (page_idx << 16) | (nextP & 0xFFFF); - assert(m_data[next + 1] == FREE_RECORD); - } - - next_free_index = page_idx; - m_data[page_idx] = 0xFFFF0000 | next; - m_data[page_idx + 1] = FREE_RECORD; - - return ++free_space; -} - -void -Tup_varsize_page::init() -{ - free_space= DATA_WORDS - 1; 
- high_index= 1; - insert_pos= 0; - next_free_index= END_OF_FREE_LIST; - m_page_header.m_page_type = File_formats::PT_Tup_varsize_page; -} - -Uint32 -Tup_varsize_page::alloc_record(Uint32 page_idx, Uint32 alloc_size, - Tup_varsize_page* temp) -{ - assert(page_idx); // 0 is not allowed - Uint32 free = free_space; - Uint32 largest_size= DATA_WORDS - (insert_pos + high_index); - Uint32 free_list = next_free_index; - - if (page_idx < high_index) - { - Uint32 *ptr = get_index_ptr(page_idx); - Uint32 word = *ptr; - - if (unlikely((free < alloc_size) || ! (word & FREE))) - { - return ~0; - } - - if (alloc_size >= largest_size) - { - /* - We can't fit this segment between the insert position and the end of - the index entries. We will pack the page so that all free space - exists between the insert position and the end of the index entries. - */ - reorg(temp); - } - - Uint32 next = (word & NEXT_MASK) >> NEXT_SHIFT; - Uint32 prev = (word & PREV_MASK) >> PREV_SHIFT; - - if (next != END_OF_FREE_LIST) - { - Uint32 * next_ptr = get_index_ptr(next); - Uint32 next_word = * next_ptr; - * next_ptr = (next_word & ~PREV_MASK) | (prev << PREV_SHIFT); - } - - if (prev != END_OF_FREE_LIST) - { - Uint32 * prev_ptr = get_index_ptr(prev); - Uint32 prev_word = * prev_ptr; - * prev_ptr = (prev_word & ~NEXT_MASK) | (next << NEXT_SHIFT); - } - else - { - assert(next_free_index == page_idx); - next_free_index = next; - } - - * ptr = insert_pos + (alloc_size << LEN_SHIFT); - free -= alloc_size; - } - else - { - /** - * We need to expand directory - */ - Uint32 hi = high_index; - Uint32 expand = (page_idx + 1 - hi); - Uint32 size = alloc_size + expand; - if (unlikely(size > free)) - { - return ~0; - } - - if (size >= largest_size) - { - /* - We can't fit this segment between the insert position and the end of - the index entries. We will pack the page so that all free space - exists between the insert position and the end of the index entries. - */ - reorg(temp); - } - - Uint32 *ptr = m_data + DATA_WORDS - hi; - if (page_idx == hi) - { - * ptr = insert_pos + (alloc_size << LEN_SHIFT); - } - else - { - if (free_list != END_OF_FREE_LIST) - { - Uint32 * prev_ptr = get_index_ptr(free_list); - Uint32 prev_word = * prev_ptr; - * prev_ptr = (prev_word & ~PREV_MASK) | (hi << PREV_SHIFT); - } - - for (; hi < page_idx;) - { - * ptr-- = FREE | (free_list << NEXT_SHIFT) | ((hi+1) << PREV_SHIFT); - free_list = hi++; - } - - * ptr++ = insert_pos + (alloc_size << LEN_SHIFT); - * ptr = ((* ptr) & ~PREV_MASK) | (END_OF_FREE_LIST << PREV_SHIFT); - - next_free_index = hi - 1; - } - high_index = hi + 1; - free -= size; - } - - free_space = free; - insert_pos += alloc_size; - - return page_idx; -} - -Uint32 -Tup_varsize_page::alloc_record(Uint32 alloc_size, - Tup_varsize_page* temp, Uint32 chain) -{ - assert(free_space >= alloc_size); - Uint32 largest_size= DATA_WORDS - (insert_pos + high_index); - if (alloc_size >= largest_size) { - /* - We can't fit this segment between the insert position and the end of - the index entries. We will pack the page so that all free space - exists between the insert position and the end of the index entries. - */ - reorg(temp); - largest_size= DATA_WORDS - (insert_pos + high_index); - } - assert(largest_size > alloc_size); - - Uint32 page_idx; - if (next_free_index == END_OF_FREE_LIST) { - /* - We are out of free index slots. 
We will extend the array of free - slots - */ - page_idx= high_index++; - free_space--; - } else { - // Pick an empty slot among the index entries - page_idx= next_free_index; - assert((get_index_word(page_idx) & FREE) == FREE); - assert(((get_index_word(page_idx) & PREV_MASK) >> PREV_SHIFT) == - END_OF_FREE_LIST); - next_free_index= (get_index_word(page_idx) & NEXT_MASK) >> NEXT_SHIFT; - assert(next_free_index); - if (next_free_index != END_OF_FREE_LIST) - { - Uint32 *ptr = get_index_ptr(next_free_index); - Uint32 word = *ptr; - * ptr = (word & ~PREV_MASK) | (END_OF_FREE_LIST << PREV_SHIFT); - } - } - - assert(chain == 0 || chain == CHAIN); - * get_index_ptr(page_idx) = insert_pos + chain + (alloc_size << LEN_SHIFT); - - insert_pos += alloc_size; - free_space -= alloc_size; - //ndbout_c("%p->alloc_record(%d%s) -> %d", this,alloc_size, (chain ? " CHAIN" : ""),page_idx); - return page_idx; -} - -Uint32 -Tup_varsize_page::free_record(Uint32 page_idx, Uint32 chain) -{ - //ndbout_c("%p->free_record(%d%s)", this, page_idx, (chain ? " CHAIN": "")); - Uint32 *index_ptr= get_index_ptr(page_idx); - Uint32 index_word= * index_ptr; - Uint32 entry_pos= (index_word & POS_MASK) >> POS_SHIFT; - Uint32 entry_len= (index_word & LEN_MASK) >> LEN_SHIFT; - assert(chain == 0 || chain == CHAIN); - assert((index_word & CHAIN) == chain); -#ifdef VM_TRACE - memset(m_data + entry_pos, 0xF2, 4*entry_len); -#endif - if (page_idx + 1 == high_index) { - /* - We are removing the last in the entry list. We could potentially - have several free entries also before this. To take that into account - we will rebuild the free list and thus compress it and update the - free space accordingly. - */ - rebuild_index(index_ptr); - } else { - if (next_free_index != END_OF_FREE_LIST) - { - Uint32 *ptr = get_index_ptr(next_free_index); - Uint32 word = *ptr; - assert(((word & PREV_MASK) >> PREV_SHIFT) == END_OF_FREE_LIST); - * ptr = (word & ~PREV_MASK) | (page_idx << PREV_SHIFT); - } - * index_ptr= FREE | next_free_index | (END_OF_FREE_LIST << PREV_SHIFT); - next_free_index= page_idx; - assert(next_free_index); - } - - free_space+= entry_len; - // If we're the "last" entry, decrease insert_pos - insert_pos -= (entry_pos + entry_len == insert_pos ? entry_len : 0); - - return free_space; -} - -void -Tup_varsize_page::rebuild_index(Uint32* index_ptr) -{ - Uint32 empty= 1; - Uint32 *end= m_data + DATA_WORDS; - - /** - * Scan until you find first non empty index pos - */ - for(index_ptr++; index_ptr < end; index_ptr++) - if((* index_ptr) & FREE) - empty++; - else - break; - - if(index_ptr == end) - { - // Totally free page - high_index = 1; - free_space += empty; - next_free_index = END_OF_FREE_LIST; - return; - } - - Uint32 next= END_OF_FREE_LIST; - Uint32 dummy; - Uint32 *prev_ptr = &dummy; - for(index_ptr++; index_ptr < end; index_ptr++) - { - if ((* index_ptr) & FREE) - { - * index_ptr= FREE | next; - next= (end - index_ptr); - * prev_ptr |= (next << PREV_SHIFT); - prev_ptr = index_ptr; - } - } - - * prev_ptr |= (END_OF_FREE_LIST << PREV_SHIFT); - - high_index -= empty; - free_space += empty; - next_free_index= next; - assert(next_free_index); -} - -void -Tup_varsize_page::reorg(Tup_varsize_page* copy_page) -{ - Uint32 new_insert_pos= 0; - Uint32 old_insert_pos= insert_pos; - - // Copy key data part of page to a temporary page. 
- memcpy(copy_page->m_data, m_data, 4*old_insert_pos); - assert(high_index > 0); - Uint32* index_ptr= get_index_ptr(high_index-1); - Uint32 *end_of_page= m_data + DATA_WORDS; - for (; index_ptr < end_of_page; index_ptr++) - { - Uint32 index_word= * index_ptr; - Uint32 entry_len= (index_word & LEN_MASK) >> LEN_SHIFT; - if (!(index_word & FREE) && entry_len) - { - /* - We found an index item that needs to be packed. - We will update the index entry and copy the data to the page. - */ - Uint32 entry_pos= (index_word & POS_MASK) >> POS_SHIFT; - assert(entry_pos + entry_len <= old_insert_pos); - assert(new_insert_pos + entry_len <= old_insert_pos); - * index_ptr= (new_insert_pos << POS_SHIFT) + (index_word & ~POS_MASK); - memcpy(m_data+new_insert_pos, copy_page->m_data+entry_pos, 4*entry_len); - - new_insert_pos += entry_len; - } - } - insert_pos= new_insert_pos; -} - -NdbOut& -operator<< (NdbOut& out, const Tup_varsize_page& page) -{ - out << "[ Varpage " << &page << ": free: " << page.free_space - << " (" << (page.DATA_WORDS - (page.insert_pos + page.high_index + 1)) << ")" - << " insert_pos: " << page.insert_pos - << " high_index: " << page.high_index - << " index: " << flush; - - const Uint32 *index_ptr= page.m_data+page.DATA_WORDS-1; - for(Uint32 i = 1; i> page.POS_SHIFT) - << " len: " << ((* index_ptr & page.LEN_MASK) >> page.LEN_SHIFT) - << ((* index_ptr & page.CHAIN) ? " CHAIN " : " ") - << "]" << flush; - else - out << " FREE ]" << flush; - } - - out << " free list: " << flush; - Uint32 next= page.next_free_index; - while(next != page.END_OF_FREE_LIST) - { - out << next << " " << flush; - next= ((* (page.m_data+page.DATA_WORDS-next)) & page.NEXT_MASK) >> page.NEXT_SHIFT; - } - out << "]"; - return out; -} - -NdbOut& -operator<< (NdbOut& out, const Tup_fixsize_page& page) -{ - out << "[ Fixpage " << &page - << ": frag_page: " << page.frag_page_id - << " page_no: " << page.m_page_no - << " file_no: " << page.m_file_no - << " table: " << page.m_table_id - << " fragment: " << page.m_fragment_id - << " uncommitted_used_space: " << page.uncommitted_used_space - << " free: " << page.free_space; - - out << " free list: " << hex << page.next_free_index << " " << flush; -#if 0 - Uint32 startTuple = page.next_free_index >> 16; - Uint32 cnt = 0; - Uint32 next= startTuple; - while((next & 0xFFFF) != 0xFFFF) - { - cnt++; - out << dec << "(" << (next & 0xFFFF) << " " << hex << next << ") " << flush; - assert(page.m_data[(next & 0xFFFF) + 1] == Dbtup::Tuple_header::FREE); - next= * (page.m_data + ( next & 0xFFFF )); - } - assert(cnt == page.free_space); -#endif - out << "]"; - return out; -} diff --git a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp b/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp deleted file mode 100644 index 9558a911a86..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtup/tuppage.hpp +++ /dev/null @@ -1,266 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __NDB_TUP_PAGE_HPP -#define __NDB_TUP_PAGE_HPP - -#include -#include "../diskpage.hpp" - -struct Tup_page -{ - struct File_formats::Page_header m_page_header; - Uint32 m_restart_seq; - Uint32 page_state; - union { - Uint32 next_page; - Uint32 nextList; - }; - union { - Uint32 prev_page; - Uint32 prevList; - }; - Uint32 first_cluster_page; - Uint32 last_cluster_page; - Uint32 next_cluster_page; - Uint32 prev_cluster_page; - Uint32 frag_page_id; - Uint32 physical_page_id; - Uint32 free_space; - Uint32 next_free_index; - Uint32 list_index; // free space in page bits/list, 0x8000 means not in free - Uint32 uncommitted_used_space; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_table_id; - Uint32 m_fragment_id; - Uint32 m_extent_no; - Uint32 m_extent_info_ptr; - Uint32 unused_ph[9]; - - STATIC_CONST( DATA_WORDS = File_formats::NDB_PAGE_SIZE_WORDS - 32 ); - - Uint32 m_data[DATA_WORDS]; -}; - -struct Tup_fixsize_page -{ - struct File_formats::Page_header m_page_header; - Uint32 m_restart_seq; - Uint32 page_state; - Uint32 next_page; - Uint32 prev_page; - Uint32 first_cluster_page; - Uint32 last_cluster_page; - Uint32 next_cluster_page; - Uint32 prev_cluster_page; - Uint32 frag_page_id; - Uint32 physical_page_id; - Uint32 free_space; - Uint32 next_free_index; - Uint32 list_index; - Uint32 uncommitted_used_space; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_table_id; - Uint32 m_fragment_id; - Uint32 m_extent_no; - Uint32 m_extent_info_ptr; - Uint32 unused_ph[9]; - - STATIC_CONST( FREE_RECORD = ~(Uint32)0 ); - STATIC_CONST( DATA_WORDS = File_formats::NDB_PAGE_SIZE_WORDS - 32 ); - - Uint32 m_data[DATA_WORDS]; - - Uint32* get_ptr(Uint32 page_idx, Uint32 rec_size){ - assert(page_idx + rec_size <= DATA_WORDS); - return m_data + page_idx; - } - - /** - * Alloc record from page - * return page_idx - **/ - Tup_fixsize_page() {} - Uint32 alloc_record(); - Uint32 alloc_record(Uint32 page_idx); - Uint32 free_record(Uint32 page_idx); -}; - -struct Tup_varsize_page -{ - struct File_formats::Page_header m_page_header; - Uint32 m_restart_seq; - Uint32 page_state; - Uint32 next_page; - Uint32 prev_page; - union { - Uint32 first_cluster_page; - Uint32 chunk_size; - }; - union { - Uint32 last_cluster_page; - Uint32 next_chunk; - }; - Uint32 next_cluster_page; - Uint32 prev_cluster_page; - Uint32 frag_page_id; - Uint32 physical_page_id; - Uint32 free_space; - Uint32 next_free_index; - Uint32 list_index; - Uint32 uncommitted_used_space; - Uint32 m_page_no; - Uint32 m_file_no; - Uint32 m_table_id; - Uint32 m_fragment_id; - Uint32 m_extent_no; - Uint32 m_extent_info_ptr; - Uint32 high_index; // size of index + 1 - Uint32 insert_pos; - Uint32 unused_ph[7]; - - STATIC_CONST( DATA_WORDS = File_formats::NDB_PAGE_SIZE_WORDS - 32 ); - STATIC_CONST( CHAIN = 0x80000000 ); - STATIC_CONST( FREE = 0x40000000 ); - STATIC_CONST( LEN_MASK = 0x3FFF8000 ); - STATIC_CONST( POS_MASK = 0x00007FFF ); - STATIC_CONST( LEN_SHIFT = 15 ); - STATIC_CONST( POS_SHIFT = 0 ); - STATIC_CONST( END_OF_FREE_LIST = POS_MASK ); - - STATIC_CONST( NEXT_MASK = POS_MASK ); - STATIC_CONST( NEXT_SHIFT = POS_SHIFT ); - STATIC_CONST( PREV_MASK = LEN_MASK ); - STATIC_CONST( PREV_SHIFT = LEN_SHIFT ); - - Uint32 m_data[DATA_WORDS]; - - Tup_varsize_page() {} - void init(); - - Uint32* get_free_space_ptr() { - return m_data+insert_pos; - 
} - - Uint32 largest_frag_size() const { - return DATA_WORDS - (high_index + insert_pos); - } - - Uint32 *get_index_ptr(Uint32 page_idx) { - assert(page_idx < high_index); - return (m_data + (DATA_WORDS - page_idx)); - } - - Uint32 get_index_word(Uint32 page_idx) const { - assert(page_idx < high_index); - return * (m_data + (DATA_WORDS - page_idx)); - } - - /** - * Alloc record from page, return page_idx - * temp is used when having to reorg page before allocating - */ - Uint32 alloc_record(Uint32 size, Tup_varsize_page* temp, Uint32 chain); - - /** - * Alloc page_idx from page, return page_idx - * temp is used when having to reorg page before allocating - */ - Uint32 alloc_record(Uint32 page_idx, Uint32 size, Tup_varsize_page* temp); - - /** - * Free record from page - */ - Uint32 free_record(Uint32 page_idx, Uint32 chain); - - void reorg(Tup_varsize_page* temp); - void rebuild_index(Uint32* ptr); - - /** - * Check if one can grow tuple wo/ reorg - */ - bool is_space_behind_entry(Uint32 page_index, Uint32 growth_len) const { - Uint32 idx= get_index_word(page_index); - Uint32 pos= (idx & POS_MASK) >> POS_SHIFT; - Uint32 len= (idx & LEN_MASK) >> LEN_SHIFT; - if ((pos + len == insert_pos) && - (insert_pos + growth_len < DATA_WORDS - high_index)) - return true; - return false; - } - - void grow_entry(Uint32 page_index, Uint32 growth_len) { - assert(free_space >= growth_len); - - Uint32 *pos= get_index_ptr(page_index); - Uint32 idx= *pos; - assert(! (idx & FREE)); - assert((((idx & POS_MASK) >> POS_SHIFT) + ((idx & LEN_MASK) >> LEN_SHIFT)) - == insert_pos); - - * pos= idx + (growth_len << LEN_SHIFT); - insert_pos+= growth_len; - free_space-= growth_len; - } - - void shrink_entry(Uint32 page_index, Uint32 new_size){ - Uint32 *pos= get_index_ptr(page_index); - Uint32 idx= *pos; - Uint32 old_pos = (idx & POS_MASK) >> POS_SHIFT; - Uint32 old_size = (idx & LEN_MASK) >> LEN_SHIFT; - - assert( ! (idx & FREE)); - assert(old_size >= new_size); - - * pos= (idx & ~LEN_MASK) + (new_size << LEN_SHIFT); - Uint32 shrink = old_size - new_size; -#ifdef VM_TRACE - memset(m_data + old_pos + new_size, 0xF1, 4 * shrink); -#endif - free_space+= shrink; - if(insert_pos == (old_pos + old_size)) - insert_pos -= shrink; - } - - Uint32* get_ptr(Uint32 page_idx) { - return m_data + ((get_index_word(page_idx) & POS_MASK) >> POS_SHIFT); - } - - void set_entry_offset(Uint32 page_idx, Uint32 offset){ - Uint32 *pos= get_index_ptr(page_idx); - * pos = (* pos & ~POS_MASK) + (offset << POS_SHIFT); - } - - void set_entry_len(Uint32 page_idx, Uint32 len) { - Uint32 *pos= get_index_ptr(page_idx); - * pos = (*pos & ~LEN_MASK) + (len << LEN_SHIFT); - } - - Uint32 get_entry_len(Uint32 page_idx) const { - return (get_index_word(page_idx) & LEN_MASK) >> LEN_SHIFT; - } - - Uint32 get_entry_chain(Uint32 page_idx) const { - return get_index_word(page_idx) & CHAIN; - } -}; - -NdbOut& operator<< (NdbOut& out, const Tup_varsize_page& page); -NdbOut& operator<< (NdbOut& out, const Tup_fixsize_page& page); - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp deleted file mode 100644 index 55a0c97b74a..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp +++ /dev/null @@ -1,1208 +0,0 @@ -/* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBTUX_H -#define DBTUX_H - -#include -#include -#include -#include -#include -#include -#include -#include - -// big brother -#include - -// signal classes -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// debug -#ifdef VM_TRACE -#include -#include -#endif - -// jams -#undef jam -#undef jamEntry -#ifdef DBTUX_GEN_CPP -#define jam() jamLine(10000 + __LINE__) -#define jamEntry() jamEntryLine(10000 + __LINE__) -#endif -#ifdef DBTUX_META_CPP -#define jam() jamLine(20000 + __LINE__) -#define jamEntry() jamEntryLine(20000 + __LINE__) -#endif -#ifdef DBTUX_MAINT_CPP -#define jam() jamLine(30000 + __LINE__) -#define jamEntry() jamEntryLine(30000 + __LINE__) -#endif -#ifdef DBTUX_NODE_CPP -#define jam() jamLine(40000 + __LINE__) -#define jamEntry() jamEntryLine(40000 + __LINE__) -#endif -#ifdef DBTUX_TREE_CPP -#define jam() jamLine(50000 + __LINE__) -#define jamEntry() jamEntryLine(50000 + __LINE__) -#endif -#ifdef DBTUX_SCAN_CPP -#define jam() jamLine(60000 + __LINE__) -#define jamEntry() jamEntryLine(60000 + __LINE__) -#endif -#ifdef DBTUX_SEARCH_CPP -#define jam() jamLine(70000 + __LINE__) -#define jamEntry() jamEntryLine(70000 + __LINE__) -#endif -#ifdef DBTUX_CMP_CPP -#define jam() jamLine(80000 + __LINE__) -#define jamEntry() jamEntryLine(80000 + __LINE__) -#endif -#ifdef DBTUX_STAT_CPP -#define jam() jamLine(90000 + __LINE__) -#define jamEntry() jamEntryLine(90000 + __LINE__) -#endif -#ifdef DBTUX_DEBUG_CPP -#define jam() jamLine(100000 + __LINE__) -#define jamEntry() jamEntryLine(100000 + __LINE__) -#endif -#ifndef jam -#define jam() jamLine(__LINE__) -#define jamEntry() jamEntryLine(__LINE__) -#endif - -#undef max -#undef min - -class Configuration; - -class Dbtux : public SimulatedBlock { -public: - Dbtux(Block_context& ctx); - virtual ~Dbtux(); - - // pointer to TUP instance in this thread - Dbtup* c_tup; - -private: - // sizes are in words (Uint32) - STATIC_CONST( MaxIndexFragments = MAX_FRAG_PER_NODE ); - STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX ); - STATIC_CONST( MaxAttrDataSize = 2048 ); -public: - STATIC_CONST( DescPageSize = 256 ); -private: - STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE ); - STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE ); - STATIC_CONST( ScanBoundSegmentSize = 7 ); - STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN ); - STATIC_CONST( MaxTreeDepth = 32 ); // strict - BLOCK_DEFINES(Dbtux); - - // forward declarations - struct DescEnt; - - // Pointer to array of Uint32 represents attribute data and bounds - - typedef Uint32 *Data; - inline AttributeHeader& ah(Data data) { - return *reinterpret_cast(data); - } - - typedef const Uint32* ConstData; - inline const AttributeHeader& ah(ConstData data) { - return *reinterpret_cast(data); - } - - // AttributeHeader size is assumed to be 1 word - STATIC_CONST( AttributeHeaderSize = 1 ); - - /* - * Logical tuple address, "local key". Identifies table tuples. 
- */ - typedef Uint32 TupAddr; - STATIC_CONST( NullTupAddr = (Uint32)-1 ); - - /* - * Physical tuple address in TUP. Provides fast access to table tuple - * or index node. Valid within the db node and across timeslices. - * Not valid between db nodes or across restarts. - * - * To avoid wasting an Uint16 the pageid is split in two. - */ - struct TupLoc { - private: - Uint16 m_pageId1; // page i-value (big-endian) - Uint16 m_pageId2; - Uint16 m_pageOffset; // page offset in words - public: - TupLoc(); - TupLoc(Uint32 pageId, Uint16 pageOffset); - Uint32 getPageId() const; - void setPageId(Uint32 pageId); - Uint32 getPageOffset() const; - void setPageOffset(Uint32 pageOffset); - bool operator==(const TupLoc& loc) const; - bool operator!=(const TupLoc& loc) const; - }; - - /* - * There is no const member NullTupLoc since the compiler may not be - * able to optimize it to TupLoc() constants. Instead null values are - * constructed on the stack with TupLoc(). - */ -#define NullTupLoc TupLoc() - - // tree definitions - - /* - * Tree entry. Points to a tuple in primary table via physical - * address of "original" tuple and tuple version. - * - * ZTUP_VERSION_BITS must be 15 (or less). - */ - struct TreeEnt; - friend struct TreeEnt; - struct TreeEnt { - TupLoc m_tupLoc; // address of original tuple - unsigned m_tupVersion : 15; // version - TreeEnt(); - // methods - bool eqtuple(const TreeEnt ent) const; - bool eq(const TreeEnt ent) const; - int cmp(const TreeEnt ent) const; - }; - STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 ); - static const TreeEnt NullTreeEnt; - - /* - * Tree node has 1) fixed part 2) a prefix of index key data for min - * entry 3) max and min entries 4) rest of entries 5) one extra entry - * used as work space. - * - * struct TreeNode part 1, size 6 words - * min prefix part 2, size TreeHead::m_prefSize - * max entry part 3 - * min entry part 3 - * rest of entries part 4 - * work entry part 5 - * - * There are 3 links to other nodes: left child, right child, parent. - * Occupancy (number of entries) is at least 1 except temporarily when - * a node is about to be removed. - */ - struct TreeNode; - friend struct TreeNode; - struct TreeNode { - TupLoc m_link[3]; // link to 0-left child 1-right child 2-parent - unsigned m_side : 2; // we are 0-left child 1-right child 2-root - unsigned m_balance : 2; // balance -1, 0, +1 plus 1 for Solaris CC - unsigned pad1 : 4; - Uint8 m_occup; // current number of entries - Uint32 m_nodeScan; // list of scans at this node - TreeNode(); - }; - STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 ); - - /* - * Tree node "access size" was for an early version with signal - * interface to TUP. It is now used only to compute sizes. - */ - enum AccSize { - AccNone = 0, - AccHead = 1, // part 1 - AccPref = 2, // parts 1-3 - AccFull = 3 // parts 1-5 - }; - - /* - * Tree header. There is one in each fragment. Contains tree - * parameters and address of root node. - */ - struct TreeHead; - friend struct TreeHead; - struct TreeHead { - Uint8 m_nodeSize; // words in tree node - Uint8 m_prefSize; // words in min prefix - Uint8 m_minOccup; // min entries in internal node - Uint8 m_maxOccup; // max entries in node - Uint32 m_entryCount; // stat: current entries - TupLoc m_root; // root node - TreeHead(); - // methods - unsigned getSize(AccSize acc) const; - Data getPref(TreeNode* node) const; - TreeEnt* getEntList(TreeNode* node) const; - }; - - /* - * Tree position. 
Specifies node, position within node (from 0 to - * m_occup), and whether the position is at an existing entry or - * before one (if any). Position m_occup points past the node and is - * also represented by position 0 of next node. Includes direction - * used by scan. - */ - struct TreePos; - friend struct TreePos; - struct TreePos { - TupLoc m_loc; // physical node address - Uint16 m_pos; // position 0 to m_occup - Uint8 m_dir; // see scanNext - TreePos(); - }; - - // packed metadata - - /* - * Descriptor page. The "hot" metadata for an index is stored as - * a contiguous array of words on some page. - */ - struct DescPage; - friend struct DescPage; - struct DescPage { - Uint32 m_nextPage; - Uint32 m_numFree; // number of free words - union { - Uint32 m_data[DescPageSize]; - Uint32 nextPool; - }; - DescPage(); - }; - typedef Ptr DescPagePtr; - ArrayPool c_descPagePool; - Uint32 c_descPageList; - - /* - * Header for index metadata. Size must be multiple of word size. - */ - struct DescHead { - unsigned m_indexId : 24; - unsigned pad1 : 8; - }; - STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 ); - - /* - * Attribute metadata. Size must be multiple of word size. - * - * Prefix comparison of char data must use strxfrm and binary - * comparison. The charset is currently unused. - */ - struct DescAttr { - Uint32 m_attrDesc; // standard AttributeDescriptor - Uint16 m_primaryAttrId; - unsigned m_typeId : 6; - unsigned m_charset : 10; - }; - STATIC_CONST( DescAttrSize = sizeof(DescAttr) >> 2 ); - - /* - * Complete metadata for one index. The array of attributes has - * variable size. - */ - friend struct DescEnt; - struct DescEnt { - DescHead m_descHead; - DescAttr m_descAttr[1]; // variable size data - }; - - // range scan - - /* - * Scan bounds are stored in linked list of segments. - */ - typedef DataBuffer ScanBound; - typedef DataBuffer::ConstDataBufferIterator ScanBoundIterator; - typedef DataBuffer::DataBufferPool ScanBoundPool; - ScanBoundPool c_scanBoundPool; - - // ScanLock - struct ScanLock { - Uint32 m_accLockOp; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr ScanLockPtr; - ArrayPool c_scanLockPool; - - /* - * Scan operation. - * - * Tuples are locked one at a time. The current lock op is set to - * RNIL as soon as the lock is obtained and passed to LQH. We must - * however remember all locks which LQH has not returned for unlocking - * since they must be aborted by us when the scan is closed. - * - * Scan state describes the entry we are interested in. There is - * a separate lock wait flag. It may be for current entry or it may - * be for an entry we were moved away from. In any case nothing - * happens with current entry before lock wait flag is cleared. - * - * An unfinished scan is always linked to some tree node, and has - * current position and direction (see comments at scanNext). There - * is also a copy of latest entry found. 
- */ - struct ScanOp; - friend struct ScanOp; - struct ScanOp { - enum { - Undef = 0, - First = 1, // before first entry - Current = 2, // at some entry - Found = 3, // return current as next scan result - Blocked = 4, // found and waiting for ACC lock - Locked = 5, // found and locked or no lock needed - Next = 6, // looking for next extry - Last = 7, // after last entry - Aborting = 8, // lock wait at scan close - Invalid = 9 // cannot return REF to LQH currently - }; - Uint16 m_state; - Uint16 m_lockwait; - Uint32 m_userPtr; // scanptr.i in LQH - Uint32 m_userRef; - Uint32 m_tableId; - Uint32 m_indexId; - Uint32 m_fragId; - Uint32 m_fragPtrI; - Uint32 m_transId1; - Uint32 m_transId2; - Uint32 m_savePointId; - // lock waited for or obtained and not yet passed to LQH - Uint32 m_accLockOp; - // locks obtained and passed to LQH but not yet returned by LQH - DLFifoList::Head m_accLockOps; - Uint8 m_readCommitted; // no locking - Uint8 m_lockMode; - Uint8 m_descending; - ScanBound m_boundMin; - ScanBound m_boundMax; - ScanBound* m_bound[2]; // pointers to above 2 - Uint16 m_boundCnt[2]; // number of bounds in each - TreePos m_scanPos; // position - TreeEnt m_scanEnt; // latest entry found - Uint32 m_nodeScan; // next scan at node (single-linked) - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - ScanOp(ScanBoundPool& scanBoundPool); - }; - typedef Ptr ScanOpPtr; - ArrayPool c_scanOpPool; - - // indexes and fragments - - /* - * Ordered index. Top level data structure. The primary table (table - * being indexed) lives in TUP. - */ - struct Index; - friend struct Index; - struct Index { - enum State { - NotDefined = 0, - Defining = 1, - Online = 2, // triggers activated and build done - Dropping = 9 - }; - State m_state; - DictTabInfo::TableType m_tableType; - Uint32 m_tableId; - Uint16 unused; - Uint16 m_numFrags; - Uint32 m_fragId[MaxIndexFragments]; - Uint32 m_fragPtrI[MaxIndexFragments]; - Uint32 m_descPage; // descriptor page - Uint16 m_descOff; // offset within the page - Uint16 m_numAttrs; - bool m_storeNullKey; - union { - Uint32 nextPool; - }; - Index(); - }; - typedef Ptr IndexPtr; - ArrayPool c_indexPool; - - /* - * Fragment of an index, as known to DIH/TC. Represents the two - * duplicate fragments known to LQH/ACC/TUP. Includes tree header. - * There are no maintenance operation records yet. - */ - struct Frag; - friend struct Frag; - struct Frag { - Uint32 m_tableId; // copy from index level - Uint32 m_indexId; - Uint16 unused; - Uint16 m_fragId; - Uint32 m_descPage; // copy from index level - Uint16 m_descOff; - Uint16 m_numAttrs; - bool m_storeNullKey; - TreeHead m_tree; - TupLoc m_freeLoc; // list of free index nodes - DLList m_scanList; // current scans on this fragment - Uint32 m_tupIndexFragPtrI; - Uint32 m_tupTableFragPtrI; - Uint32 m_accTableFragPtrI; - union { - Uint32 nextPool; - }; - Frag(ArrayPool& scanOpPool); - }; - typedef Ptr FragPtr; - ArrayPool c_fragPool; - - /* - * Fragment metadata operation. - */ - struct FragOp { - Uint32 m_userPtr; - Uint32 m_userRef; - Uint32 m_indexId; - Uint32 m_fragId; - Uint32 m_fragPtrI; - Uint32 m_fragNo; // fragment number starting at zero - Uint32 m_numAttrsRecvd; - union { - Uint32 nextPool; - }; - FragOp(); - }; - typedef Ptr FragOpPtr; - ArrayPool c_fragOpPool; - - // node handles - - /* - * A node handle is a reference to a tree node in TUP. It is used to - * operate on the node. Node handles are allocated on the stack. 
- */ - struct NodeHandle; - friend struct NodeHandle; - struct NodeHandle { - Frag& m_frag; // fragment using the node - TupLoc m_loc; // physical node address - TreeNode* m_node; // pointer to node storage - NodeHandle(Frag& frag); - NodeHandle(const NodeHandle& node); - NodeHandle& operator=(const NodeHandle& node); - // check if unassigned - bool isNull(); - // getters - TupLoc getLink(unsigned i); - unsigned getChilds(); // cannot spell - unsigned getSide(); - unsigned getOccup(); - int getBalance(); - Uint32 getNodeScan(); - // setters - void setLink(unsigned i, TupLoc loc); - void setSide(unsigned i); - void setOccup(unsigned n); - void setBalance(int b); - void setNodeScan(Uint32 scanPtrI); - // access other parts of the node - Data getPref(); - TreeEnt getEnt(unsigned pos); - TreeEnt getMinMax(unsigned i); - // for ndbrequire and ndbassert - void progError(int line, int cause, const char* file); - }; - - // methods - - /* - * DbtuxGen.cpp - */ - void execCONTINUEB(Signal* signal); - void execSTTOR(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - // utils - void setKeyAttrs(const Frag& frag); - void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData); - void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize); - void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize); - void unpackBound(const ScanBound& bound, Data data); - - /* - * DbtuxMeta.cpp - */ - void execTUXFRAGREQ(Signal* signal); - void execTUX_ADD_ATTRREQ(Signal* signal); - void execALTER_INDX_REQ(Signal* signal); - void execDROP_TAB_REQ(Signal* signal); - bool allocDescEnt(IndexPtr indexPtr); - void freeDescEnt(IndexPtr indexPtr); - void abortAddFragOp(Signal* signal); - void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData); - - /* - * DbtuxMaint.cpp - */ - void execTUX_MAINT_REQ(Signal* signal); - - /* - * DbtuxNode.cpp - */ - int allocNode(Signal* signal, NodeHandle& node); - void selectNode(NodeHandle& node, TupLoc loc); - void insertNode(NodeHandle& node); - void deleteNode(NodeHandle& node); - void setNodePref(NodeHandle& node); - // node operations - void nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList); - void nodePushUpScans(NodeHandle& node, unsigned pos); - void nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& en, Uint32* scanList); - void nodePopDownScans(NodeHandle& node, unsigned pos); - void nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList); - void nodePushDownScans(NodeHandle& node, unsigned pos); - void nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList); - void nodePopUpScans(NodeHandle& node, unsigned pos); - void nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i); - // scans linked to node - void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList); - void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList); - void moveScanList(NodeHandle& node, unsigned pos); - void linkScan(NodeHandle& node, ScanOpPtr scanPtr); - void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr); - bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr); - - /* - * DbtuxTree.cpp - */ - // add entry - void treeAdd(Frag& frag, TreePos treePos, TreeEnt ent); - void treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent); - void treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i); - void treeAddRebalance(Frag& 
frag, NodeHandle node, unsigned i); - // remove entry - void treeRemove(Frag& frag, TreePos treePos); - void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos); - void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i); - void treeRemoveLeaf(Frag& frag, NodeHandle node); - void treeRemoveNode(Frag& frag, NodeHandle node); - void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i); - // rotate - void treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i); - void treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i); - - /* - * DbtuxScan.cpp - */ - void execACC_SCANREQ(Signal* signal); - void execTUX_BOUND_INFO(Signal* signal); - void execNEXT_SCANREQ(Signal* signal); - void execACC_CHECK_SCAN(Signal* signal); - void execACCKEYCONF(Signal* signal); - void execACCKEYREF(Signal* signal); - void execACC_ABORTCONF(Signal* signal); - void scanFirst(ScanOpPtr scanPtr); - void scanFind(ScanOpPtr scanPtr); - void scanNext(ScanOpPtr scanPtr, bool fromMaintReq); - bool scanCheck(ScanOpPtr scanPtr, TreeEnt ent); - bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent); - void scanClose(Signal* signal, ScanOpPtr scanPtr); - void abortAccLockOps(Signal* signal, ScanOpPtr scanPtr); - void addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp); - void removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp); - void releaseScanOp(ScanOpPtr& scanPtr); - - /* - * DbtuxSearch.cpp - */ - bool searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); - bool searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos); - void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos); - void searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos); - void searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos); - - /* - * DbtuxCmp.cpp - */ - int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize); - - /* - * DbtuxStat.cpp - */ - void execREAD_PSEUDO_REQ(Signal* signal); - void statRecordsInRange(ScanOpPtr scanPtr, Uint32* out); - Uint32 getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir); - unsigned getPathToNode(NodeHandle node, Uint16* path); - - /* - * DbtuxDebug.cpp - */ - void execDUMP_STATE_ORD(Signal* signal); -#ifdef VM_TRACE - struct PrintPar { - char m_path[100]; // LR prefix - unsigned m_side; // expected side - TupLoc m_parent; // expected parent address - int m_depth; // returned depth - unsigned m_occup; // returned occupancy - TreeEnt m_minmax[2]; // returned subtree min and max - bool m_ok; // returned status - PrintPar(); - }; - void printTree(Signal* signal, Frag& frag, NdbOut& out); - void printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par); - friend class NdbOut& operator<<(NdbOut&, const TupLoc&); - friend class NdbOut& operator<<(NdbOut&, const TreeEnt&); - friend class NdbOut& operator<<(NdbOut&, const TreeNode&); - friend class NdbOut& operator<<(NdbOut&, const TreeHead&); - friend class NdbOut& operator<<(NdbOut&, const TreePos&); - friend class NdbOut& operator<<(NdbOut&, const DescAttr&); - friend class NdbOut& operator<<(NdbOut&, const ScanOp&); - friend class NdbOut& operator<<(NdbOut&, const Index&); - friend class NdbOut& operator<<(NdbOut&, const Frag&); - 
friend class NdbOut& operator<<(NdbOut&, const FragOp&); - friend class NdbOut& operator<<(NdbOut&, const NodeHandle&); - FILE* debugFile; - NdbOut debugOut; - unsigned debugFlags; - enum { - DebugMeta = 1, // log create and drop index - DebugMaint = 2, // log maintenance ops - DebugTree = 4, // log and check tree after each op - DebugScan = 8, // log scans - DebugLock = 16 // log ACC locks - }; - STATIC_CONST( DataFillByte = 0xa2 ); - STATIC_CONST( NodeFillByte = 0xa4 ); -#endif - - // start up info - Uint32 c_internalStartPhase; - Uint32 c_typeOfStart; - - /* - * Global data set at operation start. Unpacked from index metadata. - * Not passed as parameter to methods. Invalid across timeslices. - * - * TODO inline all into index metadata - */ - - // index key attr ids with sizes in AttributeHeader format - Data c_keyAttrs; - - // pointers to index key comparison functions - NdbSqlUtil::Cmp** c_sqlCmp; - - /* - * Other buffers used during the operation. - */ - - // buffer for search key data with headers - Data c_searchKey; - - // buffer for current entry key data with headers - Data c_entryKey; - - // buffer for scan bounds and keyinfo (primary key) - Data c_dataBuffer; - - // inlined utils - DescEnt& getDescEnt(Uint32 descPage, Uint32 descOff); - Uint32 getTupAddr(const Frag& frag, TreeEnt ent); - static unsigned min(unsigned x, unsigned y); - static unsigned max(unsigned x, unsigned y); -}; - -// Dbtux::TupLoc - -inline -Dbtux::TupLoc::TupLoc() : - m_pageId1(RNIL >> 16), - m_pageId2(RNIL & 0xFFFF), - m_pageOffset(0) -{ -} - -inline -Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) : - m_pageId1(pageId >> 16), - m_pageId2(pageId & 0xFFFF), - m_pageOffset(pageOffset) -{ -} - -inline Uint32 -Dbtux::TupLoc::getPageId() const -{ - return (m_pageId1 << 16) | m_pageId2; -} - -inline void -Dbtux::TupLoc::setPageId(Uint32 pageId) -{ - m_pageId1 = (pageId >> 16); - m_pageId2 = (pageId & 0xFFFF); -} - -inline Uint32 -Dbtux::TupLoc::getPageOffset() const -{ - return (Uint32)m_pageOffset; -} - -inline void -Dbtux::TupLoc::setPageOffset(Uint32 pageOffset) -{ - m_pageOffset = (Uint16)pageOffset; -} - -inline bool -Dbtux::TupLoc::operator==(const TupLoc& loc) const -{ - return - m_pageId1 == loc.m_pageId1 && - m_pageId2 == loc.m_pageId2 && - m_pageOffset == loc.m_pageOffset; -} - -inline bool -Dbtux::TupLoc::operator!=(const TupLoc& loc) const -{ - return ! (*this == loc); -} - -// Dbtux::TreeEnt - -inline -Dbtux::TreeEnt::TreeEnt() : - m_tupLoc(), - m_tupVersion(0) -{ -} - -inline bool -Dbtux::TreeEnt::eqtuple(const TreeEnt ent) const -{ - return - m_tupLoc == ent.m_tupLoc; -} - -inline bool -Dbtux::TreeEnt::eq(const TreeEnt ent) const -{ - return - m_tupLoc == ent.m_tupLoc && - m_tupVersion == ent.m_tupVersion; -} - -inline int -Dbtux::TreeEnt::cmp(const TreeEnt ent) const -{ - if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId()) - return -1; - if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId()) - return +1; - if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset()) - return -1; - if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset()) - return +1; - /* - * Guess if one tuple version has wrapped around. This is well - * defined ordering on existing versions since versions are assigned - * consecutively and different versions exists only on uncommitted - * tuple. Assuming max 2**14 uncommitted ops on same tuple. 
- */ - const unsigned version_wrap_limit = (1 << (ZTUP_VERSION_BITS - 1)); - if (m_tupVersion < ent.m_tupVersion) { - if (unsigned(ent.m_tupVersion - m_tupVersion) < version_wrap_limit) - return -1; - else - return +1; - } - if (m_tupVersion > ent.m_tupVersion) { - if (unsigned(m_tupVersion - ent.m_tupVersion) < version_wrap_limit) - return +1; - else - return -1; - } - return 0; -} - -// Dbtux::TreeNode - -inline -Dbtux::TreeNode::TreeNode() : - m_side(2), - m_balance(0 + 1), - pad1(0), - m_occup(0), - m_nodeScan(RNIL) -{ - m_link[0] = NullTupLoc; - m_link[1] = NullTupLoc; - m_link[2] = NullTupLoc; -} - -// Dbtux::TreeHead - -inline -Dbtux::TreeHead::TreeHead() : - m_nodeSize(0), - m_prefSize(0), - m_minOccup(0), - m_maxOccup(0), - m_entryCount(0), - m_root() -{ -} - -inline unsigned -Dbtux::TreeHead::getSize(AccSize acc) const -{ - switch (acc) { - case AccNone: - return 0; - case AccHead: - return NodeHeadSize; - case AccPref: - return NodeHeadSize + m_prefSize + 2 * TreeEntSize; - case AccFull: - return m_nodeSize; - } - return 0; -} - -inline Dbtux::Data -Dbtux::TreeHead::getPref(TreeNode* node) const -{ - Uint32* ptr = (Uint32*)node + NodeHeadSize; - return ptr; -} - -inline Dbtux::TreeEnt* -Dbtux::TreeHead::getEntList(TreeNode* node) const -{ - Uint32* ptr = (Uint32*)node + NodeHeadSize + m_prefSize; - return (TreeEnt*)ptr; -} - -// Dbtux::TreePos - -inline -Dbtux::TreePos::TreePos() : - m_loc(), - m_pos(ZNIL), - m_dir(255) -{ -} - -// Dbtux::DescPage - -inline -Dbtux::DescPage::DescPage() : - m_nextPage(RNIL), - m_numFree(ZNIL) -{ - for (unsigned i = 0; i < DescPageSize; i++) { -#ifdef VM_TRACE - m_data[i] = 0x13571357; -#else - m_data[i] = 0; -#endif - } -} - -// Dbtux::ScanOp - -inline -Dbtux::ScanOp::ScanOp(ScanBoundPool& scanBoundPool) : - m_state(Undef), - m_lockwait(false), - m_userPtr(RNIL), - m_userRef(RNIL), - m_tableId(RNIL), - m_indexId(RNIL), - m_fragPtrI(RNIL), - m_transId1(0), - m_transId2(0), - m_savePointId(0), - m_accLockOp(RNIL), - m_accLockOps(), - m_readCommitted(0), - m_lockMode(0), - m_descending(0), - m_boundMin(scanBoundPool), - m_boundMax(scanBoundPool), - m_scanPos(), - m_scanEnt(), - m_nodeScan(RNIL) -{ - m_bound[0] = &m_boundMin; - m_bound[1] = &m_boundMax; - m_boundCnt[0] = 0; - m_boundCnt[1] = 0; -} - -// Dbtux::Index - -inline -Dbtux::Index::Index() : - m_state(NotDefined), - m_tableType(DictTabInfo::UndefTableType), - m_tableId(RNIL), - m_numFrags(0), - m_descPage(RNIL), - m_descOff(0), - m_numAttrs(0), - m_storeNullKey(false) -{ - for (unsigned i = 0; i < MaxIndexFragments; i++) { - m_fragId[i] = ZNIL; - m_fragPtrI[i] = RNIL; - }; -} - -// Dbtux::Frag - -inline -Dbtux::Frag::Frag(ArrayPool& scanOpPool) : - m_tableId(RNIL), - m_indexId(RNIL), - m_fragId(ZNIL), - m_descPage(RNIL), - m_descOff(0), - m_numAttrs(ZNIL), - m_storeNullKey(false), - m_tree(), - m_freeLoc(), - m_scanList(scanOpPool), - m_tupIndexFragPtrI(RNIL) -{ - m_tupTableFragPtrI = RNIL; - m_accTableFragPtrI = RNIL; -} - -// Dbtux::FragOp - -inline -Dbtux::FragOp::FragOp() : - m_userPtr(RNIL), - m_userRef(RNIL), - m_indexId(RNIL), - m_fragId(ZNIL), - m_fragPtrI(RNIL), - m_fragNo(ZNIL), - m_numAttrsRecvd(ZNIL) -{ -} - -// Dbtux::NodeHandle - -inline -Dbtux::NodeHandle::NodeHandle(Frag& frag) : - m_frag(frag), - m_loc(), - m_node(0) -{ -} - -inline -Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) : - m_frag(node.m_frag), - m_loc(node.m_loc), - m_node(node.m_node) -{ -} - -inline Dbtux::NodeHandle& -Dbtux::NodeHandle::operator=(const NodeHandle& node) -{ - ndbassert(&m_frag == 
&node.m_frag); - m_loc = node.m_loc; - m_node = node.m_node; - return *this; -} - -inline bool -Dbtux::NodeHandle::isNull() -{ - return m_node == 0; -} - -inline Dbtux::TupLoc -Dbtux::NodeHandle::getLink(unsigned i) -{ - ndbrequire(i <= 2); - return m_node->m_link[i]; -} - -inline unsigned -Dbtux::NodeHandle::getChilds() -{ - return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc); -} - -inline unsigned -Dbtux::NodeHandle::getSide() -{ - return m_node->m_side; -} - -inline unsigned -Dbtux::NodeHandle::getOccup() -{ - return m_node->m_occup; -} - -inline int -Dbtux::NodeHandle::getBalance() -{ - return (int)m_node->m_balance - 1; -} - -inline Uint32 -Dbtux::NodeHandle::getNodeScan() -{ - return m_node->m_nodeScan; -} - -inline void -Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc) -{ - ndbrequire(i <= 2); - m_node->m_link[i] = loc; -} - -inline void -Dbtux::NodeHandle::setSide(unsigned i) -{ - ndbrequire(i <= 2); - m_node->m_side = i; -} - -inline void -Dbtux::NodeHandle::setOccup(unsigned n) -{ - TreeHead& tree = m_frag.m_tree; - ndbrequire(n <= tree.m_maxOccup); - m_node->m_occup = n; -} - -inline void -Dbtux::NodeHandle::setBalance(int b) -{ - ndbrequire(abs(b) <= 1); - m_node->m_balance = (unsigned)(b + 1); -} - -inline void -Dbtux::NodeHandle::setNodeScan(Uint32 scanPtrI) -{ - m_node->m_nodeScan = scanPtrI; -} - -inline Dbtux::Data -Dbtux::NodeHandle::getPref() -{ - TreeHead& tree = m_frag.m_tree; - return tree.getPref(m_node); -} - -inline Dbtux::TreeEnt -Dbtux::NodeHandle::getEnt(unsigned pos) -{ - TreeHead& tree = m_frag.m_tree; - TreeEnt* entList = tree.getEntList(m_node); - const unsigned occup = m_node->m_occup; - ndbrequire(pos < occup); - return entList[(1 + pos) % occup]; -} - -inline Dbtux::TreeEnt -Dbtux::NodeHandle::getMinMax(unsigned i) -{ - const unsigned occup = m_node->m_occup; - ndbrequire(i <= 1 && occup != 0); - return getEnt(i == 0 ? 0 : occup - 1); -} - -// parameters for methods - -#ifdef VM_TRACE -inline -Dbtux::PrintPar::PrintPar() : - // caller fills in - m_path(), - m_side(255), - m_parent(), - // default return values - m_depth(0), - m_occup(0), - m_ok(true) -{ -} -#endif - -// utils - -inline Dbtux::DescEnt& -Dbtux::getDescEnt(Uint32 descPage, Uint32 descOff) -{ - DescPagePtr pagePtr; - pagePtr.i = descPage; - c_descPagePool.getPtr(pagePtr); - ndbrequire(descOff < DescPageSize); - DescEnt* descEnt = (DescEnt*)&pagePtr.p->m_data[descOff]; - return *descEnt; -} - -inline Uint32 -Dbtux::getTupAddr(const Frag& frag, TreeEnt ent) -{ - const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - const TupLoc tupLoc = ent.m_tupLoc; - Uint32 tupAddr = NullTupAddr; - c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupAddr); - jamEntry(); - return tupAddr; -} - -inline unsigned -Dbtux::min(unsigned x, unsigned y) -{ - return x < y ? x : y; -} - -inline unsigned -Dbtux::max(unsigned x, unsigned y) -{ - return x > y ? x : y; -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp deleted file mode 100644 index 32520502991..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp +++ /dev/null @@ -1,175 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_CMP_CPP -#include "Dbtux.hpp" - -/* - * Search key vs node prefix or entry. - * - * The comparison starts at given attribute position. The position is - * updated by number of equal initial attributes found. The entry data - * may be partial in which case CmpUnknown may be returned. - * - * The attributes are normalized and have variable size given in words. - */ -int -Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen) -{ - const unsigned numAttrs = frag.m_numAttrs; - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // skip to right position in search key only - for (unsigned i = 0; i < start; i++) { - jam(); - searchKey += AttributeHeaderSize + ah(searchKey).getDataSize(); - } - // number of words of entry data left - unsigned len2 = maxlen; - int ret = 0; - while (start < numAttrs) { - if (len2 <= AttributeHeaderSize) { - jam(); - ret = NdbSqlUtil::CmpUnknown; - break; - } - len2 -= AttributeHeaderSize; - if (! ah(searchKey).isNULL()) { - if (! ah(entryData).isNULL()) { - jam(); - // verify attribute id - const DescAttr& descAttr = descEnt.m_descAttr[start]; - ndbrequire(ah(searchKey).getAttributeId() == descAttr.m_primaryAttrId); - ndbrequire(ah(entryData).getAttributeId() == descAttr.m_primaryAttrId); - // sizes - const unsigned size1 = ah(searchKey).getDataSize(); - const unsigned size2 = min(ah(entryData).getDataSize(), len2); - len2 -= size2; - // compare - NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start]; - const Uint32* const p1 = &searchKey[AttributeHeaderSize]; - const Uint32* const p2 = &entryData[AttributeHeaderSize]; - const bool full = (maxlen == MaxAttrDataSize); - ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full); - if (ret != 0) { - jam(); - break; - } - } else { - jam(); - // not NULL > NULL - ret = +1; - break; - } - } else { - if (! ah(entryData).isNULL()) { - jam(); - // NULL < not NULL - ret = -1; - break; - } - } - searchKey += AttributeHeaderSize + ah(searchKey).getDataSize(); - entryData += AttributeHeaderSize + ah(entryData).getDataSize(); - start++; - } - return ret; -} - -/* - * Scan bound vs node prefix or entry. - * - * Compare lower or upper bound and index entry data. The entry data - * may be partial in which case CmpUnknown may be returned. Otherwise - * returns -1 if the bound is to the left of the entry and +1 if the - * bound is to the right of the entry. - * - * The routine is similar to cmpSearchKey, but 0 is never returned. - * Suppose all attributes compare equal. Recall that all bounds except - * possibly the last one are non-strict. Use the given bound direction - * (0-lower 1-upper) and strictness of last bound to return -1 or +1. - * - * Following example illustrates this. We are at (a=2, b=3). - * - * idir bounds strict return - * 0 a >= 2 and b >= 3 no -1 - * 0 a >= 2 and b > 3 yes +1 - * 1 a <= 2 and b <= 3 no +1 - * 1 a <= 2 and b < 3 yes -1 - * - * The attributes are normalized and have variable size given in words. 
- */ -int -Dbtux::cmpScanBound(const Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen) -{ - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - // direction 0-lower 1-upper - ndbrequire(idir <= 1); - // number of words of data left - unsigned len2 = maxlen; - // in case of no bounds, init last type to something non-strict - unsigned type = 4; - while (boundCount != 0) { - if (len2 <= AttributeHeaderSize) { - jam(); - return NdbSqlUtil::CmpUnknown; - } - len2 -= AttributeHeaderSize; - // get and skip bound type (it is used after the loop) - type = boundInfo[0]; - boundInfo += 1; - if (! ah(boundInfo).isNULL()) { - if (! ah(entryData).isNULL()) { - jam(); - // verify attribute id - const Uint32 index = ah(boundInfo).getAttributeId(); - ndbrequire(index < frag.m_numAttrs); - const DescAttr& descAttr = descEnt.m_descAttr[index]; - ndbrequire(ah(entryData).getAttributeId() == descAttr.m_primaryAttrId); - // sizes - const unsigned size1 = ah(boundInfo).getDataSize(); - const unsigned size2 = min(ah(entryData).getDataSize(), len2); - len2 -= size2; - // compare - NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index]; - const Uint32* const p1 = &boundInfo[AttributeHeaderSize]; - const Uint32* const p2 = &entryData[AttributeHeaderSize]; - const bool full = (maxlen == MaxAttrDataSize); - int ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full); - if (ret != 0) { - jam(); - return ret; - } - } else { - jam(); - // not NULL > NULL - return +1; - } - } else { - jam(); - if (! ah(entryData).isNULL()) { - jam(); - // NULL < not NULL - return -1; - } - } - boundInfo += AttributeHeaderSize + ah(boundInfo).getDataSize(); - entryData += AttributeHeaderSize + ah(entryData).getDataSize(); - boundCount -= 1; - } - // all attributes were equal - const int strict = (type & 0x1); - return (idir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1)); -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp deleted file mode 100644 index 933ec77258e..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp +++ /dev/null @@ -1,417 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_DEBUG_CPP -#include "Dbtux.hpp" - -/* - * 12001 log file 0-close 1-open 2-append 3-append to signal log - * 12002 log flags 1-meta 2-maint 4-tree 8-scan - */ -void -Dbtux::execDUMP_STATE_ORD(Signal* signal) -{ - jamEntry(); -#ifdef VM_TRACE - if (signal->theData[0] == DumpStateOrd::TuxLogToFile) { - unsigned flag = signal->theData[1]; - const char* const tuxlog = "tux.log"; - FILE* slFile = globalSignalLoggers.getOutputStream(); - if (flag <= 3) { - if (debugFile != 0) { - if (debugFile != slFile) - fclose(debugFile); - debugFile = 0; - debugOut = *new NdbOut(*new NullOutputStream()); - } - if (flag == 1) - debugFile = fopen(tuxlog, "w"); - if (flag == 2) - debugFile = fopen(tuxlog, "a"); - if (flag == 3) - debugFile = slFile; - if (debugFile != 0) - debugOut = *new NdbOut(*new FileOutputStream(debugFile)); - } - return; - } - if (signal->theData[0] == DumpStateOrd::TuxSetLogFlags) { - debugFlags = signal->theData[1]; - return; - } - if (signal->theData[0] == DumpStateOrd::TuxMetaDataJunk) { - abort(); - } -#endif -} - -#ifdef VM_TRACE - -void -Dbtux::printTree(Signal* signal, Frag& frag, NdbOut& out) -{ - TreeHead& tree = frag.m_tree; - PrintPar par; - strcpy(par.m_path, "."); - par.m_side = 2; - par.m_parent = NullTupLoc; - printNode(frag, out, tree.m_root, par); - out.m_out->flush(); - if (! par.m_ok) { - if (debugFile == 0) { - signal->theData[0] = 12001; - signal->theData[1] = 1; - execDUMP_STATE_ORD(signal); - if (debugFile != 0) { - printTree(signal, frag, debugOut); - } - } - ndbrequire(false); - } -} - -void -Dbtux::printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par) -{ - if (loc == NullTupLoc) { - par.m_depth = 0; - return; - } - TreeHead& tree = frag.m_tree; - NodeHandle node(frag); - selectNode(node, loc); - out << par.m_path << " " << node << endl; - // check children - PrintPar cpar[2]; - ndbrequire(strlen(par.m_path) + 1 < sizeof(par.m_path)); - for (unsigned i = 0; i <= 1; i++) { - sprintf(cpar[i].m_path, "%s%c", par.m_path, "LR"[i]); - cpar[i].m_side = i; - cpar[i].m_depth = 0; - cpar[i].m_parent = loc; - printNode(frag, out, node.getLink(i), cpar[i]); - if (! 
cpar[i].m_ok) { - par.m_ok = false; - } - } - static const char* const sep = " *** "; - // check child-parent links - if (node.getLink(2) != par.m_parent) { - par.m_ok = false; - out << par.m_path << sep; - out << "parent loc " << hex << node.getLink(2); - out << " should be " << hex << par.m_parent << endl; - } - if (node.getSide() != par.m_side) { - par.m_ok = false; - out << par.m_path << sep; - out << "side " << dec << node.getSide(); - out << " should be " << dec << par.m_side << endl; - } - // check balance - const int balance = -cpar[0].m_depth + cpar[1].m_depth; - if (node.getBalance() != balance) { - par.m_ok = false; - out << par.m_path << sep; - out << "balance " << node.getBalance(); - out << " should be " << balance << endl; - } - if (abs(node.getBalance()) > 1) { - par.m_ok = false; - out << par.m_path << sep; - out << "balance " << node.getBalance() << " is invalid" << endl; - } - // check occupancy - if (node.getOccup() == 0 || node.getOccup() > tree.m_maxOccup) { - par.m_ok = false; - out << par.m_path << sep; - out << "occupancy " << node.getOccup(); - out << " zero or greater than max " << tree.m_maxOccup << endl; - } - // check for occupancy of interior node - if (node.getChilds() == 2 && node.getOccup() < tree.m_minOccup) { - par.m_ok = false; - out << par.m_path << sep; - out << "occupancy " << node.getOccup() << " of interior node"; - out << " less than min " << tree.m_minOccup << endl; - } -#ifdef dbtux_totally_groks_t_trees - // check missed semi-leaf/leaf merge - for (unsigned i = 0; i <= 1; i++) { - if (node.getLink(i) != NullTupLoc && - node.getLink(1 - i) == NullTupLoc && - // our semi-leaf seems to satify interior minOccup condition - node.getOccup() < tree.m_minOccup) { - par.m_ok = false; - out << par.m_path << sep; - out << "missed merge with child " << i << endl; - } - } -#endif - // check inline prefix - { ConstData data1 = node.getPref(); - Uint32 data2[MaxPrefSize]; - memset(data2, DataFillByte, MaxPrefSize << 2); - readKeyAttrs(frag, node.getMinMax(0), 0, c_searchKey); - copyAttrs(frag, c_searchKey, data2, tree.m_prefSize); - for (unsigned n = 0; n < tree.m_prefSize; n++) { - if (data1[n] != data2[n]) { - par.m_ok = false; - out << par.m_path << sep; - out << "inline prefix mismatch word " << n; - out << " value " << hex << data1[n]; - out << " should be " << hex << data2[n] << endl; - break; - } - } - } - // check ordering within node - for (unsigned j = 1; j < node.getOccup(); j++) { - const TreeEnt ent1 = node.getEnt(j - 1); - const TreeEnt ent2 = node.getEnt(j); - unsigned start = 0; - readKeyAttrs(frag, ent1, start, c_searchKey); - readKeyAttrs(frag, ent2, start, c_entryKey); - int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey); - if (ret == 0) - ret = ent1.cmp(ent2); - if (ret != -1) { - par.m_ok = false; - out << par.m_path << sep; - out << " disorder within node at pos " << j << endl; - } - } - // check ordering wrt subtrees - for (unsigned i = 0; i <= 1; i++) { - if (node.getLink(i) == NullTupLoc) - continue; - const TreeEnt ent1 = cpar[i].m_minmax[1 - i]; - const TreeEnt ent2 = node.getMinMax(i); - unsigned start = 0; - readKeyAttrs(frag, ent1, start, c_searchKey); - readKeyAttrs(frag, ent2, start, c_entryKey); - int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey); - if (ret == 0) - ret = ent1.cmp(ent2); - if (ret != (i == 0 ? 
-1 : +1)) { - par.m_ok = false; - out << par.m_path << sep; - out << " disorder wrt subtree " << i << endl; - } - } - // return values - par.m_depth = 1 + max(cpar[0].m_depth, cpar[1].m_depth); - par.m_occup = node.getOccup(); - for (unsigned i = 0; i <= 1; i++) { - if (node.getLink(i) == NullTupLoc) - par.m_minmax[i] = node.getMinMax(i); - else - par.m_minmax[i] = cpar[i].m_minmax[i]; - } -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::TupLoc& loc) -{ - if (loc == Dbtux::NullTupLoc) { - out << "null"; - } else { - out << dec << loc.getPageId(); - out << "." << dec << loc.getPageOffset(); - } - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::TreeEnt& ent) -{ - out << ent.m_tupLoc; - out << "-" << dec << ent.m_tupVersion; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::TreeNode& node) -{ - out << "[TreeNode " << hex << &node; - out << " [left " << node.m_link[0] << "]"; - out << " [right " << node.m_link[1] << "]"; - out << " [up " << node.m_link[2] << "]"; - out << " [side " << dec << node.m_side << "]"; - out << " [occup " << dec << node.m_occup << "]"; - out << " [balance " << dec << (int)node.m_balance - 1 << "]"; - out << " [nodeScan " << hex << node.m_nodeScan << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::TreeHead& tree) -{ - out << "[TreeHead " << hex << &tree; - out << " [nodeSize " << dec << tree.m_nodeSize << "]"; - out << " [prefSize " << dec << tree.m_prefSize << "]"; - out << " [minOccup " << dec << tree.m_minOccup << "]"; - out << " [maxOccup " << dec << tree.m_maxOccup << "]"; - out << " [AccHead " << dec << tree.getSize(Dbtux::AccHead) << "]"; - out << " [AccPref " << dec << tree.getSize(Dbtux::AccPref) << "]"; - out << " [AccFull " << dec << tree.getSize(Dbtux::AccFull) << "]"; - out << " [root " << hex << tree.m_root << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::TreePos& pos) -{ - out << "[TreePos " << hex << &pos; - out << " [loc " << pos.m_loc << "]"; - out << " [pos " << dec << pos.m_pos << "]"; - out << " [dir " << dec << pos.m_dir << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::DescAttr& descAttr) -{ - out << "[DescAttr " << hex << &descAttr; - out << " [attrDesc " << hex << descAttr.m_attrDesc; - out << " [primaryAttrId " << dec << descAttr.m_primaryAttrId << "]"; - out << " [typeId " << dec << descAttr.m_typeId << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::ScanOp& scan) -{ - Dbtux* tux = (Dbtux*)globalData.getBlock(DBTUX); - out << "[ScanOp " << hex << &scan; - out << " [state " << dec << scan.m_state << "]"; - out << " [lockwait " << dec << scan.m_lockwait << "]"; - out << " [indexId " << dec << scan.m_indexId << "]"; - out << " [fragId " << dec << scan.m_fragId << "]"; - out << " [transId " << hex << scan.m_transId1 << " " << scan.m_transId2 << "]"; - out << " [savePointId " << dec << scan.m_savePointId << "]"; - out << " [accLockOp " << hex << scan.m_accLockOp << "]"; - out << " [accLockOps"; - { - DLFifoList::Head head = scan.m_accLockOps; - LocalDLFifoList list(tux->c_scanLockPool, head); - Dbtux::ScanLockPtr lockPtr; - list.first(lockPtr); - while (lockPtr.i != RNIL) { - out << " " << hex << lockPtr.p->m_accLockOp; - list.next(lockPtr); - } - } - out << "]"; - out << " [readCommitted " << dec << scan.m_readCommitted << "]"; - out << " [lockMode " << dec << scan.m_lockMode << "]"; - out << " [descending " << dec << scan.m_descending << "]"; - out << " [pos " << 
scan.m_scanPos << "]"; - out << " [ent " << scan.m_scanEnt << "]"; - for (unsigned i = 0; i <= 1; i++) { - out << " [bound " << dec << i; - Dbtux::ScanBound& bound = *scan.m_bound[i]; - Dbtux::ScanBoundIterator iter; - bound.first(iter); - for (unsigned j = 0; j < bound.getSize(); j++) { - out << " " << hex << *iter.data; - bound.next(iter); - } - out << "]"; - } - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::Index& index) -{ - Dbtux* tux = (Dbtux*)globalData.getBlock(DBTUX); - out << "[Index " << hex << &index; - out << " [tableId " << dec << index.m_tableId << "]"; - out << " [numFrags " << dec << index.m_numFrags << "]"; - for (unsigned i = 0; i < index.m_numFrags; i++) { - out << " [frag " << dec << i << " "; - const Dbtux::Frag& frag = *tux->c_fragPool.getPtr(index.m_fragPtrI[i]); - out << frag; - out << "]"; - } - out << " [descPage " << hex << index.m_descPage << "]"; - out << " [descOff " << dec << index.m_descOff << "]"; - out << " [numAttrs " << dec << index.m_numAttrs << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::Frag& frag) -{ - out << "[Frag " << hex << &frag; - out << " [tableId " << dec << frag.m_tableId << "]"; - out << " [indexId " << dec << frag.m_indexId << "]"; - out << " [fragId " << dec << frag.m_fragId << "]"; - out << " [descPage " << hex << frag.m_descPage << "]"; - out << " [descOff " << dec << frag.m_descOff << "]"; - out << " [numAttrs " << dec << frag.m_numAttrs << "]"; - out << " [tree " << frag.m_tree << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::FragOp& fragOp) -{ - out << "[FragOp " << hex << &fragOp; - out << " [userPtr " << dec << fragOp.m_userPtr << "]"; - out << " [indexId " << dec << fragOp.m_indexId << "]"; - out << " [fragId " << dec << fragOp.m_fragId << "]"; - out << " [fragNo " << dec << fragOp.m_fragNo << "]"; - out << " numAttrsRecvd " << dec << fragOp.m_numAttrsRecvd << "]"; - out << "]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Dbtux::NodeHandle& node) -{ - const Dbtux::Frag& frag = node.m_frag; - const Dbtux::TreeHead& tree = frag.m_tree; - out << "[NodeHandle " << hex << &node; - out << " [loc " << node.m_loc << "]"; - out << " [node " << *node.m_node << "]"; - const Uint32* data; - out << " [pref"; - data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize; - for (unsigned j = 0; j < tree.m_prefSize; j++) - out << " " << hex << data[j]; - out << "]"; - out << " [entList"; - unsigned numpos = node.m_node->m_occup; - data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize; - const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data; - // print entries in logical order - for (unsigned pos = 1; pos <= numpos; pos++) - out << " " << entList[pos % numpos]; - out << "]"; - out << "]"; - return out; -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp deleted file mode 100644 index fde64161cab..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp +++ /dev/null @@ -1,338 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_GEN_CPP -#include "Dbtux.hpp" - -Dbtux::Dbtux(Block_context& ctx) : - SimulatedBlock(DBTUX, ctx), - c_tup(0), - c_descPageList(RNIL), -#ifdef VM_TRACE - debugFile(0), - debugOut(*new NullOutputStream()), - debugFlags(0), -#endif - c_internalStartPhase(0), - c_typeOfStart(NodeState::ST_ILLEGAL_TYPE), - c_dataBuffer(0) -{ - BLOCK_CONSTRUCTOR(Dbtux); - // verify size assumptions (also when release-compiled) - ndbrequire( - (sizeof(TreeEnt) & 0x3) == 0 && - (sizeof(TreeNode) & 0x3) == 0 && - (sizeof(DescHead) & 0x3) == 0 && - (sizeof(DescAttr) & 0x3) == 0 - ); - /* - * DbtuxGen.cpp - */ - addRecSignal(GSN_CONTINUEB, &Dbtux::execCONTINUEB); - addRecSignal(GSN_STTOR, &Dbtux::execSTTOR); - addRecSignal(GSN_READ_CONFIG_REQ, &Dbtux::execREAD_CONFIG_REQ, true); - /* - * DbtuxMeta.cpp - */ - addRecSignal(GSN_TUXFRAGREQ, &Dbtux::execTUXFRAGREQ); - addRecSignal(GSN_TUX_ADD_ATTRREQ, &Dbtux::execTUX_ADD_ATTRREQ); - addRecSignal(GSN_ALTER_INDX_REQ, &Dbtux::execALTER_INDX_REQ); - addRecSignal(GSN_DROP_TAB_REQ, &Dbtux::execDROP_TAB_REQ); - /* - * DbtuxMaint.cpp - */ - addRecSignal(GSN_TUX_MAINT_REQ, &Dbtux::execTUX_MAINT_REQ); - /* - * DbtuxScan.cpp - */ - addRecSignal(GSN_ACC_SCANREQ, &Dbtux::execACC_SCANREQ); - addRecSignal(GSN_TUX_BOUND_INFO, &Dbtux::execTUX_BOUND_INFO); - addRecSignal(GSN_NEXT_SCANREQ, &Dbtux::execNEXT_SCANREQ); - addRecSignal(GSN_ACC_CHECK_SCAN, &Dbtux::execACC_CHECK_SCAN); - addRecSignal(GSN_ACCKEYCONF, &Dbtux::execACCKEYCONF); - addRecSignal(GSN_ACCKEYREF, &Dbtux::execACCKEYREF); - addRecSignal(GSN_ACC_ABORTCONF, &Dbtux::execACC_ABORTCONF); - /* - * DbtuxStat.cpp - */ - addRecSignal(GSN_READ_PSEUDO_REQ, &Dbtux::execREAD_PSEUDO_REQ); - /* - * DbtuxDebug.cpp - */ - addRecSignal(GSN_DUMP_STATE_ORD, &Dbtux::execDUMP_STATE_ORD); -} - -Dbtux::~Dbtux() -{ -} - -void -Dbtux::execCONTINUEB(Signal* signal) -{ - jamEntry(); - const Uint32* data = signal->getDataPtr(); - switch (data[0]) { - case TuxContinueB::DropIndex: // currently unused - { - IndexPtr indexPtr; - c_indexPool.getPtr(indexPtr, data[1]); - dropIndex(signal, indexPtr, data[2], data[3]); - } - break; - default: - ndbrequire(false); - break; - } -} - -/* - * STTOR is sent to one block at a time. In NDBCNTR it triggers - * NDB_STTOR to the "old" blocks. STTOR carries start phase (SP) and - * NDB_STTOR carries internal start phase (ISP). - * - * SP ISP activities - * 1 none - * 2 1 - * 3 2 recover metadata, activate indexes - * 4 3 recover data - * 5 4-6 - * 6 skip - * 7 skip - * 8 7 build non-logged indexes on SR - * - * DBTUX catches type of start (IS, SR, NR, INR) at SP 3 and updates - * internal start phase at SP 7. These are used to prevent index - * maintenance operations caused by redo log at SR. 
- */ -void -Dbtux::execSTTOR(Signal* signal) -{ - jamEntry(); - Uint32 startPhase = signal->theData[1]; - switch (startPhase) { - case 1: - jam(); - CLEAR_ERROR_INSERT_VALUE; - c_tup = (Dbtup*)globalData.getBlock(DBTUP); - ndbrequire(c_tup != 0); - break; - case 3: - jam(); - c_typeOfStart = signal->theData[7]; - break; - case 7: - c_internalStartPhase = 6; - default: - jam(); - break; - } - signal->theData[0] = 0; // garbage - signal->theData[1] = 0; // garbage - signal->theData[2] = 0; // garbage - signal->theData[3] = 1; - signal->theData[4] = 3; // for c_typeOfStart - signal->theData[5] = 7; // for c_internalStartPhase - signal->theData[6] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 7, JBB); -} - -void -Dbtux::execREAD_CONFIG_REQ(Signal* signal) -{ - jamEntry(); - - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - ndbrequire(req->noOfParameters == 0); - - Uint32 nIndex; - Uint32 nFragment; - Uint32 nAttribute; - Uint32 nScanOp; - Uint32 nScanBatch; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_INDEX, &nIndex)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_FRAGMENT, &nFragment)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp)); - ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_BATCH_SIZE, &nScanBatch)); - - const Uint32 nDescPage = (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1) / DescPageSize; - const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4; - const Uint32 nScanLock = nScanOp * nScanBatch; - - c_indexPool.setSize(nIndex); - c_fragPool.setSize(nFragment); - c_descPagePool.setSize(nDescPage); - c_fragOpPool.setSize(MaxIndexFragments); - c_scanOpPool.setSize(nScanOp); - c_scanBoundPool.setSize(nScanBoundWords); - c_scanLockPool.setSize(nScanLock); - /* - * Index id is physical array index. We seize and initialize all - * index records now. 
- */ - IndexPtr indexPtr; - while (1) { - jam(); - refresh_watch_dog(); - c_indexPool.seize(indexPtr); - if (indexPtr.i == RNIL) { - jam(); - break; - } - new (indexPtr.p) Index(); - } - // allocate buffers - c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes); - c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes); - c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize); - c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize); - c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1); - // ack - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -// utils - -void -Dbtux::setKeyAttrs(const Frag& frag) -{ - Data keyAttrs = c_keyAttrs; // global - NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global - const unsigned numAttrs = frag.m_numAttrs; - const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff); - for (unsigned i = 0; i < numAttrs; i++) { - jam(); - const DescAttr& descAttr = descEnt.m_descAttr[i]; - Uint32 size = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc); - // set attr id and fixed size - ah(keyAttrs) = AttributeHeader(descAttr.m_primaryAttrId, size); - keyAttrs += 1; - // set comparison method pointer - const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); - ndbrequire(sqlType.m_cmp != 0); - *(sqlCmp++) = sqlType.m_cmp; - } -} - -void -Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData) -{ - ConstData keyAttrs = c_keyAttrs; // global - const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - const TupLoc tupLoc = ent.m_tupLoc; - const Uint32 tupVersion = ent.m_tupVersion; - ndbrequire(start < frag.m_numAttrs); - const Uint32 numAttrs = frag.m_numAttrs - start; - // skip to start position in keyAttrs only - keyAttrs += start; - int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupVersion, keyAttrs, numAttrs, keyData); - jamEntry(); - // TODO handle error - ndbrequire(ret > 0); -#ifdef VM_TRACE - if (debugFlags & (DebugMaint | DebugScan)) { - debugOut << "readKeyAttrs:" << endl; - ConstData data = keyData; - Uint32 totalSize = 0; - for (Uint32 i = start; i < frag.m_numAttrs; i++) { - Uint32 attrId = ah(data).getAttributeId(); - Uint32 dataSize = ah(data).getDataSize(); - debugOut << i << " attrId=" << attrId << " size=" << dataSize; - data += 1; - for (Uint32 j = 0; j < dataSize; j++) { - debugOut << " " << hex << data[0]; - data += 1; - } - debugOut << endl; - totalSize += 1 + dataSize; - } - ndbassert((int)totalSize == ret); - } -#endif -} - -void -Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize) -{ - const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - const TupLoc tupLoc = ent.m_tupLoc; - int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), pkData, true); - jamEntry(); - // TODO handle error - ndbrequire(ret > 0); - pkSize = ret; -} - -/* - * Copy attribute data with headers. Input is all index key data. - * Copies whatever fits. 
- */ -void -Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2) -{ - unsigned n = frag.m_numAttrs; - unsigned len2 = maxlen2; - while (n != 0) { - jam(); - const unsigned dataSize = ah(data1).getDataSize(); - // copy header - if (len2 == 0) - return; - data2[0] = data1[0]; - data1 += 1; - data2 += 1; - len2 -= 1; - // copy data - for (unsigned i = 0; i < dataSize; i++) { - if (len2 == 0) - return; - data2[i] = data1[i]; - len2 -= 1; - } - data1 += dataSize; - data2 += dataSize; - n -= 1; - } -#ifdef VM_TRACE - memset(data2, DataFillByte, len2 << 2); -#endif -} - -void -Dbtux::unpackBound(const ScanBound& bound, Data dest) -{ - ScanBoundIterator iter; - bound.first(iter); - const unsigned n = bound.getSize(); - unsigned j; - for (j = 0; j < n; j++) { - dest[j] = *iter.data; - bound.next(iter); - } -} - -BLOCK_FUNCTIONS(Dbtux) diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp deleted file mode 100644 index 65fc1d114f0..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_MAINT_CPP -#include "Dbtux.hpp" - -/* - * Maintain index. 
- */ - -void -Dbtux::execTUX_MAINT_REQ(Signal* signal) -{ - jamEntry(); - TuxMaintReq* const sig = (TuxMaintReq*)signal->getDataPtrSend(); - // ignore requests from redo log - if (c_internalStartPhase < 6 && - c_typeOfStart != NodeState::ST_NODE_RESTART && - c_typeOfStart != NodeState::ST_INITIAL_NODE_RESTART) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugMaint) { - TupLoc tupLoc(sig->pageId, sig->pageIndex); - debugOut << "opInfo=" << hex << sig->opInfo; - debugOut << " tableId=" << dec << sig->tableId; - debugOut << " indexId=" << dec << sig->indexId; - debugOut << " fragId=" << dec << sig->fragId; - debugOut << " tupLoc=" << tupLoc; - debugOut << " tupVersion=" << dec << sig->tupVersion; - debugOut << " -- ignored at ISP=" << dec << c_internalStartPhase; - debugOut << " TOS=" << dec << c_typeOfStart; - debugOut << endl; - } -#endif - sig->errorCode = 0; - return; - } - TuxMaintReq reqCopy = *sig; - TuxMaintReq* const req = &reqCopy; - const Uint32 opCode = req->opInfo & 0xFF; - const Uint32 opFlag = req->opInfo >> 8; - // get the index - IndexPtr indexPtr; - c_indexPool.getPtr(indexPtr, req->indexId); - ndbrequire(indexPtr.p->m_tableId == req->tableId); - // get base fragment id and extra bits - const Uint32 fragId = req->fragId; - // get the fragment - FragPtr fragPtr; - fragPtr.i = RNIL; - for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) { - jam(); - if (indexPtr.p->m_fragId[i] == fragId) { - jam(); - c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]); - break; - } - } - ndbrequire(fragPtr.i != RNIL); - Frag& frag = *fragPtr.p; - // set up index keys for this operation - setKeyAttrs(frag); - // set up search entry - TreeEnt ent; - ent.m_tupLoc = TupLoc(req->pageId, req->pageIndex); - ent.m_tupVersion = req->tupVersion; - // read search key - readKeyAttrs(frag, ent, 0, c_searchKey); - if (! frag.m_storeNullKey) { - // check if all keys are null - const unsigned numAttrs = frag.m_numAttrs; - bool allNull = true; - for (unsigned i = 0; i < numAttrs; i++) { - if (c_searchKey[i] != 0) { - jam(); - allNull = false; - break; - } - } - if (allNull) { - jam(); - req->errorCode = 0; - *sig = *req; - return; - } - } -#ifdef VM_TRACE - if (debugFlags & DebugMaint) { - debugOut << "opCode=" << dec << opCode; - debugOut << " opFlag=" << dec << opFlag; - debugOut << " tableId=" << dec << req->tableId; - debugOut << " indexId=" << dec << req->indexId; - debugOut << " fragId=" << dec << req->fragId; - debugOut << " entry=" << ent; - debugOut << endl; - } -#endif - // do the operation - req->errorCode = 0; - TreePos treePos; - bool ok; - switch (opCode) { - case TuxMaintReq::OpAdd: - jam(); - ok = searchToAdd(frag, c_searchKey, ent, treePos); -#ifdef VM_TRACE - if (debugFlags & DebugMaint) { - debugOut << treePos << (! ok ? " - error" : "") << endl; - } -#endif - if (! ok) { - jam(); - // there is no "Building" state so this will have to do - if (indexPtr.p->m_state == Index::Online) { - jam(); - req->errorCode = TuxMaintReq::SearchError; - } - break; - } - /* - * At most one new node is inserted in the operation. Pre-allocate - * it so that the operation cannot fail. 
- */ - if (frag.m_freeLoc == NullTupLoc) { - jam(); - NodeHandle node(frag); - req->errorCode = allocNode(signal, node); - if (req->errorCode != 0) { - jam(); - break; - } - // link to freelist - node.setLink(0, frag.m_freeLoc); - frag.m_freeLoc = node.m_loc; - ndbrequire(frag.m_freeLoc != NullTupLoc); - } - treeAdd(frag, treePos, ent); - break; - case TuxMaintReq::OpRemove: - jam(); - ok = searchToRemove(frag, c_searchKey, ent, treePos); -#ifdef VM_TRACE - if (debugFlags & DebugMaint) { - debugOut << treePos << (! ok ? " - error" : "") << endl; - } -#endif - if (! ok) { - jam(); - // there is no "Building" state so this will have to do - if (indexPtr.p->m_state == Index::Online) { - jam(); - req->errorCode = TuxMaintReq::SearchError; - } - break; - } - treeRemove(frag, treePos); - break; - default: - ndbrequire(false); - break; - } -#ifdef VM_TRACE - if (debugFlags & DebugTree) { - printTree(signal, frag, debugOut); - } -#endif - // copy back - *sig = *req; -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp deleted file mode 100644 index 423ca83af14..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp +++ /dev/null @@ -1,512 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_META_CPP -#include "Dbtux.hpp" -#include - -/* - * Create index. - * - * For historical reasons it looks like we are adding random fragments - * and attributes to existing index. In fact all fragments must be - * created at one time and they have identical attributes. 
- */ - -void -Dbtux::execTUXFRAGREQ(Signal* signal) -{ - jamEntry(); - if (signal->theData[0] == (Uint32)-1) { - jam(); - abortAddFragOp(signal); - return; - } - const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr(); - const TuxFragReq* const req = &reqCopy; - IndexPtr indexPtr; - indexPtr.i = RNIL; - FragOpPtr fragOpPtr; - fragOpPtr.i = RNIL; - TuxFragRef::ErrorCode errorCode = TuxFragRef::NoError; - do { - // get the index record - if (req->tableId >= c_indexPool.getSize()) { - jam(); - errorCode = TuxFragRef::InvalidRequest; - break; - } - c_indexPool.getPtr(indexPtr, req->tableId); - if (indexPtr.p->m_state != Index::NotDefined && - indexPtr.p->m_state != Index::Defining) { - jam(); - errorCode = TuxFragRef::InvalidRequest; - indexPtr.i = RNIL; // leave alone - break; - } - // get new operation record - c_fragOpPool.seize(fragOpPtr); - ndbrequire(fragOpPtr.i != RNIL); - new (fragOpPtr.p) FragOp(); - fragOpPtr.p->m_userPtr = req->userPtr; - fragOpPtr.p->m_userRef = req->userRef; - fragOpPtr.p->m_indexId = req->tableId; - fragOpPtr.p->m_fragId = req->fragId; - fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags; - fragOpPtr.p->m_numAttrsRecvd = 0; -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl; - } -#endif - // check if index has place for more fragments - ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments); - // seize new fragment record - FragPtr fragPtr; - c_fragPool.seize(fragPtr); - if (fragPtr.i == RNIL) { - jam(); - errorCode = TuxFragRef::NoFreeFragment; - break; - } - new (fragPtr.p) Frag(c_scanOpPool); - fragPtr.p->m_tableId = req->primaryTableId; - fragPtr.p->m_indexId = req->tableId; - fragPtr.p->m_fragId = req->fragId; - fragPtr.p->m_numAttrs = req->noOfAttr; - fragPtr.p->m_storeNullKey = true; // not yet configurable - fragPtr.p->m_tupIndexFragPtrI = req->tupIndexFragPtrI; - fragPtr.p->m_tupTableFragPtrI = req->tupTableFragPtrI[0]; - fragPtr.p->m_accTableFragPtrI = req->accTableFragPtrI[0]; - // add the fragment to the index - indexPtr.p->m_fragId[indexPtr.p->m_numFrags] = req->fragId; - indexPtr.p->m_fragPtrI[indexPtr.p->m_numFrags] = fragPtr.i; - indexPtr.p->m_numFrags++; - // save under operation - fragOpPtr.p->m_fragPtrI = fragPtr.i; - // prepare to receive attributes - if (fragOpPtr.p->m_fragNo == 0) { - jam(); - // receiving first fragment - ndbrequire( - indexPtr.p->m_state == Index::NotDefined && - DictTabInfo::isOrderedIndex(req->tableType) && - req->noOfAttr > 0 && - req->noOfAttr <= MaxIndexAttributes && - indexPtr.p->m_descPage == RNIL); - indexPtr.p->m_state = Index::Defining; - indexPtr.p->m_tableType = (DictTabInfo::TableType)req->tableType; - indexPtr.p->m_tableId = req->primaryTableId; - indexPtr.p->m_numAttrs = req->noOfAttr; - indexPtr.p->m_storeNullKey = true; // not yet configurable - // allocate attribute descriptors - if (! 
allocDescEnt(indexPtr)) { - jam(); - errorCode = TuxFragRef::NoFreeAttributes; - break; - } - } else { - // receiving subsequent fragment - jam(); - ndbrequire( - indexPtr.p->m_state == Index::Defining && - indexPtr.p->m_tableType == (DictTabInfo::TableType)req->tableType && - indexPtr.p->m_tableId == req->primaryTableId && - indexPtr.p->m_numAttrs == req->noOfAttr); - } - // copy metadata address to each fragment - fragPtr.p->m_descPage = indexPtr.p->m_descPage; - fragPtr.p->m_descOff = indexPtr.p->m_descOff; -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl; - } -#endif - // error inserts - if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 || - ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) { - jam(); - errorCode = (TuxFragRef::ErrorCode)1; - CLEAR_ERROR_INSERT_VALUE; - break; - } - // success - TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend(); - conf->userPtr = req->userPtr; - conf->tuxConnectPtr = fragOpPtr.i; - conf->fragPtr = fragPtr.i; - conf->fragId = fragPtr.p->m_fragId; - sendSignal(req->userRef, GSN_TUXFRAGCONF, - signal, TuxFragConf::SignalLength, JBB); - return; - } while (0); - // error - TuxFragRef* const ref = (TuxFragRef*)signal->getDataPtrSend(); - ref->userPtr = req->userPtr; - ref->errorCode = errorCode; - sendSignal(req->userRef, GSN_TUXFRAGREF, - signal, TuxFragRef::SignalLength, JBB); - if (fragOpPtr.i != RNIL) { -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl; - } -#endif - c_fragOpPool.release(fragOpPtr); - } - if (indexPtr.i != RNIL) { - jam(); - // let DICT drop the unfinished index - } -} - -void -Dbtux::execTUX_ADD_ATTRREQ(Signal* signal) -{ - jamEntry(); - const TuxAddAttrReq reqCopy = *(const TuxAddAttrReq*)signal->getDataPtr(); - const TuxAddAttrReq* const req = &reqCopy; - // get the records - FragOpPtr fragOpPtr; - IndexPtr indexPtr; - FragPtr fragPtr; - c_fragOpPool.getPtr(fragOpPtr, req->tuxConnectPtr); - c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId); - c_fragPool.getPtr(fragPtr, fragOpPtr.p->m_fragPtrI); - TuxAddAttrRef::ErrorCode errorCode = TuxAddAttrRef::NoError; - do { - // expected attribute id - const unsigned attrId = fragOpPtr.p->m_numAttrsRecvd++; - ndbrequire( - indexPtr.p->m_state == Index::Defining && - attrId < indexPtr.p->m_numAttrs && - attrId == req->attrId); - // define the attribute - DescEnt& descEnt = getDescEnt(indexPtr.p->m_descPage, indexPtr.p->m_descOff); - DescAttr& descAttr = descEnt.m_descAttr[attrId]; - descAttr.m_attrDesc = req->attrDescriptor; - descAttr.m_primaryAttrId = req->primaryAttrId; - descAttr.m_typeId = AttributeDescriptor::getType(req->attrDescriptor); - descAttr.m_charset = (req->extTypeInfo >> 16); -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl; - } -#endif - // check that type is valid and has a binary comparison method - const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId); - if (type.m_typeId == NdbSqlUtil::Type::Undefined || - type.m_cmp == 0) { - jam(); - errorCode = TuxAddAttrRef::InvalidAttributeType; - break; - } - if (descAttr.m_charset != 0) { - uint err; - CHARSET_INFO *cs = all_charsets[descAttr.m_charset]; - ndbrequire(cs != 0); - if ((err = NdbSqlUtil::check_column_for_ordered_index(descAttr.m_typeId, cs))) { - jam(); - errorCode = (TuxAddAttrRef::ErrorCode) err; - break; - } - } - const bool 
lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd); - if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 || - ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr || - ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 || - ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) { - errorCode = (TuxAddAttrRef::ErrorCode)1; - CLEAR_ERROR_INSERT_VALUE; - break; - } - if (lastAttr) { - jam(); - // initialize tree header - TreeHead& tree = fragPtr.p->m_tree; - new (&tree) TreeHead(); - // make these configurable later - tree.m_nodeSize = MAX_TTREE_NODE_SIZE; - tree.m_prefSize = MAX_TTREE_PREF_SIZE; - const unsigned maxSlack = MAX_TTREE_NODE_SLACK; - // size up to and including first 2 entries - const unsigned pref = tree.getSize(AccPref); - if (! (pref <= tree.m_nodeSize)) { - jam(); - errorCode = TuxAddAttrRef::InvalidNodeSize; - break; - } - const unsigned slots = (tree.m_nodeSize - pref) / TreeEntSize; - // leave out work space entry - tree.m_maxOccup = 2 + slots - 1; - // min occupancy of interior node must be at least 2 - if (! (2 + maxSlack <= tree.m_maxOccup)) { - jam(); - errorCode = TuxAddAttrRef::InvalidNodeSize; - break; - } - tree.m_minOccup = tree.m_maxOccup - maxSlack; - // root node does not exist (also set by ctor) - tree.m_root = NullTupLoc; -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - if (fragOpPtr.p->m_fragNo == 0) { - debugOut << "Index id=" << indexPtr.i; - debugOut << " nodeSize=" << tree.m_nodeSize; - debugOut << " headSize=" << NodeHeadSize; - debugOut << " prefSize=" << tree.m_prefSize; - debugOut << " entrySize=" << TreeEntSize; - debugOut << " minOccup=" << tree.m_minOccup; - debugOut << " maxOccup=" << tree.m_maxOccup; - debugOut << endl; - } - } -#endif - // fragment is defined -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl; - } -#endif - c_fragOpPool.release(fragOpPtr); - } - // success - TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend(); - conf->userPtr = fragOpPtr.p->m_userPtr; - conf->lastAttr = lastAttr; - sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF, - signal, TuxAddAttrConf::SignalLength, JBB); - return; - } while (0); - // error - TuxAddAttrRef* ref = (TuxAddAttrRef*)signal->getDataPtrSend(); - ref->userPtr = fragOpPtr.p->m_userPtr; - ref->errorCode = errorCode; - sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF, - signal, TuxAddAttrRef::SignalLength, JBB); -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl; - } -#endif - c_fragOpPool.release(fragOpPtr); - // let DICT drop the unfinished index -} - -/* - * LQH aborts on-going create index operation. - */ -void -Dbtux::abortAddFragOp(Signal* signal) -{ - FragOpPtr fragOpPtr; - IndexPtr indexPtr; - c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]); - c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId); -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl; - } -#endif - c_fragOpPool.release(fragOpPtr); - // let DICT drop the unfinished index -} - -/* - * Set index online. Currently at system restart this arrives before - * build and is therefore not correct. 
- */ -void -Dbtux::execALTER_INDX_REQ(Signal* signal) -{ - jamEntry(); - const AlterIndxReq reqCopy = *(const AlterIndxReq*)signal->getDataPtr(); - const AlterIndxReq* const req = &reqCopy; - // set index online after build - IndexPtr indexPtr; - c_indexPool.getPtr(indexPtr, req->getIndexId()); - indexPtr.p->m_state = Index::Online; -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Online index " << indexPtr.i << " " << *indexPtr.p << endl; - } -#endif - // success - AlterIndxConf* const conf = (AlterIndxConf*)signal->getDataPtrSend(); - conf->setUserRef(reference()); - conf->setConnectionPtr(req->getConnectionPtr()); - conf->setRequestType(req->getRequestType()); - conf->setTableId(req->getTableId()); - conf->setIndexId(req->getIndexId()); - conf->setIndexVersion(req->getIndexVersion()); - sendSignal(req->getUserRef(), GSN_ALTER_INDX_CONF, - signal, AlterIndxConf::SignalLength, JBB); -} - -/* - * Drop index. - * - * Uses same DROP_TAB_REQ signal as normal tables. - */ - -void -Dbtux::execDROP_TAB_REQ(Signal* signal) -{ - jamEntry(); - const DropTabReq reqCopy = *(const DropTabReq*)signal->getDataPtr(); - const DropTabReq* const req = &reqCopy; - IndexPtr indexPtr; - - Uint32 tableId = req->tableId; - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - if (tableId >= c_indexPool.getSize()) { - jam(); - // reply to sender - DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->tableId = tableId; - sendSignal(senderRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); - return; - } - - c_indexPool.getPtr(indexPtr, req->tableId); - // drop works regardless of index state -#ifdef VM_TRACE - if (debugFlags & DebugMeta) { - debugOut << "Drop index " << indexPtr.i << " " << *indexPtr.p << endl; - } -#endif - ndbrequire(req->senderRef != 0); - dropIndex(signal, indexPtr, req->senderRef, req->senderData); -} - -void -Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData) -{ - jam(); - indexPtr.p->m_state = Index::Dropping; - // drop fragments - while (indexPtr.p->m_numFrags > 0) { - jam(); - Uint32 i = --indexPtr.p->m_numFrags; - FragPtr fragPtr; - c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]); - c_fragPool.release(fragPtr); - } - // drop attributes - if (indexPtr.p->m_descPage != RNIL) { - jam(); - freeDescEnt(indexPtr); - indexPtr.p->m_descPage = RNIL; - } - if (senderRef != 0) { - jam(); - // reply to sender - DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - conf->tableId = indexPtr.i; - sendSignal(senderRef, GSN_DROP_TAB_CONF, - signal, DropTabConf::SignalLength, JBB); - } - new (indexPtr.p) Index(); -} - -/* - * Subroutines. - */ - -bool -Dbtux::allocDescEnt(IndexPtr indexPtr) -{ - jam(); - const unsigned size = DescHeadSize + indexPtr.p->m_numAttrs * DescAttrSize; - DescPagePtr pagePtr; - pagePtr.i = c_descPageList; - while (pagePtr.i != RNIL) { - jam(); - c_descPagePool.getPtr(pagePtr); - if (pagePtr.p->m_numFree >= size) { - jam(); - break; - } - pagePtr.i = pagePtr.p->m_nextPage; - } - if (pagePtr.i == RNIL) { - jam(); - if (! 
c_descPagePool.seize(pagePtr)) { - jam(); - return false; - } - new (pagePtr.p) DescPage(); - // add in front of list - pagePtr.p->m_nextPage = c_descPageList; - c_descPageList = pagePtr.i; - pagePtr.p->m_numFree = DescPageSize; - } - ndbrequire(pagePtr.p->m_numFree >= size); - indexPtr.p->m_descPage = pagePtr.i; - indexPtr.p->m_descOff = DescPageSize - pagePtr.p->m_numFree; - pagePtr.p->m_numFree -= size; - DescEnt& descEnt = getDescEnt(indexPtr.p->m_descPage, indexPtr.p->m_descOff); - descEnt.m_descHead.m_indexId = indexPtr.i; - descEnt.m_descHead.pad1 = 0; - return true; -} - -void -Dbtux::freeDescEnt(IndexPtr indexPtr) -{ - DescPagePtr pagePtr; - c_descPagePool.getPtr(pagePtr, indexPtr.p->m_descPage); - Uint32* const data = pagePtr.p->m_data; - const unsigned size = DescHeadSize + indexPtr.p->m_numAttrs * DescAttrSize; - unsigned off = indexPtr.p->m_descOff; - // move the gap to the free area at the top - while (off + size < DescPageSize - pagePtr.p->m_numFree) { - jam(); - // next entry to move over the gap - DescEnt& descEnt2 = *(DescEnt*)&data[off + size]; - Uint32 indexId2 = descEnt2.m_descHead.m_indexId; - Index& index2 = *c_indexPool.getPtr(indexId2); - unsigned size2 = DescHeadSize + index2.m_numAttrs * DescAttrSize; - ndbrequire( - index2.m_descPage == pagePtr.i && - index2.m_descOff == off + size); - // move the entry (overlapping copy if size < size2) - unsigned i; - for (i = 0; i < size2; i++) { - jam(); - data[off + i] = data[off + size + i]; - } - off += size2; - // adjust page offset in index and all fragments - index2.m_descOff -= size; - for (i = 0; i < index2.m_numFrags; i++) { - jam(); - Frag& frag2 = *c_fragPool.getPtr(index2.m_fragPtrI[i]); - frag2.m_descOff -= size; - ndbrequire( - frag2.m_descPage == index2.m_descPage && - frag2.m_descOff == index2.m_descOff); - } - } - ndbrequire(off + size == DescPageSize - pagePtr.p->m_numFree); - pagePtr.p->m_numFree += size; -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp deleted file mode 100644 index 4cbd9103c94..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp +++ /dev/null @@ -1,590 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_NODE_CPP -#include "Dbtux.hpp" - -/* - * Allocate index node in TUP. 
- */ -int -Dbtux::allocNode(Signal* signal, NodeHandle& node) -{ - if (ERROR_INSERTED(12007)) { - jam(); - CLEAR_ERROR_INSERT_VALUE; - return TuxMaintReq::NoMemError; - } - Frag& frag = node.m_frag; - Uint32 pageId = NullTupLoc.getPageId(); - Uint32 pageOffset = NullTupLoc.getPageOffset(); - Uint32* node32 = 0; - int errorCode = c_tup->tuxAllocNode(signal, frag.m_tupIndexFragPtrI, pageId, pageOffset, node32); - jamEntry(); - if (errorCode == 0) { - jam(); - node.m_loc = TupLoc(pageId, pageOffset); - node.m_node = reinterpret_cast(node32); - ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0); - } else { - switch (errorCode) { - case 827: - errorCode = TuxMaintReq::NoMemError; - break; - } - } - return errorCode; -} - -/* - * Set handle to point to existing node. - */ -void -Dbtux::selectNode(NodeHandle& node, TupLoc loc) -{ - Frag& frag = node.m_frag; - ndbrequire(loc != NullTupLoc); - Uint32 pageId = loc.getPageId(); - Uint32 pageOffset = loc.getPageOffset(); - Uint32* node32 = 0; - c_tup->tuxGetNode(frag.m_tupIndexFragPtrI, pageId, pageOffset, node32); - jamEntry(); - node.m_loc = loc; - node.m_node = reinterpret_cast(node32); - ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0); -} - -/* - * Set handle to point to new node. Uses a pre-allocated node. - */ -void -Dbtux::insertNode(NodeHandle& node) -{ - Frag& frag = node.m_frag; - // unlink from freelist - selectNode(node, frag.m_freeLoc); - frag.m_freeLoc = node.getLink(0); - new (node.m_node) TreeNode(); -#ifdef VM_TRACE - TreeHead& tree = frag.m_tree; - memset(node.getPref(), DataFillByte, tree.m_prefSize << 2); - TreeEnt* entList = tree.getEntList(node.m_node); - memset(entList, NodeFillByte, (tree.m_maxOccup + 1) * (TreeEntSize << 2)); -#endif -} - -/* - * Delete existing node. Simply put it on the freelist. - */ -void -Dbtux::deleteNode(NodeHandle& node) -{ - Frag& frag = node.m_frag; - ndbrequire(node.getOccup() == 0); - // link to freelist - node.setLink(0, frag.m_freeLoc); - frag.m_freeLoc = node.m_loc; - // invalidate the handle - node.m_loc = NullTupLoc; - node.m_node = 0; -} - -/* - * Set prefix. Copies the number of words that fits. Includes - * attribute headers for now. XXX use null mask instead - */ -void -Dbtux::setNodePref(NodeHandle& node) -{ - const Frag& frag = node.m_frag; - const TreeHead& tree = frag.m_tree; - readKeyAttrs(frag, node.getMinMax(0), 0, c_entryKey); - copyAttrs(frag, c_entryKey, node.getPref(), tree.m_prefSize); -} - -// node operations - -/* - * Add entry at position. Move entries greater than or equal to the old - * one (if any) to the right. - * - * X - * v - * A B C D E _ _ => A B C X D E _ - * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 - * - * Add list of scans at the new entry. 
- */ -void -Dbtux::nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList) -{ - Frag& frag = node.m_frag; - TreeHead& tree = frag.m_tree; - const unsigned occup = node.getOccup(); - ndbrequire(occup < tree.m_maxOccup && pos <= occup); - // fix old scans - if (node.getNodeScan() != RNIL) - nodePushUpScans(node, pos); - // fix node - TreeEnt* const entList = tree.getEntList(node.m_node); - entList[occup] = entList[0]; - TreeEnt* const tmpList = entList + 1; - for (unsigned i = occup; i > pos; i--) { - jam(); - tmpList[i] = tmpList[i - 1]; - } - tmpList[pos] = ent; - entList[0] = entList[occup + 1]; - node.setOccup(occup + 1); - // add new scans - if (scanList != RNIL) - addScanList(node, pos, scanList); - // fix prefix - if (occup == 0 || pos == 0) - setNodePref(node); -} - -void -Dbtux::nodePushUpScans(NodeHandle& node, unsigned pos) -{ - const unsigned occup = node.getOccup(); - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup); - if (scanPos.m_pos >= pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "At pushUp pos=" << pos << " " << node << endl; - } -#endif - scanPos.m_pos++; - } - scanPtr.i = scanPtr.p->m_nodeScan; - } while (scanPtr.i != RNIL); -} - -/* - * Remove and return entry at position. Move entries greater than the - * removed one to the left. This is the opposite of nodePushUp. - * - * D - * ^ ^ - * A B C D E F _ => A B C E F _ _ - * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 - * - * Scans at removed entry are returned if non-zero location is passed or - * else moved forward. - */ -void -Dbtux::nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList) -{ - Frag& frag = node.m_frag; - TreeHead& tree = frag.m_tree; - const unsigned occup = node.getOccup(); - ndbrequire(occup <= tree.m_maxOccup && pos < occup); - if (node.getNodeScan() != RNIL) { - // remove or move scans at this position - if (scanList == 0) - moveScanList(node, pos); - else - removeScanList(node, pos, *scanList); - // fix other scans - if (node.getNodeScan() != RNIL) - nodePopDownScans(node, pos); - } - // fix node - TreeEnt* const entList = tree.getEntList(node.m_node); - entList[occup] = entList[0]; - TreeEnt* const tmpList = entList + 1; - ent = tmpList[pos]; - for (unsigned i = pos; i < occup - 1; i++) { - jam(); - tmpList[i] = tmpList[i + 1]; - } - entList[0] = entList[occup - 1]; - node.setOccup(occup - 1); - // fix prefix - if (occup != 1 && pos == 0) - setNodePref(node); -} - -void -Dbtux::nodePopDownScans(NodeHandle& node, unsigned pos) -{ - const unsigned occup = node.getOccup(); - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup); - // handled before - ndbrequire(scanPos.m_pos != pos); - if (scanPos.m_pos > pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "At popDown pos=" << pos << " " << node << endl; - } -#endif - scanPos.m_pos--; - } - scanPtr.i = scanPtr.p->m_nodeScan; - } while (scanPtr.i != RNIL); -} - -/* - * Add entry at existing position. Move entries less than or equal to - * the old one to the left. Remove and return old min entry. 
- * - * X A - * ^ v ^ - * A B C D E _ _ => B C D X E _ _ - * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 - * - * Return list of scans at the removed position 0. - */ -void -Dbtux::nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList) -{ - Frag& frag = node.m_frag; - TreeHead& tree = frag.m_tree; - const unsigned occup = node.getOccup(); - ndbrequire(occup <= tree.m_maxOccup && pos < occup); - if (node.getNodeScan() != RNIL) { - // remove scans at 0 - removeScanList(node, 0, scanList); - // fix other scans - if (node.getNodeScan() != RNIL) - nodePushDownScans(node, pos); - } - // fix node - TreeEnt* const entList = tree.getEntList(node.m_node); - entList[occup] = entList[0]; - TreeEnt* const tmpList = entList + 1; - TreeEnt oldMin = tmpList[0]; - for (unsigned i = 0; i < pos; i++) { - jam(); - tmpList[i] = tmpList[i + 1]; - } - tmpList[pos] = ent; - ent = oldMin; - entList[0] = entList[occup]; - // fix prefix - if (true) - setNodePref(node); -} - -void -Dbtux::nodePushDownScans(NodeHandle& node, unsigned pos) -{ - const unsigned occup = node.getOccup(); - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup); - // handled before - ndbrequire(scanPos.m_pos != 0); - if (scanPos.m_pos <= pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "At pushDown pos=" << pos << " " << node << endl; - } -#endif - scanPos.m_pos--; - } - scanPtr.i = scanPtr.p->m_nodeScan; - } while (scanPtr.i != RNIL); -} - -/* - * Remove and return entry at position. Move entries less than the - * removed one to the right. Replace min entry by the input entry. - * This is the opposite of nodePushDown. - * - * X D - * v ^ ^ - * A B C D E _ _ => X A B C E _ _ - * 0 1 2 3 4 5 6 0 1 2 3 4 5 6 - * - * Move scans at removed entry and add scans at the new entry. 
- */ -void -Dbtux::nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList) -{ - Frag& frag = node.m_frag; - TreeHead& tree = frag.m_tree; - const unsigned occup = node.getOccup(); - ndbrequire(occup <= tree.m_maxOccup && pos < occup); - if (node.getNodeScan() != RNIL) { - // move scans whose entry disappears - moveScanList(node, pos); - // fix other scans - if (node.getNodeScan() != RNIL) - nodePopUpScans(node, pos); - } - // fix node - TreeEnt* const entList = tree.getEntList(node.m_node); - entList[occup] = entList[0]; - TreeEnt* const tmpList = entList + 1; - TreeEnt newMin = ent; - ent = tmpList[pos]; - for (unsigned i = pos; i > 0; i--) { - jam(); - tmpList[i] = tmpList[i - 1]; - } - tmpList[0] = newMin; - entList[0] = entList[occup]; - // add scans - if (scanList != RNIL) - addScanList(node, 0, scanList); - // fix prefix - if (true) - setNodePref(node); -} - -void -Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos) -{ - const unsigned occup = node.getOccup(); - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup); - ndbrequire(scanPos.m_pos != pos); - if (scanPos.m_pos < pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Fix scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "At popUp pos=" << pos << " " << node << endl; - } -#endif - scanPos.m_pos++; - } - scanPtr.i = scanPtr.p->m_nodeScan; - } while (scanPtr.i != RNIL); -} - -/* - * Move number of entries from another node to this node before the min - * (i=0) or after the max (i=1). Expensive but not often used. - */ -void -Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i) -{ - ndbrequire(i <= 1); - while (cnt != 0) { - TreeEnt ent; - Uint32 scanList = RNIL; - nodePopDown(srcNode, i == 0 ? srcNode.getOccup() - 1 : 0, ent, &scanList); - nodePushUp(dstNode, i == 0 ? 0 : dstNode.getOccup(), ent, scanList); - cnt--; - } -} - -// scans linked to node - - -/* - * Add list of scans to node at given position. - */ -void -Dbtux::addScanList(NodeHandle& node, unsigned pos, Uint32 scanList) -{ - ScanOpPtr scanPtr; - scanPtr.i = scanList; - do { - jam(); - c_scanOpPool.getPtr(scanPtr); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Add scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "To pos=" << pos << " " << node << endl; - } -#endif - const Uint32 nextPtrI = scanPtr.p->m_nodeScan; - scanPtr.p->m_nodeScan = RNIL; - linkScan(node, scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - // set position but leave direction alone - scanPos.m_loc = node.m_loc; - scanPos.m_pos = pos; - scanPtr.i = nextPtrI; - } while (scanPtr.i != RNIL); -} - -/* - * Remove list of scans from node at given position. The return - * location must point to existing list (in fact RNIL always). 
- */ -void -Dbtux::removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList) -{ - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - const Uint32 nextPtrI = scanPtr.p->m_nodeScan; - TreePos& scanPos = scanPtr.p->m_scanPos; - ndbrequire(scanPos.m_loc == node.m_loc); - if (scanPos.m_pos == pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Remove scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "Fron pos=" << pos << " " << node << endl; - } -#endif - unlinkScan(node, scanPtr); - scanPtr.p->m_nodeScan = scanList; - scanList = scanPtr.i; - // unset position but leave direction alone - scanPos.m_loc = NullTupLoc; - scanPos.m_pos = ZNIL; - } - scanPtr.i = nextPtrI; - } while (scanPtr.i != RNIL); -} - -/* - * Move list of scans away from entry about to be removed. Uses scan - * method scanNext(). - */ -void -Dbtux::moveScanList(NodeHandle& node, unsigned pos) -{ - ScanOpPtr scanPtr; - scanPtr.i = node.getNodeScan(); - do { - jam(); - c_scanOpPool.getPtr(scanPtr); - TreePos& scanPos = scanPtr.p->m_scanPos; - const Uint32 nextPtrI = scanPtr.p->m_nodeScan; - ndbrequire(scanPos.m_loc == node.m_loc); - if (scanPos.m_pos == pos) { - jam(); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "At pos=" << pos << " " << node << endl; - } -#endif - scanNext(scanPtr, true); - ndbrequire(! (scanPos.m_loc == node.m_loc && scanPos.m_pos == pos)); - } - scanPtr.i = nextPtrI; - } while (scanPtr.i != RNIL); -} - -/* - * Link scan to the list under the node. The list is single-linked and - * ordering does not matter. - */ -void -Dbtux::linkScan(NodeHandle& node, ScanOpPtr scanPtr) -{ -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Link scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "To node " << node << endl; - } -#endif - ndbrequire(! islinkScan(node, scanPtr) && scanPtr.p->m_nodeScan == RNIL); - scanPtr.p->m_nodeScan = node.getNodeScan(); - node.setNodeScan(scanPtr.i); -} - -/* - * Unlink a scan from the list under the node. - */ -void -Dbtux::unlinkScan(NodeHandle& node, ScanOpPtr scanPtr) -{ -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Unlink scan " << scanPtr.i << " " << *scanPtr.p << endl; - debugOut << "From node " << node << endl; - } -#endif - ScanOpPtr currPtr; - currPtr.i = node.getNodeScan(); - ScanOpPtr prevPtr; - prevPtr.i = RNIL; - while (true) { - jam(); - c_scanOpPool.getPtr(currPtr); - Uint32 nextPtrI = currPtr.p->m_nodeScan; - if (currPtr.i == scanPtr.i) { - jam(); - if (prevPtr.i == RNIL) { - node.setNodeScan(nextPtrI); - } else { - jam(); - prevPtr.p->m_nodeScan = nextPtrI; - } - scanPtr.p->m_nodeScan = RNIL; - // check for duplicates - ndbrequire(! islinkScan(node, scanPtr)); - return; - } - prevPtr = currPtr; - currPtr.i = nextPtrI; - } -} - -/* - * Check if a scan is linked to this node. Only for ndbrequire. 
- */ -bool -Dbtux::islinkScan(NodeHandle& node, ScanOpPtr scanPtr) -{ - ScanOpPtr currPtr; - currPtr.i = node.getNodeScan(); - while (currPtr.i != RNIL) { - jam(); - c_scanOpPool.getPtr(currPtr); - if (currPtr.i == scanPtr.i) { - jam(); - return true; - } - currPtr.i = currPtr.p->m_nodeScan; - } - return false; -} - -void -Dbtux::NodeHandle::progError(int line, int cause, const char* file) -{ - ErrorReporter::handleAssert("Dbtux::NodeHandle: assert failed", file, line); -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp deleted file mode 100644 index 0b910eff341..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ /dev/null @@ -1,1123 +0,0 @@ -/* Copyright (c) 2003-2006, 2008 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_SCAN_CPP -#include "Dbtux.hpp" -#include - -void -Dbtux::execACC_SCANREQ(Signal* signal) -{ - jamEntry(); - const AccScanReq reqCopy = *(const AccScanReq*)signal->getDataPtr(); - const AccScanReq* const req = &reqCopy; - ScanOpPtr scanPtr; - scanPtr.i = RNIL; - do { - // get the index - IndexPtr indexPtr; - c_indexPool.getPtr(indexPtr, req->tableId); - // get the fragment - FragPtr fragPtr; - fragPtr.i = RNIL; - for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) { - jam(); - if (indexPtr.p->m_fragId[i] == req->fragmentNo) { - jam(); - c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]); - break; - } - } - ndbrequire(fragPtr.i != RNIL); - Frag& frag = *fragPtr.p; - // must be normal DIH/TC fragment - TreeHead& tree = frag.m_tree; - // check for empty fragment - if (tree.m_root == NullTupLoc) { - jam(); - AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend(); - conf->scanPtr = req->senderData; - conf->accPtr = RNIL; - conf->flag = AccScanConf::ZEMPTY_FRAGMENT; - sendSignal(req->senderRef, GSN_ACC_SCANCONF, - signal, AccScanConf::SignalLength, JBB); - return; - } - // seize from pool and link to per-fragment list - if (! 
frag.m_scanList.seize(scanPtr)) { - jam(); - break; - } - new (scanPtr.p) ScanOp(c_scanBoundPool); - scanPtr.p->m_state = ScanOp::First; - scanPtr.p->m_userPtr = req->senderData; - scanPtr.p->m_userRef = req->senderRef; - scanPtr.p->m_tableId = indexPtr.p->m_tableId; - scanPtr.p->m_indexId = indexPtr.i; - scanPtr.p->m_fragId = fragPtr.p->m_fragId; - scanPtr.p->m_fragPtrI = fragPtr.i; - scanPtr.p->m_transId1 = req->transId1; - scanPtr.p->m_transId2 = req->transId2; - scanPtr.p->m_savePointId = req->savePointId; - scanPtr.p->m_readCommitted = AccScanReq::getReadCommittedFlag(req->requestInfo); - scanPtr.p->m_lockMode = AccScanReq::getLockMode(req->requestInfo); - scanPtr.p->m_descending = AccScanReq::getDescendingFlag(req->requestInfo); - /* - * readCommitted lockMode keyInfo - * 1 0 0 - read committed (no lock) - * 0 0 0 - read latest (read lock) - * 0 1 1 - read exclusive (write lock) - */ -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Seize scan " << scanPtr.i << " " << *scanPtr.p << endl; - } -#endif - // conf - AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend(); - conf->scanPtr = req->senderData; - conf->accPtr = scanPtr.i; - conf->flag = AccScanConf::ZNOT_EMPTY_FRAGMENT; - sendSignal(req->senderRef, GSN_ACC_SCANCONF, - signal, AccScanConf::SignalLength, JBB); - return; - } while (0); - if (scanPtr.i != RNIL) { - jam(); - releaseScanOp(scanPtr); - } - // LQH does not handle REF - signal->theData[0] = 0x313; - sendSignal(req->senderRef, GSN_ACC_SCANREF, - signal, 1, JBB); -} - -/* - * Receive bounds for scan in single direct call. The bounds can arrive - * in any order. Attribute ids are those of index table. - * - * Replace EQ by equivalent LE + GE. Check for conflicting bounds. - * Check that sets of lower and upper bounds are on initial sequences of - * keys and that all but possibly last bound is non-strict. - * - * Finally save the sets of lower and upper bounds (i.e. start key and - * end key). Full bound type is included but only the strict bit is - * used since lower and upper have now been separated. 
- */ -void -Dbtux::execTUX_BOUND_INFO(Signal* signal) -{ - jamEntry(); - // get records - TuxBoundInfo* const sig = (TuxBoundInfo*)signal->getDataPtrSend(); - const TuxBoundInfo* const req = (const TuxBoundInfo*)sig; - ScanOp& scan = *c_scanOpPool.getPtr(req->tuxScanPtrI); - const Index& index = *c_indexPool.getPtr(scan.m_indexId); - const DescEnt& descEnt = getDescEnt(index.m_descPage, index.m_descOff); - // collect normalized lower and upper bounds - struct BoundInfo { - int type2; // with EQ -> LE/GE - Uint32 offset; // offset in xfrmData - Uint32 size; - }; - BoundInfo boundInfo[2][MaxIndexAttributes]; - const unsigned dstSize = 1024 * MAX_XFRM_MULTIPLY; - Uint32 xfrmData[dstSize]; - Uint32 dstPos = 0; - // largest attrId seen plus one - Uint32 maxAttrId[2] = { 0, 0 }; - // walk through entries - const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength; - Uint32 offset = 0; - while (offset + 2 <= req->boundAiLength) { - jam(); - const unsigned type = data[offset]; - const AttributeHeader* ah = (const AttributeHeader*)&data[offset + 1]; - const Uint32 attrId = ah->getAttributeId(); - const Uint32 dataSize = ah->getDataSize(); - if (type > 4 || attrId >= index.m_numAttrs || dstPos + 2 + dataSize > dstSize) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidAttrInfo; - return; - } - // copy header - xfrmData[dstPos + 0] = data[offset + 0]; - xfrmData[dstPos + 1] = data[offset + 1]; - // copy bound value - Uint32 dstWords = 0; - if (! ah->isNULL()) { - jam(); - const uchar* srcPtr = (const uchar*)&data[offset + 2]; - const DescAttr& descAttr = descEnt.m_descAttr[attrId]; - Uint32 typeId = descAttr.m_typeId; - Uint32 maxBytes = AttributeDescriptor::getSizeInBytes(descAttr.m_attrDesc); - Uint32 lb, len; - bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, maxBytes, lb, len); - if (! 
ok) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidCharFormat; - return; - } - Uint32 srcBytes = lb + len; - Uint32 srcWords = (srcBytes + 3) / 4; - if (srcWords != dataSize) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidAttrInfo; - return; - } - uchar* dstPtr = (uchar*)&xfrmData[dstPos + 2]; - if (descAttr.m_charset == 0) { - memcpy(dstPtr, srcPtr, srcWords << 2); - dstWords = srcWords; - } else { - jam(); - CHARSET_INFO* cs = all_charsets[descAttr.m_charset]; - Uint32 xmul = cs->strxfrm_multiply; - if (xmul == 0) - xmul = 1; - // see comment in DbtcMain.cpp - Uint32 dstLen = xmul * (maxBytes - lb); - if (dstLen > ((dstSize - dstPos) << 2)) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::TooMuchAttrInfo; - return; - } - int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); - ndbrequire(n != -1); - while ((n & 3) != 0) { - dstPtr[n++] = 0; - } - dstWords = n / 4; - } - } - for (unsigned j = 0; j <= 1; j++) { - jam(); - // check if lower/upper bit matches - const unsigned luBit = (j << 1); - if ((type & 0x2) != luBit && type != 4) - continue; - // EQ -> LE, GE - const unsigned type2 = (type & 0x1) | luBit; - // fill in any gap - while (maxAttrId[j] <= attrId) { - jam(); - BoundInfo& b = boundInfo[j][maxAttrId[j]]; - maxAttrId[j]++; - b.type2 = -1; - } - BoundInfo& b = boundInfo[j][attrId]; - if (b.type2 != -1) { - // compare with previously defined bound - if (b.type2 != (int)type2 || - b.size != 2 + dstWords || - memcmp(&xfrmData[b.offset + 2], &xfrmData[dstPos + 2], dstWords << 2) != 0) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidBounds; - return; - } - } else { - // fix length - AttributeHeader* ah = (AttributeHeader*)&xfrmData[dstPos + 1]; - ah->setDataSize(dstWords); - // enter new bound - jam(); - b.type2 = type2; - b.offset = dstPos; - b.size = 2 + dstWords; - } - } - // jump to next - offset += 2 + dataSize; - dstPos += 2 + dstWords; - } - if (offset != req->boundAiLength) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidAttrInfo; - return; - } - for (unsigned j = 0; j <= 1; j++) { - // save lower/upper bound in index attribute id order - for (unsigned i = 0; i < maxAttrId[j]; i++) { - jam(); - const BoundInfo& b = boundInfo[j][i]; - // check for gap or strict bound before last - if (b.type2 == -1 || (i + 1 < maxAttrId[j] && (b.type2 & 0x1))) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::InvalidBounds; - return; - } - bool ok = scan.m_bound[j]->append(&xfrmData[b.offset], b.size); - if (! 
ok) { - jam(); - scan.m_state = ScanOp::Invalid; - sig->errorCode = TuxBoundInfo::OutOfBuffers; - return; - } - } - scan.m_boundCnt[j] = maxAttrId[j]; - } - // no error - sig->errorCode = 0; -} - -void -Dbtux::execNEXT_SCANREQ(Signal* signal) -{ - jamEntry(); - const NextScanReq reqCopy = *(const NextScanReq*)signal->getDataPtr(); - const NextScanReq* const req = &reqCopy; - ScanOpPtr scanPtr; - scanPtr.i = req->accPtr; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "NEXT_SCANREQ scan " << scanPtr.i << " " << scan << endl; - } -#endif - // handle unlock previous and close scan - switch (req->scanFlag) { - case NextScanReq::ZSCAN_NEXT: - jam(); - break; - case NextScanReq::ZSCAN_NEXT_COMMIT: - jam(); - case NextScanReq::ZSCAN_COMMIT: - jam(); - if (! scan.m_readCommitted) { - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Unlock; - lockReq->accOpPtr = req->accOperationPtr; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - removeAccLockOp(scanPtr, req->accOperationPtr); - } - if (req->scanFlag == NextScanReq::ZSCAN_COMMIT) { - jam(); - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - unsigned signalLength = 1; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; - } - break; - case NextScanReq::ZSCAN_CLOSE: - jam(); - // unlink from tree node first to avoid state changes - if (scan.m_scanPos.m_loc != NullTupLoc) { - jam(); - const TupLoc loc = scan.m_scanPos.m_loc; - NodeHandle node(frag); - selectNode(node, loc); - unlinkScan(node, scanPtr); - scan.m_scanPos.m_loc = NullTupLoc; - } - if (scan.m_lockwait) { - jam(); - ndbrequire(scan.m_accLockOp != RNIL); - // use ACC_ABORTCONF to flush out any reply in job buffer - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::AbortWithConf; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, - AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_state = ScanOp::Aborting; - return; - } - if (scan.m_state == ScanOp::Locked) { - jam(); - ndbrequire(scan.m_accLockOp != RNIL); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, - AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - } - scan.m_state = ScanOp::Aborting; - scanClose(signal, scanPtr); - return; - case NextScanReq::ZSCAN_NEXT_ABORT: - jam(); - default: - jam(); - ndbrequire(false); - break; - } - // start looking for next scan result - AccCheckScan* checkReq = (AccCheckScan*)signal->getDataPtrSend(); - checkReq->accPtr = scanPtr.i; - checkReq->checkLcpStop = AccCheckScan::ZNOT_CHECK_LCP_STOP; - EXECUTE_DIRECT(DBTUX, GSN_ACC_CHECK_SCAN, signal, AccCheckScan::SignalLength); - jamEntry(); -} - -void -Dbtux::execACC_CHECK_SCAN(Signal* signal) -{ - jamEntry(); - const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr(); - const 
AccCheckScan* const req = &reqCopy; - ScanOpPtr scanPtr; - scanPtr.i = req->accPtr; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "ACC_CHECK_SCAN scan " << scanPtr.i << " " << scan << endl; - } -#endif - if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) { - jam(); - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; // stop - } - if (scan.m_lockwait) { - jam(); - // LQH asks if we are waiting for lock and we tell it to ask again - const TreeEnt ent = scan.m_scanEnt; - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = RNIL; // no tuple returned - conf->fragId = frag.m_fragId; - unsigned signalLength = 3; - // if TC has ordered scan close, it will be detected here - sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; // stop - } - if (scan.m_state == ScanOp::First) { - jam(); - // search is done only once in single range scan - scanFirst(scanPtr); - } - if (scan.m_state == ScanOp::Current || - scan.m_state == ScanOp::Next) { - jam(); - // look for next - scanFind(scanPtr); - } - // for reading tuple key in Found or Locked state - Data pkData = c_dataBuffer; - unsigned pkSize = 0; // indicates not yet done - if (scan.m_state == ScanOp::Found) { - // found an entry to return - jam(); - ndbrequire(scan.m_accLockOp == RNIL); - if (! scan.m_readCommitted) { - jam(); - const TreeEnt ent = scan.m_scanEnt; - // read tuple key - readTablePk(frag, ent, pkData, pkSize); - // get read lock or exclusive lock - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = - scan.m_lockMode == 0 ? 
AccLockReq::LockShared : AccLockReq::LockExclusive; - lockReq->accOpPtr = RNIL; - lockReq->userPtr = scanPtr.i; - lockReq->userRef = reference(); - lockReq->tableId = scan.m_tableId; - lockReq->fragId = frag.m_fragId; - lockReq->fragPtrI = frag.m_accTableFragPtrI; - const Uint32* const buf32 = static_cast(pkData); - const Uint64* const buf64 = reinterpret_cast(buf32); - lockReq->hashValue = md5_hash(buf64, pkSize); - lockReq->tupAddr = getTupAddr(frag, ent); - lockReq->transId1 = scan.m_transId1; - lockReq->transId2 = scan.m_transId2; - // execute - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::LockSignalLength); - jamEntry(); - switch (lockReq->returnCode) { - case AccLockReq::Success: - jam(); - scan.m_state = ScanOp::Locked; - scan.m_accLockOp = lockReq->accOpPtr; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Lock immediate scan " << scanPtr.i << " " << scan << endl; - } -#endif - break; - case AccLockReq::IsBlocked: - jam(); - // normal lock wait - scan.m_state = ScanOp::Blocked; - scan.m_lockwait = true; - scan.m_accLockOp = lockReq->accOpPtr; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Lock wait scan " << scanPtr.i << " " << scan << endl; - } -#endif - // LQH will wake us up - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; // stop - break; - case AccLockReq::Refused: - jam(); - // we cannot see deleted tuple (assert only) - ndbassert(false); - // skip it - scan.m_state = ScanOp::Next; - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; // stop - break; - case AccLockReq::NoFreeOp: - jam(); - // max ops should depend on max scans (assert only) - ndbassert(false); - // stay in Found state - scan.m_state = ScanOp::Found; - signal->theData[0] = scan.m_userPtr; - signal->theData[1] = true; - EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); - jamEntry(); - return; // stop - break; - default: - ndbrequire(false); - break; - } - } else { - scan.m_state = ScanOp::Locked; - } - } - if (scan.m_state == ScanOp::Locked) { - // we have lock or do not need one - jam(); - // read keys if not already done (uses signal) - const TreeEnt ent = scan.m_scanEnt; - // conf signal - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - // the lock is passed to LQH - Uint32 accLockOp = scan.m_accLockOp; - if (accLockOp != RNIL) { - scan.m_accLockOp = RNIL; - // remember it until LQH unlocks it - addAccLockOp(scanPtr, accLockOp); - } else { - ndbrequire(scan.m_readCommitted); - // operation RNIL in LQH would signal no tuple returned - accLockOp = (Uint32)-1; - } - conf->accOperationPtr = accLockOp; - conf->fragId = frag.m_fragId; - conf->localKey[0] = getTupAddr(frag, ent); - conf->localKey[1] = 0; - conf->localKeyLength = 1; - unsigned signalLength = 6; - // add key info - if (! 
scan.m_readCommitted) { - sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - } else { - Uint32 blockNo = refToBlock(scan.m_userRef); - EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength); - } - // next time look for next entry - scan.m_state = ScanOp::Next; - return; - } - // XXX in ACC this is checked before req->checkLcpStop - if (scan.m_state == ScanOp::Last || - scan.m_state == ScanOp::Invalid) { - jam(); - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scan.m_userPtr; - conf->accOperationPtr = RNIL; - conf->fragId = RNIL; - unsigned signalLength = 3; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - return; - } - ndbrequire(false); -} - -/* - * Lock succeeded (after delay) in ACC. If the lock is for current - * entry, set state to Locked. If the lock is for an entry we were - * moved away from, simply unlock it. Finally, if we are closing the - * scan, do nothing since we have already sent an abort request. - */ -void -Dbtux::execACCKEYCONF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Lock obtained scan " << scanPtr.i << " " << scan << endl; - } -#endif - ndbrequire(scan.m_lockwait && scan.m_accLockOp != RNIL); - scan.m_lockwait = false; - if (scan.m_state == ScanOp::Blocked) { - // the lock wait was for current entry - jam(); - scan.m_state = ScanOp::Locked; - // LQH has the ball - return; - } - if (scan.m_state != ScanOp::Aborting) { - // we were moved, release lock - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - // LQH has the ball - return; - } - // lose the lock - scan.m_accLockOp = RNIL; - // continue at ACC_ABORTCONF -} - -/* - * Lock failed (after delay) in ACC. Probably means somebody ahead of - * us in lock queue deleted the tuple. - */ -void -Dbtux::execACCKEYREF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Lock refused scan " << scanPtr.i << " " << scan << endl; - } -#endif - ndbrequire(scan.m_lockwait && scan.m_accLockOp != RNIL); - scan.m_lockwait = false; - if (scan.m_state != ScanOp::Aborting) { - jam(); - // release the operation - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = scan.m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - scan.m_accLockOp = RNIL; - // scan position should already have been moved (assert only) - if (scan.m_state == ScanOp::Blocked) { - jam(); - ndbassert(false); - scan.m_state = ScanOp::Next; - } - // LQH has the ball - return; - } - // lose the lock - scan.m_accLockOp = RNIL; - // continue at ACC_ABORTCONF -} - -/* - * Received when scan is closing. 
This signal arrives after any - * ACCKEYCON or ACCKEYREF which may have been in job buffer. - */ -void -Dbtux::execACC_ABORTCONF(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "ACC_ABORTCONF scan " << scanPtr.i << " " << scan << endl; - } -#endif - ndbrequire(scan.m_state == ScanOp::Aborting); - // most likely we are still in lock wait - if (scan.m_lockwait) { - jam(); - scan.m_lockwait = false; - scan.m_accLockOp = RNIL; - } - scanClose(signal, scanPtr); -} - -/* - * Find start position for single range scan. - */ -void -Dbtux::scanFirst(ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Enter first scan " << scanPtr.i << " " << scan << endl; - } -#endif - // set up index keys for this operation - setKeyAttrs(frag); - // scan direction 0, 1 - const unsigned idir = scan.m_descending; - unpackBound(*scan.m_bound[idir], c_dataBuffer); - TreePos treePos; - searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], scan.m_descending, treePos); - if (treePos.m_loc != NullTupLoc) { - scan.m_scanPos = treePos; - // link the scan to node found - NodeHandle node(frag); - selectNode(node, treePos.m_loc); - linkScan(node, scanPtr); - if (treePos.m_dir == 3) { - jam(); - // check upper bound - TreeEnt ent = node.getEnt(treePos.m_pos); - if (scanCheck(scanPtr, ent)) - scan.m_state = ScanOp::Current; - else - scan.m_state = ScanOp::Last; - } else { - scan.m_state = ScanOp::Next; - } - } else { - jam(); - scan.m_state = ScanOp::Last; - } -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Leave first scan " << scanPtr.i << " " << scan << endl; - } -#endif -} - -/* - * Look for entry to return as scan result. - */ -void -Dbtux::scanFind(ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Enter find scan " << scanPtr.i << " " << scan << endl; - } -#endif - ndbrequire(scan.m_state == ScanOp::Current || scan.m_state == ScanOp::Next); - while (1) { - jam(); - if (scan.m_state == ScanOp::Next) - scanNext(scanPtr, false); - if (scan.m_state == ScanOp::Current) { - jam(); - const TreePos pos = scan.m_scanPos; - NodeHandle node(frag); - selectNode(node, pos.m_loc); - const TreeEnt ent = node.getEnt(pos.m_pos); - if (scanVisible(scanPtr, ent)) { - jam(); - scan.m_state = ScanOp::Found; - scan.m_scanEnt = ent; - break; - } - } else { - jam(); - break; - } - scan.m_state = ScanOp::Next; - } -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Leave find scan " << scanPtr.i << " " << scan << endl; - } -#endif -} - -/* - * Move to next entry. The scan is already linked to some node. When - * we leave, if an entry was found, it will be linked to a possibly - * different node. The scan has a position, and a direction which tells - * from where we came to this position. This is one of (all comments - * are in terms of ascending scan): - * - * 0 - up from left child (scan this node next) - * 1 - up from right child (proceed to parent) - * 2 - up from root (the scan ends) - * 3 - left to right within node (at end proceed to right child) - * 4 - down from parent (proceed to left child) - * - * If an entry was found, scan direction is 3. 
Therefore tree - * re-organizations need not worry about scan direction. - * - * This method is also used to move a scan when its entry is removed - * (see moveScanList). If the scan is Blocked, we check if it remains - * Blocked on a different version of the tuple. Otherwise the tuple is - * lost and state becomes Current. - */ -void -Dbtux::scanNext(ScanOpPtr scanPtr, bool fromMaintReq) -{ - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); -#ifdef VM_TRACE - if (debugFlags & (DebugMaint | DebugScan)) { - debugOut << "Enter next scan " << scanPtr.i << " " << scan << endl; - } -#endif - // cannot be moved away from tuple we have locked - ndbrequire(scan.m_state != ScanOp::Locked); - // set up index keys for this operation - setKeyAttrs(frag); - // scan direction - const unsigned idir = scan.m_descending; // 0, 1 - const int jdir = 1 - 2 * (int)idir; // 1, -1 - // use copy of position - TreePos pos = scan.m_scanPos; - // get and remember original node - NodeHandle origNode(frag); - selectNode(origNode, pos.m_loc); - ndbrequire(islinkScan(origNode, scanPtr)); - // current node in loop - NodeHandle node = origNode; - // copy of entry found - TreeEnt ent; - while (true) { - jam(); -#ifdef VM_TRACE - if (debugFlags & (DebugMaint | DebugScan)) { - debugOut << "Current scan " << scanPtr.i << " pos " << pos << " node " << node << endl; - } -#endif - if (pos.m_dir == 2) { - // coming up from root ends the scan - jam(); - pos.m_loc = NullTupLoc; - break; - } - if (node.m_loc != pos.m_loc) { - jam(); - selectNode(node, pos.m_loc); - } - if (pos.m_dir == 4) { - // coming down from parent proceed to left child - jam(); - TupLoc loc = node.getLink(idir); - if (loc != NullTupLoc) { - jam(); - pos.m_loc = loc; - pos.m_dir = 4; // unchanged - continue; - } - // pretend we came from left child - pos.m_dir = idir; - } - const unsigned occup = node.getOccup(); - if (occup == 0) { - jam(); - ndbrequire(fromMaintReq); - // move back to parent - see comment in treeRemoveInner - pos.m_loc = node.getLink(2); - pos.m_dir = node.getSide(); - continue; - } - if (pos.m_dir == idir) { - // coming up from left child scan current node - jam(); - pos.m_pos = idir == 0 ? (Uint16)-1 : occup; - pos.m_dir = 3; - } - if (pos.m_dir == 3) { - // before or within node - jam(); - // advance position - becomes ZNIL (> occup) if 0 and descending - pos.m_pos += jdir; - if (pos.m_pos < occup) { - jam(); - pos.m_dir = 3; // unchanged - ent = node.getEnt(pos.m_pos); - if (! 
scanCheck(scanPtr, ent)) { - jam(); - pos.m_loc = NullTupLoc; - } - break; - } - // after node proceed to right child - TupLoc loc = node.getLink(1 - idir); - if (loc != NullTupLoc) { - jam(); - pos.m_loc = loc; - pos.m_dir = 4; - continue; - } - // pretend we came from right child - pos.m_dir = 1 - idir; - } - if (pos.m_dir == 1 - idir) { - // coming up from right child proceed to parent - jam(); - pos.m_loc = node.getLink(2); - pos.m_dir = node.getSide(); - continue; - } - ndbrequire(false); - } - // copy back position - scan.m_scanPos = pos; - // relink - if (pos.m_loc != NullTupLoc) { - ndbrequire(pos.m_dir == 3); - ndbrequire(pos.m_loc == node.m_loc); - if (origNode.m_loc != node.m_loc) { - jam(); - unlinkScan(origNode, scanPtr); - linkScan(node, scanPtr); - } - if (scan.m_state != ScanOp::Blocked) { - scan.m_state = ScanOp::Current; - } else { - jam(); - ndbrequire(fromMaintReq); - TreeEnt& scanEnt = scan.m_scanEnt; - ndbrequire(scanEnt.m_tupLoc != NullTupLoc); - if (scanEnt.eqtuple(ent)) { - // remains blocked on another version - scanEnt = ent; - } else { - jam(); - scanEnt.m_tupLoc = NullTupLoc; - scan.m_state = ScanOp::Current; - } - } - } else { - jam(); - unlinkScan(origNode, scanPtr); - scan.m_state = ScanOp::Last; - } -#ifdef VM_TRACE - if (debugFlags & (DebugMaint | DebugScan)) { - debugOut << "Leave next scan " << scanPtr.i << " " << scan << endl; - } -#endif -} - -/* - * Check end key. Return true if scan is still within range. - */ -bool -Dbtux::scanCheck(ScanOpPtr scanPtr, TreeEnt ent) -{ - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); - const unsigned idir = scan.m_descending; - const int jdir = 1 - 2 * (int)idir; - unpackBound(*scan.m_bound[1 - idir], c_dataBuffer); - unsigned boundCnt = scan.m_boundCnt[1 - idir]; - readKeyAttrs(frag, ent, 0, c_entryKey); - int ret = cmpScanBound(frag, 1 - idir, c_dataBuffer, boundCnt, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - if (jdir * ret > 0) - return true; - // hit upper bound of single range scan - return false; -} - -/* - * Check if an entry is visible to the scan. - * - * There is a special check to never accept same tuple twice in a row. - * This is faster than asking TUP. It also fixes some special cases - * which are not analyzed or handled yet. - */ -bool -Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent) -{ - const ScanOp& scan = *scanPtr.p; - const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); - Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - Uint32 pageId = ent.m_tupLoc.getPageId(); - Uint32 pageOffset = ent.m_tupLoc.getPageOffset(); - Uint32 tupVersion = ent.m_tupVersion; - // check for same tuple twice in row - if (scan.m_scanEnt.m_tupLoc == ent.m_tupLoc) - { - jam(); - return false; - } - Uint32 transId1 = scan.m_transId1; - Uint32 transId2 = scan.m_transId2; - bool dirty = scan.m_readCommitted; - Uint32 savePointId = scan.m_savePointId; - bool ret = c_tup->tuxQueryTh(tableFragPtrI, pageId, pageOffset, tupVersion, transId1, transId2, dirty, savePointId); - jamEntry(); - return ret; -} - -/* - * Finish closing of scan and send conf. Any lock wait has been done - * already. - */ -void -Dbtux::scanClose(Signal* signal, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; - ndbrequire(! scan.m_lockwait && scan.m_accLockOp == RNIL); - // unlock all not unlocked by LQH - if (! 
scan.m_accLockOps.isEmpty()) { - jam(); - abortAccLockOps(signal, scanPtr); - } - // send conf - NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend(); - conf->scanPtr = scanPtr.p->m_userPtr; - conf->accOperationPtr = RNIL; - conf->fragId = RNIL; - unsigned signalLength = 3; - sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF, - signal, signalLength, JBB); - releaseScanOp(scanPtr); -} - -void -Dbtux::abortAccLockOps(Signal* signal, ScanOpPtr scanPtr) -{ - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Abort locks in scan " << scanPtr.i << " " << scan << endl; - } -#endif - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; - while (list.first(lockPtr)) { - jam(); - AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend(); - lockReq->returnCode = RNIL; - lockReq->requestInfo = AccLockReq::Abort; - lockReq->accOpPtr = lockPtr.p->m_accLockOp; - EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength); - jamEntry(); - ndbrequire(lockReq->returnCode == AccLockReq::Success); - list.release(lockPtr); - } -} - -void -Dbtux::addAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp) -{ - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Add lock " << hex << accLockOp << dec - << " to scan " << scanPtr.i << " " << scan << endl; - } -#endif - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; -#ifdef VM_TRACE - list.first(lockPtr); - while (lockPtr.i != RNIL) { - ndbrequire(lockPtr.p->m_accLockOp != accLockOp); - list.next(lockPtr); - } -#endif - bool ok = list.seize(lockPtr); - ndbrequire(ok); - ndbrequire(accLockOp != RNIL); - lockPtr.p->m_accLockOp = accLockOp; -} - -void -Dbtux::removeAccLockOp(ScanOpPtr scanPtr, Uint32 accLockOp) -{ - ScanOp& scan = *scanPtr.p; -#ifdef VM_TRACE - if (debugFlags & (DebugScan | DebugLock)) { - debugOut << "Remove lock " << hex << accLockOp << dec - << " from scan " << scanPtr.i << " " << scan << endl; - } -#endif - LocalDLFifoList list(c_scanLockPool, scan.m_accLockOps); - ScanLockPtr lockPtr; - list.first(lockPtr); - while (lockPtr.i != RNIL) { - if (lockPtr.p->m_accLockOp == accLockOp) { - jam(); - break; - } - list.next(lockPtr); - } - ndbrequire(lockPtr.i != RNIL); - list.release(lockPtr); -} - -/* - * Release allocated records. - */ -void -Dbtux::releaseScanOp(ScanOpPtr& scanPtr) -{ -#ifdef VM_TRACE - if (debugFlags & DebugScan) { - debugOut << "Release scan " << scanPtr.i << " " << *scanPtr.p << endl; - } -#endif - Frag& frag = *c_fragPool.getPtr(scanPtr.p->m_fragPtrI); - scanPtr.p->m_boundMin.release(); - scanPtr.p->m_boundMax.release(); - // unlink from per-fragment list and release from pool - frag.m_scanList.release(scanPtr); -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp deleted file mode 100644 index 9e84f61ab70..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp +++ /dev/null @@ -1,432 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_SEARCH_CPP -#include "Dbtux.hpp" - -/* - * Search for entry to add. - * - * Similar to searchToRemove (see below). - */ -bool -Dbtux::searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) -{ - const TreeHead& tree = frag.m_tree; - const unsigned numAttrs = frag.m_numAttrs; - NodeHandle currNode(frag); - currNode.m_loc = tree.m_root; - if (currNode.m_loc == NullTupLoc) { - // empty tree - jam(); - return true; - } - NodeHandle glbNode(frag); // potential g.l.b of final node - /* - * In order to not (yet) change old behaviour, a position between - * 2 nodes returns the one at the bottom of the tree. - */ - NodeHandle bottomNode(frag); - while (true) { - jam(); - selectNode(currNode, currNode.m_loc); - int ret; - // compare prefix - unsigned start = 0; - ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize); - if (ret == NdbSqlUtil::CmpUnknown) { - jam(); - // read and compare remaining attributes - ndbrequire(start < numAttrs); - readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey); - ret = cmpSearchKey(frag, start, searchKey, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - } - if (ret == 0) { - jam(); - // keys are equal, compare entry values - ret = searchEnt.cmp(currNode.getMinMax(0)); - } - if (ret < 0) { - jam(); - const TupLoc loc = currNode.getLink(0); - if (loc != NullTupLoc) { - jam(); - // continue to left subtree - currNode.m_loc = loc; - continue; - } - if (! glbNode.isNull()) { - jam(); - // move up to the g.l.b but remember the bottom node - bottomNode = currNode; - currNode = glbNode; - } - } else if (ret > 0) { - jam(); - const TupLoc loc = currNode.getLink(1); - if (loc != NullTupLoc) { - jam(); - // save potential g.l.b - glbNode = currNode; - // continue to right subtree - currNode.m_loc = loc; - continue; - } - } else { - jam(); - treePos.m_loc = currNode.m_loc; - treePos.m_pos = 0; - // entry found - error - return false; - } - break; - } - // anticipate - treePos.m_loc = currNode.m_loc; - // binary search - int lo = -1; - int hi = currNode.getOccup(); - int ret; - while (1) { - jam(); - // hi - lo > 1 implies lo < j < hi - int j = (hi + lo) / 2; - // read and compare attributes - unsigned start = 0; - readKeyAttrs(frag, currNode.getEnt(j), start, c_entryKey); - ret = cmpSearchKey(frag, start, searchKey, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - if (ret == 0) { - jam(); - // keys are equal, compare entry values - ret = searchEnt.cmp(currNode.getEnt(j)); - } - if (ret < 0) - hi = j; - else if (ret > 0) - lo = j; - else { - treePos.m_pos = j; - // entry found - error - return false; - } - if (hi - lo == 1) - break; - } - if (ret < 0) { - jam(); - treePos.m_pos = hi; - return true; - } - if ((uint) hi < currNode.getOccup()) { - jam(); - treePos.m_pos = hi; - return true; - } - if (bottomNode.isNull()) { - jam(); - treePos.m_pos = hi; - return true; - } - jam(); - // backwards compatible for now - treePos.m_loc = bottomNode.m_loc; - treePos.m_pos = 0; - return true; -} - -/* - * Search for entry to remove. - * - * Compares search key to each node min. A move to right subtree can - * overshoot target node. The last such node is saved. The final node - * is a semi-leaf or leaf. 
If search key is less than final node min - * then the saved node is the g.l.b of the final node and we move back - * to it. - */ -bool -Dbtux::searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos) -{ - const TreeHead& tree = frag.m_tree; - const unsigned numAttrs = frag.m_numAttrs; - NodeHandle currNode(frag); - currNode.m_loc = tree.m_root; - if (currNode.m_loc == NullTupLoc) { - // empty tree - failed - jam(); - return false; - } - NodeHandle glbNode(frag); // potential g.l.b of final node - while (true) { - jam(); - selectNode(currNode, currNode.m_loc); - int ret; - // compare prefix - unsigned start = 0; - ret = cmpSearchKey(frag, start, searchKey, currNode.getPref(), tree.m_prefSize); - if (ret == NdbSqlUtil::CmpUnknown) { - jam(); - // read and compare remaining attributes - ndbrequire(start < numAttrs); - readKeyAttrs(frag, currNode.getMinMax(0), start, c_entryKey); - ret = cmpSearchKey(frag, start, searchKey, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - } - if (ret == 0) { - jam(); - // keys are equal, compare entry values - ret = searchEnt.cmp(currNode.getMinMax(0)); - } - if (ret < 0) { - jam(); - const TupLoc loc = currNode.getLink(0); - if (loc != NullTupLoc) { - jam(); - // continue to left subtree - currNode.m_loc = loc; - continue; - } - if (! glbNode.isNull()) { - jam(); - // move up to the g.l.b - currNode = glbNode; - } - } else if (ret > 0) { - jam(); - const TupLoc loc = currNode.getLink(1); - if (loc != NullTupLoc) { - jam(); - // save potential g.l.b - glbNode = currNode; - // continue to right subtree - currNode.m_loc = loc; - continue; - } - } else { - jam(); - treePos.m_loc = currNode.m_loc; - treePos.m_pos = 0; - return true; - } - break; - } - // anticipate - treePos.m_loc = currNode.m_loc; - // pos 0 was handled above - for (unsigned j = 1, occup = currNode.getOccup(); j < occup; j++) { - jam(); - // compare only the entry - if (searchEnt.eq(currNode.getEnt(j))) { - jam(); - treePos.m_pos = j; - return true; - } - } - treePos.m_pos = currNode.getOccup(); - // not found - failed - return false; -} - -/* - * Search for scan start position. - * - * Similar to searchToAdd. The routines differ somewhat depending on - * scan direction and are done by separate methods. - */ -void -Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos) -{ - const TreeHead& tree = frag.m_tree; - if (tree.m_root != NullTupLoc) { - if (! 
descending) - searchToScanAscending(frag, boundInfo, boundCount, treePos); - else - searchToScanDescending(frag, boundInfo, boundCount, treePos); - return; - } - // empty tree -} - -void -Dbtux::searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos) -{ - const TreeHead& tree = frag.m_tree; - NodeHandle currNode(frag); - currNode.m_loc = tree.m_root; - NodeHandle glbNode(frag); // potential g.l.b of final node - NodeHandle bottomNode(frag); - while (true) { - jam(); - selectNode(currNode, currNode.m_loc); - int ret; - // compare prefix - ret = cmpScanBound(frag, 0, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize); - if (ret == NdbSqlUtil::CmpUnknown) { - jam(); - // read and compare all attributes - readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey); - ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - } - if (ret < 0) { - // bound is left of this node - jam(); - const TupLoc loc = currNode.getLink(0); - if (loc != NullTupLoc) { - jam(); - // continue to left subtree - currNode.m_loc = loc; - continue; - } - if (! glbNode.isNull()) { - jam(); - // move up to the g.l.b but remember the bottom node - bottomNode = currNode; - currNode = glbNode; - } else { - // start scanning this node - treePos.m_loc = currNode.m_loc; - treePos.m_pos = 0; - treePos.m_dir = 3; - return; - } - } else { - // bound is at or right of this node - jam(); - const TupLoc loc = currNode.getLink(1); - if (loc != NullTupLoc) { - jam(); - // save potential g.l.b - glbNode = currNode; - // continue to right subtree - currNode.m_loc = loc; - continue; - } - } - break; - } - for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) { - jam(); - int ret; - // read and compare attributes - readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey); - ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - if (ret < 0) { - // found first entry satisfying the bound - treePos.m_loc = currNode.m_loc; - treePos.m_pos = j; - treePos.m_dir = 3; - return; - } - } - // bound is to right of this node - if (! bottomNode.isNull()) { - jam(); - // start scanning the l.u.b - treePos.m_loc = bottomNode.m_loc; - treePos.m_pos = 0; - treePos.m_dir = 3; - return; - } - // start scanning upwards (pretend we came from right child) - treePos.m_loc = currNode.m_loc; - treePos.m_dir = 1; -} - -void -Dbtux::searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos) -{ - const TreeHead& tree = frag.m_tree; - NodeHandle currNode(frag); - currNode.m_loc = tree.m_root; - NodeHandle glbNode(frag); // potential g.l.b of final node - NodeHandle bottomNode(frag); - while (true) { - jam(); - selectNode(currNode, currNode.m_loc); - int ret; - // compare prefix - ret = cmpScanBound(frag, 1, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize); - if (ret == NdbSqlUtil::CmpUnknown) { - jam(); - // read and compare all attributes - readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey); - ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - } - if (ret < 0) { - // bound is left of this node - jam(); - const TupLoc loc = currNode.getLink(0); - if (loc != NullTupLoc) { - jam(); - // continue to left subtree - currNode.m_loc = loc; - continue; - } - if (! 
glbNode.isNull()) { - jam(); - // move up to the g.l.b but remember the bottom node - bottomNode = currNode; - currNode = glbNode; - } else { - // empty result set - return; - } - } else { - // bound is at or right of this node - jam(); - const TupLoc loc = currNode.getLink(1); - if (loc != NullTupLoc) { - jam(); - // save potential g.l.b - glbNode = currNode; - // continue to right subtree - currNode.m_loc = loc; - continue; - } - } - break; - } - for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) { - jam(); - int ret; - // read and compare attributes - readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey); - ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey); - ndbrequire(ret != NdbSqlUtil::CmpUnknown); - if (ret < 0) { - if (j > 0) { - // start scanning from previous entry - treePos.m_loc = currNode.m_loc; - treePos.m_pos = j - 1; - treePos.m_dir = 3; - return; - } - // start scanning upwards (pretend we came from left child) - treePos.m_loc = currNode.m_loc; - treePos.m_pos = 0; - treePos.m_dir = 0; - return; - } - } - // start scanning this node - treePos.m_loc = currNode.m_loc; - treePos.m_pos = currNode.getOccup() - 1; - treePos.m_dir = 3; -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp deleted file mode 100644 index 143996bf7ff..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp +++ /dev/null @@ -1,159 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_STAT_CPP -#include "Dbtux.hpp" - -void -Dbtux::execREAD_PSEUDO_REQ(Signal* signal) -{ - jamEntry(); - ScanOpPtr scanPtr; - scanPtr.i = signal->theData[0]; - c_scanOpPool.getPtr(scanPtr); - if (signal->theData[1] == AttributeHeader::RECORDS_IN_RANGE) { - jam(); - statRecordsInRange(scanPtr, &signal->theData[0]); - } else { - ndbassert(false); - } -} - -/* - * Estimate entries in range. Scan is at first entry. Search for last - * entry i.e. start of descending scan. Use the 2 positions to estimate - * entries before and after the range. Finally get entries in range by - * subtracting from total. Errors come from imperfectly balanced tree - * and from uncommitted entries which differ only in tuple version. - * - * Returns 4 Uint32 values: 0) total entries 1) in range 2) before range - * 3) after range. 1-3) are estimates and need not add up to 0). 
- */ -void -Dbtux::statRecordsInRange(ScanOpPtr scanPtr, Uint32* out) -{ - ScanOp& scan = *scanPtr.p; - Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); - TreeHead& tree = frag.m_tree; - // get first and last position - TreePos pos1 = scan.m_scanPos; - TreePos pos2; - { // as in scanFirst() - setKeyAttrs(frag); - const unsigned idir = 1; - const ScanBound& bound = *scan.m_bound[idir]; - ScanBoundIterator iter; - bound.first(iter); - for (unsigned j = 0; j < bound.getSize(); j++) { - jam(); - c_dataBuffer[j] = *iter.data; - bound.next(iter); - } - searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], true, pos2); - // committed read (same timeslice) and range not empty - ndbrequire(pos2.m_loc != NullTupLoc); - } - out[0] = frag.m_tree.m_entryCount; - out[2] = getEntriesBeforeOrAfter(frag, pos1, 0); - out[3] = getEntriesBeforeOrAfter(frag, pos2, 1); - if (pos1.m_loc == pos2.m_loc) { - ndbrequire(pos2.m_pos >= pos1.m_pos); - out[1] = pos2.m_pos - pos1.m_pos + 1; - } else { - Uint32 rem = out[2] + out[3]; - if (out[0] > rem) { - out[1] = out[0] - rem; - } else { - // random guess one node apart - out[1] = tree.m_maxOccup; - } - } -} - -/* - * Estimate number of entries strictly before or after given position. - * Each branch to right direction wins parent node and the subtree on - * the other side. Subtree entries is estimated from depth and total - * entries by assuming that the tree is perfectly balanced. - */ -Uint32 -Dbtux::getEntriesBeforeOrAfter(Frag& frag, TreePos pos, unsigned idir) -{ - NodeHandle node(frag); - selectNode(node, pos.m_loc); - Uint16 path[MaxTreeDepth + 1]; - unsigned depth = getPathToNode(node, path); - ndbrequire(depth != 0 && depth <= MaxTreeDepth); - TreeHead& tree = frag.m_tree; - Uint32 cnt = 0; - Uint32 tot = tree.m_entryCount; - unsigned i = 0; - // contribution from levels above - while (i + 1 < depth) { - unsigned occup2 = (path[i] >> 8); - unsigned side = (path[i + 1] & 0xFF); - // subtree of this node has about half the entries - tot = tot >= occup2 ? (tot - occup2) / 2 : 0; - // branch to other side wins parent and a subtree - if (side != idir) { - cnt += occup2; - cnt += tot; - } - i++; - } - // contribution from this node - unsigned occup = (path[i] >> 8); - ndbrequire(pos.m_pos < occup); - if (idir == 0) { - if (pos.m_pos != 0) - cnt += pos.m_pos - 1; - } else { - cnt += occup - (pos.m_pos + 1); - } - // contribution from levels below - tot = tot >= occup ? (tot - occup) / 2 : 0; - cnt += tot; - return cnt; -} - -/* - * Construct path to given node. Returns depth. Root node has path - * 2 and depth 1. In general the path is 2{0,1}* where 0,1 is the side - * (left,right branch). In addition the occupancy of each node is - * returned in the upper 8 bits. 
- */ -unsigned -Dbtux::getPathToNode(NodeHandle node, Uint16* path) -{ - TupLoc loc = node.m_loc; - unsigned i = MaxTreeDepth; - while (loc != NullTupLoc) { - jam(); - selectNode(node, loc); - path[i] = node.getSide() | (node.getOccup() << 8); - loc = node.getLink(2); - ndbrequire(i != 0); - i--; - } - unsigned depth = MaxTreeDepth - i; - unsigned j = 0; - while (j < depth) { - path[j] = path[i + 1 + j]; - j++; - } - path[j] = 0xFFFF; // catch bug - return depth; -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp deleted file mode 100644 index c130a71b60e..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp +++ /dev/null @@ -1,717 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define DBTUX_TREE_CPP -#include "Dbtux.hpp" - -/* - * Add entry. Handle the case when there is room for one more. This - * is the common case given slack in nodes. - */ -void -Dbtux::treeAdd(Frag& frag, TreePos treePos, TreeEnt ent) -{ - TreeHead& tree = frag.m_tree; - NodeHandle node(frag); - do { - if (treePos.m_loc != NullTupLoc) { - // non-empty tree - jam(); - selectNode(node, treePos.m_loc); - unsigned pos = treePos.m_pos; - if (node.getOccup() < tree.m_maxOccup) { - // node has room - jam(); - nodePushUp(node, pos, ent, RNIL); - break; - } - treeAddFull(frag, node, pos, ent); - break; - } - jam(); - insertNode(node); - nodePushUp(node, 0, ent, RNIL); - node.setSide(2); - tree.m_root = node.m_loc; - break; - } while (0); - tree.m_entryCount++; -} - -/* - * Add entry when node is full. Handle the case when there is g.l.b - * node in left subtree with room for one more. It will receive the min - * entry of this node. The min entry could be the entry to add. - */ -void -Dbtux::treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent) -{ - TreeHead& tree = frag.m_tree; - TupLoc loc = lubNode.getLink(0); - if (loc != NullTupLoc) { - // find g.l.b node - NodeHandle glbNode(frag); - do { - jam(); - selectNode(glbNode, loc); - loc = glbNode.getLink(1); - } while (loc != NullTupLoc); - if (glbNode.getOccup() < tree.m_maxOccup) { - // g.l.b node has room - jam(); - Uint32 scanList = RNIL; - if (pos != 0) { - jam(); - // add the new entry and return min entry - nodePushDown(lubNode, pos - 1, ent, scanList); - } - // g.l.b node receives min entry from l.u.b node - nodePushUp(glbNode, glbNode.getOccup(), ent, scanList); - return; - } - treeAddNode(frag, lubNode, pos, ent, glbNode, 1); - return; - } - treeAddNode(frag, lubNode, pos, ent, lubNode, 0); -} - -/* - * Add entry when there is no g.l.b node in left subtree or the g.l.b - * node is full. We must add a new left or right child node which - * becomes the new g.l.b node. 
- */ -void -Dbtux::treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i) -{ - NodeHandle glbNode(frag); - insertNode(glbNode); - // connect parent and child - parentNode.setLink(i, glbNode.m_loc); - glbNode.setLink(2, parentNode.m_loc); - glbNode.setSide(i); - Uint32 scanList = RNIL; - if (pos != 0) { - jam(); - // add the new entry and return min entry - nodePushDown(lubNode, pos - 1, ent, scanList); - } - // g.l.b node receives min entry from l.u.b node - nodePushUp(glbNode, 0, ent, scanList); - // re-balance the tree - treeAddRebalance(frag, parentNode, i); -} - -/* - * Re-balance tree after adding a node. The process starts with the - * parent of the added node. - */ -void -Dbtux::treeAddRebalance(Frag& frag, NodeHandle node, unsigned i) -{ - while (true) { - // height of subtree i has increased by 1 - int j = (i == 0 ? -1 : +1); - int b = node.getBalance(); - if (b == 0) { - // perfectly balanced - jam(); - node.setBalance(j); - // height change propagates up - } else if (b == -j) { - // height of shorter subtree increased - jam(); - node.setBalance(0); - // height of tree did not change - done - break; - } else if (b == j) { - // height of longer subtree increased - jam(); - NodeHandle childNode(frag); - selectNode(childNode, node.getLink(i)); - int b2 = childNode.getBalance(); - if (b2 == b) { - jam(); - treeRotateSingle(frag, node, i); - } else if (b2 == -b) { - jam(); - treeRotateDouble(frag, node, i); - } else { - // height of subtree increased so it cannot be perfectly balanced - ndbrequire(false); - } - // height of tree did not increase - done - break; - } else { - ndbrequire(false); - } - TupLoc parentLoc = node.getLink(2); - if (parentLoc == NullTupLoc) { - jam(); - // root node - done - break; - } - i = node.getSide(); - selectNode(node, parentLoc); - } -} - -/* - * Remove entry. Optimize for nodes with slack. Handle the case when - * there is no underflow i.e. occupancy remains at least minOccup. For - * interior nodes this is a requirement. For others it means that we do - * not need to consider merge of semi-leaf and leaf. - */ -void -Dbtux::treeRemove(Frag& frag, TreePos treePos) -{ - TreeHead& tree = frag.m_tree; - unsigned pos = treePos.m_pos; - NodeHandle node(frag); - selectNode(node, treePos.m_loc); - TreeEnt ent; - do { - if (node.getOccup() > tree.m_minOccup) { - // no underflow in any node type - jam(); - nodePopDown(node, pos, ent, 0); - break; - } - if (node.getChilds() == 2) { - // underflow in interior node - jam(); - treeRemoveInner(frag, node, pos); - break; - } - // remove entry in semi/leaf - nodePopDown(node, pos, ent, 0); - if (node.getLink(0) != NullTupLoc) { - jam(); - treeRemoveSemi(frag, node, 0); - break; - } - if (node.getLink(1) != NullTupLoc) { - jam(); - treeRemoveSemi(frag, node, 1); - break; - } - treeRemoveLeaf(frag, node); - break; - } while (0); - ndbrequire(tree.m_entryCount != 0); - tree.m_entryCount--; -} - -/* - * Remove entry when interior node underflows. There is g.l.b node in - * left subtree to borrow an entry from. The max entry of the g.l.b - * node becomes the min entry of this node. 
- */ -void -Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos) -{ - TreeEnt ent; - // find g.l.b node - NodeHandle glbNode(frag); - TupLoc loc = lubNode.getLink(0); - do { - jam(); - selectNode(glbNode, loc); - loc = glbNode.getLink(1); - } while (loc != NullTupLoc); - // borrow max entry from semi/leaf - Uint32 scanList = RNIL; - nodePopDown(glbNode, glbNode.getOccup() - 1, ent, &scanList); - // g.l.b may be empty now - // a descending scan may try to enter the empty g.l.b - // we prevent this in scanNext - nodePopUp(lubNode, pos, ent, scanList); - if (glbNode.getLink(0) != NullTupLoc) { - jam(); - treeRemoveSemi(frag, glbNode, 0); - return; - } - treeRemoveLeaf(frag, glbNode); -} - -/* - * Handle semi-leaf after removing an entry. Move entries from leaf to - * semi-leaf to bring semi-leaf occupancy above minOccup, if possible. - * The leaf may become empty. - */ -void -Dbtux::treeRemoveSemi(Frag& frag, NodeHandle semiNode, unsigned i) -{ - TreeHead& tree = frag.m_tree; - ndbrequire(semiNode.getChilds() < 2); - TupLoc leafLoc = semiNode.getLink(i); - NodeHandle leafNode(frag); - selectNode(leafNode, leafLoc); - if (semiNode.getOccup() < tree.m_minOccup) { - jam(); - unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - semiNode.getOccup()); - nodeSlide(semiNode, leafNode, cnt, i); - if (leafNode.getOccup() == 0) { - // remove empty leaf - jam(); - treeRemoveNode(frag, leafNode); - } - } -} - -/* - * Handle leaf after removing an entry. If parent is semi-leaf, move - * entries to it as in the semi-leaf case. If parent is interior node, - * do nothing. - */ -void -Dbtux::treeRemoveLeaf(Frag& frag, NodeHandle leafNode) -{ - TreeHead& tree = frag.m_tree; - TupLoc parentLoc = leafNode.getLink(2); - if (parentLoc != NullTupLoc) { - jam(); - NodeHandle parentNode(frag); - selectNode(parentNode, parentLoc); - unsigned i = leafNode.getSide(); - if (parentNode.getLink(1 - i) == NullTupLoc) { - // parent is semi-leaf - jam(); - if (parentNode.getOccup() < tree.m_minOccup) { - jam(); - unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - parentNode.getOccup()); - nodeSlide(parentNode, leafNode, cnt, i); - } - } - } - if (leafNode.getOccup() == 0) { - jam(); - // remove empty leaf - treeRemoveNode(frag, leafNode); - } -} - -/* - * Remove empty leaf. - */ -void -Dbtux::treeRemoveNode(Frag& frag, NodeHandle leafNode) -{ - TreeHead& tree = frag.m_tree; - ndbrequire(leafNode.getChilds() == 0); - TupLoc parentLoc = leafNode.getLink(2); - unsigned i = leafNode.getSide(); - deleteNode(leafNode); - if (parentLoc != NullTupLoc) { - jam(); - NodeHandle parentNode(frag); - selectNode(parentNode, parentLoc); - parentNode.setLink(i, NullTupLoc); - // re-balance the tree - treeRemoveRebalance(frag, parentNode, i); - return; - } - // tree is now empty - tree.m_root = NullTupLoc; -} - -/* - * Re-balance tree after removing a node. The process starts with the - * parent of the removed node. - */ -void -Dbtux::treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i) -{ - while (true) { - // height of subtree i has decreased by 1 - int j = (i == 0 ? 
-1 : +1); - int b = node.getBalance(); - if (b == 0) { - // perfectly balanced - jam(); - node.setBalance(-j); - // height of tree did not change - done - return; - } else if (b == j) { - // height of longer subtree has decreased - jam(); - node.setBalance(0); - // height change propagates up - } else if (b == -j) { - // height of shorter subtree has decreased - jam(); - // child on the other side - NodeHandle childNode(frag); - selectNode(childNode, node.getLink(1 - i)); - int b2 = childNode.getBalance(); - if (b2 == b) { - jam(); - treeRotateSingle(frag, node, 1 - i); - // height of tree decreased and propagates up - } else if (b2 == -b) { - jam(); - treeRotateDouble(frag, node, 1 - i); - // height of tree decreased and propagates up - } else { - jam(); - treeRotateSingle(frag, node, 1 - i); - // height of tree did not change - done - return; - } - } else { - ndbrequire(false); - } - TupLoc parentLoc = node.getLink(2); - if (parentLoc == NullTupLoc) { - jam(); - // root node - done - return; - } - i = node.getSide(); - selectNode(node, parentLoc); - } -} - -/* - * Single rotation about node 5. One of LL (i=0) or RR (i=1). - * - * 0 0 - * | | - * 5 ==> 3 - * / \ / \ - * 3 6 2 5 - * / \ / / \ - * 2 4 1 4 6 - * / - * 1 - * - * In this change 5,3 and 2 must always be there. 0, 1, 2, 4 and 6 are - * all optional. If 4 are there it changes side. -*/ -void -Dbtux::treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i) -{ - ndbrequire(i <= 1); - /* - 5 is the old top node that have been unbalanced due to an insert or - delete. The balance is still the old balance before the update. - Verify that bal5 is 1 if RR rotate and -1 if LL rotate. - */ - NodeHandle node5 = node; - const TupLoc loc5 = node5.m_loc; - const int bal5 = node5.getBalance(); - const int side5 = node5.getSide(); - ndbrequire(bal5 + (1 - i) == i); - /* - 3 is the new root of this part of the tree which is to swap place with - node 5. For an insert to cause this it must have the same balance as 5. - For deletes it can have the balance 0. - */ - TupLoc loc3 = node5.getLink(i); - NodeHandle node3(frag); - selectNode(node3, loc3); - const int bal3 = node3.getBalance(); - /* - 2 must always be there but is not changed. Thus we mereley check that it - exists. - */ - ndbrequire(node3.getLink(i) != NullTupLoc); - /* - 4 is not necessarily there but if it is there it will move from one - side of 3 to the other side of 5. For LL it moves from the right side - to the left side and for RR it moves from the left side to the right - side. This means that it also changes parent from 3 to 5. - */ - TupLoc loc4 = node3.getLink(1 - i); - NodeHandle node4(frag); - if (loc4 != NullTupLoc) { - jam(); - selectNode(node4, loc4); - ndbrequire(node4.getSide() == (1 - i) && - node4.getLink(2) == loc3); - node4.setSide(i); - node4.setLink(2, loc5); - }//if - - /* - Retrieve the address of 5's parent before it is destroyed - */ - TupLoc loc0 = node5.getLink(2); - - /* - The next step is to perform the rotation. 3 will inherit 5's parent - and side. 5 will become a child of 3 on the right side for LL and on - the left side for RR. - 5 will get 3 as the parent. It will get 4 as a child and it will be - on the right side of 3 for LL and left side of 3 for RR. - The final step of the rotate is to check whether 5 originally had any - parent. If it had not then 3 is the new root node. - We will also verify some preconditions for the change to occur. - 1. 3 must have had 5 as parent before the change. - 2. 
3's side is left for LL and right for RR before change. - */ - ndbrequire(node3.getLink(2) == loc5); - ndbrequire(node3.getSide() == i); - node3.setLink(1 - i, loc5); - node3.setLink(2, loc0); - node3.setSide(side5); - node5.setLink(i, loc4); - node5.setLink(2, loc3); - node5.setSide(1 - i); - if (loc0 != NullTupLoc) { - jam(); - NodeHandle node0(frag); - selectNode(node0, loc0); - node0.setLink(side5, loc3); - } else { - jam(); - frag.m_tree.m_root = loc3; - }//if - /* The final step of the change is to update the balance of 3 and - 5 that changed places. There are two cases here. The first case is - when 3 unbalanced in the same direction by an insert or a delete. - In this case the changes will make the tree balanced again for both - 3 and 5. - The second case only occurs at deletes. In this case 3 starts out - balanced. In the figure above this could occur if 4 starts out with - a right node and the rotate is triggered by a delete of 6's only child. - In this case 5 will change balance but still be unbalanced and 3 will - be unbalanced in the opposite direction of 5. - */ - if (bal3 == bal5) { - jam(); - node3.setBalance(0); - node5.setBalance(0); - } else if (bal3 == 0) { - jam(); - node3.setBalance(-bal5); - node5.setBalance(bal5); - } else { - ndbrequire(false); - }//if - /* - Set node to 3 as return parameter for enabling caller to continue - traversing the tree. - */ - node = node3; -} - -/* - * Double rotation about node 6. One of LR (i=0) or RL (i=1). - * - * 0 0 - * | | - * 6 ==> 4 - * / \ / \ - * 2 7 2 6 - * / \ / \ / \ - * 1 4 1 3 5 7 - * / \ - * 3 5 - * - * In this change 6, 2 and 4 must be there, all others are optional. - * We will start by proving a Lemma. - * Lemma: - * The height of the sub-trees 1 and 7 and the maximum height of the - * threes from 3 and 5 are all the same. - * Proof: - * maxheight(3,5) is defined as the maximum height of 3 and 5. - * If height(7) > maxheight(3,5) then the AVL condition is ok and we - * don't need to perform a rotation. - * If height(7) < maxheight(3,5) then the balance of 6 would be at least - * -3 which cannot happen in an AVL tree even before a rotation. - * Thus we conclude that height(7) == maxheight(3,5) - * - * The next step is to prove that the height of 1 is equal to maxheight(3,5). - * If height(1) - 1 > maxheight(3,5) then we would have - * balance in 6 equal to -3 at least which cannot happen in an AVL-tree. - * If height(1) - 1 = maxheight(3,5) then we should have solved the - * unbalance with a single rotate and not with a double rotate. - * If height(1) + 1 = maxheight(3,5) then we would be doing a rotate - * with node 2 as the root of the rotation. - * If height(1) + k = maxheight(3,5) where k >= 2 then the tree could not have - * been an AVL-tree before the insert or delete. - * Thus we conclude that height(1) = maxheight(3,5) - * - * Thus we conclude that height(1) = maxheight(3,5) = height(7). - * - * Observation: - * The balance of node 4 before the rotation can be any (-1, 0, +1). 
- * - * The following changes are needed: - * Node 6: - * 1) Changes parent from 0 -> 4 - * 2) 1 - i link stays the same - * 3) i side link is derived from 1 - i side link from 4 - * 4) Side is set to 1 - i - * 5) Balance change: - * If balance(4) == 0 then balance(6) = 0 - * since height(3) = height(5) = maxheight(3,5) = height(7) - * If balance(4) == +1 then balance(6) = 0 - * since height(5) = maxheight(3,5) = height(7) - * If balance(4) == -1 then balance(6) = 1 - * since height(5) + 1 = maxheight(3,5) = height(7) - * - * Node 2: - * 1) Changes parent from 6 -> 4 - * 2) i side link stays the same - * 3) 1 - i side link is derived from i side link of 4 - * 4) Side is set to i (thus not changed) - * 5) Balance change: - * If balance(4) == 0 then balance(2) = 0 - * since height(3) = height(5) = maxheight(3,5) = height(1) - * If balance(4) == -1 then balance(2) = 0 - * since height(3) = maxheight(3,5) = height(1) - * If balance(4) == +1 then balance(6) = 1 - * since height(3) + 1 = maxheight(3,5) = height(1) - * - * Node 4: - * 1) Inherits parent from 6 - * 2) i side link is 2 - * 3) 1 - i side link is 6 - * 4) Side is inherited from 6 - * 5) Balance(4) = 0 independent of previous balance - * Proof: - * If height(1) = 0 then only 2, 4 and 6 are involved and then it is - * trivially true. - * If height(1) >= 1 then we are sure that 1 and 7 exist with the same - * height and that if 3 and 5 exist they are of the same height as 1 and - * 7 and thus we know that 4 is balanced since newheight(2) = newheight(6). - * - * If Node 3 exists: - * 1) Change parent from 4 to 2 - * 2) Change side from i to 1 - i - * - * If Node 5 exists: - * 1) Change parent from 4 to 6 - * 2) Change side from 1 - i to i - * - * If Node 0 exists: - * 1) previous link to 6 is replaced by link to 4 on proper side - * - * Node 1 and 7 needs no changes at all. - * - * Some additional requires are that balance(2) = - balance(6) = -1/+1 since - * otherwise we would do a single rotate. 
- * - * The balance(6) is -1 if i == 0 and 1 if i == 1 - * - */ -void -Dbtux::treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i) -{ - TreeHead& tree = frag.m_tree; - - // old top node - NodeHandle node6 = node; - const TupLoc loc6 = node6.m_loc; - // the un-updated balance - const int bal6 = node6.getBalance(); - const unsigned side6 = node6.getSide(); - - // level 1 - TupLoc loc2 = node6.getLink(i); - NodeHandle node2(frag); - selectNode(node2, loc2); - const int bal2 = node2.getBalance(); - - // level 2 - TupLoc loc4 = node2.getLink(1 - i); - NodeHandle node4(frag); - selectNode(node4, loc4); - const int bal4 = node4.getBalance(); - - ndbrequire(i <= 1); - ndbrequire(bal6 + (1 - i) == i); - ndbrequire(bal2 == -bal6); - ndbrequire(node2.getLink(2) == loc6); - ndbrequire(node2.getSide() == i); - ndbrequire(node4.getLink(2) == loc2); - - // level 3 - TupLoc loc3 = node4.getLink(i); - TupLoc loc5 = node4.getLink(1 - i); - - // fill up leaf before it becomes internal - if (loc3 == NullTupLoc && loc5 == NullTupLoc) { - jam(); - if (node4.getOccup() < tree.m_minOccup) { - jam(); - unsigned cnt = tree.m_minOccup - node4.getOccup(); - ndbrequire(cnt < node2.getOccup()); - nodeSlide(node4, node2, cnt, i); - ndbrequire(node4.getOccup() >= tree.m_minOccup); - ndbrequire(node2.getOccup() != 0); - } - } else { - if (loc3 != NullTupLoc) { - jam(); - NodeHandle node3(frag); - selectNode(node3, loc3); - node3.setLink(2, loc2); - node3.setSide(1 - i); - } - if (loc5 != NullTupLoc) { - jam(); - NodeHandle node5(frag); - selectNode(node5, loc5); - node5.setLink(2, node6.m_loc); - node5.setSide(i); - } - } - // parent - TupLoc loc0 = node6.getLink(2); - NodeHandle node0(frag); - // perform the rotation - node6.setLink(i, loc5); - node6.setLink(2, loc4); - node6.setSide(1 - i); - - node2.setLink(1 - i, loc3); - node2.setLink(2, loc4); - - node4.setLink(i, loc2); - node4.setLink(1 - i, loc6); - node4.setLink(2, loc0); - node4.setSide(side6); - - if (loc0 != NullTupLoc) { - jam(); - selectNode(node0, loc0); - node0.setLink(side6, loc4); - } else { - jam(); - frag.m_tree.m_root = loc4; - } - // set balance of changed nodes - node4.setBalance(0); - if (bal4 == 0) { - jam(); - node2.setBalance(0); - node6.setBalance(0); - } else if (bal4 == -bal2) { - jam(); - node2.setBalance(0); - node6.setBalance(bal2); - } else if (bal4 == bal2) { - jam(); - node2.setBalance(-bal2); - node6.setBalance(0); - } else { - ndbrequire(false); - } - // new top node - node = node4; -} diff --git a/storage/ndb/src/kernel/blocks/dbtux/Times.txt b/storage/ndb/src/kernel/blocks/dbtux/Times.txt deleted file mode 100644 index 68120084846..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/Times.txt +++ /dev/null @@ -1,151 +0,0 @@ -ordered index performance -========================= - -"mc02" 2x1700 MHz linux-2.4.9 gcc-2.96 -O3 one db-node - -case a: maintenance: index on Unsigned -testOIBasic -case u -table 1 -index 2 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging - -case b: maintenance: index on Varchar(5) + Varchar(5) + Varchar(20) + Unsigned -testOIBasic -case u -table 2 -index 5 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging - -case c: full scan: index on PK Unsigned -testOIBasic -case v -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging - -case d: scan 1 tuple via EQ: index on PK Unsigned -testOIBasic -case w -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -samples 50000 -subloop 1 -nologging -v2 - -a, b -1 million rows, pk update without index, pk 
update with index -shows ms / 1000 rows for each and pct overhead - -c -1 million rows, index on PK, full table scan, full index scan -shows ms / 1000 rows for each and index time overhead - -d -1 million rows, index on PK, read table via each pk, scan index for each pk -shows ms / 1000 rows for each and index time overhead -samples 10% of all PKs (100,000 pk reads, 100,000 scans) - -the "pct" values are from more accurate total times (not shown) -comments [ ... ] are after the case - -040616 mc02/a 40 ms 87 ms 114 pct - mc02/b 51 ms 128 ms 148 pct - -optim 1 mc02/a 38 ms 85 ms 124 pct - mc02/b 51 ms 123 ms 140 pct - -optim 2 mc02/a 41 ms 80 ms 96 pct - mc02/b 51 ms 117 ms 128 pct - -optim 3 mc02/a 43 ms 80 ms 85 pct - mc02/b 54 ms 118 ms 117 pct - -optim 4 mc02/a 42 ms 80 ms 87 pct - mc02/b 51 ms 119 ms 129 pct - -optim 5 mc02/a 43 ms 77 ms 77 pct - mc02/b 54 ms 118 ms 117 pct - -optim 6 mc02/a 42 ms 70 ms 66 pct - mc02/b 53 ms 109 ms 105 pct - -optim 7 mc02/a 42 ms 69 ms 61 pct - mc02/b 52 ms 106 ms 101 pct - -optim 8 mc02/a 42 ms 69 ms 62 pct - mc02/b 54 ms 104 ms 92 pct - -optim 9 mc02/a 43 ms 67 ms 54 pct - mc02/b 53 ms 102 ms 91 pct - -optim 10 mc02/a 44 ms 65 ms 46 pct - mc02/b 53 ms 88 ms 66 pct - -optim 11 mc02/a 43 ms 63 ms 46 pct - mc02/b 52 ms 86 ms 63 pct - -optim 12 mc02/a 38 ms 55 ms 43 pct - mc02/b 47 ms 77 ms 63 pct - mc02/c 10 ms 14 ms 47 pct - mc02/d 176 ms 281 ms 59 pct - -optim 13 mc02/a 40 ms 57 ms 42 pct - mc02/b 47 ms 77 ms 61 pct - mc02/c 9 ms 13 ms 50 pct - mc02/d 170 ms 256 ms 50 pct - -optim 13 mc02/a 39 ms 59 ms 50 pct - mc02/b 47 ms 77 ms 61 pct - mc02/c 9 ms 12 ms 44 pct - mc02/d 246 ms 289 ms 17 pct - -[ after wl-1884 store all-NULL keys (the tests have pctnull=10 per column) ] -[ case d: bug in testOIBasic killed PK read performance ] - -optim 14 mc02/a 41 ms 60 ms 44 pct - mc02/b 46 ms 81 ms 73 pct - mc02/c 9 ms 13 ms 37 pct - mc02/d 242 ms 285 ms 17 pct - -[ case b: do long keys suffer from many subroutine calls? ] -[ case d: bug in testOIBasic killed PK read performance ] - -none mc02/a 35 ms 60 ms 71 pct - mc02/b 42 ms 75 ms 76 pct - mc02/c 5 ms 12 ms 106 pct - mc02/d 165 ms 238 ms 44 pct - -[ johan re-installed mc02 as fedora gcc-3.3.2, tux uses more C++ stuff than tup] - -charsets mc02/a 35 ms 60 ms 71 pct - mc02/b 42 ms 84 ms 97 pct - mc02/c 5 ms 12 ms 109 pct - mc02/d 190 ms 236 ms 23 pct - -[ case b: TUX can no longer use pointers to TUP data ] - -optim 15 mc02/a 34 ms 60 ms 72 pct - mc02/b 42 ms 85 ms 100 pct - mc02/c 5 ms 12 ms 110 pct - mc02/d 178 ms 242 ms 35 pct - -[ corrected wasted space in index node ] - -optim 16 mc02/a 34 ms 53 ms 53 pct - mc02/b 42 ms 75 ms 75 pct - -[ binary search of bounding node when adding entry ] - -none mc02/a 35 ms 53 ms 51 pct - mc02/b 42 ms 75 ms 76 pct - -[ rewrote treeAdd / treeRemove ] - -optim 17 mc02/a 35 ms 52 ms 49 pct - mc02/b 43 ms 75 ms 75 pct - -[ allow slack (2) in interior nodes - almost no effect?? 
] - -wl-1942 mc02/a 35 ms 52 ms 49 pct - mc02/b 42 ms 75 ms 76 pct - -before mc02/c 5 ms 13 ms 126 pct - mc02/d 134 ms 238 ms 78 pct - -after mc02/c 5 ms 10 ms 70 pct - mc02/d 178 ms 242 ms 69 pct - -[ prelim performance fix for max batch size 16 -> 992 ] - -wl-2066 mc02/c 5 ms 10 ms 87 pct -before mc02/d 140 ms 237 ms 69 pct - -wl-2066 mc02/c 5 ms 10 ms 69 pct -after mc02/d 150 ms 229 ms 52 pct - -[ wl-2066 = remove ACC storage, use TUX test to see effect ] - -vim: set et: diff --git a/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html b/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html deleted file mode 100644 index 264809cefd3..00000000000 --- a/storage/ndb/src/kernel/blocks/dbtux/tuxstatus.html +++ /dev/null @@ -1,120 +0,0 @@ - - - -NDB Ordered Index Status - - -

-

NDB Ordered Index Status

Alpha release Jan 30, 2004

* Up to 32 index attributes of any type, possibly nullable.
* Index build i.e. table need not be empty.
* Logging NOT done, index rebuilt at system restart.
* Single range scan with lower and upper bounds.
* Scan with locking: read latest, read for update.
* LIMITED number of parallel scans.
* Total result set NOT in index key order.
* NDB ODBC optimizer to use ordered index for equality but NOT for ranges.
* MySQL optimizer to use ordered index for equality and ranges.
-

As an example, consider the following index on integer attributes.

SQL> create index X on T (A, B, C) nologging;

Single range scan means that bounds are set on an initial sequence of index
keys, and all but the last bound is an equality.
For example, the following scans are supported (the last 2 not via NDB ODBC).

SQL> select * from T where A = 1;
SQL> select * from T where A = 1 and B = 10 and C = 20;
SQL> select * from T where A < 10;
SQL> select * from T where A = 1 and 10 < B and B < 20;

The following scans are NOT supported:

SQL> select * from T where B = 1;
SQL> select * from T where A < 10 and B < 20;
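
The prefix-plus-single-range rule above can be restated as a small predicate
check. The sketch below is purely illustrative and is not part of the NDB
sources; the Cond classification and the function name isSingleRangeScan are
hypothetical simplifications of how per-column bounds could be encoded.

// Illustrative sketch (not NDB code): given the bound placed on each index
// column, in index order, decide whether the conditions form a valid single
// range scan: equalities on a leading prefix, at most one column with
// inequality bounds after them, and no bounds on any later column.
#include <cstddef>
#include <vector>

enum class Cond { NONE, EQ, RANGE };   // hypothetical per-column classification

bool isSingleRangeScan(const std::vector<Cond>& cols)
{
  std::size_t i = 0;
  while (i < cols.size() && cols[i] == Cond::EQ)     // leading equalities
    i++;
  if (i < cols.size() && cols[i] == Cond::RANGE)     // at most one range column
    i++;
  for (; i < cols.size(); i++)                       // nothing may follow
    if (cols[i] != Cond::NONE)
      return false;
  return true;
}

// For index X on T (A, B, C), the statements above classify as:
//   A = 1                        -> {EQ, NONE, NONE}     -> supported
//   A = 1 and B = 10 and C = 20  -> {EQ, EQ, EQ}         -> supported
//   A < 10                       -> {RANGE, NONE, NONE}  -> supported
//   A = 1 and 10 < B and B < 20  -> {EQ, RANGE, NONE}    -> supported
//   B = 1                        -> {NONE, EQ, NONE}     -> not supported
//   A < 10 and B < 20            -> {RANGE, RANGE, NONE} -> not supported

Running this against the six statements above reproduces the supported /
not-supported split listed here.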
-

Features and dates

-[ Now = Jan 19 ] -

-

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Feature                               Now    Jan 30  Mar 01  Never
Index maintenance                     X      -       -       -
Basic scan                            X 1)   -       -       -
Scan bounds on nullable attributes    -      X       -       -
Scan with locking                     -      X       -       -
NDB ODBC equality bounds              -      X       -       -
MySQL integration                     -      X       -       -
Index build                           2)     X       -       -
Unlimited number of scans             3)     -       X       -
Total ordering                        -      -       X       -
Multiple range scan                   -      -       X       -
NDB ODBC range bounds                 -      -       -       X
Logging                               -      -       -       X

1) No locking and bounds must be on non-nullable key attributes.
2) Currently table must be empty when index is created.
-3) Currently limited to 11 simultaneous per fragment. - - diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp deleted file mode 100644 index 398c63add84..00000000000 --- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp +++ /dev/null @@ -1,2608 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "DbUtil.hpp" - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Startup - * ------------------------------------------------------------------------ - * - * Constructors, startup, initializations - **************************************************************************/ - -DbUtil::DbUtil(Block_context& ctx) : - SimulatedBlock(DBUTIL, ctx), - c_runningPrepares(c_preparePool), - c_seizingTransactions(c_transactionPool), - c_runningTransactions(c_transactionPool), - c_lockQueues(c_lockQueuePool) -{ - BLOCK_CONSTRUCTOR(DbUtil); - - // Add received signals - addRecSignal(GSN_READ_CONFIG_REQ, &DbUtil::execREAD_CONFIG_REQ); - addRecSignal(GSN_STTOR, &DbUtil::execSTTOR); - addRecSignal(GSN_NDB_STTOR, &DbUtil::execNDB_STTOR); - addRecSignal(GSN_DUMP_STATE_ORD, &DbUtil::execDUMP_STATE_ORD); - addRecSignal(GSN_CONTINUEB, &DbUtil::execCONTINUEB); - - //addRecSignal(GSN_TCSEIZEREF, &DbUtil::execTCSEIZEREF); - addRecSignal(GSN_TCSEIZECONF, &DbUtil::execTCSEIZECONF); - addRecSignal(GSN_TCKEYCONF, &DbUtil::execTCKEYCONF); - addRecSignal(GSN_TCKEYREF, &DbUtil::execTCKEYREF); - addRecSignal(GSN_TCROLLBACKREP, &DbUtil::execTCROLLBACKREP); - - //addRecSignal(GSN_TCKEY_FAILCONF, &DbUtil::execTCKEY_FAILCONF); - //addRecSignal(GSN_TCKEY_FAILREF, &DbUtil::execTCKEY_FAILREF); - addRecSignal(GSN_TRANSID_AI, &DbUtil::execTRANSID_AI); - - /** - * Sequence Service - */ - addRecSignal(GSN_UTIL_SEQUENCE_REQ, &DbUtil::execUTIL_SEQUENCE_REQ); - // Debug - addRecSignal(GSN_UTIL_SEQUENCE_REF, &DbUtil::execUTIL_SEQUENCE_REF); - addRecSignal(GSN_UTIL_SEQUENCE_CONF, &DbUtil::execUTIL_SEQUENCE_CONF); - - /** - * Locking - */ - addRecSignal(GSN_UTIL_CREATE_LOCK_REQ, &DbUtil::execUTIL_CREATE_LOCK_REQ); - addRecSignal(GSN_UTIL_DESTROY_LOCK_REQ, &DbUtil::execUTIL_DESTORY_LOCK_REQ); - addRecSignal(GSN_UTIL_LOCK_REQ, &DbUtil::execUTIL_LOCK_REQ); - addRecSignal(GSN_UTIL_UNLOCK_REQ, &DbUtil::execUTIL_UNLOCK_REQ); - - /** - * Backend towards Dict - */ - addRecSignal(GSN_GET_TABINFOREF, &DbUtil::execGET_TABINFOREF); - addRecSignal(GSN_GET_TABINFO_CONF, &DbUtil::execGET_TABINFO_CONF); - - /** - * Prepare / Execute / Release Services - */ - addRecSignal(GSN_UTIL_PREPARE_REQ, 
&DbUtil::execUTIL_PREPARE_REQ); - addRecSignal(GSN_UTIL_PREPARE_CONF, &DbUtil::execUTIL_PREPARE_CONF); - addRecSignal(GSN_UTIL_PREPARE_REF, &DbUtil::execUTIL_PREPARE_REF); - - addRecSignal(GSN_UTIL_EXECUTE_REQ, &DbUtil::execUTIL_EXECUTE_REQ); - addRecSignal(GSN_UTIL_EXECUTE_CONF, &DbUtil::execUTIL_EXECUTE_CONF); - addRecSignal(GSN_UTIL_EXECUTE_REF, &DbUtil::execUTIL_EXECUTE_REF); - - addRecSignal(GSN_UTIL_RELEASE_REQ, &DbUtil::execUTIL_RELEASE_REQ); - addRecSignal(GSN_UTIL_RELEASE_CONF, &DbUtil::execUTIL_RELEASE_CONF); - addRecSignal(GSN_UTIL_RELEASE_REF, &DbUtil::execUTIL_RELEASE_REF); -} - -DbUtil::~DbUtil() -{ -} - -BLOCK_FUNCTIONS(DbUtil) - -void -DbUtil::releasePrepare(PreparePtr prepPtr) { - prepPtr.p->preparePages.release(); - c_runningPrepares.release(prepPtr); // Automatic release in pool -} - -void -DbUtil::releasePreparedOperation(PreparedOperationPtr prepOpPtr) { - prepOpPtr.p->attrMapping.release(); - prepOpPtr.p->attrInfo.release(); - prepOpPtr.p->rsInfo.release(); - prepOpPtr.p->pkBitmask.clear(); - c_preparedOperationPool.release(prepOpPtr); // No list holding these structs -} - -void -DbUtil::releaseTransaction(TransactionPtr transPtr){ - transPtr.p->executePages.release(); - OperationPtr opPtr; - for(transPtr.p->operations.first(opPtr); opPtr.i != RNIL; - transPtr.p->operations.next(opPtr)){ - opPtr.p->attrInfo.release(); - opPtr.p->keyInfo.release(); - opPtr.p->rs.release(); - if (opPtr.p->prepOp != 0 && opPtr.p->prepOp_i != RNIL) { - if (opPtr.p->prepOp->releaseFlag) { - PreparedOperationPtr prepOpPtr; - prepOpPtr.i = opPtr.p->prepOp_i; - prepOpPtr.p = opPtr.p->prepOp; - releasePreparedOperation(prepOpPtr); - } - } - } - transPtr.p->operations.release(); - c_runningTransactions.release(transPtr); -} - -void -DbUtil::execREAD_CONFIG_REQ(Signal* signal) -{ - jamEntry(); - - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - c_pagePool.setSize(10); - c_preparePool.setSize(1); // one parallel prepare at a time - c_preparedOperationPool.setSize(5); // three hardcoded, two for test - c_operationPool.setSize(64); // 64 parallel operations - c_transactionPool.setSize(32); // 16 parallel transactions - c_attrMappingPool.setSize(100); - c_dataBufPool.setSize(6000); // 6000*11*4 = 264K > 8k+8k*16 = 256k - { - SLList tmp(c_preparePool); - PreparePtr ptr; - while(tmp.seize(ptr)) - new (ptr.p) Prepare(c_pagePool); - tmp.release(); - } - { - SLList tmp(c_operationPool); - OperationPtr ptr; - while(tmp.seize(ptr)) - new (ptr.p) Operation(c_dataBufPool, c_dataBufPool, c_dataBufPool); - tmp.release(); - } - { - SLList tmp(c_preparedOperationPool); - PreparedOperationPtr ptr; - while(tmp.seize(ptr)) - new (ptr.p) PreparedOperation(c_attrMappingPool, - c_dataBufPool, c_dataBufPool); - tmp.release(); - } - { - SLList tmp(c_transactionPool); - TransactionPtr ptr; - while(tmp.seize(ptr)) - new (ptr.p) Transaction(c_pagePool, c_operationPool); - tmp.release(); - } - - c_lockQueuePool.setSize(5); - c_lockElementPool.setSize(5); - c_lockQueues.setSize(8); - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -void -DbUtil::execSTTOR(Signal* signal) -{ - jamEntry(); - - const Uint32 startphase = signal->theData[1]; - - 
if(startphase == 1){ - c_transId[0] = (number() << 20) + (getOwnNodeId() << 8); - c_transId[1] = 0; - } - - if(startphase == 6){ - hardcodedPrepare(); - connectTc(signal); - } - - signal->theData[0] = 0; - signal->theData[3] = 1; - signal->theData[4] = 6; - signal->theData[5] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB); - - return; -} - -void -DbUtil::execNDB_STTOR(Signal* signal) -{ - (void)signal; // Don't want compiler warning - - jamEntry(); -} - - -/*************************** - * Seize a number of TC records - * to use for Util transactions - */ - -void -DbUtil::connectTc(Signal* signal){ - - TransactionPtr ptr; - while(c_seizingTransactions.seize(ptr)){ - signal->theData[0] = ptr.i << 1; // See TcCommitConf - signal->theData[1] = reference(); - sendSignal(DBTC_REF, GSN_TCSEIZEREQ, signal, 2, JBB); - } -} - -void -DbUtil::execTCSEIZECONF(Signal* signal){ - jamEntry(); - - TransactionPtr ptr; - ptr.i = signal->theData[0] >> 1; - c_seizingTransactions.getPtr(ptr, signal->theData[0] >> 1); - ptr.p->connectPtr = signal->theData[1]; - - c_seizingTransactions.release(ptr); -} - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Misc - * ------------------------------------------------------------------------ - * - * ContinueB, Dump - **************************************************************************/ - -void -DbUtil::execCONTINUEB(Signal* signal){ - jamEntry(); - const Uint32 Tdata0 = signal->theData[0]; - - switch(Tdata0){ - default: - ndbrequire(0); - } -} - -void -DbUtil::execDUMP_STATE_ORD(Signal* signal){ - jamEntry(); - - /**************************************************************************** - * SEQUENCE SERVICE - * - * 200 : Simple test of Public Sequence Interface - * ---------------------------------------------- - * - Sends a SEQUENCE_REQ signal to Util (itself) - */ - const Uint32 tCase = signal->theData[0]; - if(tCase == 200){ - jam() - ndbout << "--------------------------------------------------" << endl; - UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend(); - Uint32 seqId = 1; - Uint32 reqTy = UtilSequenceReq::CurrVal; - - if(signal->length() > 1) seqId = signal->theData[1]; - if(signal->length() > 2) reqTy = signal->theData[2]; - - req->senderData = 12; - req->sequenceId = seqId; - req->requestType = reqTy; - - sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ, - signal, UtilSequenceReq::SignalLength, JBB); - } - - /****************************************************************************/ - /* // Obsolete tests, should be rewritten for long signals!! 
- if(tCase == 210){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0], 128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Delete); - w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0"); - w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 211){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Insert); - w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0"); - w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0 - w.add(UtilPrepareReq::AttributeName, "NEXTID"); // AttrNo = 1 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 212){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Update); - w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0"); - w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0 - w.add(UtilPrepareReq::AttributeName, "NEXTID"); // AttrNo = 1 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 213){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Read); - w.add(UtilPrepareReq::TableName, "sys/def/SYSTAB_0"); - w.add(UtilPrepareReq::AttributeName, "SYSKEY_0"); // AttrNo = 0 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 214){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0], 128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Delete); - w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0 - w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 215){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Insert); - w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0 - w.add(UtilPrepareReq::AttributeId, (unsigned int)0); // 
SYSKEY_0 - w.add(UtilPrepareReq::AttributeId, 1); // NEXTID - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 216){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Update); - w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0 - w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0 - w.add(UtilPrepareReq::AttributeId, 1); // NEXTID - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - if(tCase == 217){ - jam(); - ndbout << "--------------------------------------------------" << endl; - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, UtilPrepareReq::Read); - w.add(UtilPrepareReq::TableId, (unsigned int)0); // SYSTAB_0 - w.add(UtilPrepareReq::AttributeId, (unsigned int)0);// SYSKEY_0 - Uint32 length = w.getWordsUsed(); - ndbassert(length <= pageSizeInWords); - - sendUtilPrepareReqSignals(signal, propPage, length); - } - */ - /****************************************************************************/ - /* // Obsolete tests, should be rewritten for long signals!! - if(tCase == 220){ - jam(); - ndbout << "--------------------------------------------------" << endl; - Uint32 prepI = signal->theData[1]; - Uint32 length = signal->theData[2]; - Uint32 attributeValue0 = signal->theData[3]; - Uint32 attributeValue1a = signal->theData[4]; - Uint32 attributeValue1b = signal->theData[5]; - ndbrequire(prepI != 0); - - UtilExecuteReq * req = (UtilExecuteReq *)signal->getDataPtrSend(); - - req->senderData = 221; - req->prepareId = prepI; - req->totalDataLen = length; // Including headers - req->offset = 0; - - AttributeHeader::init(&req->attrData[0], 0, 1); // AttrNo 0, DataSize - req->attrData[1] = attributeValue0; // AttrValue - AttributeHeader::init(&req->attrData[2], 1, 2); // AttrNo 1, DataSize - req->attrData[3] = attributeValue1a; // AttrValue - req->attrData[4] = attributeValue1b; // AttrValue - - printUTIL_EXECUTE_REQ(stdout, signal->getDataPtrSend(), 3 + 5,0); - sendSignal(DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal, 3 + 5, JBB); - } -*/ - /**************************************************************************** - * 230 : PRINT STATE - */ -#ifdef ARRAY_GUARD - if(tCase == 230){ - jam(); - - ndbout << "--------------------------------------------------" << endl; - if (signal->length() <= 1) { - ndbout << "Usage: DUMP 230 " << endl - << "[1] Print Prepare (running) records" << endl - << "[2] Print PreparedOperation records" << endl - << "[3] Print Transaction records" << endl - << "[4] Print Operation records" << endl - << "Ex. \"dump 230 1 2\" prints Prepare record no 2." 
<< endl - << endl - << "210 : PREPARE_REQ DELETE SYSTAB_0 SYSKEY_0" << endl - << "211 : PREPARE_REQ INSERT SYSTAB_0 SYSKEY_0 NEXTID" << endl - << "212 : PREPARE_REQ UPDATE SYSTAB_0 SYSKEY_0 NEXTID" << endl - << "213 : PREPARE_REQ READ SYSTAB_0 SYSKEY_0" << endl - << "214 : PREPARE_REQ DELETE SYSTAB_0 SYSKEY_0 using id" << endl - << "215 : PREPARE_REQ INSERT SYSTAB_0 SYSKEY_0 NEXTID using id" << endl - << "216 : PREPARE_REQ UPDATE SYSTAB_0 SYSKEY_0 NEXTID using id" << endl - << "217 : PREPARE_REQ READ SYSTAB_0 SYSKEY_0 using id" << endl - << "220 : EXECUTE_REQ " <theData[1]) { - case 1: - // ** Print a specific record ** - if (signal->length() >= 3) { - PreparePtr prepPtr; - if (!c_preparePool.isSeized(signal->theData[2])) { - ndbout << "Prepare Id: " << signal->theData[2] - << " (Not seized!)" << endl; - } else { - c_preparePool.getPtr(prepPtr, signal->theData[2]); - prepPtr.p->print(); - } - return; - } - - // ** Print all records ** - PreparePtr prepPtr; - if (!c_runningPrepares.first(prepPtr)) { - ndbout << "No Prepare records exist" << endl; - return; - } - - while (!prepPtr.isNull()) { - prepPtr.p->print(); - c_runningPrepares.next(prepPtr); - } - return; - - case 2: - // ** Print a specific record ** - if (signal->length() >= 3) { - if (!c_preparedOperationPool.isSeized(signal->theData[2])) { - ndbout << "PreparedOperation Id: " << signal->theData[2] - << " (Not seized!)" << endl; - return; - } - ndbout << "PreparedOperation Id: " << signal->theData[2] << endl; - PreparedOperationPtr prepOpPtr; - c_preparedOperationPool.getPtr(prepOpPtr, signal->theData[2]); - prepOpPtr.p->print(); - return; - } - - // ** Print all records ** -#if 0 // not implemented - PreparedOperationPtr prepOpPtr; - if (!c_runningPreparedOperations.first(prepOpPtr)) { - ndbout << "No PreparedOperations exist" << endl; - return; - } - while (!prepOpPtr.isNull()) { - ndbout << "[-PreparedOperation no " << prepOpPtr.i << ":"; - prepOpPtr.p->print(); - ndbout << "]"; - c_runningPreparedOperations.next(prepOpPtr); - } -#endif - return; - - case 3: - // ** Print a specific record ** - if (signal->length() >= 3) { - ndbout << "Print specific record not implemented." << endl; - return; - } - - // ** Print all records ** - ndbout << "Print all records not implemented, specify an Id." 
<< endl; - return; - - case 4: - ndbout << "Not implemented" << endl; - return; - - default: - ndbout << "Unknown input (try without any data)" << endl; - return; - } - } -#endif - if(tCase == 240 && signal->getLength() == 2){ - MutexManager::ActiveMutexPtr ptr; - ndbrequire(c_mutexMgr.seize(ptr)); - ptr.p->m_mutexId = signal->theData[1]; - Callback c = { safe_cast(&DbUtil::mutex_created), ptr.i }; - ptr.p->m_callback = c; - c_mutexMgr.create(signal, ptr); - ndbout_c("c_mutexMgr.create ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId); - } - - if(tCase == 241 && signal->getLength() == 2){ - MutexManager::ActiveMutexPtr ptr; - ndbrequire(c_mutexMgr.seize(ptr)); - ptr.p->m_mutexId = signal->theData[1]; - Callback c = { safe_cast(&DbUtil::mutex_locked), ptr.i }; - ptr.p->m_callback = c; - c_mutexMgr.lock(signal, ptr); - ndbout_c("c_mutexMgr.lock ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId); - } - - if(tCase == 242 && signal->getLength() == 2){ - MutexManager::ActiveMutexPtr ptr; - ptr.i = signal->theData[1]; - c_mutexMgr.getPtr(ptr); - Callback c = { safe_cast(&DbUtil::mutex_unlocked), ptr.i }; - ptr.p->m_callback = c; - c_mutexMgr.unlock(signal, ptr); - ndbout_c("c_mutexMgr.unlock ptrI=%d mutexId=%d", ptr.i, ptr.p->m_mutexId); - } - - if(tCase == 243 && signal->getLength() == 3){ - MutexManager::ActiveMutexPtr ptr; - ndbrequire(c_mutexMgr.seize(ptr)); - ptr.p->m_mutexId = signal->theData[1]; - ptr.p->m_mutexKey = signal->theData[2]; - Callback c = { safe_cast(&DbUtil::mutex_destroyed), ptr.i }; - ptr.p->m_callback = c; - c_mutexMgr.destroy(signal, ptr); - ndbout_c("c_mutexMgr.destroy ptrI=%d mutexId=%d key=%d", - ptr.i, ptr.p->m_mutexId, ptr.p->m_mutexKey); - } -} - -void -DbUtil::mutex_created(Signal* signal, Uint32 ptrI, Uint32 retVal){ - MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI; - c_mutexMgr.getPtr(ptr); - ndbout_c("mutex_created - mutexId=%d, retVal=%d", - ptr.p->m_mutexId, retVal); - c_mutexMgr.release(ptrI); -} - -void -DbUtil::mutex_destroyed(Signal* signal, Uint32 ptrI, Uint32 retVal){ - MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI; - c_mutexMgr.getPtr(ptr); - ndbout_c("mutex_destroyed - mutexId=%d, retVal=%d", - ptr.p->m_mutexId, retVal); - c_mutexMgr.release(ptrI); -} - -void -DbUtil::mutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ - MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI; - c_mutexMgr.getPtr(ptr); - ndbout_c("mutex_locked - mutexId=%d, retVal=%d key=%d ptrI=%d", - ptr.p->m_mutexId, retVal, ptr.p->m_mutexKey, ptrI); - if(retVal) - c_mutexMgr.release(ptrI); -} - -void -DbUtil::mutex_unlocked(Signal* signal, Uint32 ptrI, Uint32 retVal){ - MutexManager::ActiveMutexPtr ptr; ptr.i = ptrI; - c_mutexMgr.getPtr(ptr); - ndbout_c("mutex_unlocked - mutexId=%d, retVal=%d", - ptr.p->m_mutexId, retVal); - if(!retVal) - c_mutexMgr.release(ptrI); -} - -void -DbUtil::execUTIL_SEQUENCE_REF(Signal* signal){ - jamEntry(); - ndbout << "UTIL_SEQUENCE_REF" << endl; - printUTIL_SEQUENCE_REF(stdout, signal->getDataPtrSend(), signal->length(), 0); -} - -void -DbUtil::execUTIL_SEQUENCE_CONF(Signal* signal){ - jamEntry(); - ndbout << "UTIL_SEQUENCE_CONF" << endl; - printUTIL_SEQUENCE_CONF(stdout, signal->getDataPtrSend(), signal->length(),0); -} - -void -DbUtil::execUTIL_PREPARE_CONF(Signal* signal){ - jamEntry(); - ndbout << "UTIL_PREPARE_CONF" << endl; - printUTIL_PREPARE_CONF(stdout, signal->getDataPtrSend(), signal->length(), 0); -} - -void -DbUtil::execUTIL_PREPARE_REF(Signal* signal){ - jamEntry(); - ndbout << "UTIL_PREPARE_REF" << endl; - printUTIL_PREPARE_REF(stdout, 
signal->getDataPtrSend(), signal->length(), 0); -} - -void -DbUtil::execUTIL_EXECUTE_CONF(Signal* signal) { - jamEntry(); - ndbout << "UTIL_EXECUTE_CONF" << endl; - printUTIL_EXECUTE_CONF(stdout, signal->getDataPtrSend(), signal->length(), 0); -} - -void -DbUtil::execUTIL_EXECUTE_REF(Signal* signal) { - jamEntry(); - - ndbout << "UTIL_EXECUTE_REF" << endl; - printUTIL_EXECUTE_REF(stdout, signal->getDataPtrSend(), signal->length(), 0); -} - -void -DbUtil::execUTIL_RELEASE_CONF(Signal* signal) { - jamEntry(); - ndbout << "UTIL_RELEASE_CONF" << endl; -} - -void -DbUtil::execUTIL_RELEASE_REF(Signal* signal) { - jamEntry(); - - ndbout << "UTIL_RELEASE_REF" << endl; -} - -void -DbUtil::sendUtilPrepareRef(Signal* signal, UtilPrepareRef::ErrorCode error, - Uint32 recipient, Uint32 senderData){ - UtilPrepareRef * ref = (UtilPrepareRef *)signal->getDataPtrSend(); - ref->errorCode = error; - ref->senderData = senderData; - - sendSignal(recipient, GSN_UTIL_PREPARE_REF, signal, - UtilPrepareRef::SignalLength, JBB); -} - -void -DbUtil::sendUtilExecuteRef(Signal* signal, UtilExecuteRef::ErrorCode error, - Uint32 TCerror, Uint32 recipient, Uint32 senderData){ - - UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->errorCode = error; - ref->TCErrorCode = TCerror; - - sendSignal(recipient, GSN_UTIL_EXECUTE_REF, signal, - UtilPrepareRef::SignalLength, JBB); -} - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Prepare service - * ------------------------------------------------------------------------ - * - * Prepares a transaction by storing info in some structs - **************************************************************************/ - -void -DbUtil::execUTIL_PREPARE_REQ(Signal* signal) -{ - jamEntry(); - - /**************** - * Decode Signal - ****************/ - UtilPrepareReq * req = (UtilPrepareReq *)signal->getDataPtr(); - const Uint32 senderRef = req->senderRef; - const Uint32 senderData = req->senderData; - - if(signal->getNoOfSections() == 0) { - // Missing prepare data - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, UtilPrepareRef::MISSING_PROPERTIES_SECTION, - senderRef, senderData); - return; - } - - PreparePtr prepPtr; - SegmentedSectionPtr ptr; - - jam(); - if(!c_runningPrepares.seize(prepPtr)) { - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, UtilPrepareRef::PREPARE_SEIZE_ERROR, - senderRef, senderData); - return; - }; - signal->getSection(ptr, UtilPrepareReq::PROPERTIES_SECTION); - const Uint32 noPages = (ptr.sz + sizeof(Page32)) / sizeof(Page32); - ndbassert(noPages > 0); - if (!prepPtr.p->preparePages.seize(noPages)) { - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR, - senderRef, senderData); - c_preparePool.release(prepPtr); - return; - } - // Save SimpleProperties - Uint32* target = &prepPtr.p->preparePages.getPtr(0)->data[0]; - copy(target, ptr); - prepPtr.p->prepDataLen = ptr.sz; - // Release long signal sections - releaseSections(signal); - // Check table properties with DICT - SimplePropertiesSectionReader reader(ptr, getSectionSegmentPool()); - prepPtr.p->clientRef = senderRef; - prepPtr.p->clientData = senderData; - // Release long signal sections - releaseSections(signal); - readPrepareProps(signal, &reader, prepPtr.i); -} - -void DbUtil::readPrepareProps(Signal* signal, - SimpleProperties::Reader* reader, - Uint32 
senderData) -{ - jam(); -#if 0 - printf("DbUtil::readPrepareProps: Received SimpleProperties:\n"); - reader->printAll(ndbout); -#endif - ndbrequire(reader->first()); - ndbrequire(reader->getKey() == UtilPrepareReq::NoOfOperations); - ndbrequire(reader->getUint32() == 1); // Only one op/trans implemented - - ndbrequire(reader->next()); - ndbrequire(reader->getKey() == UtilPrepareReq::OperationType); - - ndbrequire(reader->next()); - UtilPrepareReq::KeyValue tableKey = - (UtilPrepareReq::KeyValue) reader->getKey(); - ndbrequire((tableKey == UtilPrepareReq::TableName) || - (tableKey == UtilPrepareReq::TableId)); - - /************************ - * Ask Dict for metadata - ************************/ - { - GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = senderData; - if (tableKey == UtilPrepareReq::TableName) { - jam(); - char tableName[MAX_TAB_NAME_SIZE]; - req->requestType = GetTabInfoReq::RequestByName | - GetTabInfoReq::LongSignalConf; - - req->tableNameLen = reader->getValueLen(); // Including trailing \0 - - /******************************************** - * Code signal data and send signals to DICT - ********************************************/ - - ndbrequire(req->tableNameLen < MAX_TAB_NAME_SIZE); - reader->getString((char*)tableName); - LinearSectionPtr ptr[1]; - ptr[0].p = (Uint32*)tableName; - ptr[0].sz = req->tableNameLen; - sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB, ptr,1); - - } - else { // (tableKey == UtilPrepareReq::TableId) - jam(); - req->requestType = GetTabInfoReq::RequestById | - GetTabInfoReq::LongSignalConf; - req->tableId = reader->getUint32(); - sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal, - GetTabInfoReq::SignalLength, JBB); - } - - } -} - -/** - * @note We assume that this signal comes due to a request related - * to a Prepare struct. DictTabInfo:s 'senderData' denotes - * the Prepare struct related to the request. - */ -void -DbUtil::execGET_TABINFO_CONF(Signal* signal){ - jamEntry(); - - if(!assembleFragments(signal)){ - jam(); - return; - } - - /**************** - * Decode signal - ****************/ - GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr(); - const Uint32 prepI = conf->senderData; - const Uint32 totalLen = conf->totalLen; - - SegmentedSectionPtr dictTabInfoPtr; - signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO); - ndbrequire(dictTabInfoPtr.sz == totalLen); - - PreparePtr prepPtr; - c_runningPrepares.getPtr(prepPtr, prepI); - prepareOperation(signal, prepPtr); -} - -void -DbUtil::execGET_TABINFOREF(Signal* signal){ - jamEntry(); - - GetTabInfoRef * ref = (GetTabInfoRef *)signal->getDataPtr(); - Uint32 prepI = ref->senderData; -#define EVENT_DEBUG -#if 0 //def EVENT_DEBUG - ndbout << "Signal GET_TABINFOREF received." 
<< endl; - ndbout << "Error Code: " << ref->errorCode << endl; - - switch (ref->errorCode) { - case GetTabInfoRef::InvalidTableId: - ndbout << " Msg: Invalid table id" << endl; - break; - case GetTabInfoRef::TableNotDefined: - ndbout << " Msg: Table not defined" << endl; - break; - case GetTabInfoRef::TableNameToLong: - ndbout << " Msg: Table node too long" << endl; - break; - default: - ndbout << " Msg: Unknown error returned from Dict" << endl; - break; - } -#endif - - PreparePtr prepPtr; - c_runningPrepares.getPtr(prepPtr, prepI); - - sendUtilPrepareRef(signal, UtilPrepareRef::DICT_TAB_INFO_ERROR, - prepPtr.p->clientRef, prepPtr.p->clientData); - - releasePrepare(prepPtr); -} - - -/****************************************************************************** - * Prepare Operation - * - * Using a prepare record, prepare an operation (i.e. create PreparedOperation). - * Info from both Pepare request (PreparePages) and DictTabInfo is used. - * - * Algorithm: - * -# Seize AttrbuteMapping - * - Lookup in preparePages how many attributes should be prepared - * - Seize AttributeMapping - * -# For each attributes in preparePages - * - Lookup id and isPK in dictInfoPages - * - Store "no -> (AttributeId, Position)" in AttributeMapping - * -# For each map in AttributeMapping - * - if (isPK) then assign offset - ******************************************************************************/ -void -DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr) -{ - jam(); - - /******************************************* - * Seize and store PreparedOperation struct - *******************************************/ - PreparedOperationPtr prepOpPtr; - if(!c_preparedOperationPool.seize(prepOpPtr)) { - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR, - prepPtr.p->clientRef, prepPtr.p->clientData); - releasePrepare(prepPtr); - return; - } - prepPtr.p->prepOpPtr = prepOpPtr; - - /******************** - * Read request info - ********************/ - SimplePropertiesLinearReader prepPagesReader(&prepPtr.p->preparePages.getPtr(0)->data[0], - prepPtr.p->prepDataLen); - - ndbrequire(prepPagesReader.first()); - ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::NoOfOperations); - const Uint32 noOfOperations = prepPagesReader.getUint32(); - ndbrequire(noOfOperations == 1); - - ndbrequire(prepPagesReader.next()); - ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::OperationType); - const Uint32 operationType = prepPagesReader.getUint32(); - - ndbrequire(prepPagesReader.next()); - - char tableName[MAX_TAB_NAME_SIZE]; - Uint32 tableId; - UtilPrepareReq::KeyValue tableKey = - (UtilPrepareReq::KeyValue) prepPagesReader.getKey(); - if (tableKey == UtilPrepareReq::TableId) { - jam(); - tableId = prepPagesReader.getUint32(); - } - else { - jam(); - ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::TableName); - ndbrequire(prepPagesReader.getValueLen() <= MAX_TAB_NAME_SIZE); - prepPagesReader.getString(tableName); - } - /****************************************************************** - * Seize AttributeMapping (by counting no of attribs in prepPages) - ******************************************************************/ - Uint32 noOfAttributes = 0; // No of attributes in PreparePages (used later) - while(prepPagesReader.next()) { - if (tableKey == UtilPrepareReq::TableName) { - jam(); - ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::AttributeName); - } else { - jam(); - ndbrequire(prepPagesReader.getKey() == UtilPrepareReq::AttributeId); - } 
- noOfAttributes++; - } - ndbrequire(prepPtr.p->prepOpPtr.p->attrMapping.seize(noOfAttributes)); - if (operationType == UtilPrepareReq::Read) { - ndbrequire(prepPtr.p->prepOpPtr.p->rsInfo.seize(noOfAttributes)); - } - /*************************************** - * For each attribute name, lookup info - ***************************************/ - // Goto start of attribute names - ndbrequire(prepPagesReader.first() && prepPagesReader.next() && - prepPagesReader.next()); - - DictTabInfo::Table tableDesc; tableDesc.init(); - AttrMappingBuffer::DataBufferIterator attrMappingIt; - ndbrequire(prepPtr.p->prepOpPtr.p->attrMapping.first(attrMappingIt)); - - ResultSetBuffer::DataBufferIterator rsInfoIt; - if (operationType == UtilPrepareReq::Read) { - ndbrequire(prepPtr.p->prepOpPtr.p->rsInfo.first(rsInfoIt)); - } - - Uint32 noOfPKAttribsStored = 0; - Uint32 noOfNonPKAttribsStored = 0; - Uint32 attrLength = 0; - Uint32 pkAttrLength = 0; - char attrNameRequested[MAX_ATTR_NAME_SIZE]; - Uint32 attrIdRequested; - - while(prepPagesReader.next()) { - UtilPrepareReq::KeyValue attributeKey = - (UtilPrepareReq::KeyValue) prepPagesReader.getKey(); - - ndbrequire((attributeKey == UtilPrepareReq::AttributeName) || - (attributeKey == UtilPrepareReq::AttributeId)); - if (attributeKey == UtilPrepareReq::AttributeName) { - jam(); - ndbrequire(prepPagesReader.getValueLen() <= MAX_ATTR_NAME_SIZE); - - prepPagesReader.getString(attrNameRequested); - attrIdRequested= ~0u; - } else { - jam(); - attrIdRequested = prepPagesReader.getUint32(); - } - /***************************************** - * Copy DictTabInfo into tableDesc struct - *****************************************/ - - SegmentedSectionPtr ptr; - signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO); - SimplePropertiesSectionReader dictInfoReader(ptr, getSectionSegmentPool()); - - SimpleProperties::UnpackStatus unpackStatus; - unpackStatus = SimpleProperties::unpack(dictInfoReader, &tableDesc, - DictTabInfo::TableMapping, - DictTabInfo::TableMappingSize, - true, true); - ndbrequire(unpackStatus == SimpleProperties::Break); - - /************************ - * Lookup in DictTabInfo - ************************/ - DictTabInfo::Attribute attrDesc; attrDesc.init(); - char attrName[MAX_ATTR_NAME_SIZE]; - Uint32 attrId= ~(Uint32)0; - bool attributeFound = false; - Uint32 noOfKeysFound = 0; // # PK attrs found before attr in DICTdata - Uint32 noOfNonKeysFound = 0; // # nonPK attrs found before attr in DICTdata - for (Uint32 i=0; iclientRef, prepPtr.p->clientData); - infoEvent("UTIL: Unknown attribute requested: %s in table: %s", - attrNameRequested, tableName); - releasePreparedOperation(prepOpPtr); - releasePrepare(prepPtr); - return; - } - - /************************************************************** - * Attribute found - store in mapping (AttributeId, Position) - **************************************************************/ - AttributeHeader attrMap(attrDesc.AttributeId, // 1. Store AttrId - 0); - - if (attrDesc.AttributeKeyFlag) { - // ** Attribute belongs to PK ** - prepOpPtr.p->pkBitmask.set(attrDesc.AttributeId); - attrMap.setDataSize(noOfKeysFound - 1); // 2. Store Position - noOfPKAttribsStored++; - } else { - attrMap.setDataSize(0x3fff); // 2. 
Store Position (fake) - noOfNonPKAttribsStored++; - - /*********************************************************** - * Error: Read nonPK Attr before all PK attr have been read - ***********************************************************/ - if (noOfPKAttribsStored != tableDesc.NoOfKeyAttr) { - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, - UtilPrepareRef::DICT_TAB_INFO_ERROR, - prepPtr.p->clientRef, prepPtr.p->clientData); - infoEvent("UTIL: Non-PK attr not allowed before " - "all PK attrs have been defined, table: %s", - tableName); - releasePreparedOperation(prepOpPtr); - releasePrepare(prepPtr); - return; - } - } - *(attrMappingIt.data) = attrMap.m_value; -#if 0 - ndbout << "BEFORE: attrLength: " << attrLength << endl; -#endif - { - int len = 0; - switch (attrDesc.AttributeSize) { - case DictTabInfo::an8Bit: - len = (attrDesc.AttributeArraySize + 3)/ 4; - break; - case DictTabInfo::a16Bit: - len = (attrDesc.AttributeArraySize + 1) / 2; - break; - case DictTabInfo::a32Bit: - len = attrDesc.AttributeArraySize; - break; - case DictTabInfo::a64Bit: - len = attrDesc.AttributeArraySize * 2; - break; - case DictTabInfo::a128Bit: - len = attrDesc.AttributeArraySize * 4; - break; - } - attrLength += len; - if (attrDesc.AttributeKeyFlag) - pkAttrLength += len; - - if (operationType == UtilPrepareReq::Read) { - AttributeHeader::init(rsInfoIt.data, - attrDesc.AttributeId, // 1. Store AttrId - len << 2); - prepOpPtr.p->rsInfo.next(rsInfoIt, 1); - } - } -#if 0 - ndbout << ": AttributeSize: " << attrDesc.AttributeSize << endl; - ndbout << ": AttributeArraySize: " << attrDesc.AttributeArraySize << endl; - ndbout << "AFTER: attrLength: " << attrLength << endl; -#endif - //attrMappingIt.print(stdout); - //prepPtr.p->prepOpPtr.p->attrMapping.print(stdout); - prepPtr.p->prepOpPtr.p->attrMapping.next(attrMappingIt, 1); - } - - /*************************** - * Error: Not all PKs found - ***************************/ - if (noOfPKAttribsStored != tableDesc.NoOfKeyAttr) { - jam(); - releaseSections(signal); - sendUtilPrepareRef(signal, - UtilPrepareRef::DICT_TAB_INFO_ERROR, - prepPtr.p->clientRef, prepPtr.p->clientData); - infoEvent("UTIL: Not all primary key attributes requested for table: %s", - tableName); - releasePreparedOperation(prepOpPtr); - releasePrepare(prepPtr); - return; - } - -#if 0 - AttrMappingBuffer::ConstDataBufferIterator tmpIt; - for (prepPtr.p->prepOpPtr.p->attrMapping.first(tmpIt); tmpIt.curr.i != RNIL; - prepPtr.p->prepOpPtr.p->attrMapping.next(tmpIt)) { - AttributeHeader* ah = (AttributeHeader *) tmpIt.data; - ah->print(stdout); - } -#endif - - /********************************************** - * Preparing of PreparedOperation signal train - **********************************************/ - Uint32 static_len = TcKeyReq::StaticLength; - prepOpPtr.p->tckey.tableId = tableDesc.TableId; - prepOpPtr.p->tckey.tableSchemaVersion = tableDesc.TableVersion; - prepOpPtr.p->noOfKeyAttr = tableDesc.NoOfKeyAttr; - prepOpPtr.p->keyLen = tableDesc.KeyLength; // Total no of words in PK - if (prepOpPtr.p->keyLen > TcKeyReq::MaxKeyInfo) { - jam(); - prepOpPtr.p->tckeyLenInBytes = (static_len + TcKeyReq::MaxKeyInfo) * 4; - } else { - jam(); - prepOpPtr.p->tckeyLenInBytes = (static_len + prepOpPtr.p->keyLen) * 4; - } - prepOpPtr.p->keyDataPos = static_len; // Start of keyInfo[] in tckeyreq - - Uint32 requestInfo = 0; - TcKeyReq::setAbortOption(requestInfo, TcKeyReq::AbortOnError); - TcKeyReq::setKeyLength(requestInfo, tableDesc.KeyLength); - switch(operationType) { - 
case(UtilPrepareReq::Read): - prepOpPtr.p->rsLen = - attrLength + - tableDesc.NoOfKeyAttr + - noOfNonPKAttribsStored; // Read needs a resultset - prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored; - prepOpPtr.p->tckey.attrLen = prepOpPtr.p->noOfAttr; - TcKeyReq::setOperationType(requestInfo, ZREAD); - break; - case(UtilPrepareReq::Update): - prepOpPtr.p->rsLen = 0; - prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored; - prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr; - TcKeyReq::setOperationType(requestInfo, ZUPDATE); - break; - case(UtilPrepareReq::Insert): - prepOpPtr.p->rsLen = 0; - prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored; - prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr; - TcKeyReq::setOperationType(requestInfo, ZINSERT); - break; - case(UtilPrepareReq::Delete): - // The number of attributes should equal the size of the primary key - ndbrequire(tableDesc.KeyLength == attrLength); - prepOpPtr.p->rsLen = 0; - prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr; - prepOpPtr.p->tckey.attrLen = 0; - TcKeyReq::setOperationType(requestInfo, ZDELETE); - break; - case(UtilPrepareReq::Write): - prepOpPtr.p->rsLen = 0; - prepOpPtr.p->noOfAttr = tableDesc.NoOfKeyAttr + noOfNonPKAttribsStored; - prepOpPtr.p->tckey.attrLen = attrLength + prepOpPtr.p->noOfAttr; - TcKeyReq::setOperationType(requestInfo, ZWRITE); - break; - } - TcKeyReq::setAIInTcKeyReq(requestInfo, 0); // Attrinfo sent separately - prepOpPtr.p->tckey.requestInfo = requestInfo; - - /**************************** - * Confirm completed prepare - ****************************/ - UtilPrepareConf * conf = (UtilPrepareConf *)signal->getDataPtr(); - conf->senderData = prepPtr.p->clientData; - conf->prepareId = prepPtr.p->prepOpPtr.i; - - releaseSections(signal); - sendSignal(prepPtr.p->clientRef, GSN_UTIL_PREPARE_CONF, signal, - UtilPrepareConf::SignalLength, JBB); - -#if 0 - prepPtr.p->prepOpPtr.p->print(); -#endif - releasePrepare(prepPtr); -} - - -void -DbUtil::execUTIL_RELEASE_REQ(Signal* signal){ - jamEntry(); - - UtilReleaseReq * req = (UtilReleaseReq *)signal->getDataPtr(); - const Uint32 clientRef = signal->senderBlockRef(); - const Uint32 prepareId = req->prepareId; - const Uint32 senderData = req->senderData; - -#if 0 - /** - * This only works in when ARRAY_GUARD is defined (debug-mode) - */ - if (!c_preparedOperationPool.isSeized(prepareId)) { - UtilReleaseRef * ref = (UtilReleaseRef *)signal->getDataPtr(); - ref->prepareId = prepareId; - ref->errorCode = UtilReleaseRef::NO_SUCH_PREPARE_SEIZED; - sendSignal(clientRef, GSN_UTIL_RELEASE_REF, signal, - UtilReleaseRef::SignalLength, JBB); - } -#endif - PreparedOperationPtr prepOpPtr; - c_preparedOperationPool.getPtr(prepOpPtr, prepareId); - - releasePreparedOperation(prepOpPtr); - - UtilReleaseConf * const conf = (UtilReleaseConf*)signal->getDataPtrSend(); - conf->senderData = senderData; - sendSignal(clientRef, GSN_UTIL_RELEASE_CONF, signal, - UtilReleaseConf::SignalLength, JBB); -} - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Sequence Service - * ------------------------------------------------------------------------ - * - * A service with a stored incrementable number - **************************************************************************/ - -void -DbUtil::hardcodedPrepare() { - /** - * Prepare SequenceCurrVal (READ) - */ - { - PreparedOperationPtr ptr; - 
ndbrequire(c_preparedOperationPool.seizeId(ptr, 0)); - ptr.p->keyLen = 1; - ptr.p->tckey.attrLen = 1; - ptr.p->rsLen = 3; - ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength + - ptr.p->keyLen + ptr.p->tckey.attrLen) * 4; - ptr.p->keyDataPos = TcKeyReq::StaticLength; - ptr.p->tckey.tableId = 0; - Uint32 requestInfo = 0; - TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree); - TcKeyReq::setOperationType(requestInfo, ZREAD); - TcKeyReq::setKeyLength(requestInfo, 1); - TcKeyReq::setAIInTcKeyReq(requestInfo, 1); - ptr.p->tckey.requestInfo = requestInfo; - ptr.p->tckey.tableSchemaVersion = 1; - - // This is actually attr data - AttributeHeader::init(&ptr.p->tckey.distrGroupHashValue, 1, 0); - - ndbrequire(ptr.p->rsInfo.seize(1)); - ResultSetInfoBuffer::DataBufferIterator it; - ptr.p->rsInfo.first(it); - AttributeHeader::init(it.data, 1, 2 << 2); // Attribute 1 - 2 data words - } - - /** - * Prepare SequenceNextVal (UPDATE) - */ - { - PreparedOperationPtr ptr; - ndbrequire(c_preparedOperationPool.seizeId(ptr, 1)); - ptr.p->keyLen = 1; - ptr.p->rsLen = 3; - ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength + ptr.p->keyLen + 5) * 4; - ptr.p->keyDataPos = TcKeyReq::StaticLength; - ptr.p->tckey.attrLen = 11; - ptr.p->tckey.tableId = 0; - Uint32 requestInfo = 0; - TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree); - TcKeyReq::setOperationType(requestInfo, ZUPDATE); - TcKeyReq::setKeyLength(requestInfo, 1); - TcKeyReq::setAIInTcKeyReq(requestInfo, 5); - TcKeyReq::setInterpretedFlag(requestInfo, 1); - ptr.p->tckey.requestInfo = requestInfo; - ptr.p->tckey.tableSchemaVersion = 1; - - // Signal is packed, which is why attrInfo is at distrGroupHashValue - // position - Uint32 * attrInfo = &ptr.p->tckey.distrGroupHashValue; - attrInfo[0] = 0; // IntialReadSize - attrInfo[1] = 5; // InterpretedSize - attrInfo[2] = 0; // FinalUpdateSize - attrInfo[3] = 1; // FinalReadSize - attrInfo[4] = 0; // SubroutineSize - - { // AttrInfo - ndbrequire(ptr.p->attrInfo.seize(6)); - AttrInfoBuffer::DataBufferIterator it; - ptr.p->attrInfo.first(it); - * it.data = Interpreter::Read(1, 6); - ndbrequire(ptr.p->attrInfo.next(it)); - * it.data = Interpreter::LoadConst16(7, 1); - ndbrequire(ptr.p->attrInfo.next(it)); - * it.data = Interpreter::Add(7, 6, 7); - ndbrequire(ptr.p->attrInfo.next(it)); - * it.data = Interpreter::Write(1, 7); - ndbrequire(ptr.p->attrInfo.next(it)); - * it.data = Interpreter::ExitOK(); - - ndbrequire(ptr.p->attrInfo.next(it)); - AttributeHeader::init(it.data, 1, 0); - } - - { // ResultSet - ndbrequire(ptr.p->rsInfo.seize(1)); - ResultSetInfoBuffer::DataBufferIterator it; - ptr.p->rsInfo.first(it); - AttributeHeader::init(it.data, 1, 2 << 2); // Attribute 1 - 2 data words - } - } - - /** - * Prepare CreateSequence (INSERT) - */ - { - PreparedOperationPtr ptr; - ndbrequire(c_preparedOperationPool.seizeId(ptr, 2)); - ptr.p->keyLen = 1; - ptr.p->tckey.attrLen = 5; - ptr.p->rsLen = 0; - ptr.p->tckeyLenInBytes = (TcKeyReq::StaticLength + - ptr.p->keyLen + ptr.p->tckey.attrLen) * 4; - ptr.p->keyDataPos = TcKeyReq::StaticLength; - ptr.p->tckey.tableId = 0; - Uint32 requestInfo = 0; - TcKeyReq::setAbortOption(requestInfo, TcKeyReq::CommitIfFailFree); - TcKeyReq::setOperationType(requestInfo, ZINSERT); - TcKeyReq::setKeyLength(requestInfo, 1); - TcKeyReq::setAIInTcKeyReq(requestInfo, 0); - ptr.p->tckey.requestInfo = requestInfo; - ptr.p->tckey.tableSchemaVersion = 1; - } -} - -void -DbUtil::execUTIL_SEQUENCE_REQ(Signal* signal){ - jamEntry(); - - UtilSequenceReq * req = 
(UtilSequenceReq*)signal->getDataPtr(); - - PreparedOperation * prepOp; - - switch(req->requestType){ - case UtilSequenceReq::CurrVal: - prepOp = c_preparedOperationPool.getPtr(0); //c_SequenceCurrVal - break; - case UtilSequenceReq::NextVal: - prepOp = c_preparedOperationPool.getPtr(1); //c_SequenceNextVal - break; - case UtilSequenceReq::Create: - prepOp = c_preparedOperationPool.getPtr(2); //c_CreateSequence - break; - default: - ndbrequire(false); - prepOp = 0; // remove warning - } - - /** - * 1 Transaction with 1 operation - */ - TransactionPtr transPtr; - ndbrequire(c_runningTransactions.seize(transPtr)); - - OperationPtr opPtr; - ndbrequire(transPtr.p->operations.seize(opPtr)); - - ndbrequire(opPtr.p->rs.seize(prepOp->rsLen)); - ndbrequire(opPtr.p->keyInfo.seize(prepOp->keyLen)); - - transPtr.p->gsn = GSN_UTIL_SEQUENCE_REQ; - transPtr.p->clientRef = signal->senderBlockRef(); - transPtr.p->clientData = req->senderData; - transPtr.p->sequence.sequenceId = req->sequenceId; - transPtr.p->sequence.requestType = req->requestType; - - opPtr.p->prepOp = prepOp; - opPtr.p->prepOp_i = RNIL; - - KeyInfoBuffer::DataBufferIterator it; - opPtr.p->keyInfo.first(it); - it.data[0] = transPtr.p->sequence.sequenceId; - - if(req->requestType == UtilSequenceReq::Create){ - ndbrequire(opPtr.p->attrInfo.seize(5)); - AttrInfoBuffer::DataBufferIterator it; - - opPtr.p->attrInfo.first(it); - AttributeHeader::init(it.data, 0, 1 << 2); - - ndbrequire(opPtr.p->attrInfo.next(it)); - * it.data = transPtr.p->sequence.sequenceId; - - ndbrequire(opPtr.p->attrInfo.next(it)); - AttributeHeader::init(it.data, 1, 2 << 2); - - ndbrequire(opPtr.p->attrInfo.next(it)); - * it.data = 0; - - ndbrequire(opPtr.p->attrInfo.next(it)); - * it.data = 0; - } - - runTransaction(signal, transPtr); -} - -int -DbUtil::getResultSet(Signal* signal, const Transaction * transP, - struct LinearSectionPtr sectionsPtr[]) { - OperationPtr opPtr; - ndbrequire(transP->operations.first(opPtr)); - ndbrequire(transP->operations.hasNext(opPtr) == false); - - int noAttr = 0; - int dataSz = 0; - Uint32* tmpBuf = signal->theData + 25; - const Uint32* headerBuffer = tmpBuf; - - const ResultSetBuffer & rs = opPtr.p->rs; - ResultSetInfoBuffer::ConstDataBufferIterator it; - - // extract headers - for(rs.first(it); it.curr.i != RNIL; ) { - *tmpBuf++ = it.data[0]; - rs.next(it, ((AttributeHeader*)&it.data[0])->getDataSize() + 1); - noAttr++; - } - - if (noAttr == 0) - return 0; - - const Uint32* dataBuffer = tmpBuf; - - // extract data - for(rs.first(it); it.curr.i != RNIL; ) { - int sz = ((AttributeHeader*)&it.data[0])->getDataSize(); - rs.next(it,1); - for (int i = 0; i < sz; i++) { - *tmpBuf++ = *it.data; - rs.next(it,1); - dataSz++; - } - } - - sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = (Uint32 *)headerBuffer; - sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr; - sectionsPtr[UtilExecuteReq::DATA_SECTION].p = (Uint32 *)dataBuffer; - sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataSz; - - return 1; -} - -void -DbUtil::reportSequence(Signal* signal, const Transaction * transP){ - OperationPtr opPtr; - ndbrequire(transP->operations.first(opPtr)); - ndbrequire(transP->operations.hasNext(opPtr) == false); - - if(transP->errorCode == 0){ - jam(); // OK - - UtilSequenceConf * ret = (UtilSequenceConf *)signal->getDataPtrSend(); - ret->senderData = transP->clientData; - ret->sequenceId = transP->sequence.sequenceId; - ret->requestType = transP->sequence.requestType; - - bool ok = false; - switch(transP->sequence.requestType){ - case 
UtilSequenceReq::CurrVal: - case UtilSequenceReq::NextVal:{ - ok = true; - ndbrequire(opPtr.p->rsRecv == 3); - - ResultSetBuffer::DataBufferIterator rsit; - ndbrequire(opPtr.p->rs.first(rsit)); - - ret->sequenceValue[0] = rsit.data[1]; - ret->sequenceValue[1] = rsit.data[2]; - break; - } - case UtilSequenceReq::Create: - ok = true; - ret->sequenceValue[0] = 0; - ret->sequenceValue[1] = 0; - break; - } - ndbrequire(ok); - sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_CONF, signal, - UtilSequenceConf::SignalLength, JBB); - return; - } - - UtilSequenceRef::ErrorCode errCode = UtilSequenceRef::TCError; - - switch(transP->sequence.requestType) - { - case UtilSequenceReq::CurrVal: - case UtilSequenceReq::NextVal:{ - if (transP->errorCode == 626) - errCode = UtilSequenceRef::NoSuchSequence; - break; - } - case UtilSequenceReq::Create: - break; - } - - UtilSequenceRef * ret = (UtilSequenceRef *)signal->getDataPtrSend(); - ret->senderData = transP->clientData; - ret->sequenceId = transP->sequence.sequenceId; - ret->requestType = transP->sequence.requestType; - ret->errorCode = (Uint32)errCode; - sendSignal(transP->clientRef, GSN_UTIL_SEQUENCE_REF, signal, - UtilSequenceRef::SignalLength, JBB); -} -#if 0 - Ndb ndb("ndb","def"); - NdbConnection* tConnection = ndb.startTransaction(); - NdbOperation* tOperation = tConnection->getNdbOperation("SYSTAB_0"); - - //#if 0 && API_CODE - if( tOperation != NULL ) { - tOperation->interpretedUpdateTuple(); - tOperation->equal((U_Int32)0, keyValue ); - tNextId_Result = tOperation->getValue((U_Int32)1); - tOperation->incValue((U_Int32)1, (U_Int32)8192); - - if (tConnection->execute( Commit ) != -1 ) { - U_Int64 tValue = tNextId_Result->u_64_value(); // Read result value - theFirstTransId = tValue; - theLastTransId = tValue + 8191; - closeTransaction(tConnection); - return startTransactionLocal(aPriority, nodeId); - } - } - /** - * IntialReadSize = 0; - * InterpretedSize = incValue(1); - * FinalUpdateSize = 0; - * FinalReadSize = 1; // Read value - * SubroutineSize = 0; - */ -#endif - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Transaction execution request - * ------------------------------------------------------------------------ - * - * Handle requests to execute a prepared transaction - **************************************************************************/ - -void -DbUtil::execUTIL_EXECUTE_REQ(Signal* signal) -{ - jamEntry(); - - UtilExecuteReq * req = (UtilExecuteReq *)signal->getDataPtr(); - const Uint32 clientRef = req->senderRef; - const Uint32 clientData = req->senderData; - const Uint32 prepareId = req->getPrepareId(); - const bool releaseFlag = req->getReleaseFlag(); - - if(signal->getNoOfSections() == 0) { - // Missing prepare data - jam(); - releaseSections(signal); - sendUtilExecuteRef(signal, UtilExecuteRef::MissingDataSection, - 0, clientRef, clientData); - return; - } - /******************************* - * Get PreparedOperation struct - *******************************/ - PreparedOperationPtr prepOpPtr; - c_preparedOperationPool.getPtr(prepOpPtr, prepareId); - - prepOpPtr.p->releaseFlag = releaseFlag; - - TransactionPtr transPtr; - OperationPtr opPtr; - SegmentedSectionPtr headerPtr, dataPtr; - - signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION); - SectionReader headerReader(headerPtr, getSectionSegmentPool()); - signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION); - SectionReader dataReader(dataPtr, 
getSectionSegmentPool()); - -#if 0 //def EVENT_DEBUG - // Debugging - printf("DbUtil::execUTIL_EXECUTEL_REQ: Headers (%u): ", headerPtr.sz); - Uint32 word; - while(headerReader.getWord(&word)) - printf("H'%.8x ", word); - printf("\n"); - printf("DbUtil::execUTIL_EXECUTEL_REQ: Data (%u): ", dataPtr.sz); - headerReader.reset(); - while(dataReader.getWord(&word)) - printf("H'%.8x ", word); - printf("\n"); - dataReader.reset(); -#endif - -// Uint32 totalDataLen = headerPtr.sz + dataPtr.sz; - - /************************************************************ - * Seize Transaction record - ************************************************************/ - ndbrequire(c_runningTransactions.seize(transPtr)); - transPtr.p->gsn = GSN_UTIL_EXECUTE_REQ; - transPtr.p->clientRef = clientRef; - transPtr.p->clientData = clientData; - ndbrequire(transPtr.p->operations.seize(opPtr)); - opPtr.p->prepOp = prepOpPtr.p; - opPtr.p->prepOp_i = prepOpPtr.i; - -#if 0 //def EVENT_DEBUG - printf("opPtr.p->rs.seize( %u )\n", prepOpPtr.p->rsLen); -#endif - ndbrequire(opPtr.p->rs.seize(prepOpPtr.p->rsLen)); - - /*********************************************************** - * Store signal data on linear memory in Transaction record - ***********************************************************/ - KeyInfoBuffer* keyInfo = &opPtr.p->keyInfo; - AttrInfoBuffer* attrInfo = &opPtr.p->attrInfo; - AttributeHeader header; - Uint32* tempBuf = signal->theData + 25; - bool dataComplete = true; - - while(headerReader.getWord((Uint32 *)&header)) { - Uint32* bufStart = tempBuf; - header.insertHeader(tempBuf++); - for(unsigned int i = 0; i < header.getDataSize(); i++) { - if (!dataReader.getWord(tempBuf++)) { - dataComplete = false; - break; - } - } - bool res = true; - -#if 0 //def EVENT_DEBUG - if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) == - TcKeyReq::Read) { - if(prepOpPtr.p->pkBitmask.get(header.getAttributeId())) - printf("PrimaryKey\n"); - } - printf("AttrId %u Hdrsz %d Datasz %u \n", - header.getAttributeId(), - header.getHeaderSize(), - header.getDataSize()); -#endif - - if(prepOpPtr.p->pkBitmask.get(header.getAttributeId())) - // A primary key attribute - res = keyInfo->append(bufStart + header.getHeaderSize(), - header.getDataSize()); - - switch (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo)) { - case ZREAD: - res &= attrInfo->append(bufStart, header.getHeaderSize()); - break; - case ZDELETE: - // no attrinfo for Delete - break; - default: - res &= attrInfo->append(bufStart, - header.getHeaderSize() + header.getDataSize()); - } - - if (!res) { - // Failed to allocate buffer data - jam(); - releaseSections(signal); - sendUtilExecuteRef(signal, UtilExecuteRef::AllocationError, - 0, clientRef, clientData); - releaseTransaction(transPtr); - return; - } - } - if (!dataComplete) { - // Missing data in data section - jam(); - releaseSections(signal); - sendUtilExecuteRef(signal, UtilExecuteRef::MissingData, - 0, clientRef, clientData); - releaseTransaction(transPtr); - return; - } - - // quick hack for hash index build - if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) != ZREAD){ - prepOpPtr.p->tckey.attrLen = - prepOpPtr.p->attrInfo.getSize() + opPtr.p->attrInfo.getSize(); - TcKeyReq::setKeyLength(prepOpPtr.p->tckey.requestInfo, keyInfo->getSize()); - } - -#if 0 - const Uint32 l1 = prepOpPtr.p->tckey.attrLen; - const Uint32 l2 = - prepOpPtr.p->attrInfo.getSize() + opPtr.p->attrInfo.getSize(); - - if (TcKeyReq::getOperationType(prepOpPtr.p->tckey.requestInfo) != ZREAD){ - ndbrequire(l1 == l2); - 
} else { - ndbout_c("TcKeyReq::Read"); - } -#endif - - releaseSections(signal); - transPtr.p->noOfRetries = 3; - runTransaction(signal, transPtr); -} - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: General transaction machinery - * ------------------------------------------------------------------------ - * Executes a prepared transaction - **************************************************************************/ -void -DbUtil::runTransaction(Signal* signal, TransactionPtr transPtr){ - - /* Init transaction */ - transPtr.p->sent = 0; - transPtr.p->recv = 0; - transPtr.p->errorCode = 0; - getTransId(transPtr.p); - - OperationPtr opPtr; - ndbrequire(transPtr.p->operations.first(opPtr)); - - /* First operation */ - Uint32 start = 0; - TcKeyReq::setStartFlag(start, 1); - runOperation(signal, transPtr, opPtr, start); - transPtr.p->sent ++; - - /* Rest of operations */ - start = 0; - while(opPtr.i != RNIL){ - runOperation(signal, transPtr, opPtr, start); - transPtr.p->sent ++; - } - //transPtr.p->print(); -} - -void -DbUtil::runOperation(Signal* signal, TransactionPtr & transPtr, - OperationPtr & opPtr, Uint32 start) { - Uint32 opI = opPtr.i; - Operation * op = opPtr.p; - const PreparedOperation * pop = op->prepOp; - - if(!transPtr.p->operations.next(opPtr)){ - TcKeyReq::setCommitFlag(start, 1); // Last operation - TcKeyReq::setExecuteFlag(start, 1); - } - -#if 0 //def EVENT_DEBUG - if (TcKeyReq::getOperationType(pop->tckey.requestInfo) == - TcKeyReq::Read) { - printf("TcKeyReq::Read runOperation\n"); - } -#endif - - /** - * Init operation w.r.t result set - */ - initResultSet(op->rs, pop->rsInfo); - op->rs.first(op->rsIterator); - op->rsRecv = 0; -#if 0 //def EVENT_DEBUG - printf("pop->rsLen %u\n", pop->rsLen); -#endif - op->rsExpect = 0; - op->transPtrI = transPtr.i; - - TcKeyReq * tcKey = (TcKeyReq*)signal->getDataPtrSend(); - //ndbout << "*** 6 ***"<< endl; pop->print(); - memcpy(tcKey, &pop->tckey, pop->tckeyLenInBytes); - //ndbout << "*** 6b ***"<< endl; - //printTCKEYREQ(stdout, signal->getDataPtrSend(), - // pop->tckeyLenInBytes >> 2, 0); - tcKey->apiConnectPtr = transPtr.p->connectPtr; - tcKey->senderData = opI; - tcKey->transId1 = transPtr.p->transId[0]; - tcKey->transId2 = transPtr.p->transId[1]; - tcKey->requestInfo |= start; - -#if 0 //def EVENT_DEBUG - // Debugging - printf("DbUtil::runOperation: KEYINFO\n"); - op->keyInfo.print(stdout); - printf("DbUtil::runOperation: ATTRINFO\n"); - op->attrInfo.print(stdout); -#endif - - /** - * Key Info - */ - //KeyInfoBuffer::DataBufferIterator kit; - KeyInfoIterator kit; - op->keyInfo.first(kit); - Uint32 *keyDst = ((Uint32*)tcKey) + pop->keyDataPos; - for(Uint32 i = 0; i<8 && kit.curr.i != RNIL; i++, op->keyInfo.next(kit)){ - keyDst[i] = * kit.data; - } - //ndbout << "*** 7 ***" << endl; - //printTCKEYREQ(stdout, signal->getDataPtrSend(), - // pop->tckeyLenInBytes >> 2, 0); - -#if 0 //def EVENT_DEBUG - printf("DbUtil::runOperation: sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, %d , JBB)\n", pop->tckeyLenInBytes >> 2); - printTCKEYREQ(stdout, signal->getDataPtr(), pop->tckeyLenInBytes >> 2,0); -#endif - sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, pop->tckeyLenInBytes >> 2, JBB); - - /** - * More the 8 words of key info not implemented - */ - // ndbrequire(kit.curr.i == RNIL); // Yes it is - - /** - * KeyInfo - */ - KeyInfo* keyInfo = (KeyInfo *)signal->getDataPtrSend(); - keyInfo->connectPtr = transPtr.p->connectPtr; - 
keyInfo->transId[0] = transPtr.p->transId[0]; - keyInfo->transId[1] = transPtr.p->transId[1]; - sendKeyInfo(signal, keyInfo, op->keyInfo, kit); - - /** - * AttrInfo - */ - AttrInfo* attrInfo = (AttrInfo *)signal->getDataPtrSend(); - attrInfo->connectPtr = transPtr.p->connectPtr; - attrInfo->transId[0] = transPtr.p->transId[0]; - attrInfo->transId[1] = transPtr.p->transId[1]; - - AttrInfoIterator ait; - pop->attrInfo.first(ait); - sendAttrInfo(signal, attrInfo, pop->attrInfo, ait); - - op->attrInfo.first(ait); - sendAttrInfo(signal, attrInfo, op->attrInfo, ait); -} - -void -DbUtil::sendKeyInfo(Signal* signal, - KeyInfo* keyInfo, - const KeyInfoBuffer & keyBuf, - KeyInfoIterator & kit) -{ - while(kit.curr.i != RNIL) { - Uint32 *keyDst = keyInfo->keyData; - Uint32 keyDataLen = 0; - for(Uint32 i = 0; iattrData; - Uint32 i = 0; - for(i = 0; igetDataSize() + 1); -#endif - rs.next(rsit, ((AttributeHeader*)&rsit.data[0])->getDataSize() + 1); - } -} - -void -DbUtil::getTransId(Transaction * transP){ - - Uint32 tmp[2]; - tmp[0] = c_transId[0]; - tmp[1] = c_transId[1]; - - transP->transId[0] = tmp[0]; - transP->transId[1] = tmp[1]; - - c_transId[1] = tmp[1] + 1; -} - - - -/************************************************************************** - * ------------------------------------------------------------------------ - * MODULE: Post Execute - * ------------------------------------------------------------------------ - * - * Handles result from a sent transaction - **************************************************************************/ - -/** - * execTRANSID_AI - * - * Receive result from transaction - * - * NOTE: This codes assumes that - * TransidAI::DataLength = ResultSetBuffer::getSegmentSize() * n - */ -void -DbUtil::execTRANSID_AI(Signal* signal){ - jamEntry(); -#if 0 //def EVENT_DEBUG - ndbout_c("File: %s line: %u",__FILE__,__LINE__); -#endif - - const Uint32 opI = signal->theData[0]; - const Uint32 transId1 = signal->theData[1]; - const Uint32 transId2 = signal->theData[2]; - const Uint32 dataLen = signal->length() - 3; - - Operation * opP = c_operationPool.getPtr(opI); - TransactionPtr transPtr; - c_runningTransactions.getPtr(transPtr, opP->transPtrI); - - ndbrequire(transId1 == transPtr.p->transId[0] && - transId2 == transPtr.p->transId[1]); - opP->rsRecv += dataLen; - - /** - * Save result - */ - const Uint32 *src = &signal->theData[3]; - ResultSetBuffer::DataBufferIterator rs = opP->rsIterator; - - ndbrequire(opP->rs.import(rs,src,dataLen)); - opP->rs.next(rs, dataLen); - opP->rsIterator = rs; - - if(!opP->complete()){ - jam(); - return; - } - - transPtr.p->recv++; - if(!transPtr.p->complete()){ - jam(); - return; - } - - finishTransaction(signal, transPtr); -} - -void -DbUtil::execTCKEYCONF(Signal* signal){ - jamEntry(); -#if 0 //def EVENT_DEBUG - ndbout_c("File: %s line: %u",__FILE__,__LINE__); -#endif - - TcKeyConf * keyConf = (TcKeyConf*)signal->getDataPtr(); - - //const Uint32 gci = keyConf->gci; - const Uint32 transI = keyConf->apiConnectPtr >> 1; - const Uint32 confInfo = keyConf->confInfo; - const Uint32 transId1 = keyConf->transId1; - const Uint32 transId2 = keyConf->transId2; - - Uint32 recv = 0; - const Uint32 ops = TcKeyConf::getNoOfOperations(confInfo); - for(Uint32 i = 0; ioperations[i].apiOperationPtr); - - ndbrequire(opPtr.p->transPtrI == transI); - opPtr.p->rsExpect += keyConf->operations[i].attrInfoLen; - if(opPtr.p->complete()){ - recv++; - } - } - - /** - * Check commit ack marker flag - */ - if (TcKeyConf::getMarkerFlag(confInfo)){ - signal->theData[0] = 
transId1; - signal->theData[1] = transId2; - sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB); - }//if - - TransactionPtr transPtr; - c_runningTransactions.getPtr(transPtr, transI); - ndbrequire(transId1 == transPtr.p->transId[0] && - transId2 == transPtr.p->transId[1]); - - transPtr.p->recv += recv; - if(!transPtr.p->complete()){ - jam(); - return; - } - finishTransaction(signal, transPtr); -} - -void -DbUtil::execTCKEYREF(Signal* signal){ - jamEntry(); -#if 0 //def EVENT_DEBUG - ndbout_c("File: %s line: %u",__FILE__,__LINE__); -#endif - - const Uint32 transI = signal->theData[0] >> 1; - const Uint32 transId1 = signal->theData[1]; - const Uint32 transId2 = signal->theData[2]; - const Uint32 errCode = signal->theData[3]; - - TransactionPtr transPtr; - c_runningTransactions.getPtr(transPtr, transI); - ndbrequire(transId1 == transPtr.p->transId[0] && - transId2 == transPtr.p->transId[1]); - - //if(getClassification(errCode) == PermanentError){ - //} - - //ndbout << "Transaction error (code: " << errCode << ")" << endl; - - transPtr.p->errorCode = errCode; - finishTransaction(signal, transPtr); -} - -void -DbUtil::execTCROLLBACKREP(Signal* signal){ - jamEntry(); -#if 0 //def EVENT_DEBUG - ndbout_c("File: %s line: %u",__FILE__,__LINE__); -#endif - - const Uint32 transI = signal->theData[0] >> 1; - const Uint32 transId1 = signal->theData[1]; - const Uint32 transId2 = signal->theData[2]; - const Uint32 errCode = signal->theData[3]; - - TransactionPtr transPtr; - c_runningTransactions.getPtr(transPtr, transI); - ndbrequire(transId1 == transPtr.p->transId[0] && - transId2 == transPtr.p->transId[1]); - - //if(getClassification(errCode) == PermanentError){ - //} - -#if 0 //def EVENT_DEBUG - ndbout << "Transaction error (code: " << errCode << ")" << endl; -#endif - - if(transPtr.p->noOfRetries > 0){ - transPtr.p->noOfRetries--; - switch(errCode){ - case 266: - case 410: - case 1204: -#if 0 - ndbout_c("errCode: %d noOfRetries: %d -> retry", - errCode, transPtr.p->noOfRetries); -#endif - runTransaction(signal, transPtr); - return; - } - } - - transPtr.p->errorCode = errCode; - finishTransaction(signal, transPtr); -} - -void -DbUtil::finishTransaction(Signal* signal, TransactionPtr transPtr){ -#if 0 //def EVENT_DEBUG - ndbout_c("Transaction %x %x completed %s", - transPtr.p->transId[0], - transPtr.p->transId[1], - transPtr.p->errorCode == 0 ? "OK" : "FAILED"); -#endif - - /* - How to find the correct RS? Could we have multi-RS/transaction? 
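// Sketch: execTCROLLBACKREP() above retries a rolled-back transaction for a
// small set of error codes as long as noOfRetries (set to 3 when the
// transaction is first run) is not used up; any other code, or an exhausted
// retry budget, ends the transaction with that error. A self-contained
// restatement of the decision, assuming the listed codes (266, 410, 1204)
// denote temporary conditions worth retrying:
#include <cstdint>

static bool isRetriableTcError(uint32_t errCode)
{
  switch (errCode) {
  case 266:
  case 410:
  case 1204:          // the codes retried by the deleted block
    return true;
  default:
    return false;
  }
}

static bool shouldRetry(uint32_t errCode, uint32_t &noOfRetries)
{
  if (noOfRetries > 0 && isRetriableTcError(errCode)) {
    --noOfRetries;    // consume one retry, then run the transaction again
    return true;
  }
  return false;       // give up: record errorCode and finish the transaction
}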
- - Operation * opP = c_operationPool.getPtr(opI); - - ResultSetBuffer::DataBufferIterator rsit; - ndbrequire(opP->rs.first(rsit)); - ndbout << "F Result: " << rsit.data << endl; - - while (opP->rs.next(rsit)) { - ndbout << "R Result: " << rsit.data << endl; - } - */ - - switch(transPtr.p->gsn){ - case GSN_UTIL_SEQUENCE_REQ: - jam(); - reportSequence(signal, transPtr.p); - break; - case GSN_UTIL_EXECUTE_REQ: - if (transPtr.p->errorCode) { - UtilExecuteRef * ret = (UtilExecuteRef *)signal->getDataPtrSend(); - ret->senderData = transPtr.p->clientData; - ret->errorCode = UtilExecuteRef::TCError; - ret->TCErrorCode = transPtr.p->errorCode; - sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_REF, signal, - UtilExecuteRef::SignalLength, JBB); - } else { - struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections]; - UtilExecuteConf * ret = (UtilExecuteConf *)signal->getDataPtrSend(); - ret->senderData = transPtr.p->clientData; - if (getResultSet(signal, transPtr.p, sectionsPtr)) { -#if 0 //def EVENT_DEBUG - for (int j = 0; j < 2; j++) { - printf("Result set %u %u\n", j,sectionsPtr[j].sz); - for (int i=0; i < sectionsPtr[j].sz; i++) - printf("H'%.8x ", sectionsPtr[j].p[i]); - printf("\n"); - } -#endif - sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_CONF, signal, - UtilExecuteConf::SignalLength, JBB, - sectionsPtr, UtilExecuteReq::NoOfSections); - } else - sendSignal(transPtr.p->clientRef, GSN_UTIL_EXECUTE_CONF, signal, - UtilExecuteConf::SignalLength, JBB); - } - break; - default: - ndbrequire(0); - break; - } - releaseTransaction(transPtr); -} - -void -DbUtil::execUTIL_LOCK_REQ(Signal * signal){ - jamEntry(); - UtilLockReq * req = (UtilLockReq*)signal->getDataPtr(); - const Uint32 lockId = req->lockId; - - LockQueuePtr lockQPtr; - if(!c_lockQueues.find(lockQPtr, lockId)){ - jam(); - sendLOCK_REF(signal, req, UtilLockRef::NoSuchLock); - return; - } - -// const Uint32 requestInfo = req->requestInfo; - const Uint32 senderNode = refToNode(req->senderRef); - if(senderNode != getOwnNodeId() && senderNode != 0){ - jam(); - sendLOCK_REF(signal, req, UtilLockRef::DistributedLockNotSupported); - return; - } - - LocalDLFifoList queue(c_lockElementPool, - lockQPtr.p->m_queue); - if(req->requestInfo & UtilLockReq::TryLock && !queue.isEmpty()){ - jam(); - sendLOCK_REF(signal, req, UtilLockRef::LockAlreadyHeld); - return; - } - - LockQueueElementPtr lockEPtr; - if(!c_lockElementPool.seize(lockEPtr)){ - jam(); - sendLOCK_REF(signal, req, UtilLockRef::OutOfLockRecords); - return; - } - - lockEPtr.p->m_senderRef = req->senderRef; - lockEPtr.p->m_senderData = req->senderData; - - if(queue.isEmpty()){ - jam(); - sendLOCK_CONF(signal, lockQPtr.p, lockEPtr.p); - } - - queue.add(lockEPtr); -} - -void -DbUtil::execUTIL_UNLOCK_REQ(Signal* signal){ - jamEntry(); - - UtilUnlockReq * req = (UtilUnlockReq*)signal->getDataPtr(); - const Uint32 lockId = req->lockId; - - LockQueuePtr lockQPtr; - if(!c_lockQueues.find(lockQPtr, lockId)){ - jam(); - sendUNLOCK_REF(signal, req, UtilUnlockRef::NoSuchLock); - return; - } - - LocalDLFifoList queue(c_lockElementPool, - lockQPtr.p->m_queue); - LockQueueElementPtr lockEPtr; - if(!queue.first(lockEPtr)){ - jam(); - sendUNLOCK_REF(signal, req, UtilUnlockRef::NotLockOwner); - return; - } - - if(lockQPtr.p->m_lockKey != req->lockKey){ - jam(); - sendUNLOCK_REF(signal, req, UtilUnlockRef::NotLockOwner); - return; - } - - sendUNLOCK_CONF(signal, lockQPtr.p, lockEPtr.p); - queue.release(lockEPtr); - - if(queue.first(lockEPtr)){ - jam(); - sendLOCK_CONF(signal, lockQPtr.p, 
lockEPtr.p); - return; - } -} - -void -DbUtil::sendLOCK_REF(Signal* signal, - const UtilLockReq * req, UtilLockRef::ErrorCode err){ - const Uint32 senderData = req->senderData; - const Uint32 senderRef = req->senderRef; - const Uint32 lockId = req->lockId; - - UtilLockRef * ref = (UtilLockRef*)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->lockId = lockId; - ref->errorCode = err; - sendSignal(senderRef, GSN_UTIL_LOCK_REF, signal, - UtilLockRef::SignalLength, JBB); -} - -void -DbUtil::sendLOCK_CONF(Signal* signal, - LockQueue * lockQP, - LockQueueElement * lockEP){ - const Uint32 senderData = lockEP->m_senderData; - const Uint32 senderRef = lockEP->m_senderRef; - const Uint32 lockId = lockQP->m_lockId; - const Uint32 lockKey = ++lockQP->m_lockKey; - - UtilLockConf * conf = (UtilLockConf*)signal->getDataPtrSend(); - conf->senderData = senderData; - conf->senderRef = reference(); - conf->lockId = lockId; - conf->lockKey = lockKey; - sendSignal(senderRef, GSN_UTIL_LOCK_CONF, signal, - UtilLockConf::SignalLength, JBB); -} - -void -DbUtil::sendUNLOCK_REF(Signal* signal, - const UtilUnlockReq* req, UtilUnlockRef::ErrorCode err){ - - const Uint32 senderData = req->senderData; - const Uint32 senderRef = req->senderRef; - const Uint32 lockId = req->lockId; - - UtilUnlockRef * ref = (UtilUnlockRef*)signal->getDataPtrSend(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->lockId = lockId; - ref->errorCode = err; - sendSignal(senderRef, GSN_UTIL_UNLOCK_REF, signal, - UtilUnlockRef::SignalLength, JBB); -} - -void -DbUtil::sendUNLOCK_CONF(Signal* signal, - LockQueue * lockQP, - LockQueueElement * lockEP){ - const Uint32 senderData = lockEP->m_senderData; - const Uint32 senderRef = lockEP->m_senderRef; - const Uint32 lockId = lockQP->m_lockId; - ++lockQP->m_lockKey; - - UtilUnlockConf * conf = (UtilUnlockConf*)signal->getDataPtrSend(); - conf->senderData = senderData; - conf->senderRef = reference(); - conf->lockId = lockId; - sendSignal(senderRef, GSN_UTIL_UNLOCK_CONF, signal, - UtilUnlockConf::SignalLength, JBB); -} - -void -DbUtil::execUTIL_CREATE_LOCK_REQ(Signal* signal){ - jamEntry(); - UtilCreateLockReq req = * (UtilCreateLockReq*)signal->getDataPtr(); - - UtilCreateLockRef::ErrorCode err = UtilCreateLockRef::OK; - - do { - LockQueuePtr lockQPtr; - if(c_lockQueues.find(lockQPtr, req.lockId)){ - jam(); - err = UtilCreateLockRef::LockIdAlreadyUsed; - break; - } - - if(req.lockType != UtilCreateLockReq::Mutex){ - jam(); - err = UtilCreateLockRef::UnsupportedLockType; - break; - } - - if(!c_lockQueues.seize(lockQPtr)){ - jam(); - err = UtilCreateLockRef::OutOfLockQueueRecords; - break; - } - - new (lockQPtr.p) LockQueue(req.lockId); - c_lockQueues.add(lockQPtr); - - UtilCreateLockConf * conf = (UtilCreateLockConf*)signal->getDataPtrSend(); - conf->senderData = req.senderData; - conf->senderRef = reference(); - conf->lockId = req.lockId; - - sendSignal(req.senderRef, GSN_UTIL_CREATE_LOCK_CONF, signal, - UtilCreateLockConf::SignalLength, JBB); - return; - } while(false); - - UtilCreateLockRef * ref = (UtilCreateLockRef*)signal->getDataPtrSend(); - ref->senderData = req.senderData; - ref->senderRef = reference(); - ref->lockId = req.lockId; - ref->errorCode = err; - - sendSignal(req.senderRef, GSN_UTIL_CREATE_LOCK_REF, signal, - UtilCreateLockRef::SignalLength, JBB); -} - -void -DbUtil::execUTIL_DESTORY_LOCK_REQ(Signal* signal){ - jamEntry(); - - UtilDestroyLockReq req = * (UtilDestroyLockReq*)signal->getDataPtr(); - 
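// Sketch: the UTIL lock service above (execUTIL_LOCK_REQ / execUTIL_UNLOCK_REQ)
// behaves like a FIFO mutex per lock id: the head of the queue owns the lock,
// TryLock fails whenever anyone is queued, and every grant hands out a new
// lock key that must be presented on unlock. A minimal in-memory model of
// those semantics (no signals; names are invented for the sketch):
#include <cstdint>
#include <deque>
#include <optional>

struct FifoLock {
  std::deque<uint32_t> waiters;   // front() is the current lock owner
  uint32_t lockKey = 0;

  // Returns the new lock key if the caller was granted the lock immediately.
  std::optional<uint32_t> lock(uint32_t requester, bool tryLock)
  {
    if (tryLock && !waiters.empty())
      return std::nullopt;                       // LockAlreadyHeld
    const bool granted = waiters.empty();
    waiters.push_back(requester);
    return granted ? std::optional<uint32_t>(++lockKey) : std::nullopt;
  }

  // Releases the lock and returns the next owner (who gets LOCK_CONF), if any.
  std::optional<uint32_t> unlock(uint32_t key)
  {
    if (waiters.empty() || key != lockKey)
      return std::nullopt;                       // NotLockOwner
    waiters.pop_front();
    if (waiters.empty())
      return std::nullopt;
    ++lockKey;                                   // each grant bumps the key
    return waiters.front();
  }
};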
UtilDestroyLockRef::ErrorCode err = UtilDestroyLockRef::OK; - do { - LockQueuePtr lockQPtr; - if(!c_lockQueues.find(lockQPtr, req.lockId)){ - jam(); - err = UtilDestroyLockRef::NoSuchLock; - break; - } - - LocalDLFifoList queue(c_lockElementPool, - lockQPtr.p->m_queue); - LockQueueElementPtr lockEPtr; - if(!queue.first(lockEPtr)){ - jam(); - err = UtilDestroyLockRef::NotLockOwner; - break; - } - - if(lockQPtr.p->m_lockKey != req.lockKey){ - jam(); - err = UtilDestroyLockRef::NotLockOwner; - break; - } - - /** - * OK - */ - - // Inform all in lock queue that queue has been destroyed - UtilLockRef * ref = (UtilLockRef*)signal->getDataPtrSend(); - ref->lockId = req.lockId; - ref->errorCode = UtilLockRef::NoSuchLock; - ref->senderRef = reference(); - LockQueueElementPtr loopPtr = lockEPtr; - for(queue.next(loopPtr); !loopPtr.isNull(); queue.next(loopPtr)){ - jam(); - ref->senderData = loopPtr.p->m_senderData; - const Uint32 senderRef = loopPtr.p->m_senderRef; - sendSignal(senderRef, GSN_UTIL_LOCK_REF, signal, - UtilLockRef::SignalLength, JBB); - } - queue.release(); - c_lockQueues.release(lockQPtr); - - // Send Destroy conf - UtilDestroyLockConf* conf=(UtilDestroyLockConf*)signal->getDataPtrSend(); - conf->senderData = req.senderData; - conf->senderRef = reference(); - conf->lockId = req.lockId; - sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_CONF, signal, - UtilDestroyLockConf::SignalLength, JBB); - return; - } while(false); - - UtilDestroyLockRef * ref = (UtilDestroyLockRef*)signal->getDataPtrSend(); - ref->senderData = req.senderData; - ref->senderRef = reference(); - ref->lockId = req.lockId; - ref->errorCode = err; - sendSignal(req.senderRef, GSN_UTIL_DESTROY_LOCK_REF, signal, - UtilDestroyLockRef::SignalLength, JBB); -} - -template class ArrayPool; diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp deleted file mode 100644 index 9f7b7202740..00000000000 --- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp +++ /dev/null @@ -1,485 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef DBUTIL_H -#define DBUTIL_H - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define UTIL_WORDS_PER_PAGE 1023 - -/** - * @class DbUtil - * @brief Database utilities - * - * This block implements transactional services which can be used by other - * blocks. - * - * @section secSequence Module: The Sequence Service - * - * A sequence is a varaible stored in the database. Each time it is - * requested with "NextVal" it returns a unique number. If requested - * with "CurrVal" it returns the current number. - * - * - Request: SEQUENCE_REQ - * Requests the 'NextVal' or 'CurrVal' for sequence variable 'sequenceId'. 
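// Sketch: the sequence service described above hands out counter values for a
// variable stored in the database: "NextVal" returns a fresh, unique number
// and advances the counter, "CurrVal" only reads it. A minimal in-memory
// model of that contract (the real block keeps the counter in a database
// table rather than a map):
#include <cstdint>
#include <map>

struct SequenceService {
  std::map<uint32_t, uint64_t> sequences;   // sequenceId -> current value

  uint64_t nextVal(uint32_t sequenceId)     // unique, monotonically increasing
  {
    return sequences[sequenceId]++;
  }
  uint64_t currVal(uint32_t sequenceId)     // read without advancing
  {
    return sequences[sequenceId];
  }
};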
- * - * - Response: SEQUENCE_CONF / REF (if failure) - * Returns value requested. - */ -class DbUtil : public SimulatedBlock -{ -public: - DbUtil(Block_context& ctx); - virtual ~DbUtil(); - BLOCK_DEFINES(DbUtil); - -protected: - /** - * Startup & Misc - */ - void execREAD_CONFIG_REQ(Signal* signal); - void execSTTOR(Signal* signal); - void execNDB_STTOR(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - void execCONTINUEB(Signal* signal); - - /** - * Sequence Service : Public interface - */ - void execUTIL_SEQUENCE_REQ(Signal* signal); - void execUTIL_SEQUENCE_REF(Signal* signal); - void execUTIL_SEQUENCE_CONF(Signal* signal); - - /** - * Prepare Service : Public interface - */ - void execUTIL_PREPARE_REQ(Signal* signal); - void execUTIL_PREPARE_CONF(Signal* signal); - void execUTIL_PREPARE_REF(Signal* signal); - - /** - * Delete Service : Public interface - */ - void execUTIL_DELETE_REQ(Signal* signal); - void execUTIL_DELETE_REF(Signal* signal); - void execUTIL_DELETE_CONF(Signal* signal); - - /** - * Execute Service : Public interface - */ - void execUTIL_EXECUTE_REQ(Signal* signal); - void execUTIL_EXECUTE_REF(Signal* signal); - void execUTIL_EXECUTE_CONF(Signal* signal); - - /** - * Prepare Release Service : Public interface - */ - void execUTIL_RELEASE_REQ(Signal* signal); - void execUTIL_RELEASE_CONF(Signal* signal); - void execUTIL_RELEASE_REF(Signal* signal); - - /** - * Backend interface to a used TC service - */ - void execTCSEIZECONF(Signal* signal); - void execTCKEYCONF(Signal* signal); - void execTCKEYREF(Signal* signal); - void execTCROLLBACKREP(Signal* signal); - void execTCKEY_FAILCONF(Signal* signal); - void execTCKEY_FAILREF(Signal* signal); - void execTRANSID_AI(Signal* signal); - - /** - * Backend interface to a used DICT service - */ - void execGET_TABINFOREF(Signal*); - void execGET_TABINFO_CONF(Signal* signal); - -private: - -public: - struct PreparedOperation; - - typedef DataBuffer<11> KeyInfoBuffer; - typedef KeyInfoBuffer::ConstDataBufferIterator KeyInfoIterator; - typedef DataBuffer<11> AttrInfoBuffer; - typedef AttrInfoBuffer::ConstDataBufferIterator AttrInfoIterator; - typedef DataBuffer<11> ResultSetBuffer; - typedef DataBuffer<11> ResultSetInfoBuffer; - typedef DataBuffer<1> AttrMappingBuffer; - - /** - * @struct Page32 - * @brief For storing SimpleProperties objects and similar temporary data - */ - struct Page32 { - Uint32 data[UTIL_WORDS_PER_PAGE]; - Uint32 nextPool; // Note: This used as data when seized - }; - - /** - * @struct Prepare - * @brief Info regarding prepare request (contains a prepared operation) - * - * The prepare phase interprets the table and attribute names sent - * in the prepare request from the client and asks DICT for meta - * information. 
- */ - struct Prepare { - Prepare(ArrayPool & ap) : preparePages(ap) {} - - /*** Client info ***/ - Uint32 clientRef; - Uint32 clientData; - - /** - * SimpleProp sent in UTIL_PREPARE_REQ - * - * Example format: - * - UtilPrepareReq::NoOfOperations=1 - * - UtilPrepareReq::OperationType=UtilPrepareReq::Delete - * - UtilPrepareReq::TableName="SYSTAB_0" - * - UtilPrepareReq::AttributeName="SYSKEY_0" - */ - Uint32 prepDataLen; - Array preparePages; - - /*** PreparedOperation constructed in Prepare phase ***/ - Ptr prepOpPtr; - - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - - void print() const { - ndbout << "[-Prepare-" << endl - << " clientRef: " << clientRef - << ", clientData: " << clientData - << "]" << endl; - } - }; - - /** - * @struct PreparedOperation - * @brief Contains instantiated TcKeyReq signaldata for operation - * - * The prepare phase is finished by storing the request in a - * PreparedOperation record. - */ - struct PreparedOperation { - PreparedOperation(AttrMappingBuffer::DataBufferPool & am, - AttrInfoBuffer::DataBufferPool & ai, - ResultSetInfoBuffer::DataBufferPool & rs) : - releaseFlag(false), attrMapping(am), attrInfo(ai), rsInfo(rs) - { - pkBitmask.clear(); - } - - /*** Various Operation Info ***/ - Uint32 keyLen; // Length of primary key (fixed size is assumed) - Uint32 rsLen; // Size of result set - Uint32 noOfKeyAttr; // Number of key attributes - Uint32 noOfAttr; // Number of attributes - bool releaseFlag; // flag if operation release after completion - - /** - * Attribute Mapping - * - * This datastructure (buffer of AttributeHeader:s) are used to map - * each execute request to a TCKEYREQ train of signals. - * - * The datastructure contains (AttributeId, Position) pairs, where - * - AttributeId is id used in database, and - * - Position is position of attribute value in TCKEYREQ keyinfo - * part of the train of signals which will be send to TC. - * Position == 0x3fff means it should *not* be sent - * in keyinfo part. 
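// Sketch: each attribute-mapping entry described above pairs a database
// AttributeId with the position of its value in the TCKEYREQ key section,
// where position 0x3fff marks an attribute that is not part of the key.
// A standalone illustration of such an entry with an assumed packing
// (id in the upper bits, 14-bit position in the lower bits; the real entries
// are AttributeHeader words, whose exact layout is not reproduced here):
#include <cstdint>

struct AttrMapEntry {
  static constexpr uint32_t NOT_IN_KEY = 0x3fff; // sentinel from the comment above

  uint32_t word;                                 // [ attrId | 14-bit key position ]

  static AttrMapEntry make(uint32_t attrId, uint32_t keyPos)
  {
    return AttrMapEntry{ (attrId << 14) | (keyPos & NOT_IN_KEY) };
  }
  uint32_t attrId()   const { return word >> 14; }
  uint32_t position() const { return word & NOT_IN_KEY; }
  bool     inKey()    const { return position() != NOT_IN_KEY; }
};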
- */ - AttrMappingBuffer attrMapping; - - /*** First signal in tckeyreq train ***/ - Uint32 tckeyLenInBytes; // TcKeyReq total signal length (in bytes) - Uint32 keyDataPos; // Where to store keydata[] in tckey signal - // (in #words from base in tckey signal) - TcKeyReq tckey; // Signaldata for first signal in train - - /*** Attrinfo signals sent to TC (part of tckeyreq train) ***/ - AttrInfoBuffer attrInfo; - - /*** Result of executed operation ***/ - ResultSetInfoBuffer rsInfo; - - Bitmask pkBitmask; - - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - - void print() const { - ndbout << "[-PreparedOperation-" << endl - << " keyLen: " << keyLen - << ", rsLen: " << rsLen - << ", noOfKeyAttr: " << noOfKeyAttr - << ", noOfAttr: " << noOfAttr - << ", tckeyLenInBytes: " << tckeyLenInBytes - << ", keyDataPos: " << keyDataPos << endl - << "-AttrMapping- (AttrId, KeyPos)-pairs " - << "(Pos=3fff if non-key attr):" << endl; - attrMapping.print(stdout); - ndbout << "[-tckey- "; - printTCKEYREQ(stdout, (Uint32*)&tckey, 8, 0); - ndbout << "[-attrInfo- "; - attrInfo.print(stdout); - ndbout << "[-rsInfo- "; - rsInfo.print(stdout); - ndbout << "]]]]" << endl; - } - }; - - /** - * @struct Operation - * @brief Used in execution (contains resultset and buffers for result) - */ - struct Operation { - Operation(KeyInfoBuffer::DataBufferPool & ki, - AttrInfoBuffer::DataBufferPool & ai, - ResultSetBuffer::DataBufferPool & _rs) : - prepOp_i(RNIL), keyInfo(ki), attrInfo(ai), rs(_rs) {} - - PreparedOperation * prepOp; - Uint32 prepOp_i; - KeyInfoBuffer keyInfo; - AttrInfoBuffer attrInfo; - ResultSetBuffer rs; - ResultSetBuffer::DataBufferIterator rsIterator; - - Uint32 transPtrI; - - Uint32 rsRecv; - Uint32 rsExpect; - inline bool complete() const { return rsRecv == rsExpect; } - - union { - Uint32 nextPool; - Uint32 nextList; - }; - - void print() const { - ndbout << "[-Operation-" << endl - << " transPtrI: " << transPtrI - << ", rsRecv: " << rsRecv; - ndbout << "[-PreparedOperation-" << endl; - prepOp->print(); - ndbout << "[-keyInfo-" << endl; - keyInfo.print(stdout); - ndbout << "[-attrInfo-" << endl; - attrInfo.print(stdout); - ndbout << "]]" << endl; - } - }; - - /** - * @struct Transaction - * @brief Used in execution (contains list of operations) - */ - struct Transaction { - Transaction(ArrayPool & ap, ArrayPool & op) : - executePages(ap), operations(op) {} - - Uint32 clientRef; - Uint32 clientData; - Array executePages; - - Uint32 gsn; // Request type (SEQUENCE, DELETE, etc) - union { - /** - * Sequence transaction - */ - struct { - Uint32 sequenceId; - Uint32 requestType; - } sequence; - }; - - Uint32 connectPtr; - Uint32 transId[2]; - SLList operations; - - Uint32 errorCode; - Uint32 noOfRetries; - Uint32 sent; // No of operations sent - Uint32 recv; // No of completed operations received - inline bool complete() const { return sent == recv; }; - - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - - void print() const { - ndbout << "[-Transaction-" << endl - << " clientRef: " << clientRef - << ", clientData: " << clientData - << ", gsn: " << gsn - << ", errorCode: " << errorCode - << endl - << " sent: " << sent << " operations" - << ", recv: " << recv << " completed operations"; - OperationPtr opPtr; - this->operations.first(opPtr); - while(opPtr.i != RNIL){ - ndbout << "[-Operation-" << endl; - opPtr.p->print(); - this->operations.next(opPtr); - } - ndbout << "]" << endl; - } - }; - - typedef Ptr Page32Ptr; - typedef Ptr PreparePtr; - typedef Ptr 
TransactionPtr; - typedef Ptr OperationPtr; - typedef Ptr PreparedOperationPtr; - - Uint32 c_transId[2]; - ArrayPool c_pagePool; - ArrayPool c_preparePool; - ArrayPool c_operationPool; - ArrayPool c_preparedOperationPool; - ArrayPool c_transactionPool; - - DataBuffer<1>::DataBufferPool c_attrMappingPool; - DataBuffer<11>::DataBufferPool c_dataBufPool; - DLList c_runningPrepares; - DLList c_seizingTransactions; // Being seized at TC - DLList c_runningTransactions; // Seized and now exec. - - void getTransId(Transaction *); - void initResultSet(ResultSetBuffer &, const ResultSetInfoBuffer &); - void runTransaction(Signal* signal, TransactionPtr); - void runOperation(Signal* signal, TransactionPtr &, OperationPtr &, Uint32); - void sendKeyInfo(Signal* signal, - KeyInfo* keyInfo, - const KeyInfoBuffer & keyBuf, - KeyInfoIterator & kit); - void sendAttrInfo(Signal*, - AttrInfo* attrInfo, - const AttrInfoBuffer &, - AttrInfoIterator & ait); - int getResultSet(Signal* signal, const Transaction * transP, - struct LinearSectionPtr sectionsPtr[]); - void finishTransaction(Signal*, TransactionPtr); - void releaseTransaction(TransactionPtr transPtr); - void hardcodedPrepare(); - void connectTc(Signal* signal); - void reportSequence(Signal*, const Transaction *); - void readPrepareProps(Signal* signal, - SimpleProperties::Reader* reader, - Uint32 senderData); - void prepareOperation(Signal*, PreparePtr); - void sendUtilPrepareRef(Signal*, UtilPrepareRef::ErrorCode, Uint32, Uint32); - void sendUtilExecuteRef(Signal*, UtilExecuteRef::ErrorCode, - Uint32, Uint32, Uint32); - void releasePrepare(PreparePtr); - void releasePreparedOperation(PreparedOperationPtr); - - /*************************************************************************** - * Lock manager - */ - struct LockQueueElement { - Uint32 m_senderData; - Uint32 m_senderRef; - union { - Uint32 nextPool; - Uint32 nextList; - }; - Uint32 prevList; - }; - typedef Ptr LockQueueElementPtr; - - struct LockQueue { - LockQueue(){} - LockQueue(Uint32 id) : m_queue() { m_lockId = id; m_lockKey = 0;} - union { - Uint32 m_lockId; - Uint32 key; - }; - Uint32 m_lockKey; - DLFifoList::Head m_queue; - union { - Uint32 nextHash; - Uint32 nextPool; - }; - Uint32 prevHash; - - Uint32 hashValue() const { - return m_lockId; - } - bool equal(const LockQueue & rec) const { - return m_lockId == rec.m_lockId; - } - }; - typedef Ptr LockQueuePtr; - - - ArrayPool c_lockQueuePool; - ArrayPool c_lockElementPool; - KeyTable c_lockQueues; - - void execUTIL_CREATE_LOCK_REQ(Signal* signal); - void execUTIL_DESTORY_LOCK_REQ(Signal* signal); - void execUTIL_LOCK_REQ(Signal* signal); - void execUTIL_UNLOCK_REQ(Signal* signal); - - void sendLOCK_REF(Signal*, const UtilLockReq * req, UtilLockRef::ErrorCode); - void sendLOCK_CONF(Signal*, LockQueue *, LockQueueElement *); - - void sendUNLOCK_REF(Signal*, const UtilUnlockReq*, UtilUnlockRef::ErrorCode); - void sendUNLOCK_CONF(Signal*, LockQueue *, LockQueueElement *); - - // For testing of mutex:es - void mutex_created(Signal* signal, Uint32 mutexId, Uint32 retVal); - void mutex_destroyed(Signal* signal, Uint32 mutexId, Uint32 retVal); - void mutex_locked(Signal* signal, Uint32 mutexId, Uint32 retVal); - void mutex_unlocked(Signal* signal, Uint32 mutexId, Uint32 retVal); -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt deleted file mode 100644 index cc8c1985009..00000000000 --- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.txt +++ /dev/null @@ -1,68 +0,0 @@ 
-UTIL Protocols --------------- -Transactions are executed in two phases: -1) PREPARE -2) EXECUTE - - -PREPARE PHASE -------------- -1) ** REQUEST ** - Client (any block) requests prepare service from Util: - - Client --UTIL_PREPARE_REQ--> Util - ... - Client --UTIL_PREPARE_REQ--> Util - -2) ** DICTINFO ** - Util requests Dict for information about table: - - Util --GET_TABINFOREQ--> Dict - - Util <--DICTTABINFO-- Dict - ... - Util <--DICTTABINFO-- Dict - -3) ** PREPARE ** - Operation (= transaction) is prepared (DbUtil::prepareOperation) - - a) AttrMapping is created (a map used to read of the - actual execute request attribute values and put them in KEYINFO) - - b) TC Signal train is prepared - -4) ** CONFIRM ** - Request is confirmed - - Client <--UTIL_PREPARE_CONF-- Util - - -EXECUTE PHASE -------------- -1) Client (any block) requests execute service from Util: - (Execute can be INSERT, DELETE,...) - - Client --UTIL_EXECUTE_REQ--> Util (Multi-signals not yet implemented) - ... - Client --UTIL_EXECUTE_REQ--> Util - -2) Util --TCKEYREQ--> tc - - Util --KEYINFO--> tc (sometimes) (Not yet implemented) - ... - Util --KEYINFO--> tc - - Util --ATTRINFO--> tc (sometimes) - ... - Util --ATTRINFO--> tc - -3) Util <--TCKEYCONF-- tc - - Util --TC_COMMIT_ACK-->tc (sometimes) - - (in parallel with) - - Util <--TRANSID_AI-- tc (sometimes) - ... - Util <--TRANSID_AI-- tc - diff --git a/storage/ndb/src/kernel/blocks/diskpage.cpp b/storage/ndb/src/kernel/blocks/diskpage.cpp deleted file mode 100644 index c782d1367d9..00000000000 --- a/storage/ndb/src/kernel/blocks/diskpage.cpp +++ /dev/null @@ -1,90 +0,0 @@ -/* Copyright (C) 2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
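// Sketch: DbUtil.txt above describes the two-phase client protocol: PREPARE
// (UTIL_PREPARE_REQ, table metadata fetched from DICT, UTIL_PREPARE_CONF
// returning a prepared-operation handle) followed by any number of EXECUTE
// rounds (UTIL_EXECUTE_REQ turned into a TCKEYREQ/KEYINFO/ATTRINFO train and
// answered with UTIL_EXECUTE_CONF/REF). The call order a client block
// follows, with stand-in functions invented for the sketch:
#include <cstdint>
#include <cstdio>

static uint32_t utilPrepare(const char *table, const char *keyAttr)
{
  std::printf("UTIL_PREPARE_REQ  table=%s key=%s\n", table, keyAttr);
  return 1;                                  // prepared-operation handle
}

static bool utilExecute(uint32_t prepPtrI, unsigned long keyValue)
{
  std::printf("UTIL_EXECUTE_REQ  prep=%u key=%lu\n", (unsigned)prepPtrI, keyValue);
  return true;                               // UTIL_EXECUTE_CONF received
}

int main()
{
  // Phase 1: prepare once.
  const uint32_t del = utilPrepare("SYSTAB_0", "SYSKEY_0");
  // Phase 2: reuse the prepared operation for as many transactions as needed.
  utilExecute(del, 42);
  utilExecute(del, 43);
  return 0;
}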
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include "diskpage.hpp" -#include -#include -#include - -void -File_formats::Zero_page_header::init(File_type ft, - Uint32 node_id, - Uint32 version, - Uint32 now) -{ - memcpy(m_magic, "NDBDISK", 8); - m_byte_order = 0x12345678; - m_page_size = File_formats::NDB_PAGE_SIZE; - m_ndb_version = version; - m_node_id = node_id; - m_file_type = ft; - m_time = now; -} - -int -File_formats::Zero_page_header::validate(File_type ft, - Uint32 node_id, - Uint32 version, - Uint32 now) -{ - return 0; // TODO Check header -} - -NdbOut& -operator<<(NdbOut& out, const File_formats::Zero_page_header& obj) -{ - char buf[256]; - out << "page size: " << obj.m_page_size << endl; - out << "ndb version: " << obj.m_ndb_version << ", " << - ndbGetVersionString(obj.m_ndb_version, 0, buf, sizeof(buf)) << endl; - out << "ndb node id: " << obj.m_node_id << endl; - out << "file type: " << obj.m_file_type << endl; - out << "time: " << obj.m_time << ", " - << ctime((time_t*)&obj.m_time)<< endl; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const File_formats::Datafile::Zero_page& obj) -{ - out << obj.m_page_header << endl; - out << "m_file_no: " << obj.m_file_no << endl; - out << "m_tablespace_id: " << obj.m_tablespace_id << endl; - out << "m_tablespace_version: " << obj.m_tablespace_version << endl; - out << "m_data_pages: " << obj.m_data_pages << endl; - out << "m_extent_pages: " << obj.m_extent_pages << endl; - out << "m_extent_size: " << obj.m_extent_size << endl; - out << "m_extent_count: " << obj.m_extent_count << endl; - out << "m_extent_headers_per_page: " << obj.m_extent_headers_per_page << endl; - out << "m_extent_header_words: " << obj.m_extent_header_words << endl; - out << "m_extent_header_bits_per_page: " << obj.m_extent_header_bits_per_page << endl; - - return out; -} - -NdbOut& -operator<<(NdbOut& out, const File_formats::Undofile::Zero_page& obj) -{ - out << obj.m_page_header << endl; - out << "m_file_id: " << obj.m_file_id << endl; - out << "m_logfile_group_id: " << obj.m_logfile_group_id << endl; - out << "m_logfile_group_version: " << obj.m_logfile_group_version << endl; - out << "m_undo_pages: " << obj.m_undo_pages << endl; - - return out; -} - diff --git a/storage/ndb/src/kernel/blocks/diskpage.hpp b/storage/ndb/src/kernel/blocks/diskpage.hpp deleted file mode 100644 index 16098d39b45..00000000000 --- a/storage/ndb/src/kernel/blocks/diskpage.hpp +++ /dev/null @@ -1,242 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef __NDB_DISKPAGE_HPP -#define __NDB_DISKPAGE_HPP - -#include - -struct File_formats -{ - STATIC_CONST( NDB_PAGE_SIZE = 32768 ); - STATIC_CONST( NDB_PAGE_SIZE_WORDS = NDB_PAGE_SIZE >> 2); - - enum File_type - { - FT_Datafile = 0x1, - FT_Undofile = 0x2 - }; - - struct Page_header - { - Uint32 m_page_lsn_hi; - Uint32 m_page_lsn_lo; - Uint32 m_page_type; - }; - - enum Page_type - { - PT_Unallocated = 0x0, - PT_Extent_page = 0x1, - PT_Tup_fixsize_page = 0x2, - PT_Tup_varsize_page = 0x3, - PT_Undopage = 0x4 - }; - - struct Zero_page_header - { - char m_magic[8]; - Uint32 m_byte_order; - Uint32 m_page_size; - Uint32 m_ndb_version; - Uint32 m_node_id; - Uint32 m_file_type; - Uint32 m_time; // time(0) - Zero_page_header() {} - void init(File_type ft, Uint32 node_id, Uint32 version, Uint32 now); - int validate(File_type ft, Uint32 node_id, Uint32 version, Uint32 now); - }; - - STATIC_CONST( NDB_PAGE_HEADER_WORDS = sizeof(Page_header) >> 2); - - struct Datafile - { - struct Zero_page - { - struct Zero_page_header m_page_header; - Uint32 m_file_no; // Local_key - Uint32 m_file_id; // DICT id - Uint32 m_tablespace_id; - Uint32 m_tablespace_version; - Uint32 m_data_pages; - Uint32 m_extent_pages; - Uint32 m_extent_size; - Uint32 m_extent_count; - Uint32 m_extent_headers_per_page; - Uint32 m_extent_header_words; - Uint32 m_extent_header_bits_per_page; - }; - - struct Extent_header - { - Uint32 m_table; - union - { - Uint32 m_fragment_id; - Uint32 m_next_free_extent; - }; - Extent_header() {} - Uint32 m_page_bitmask[1]; // (BitsPerPage*ExtentSize)/(32*PageSize) - Uint32 get_free_bits(Uint32 page) const; - Uint32 get_free_word_offset(Uint32 page) const; - void update_free_bits(Uint32 page, Uint32 bit); - bool check_free(Uint32 extent_size) const ; - }; - - STATIC_CONST( EXTENT_HEADER_BITMASK_BITS_PER_PAGE = 4 ); - STATIC_CONST( EXTENT_HEADER_FIXED_WORDS = (sizeof(Extent_header)>>2) - 1); - static Uint32 extent_header_words(Uint32 extent_size_in_pages); - - struct Extent_page - { - struct Page_header m_page_header; - Extent_header m_extents[1]; - - Extent_page() {} - Extent_header* get_header(Uint32 extent_no, Uint32 extent_size); - }; - - STATIC_CONST( EXTENT_PAGE_WORDS = NDB_PAGE_SIZE_WORDS - NDB_PAGE_HEADER_WORDS ); - - struct Data_page - { - struct Page_header m_page_header; - }; - }; - - struct Undofile - { - struct Zero_page - { - struct Zero_page_header m_page_header; - Uint32 m_file_id; - Uint32 m_logfile_group_id; - Uint32 m_logfile_group_version; - Uint32 m_undo_pages; - }; - struct Undo_page - { - struct Page_header m_page_header; - Uint32 m_words_used; - Uint32 m_data[1]; - }; - - struct Undo_entry - { - Uint32 m_file_no; - Uint32 m_page_no; - struct - { - Uint32 m_len_offset; - Uint32 m_data[1]; - } m_changes[1]; - Uint32 m_length; // [ 16-bit type | 16 bit length of entry ] - }; - - enum Undo_type { - UNDO_LCP_FIRST = 1 // First LCP record with specific lcp id - ,UNDO_LCP = 2 // LCP Start - - /** - * TUP Undo record - */ - ,UNDO_TUP_ALLOC = 3 - ,UNDO_TUP_UPDATE = 4 - ,UNDO_TUP_FREE = 5 - ,UNDO_TUP_CREATE = 6 - ,UNDO_TUP_DROP = 7 - ,UNDO_TUP_ALLOC_EXTENT = 8 - ,UNDO_TUP_FREE_EXTENT = 9 - - ,UNDO_END = 0x7FFF - ,UNDO_NEXT_LSN = 0x8000 - }; - - struct Undo_lcp - { - Uint32 m_lcp_id; - Uint32 m_type_length; // 16 bit type, 16 bit length - }; - }; - STATIC_CONST( UNDO_PAGE_WORDS 
= NDB_PAGE_SIZE_WORDS - NDB_PAGE_HEADER_WORDS - 1); -}; - - -/** - * Compute size of extent header in words - */ -inline Uint32 -File_formats::Datafile::extent_header_words(Uint32 extent_size_in_pages) -{ - return EXTENT_HEADER_FIXED_WORDS + - ((extent_size_in_pages * EXTENT_HEADER_BITMASK_BITS_PER_PAGE + 31) >> 5); -} - -inline -File_formats::Datafile::Extent_header* -File_formats::Datafile::Extent_page::get_header(Uint32 no, Uint32 extent_size) -{ - Uint32 * tmp = (Uint32*)m_extents; - tmp += no*File_formats::Datafile::extent_header_words(extent_size); - return (Extent_header*)tmp; -} - -inline -Uint32 -File_formats::Datafile::Extent_header::get_free_bits(Uint32 page) const -{ - return ((m_page_bitmask[page >> 3] >> ((page & 7) << 2))) & 15; -} - -inline -Uint32 -File_formats::Datafile::Extent_header::get_free_word_offset(Uint32 page) const -{ - return page >> 3; -} - -inline -void -File_formats::Datafile::Extent_header::update_free_bits(Uint32 page, - Uint32 bit) -{ - Uint32 shift = (page & 7) << 2; - Uint32 mask = (15 << shift); - Uint32 org = m_page_bitmask[page >> 3]; - m_page_bitmask[page >> 3] = (org & ~mask) | (bit << shift); -} - -inline -bool -File_formats::Datafile::Extent_header::check_free(Uint32 extent_size) const -{ - Uint32 words = (extent_size * EXTENT_HEADER_BITMASK_BITS_PER_PAGE + 31) >> 5; - Uint32 sum = 0; - for(; words; words--) - sum |= m_page_bitmask[words-1]; - - if(sum & 0x3333) - return false; - - return true; -} - -#include -NdbOut& operator<<(NdbOut& out, const File_formats::Zero_page_header&); -NdbOut& operator<<(NdbOut& out, const File_formats::Datafile::Zero_page&); -NdbOut& operator<<(NdbOut& out, const File_formats::Undofile::Zero_page&); - -#endif diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp deleted file mode 100644 index cd3fc0d4fbb..00000000000 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ /dev/null @@ -1,3209 +0,0 @@ -/* - Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; version 2 of - the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
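// Sketch: the Extent_header helpers above keep 4 free-space bits per data
// page, packed 8 pages to a 32-bit bitmask word. A standalone restatement of
// that packing with a small self-test (same arithmetic as get_free_bits() and
// update_free_bits(), but outside any NDB types):
#include <cstdint>
#include <cassert>
#include <vector>

static uint32_t getFreeBits(const std::vector<uint32_t> &bm, uint32_t page)
{
  return (bm[page >> 3] >> ((page & 7) << 2)) & 15;       // one nibble per page
}

static void updateFreeBits(std::vector<uint32_t> &bm, uint32_t page, uint32_t bits)
{
  const uint32_t shift = (page & 7) << 2;
  const uint32_t mask  = 15u << shift;
  bm[page >> 3] = (bm[page >> 3] & ~mask) | (bits << shift);
}

int main()
{
  std::vector<uint32_t> bitmask(2, 0);   // enough words for a 16-page extent
  updateFreeBits(bitmask, 9, 5);         // page 9 lives in word 1, nibble 1
  assert(getFreeBits(bitmask, 9) == 5);
  assert(getFreeBits(bitmask, 8) == 0);  // neighbouring page is untouched
  return 0;
}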
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -*/ - -#include "lgman.hpp" -#include "diskpage.hpp" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "ndbfs/Ndbfs.hpp" -#include "dbtup/Dbtup.hpp" - -#include -extern EventLogger g_eventLogger; - -#include - -/** - * ---------------------> (time) - * - * = start of lcp 1 - * = stop of lcp 1 - * = start of lcp 2 - * = stop of lcp 2 - * - * If ndb crashes before - * the entire undo log from crash point until has to be applied - * - * at the undo log can be cut til - */ - -#define DEBUG_UNDO_EXECUTION 0 -#define DEBUG_SEARCH_LOG_HEAD 0 - -Lgman::Lgman(Block_context & ctx) : - SimulatedBlock(LGMAN, ctx), - m_logfile_group_list(m_logfile_group_pool), - m_logfile_group_hash(m_logfile_group_pool) -{ - BLOCK_CONSTRUCTOR(Lgman); - - // Add received signals - addRecSignal(GSN_STTOR, &Lgman::execSTTOR); - addRecSignal(GSN_READ_CONFIG_REQ, &Lgman::execREAD_CONFIG_REQ); - addRecSignal(GSN_DUMP_STATE_ORD, &Lgman::execDUMP_STATE_ORD); - addRecSignal(GSN_CONTINUEB, &Lgman::execCONTINUEB); - - addRecSignal(GSN_CREATE_FILE_REQ, &Lgman::execCREATE_FILE_REQ); - addRecSignal(GSN_CREATE_FILEGROUP_REQ, &Lgman::execCREATE_FILEGROUP_REQ); - - addRecSignal(GSN_DROP_FILE_REQ, &Lgman::execDROP_FILE_REQ); - addRecSignal(GSN_DROP_FILEGROUP_REQ, &Lgman::execDROP_FILEGROUP_REQ); - - addRecSignal(GSN_FSWRITEREQ, &Lgman::execFSWRITEREQ); - addRecSignal(GSN_FSWRITEREF, &Lgman::execFSWRITEREF, true); - addRecSignal(GSN_FSWRITECONF, &Lgman::execFSWRITECONF); - - addRecSignal(GSN_FSOPENREF, &Lgman::execFSOPENREF, true); - addRecSignal(GSN_FSOPENCONF, &Lgman::execFSOPENCONF); - - addRecSignal(GSN_FSCLOSECONF, &Lgman::execFSCLOSECONF); - - addRecSignal(GSN_FSREADREF, &Lgman::execFSREADREF, true); - addRecSignal(GSN_FSREADCONF, &Lgman::execFSREADCONF); - - addRecSignal(GSN_LCP_FRAG_ORD, &Lgman::execLCP_FRAG_ORD); - addRecSignal(GSN_END_LCP_REQ, &Lgman::execEND_LCP_REQ); - addRecSignal(GSN_SUB_GCP_COMPLETE_REP, &Lgman::execSUB_GCP_COMPLETE_REP); - addRecSignal(GSN_START_RECREQ, &Lgman::execSTART_RECREQ); - - addRecSignal(GSN_END_LCP_CONF, &Lgman::execEND_LCP_CONF); - - addRecSignal(GSN_GET_TABINFOREQ, &Lgman::execGET_TABINFOREQ); - - m_last_lsn = 1; - m_logfile_group_hash.setSize(10); -} - -Lgman::~Lgman() -{ -} - -BLOCK_FUNCTIONS(Lgman) - -void -Lgman::execREAD_CONFIG_REQ(Signal* signal) -{ - jamEntry(); - - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - Pool_context pc; - pc.m_block = this; - m_log_waiter_pool.wo_pool_init(RT_LGMAN_LOG_WAITER, pc); - m_file_pool.init(RT_LGMAN_FILE, pc); - m_logfile_group_pool.init(RT_LGMAN_FILEGROUP, pc); - m_data_buffer_pool.setSize(10); - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -void -Lgman::execSTTOR(Signal* signal) -{ - jamEntry(); - sendSTTORRY(signal); - - return; -}//Lgman::execNDB_STTOR() - -void -Lgman::sendSTTORRY(Signal* signal) -{ - signal->theData[0] = 0; - signal->theData[3] = 1; - signal->theData[4] = 2; - 
signal->theData[5] = 3; - signal->theData[6] = 4; - signal->theData[7] = 5; - signal->theData[8] = 6; - signal->theData[9] = 255; // No more start phases from missra - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 10, JBB); -} - -void -Lgman::execCONTINUEB(Signal* signal){ - jamEntry(); - - Uint32 type= signal->theData[0]; - Uint32 ptrI = signal->theData[1]; - switch(type){ - case LgmanContinueB::FILTER_LOG: - jam(); - break; - case LgmanContinueB::CUT_LOG_TAIL: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - cut_log_tail(signal, ptr); - return; - } - case LgmanContinueB::FLUSH_LOG: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - flush_log(signal, ptr, signal->theData[2]); - return; - } - case LgmanContinueB::PROCESS_LOG_BUFFER_WAITERS: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - process_log_buffer_waiters(signal, ptr); - return; - } - case LgmanContinueB::FIND_LOG_HEAD: - jam(); - Ptr ptr; - if(ptrI != RNIL) - { - m_logfile_group_pool.getPtr(ptr, ptrI); - find_log_head(signal, ptr); - } - else - { - init_run_undo_log(signal); - } - return; - case LgmanContinueB::EXECUTE_UNDO_RECORD: - jam(); - execute_undo_record(signal); - return; - case LgmanContinueB::STOP_UNDO_LOG: - jam(); - stop_run_undo_log(signal); - return; - case LgmanContinueB::READ_UNDO_LOG: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - read_undo_log(signal, ptr); - return; - } - case LgmanContinueB::PROCESS_LOG_SYNC_WAITERS: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - process_log_sync_waiters(signal, ptr); - return; - } - case LgmanContinueB::FORCE_LOG_SYNC: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - force_log_sync(signal, ptr, signal->theData[2], signal->theData[3]); - return; - } - case LgmanContinueB::DROP_FILEGROUP: - { - jam(); - Ptr ptr; - m_logfile_group_pool.getPtr(ptr, ptrI); - if (ptr.p->m_state & Logfile_group::LG_THREAD_MASK) - { - jam(); - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, - signal->length()); - return; - } - Uint32 ref = signal->theData[2]; - Uint32 data = signal->theData[3]; - drop_filegroup_drop_files(signal, ptr, ref, data); - return; - } - } -} - -void -Lgman::execDUMP_STATE_ORD(Signal* signal){ - jamEntry(); - if(signal->theData[0] == 12001) - { - Ptr ptr; - m_logfile_group_list.first(ptr); - while(!ptr.isNull()) - { - infoEvent("lfg %d state: %x fs: %d lsn " - "[ last: %lld s(req): %lld s:ed: %lld lcp: %lld ] waiters: %d %d", - ptr.p->m_logfile_group_id, ptr.p->m_state, - ptr.p->m_outstanding_fs, - ptr.p->m_last_lsn, ptr.p->m_last_sync_req_lsn, - ptr.p->m_last_synced_lsn, ptr.p->m_last_lcp_lsn, - !ptr.p->m_log_buffer_waiters.isEmpty(), - !ptr.p->m_log_sync_waiters.isEmpty()); - if (!ptr.p->m_log_buffer_waiters.isEmpty()) - { - Ptr waiter; - Local_log_waiter_list - list(m_log_waiter_pool, ptr.p->m_log_buffer_waiters); - list.first(waiter); - infoEvent(" free_buffer_words: %d head(waiters).sz: %d %d", - ptr.p->m_free_buffer_words, - waiter.p->m_size, - 2*File_formats::UNDO_PAGE_WORDS); - } - if (!ptr.p->m_log_sync_waiters.isEmpty()) - { - Ptr waiter; - Local_log_waiter_list - list(m_log_waiter_pool, ptr.p->m_log_sync_waiters); - list.first(waiter); - infoEvent(" m_last_synced_lsn: %lld head(waiters %x).m_sync_lsn: %lld", - ptr.p->m_last_synced_lsn, - waiter.i, - waiter.p->m_sync_lsn); - - while(!waiter.isNull()) - { - ndbout_c("ptr: %x %p lsn: %lld next: %x", - waiter.i, waiter.p, waiter.p->m_sync_lsn, waiter.p->nextList); - list.next(waiter); - } - 
} - m_logfile_group_list.next(ptr); - } - } -} - -void -Lgman::execCREATE_FILEGROUP_REQ(Signal* signal){ - jamEntry(); - CreateFilegroupImplReq* req= (CreateFilegroupImplReq*)signal->getDataPtr(); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - - Ptr ptr; - CreateFilegroupImplRef::ErrorCode err = CreateFilegroupImplRef::NoError; - do { - if (m_logfile_group_hash.find(ptr, req->filegroup_id)) - { - jam(); - err = CreateFilegroupImplRef::FilegroupAlreadyExists; - break; - } - - if (!m_logfile_group_list.isEmpty()) - { - jam(); - err = CreateFilegroupImplRef::OneLogfileGroupLimit; - break; - } - - if (!m_logfile_group_pool.seize(ptr)) - { - jam(); - err = CreateFilegroupImplRef::OutOfFilegroupRecords; - break; - } - - new (ptr.p) Logfile_group(req); - - if (!alloc_logbuffer_memory(ptr, req->logfile_group.buffer_size)) - { - jam(); - err= CreateFilegroupImplRef::OutOfLogBufferMemory; - m_logfile_group_pool.release(ptr); - break; - } - - m_logfile_group_hash.add(ptr); - m_logfile_group_list.add(ptr); - - if (getNodeState().getNodeRestartInProgress() || - getNodeState().getSystemRestartInProgress()) - { - ptr.p->m_state = Logfile_group::LG_STARTING; - } - - CreateFilegroupImplConf* conf= - (CreateFilegroupImplConf*)signal->getDataPtr(); - conf->senderData = senderData; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_CREATE_FILEGROUP_CONF, signal, - CreateFilegroupImplConf::SignalLength, JBB); - - return; - } while(0); - - CreateFilegroupImplRef* ref= (CreateFilegroupImplRef*)signal->getDataPtr(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->errorCode = err; - sendSignal(senderRef, GSN_CREATE_FILEGROUP_REF, signal, - CreateFilegroupImplRef::SignalLength, JBB); -} - -void -Lgman::execDROP_FILEGROUP_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 errorCode = 0; - DropFilegroupImplReq req = *(DropFilegroupImplReq*)signal->getDataPtr(); - do - { - Ptr ptr; - if (!m_logfile_group_hash.find(ptr, req.filegroup_id)) - { - errorCode = DropFilegroupImplRef::NoSuchFilegroup; - break; - } - - if (ptr.p->m_version != req.filegroup_version) - { - errorCode = DropFilegroupImplRef::InvalidFilegroupVersion; - break; - } - - switch(req.requestInfo){ - case DropFilegroupImplReq::Prepare: - break; - case DropFilegroupImplReq::Commit: - m_logfile_group_list.remove(ptr); - ptr.p->m_state |= Logfile_group::LG_DROPPING; - signal->theData[0] = LgmanContinueB::DROP_FILEGROUP; - signal->theData[1] = ptr.i; - signal->theData[2] = req.senderRef; - signal->theData[3] = req.senderData; - sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB); - return; - case DropFilegroupImplReq::Abort: - break; - default: - ndbrequire(false); - } - } while(0); - - if (errorCode) - { - DropFilegroupImplRef* ref = - (DropFilegroupImplRef*)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = req.senderData; - ref->errorCode = errorCode; - sendSignal(req.senderRef, GSN_DROP_FILEGROUP_REF, signal, - DropFilegroupImplRef::SignalLength, JBB); - } - else - { - DropFilegroupImplConf* conf = - (DropFilegroupImplConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = req.senderData; - sendSignal(req.senderRef, GSN_DROP_FILEGROUP_CONF, signal, - DropFilegroupImplConf::SignalLength, JBB); - } -} - -void -Lgman::drop_filegroup_drop_files(Signal* signal, - Ptr ptr, - Uint32 ref, Uint32 data) -{ - jam(); - ndbrequire(! 
(ptr.p->m_state & Logfile_group::LG_THREAD_MASK)); - ndbrequire(ptr.p->m_outstanding_fs == 0); - - Local_undofile_list list(m_file_pool, ptr.p->m_files); - Ptr file_ptr; - - if (list.first(file_ptr)) - { - jam(); - ndbrequire(! (file_ptr.p->m_state & Undofile::FS_OUTSTANDING)); - file_ptr.p->m_create.m_senderRef = ref; - file_ptr.p->m_create.m_senderData = data; - create_file_abort(signal, ptr, file_ptr); - return; - } - - Local_undofile_list metalist(m_file_pool, ptr.p->m_meta_files); - if (metalist.first(file_ptr)) - { - jam(); - metalist.remove(file_ptr); - list.add(file_ptr); - file_ptr.p->m_create.m_senderRef = ref; - file_ptr.p->m_create.m_senderData = data; - create_file_abort(signal, ptr, file_ptr); - return; - } - - free_logbuffer_memory(ptr); - m_logfile_group_hash.release(ptr); - DropFilegroupImplConf *conf = (DropFilegroupImplConf*)signal->getDataPtr(); - conf->senderData = data; - conf->senderRef = reference(); - sendSignal(ref, GSN_DROP_FILEGROUP_CONF, signal, - DropFilegroupImplConf::SignalLength, JBB); -} - -void -Lgman::execCREATE_FILE_REQ(Signal* signal) -{ - jamEntry(); - CreateFileImplReq* req= (CreateFileImplReq*)signal->getDataPtr(); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 requestInfo = req->requestInfo; - - Ptr ptr; - CreateFileImplRef::ErrorCode err = CreateFileImplRef::NoError; - do { - if (!m_logfile_group_hash.find(ptr, req->filegroup_id)) - { - jam(); - err = CreateFileImplRef::InvalidFilegroup; - break; - } - - if (ptr.p->m_version != req->filegroup_version) - { - jam(); - err = CreateFileImplRef::InvalidFilegroupVersion; - break; - } - - Ptr file_ptr; - switch(requestInfo){ - case CreateFileImplReq::Commit: - { - jam(); - ndbrequire(find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)); - file_ptr.p->m_create.m_senderRef = req->senderRef; - file_ptr.p->m_create.m_senderData = req->senderData; - create_file_commit(signal, ptr, file_ptr); - return; - } - case CreateFileImplReq::Abort: - { - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - if (find_file_by_id(file_ptr, ptr.p->m_meta_files, req->file_id)) - { - jam(); - file_ptr.p->m_create.m_senderRef = senderRef; - file_ptr.p->m_create.m_senderData = senderData; - create_file_abort(signal, ptr, file_ptr); - } - else - { - CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); - jam(); - conf->senderData = senderData; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, - CreateFileImplConf::SignalLength, JBB); - } - return; - } - default: // prepare - break; - } - - if (!m_file_pool.seize(file_ptr)) - { - jam(); - err = CreateFileImplRef::OutOfFileRecords; - break; - } - - if(ERROR_INSERTED(15000) || - (sizeof(void*) == 4 && req->file_size_hi & 0xFFFFFFFF)) - { - jam(); - if(signal->getNoOfSections()) - releaseSections(signal); - - CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->errorCode = CreateFileImplRef::FileSizeTooLarge; - sendSignal(senderRef, GSN_CREATE_FILE_REF, signal, - CreateFileImplRef::SignalLength, JBB); - return; - } - - new (file_ptr.p) Undofile(req, ptr.i); - - Local_undofile_list tmp(m_file_pool, ptr.p->m_meta_files); - tmp.add(file_ptr); - - open_file(signal, file_ptr, req->requestInfo); - return; - } while(0); - - CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr(); - ref->senderData = senderData; - ref->senderRef = reference(); - ref->errorCode = err; - 
sendSignal(senderRef, GSN_CREATE_FILE_REF, signal, - CreateFileImplRef::SignalLength, JBB); -} - -void -Lgman::open_file(Signal* signal, Ptr ptr, Uint32 requestInfo) -{ - FsOpenReq* req = (FsOpenReq*)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = ptr.i; - - memset(req->fileNumber, 0, sizeof(req->fileNumber)); - FsOpenReq::setVersion(req->fileNumber, 4); // Version 4 = specified filename - - req->fileFlags = 0; - req->fileFlags |= FsOpenReq::OM_READWRITE; - req->fileFlags |= FsOpenReq::OM_DIRECT; - req->fileFlags |= FsOpenReq::OM_SYNC; - switch(requestInfo){ - case CreateFileImplReq::Create: - req->fileFlags |= FsOpenReq::OM_CREATE_IF_NONE; - req->fileFlags |= FsOpenReq::OM_INIT; - ptr.p->m_state = Undofile::FS_CREATING; - break; - case CreateFileImplReq::CreateForce: - req->fileFlags |= FsOpenReq::OM_CREATE; - req->fileFlags |= FsOpenReq::OM_INIT; - ptr.p->m_state = Undofile::FS_CREATING; - break; - case CreateFileImplReq::Open: - req->fileFlags |= FsOpenReq::OM_CHECK_SIZE; - ptr.p->m_state = Undofile::FS_OPENING; - break; - default: - ndbrequire(false); - } - - req->page_size = File_formats::NDB_PAGE_SIZE; - Uint64 size = (Uint64)ptr.p->m_file_size * (Uint64)File_formats::NDB_PAGE_SIZE; - req->file_size_hi = size >> 32; - req->file_size_lo = size & 0xFFFFFFFF; - - // Forward filename - sendSignal(NDBFS_REF, GSN_FSOPENREQ, signal, FsOpenReq::SignalLength, JBB); -} - -void -Lgman::execFSWRITEREQ(Signal* signal) -{ - jamEntry(); - Ptr ptr; - Ptr page_ptr; - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtr(); - - m_file_pool.getPtr(ptr, req->userPointer); - m_global_page_pool.getPtr(page_ptr, req->data.pageData[0]); - - if (req->varIndex == 0) - { - jam(); - File_formats::Undofile::Zero_page* page = - (File_formats::Undofile::Zero_page*)page_ptr.p; - page->m_page_header.init(File_formats::FT_Undofile, - getOwnNodeId(), - ndbGetOwnVersion(), - time(0)); - page->m_file_id = ptr.p->m_file_id; - page->m_logfile_group_id = ptr.p->m_create.m_logfile_group_id; - page->m_logfile_group_version = ptr.p->m_create.m_logfile_group_version; - page->m_undo_pages = ptr.p->m_file_size - 1; // minus zero page - } - else - { - jam(); - File_formats::Undofile::Undo_page* page = - (File_formats::Undofile::Undo_page*)page_ptr.p; - page->m_page_header.m_page_lsn_hi = 0; - page->m_page_header.m_page_lsn_lo = 0; - page->m_page_header.m_page_type = File_formats::PT_Undopage; - page->m_words_used = 0; - } -} - -void -Lgman::execFSOPENREF(Signal* signal) -{ - jamEntry(); - - Ptr ptr; - Ptr lg_ptr; - FsRef* ref = (FsRef*)signal->getDataPtr(); - - Uint32 errCode = ref->errorCode; - Uint32 osErrCode = ref->osErrorCode; - - m_file_pool.getPtr(ptr, ref->userPointer); - m_logfile_group_pool.getPtr(lg_ptr, ptr.p->m_logfile_group_ptr_i); - - { - CreateFileImplRef* ref= (CreateFileImplRef*)signal->getDataPtr(); - ref->senderData = ptr.p->m_create.m_senderData; - ref->senderRef = reference(); - ref->errorCode = CreateFileImplRef::FileError; - ref->fsErrCode = errCode; - ref->osErrCode = osErrCode; - sendSignal(ptr.p->m_create.m_senderRef, GSN_CREATE_FILE_REF, signal, - CreateFileImplRef::SignalLength, JBB); - } - - Local_undofile_list meta(m_file_pool, lg_ptr.p->m_meta_files); - meta.release(ptr); -} - -#define HEAD 0 -#define TAIL 1 - -void -Lgman::execFSOPENCONF(Signal* signal) -{ - jamEntry(); - Ptr ptr; - - FsConf* conf = (FsConf*)signal->getDataPtr(); - - Uint32 fd = conf->filePointer; - m_file_pool.getPtr(ptr, conf->userPointer); - - ptr.p->m_fd = fd; - - { - Uint32 senderRef = 
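// Sketch: execFSWRITEREQ() above is called back by NDBFS while a new undo
// file is initialised: page 0 is formatted with the zero-page header (magic,
// byte-order marker, page size, version, node id, file type, creation time)
// and every other page with an empty undo-page header. A standalone
// restatement of the page-0 formatting using plain types instead of the
// File_formats structs:
#include <cstdint>
#include <cstring>
#include <ctime>

struct ZeroPageHeader {
  char     magic[8];
  uint32_t byteOrder, pageSize, ndbVersion, nodeId, fileType, createTime;
};

static void initZeroPage(ZeroPageHeader &h, uint32_t nodeId,
                         uint32_t version, uint32_t fileType)
{
  std::memcpy(h.magic, "NDBDISK", 8);       // 7 characters plus the terminator
  h.byteOrder  = 0x12345678;                // byte-order marker
  h.pageSize   = 32768;                     // File_formats::NDB_PAGE_SIZE
  h.ndbVersion = version;
  h.nodeId     = nodeId;
  h.fileType   = fileType;                  // FT_Undofile for an undo file
  h.createTime = (uint32_t)std::time(nullptr);
}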
ptr.p->m_create.m_senderRef; - Uint32 senderData = ptr.p->m_create.m_senderData; - - CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); - conf->senderData = senderData; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, - CreateFileImplConf::SignalLength, JBB); - } -} - -bool -Lgman::find_file_by_id(Ptr& ptr, - Local_undofile_list::Head& head, Uint32 id) -{ - Local_undofile_list list(m_file_pool, head); - for(list.first(ptr); !ptr.isNull(); list.next(ptr)) - if(ptr.p->m_file_id == id) - return true; - return false; -} - -void -Lgman::create_file_commit(Signal* signal, - Ptr lg_ptr, - Ptr ptr) -{ - Uint32 senderRef = ptr.p->m_create.m_senderRef; - Uint32 senderData = ptr.p->m_create.m_senderData; - - bool first= false; - if(ptr.p->m_state == Undofile::FS_CREATING && - (lg_ptr.p->m_state & Logfile_group::LG_ONLINE)) - { - jam(); - Local_undofile_list free(m_file_pool, lg_ptr.p->m_files); - Local_undofile_list meta(m_file_pool, lg_ptr.p->m_meta_files); - first= free.isEmpty(); - meta.remove(ptr); - if(!first) - { - /** - * Add log file next after current head - */ - Ptr curr; - m_file_pool.getPtr(curr, lg_ptr.p->m_file_pos[HEAD].m_ptr_i); - if(free.next(curr)) - free.insert(ptr, curr); // inserts before (that's why the extra next) - else - free.add(ptr); - - ptr.p->m_state = Undofile::FS_ONLINE | Undofile::FS_EMPTY; - } - else - { - /** - * First file isn't empty as it can be written to at any time - */ - free.add(ptr); - ptr.p->m_state = Undofile::FS_ONLINE; - lg_ptr.p->m_state |= Logfile_group::LG_FLUSH_THREAD; - signal->theData[0] = LgmanContinueB::FLUSH_LOG; - signal->theData[1] = lg_ptr.i; - signal->theData[2] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - } - } - else - { - ptr.p->m_state = Undofile::FS_SORTING; - } - - ptr.p->m_online.m_lsn = 0; - ptr.p->m_online.m_outstanding = 0; - - Uint64 add= ptr.p->m_file_size - 1; - lg_ptr.p->m_free_file_words += add * File_formats::UNDO_PAGE_WORDS; - - if(first) - { - jam(); - - Buffer_idx tmp= { ptr.i, 0 }; - lg_ptr.p->m_file_pos[HEAD] = lg_ptr.p->m_file_pos[TAIL] = tmp; - - /** - * Init log tail pointer - */ - lg_ptr.p->m_tail_pos[0] = tmp; - lg_ptr.p->m_tail_pos[1] = tmp; - lg_ptr.p->m_tail_pos[2] = tmp; - lg_ptr.p->m_next_reply_ptr_i = ptr.i; - } - - validate_logfile_group(lg_ptr, "create_file_commit"); - - CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); - conf->senderData = senderData; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, - CreateFileImplConf::SignalLength, JBB); -} - -void -Lgman::create_file_abort(Signal* signal, - Ptr lg_ptr, - Ptr ptr) -{ - if (ptr.p->m_fd == RNIL) - { - ((FsConf*)signal->getDataPtr())->userPointer = ptr.i; - execFSCLOSECONF(signal); - return; - } - - FsCloseReq *req= (FsCloseReq*)signal->getDataPtrSend(); - req->filePointer = ptr.p->m_fd; - req->userReference = reference(); - req->userPointer = ptr.i; - req->fileFlag = 0; - FsCloseReq::setRemoveFileFlag(req->fileFlag, true); - - sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, - FsCloseReq::SignalLength, JBB); -} - -void -Lgman::execFSCLOSECONF(Signal* signal) -{ - Ptr ptr; - Ptr lg_ptr; - Uint32 ptrI = ((FsConf*)signal->getDataPtr())->userPointer; - m_file_pool.getPtr(ptr, ptrI); - - Uint32 senderRef = ptr.p->m_create.m_senderRef; - Uint32 senderData = ptr.p->m_create.m_senderData; - - m_logfile_group_pool.getPtr(lg_ptr, ptr.p->m_logfile_group_ptr_i); - - if (lg_ptr.p->m_state & Logfile_group::LG_DROPPING) - { - jam(); - { - 
Local_undofile_list list(m_file_pool, lg_ptr.p->m_files); - list.release(ptr); - } - drop_filegroup_drop_files(signal, lg_ptr, senderRef, senderData); - } - else - { - jam(); - Local_undofile_list list(m_file_pool, lg_ptr.p->m_meta_files); - list.release(ptr); - - CreateFileImplConf* conf= (CreateFileImplConf*)signal->getDataPtr(); - conf->senderData = senderData; - conf->senderRef = reference(); - sendSignal(senderRef, GSN_CREATE_FILE_CONF, signal, - CreateFileImplConf::SignalLength, JBB); - } -} - -void -Lgman::execDROP_FILE_REQ(Signal* signal) -{ - jamEntry(); - ndbrequire(false); -} - -#define CONSUMER 0 -#define PRODUCER 1 - -Lgman::Logfile_group::Logfile_group(const CreateFilegroupImplReq* req) -{ - m_logfile_group_id = req->filegroup_id; - m_version = req->filegroup_version; - m_state = LG_ONLINE; - m_outstanding_fs = 0; - m_next_reply_ptr_i = RNIL; - - m_last_lsn = 0; - m_last_synced_lsn = 0; - m_last_sync_req_lsn = 0; - m_max_sync_req_lsn = 0; - m_last_read_lsn = 0; - m_file_pos[0].m_ptr_i= m_file_pos[1].m_ptr_i = RNIL; - - m_free_file_words = 0; - m_free_buffer_words = 0; - m_pos[CONSUMER].m_current_page.m_ptr_i = RNIL;// { m_buffer_pages, idx } - m_pos[CONSUMER].m_current_pos.m_ptr_i = RNIL; // { page ptr.i, m_words_used} - m_pos[PRODUCER].m_current_page.m_ptr_i = RNIL;// { m_buffer_pages, idx } - m_pos[PRODUCER].m_current_pos.m_ptr_i = RNIL; // { page ptr.i, m_words_used} - - m_tail_pos[2].m_ptr_i= RNIL; - m_tail_pos[2].m_idx= ~0; - - m_tail_pos[0] = m_tail_pos[1] = m_tail_pos[2]; -} - -bool -Lgman::alloc_logbuffer_memory(Ptr ptr, Uint32 bytes) -{ - Uint32 pages= (((bytes + 3) >> 2) + File_formats::NDB_PAGE_SIZE_WORDS - 1) - / File_formats::NDB_PAGE_SIZE_WORDS; - Uint32 requested= pages; - { - Page_map map(m_data_buffer_pool, ptr.p->m_buffer_pages); - while(pages) - { - Uint32 ptrI; - Uint32 cnt = pages > 64 ? 
64 : pages; - m_ctx.m_mm.alloc_pages(RG_DISK_OPERATIONS, &ptrI, &cnt, 1); - if (cnt) - { - Buffer_idx range; - range.m_ptr_i= ptrI; - range.m_idx = cnt; - - ndbrequire(map.append((Uint32*)&range, 2)); - pages -= range.m_idx; - } - else - { - break; - } - } - } - - if(2*pages > requested) - { - // less than half allocated - free_logbuffer_memory(ptr); - return false; - } - - if(pages != 0) - { - warningEvent("Allocated %d pages for log buffer space, logfile_group: %d" - " , requested %d pages", - (requested-pages), ptr.p->m_logfile_group_id, requested); - } - - init_logbuffer_pointers(ptr); - return true; -} - -void -Lgman::init_logbuffer_pointers(Ptr ptr) -{ - Page_map map(m_data_buffer_pool, ptr.p->m_buffer_pages); - Page_map::Iterator it; - union { - Uint32 tmp[2]; - Buffer_idx range; - }; - - map.first(it); - tmp[0] = *it.data; - ndbrequire(map.next(it)); - tmp[1] = *it.data; - - ptr.p->m_pos[CONSUMER].m_current_page.m_ptr_i = 0; // Index in page map - ptr.p->m_pos[CONSUMER].m_current_page.m_idx = range.m_idx - 1;// left range - ptr.p->m_pos[CONSUMER].m_current_pos.m_ptr_i = range.m_ptr_i; // Which page - ptr.p->m_pos[CONSUMER].m_current_pos.m_idx = 0; // Page pos - - ptr.p->m_pos[PRODUCER].m_current_page.m_ptr_i = 0; // Index in page map - ptr.p->m_pos[PRODUCER].m_current_page.m_idx = range.m_idx - 1;// left range - ptr.p->m_pos[PRODUCER].m_current_pos.m_ptr_i = range.m_ptr_i; // Which page - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx = 0; // Page pos - - Uint32 pages= range.m_idx; - while(map.next(it)) - { - tmp[0] = *it.data; - ndbrequire(map.next(it)); - tmp[1] = *it.data; - pages += range.m_idx; - } - - ptr.p->m_free_buffer_words = pages * File_formats::UNDO_PAGE_WORDS; -} - -Uint32 -Lgman::compute_free_file_pages(Ptr ptr) -{ - Buffer_idx head= ptr.p->m_file_pos[HEAD]; - Buffer_idx tail= ptr.p->m_file_pos[TAIL]; - Uint32 pages = 0; - if (head.m_ptr_i == tail.m_ptr_i && head.m_idx < tail.m_idx) - { - pages += tail.m_idx - head.m_idx; - } - else - { - Ptr file; - m_file_pool.getPtr(file, head.m_ptr_i); - Local_undofile_list list(m_file_pool, ptr.p->m_files); - - do - { - pages += (file.p->m_file_size - head.m_idx - 1); - if(!list.next(file)) - list.first(file); - head.m_idx = 0; - } while(file.i != tail.m_ptr_i); - - pages += tail.m_idx - head.m_idx; - } - return pages; -} - -void -Lgman::free_logbuffer_memory(Ptr ptr) -{ - union { - Uint32 tmp[2]; - Buffer_idx range; - }; - - Page_map map(m_data_buffer_pool, ptr.p->m_buffer_pages); - - Page_map::Iterator it; - map.first(it); - while(!it.isNull()) - { - tmp[0] = *it.data; - ndbrequire(map.next(it)); - tmp[1] = *it.data; - - m_ctx.m_mm.release_pages(RG_DISK_OPERATIONS, range.m_ptr_i, range.m_idx); - map.next(it); - } - map.release(); -} - -Lgman::Undofile::Undofile(const struct CreateFileImplReq* req, Uint32 ptrI) -{ - m_fd = RNIL; - m_file_id = req->file_id; - m_logfile_group_ptr_i= ptrI; - - Uint64 pages = req->file_size_hi; - pages = (pages << 32) | req->file_size_lo; - pages /= GLOBAL_PAGE_SIZE; - m_file_size = pages; - - m_create.m_senderRef = req->senderRef; // During META - m_create.m_senderData = req->senderData; // During META - m_create.m_logfile_group_id = req->filegroup_id; -} - -Logfile_client::Logfile_client(SimulatedBlock* block, - Lgman* lgman, Uint32 logfile_group_id) -{ - m_block= block->number(); - m_lgman= lgman; - m_logfile_group_id= logfile_group_id; -} - -int -Logfile_client::sync_lsn(Signal* signal, - Uint64 lsn, Request* req, Uint32 flags) -{ - Ptr ptr; - if(m_lgman->m_logfile_group_list.first(ptr)) - { - 
if(ptr.p->m_last_synced_lsn >= lsn) - { - return 1; - } - - bool empty= false; - Ptr wait; - { - Lgman::Local_log_waiter_list - list(m_lgman->m_log_waiter_pool, ptr.p->m_log_sync_waiters); - - empty= list.isEmpty(); - if(!list.seize(wait)) - return -1; - - wait.p->m_block= m_block; - wait.p->m_sync_lsn= lsn; - memcpy(&wait.p->m_callback, &req->m_callback, - sizeof(SimulatedBlock::Callback)); - - ptr.p->m_max_sync_req_lsn = lsn > ptr.p->m_max_sync_req_lsn ? - lsn : ptr.p->m_max_sync_req_lsn; - } - - if(ptr.p->m_last_sync_req_lsn < lsn && - ! (ptr.p->m_state & Lgman::Logfile_group::LG_FORCE_SYNC_THREAD)) - { - ptr.p->m_state |= Lgman::Logfile_group::LG_FORCE_SYNC_THREAD; - signal->theData[0] = LgmanContinueB::FORCE_LOG_SYNC; - signal->theData[1] = ptr.i; - signal->theData[2] = lsn >> 32; - signal->theData[3] = lsn & 0xFFFFFFFF; - m_lgman->sendSignalWithDelay(m_lgman->reference(), - GSN_CONTINUEB, signal, 10, 4); - } - return 0; - } - return -1; -} - -void -Lgman::force_log_sync(Signal* signal, - Ptr ptr, - Uint32 lsn_hi, Uint32 lsn_lo) -{ - Local_log_waiter_list list(m_log_waiter_pool, ptr.p->m_log_sync_waiters); - Uint64 force_lsn = lsn_hi; force_lsn <<= 32; force_lsn += lsn_lo; - - if(ptr.p->m_last_sync_req_lsn < force_lsn) - { - /** - * Do force - */ - Buffer_idx pos= ptr.p->m_pos[PRODUCER].m_current_pos; - GlobalPage *page = m_shared_page_pool.getPtr(pos.m_ptr_i); - - Uint32 free= File_formats::UNDO_PAGE_WORDS - pos.m_idx; - if(pos.m_idx) // don't flush empty page... - { - Uint64 lsn= ptr.p->m_last_lsn - 1; - - File_formats::Undofile::Undo_page* undo= - (File_formats::Undofile::Undo_page*)page; - undo->m_page_header.m_page_lsn_lo = lsn & 0xFFFFFFFF; - undo->m_page_header.m_page_lsn_hi = lsn >> 32; - undo->m_words_used= File_formats::UNDO_PAGE_WORDS - free; - - /** - * Update free space with extra NOOP - */ - ndbrequire(ptr.p->m_free_file_words >= free); - ndbrequire(ptr.p->m_free_buffer_words > free); - ptr.p->m_free_file_words -= free; - ptr.p->m_free_buffer_words -= free; - - validate_logfile_group(ptr, "force_log_sync"); - - next_page(ptr.p, PRODUCER); - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx = 0; - } - } - - - - Uint64 max_req_lsn = ptr.p->m_max_sync_req_lsn; - if(max_req_lsn > force_lsn && - max_req_lsn > ptr.p->m_last_sync_req_lsn) - { - ndbrequire(ptr.p->m_state & Lgman::Logfile_group::LG_FORCE_SYNC_THREAD); - signal->theData[0] = LgmanContinueB::FORCE_LOG_SYNC; - signal->theData[1] = ptr.i; - signal->theData[2] = max_req_lsn >> 32; - signal->theData[3] = max_req_lsn & 0xFFFFFFFF; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 4); - } - else - { - ptr.p->m_state &= ~(Uint32)Lgman::Logfile_group::LG_FORCE_SYNC_THREAD; - } -} - -void -Lgman::process_log_sync_waiters(Signal* signal, Ptr ptr) -{ - Local_log_waiter_list - list(m_log_waiter_pool, ptr.p->m_log_sync_waiters); - - if(list.isEmpty()) - { - return; - } - - bool removed= false; - Ptr waiter; - list.first(waiter); - Uint32 logfile_group_id = ptr.p->m_logfile_group_id; - - if(waiter.p->m_sync_lsn <= ptr.p->m_last_synced_lsn) - { - removed= true; - Uint32 block = waiter.p->m_block; - SimulatedBlock* b = globalData.getBlock(block); - b->execute(signal, waiter.p->m_callback, logfile_group_id); - - list.releaseFirst(waiter); - } - - if(removed && !list.isEmpty()) - { - ptr.p->m_state |= Logfile_group::LG_SYNC_WAITERS_THREAD; - signal->theData[0] = LgmanContinueB::PROCESS_LOG_SYNC_WAITERS; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - } - else - { - ptr.p->m_state &= 
~(Uint32)Logfile_group::LG_SYNC_WAITERS_THREAD; - } -} - - -Uint32* -Lgman::get_log_buffer(Ptr ptr, Uint32 sz) -{ - GlobalPage *page; - page=m_shared_page_pool.getPtr(ptr.p->m_pos[PRODUCER].m_current_pos.m_ptr_i); - - Uint32 total_free= ptr.p->m_free_buffer_words; - assert(total_free >= sz); - Uint32 pos= ptr.p->m_pos[PRODUCER].m_current_pos.m_idx; - Uint32 free= File_formats::UNDO_PAGE_WORDS - pos; - - if(sz <= free) - { -next: - // fits this page wo/ problem - ndbrequire(total_free > sz); - ptr.p->m_free_buffer_words = total_free - sz; - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx = pos + sz; - return ((File_formats::Undofile::Undo_page*)page)->m_data + pos; - } - - /** - * It didn't fit page...fill page with a NOOP log entry - */ - Uint64 lsn= ptr.p->m_last_lsn - 1; - File_formats::Undofile::Undo_page* undo= - (File_formats::Undofile::Undo_page*)page; - undo->m_page_header.m_page_lsn_lo = lsn & 0xFFFFFFFF; - undo->m_page_header.m_page_lsn_hi = lsn >> 32; - undo->m_words_used= File_formats::UNDO_PAGE_WORDS - free; - - /** - * Update free space with extra NOOP - */ - ndbrequire(ptr.p->m_free_file_words >= free); - ptr.p->m_free_file_words -= free; - - validate_logfile_group(ptr, "get_log_buffer"); - - pos= 0; - assert(total_free >= free); - total_free -= free; - page= m_shared_page_pool.getPtr(next_page(ptr.p, PRODUCER)); - goto next; -} - -Uint32 -Lgman::next_page(Logfile_group* ptrP, Uint32 i) -{ - Uint32 page_ptr_i= ptrP->m_pos[i].m_current_pos.m_ptr_i; - Uint32 left_in_range= ptrP->m_pos[i].m_current_page.m_idx; - if(left_in_range > 0) - { - ptrP->m_pos[i].m_current_page.m_idx = left_in_range - 1; - ptrP->m_pos[i].m_current_pos.m_ptr_i = page_ptr_i + 1; - return page_ptr_i + 1; - } - else - { - Lgman::Page_map map(m_data_buffer_pool, ptrP->m_buffer_pages); - Uint32 pos= (ptrP->m_pos[i].m_current_page.m_ptr_i + 2) % map.getSize(); - Lgman::Page_map::Iterator it; - map.position(it, pos); - - union { - Uint32 tmp[2]; - Lgman::Buffer_idx range; - }; - - tmp[0] = *it.data; map.next(it); - tmp[1] = *it.data; - - ptrP->m_pos[i].m_current_page.m_ptr_i = pos; // New index in map - ptrP->m_pos[i].m_current_page.m_idx = range.m_idx - 1; // Free pages - ptrP->m_pos[i].m_current_pos.m_ptr_i = range.m_ptr_i; // Current page - // No need to set ptrP->m_current_pos.m_idx, that is set "in higher"-func - return range.m_ptr_i; - } -} - -int -Logfile_client::get_log_buffer(Signal* signal, Uint32 sz, - SimulatedBlock::Callback* callback) -{ - sz += 2; // lsn - Lgman::Logfile_group key; - key.m_logfile_group_id= m_logfile_group_id; - Ptr ptr; - if(m_lgman->m_logfile_group_hash.find(ptr, key)) - { - if(ptr.p->m_free_buffer_words >= (sz + 2*File_formats::UNDO_PAGE_WORDS)&& - ptr.p->m_log_buffer_waiters.isEmpty()) - { - return 1; - } - - bool empty= false; - { - Ptr wait; - Lgman::Local_log_waiter_list - list(m_lgman->m_log_waiter_pool, ptr.p->m_log_buffer_waiters); - - empty= list.isEmpty(); - if(!list.seize(wait)) - { - return -1; - } - - wait.p->m_size= sz; - wait.p->m_block= m_block; - memcpy(&wait.p->m_callback, callback,sizeof(SimulatedBlock::Callback)); - } - - return 0; - } - return -1; -} - -NdbOut& -operator<<(NdbOut& out, const Lgman::Buffer_idx& pos) -{ - out << "[ " - << pos.m_ptr_i << " " - << pos.m_idx << " ]"; - return out; -} - -NdbOut& -operator<<(NdbOut& out, const Lgman::Logfile_group::Position& pos) -{ - out << "[ (" - << pos.m_current_page.m_ptr_i << " " - << pos.m_current_page.m_idx << ") (" - << pos.m_current_pos.m_ptr_i << " " - << pos.m_current_pos.m_idx << ") ]"; - return out; -} - 
-void -Lgman::flush_log(Signal* signal, Ptr ptr, Uint32 force) -{ - Logfile_group::Position consumer= ptr.p->m_pos[CONSUMER]; - Logfile_group::Position producer= ptr.p->m_pos[PRODUCER]; - - jamEntry(); - - if(consumer.m_current_page == producer.m_current_page) - { - -#if 0 - if (force) - { - ndbout_c("force: %d ptr.p->m_file_pos[HEAD].m_ptr_i= %x", - force, ptr.p->m_file_pos[HEAD].m_ptr_i); - ndbout_c("consumer.m_current_page: %d %d producer.m_current_page: %d %d", - consumer.m_current_page.m_ptr_i, consumer.m_current_page.m_idx, - producer.m_current_page.m_ptr_i, producer.m_current_page.m_idx); - } -#endif - if (! (ptr.p->m_state & Logfile_group::LG_DROPPING)) - { - jam(); - - if (ptr.p->m_log_buffer_waiters.isEmpty() || ptr.p->m_outstanding_fs) - { - force = 0; - } - - if (force < 2) - { - signal->theData[0] = LgmanContinueB::FLUSH_LOG; - signal->theData[1] = ptr.i; - signal->theData[2] = force + 1; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, - force ? 10 : 100, 3); - return; - } - else - { - Buffer_idx pos= producer.m_current_pos; - GlobalPage *page = m_shared_page_pool.getPtr(pos.m_ptr_i); - - Uint32 free= File_formats::UNDO_PAGE_WORDS - pos.m_idx; - - ndbout_c("force flush %d %d", pos.m_idx, ptr.p->m_free_buffer_words); - - ndbrequire(pos.m_idx); // don't flush empty page... - Uint64 lsn= ptr.p->m_last_lsn - 1; - - File_formats::Undofile::Undo_page* undo= - (File_formats::Undofile::Undo_page*)page; - undo->m_page_header.m_page_lsn_lo = lsn & 0xFFFFFFFF; - undo->m_page_header.m_page_lsn_hi = lsn >> 32; - undo->m_words_used= File_formats::UNDO_PAGE_WORDS - free; - - /** - * Update free space with extra NOOP - */ - ndbrequire(ptr.p->m_free_file_words >= free); - ndbrequire(ptr.p->m_free_buffer_words > free); - ptr.p->m_free_file_words -= free; - ptr.p->m_free_buffer_words -= free; - - validate_logfile_group(ptr, "force_log_flush"); - - next_page(ptr.p, PRODUCER); - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx = 0; - producer = ptr.p->m_pos[PRODUCER]; - // break through - } - } - else - { - jam(); - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_FLUSH_THREAD; - return; - } - } - - bool full= false; - Uint32 tot= 0; - while(!(consumer.m_current_page == producer.m_current_page) && !full) - { - validate_logfile_group(ptr, "before flush log"); - - Uint32 cnt; // pages written - Uint32 page= consumer.m_current_pos.m_ptr_i; - if(consumer.m_current_page.m_ptr_i == producer.m_current_page.m_ptr_i) - { - if(consumer.m_current_page.m_idx > producer.m_current_page.m_idx) - { - jam(); - Uint32 tmp= - consumer.m_current_page.m_idx - producer.m_current_page.m_idx; - cnt= write_log_pages(signal, ptr, page, tmp); - assert(cnt <= tmp); - - consumer.m_current_pos.m_ptr_i += cnt; - consumer.m_current_page.m_idx -= cnt; - full= (tmp > cnt); - } - else - { - // Only 1 chunk - ndbrequire(ptr.p->m_buffer_pages.getSize() == 2); - Uint32 tmp= consumer.m_current_page.m_idx + 1; - cnt= write_log_pages(signal, ptr, page, tmp); - assert(cnt <= tmp); - - if(cnt == tmp) - { - jam(); - /** - * Entire chunk is written - * move to next - */ - ptr.p->m_pos[CONSUMER].m_current_page.m_idx= 0; - next_page(ptr.p, CONSUMER); - consumer = ptr.p->m_pos[CONSUMER]; - } - else - { - jam(); - /** - * Failed to write entire chunk... 
- */ - full= true; - consumer.m_current_page.m_idx -= cnt; - consumer.m_current_pos.m_ptr_i += cnt; - } - } - } - else - { - Uint32 tmp= consumer.m_current_page.m_idx + 1; - cnt= write_log_pages(signal, ptr, page, tmp); - assert(cnt <= tmp); - - if(cnt == tmp) - { - jam(); - /** - * Entire chunk is written - * move to next - */ - ptr.p->m_pos[CONSUMER].m_current_page.m_idx= 0; - next_page(ptr.p, CONSUMER); - consumer = ptr.p->m_pos[CONSUMER]; - } - else - { - jam(); - /** - * Failed to write entire chunk... - */ - full= true; - consumer.m_current_page.m_idx -= cnt; - consumer.m_current_pos.m_ptr_i += cnt; - } - } - - tot += cnt; - if(cnt) - validate_logfile_group(ptr, " after flush_log"); - } - - ptr.p->m_pos[CONSUMER]= consumer; - - if (! (ptr.p->m_state & Logfile_group::LG_DROPPING)) - { - signal->theData[0] = LgmanContinueB::FLUSH_LOG; - signal->theData[1] = ptr.i; - signal->theData[2] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - } - else - { - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_FLUSH_THREAD; - } -} - -void -Lgman::process_log_buffer_waiters(Signal* signal, Ptr ptr) -{ - Uint32 free_buffer= ptr.p->m_free_buffer_words; - Local_log_waiter_list - list(m_log_waiter_pool, ptr.p->m_log_buffer_waiters); - - if(list.isEmpty()) - { - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_WAITERS_THREAD; - return; - } - - bool removed= false; - Ptr waiter; - list.first(waiter); - Uint32 logfile_group_id = ptr.p->m_logfile_group_id; - if(waiter.p->m_size + 2*File_formats::UNDO_PAGE_WORDS < free_buffer) - { - removed= true; - Uint32 block = waiter.p->m_block; - SimulatedBlock* b = globalData.getBlock(block); - b->execute(signal, waiter.p->m_callback, logfile_group_id); - - list.releaseFirst(waiter); - } - - if(removed && !list.isEmpty()) - { - ptr.p->m_state |= Logfile_group::LG_WAITERS_THREAD; - signal->theData[0] = LgmanContinueB::PROCESS_LOG_BUFFER_WAITERS; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - } - else - { - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_WAITERS_THREAD; - } -} - -#define REALLY_SLOW_FS 0 - -Uint32 -Lgman::write_log_pages(Signal* signal, Ptr ptr, - Uint32 pageId, Uint32 in_pages) -{ - assert(in_pages); - Ptr filePtr; - Buffer_idx head= ptr.p->m_file_pos[HEAD]; - Buffer_idx tail= ptr.p->m_file_pos[TAIL]; - m_file_pool.getPtr(filePtr, head.m_ptr_i); - - if(filePtr.p->m_online.m_outstanding > 0) - { - jam(); - return 0; - } - - Uint32 sz= filePtr.p->m_file_size - 1; // skip zero - Uint32 max, pages= in_pages; - - if(!(head.m_ptr_i == tail.m_ptr_i && head.m_idx < tail.m_idx)) - { - max= sz - head.m_idx; - } - else - { - max= tail.m_idx - head.m_idx; - } - - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtrSend(); - req->filePointer = filePtr.p->m_fd; - req->userReference = reference(); - req->userPointer = filePtr.i; - req->varIndex = 1+head.m_idx; // skip zero page - req->numberOfPages = pages; - req->data.pageData[0] = pageId; - req->operationFlag = 0; - FsReadWriteReq::setFormatFlag(req->operationFlag, - FsReadWriteReq::fsFormatSharedPage); - - if(max > pages) - { - jam(); - max= pages; - head.m_idx += max; - ptr.p->m_file_pos[HEAD] = head; - - if (REALLY_SLOW_FS) - sendSignalWithDelay(NDBFS_REF, GSN_FSWRITEREQ, signal, REALLY_SLOW_FS, - FsReadWriteReq::FixedLength + 1); - else - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - filePtr.p->m_online.m_outstanding = max; - filePtr.p->m_state |= Undofile::FS_OUTSTANDING; - - 
File_formats::Undofile::Undo_page *page= (File_formats::Undofile::Undo_page*) - m_shared_page_pool.getPtr(pageId + max - 1); - Uint64 lsn = 0; - lsn += page->m_page_header.m_page_lsn_hi; lsn <<= 32; - lsn += page->m_page_header.m_page_lsn_lo; - - filePtr.p->m_online.m_lsn = lsn; // Store last writereq lsn on file - ptr.p->m_last_sync_req_lsn = lsn; // And logfile_group - } - else - { - jam(); - req->numberOfPages = max; - FsReadWriteReq::setSyncFlag(req->operationFlag, 1); - - if (REALLY_SLOW_FS) - sendSignalWithDelay(NDBFS_REF, GSN_FSWRITEREQ, signal, REALLY_SLOW_FS, - FsReadWriteReq::FixedLength + 1); - else - sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - filePtr.p->m_online.m_outstanding = max; - filePtr.p->m_state |= Undofile::FS_OUTSTANDING; - - File_formats::Undofile::Undo_page *page= (File_formats::Undofile::Undo_page*) - m_shared_page_pool.getPtr(pageId + max - 1); - Uint64 lsn = 0; - lsn += page->m_page_header.m_page_lsn_hi; lsn <<= 32; - lsn += page->m_page_header.m_page_lsn_lo; - - filePtr.p->m_online.m_lsn = lsn; // Store last writereq lsn on file - ptr.p->m_last_sync_req_lsn = lsn; // And logfile_group - - Ptr next = filePtr; - Local_undofile_list files(m_file_pool, ptr.p->m_files); - if(!files.next(next)) - { - jam(); - files.first(next); - } - ndbout_c("changing file from %d to %d", filePtr.i, next.i); - filePtr.p->m_state |= Undofile::FS_MOVE_NEXT; - next.p->m_state &= ~(Uint32)Undofile::FS_EMPTY; - - head.m_idx= 0; - head.m_ptr_i= next.i; - ptr.p->m_file_pos[HEAD] = head; - if(max < pages) - max += write_log_pages(signal, ptr, pageId + max, pages - max); - } - - assert(max); - return max; -} - -void -Lgman::execFSWRITEREF(Signal* signal) -{ - jamEntry(); - SimulatedBlock::execFSWRITEREF(signal); - ndbrequire(false); -} - -void -Lgman::execFSWRITECONF(Signal* signal) -{ - jamEntry(); - FsConf * conf = (FsConf*)signal->getDataPtr(); - Ptr ptr; - m_file_pool.getPtr(ptr, conf->userPointer); - - ndbrequire(ptr.p->m_state & Undofile::FS_OUTSTANDING); - ptr.p->m_state &= ~(Uint32)Undofile::FS_OUTSTANDING; - - Ptr lg_ptr; - m_logfile_group_pool.getPtr(lg_ptr, ptr.p->m_logfile_group_ptr_i); - - Uint32 cnt= lg_ptr.p->m_outstanding_fs; - ndbrequire(cnt); - - if(lg_ptr.p->m_next_reply_ptr_i == ptr.i) - { - Uint32 tot= 0; - Uint64 lsn = 0; - { - Local_undofile_list files(m_file_pool, lg_ptr.p->m_files); - while(cnt && ! (ptr.p->m_state & Undofile::FS_OUTSTANDING)) - { - Uint32 state= ptr.p->m_state; - Uint32 pages= ptr.p->m_online.m_outstanding; - ndbrequire(pages); - ptr.p->m_online.m_outstanding= 0; - ptr.p->m_state &= ~(Uint32)Undofile::FS_MOVE_NEXT; - tot += pages; - cnt--; - - lsn = ptr.p->m_online.m_lsn; - - if((state & Undofile::FS_MOVE_NEXT) && !files.next(ptr)) - files.first(ptr); - } - } - - ndbassert(tot); - lg_ptr.p->m_outstanding_fs = cnt; - lg_ptr.p->m_free_buffer_words += (tot * File_formats::UNDO_PAGE_WORDS); - lg_ptr.p->m_next_reply_ptr_i = ptr.i; - lg_ptr.p->m_last_synced_lsn = lsn; - - if(! (lg_ptr.p->m_state & Logfile_group::LG_SYNC_WAITERS_THREAD)) - { - process_log_sync_waiters(signal, lg_ptr); - } - - if(! 
(lg_ptr.p->m_state & Logfile_group::LG_WAITERS_THREAD)) - { - process_log_buffer_waiters(signal, lg_ptr); - } - } - else - { - ndbout_c("miss matched writes"); - } - - return; -} - -void -Lgman::execLCP_FRAG_ORD(Signal* signal) -{ - jamEntry(); - - LcpFragOrd * ord = (LcpFragOrd *)signal->getDataPtr(); - Uint32 lcp_id= ord->lcpId; - Uint32 frag_id = ord->fragmentId; - Uint32 table_id = ord->tableId; - - Ptr ptr; - m_logfile_group_list.first(ptr); - - Uint32 entry= lcp_id == m_latest_lcp ? - File_formats::Undofile::UNDO_LCP : File_formats::Undofile::UNDO_LCP_FIRST; - if(!ptr.isNull() && ! (ptr.p->m_state & Logfile_group::LG_CUT_LOG_THREAD)) - { - jam(); - ptr.p->m_state |= Logfile_group::LG_CUT_LOG_THREAD; - signal->theData[0] = LgmanContinueB::CUT_LOG_TAIL; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - } - - if(!ptr.isNull() && ptr.p->m_last_lsn) - { - Uint32 undo[3]; - undo[0] = lcp_id; - undo[1] = (table_id << 16) | frag_id; - undo[2] = (entry << 16 ) | (sizeof(undo) >> 2); - - Uint64 last_lsn= m_last_lsn; - - if(ptr.p->m_last_lsn == last_lsn -#ifdef VM_TRACE - && ((rand() % 100) > 50) -#endif - ) - { - undo[2] |= File_formats::Undofile::UNDO_NEXT_LSN << 16; - Uint32 *dst= get_log_buffer(ptr, sizeof(undo) >> 2); - memcpy(dst, undo, sizeof(undo)); - ndbrequire(ptr.p->m_free_file_words >= (sizeof(undo) >> 2)); - ptr.p->m_free_file_words -= (sizeof(undo) >> 2); - } - else - { - Uint32 *dst= get_log_buffer(ptr, (sizeof(undo) >> 2) + 2); - * dst++ = last_lsn >> 32; - * dst++ = last_lsn & 0xFFFFFFFF; - memcpy(dst, undo, sizeof(undo)); - ndbrequire(ptr.p->m_free_file_words >= (sizeof(undo) >> 2)); - ptr.p->m_free_file_words -= ((sizeof(undo) >> 2) + 2); - } - ptr.p->m_last_lcp_lsn = last_lsn; - m_last_lsn = ptr.p->m_last_lsn = last_lsn + 1; - - validate_logfile_group(ptr, "execLCP_FRAG_ORD"); - } - - while(!ptr.isNull()) - { - if (ptr.p->m_last_lsn) - { - /** - * First LCP_FRAGORD for each LCP, sets tail pos - */ - if(m_latest_lcp != lcp_id) - { - ptr.p->m_tail_pos[0] = ptr.p->m_tail_pos[1]; - ptr.p->m_tail_pos[1] = ptr.p->m_tail_pos[2]; - ptr.p->m_tail_pos[2] = ptr.p->m_file_pos[HEAD]; - } - - if(0) - ndbout_c - ("execLCP_FRAG_ORD (%d %d) (%d %d) (%d %d) free pages: %ld", - ptr.p->m_tail_pos[0].m_ptr_i, ptr.p->m_tail_pos[0].m_idx, - ptr.p->m_tail_pos[1].m_ptr_i, ptr.p->m_tail_pos[1].m_idx, - ptr.p->m_tail_pos[2].m_ptr_i, ptr.p->m_tail_pos[2].m_idx, - (long) (ptr.p->m_free_file_words / File_formats::UNDO_PAGE_WORDS)); - } - m_logfile_group_list.next(ptr); - } - - m_latest_lcp = lcp_id; -} - -void -Lgman::execEND_LCP_REQ(Signal* signal) -{ - EndLcpReq* req= (EndLcpReq*)signal->getDataPtr(); - ndbrequire(m_latest_lcp == req->backupId); - - Ptr ptr; - m_logfile_group_list.first(ptr); - bool wait= false; - while(!ptr.isNull()) - { - Uint64 lcp_lsn = ptr.p->m_last_lcp_lsn; - if(ptr.p->m_last_synced_lsn < lcp_lsn) - { - wait= true; - if(signal->getSendersBlockRef() != reference()) - { - Logfile_client tmp(this, this, ptr.p->m_logfile_group_id); - Logfile_client::Request req; - req.m_callback.m_callbackData = ptr.i; - req.m_callback.m_callbackFunction = safe_cast(&Lgman::endlcp_callback); - ndbrequire(tmp.sync_lsn(signal, lcp_lsn, &req, 0) == 0); - } - } - else - { - ptr.p->m_last_lcp_lsn = 0; - } - m_logfile_group_list.next(ptr); - } - - if(wait) - { - return; - } - - signal->theData[0] = 0; - sendSignal(DBLQH_REF, GSN_END_LCP_CONF, signal, 1, JBB); -} - -void -Lgman::endlcp_callback(Signal* signal, Uint32 ptr, Uint32 res) -{ - EndLcpReq* req= 
(EndLcpReq*)signal->getDataPtr(); - req->backupId = m_latest_lcp; - execEND_LCP_REQ(signal); -} - -void -Lgman::cut_log_tail(Signal* signal, Ptr ptr) -{ - bool done= true; - if (likely(ptr.p->m_last_lsn)) - { - Buffer_idx tmp= ptr.p->m_tail_pos[0]; - Buffer_idx tail= ptr.p->m_file_pos[TAIL]; - - Ptr filePtr; - m_file_pool.getPtr(filePtr, tail.m_ptr_i); - - if(!(tmp == tail)) - { - Uint32 free; - if(tmp.m_ptr_i == tail.m_ptr_i && tail.m_idx < tmp.m_idx) - { - free= tmp.m_idx - tail.m_idx; - ptr.p->m_free_file_words += free * File_formats::UNDO_PAGE_WORDS; - ptr.p->m_file_pos[TAIL] = tmp; - } - else - { - free= filePtr.p->m_file_size - tail.m_idx - 1; - ptr.p->m_free_file_words += free * File_formats::UNDO_PAGE_WORDS; - - Ptr next = filePtr; - Local_undofile_list files(m_file_pool, ptr.p->m_files); - while(files.next(next) && (next.p->m_state & Undofile::FS_EMPTY)) - ndbrequire(next.i != filePtr.i); - if(next.isNull()) - { - jam(); - files.first(next); - while((next.p->m_state & Undofile::FS_EMPTY) && files.next(next)) - ndbrequire(next.i != filePtr.i); - } - - tmp.m_idx= 0; - tmp.m_ptr_i= next.i; - ptr.p->m_file_pos[TAIL] = tmp; - done= false; - } - } - - validate_logfile_group(ptr, "cut log"); - } - - if (done) - { - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_CUT_LOG_THREAD; - m_logfile_group_list.next(ptr); - } - - if(!done || !ptr.isNull()) - { - ptr.p->m_state |= Logfile_group::LG_CUT_LOG_THREAD; - signal->theData[0] = LgmanContinueB::CUT_LOG_TAIL; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - } -} - -void -Lgman::execSUB_GCP_COMPLETE_REP(Signal* signal) -{ - jamEntry(); - - Ptr ptr; - m_logfile_group_list.first(ptr); - - /** - * Filter all logfile groups in parallell - */ - return; // NOT IMPLETMENT YET - - signal->theData[0] = LgmanContinueB::FILTER_LOG; - while(!ptr.isNull()) - { - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - m_logfile_group_list.next(ptr); - } -} - -int -Lgman::alloc_log_space(Uint32 ref, Uint32 words) -{ - ndbrequire(words); - words += 2; // lsn - Logfile_group key; - key.m_logfile_group_id= ref; - Ptr ptr; - if(m_logfile_group_hash.find(ptr, key) && - ptr.p->m_free_file_words >= (words + (4 * File_formats::UNDO_PAGE_WORDS))) - { - ptr.p->m_free_file_words -= words; - validate_logfile_group(ptr, "alloc_log_space"); - return 0; - } - - if(ptr.isNull()) - { - return -1; - } - - return 1501; -} - -int -Lgman::free_log_space(Uint32 ref, Uint32 words) -{ - ndbrequire(words); - Logfile_group key; - key.m_logfile_group_id= ref; - Ptr ptr; - if(m_logfile_group_hash.find(ptr, key)) - { - ptr.p->m_free_file_words += (words + 2); - validate_logfile_group(ptr, "free_log_space"); - return 0; - } - ndbrequire(false); - return -1; -} - -Uint64 -Logfile_client::add_entry(const Change* src, Uint32 cnt) -{ - Uint32 i, tot= 0; - for(i= 0; im_last_lsn; - { - Lgman::Logfile_group key; - key.m_logfile_group_id= m_logfile_group_id; - Ptr ptr; - if(m_lgman->m_logfile_group_hash.find(ptr, key)) - { - Uint64 last_lsn_filegroup= ptr.p->m_last_lsn; - if(last_lsn_filegroup == last_lsn -#ifdef VM_TRACE - && ((rand() % 100) > 50) -#endif - ) - { - dst= m_lgman->get_log_buffer(ptr, tot); - for(i= 0; im_free_file_words += 2; - ptr.p->m_free_buffer_words += 2; - m_lgman->validate_logfile_group(ptr); - } - else - { - dst= m_lgman->get_log_buffer(ptr, tot + 2); - * dst++ = last_lsn >> 32; - * dst++ = last_lsn & 0xFFFFFFFF; - for(i= 0; im_last_lsn = ptr.p->m_last_lsn = last_lsn + 1; - - return last_lsn; - } -} - 
-void -Lgman::execSTART_RECREQ(Signal* signal) -{ - m_latest_lcp = signal->theData[0]; - - Ptr ptr; - m_logfile_group_list.first(ptr); - - if(ptr.i != RNIL) - { - infoEvent("Applying undo to LCP: %d", m_latest_lcp); - ndbout_c("Applying undo to LCP: %d", m_latest_lcp); - find_log_head(signal, ptr); - return; - } - - signal->theData[0] = reference(); - sendSignal(DBLQH_REF, GSN_START_RECCONF, signal, 1, JBB); -} - -void -Lgman::find_log_head(Signal* signal, Ptr ptr) -{ - ndbrequire(ptr.p->m_state & - (Logfile_group::LG_STARTING | Logfile_group::LG_SORTING)); - - if(ptr.p->m_meta_files.isEmpty() && ptr.p->m_files.isEmpty()) - { - jam(); - /** - * Logfile_group wo/ any files - */ - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_STARTING; - ptr.p->m_state |= Logfile_group::LG_ONLINE; - m_logfile_group_list.next(ptr); - signal->theData[0] = LgmanContinueB::FIND_LOG_HEAD; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - return; - } - - ptr.p->m_state = Logfile_group::LG_SORTING; - - /** - * Read first page from each undofile (1 file at a time...) - */ - Local_undofile_list files(m_file_pool, ptr.p->m_meta_files); - Ptr file_ptr; - files.first(file_ptr); - - if(!file_ptr.isNull()) - { - /** - * Use log buffer memory when reading - */ - Uint32 page_id = ptr.p->m_pos[CONSUMER].m_current_pos.m_ptr_i; - file_ptr.p->m_online.m_outstanding= page_id; - - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtrSend(); - req->filePointer = file_ptr.p->m_fd; - req->userReference = reference(); - req->userPointer = file_ptr.i; - req->varIndex = 1; // skip zero page - req->numberOfPages = 1; - req->data.pageData[0] = page_id; - req->operationFlag = 0; - FsReadWriteReq::setFormatFlag(req->operationFlag, - FsReadWriteReq::fsFormatSharedPage); - - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - file_ptr.p->m_state |= Undofile::FS_OUTSTANDING; - return; - } - else - { - /** - * All files have read first page - * and m_files is sorted acording to lsn - */ - ndbrequire(!ptr.p->m_files.isEmpty()); - Local_undofile_list read_files(m_file_pool, ptr.p->m_files); - read_files.last(file_ptr); - - - /** - * Init binary search - */ - ptr.p->m_state = Logfile_group::LG_SEARCHING; - file_ptr.p->m_state = Undofile::FS_SEARCHING; - ptr.p->m_file_pos[TAIL].m_idx = 1; // left page - ptr.p->m_file_pos[HEAD].m_idx = file_ptr.p->m_file_size; - ptr.p->m_file_pos[HEAD].m_ptr_i = ((file_ptr.p->m_file_size - 1) >> 1) + 1; - - Uint32 page_id = ptr.p->m_pos[CONSUMER].m_current_pos.m_ptr_i; - file_ptr.p->m_online.m_outstanding= page_id; - - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtrSend(); - req->filePointer = file_ptr.p->m_fd; - req->userReference = reference(); - req->userPointer = file_ptr.i; - req->varIndex = ptr.p->m_file_pos[HEAD].m_ptr_i; - req->numberOfPages = 1; - req->data.pageData[0] = page_id; - req->operationFlag = 0; - FsReadWriteReq::setFormatFlag(req->operationFlag, - FsReadWriteReq::fsFormatSharedPage); - - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - file_ptr.p->m_state |= Undofile::FS_OUTSTANDING; - return; - } -} - -void -Lgman::execFSREADCONF(Signal* signal) -{ - jamEntry(); - - Ptr ptr; - Ptr lg_ptr; - FsConf* conf = (FsConf*)signal->getDataPtr(); - - m_file_pool.getPtr(ptr, conf->userPointer); - m_logfile_group_pool.getPtr(lg_ptr, ptr.p->m_logfile_group_ptr_i); - - ndbrequire(ptr.p->m_state & Undofile::FS_OUTSTANDING); - 
ptr.p->m_state &= ~(Uint32)Undofile::FS_OUTSTANDING; - - Uint32 cnt= lg_ptr.p->m_outstanding_fs; - ndbrequire(cnt); - - if((ptr.p->m_state & Undofile::FS_EXECUTING)== Undofile::FS_EXECUTING) - { - jam(); - - if(lg_ptr.p->m_next_reply_ptr_i == ptr.i) - { - Uint32 tot= 0; - Local_undofile_list files(m_file_pool, lg_ptr.p->m_files); - while(cnt && ! (ptr.p->m_state & Undofile::FS_OUTSTANDING)) - { - Uint32 state= ptr.p->m_state; - Uint32 pages= ptr.p->m_online.m_outstanding; - ndbrequire(pages); - ptr.p->m_online.m_outstanding= 0; - ptr.p->m_state &= ~(Uint32)Undofile::FS_MOVE_NEXT; - tot += pages; - cnt--; - - if((state & Undofile::FS_MOVE_NEXT) && !files.prev(ptr)) - files.last(ptr); - } - - lg_ptr.p->m_outstanding_fs = cnt; - lg_ptr.p->m_pos[PRODUCER].m_current_pos.m_idx += tot; - lg_ptr.p->m_next_reply_ptr_i = ptr.i; - } - return; - } - - lg_ptr.p->m_outstanding_fs = cnt - 1; - - Ptr page_ptr; - m_shared_page_pool.getPtr(page_ptr, ptr.p->m_online.m_outstanding); - ptr.p->m_online.m_outstanding= 0; - - File_formats::Undofile::Undo_page* page = - (File_formats::Undofile::Undo_page*)page_ptr.p; - - Uint64 lsn = 0; - lsn += page->m_page_header.m_page_lsn_hi; lsn <<= 32; - lsn += page->m_page_header.m_page_lsn_lo; - - switch(ptr.p->m_state){ - case Undofile::FS_SORTING: - jam(); - break; - case Undofile::FS_SEARCHING: - jam(); - find_log_head_in_file(signal, lg_ptr, ptr, lsn); - return; - default: - case Undofile::FS_EXECUTING: - case Undofile::FS_CREATING: - case Undofile::FS_DROPPING: - case Undofile::FS_ONLINE: - case Undofile::FS_OPENING: - case Undofile::FS_EMPTY: - jam(); - ndbrequire(false); - } - - /** - * Prepare for execution - */ - ptr.p->m_state = Undofile::FS_EXECUTING; - ptr.p->m_online.m_lsn = lsn; - - /** - * Insert into m_files - */ - { - Local_undofile_list meta(m_file_pool, lg_ptr.p->m_meta_files); - Local_undofile_list files(m_file_pool, lg_ptr.p->m_files); - meta.remove(ptr); - - Ptr loop; - files.first(loop); - while(!loop.isNull() && loop.p->m_online.m_lsn <= lsn) - files.next(loop); - - if(loop.isNull()) - { - /** - * File has highest lsn, add last - */ - jam(); - files.add(ptr); - } - else - { - /** - * Insert file in correct position in file list - */ - files.insert(ptr, loop); - } - } - find_log_head(signal, lg_ptr); -} - -void -Lgman::execFSREADREF(Signal* signal) -{ - jamEntry(); - SimulatedBlock::execFSREADREF(signal); - ndbrequire(false); -} - -void -Lgman::find_log_head_in_file(Signal* signal, - Ptr ptr, - Ptr file_ptr, - Uint64 last_lsn) -{ - // a b - // 3 4 5 0 1 - Uint32 curr= ptr.p->m_file_pos[HEAD].m_ptr_i; - Uint32 head= ptr.p->m_file_pos[HEAD].m_idx; - Uint32 tail= ptr.p->m_file_pos[TAIL].m_idx; - - ndbrequire(head > tail); - Uint32 diff = head - tail; - - if(DEBUG_SEARCH_LOG_HEAD) - printf("tail: %d(%lld) head: %d last: %d(%lld) -> ", - tail, file_ptr.p->m_online.m_lsn, - head, curr, last_lsn); - if(last_lsn > file_ptr.p->m_online.m_lsn) - { - if(DEBUG_SEARCH_LOG_HEAD) - printf("moving tail "); - - file_ptr.p->m_online.m_lsn = last_lsn; - ptr.p->m_file_pos[TAIL].m_idx = tail = curr; - } - else - { - if(DEBUG_SEARCH_LOG_HEAD) - printf("moving head "); - - ptr.p->m_file_pos[HEAD].m_idx = head = curr; - } - - if(diff > 1) - { - // We need to find more pages to be sure... 
- ptr.p->m_file_pos[HEAD].m_ptr_i = curr = ((head + tail) >> 1); - - if(DEBUG_SEARCH_LOG_HEAD) - ndbout_c("-> new search tail: %d(%lld) head: %d -> %d", - tail, file_ptr.p->m_online.m_lsn, - head, curr); - - Uint32 page_id = ptr.p->m_pos[CONSUMER].m_current_pos.m_ptr_i; - file_ptr.p->m_online.m_outstanding= page_id; - - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtrSend(); - req->filePointer = file_ptr.p->m_fd; - req->userReference = reference(); - req->userPointer = file_ptr.i; - req->varIndex = curr; - req->numberOfPages = 1; - req->data.pageData[0] = page_id; - req->operationFlag = 0; - FsReadWriteReq::setFormatFlag(req->operationFlag, - FsReadWriteReq::fsFormatSharedPage); - - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - file_ptr.p->m_state |= Undofile::FS_OUTSTANDING; - return; - } - - ndbrequire(diff == 1); - if(DEBUG_SEARCH_LOG_HEAD) - ndbout_c("-> found last page: %d", tail); - - ptr.p->m_state = 0; - file_ptr.p->m_state = Undofile::FS_EXECUTING; - ptr.p->m_last_lsn = file_ptr.p->m_online.m_lsn; - ptr.p->m_last_read_lsn = file_ptr.p->m_online.m_lsn; - ptr.p->m_last_synced_lsn = file_ptr.p->m_online.m_lsn; - m_last_lsn = file_ptr.p->m_online.m_lsn; - - /** - * Set HEAD position - */ - ptr.p->m_file_pos[HEAD].m_ptr_i = file_ptr.i; - ptr.p->m_file_pos[HEAD].m_idx = tail; - - ptr.p->m_file_pos[TAIL].m_ptr_i = file_ptr.i; - ptr.p->m_file_pos[TAIL].m_idx = tail - 1; - ptr.p->m_next_reply_ptr_i = file_ptr.i; - - { - Local_undofile_list files(m_file_pool, ptr.p->m_files); - if(tail == 1) - { - /** - * HEAD is first page in a file... - * -> PREV should be in previous file - */ - Ptr prev = file_ptr; - if(!files.prev(prev)) - { - files.last(prev); - } - ptr.p->m_file_pos[TAIL].m_ptr_i = prev.i; - ptr.p->m_file_pos[TAIL].m_idx = prev.p->m_file_size - 1; - ptr.p->m_next_reply_ptr_i = prev.i; - } - - SimulatedBlock* fs = globalData.getBlock(NDBFS); - infoEvent("Undo head - %s page: %d lsn: %lld", - fs->get_filename(file_ptr.p->m_fd), - tail, file_ptr.p->m_online.m_lsn); - g_eventLogger.info("Undo head - %s page: %d lsn: %lld", - fs->get_filename(file_ptr.p->m_fd), - tail, file_ptr.p->m_online.m_lsn); - - for(files.prev(file_ptr); !file_ptr.isNull(); files.prev(file_ptr)) - { - infoEvent(" - next - %s(%lld)", - fs->get_filename(file_ptr.p->m_fd), - file_ptr.p->m_online.m_lsn); - - g_eventLogger.info(" - next - %s(%lld)", - fs->get_filename(file_ptr.p->m_fd), - file_ptr.p->m_online.m_lsn); - } - } - - /** - * Start next logfile group - */ - m_logfile_group_list.next(ptr); - signal->theData[0] = LgmanContinueB::FIND_LOG_HEAD; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); -} - -void -Lgman::init_run_undo_log(Signal* signal) -{ - /** - * Perform initial sorting of logfile groups - */ - Ptr group; - Logfile_group_list& list= m_logfile_group_list; - Logfile_group_list tmp(m_logfile_group_pool); - - list.first(group); - while(!group.isNull()) - { - Ptr ptr= group; - list.next(group); - list.remove(ptr); - - { - /** - * Init buffer pointers - */ - ptr.p->m_free_buffer_words -= File_formats::UNDO_PAGE_WORDS; - ptr.p->m_pos[CONSUMER].m_current_page.m_idx = 0; // 0 more pages read - ptr.p->m_pos[PRODUCER].m_current_page.m_idx = 0; // 0 more pages read - - Uint32 page = ptr.p->m_pos[CONSUMER].m_current_pos.m_ptr_i; - File_formats::Undofile::Undo_page* pageP = - (File_formats::Undofile::Undo_page*)m_shared_page_pool.getPtr(page); - - ptr.p->m_pos[CONSUMER].m_current_pos.m_idx = 
pageP->m_words_used; - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx = 1; - ptr.p->m_last_read_lsn++; - } - - /** - * Start producer thread - */ - signal->theData[0] = LgmanContinueB::READ_UNDO_LOG; - signal->theData[1] = ptr.i; - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - - /** - * Insert in correct position in list of logfile_group's - */ - Ptr pos; - for(tmp.first(pos); !pos.isNull(); tmp.next(pos)) - if(ptr.p->m_last_read_lsn >= pos.p->m_last_read_lsn) - break; - - if(pos.isNull()) - tmp.add(ptr); - else - tmp.insert(ptr, pos); - - ptr.p->m_state = - Logfile_group::LG_EXEC_THREAD | Logfile_group::LG_READ_THREAD; - } - list = tmp; - - execute_undo_record(signal); -} - -void -Lgman::read_undo_log(Signal* signal, Ptr ptr) -{ - Uint32 cnt, free= ptr.p->m_free_buffer_words; - - if(! (ptr.p->m_state & Logfile_group::LG_EXEC_THREAD)) - { - jam(); - /** - * Logfile_group is done... - */ - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_READ_THREAD; - stop_run_undo_log(signal); - return; - } - - if(free <= File_formats::UNDO_PAGE_WORDS) - { - signal->theData[0] = LgmanContinueB::READ_UNDO_LOG; - signal->theData[1] = ptr.i; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); - return; - } - - Logfile_group::Position producer= ptr.p->m_pos[PRODUCER]; - Logfile_group::Position consumer= ptr.p->m_pos[CONSUMER]; - - if(producer.m_current_page.m_idx == 0) - { - /** - * zero pages left in range -> switch range - */ - Lgman::Page_map::Iterator it; - Page_map map(m_data_buffer_pool, ptr.p->m_buffer_pages); - Uint32 sz = map.getSize(); - Uint32 pos= (producer.m_current_page.m_ptr_i + sz - 2) % sz; - map.position(it, pos); - union { - Uint32 _tmp[2]; - Lgman::Buffer_idx range; - }; - _tmp[0] = *it.data; map.next(it); _tmp[1] = *it.data; - producer.m_current_page.m_ptr_i = pos; - producer.m_current_page.m_idx = range.m_idx; - producer.m_current_pos.m_ptr_i = range.m_ptr_i + range.m_idx; - } - - if(producer.m_current_page.m_ptr_i == consumer.m_current_page.m_ptr_i && - producer.m_current_pos.m_ptr_i > consumer.m_current_pos.m_ptr_i) - { - Uint32 max= - producer.m_current_pos.m_ptr_i - consumer.m_current_pos.m_ptr_i - 1; - ndbrequire(free >= max * File_formats::UNDO_PAGE_WORDS); - cnt= read_undo_pages(signal, ptr, producer.m_current_pos.m_ptr_i, max); - ndbrequire(cnt <= max); - producer.m_current_pos.m_ptr_i -= cnt; - producer.m_current_page.m_idx -= cnt; - } - else - { - Uint32 max= producer.m_current_page.m_idx; - ndbrequire(free >= max * File_formats::UNDO_PAGE_WORDS); - cnt= read_undo_pages(signal, ptr, producer.m_current_pos.m_ptr_i, max); - ndbrequire(cnt <= max); - producer.m_current_pos.m_ptr_i -= cnt; - producer.m_current_page.m_idx -= cnt; - } - - ndbrequire(free >= cnt * File_formats::UNDO_PAGE_WORDS); - free -= (cnt * File_formats::UNDO_PAGE_WORDS); - ptr.p->m_free_buffer_words = free; - ptr.p->m_pos[PRODUCER] = producer; - - signal->theData[0] = LgmanContinueB::READ_UNDO_LOG; - signal->theData[1] = ptr.i; - - if(free > File_formats::UNDO_PAGE_WORDS) - sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB); - else - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); -} - -Uint32 -Lgman::read_undo_pages(Signal* signal, Ptr ptr, - Uint32 pageId, Uint32 pages) -{ - ndbrequire(pages); - Ptr filePtr; - Buffer_idx tail= ptr.p->m_file_pos[TAIL]; - m_file_pool.getPtr(filePtr, tail.m_ptr_i); - - if(filePtr.p->m_online.m_outstanding > 0) - { - jam(); - return 0; - } - - Uint32 max= tail.m_idx; - - FsReadWriteReq* req= (FsReadWriteReq*)signal->getDataPtrSend(); - 
req->filePointer = filePtr.p->m_fd; - req->userReference = reference(); - req->userPointer = filePtr.i; - req->operationFlag = 0; - FsReadWriteReq::setFormatFlag(req->operationFlag, - FsReadWriteReq::fsFormatSharedPage); - - - if(max > pages) - { - jam(); - tail.m_idx -= pages; - - req->varIndex = 1 + tail.m_idx; - req->numberOfPages = pages; - req->data.pageData[0] = pageId - pages; - ptr.p->m_file_pos[TAIL] = tail; - - if(DEBUG_UNDO_EXECUTION) - ndbout_c("a reading from file: %d page(%d-%d) into (%d-%d)", - ptr.i, 1 + tail.m_idx, 1+tail.m_idx+pages-1, - pageId - pages, pageId - 1); - - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - filePtr.p->m_state |= Undofile::FS_OUTSTANDING; - filePtr.p->m_online.m_outstanding = pages; - max = pages; - } - else - { - jam(); - - ndbrequire(tail.m_idx - max == 0); - req->varIndex = 1; - req->numberOfPages = max; - req->data.pageData[0] = pageId - max; - - if(DEBUG_UNDO_EXECUTION) - ndbout_c("b reading from file: %d page(%d-%d) into (%d-%d)", - ptr.i, 1 , 1+max-1, - pageId - max, pageId - 1); - - sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, - FsReadWriteReq::FixedLength + 1, JBA); - - ptr.p->m_outstanding_fs++; - filePtr.p->m_online.m_outstanding = max; - filePtr.p->m_state |= Undofile::FS_OUTSTANDING | Undofile::FS_MOVE_NEXT; - - Ptr prev = filePtr; - { - Local_undofile_list files(m_file_pool, ptr.p->m_files); - if(!files.prev(prev)) - { - jam(); - files.last(prev); - } - } - if(DEBUG_UNDO_EXECUTION) - ndbout_c("changing file from %d to %d", filePtr.i, prev.i); - - tail.m_idx= prev.p->m_file_size - 1; - tail.m_ptr_i= prev.i; - ptr.p->m_file_pos[TAIL] = tail; - if(max < pages && filePtr.i != prev.i) - max += read_undo_pages(signal, ptr, pageId - max, pages - max); - } - - return max; - -} - -void -Lgman::execute_undo_record(Signal* signal) -{ - Uint64 lsn; - const Uint32* ptr; - Dbtup* tup= (Dbtup*)globalData.getBlock(DBTUP); - if((ptr = get_next_undo_record(&lsn))) - { - Uint32 len= (* ptr) & 0xFFFF; - Uint32 type= (* ptr) >> 16; - Uint32 mask= type & ~(Uint32)File_formats::Undofile::UNDO_NEXT_LSN; - switch(mask){ - case File_formats::Undofile::UNDO_END: - stop_run_undo_log(signal); - return; - case File_formats::Undofile::UNDO_LCP: - case File_formats::Undofile::UNDO_LCP_FIRST: - { - Uint32 lcp = * (ptr - len + 1); - if(m_latest_lcp && lcp > m_latest_lcp) - { - if (0) - { - const Uint32 * base = ptr - len + 1; - Uint32 lcp = base[0]; - Uint32 tableId = base[1] >> 16; - Uint32 fragId = base[1] & 0xFFFF; - - ndbout_c("NOT! 
ignoring lcp: %u tab: %u frag: %u", - lcp, tableId, fragId); - } - } - - if(m_latest_lcp == 0 || - lcp < m_latest_lcp || - (lcp == m_latest_lcp && - mask == File_formats::Undofile::UNDO_LCP_FIRST)) - { - stop_run_undo_log(signal); - return; - } - // Fallthrough - } - case File_formats::Undofile::UNDO_TUP_ALLOC: - case File_formats::Undofile::UNDO_TUP_UPDATE: - case File_formats::Undofile::UNDO_TUP_FREE: - case File_formats::Undofile::UNDO_TUP_CREATE: - case File_formats::Undofile::UNDO_TUP_DROP: - case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT: - case File_formats::Undofile::UNDO_TUP_FREE_EXTENT: - tup->disk_restart_undo(signal, lsn, mask, ptr - len + 1, len); - return; - default: - ndbrequire(false); - } - } - signal->theData[0] = LgmanContinueB::EXECUTE_UNDO_RECORD; - sendSignal(LGMAN_REF, GSN_CONTINUEB, signal, 1, JBB); - - return; -} - -const Uint32* -Lgman::get_next_undo_record(Uint64 * this_lsn) -{ - Ptr ptr; - m_logfile_group_list.first(ptr); - - Logfile_group::Position consumer= ptr.p->m_pos[CONSUMER]; - Logfile_group::Position producer= ptr.p->m_pos[PRODUCER]; - if(producer.m_current_pos.m_idx < 2) - { - jam(); - /** - * Wait for fetching pages... - */ - return 0; - } - - Uint32 pos = consumer.m_current_pos.m_idx; - Uint32 page = consumer.m_current_pos.m_ptr_i; - - File_formats::Undofile::Undo_page* pageP=(File_formats::Undofile::Undo_page*) - m_shared_page_pool.getPtr(page); - - if(pos == 0) - { - /** - * End of log - */ - pageP->m_data[0] = (File_formats::Undofile::UNDO_END << 16) | 1 ; - pageP->m_page_header.m_page_lsn_hi = 0; - pageP->m_page_header.m_page_lsn_lo = 0; - pos= consumer.m_current_pos.m_idx= pageP->m_words_used = 1; - this_lsn = 0; - return pageP->m_data; - } - - Uint32 *record= pageP->m_data + pos - 1; - Uint32 len= (* record) & 0xFFFF; - ndbrequire(len); - Uint32 *prev= record - len; - Uint64 lsn = 0; - - // Same page - if(((* record) >> 16) & File_formats::Undofile::UNDO_NEXT_LSN) - { - lsn = ptr.p->m_last_read_lsn - 1; - ndbrequire((Int64)lsn >= 0); - } - else - { - ndbrequire(pos >= 3); - lsn += * (prev - 1); lsn <<= 32; - lsn += * (prev - 0); - len += 2; - ndbrequire((Int64)lsn >= 0); - } - - - ndbrequire(pos >= len); - - if(pos == len) - { - /** - * Switching page - */ - ndbrequire(producer.m_current_pos.m_idx); - ptr.p->m_pos[PRODUCER].m_current_pos.m_idx --; - - if(consumer.m_current_page.m_idx) - { - consumer.m_current_page.m_idx--; // left in range - consumer.m_current_pos.m_ptr_i --; // page - } - else - { - // 0 pages left in range...switch range - Lgman::Page_map::Iterator it; - Page_map map(m_data_buffer_pool, ptr.p->m_buffer_pages); - Uint32 sz = map.getSize(); - Uint32 tmp = (consumer.m_current_page.m_ptr_i + sz - 2) % sz; - - map.position(it, tmp); - union { - Uint32 _tmp[2]; - Lgman::Buffer_idx range; - }; - - _tmp[0] = *it.data; map.next(it); _tmp[1] = *it.data; - - consumer.m_current_page.m_idx = range.m_idx - 1; // left in range - consumer.m_current_page.m_ptr_i = tmp; // pos in map - - consumer.m_current_pos.m_ptr_i = range.m_ptr_i + range.m_idx - 1; // page - } - - if(DEBUG_UNDO_EXECUTION) - ndbout_c("reading from %d", consumer.m_current_pos.m_ptr_i); - - pageP=(File_formats::Undofile::Undo_page*) - m_shared_page_pool.getPtr(consumer.m_current_pos.m_ptr_i); - - pos= consumer.m_current_pos.m_idx= pageP->m_words_used; - - Uint64 tmp = 0; - tmp += pageP->m_page_header.m_page_lsn_hi; tmp <<= 32; - tmp += pageP->m_page_header.m_page_lsn_lo; - - prev = pageP->m_data + pos - 1; - - if(((* prev) >> 16) & File_formats::Undofile::UNDO_NEXT_LSN) - 
ndbrequire(lsn + 1 == ptr.p->m_last_read_lsn); - - ptr.p->m_pos[CONSUMER] = consumer; - ptr.p->m_free_buffer_words += File_formats::UNDO_PAGE_WORDS; - } - else - { - ptr.p->m_pos[CONSUMER].m_current_pos.m_idx -= len; - } - - * this_lsn = ptr.p->m_last_read_lsn = lsn; - - /** - * Re-sort log file groups - */ - Ptr sort = ptr; - if(m_logfile_group_list.next(sort)) - { - while(!sort.isNull() && sort.p->m_last_read_lsn > lsn) - m_logfile_group_list.next(sort); - - if(sort.i != ptr.p->nextList) - { - m_logfile_group_list.remove(ptr); - if(sort.isNull()) - m_logfile_group_list.add(ptr); - else - m_logfile_group_list.insert(ptr, sort); - } - } - return record; -} - -void -Lgman::stop_run_undo_log(Signal* signal) -{ - bool running = false, outstanding = false; - Ptr ptr; - m_logfile_group_list.first(ptr); - while(!ptr.isNull()) - { - /** - * Mark exec thread as completed - */ - ptr.p->m_state &= ~(Uint32)Logfile_group::LG_EXEC_THREAD; - - if(ptr.p->m_state & Logfile_group::LG_READ_THREAD) - { - /** - * Thread is still running...wait for it to complete - */ - running = true; - } - else if(ptr.p->m_outstanding_fs) - { - outstanding = true; // a FSREADREQ is outstanding...wait for it - } - else if(ptr.p->m_state != Logfile_group::LG_ONLINE) - { - /** - * Fix log TAIL - */ - ndbrequire(ptr.p->m_state == 0); - ptr.p->m_state = Logfile_group::LG_ONLINE; - Buffer_idx tail= ptr.p->m_file_pos[TAIL]; - Uint32 pages= ptr.p->m_pos[PRODUCER].m_current_pos.m_idx; - - while(pages) - { - Ptr file; - m_file_pool.getPtr(file, tail.m_ptr_i); - Uint32 page= tail.m_idx; - Uint32 size= file.p->m_file_size; - ndbrequire(size >= page); - Uint32 diff= size - page; - - if(pages >= diff) - { - pages -= diff; - Local_undofile_list files(m_file_pool, ptr.p->m_files); - if(!files.next(file)) - files.first(file); - tail.m_idx = 1; - tail.m_ptr_i= file.i; - } - else - { - tail.m_idx += pages; - pages= 0; - } - } - ptr.p->m_tail_pos[0] = tail; - ptr.p->m_tail_pos[1] = tail; - ptr.p->m_tail_pos[2] = tail; - ptr.p->m_file_pos[TAIL] = tail; - - init_logbuffer_pointers(ptr); - - { - Buffer_idx head= ptr.p->m_file_pos[HEAD]; - Ptr file; - m_file_pool.getPtr(file, head.m_ptr_i); - if (head.m_idx == file.p->m_file_size - 1) - { - Local_undofile_list files(m_file_pool, ptr.p->m_files); - if(!files.next(file)) - { - jam(); - files.first(file); - } - head.m_idx = 0; - head.m_ptr_i = file.i; - ptr.p->m_file_pos[HEAD] = head; - } - } - - ptr.p->m_free_file_words = (Uint64)File_formats::UNDO_PAGE_WORDS * - (Uint64)compute_free_file_pages(ptr); - ptr.p->m_next_reply_ptr_i = ptr.p->m_file_pos[HEAD].m_ptr_i; - - ptr.p->m_state |= Logfile_group::LG_FLUSH_THREAD; - signal->theData[0] = LgmanContinueB::FLUSH_LOG; - signal->theData[1] = ptr.i; - signal->theData[2] = 0; - sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB); - - if(1) - { - SimulatedBlock* fs = globalData.getBlock(NDBFS); - Ptr hf, tf; - m_file_pool.getPtr(tf, tail.m_ptr_i); - m_file_pool.getPtr(hf, ptr.p->m_file_pos[HEAD].m_ptr_i); - infoEvent("Logfile group: %d ", ptr.p->m_logfile_group_id); - g_eventLogger.info("Logfile group: %d ", ptr.p->m_logfile_group_id); - infoEvent(" head: %s page: %d", - fs->get_filename(hf.p->m_fd), - ptr.p->m_file_pos[HEAD].m_idx); - g_eventLogger.info(" head: %s page: %d", - fs->get_filename(hf.p->m_fd), - ptr.p->m_file_pos[HEAD].m_idx); - infoEvent(" tail: %s page: %d", - fs->get_filename(tf.p->m_fd), tail.m_idx); - g_eventLogger.info(" tail: %s page: %d", - fs->get_filename(tf.p->m_fd), tail.m_idx); - } - } - - m_logfile_group_list.next(ptr); - } - 
- if(running) - { - jam(); - return; - } - - if(outstanding) - { - jam(); - signal->theData[0] = LgmanContinueB::STOP_UNDO_LOG; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); - return; - } - - infoEvent("Flushing page cache after undo completion"); - g_eventLogger.info("Flushing page cache after undo completion"); - - /** - * Start flushing pages (local, LCP) - */ - LcpFragOrd * ord = (LcpFragOrd *)signal->getDataPtr(); - ord->lcpId = m_latest_lcp; - sendSignal(PGMAN_REF, GSN_LCP_FRAG_ORD, signal, - LcpFragOrd::SignalLength, JBB); - - EndLcpReq* req= (EndLcpReq*)signal->getDataPtr(); - req->senderRef = reference(); - sendSignal(PGMAN_REF, GSN_END_LCP_REQ, signal, - EndLcpReq::SignalLength, JBB); -} - -void -Lgman::execEND_LCP_CONF(Signal* signal) -{ - Dbtup* tup= (Dbtup*)globalData.getBlock(DBTUP); - tup->disk_restart_undo(signal, 0, File_formats::Undofile::UNDO_END, 0, 0); - - /** - * pgman has completed flushing all pages - * - * insert "fake" LCP record preventing undo to be "rerun" - */ - Uint32 undo[3]; - undo[0] = m_latest_lcp; - undo[1] = (0 << 16) | 0; - undo[2] = (File_formats::Undofile::UNDO_LCP_FIRST << 16 ) - | (sizeof(undo) >> 2); - - Ptr ptr; - ndbrequire(m_logfile_group_list.first(ptr)); - - Uint64 last_lsn= m_last_lsn; - if(ptr.p->m_last_lsn == last_lsn -#ifdef VM_TRACE - && ((rand() % 100) > 50) -#endif - ) - { - undo[2] |= File_formats::Undofile::UNDO_NEXT_LSN << 16; - Uint32 *dst= get_log_buffer(ptr, sizeof(undo) >> 2); - memcpy(dst, undo, sizeof(undo)); - ndbrequire(ptr.p->m_free_file_words >= (sizeof(undo) >> 2)); - ptr.p->m_free_file_words -= (sizeof(undo) >> 2); - } - else - { - Uint32 *dst= get_log_buffer(ptr, (sizeof(undo) >> 2) + 2); - * dst++ = last_lsn >> 32; - * dst++ = last_lsn & 0xFFFFFFFF; - memcpy(dst, undo, sizeof(undo)); - ndbrequire(ptr.p->m_free_file_words >= ((sizeof(undo) >> 2) + 2)); - ptr.p->m_free_file_words -= ((sizeof(undo) >> 2) + 2); - } - m_last_lsn = ptr.p->m_last_lsn = last_lsn + 1; - - ptr.p->m_last_synced_lsn = last_lsn; - while(m_logfile_group_list.next(ptr)) - ptr.p->m_last_synced_lsn = last_lsn; - - infoEvent("Flushing complete"); - g_eventLogger.info("Flushing complete"); - - signal->theData[0] = reference(); - sendSignal(DBLQH_REF, GSN_START_RECCONF, signal, 1, JBB); -} - -#ifdef VM_TRACE -void -Lgman::validate_logfile_group(Ptr ptr, const char * heading) -{ - do - { - if (ptr.p->m_file_pos[HEAD].m_ptr_i == RNIL) - break; - - Uint32 pages = compute_free_file_pages(ptr); - - Uint32 group_pages = - ((ptr.p->m_free_file_words + File_formats::UNDO_PAGE_WORDS - 1)/ File_formats::UNDO_PAGE_WORDS) ; - Uint32 last = ptr.p->m_free_file_words % File_formats::UNDO_PAGE_WORDS; - - if(! 
(pages >= group_pages)) - { - ndbout << heading << " Tail: " << ptr.p->m_file_pos[TAIL] - << " Head: " << ptr.p->m_file_pos[HEAD] - << " free: " << group_pages << "(" << last << ")" - << " found: " << pages; - for(Uint32 i = 0; i<3; i++) - { - ndbout << " - " << ptr.p->m_tail_pos[i]; - } - ndbout << endl; - - ndbrequire(pages >= group_pages); - } - } while(0); -} -#endif - -void Lgman::execGET_TABINFOREQ(Signal* signal) -{ - jamEntry(); - - if(!assembleFragments(signal)) - { - return; - } - - GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0]; - - const Uint32 reqType = req->requestType & (~GetTabInfoReq::LongSignalConf); - BlockReference retRef= req->senderRef; - Uint32 senderData= req->senderData; - Uint32 tableId= req->tableId; - - if(reqType == GetTabInfoReq::RequestByName){ - jam(); - if(signal->getNoOfSections()) - releaseSections(signal); - - sendGET_TABINFOREF(signal, req, GetTabInfoRef::NoFetchByName); - return; - } - - Logfile_group key; - key.m_logfile_group_id= tableId; - Ptr ptr; - m_logfile_group_hash.find(ptr, key); - - if(ptr.p->m_logfile_group_id != tableId) - { - jam(); - if(signal->getNoOfSections()) - releaseSections(signal); - - sendGET_TABINFOREF(signal, req, GetTabInfoRef::InvalidTableId); - return; - } - - - GetTabInfoConf *conf = (GetTabInfoConf *)&signal->theData[0]; - - conf->senderData= senderData; - conf->tableId= tableId; - conf->freeWordsHi= ptr.p->m_free_file_words >> 32; - conf->freeWordsLo= ptr.p->m_free_file_words & 0xFFFFFFFF; - conf->tableType= DictTabInfo::LogfileGroup; - conf->senderRef= reference(); - sendSignal(retRef, GSN_GET_TABINFO_CONF, signal, - GetTabInfoConf::SignalLength, JBB); -} - -void Lgman::sendGET_TABINFOREF(Signal* signal, - GetTabInfoReq * req, - GetTabInfoRef::ErrorCode errorCode) -{ - jamEntry(); - GetTabInfoRef * const ref = (GetTabInfoRef *)&signal->theData[0]; - /** - * The format of GetTabInfo Req/Ref is the same - */ - BlockReference retRef = req->senderRef; - ref->errorCode = errorCode; - - sendSignal(retRef, GSN_GET_TABINFOREF, signal, signal->length(), JBB); -} diff --git a/storage/ndb/src/kernel/blocks/lgman.hpp b/storage/ndb/src/kernel/blocks/lgman.hpp deleted file mode 100644 index c15e0d029e6..00000000000 --- a/storage/ndb/src/kernel/blocks/lgman.hpp +++ /dev/null @@ -1,367 +0,0 @@ -/* Copyright (c) 2003, 2005-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef LGMAN_H -#define LGMAN_H - -#include - -#include -#include -#include -#include -#include -#include -#include "diskpage.hpp" -#include - -#include -#include - -class Lgman : public SimulatedBlock -{ -public: - Lgman(Block_context& ctx); - virtual ~Lgman(); - BLOCK_DEFINES(Lgman); - -protected: - - void execSTTOR(Signal* signal); - void sendSTTORRY(Signal*); - void execREAD_CONFIG_REQ(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - void execCONTINUEB(Signal* signal); - - void execCREATE_FILE_REQ(Signal* signal); - void execCREATE_FILEGROUP_REQ(Signal* signal); - void execDROP_FILE_REQ(Signal* signal); - void execDROP_FILEGROUP_REQ(Signal* signal); - - void execFSWRITEREQ(Signal*); - void execFSWRITEREF(Signal*); - void execFSWRITECONF(Signal*); - - void execFSOPENREF(Signal*); - void execFSOPENCONF(Signal*); - - void execFSCLOSEREF(Signal*); - void execFSCLOSECONF(Signal*); - - void execFSREADREF(Signal*); - void execFSREADCONF(Signal*); - - void execLCP_FRAG_ORD(Signal*); - void execEND_LCP_REQ(Signal*); - void execSUB_GCP_COMPLETE_REP(Signal*); - - void execSTART_RECREQ(Signal*); - void execEND_LCP_CONF(Signal*); - - void execGET_TABINFOREQ(Signal*); - - void sendGET_TABINFOREF(Signal* signal, - GetTabInfoReq * req, - GetTabInfoRef::ErrorCode errorCode); - -public: - struct Log_waiter - { - Callback m_callback; - union { - Uint32 m_size; - Uint64 m_sync_lsn; - }; - Uint32 m_block; - Uint32 nextList; - Uint32 m_magic; - }; - - typedef RecordPool Log_waiter_pool; - typedef SLFifoListImpl Log_waiter_list; - typedef LocalSLFifoListImpl Local_log_waiter_list; - - struct Undofile - { - Undofile(){} - Undofile(const struct CreateFileImplReq*, Uint32 lg_ptr_i); - - Uint32 m_magic; - Uint32 m_file_id; // Dict obj id - Uint32 m_logfile_group_ptr_i; - - Uint32 m_file_size; - Uint32 m_state; - Uint32 m_fd; // When speaking to NDBFS - - enum FileState - { - FS_CREATING = 0x1 // File is being created - ,FS_DROPPING = 0x2 // File is being dropped - ,FS_ONLINE = 0x4 // File is online - ,FS_OPENING = 0x8 // File is being opened during SR - ,FS_SORTING = 0x10 // Files in group are being sorted - ,FS_SEARCHING = 0x20 // File is being searched for end of log - ,FS_EXECUTING = 0x40 // File is used for executing UNDO log - ,FS_EMPTY = 0x80 // File is empty (used when online) - ,FS_OUTSTANDING = 0x100 // File has outstanding request - ,FS_MOVE_NEXT = 0x200 // When receiving reply move to next file - }; - - union { - struct { - Uint32 m_outstanding; // Outstaning pages - Uint64 m_lsn; // Used when finding log head - } m_online; - struct { - Uint32 m_senderData; - Uint32 m_senderRef; - Uint32 m_logfile_group_id; - Uint32 m_logfile_group_version; - } m_create; - }; - - Uint32 nextList; - union { - Uint32 prevList; - Uint32 nextPool; - }; - }; - - typedef RecordPool Undofile_pool; - typedef DLFifoListImpl Undofile_list; - typedef LocalDLFifoListImpl Local_undofile_list; - typedef LocalDataBuffer<15> Page_map; - - struct Buffer_idx - { - Uint32 m_ptr_i; - Uint32 m_idx; - bool operator== (const Buffer_idx& bi) const { - return (m_ptr_i == bi.m_ptr_i && m_idx == bi.m_idx); - } - }; - - struct Logfile_group - { - Logfile_group(){} - Logfile_group(const struct CreateFilegroupImplReq*); - - Uint32 m_magic; - union { - Uint32 key; - Uint32 m_logfile_group_id; - }; - Uint32 m_version; - 
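The undo record assembled in Lgman::execEND_LCP_CONF() above packs the record type into the upper 16 bits of its last word and the record length (in 32-bit words) into the lower bits. A minimal stand-alone sketch of that packing, using invented constant names rather than the real File_formats::Undofile values:

// Illustrative only; not part of the patch and not the NDB definitions.
#include <cassert>
#include <cstdint>

namespace sketch {
constexpr uint32_t kUndoLcpFirst = 0x11;   // hypothetical record-type code

inline uint32_t pack_undo_word(uint32_t type, uint32_t length_words) {
  assert(length_words < (1u << 16));       // length must fit the low half
  return (type << 16) | length_words;      // type in the high 16 bits
}
inline uint32_t undo_type(uint32_t w)   { return w >> 16; }
inline uint32_t undo_length(uint32_t w) { return w & 0xFFFF; }
}  // namespace sketch

int main() {
  // Three 32-bit words, as with the "fake" LCP record written above.
  uint32_t w = sketch::pack_undo_word(sketch::kUndoLcpFirst, 3);
  assert(sketch::undo_type(w) == sketch::kUndoLcpFirst);
  assert(sketch::undo_length(w) == 3);
  return 0;
}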
Uint16 m_state; - Uint16 m_outstanding_fs; - Uint32 m_next_reply_ptr_i; - - enum Logfile_group_state - { - LG_ONLINE = 0x001 - ,LG_SORTING = 0x002 // Sorting files - ,LG_SEARCHING = 0x004 // Searching in last file - ,LG_EXEC_THREAD = 0x008 // Execute thread is running - ,LG_READ_THREAD = 0x010 // Read thread is running - ,LG_FORCE_SYNC_THREAD = 0x020 - ,LG_SYNC_WAITERS_THREAD = 0x040 - ,LG_CUT_LOG_THREAD = 0x080 - ,LG_WAITERS_THREAD = 0x100 - ,LG_FLUSH_THREAD = 0x200 - ,LG_DROPPING = 0x400 - ,LG_STARTING = 0x800 - }; - - static const Uint32 LG_THREAD_MASK = Logfile_group::LG_FORCE_SYNC_THREAD | - Logfile_group::LG_SYNC_WAITERS_THREAD | - Logfile_group::LG_CUT_LOG_THREAD | - Logfile_group::LG_WAITERS_THREAD | - Logfile_group::LG_FLUSH_THREAD; - - Uint64 m_last_lsn; - Uint64 m_last_sync_req_lsn; // Outstanding - Uint64 m_last_synced_lsn; // - Uint64 m_max_sync_req_lsn; // User requested lsn - union { - Uint64 m_last_read_lsn; - Uint64 m_last_lcp_lsn; - }; - Log_waiter_list::Head m_log_sync_waiters; - - Buffer_idx m_tail_pos[3]; // 0 is cut, 1 is saved, 2 is current - Buffer_idx m_file_pos[2]; // 0 tail, 1 head = { file_ptr_i, page_no } - Uint64 m_free_file_words; // Free words in logfile group - - Undofile_list::Head m_files; // Files in log - Undofile_list::Head m_meta_files;// Files being created or dropped - - Uint32 m_free_buffer_words; // Free buffer page words - Log_waiter_list::Head m_log_buffer_waiters; - Page_map::Head m_buffer_pages; // Pairs of { ptr.i, count } - struct Position { - Buffer_idx m_current_page; // { m_buffer_pages.i, left in range } - Buffer_idx m_current_pos; // { page ptr.i, m_words_used } - } m_pos[2]; // 0 is reader (lgman) 1 is writer (tup) - - Uint32 nextHash; - Uint32 prevHash; - Uint32 nextList; - union { - Uint32 prevList; - Uint32 nextPool; - }; - Uint32 hashValue() const { - return key; - } - bool equal(const Logfile_group& rec) const { - return key == rec.key; - } - }; - - typedef RecordPool Logfile_group_pool; - typedef DLFifoListImpl Logfile_group_list; - typedef LocalDLFifoListImpl Local_logfile_group_list; - typedef KeyTableImpl Logfile_group_hash; - - /** - * Alloc/free space in log - * Alloction will be removed at either/or - * 1) Logfile_client::add_entry - * 2) free_log_space - */ - int alloc_log_space(Uint32 logfile_ref, Uint32 words); - int free_log_space(Uint32 logfile_ref, Uint32 words); - -private: - friend class Logfile_client; - - Undofile_pool m_file_pool; - Logfile_group_pool m_logfile_group_pool; - Log_waiter_pool m_log_waiter_pool; - - Page_map::DataBufferPool m_data_buffer_pool; - - Uint64 m_last_lsn; - Uint32 m_latest_lcp; - Logfile_group_list m_logfile_group_list; - Logfile_group_hash m_logfile_group_hash; - - bool alloc_logbuffer_memory(Ptr, Uint32 pages); - void init_logbuffer_pointers(Ptr); - void free_logbuffer_memory(Ptr); - Uint32 compute_free_file_pages(Ptr); - Uint32* get_log_buffer(Ptr, Uint32 sz); - void process_log_buffer_waiters(Signal* signal, Ptr); - Uint32 next_page(Logfile_group* ptrP, Uint32 i); - - void force_log_sync(Signal*, Ptr, Uint32 lsnhi, Uint32 lnslo); - void process_log_sync_waiters(Signal* signal, Ptr); - - void cut_log_tail(Signal*, Ptr ptr); - void endlcp_callback(Signal*, Uint32, Uint32); - void open_file(Signal*, Ptr, Uint32 requestInfo); - - void flush_log(Signal*, Ptr, Uint32 force); - Uint32 write_log_pages(Signal*, Ptr, - Uint32 pageId, Uint32 pages); - - void find_log_head(Signal* signal, Ptr ptr); - void find_log_head_in_file(Signal*, Ptr,Ptr,Uint64); - - void init_run_undo_log(Signal*); - void 
read_undo_log(Signal*, Ptr ptr); - Uint32 read_undo_pages(Signal*, Ptr, - Uint32 pageId, Uint32 pages); - - void execute_undo_record(Signal*); - const Uint32* get_next_undo_record(Uint64* lsn); - void stop_run_undo_log(Signal* signal); - void init_tail_ptr(Signal* signal, Ptr ptr); - - bool find_file_by_id(Ptr&, Undofile_list::Head&, Uint32 id); - void create_file_commit(Signal* signal, Ptr, Ptr); - void create_file_abort(Signal* signal, Ptr, Ptr); - -#ifdef VM_TRACE - void validate_logfile_group(Ptr ptr, const char * = 0); -#else - void validate_logfile_group(Ptr ptr, const char * = 0) {} -#endif - - void drop_filegroup_drop_files(Signal*, Ptr, - Uint32 ref, Uint32 data); -}; - -class Logfile_client { - Uint32 m_block; - Lgman * m_lgman; -public: - Uint32 m_logfile_group_id; - - Logfile_client() {} - Logfile_client(SimulatedBlock* block, Lgman*, Uint32 logfile_group_id); - - struct Request - { - SimulatedBlock::Callback m_callback; - }; - - /** - * Request flags - */ - enum RequestFlags - { - }; - - /** - * Make sure a lsn is stored - * @return -1, on error - * 0, request in queued - * >0, done - */ - int sync_lsn(Signal*, Uint64, Request*, Uint32 flags); - - /** - * Undolog entries - */ - struct Change - { - const void * ptr; - Uint32 len; - }; - - Uint64 add_entry(const void*, Uint32 len); - Uint64 add_entry(const Change*, Uint32 cnt); - - Uint64 add_entry(Local_key, void * base, Change*); - Uint64 add_entry(Local_key, Uint32 off, Uint32 change); - - /** - * Check for space in log buffer - * - * return >0 if available - * 0 on time slice - * -1 on error - */ - int get_log_buffer(Signal*, Uint32 sz, SimulatedBlock::Callback* m_callback); - -private: - Uint32* get_log_buffer(Uint32 sz); -}; - - -#endif diff --git a/storage/ndb/src/kernel/blocks/mutexes.hpp b/storage/ndb/src/kernel/blocks/mutexes.hpp deleted file mode 100644 index 64df5d45d27..00000000000 --- a/storage/ndb/src/kernel/blocks/mutexes.hpp +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef KERNEL_MUTEXES_HPP -#define KERNEL_MUTEXES_HPP - -#include - -/** - * This mutex is used by: - * DIH - before sending START_LCP to all participants - * DICT - before commiting a CREATE TABLE - * BACKUP - before sending DEFINE_BACKUP - */ -#define DIH_START_LCP_MUTEX 0 -#define DICT_COMMIT_TABLE_MUTEX 0 - -/** - * This mutex is used by - * DIH - before switching primary replica - * BACKUP - before sending DEFINE_BACKUP - */ -#define DIH_SWITCH_PRIMARY_MUTEX 1 -#define BACKUP_DEFINE_MUTEX 1 - -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp deleted file mode 100644 index 03ed858db1e..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp +++ /dev/null @@ -1,388 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef NDBCNTR_H -#define NDBCNTR_H - - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#ifdef NDBCNTR_C -/* -2.1 GLOBAL SYMBOLS ------------------- -*/ -/* -2.2 LOCAL SYMBOLS ------------------ -*/ -#define ZNO_NDB_BLOCKS 6 /* ACC, DICT, DIH, LQH, TC, TUP */ - -#define ZNOT_AVAILABLE 913 - -//------- OTHERS --------------------------------------------- -#define ZSTARTUP 1 -#define ZSHUTDOWN 2 - -#define ZSIZE_NDB_BLOCKS_REC 16 /* MAX BLOCKS IN NDB */ -#define ZSIZE_SYSTAB 2048 -#define ZSTART_PHASE_1 1 -#define ZSTART_PHASE_2 2 -#define ZSTART_PHASE_3 3 -#define ZSTART_PHASE_4 4 -#define ZSTART_PHASE_5 5 -#define ZSTART_PHASE_6 6 -#define ZSTART_PHASE_7 7 -#define ZSTART_PHASE_8 8 -#define ZSTART_PHASE_9 9 -#define ZSTART_PHASE_END 255 -#define ZWAITPOINT_4_1 1 -#define ZWAITPOINT_4_2 2 -#define ZWAITPOINT_5_1 3 -#define ZWAITPOINT_5_2 4 -#define ZWAITPOINT_6_1 5 -#define ZWAITPOINT_6_2 6 -#define ZWAITPOINT_7_1 7 -#define ZWAITPOINT_7_2 8 -#define ZSYSTAB_VERSION 1 -#endif - -class Ndbcntr: public SimulatedBlock { -public: -// Records - -/* FSREADREQ FSWRITEREQ */ -/** - * 2.3 RECORDS AND FILESIZES - * ------------------------------------------------------------ - */ - - struct StartRecord { - StartRecord() {} - Uint64 m_startTime; - - void reset(); - NdbNodeBitmask m_starting; - NdbNodeBitmask m_waiting; // == (m_withLog | m_withoutLog) - NdbNodeBitmask m_withLog; - NdbNodeBitmask m_withoutLog; - Uint32 m_lastGci; - Uint32 m_lastGciNodeId; - - Uint64 m_startPartialTimeout; - Uint64 m_startPartitionedTimeout; - Uint64 m_startFailureTimeout; - struct { - Uint32 m_nodeId; - Uint32 m_lastGci; - } m_logNodes[MAX_NDB_NODES]; - Uint32 m_logNodesCount; - } c_start; - - struct NdbBlocksRec { - BlockReference blockref; - }; /* p2c: size = 
2 bytes */ - - typedef Ptr NdbBlocksRecPtr; - - /** - * Ndbcntr creates and initializes system tables on initial system start. - * The tables are defined in static structs in NdbcntrSysTable.cpp. - */ - struct SysColumn { - unsigned pos; - const char* name; - // DictTabInfo - DictTabInfo::ExtType type; - Uint32 length; - bool keyFlag; - bool nullable; - }; - struct SysTable { - const char* name; - unsigned columnCount; - const SysColumn* columnList; - // DictTabInfo - DictTabInfo::TableType tableType; - DictTabInfo::FragmentType fragmentType; - bool tableLoggedFlag; - // saved table id - mutable Uint32 tableId; - }; - struct SysIndex { - const char* name; - const SysTable* primaryTable; - Uint32 columnCount; - Uint32 columnList[4]; - // DictTabInfo - DictTabInfo::TableType indexType; - DictTabInfo::FragmentType fragmentType; - bool indexLoggedFlag; - // saved index table id - mutable Uint32 indexId; - }; - static const SysTable* g_sysTableList[]; - static const unsigned g_sysTableCount; - // the system tables - static const SysTable g_sysTable_SYSTAB_0; - static const SysTable g_sysTable_NDBEVENTS_0; - -public: - Ndbcntr(Block_context&); - virtual ~Ndbcntr(); - -private: - BLOCK_DEFINES(Ndbcntr); - - // Transit signals - void execAPI_START_REP(Signal*); - void execCONTINUEB(Signal* signal); - void execREAD_NODESCONF(Signal* signal); - void execREAD_NODESREF(Signal* signal); - void execCM_ADD_REP(Signal* signal); - void execCNTR_START_REQ(Signal* signal); - void execCNTR_START_REF(Signal* signal); - void execCNTR_START_CONF(Signal* signal); - void execCNTR_START_REP(Signal* signal); - void execCNTR_WAITREP(Signal* signal); - void execNODE_FAILREP(Signal* signal); - void execSYSTEM_ERROR(Signal* signal); - - // Received signals - void execDUMP_STATE_ORD(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); - void execSTTOR(Signal* signal); - void execTCSEIZECONF(Signal* signal); - void execTCSEIZEREF(Signal* signal); - void execTCRELEASECONF(Signal* signal); - void execTCRELEASEREF(Signal* signal); - void execTCKEYCONF(Signal* signal); - void execTCKEYREF(Signal* signal); - void execTCROLLBACKREP(Signal* signal); - void execGETGCICONF(Signal* signal); - void execDIH_RESTARTCONF(Signal* signal); - void execDIH_RESTARTREF(Signal* signal); - void execCREATE_TABLE_REF(Signal* signal); - void execCREATE_TABLE_CONF(Signal* signal); - void execNDB_STTORRY(Signal* signal); - void execNDB_STARTCONF(Signal* signal); - void execREAD_NODESREQ(Signal* signal); - void execNDB_STARTREF(Signal* signal); - - void execSTOP_PERM_REF(Signal* signal); - void execSTOP_PERM_CONF(Signal* signal); - - void execSTOP_ME_REF(Signal* signal); - void execSTOP_ME_CONF(Signal* signal); - - void execWAIT_GCP_REF(Signal* signal); - void execWAIT_GCP_CONF(Signal* signal); - - void execSTOP_REQ(Signal* signal); - void execSTOP_CONF(Signal* signal); - void execRESUME_REQ(Signal* signal); - - void execCHANGE_NODE_STATE_CONF(Signal* signal); - - void execABORT_ALL_REF(Signal* signal); - void execABORT_ALL_CONF(Signal* signal); - - // Statement blocks - void sendCreateTabReq(Signal* signal, const char* buffer, Uint32 bufLen); - void startInsertTransactions(Signal* signal); - void initData(Signal* signal); - void resetStartVariables(Signal* signal); - void sendCntrStartReq(Signal* signal); - void sendCntrStartRef(Signal*, Uint32 nodeId, CntrStartRef::ErrorCode); - void sendNdbSttor(Signal* signal); - void sendSttorry(Signal* signal); - - bool trySystemRestart(Signal* signal); - void startWaitingNodes(Signal* signal); - 
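Ndbcntr::StartRecord above tracks the starting, waiting, logged and log-less nodes as bitmasks and notes the invariant m_waiting == (m_withLog | m_withoutLog). A small stand-in using std::bitset (the real code uses NdbNodeBitmask) shows the bookkeeping, including how failed nodes are cleared from every set:

// Illustrative only; sizes and node numbers are invented.
#include <bitset>
#include <cassert>
#include <cstddef>

int main() {
  constexpr std::size_t kMaxNodes = 49;     // stand-in for MAX_NDB_NODES
  std::bitset<kMaxNodes> waiting, withLog, withoutLog;

  withLog.set(2);      // node 2 restarts with a redo log
  withoutLog.set(3);   // node 3 performs an initial start
  waiting = withLog | withoutLog;
  assert(waiting == (withLog | withoutLog));   // documented invariant

  // On NODE_FAILREP the failed nodes are removed from every set.
  std::bitset<kMaxNodes> failed;
  failed.set(3);
  waiting    &= ~failed;
  withLog    &= ~failed;
  withoutLog &= ~failed;
  assert(waiting == (withLog | withoutLog));   // invariant still holds
  return 0;
}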
CheckNodeGroups::Output checkNodeGroups(Signal*, const NdbNodeBitmask &); - - // Generated statement blocks - void systemErrorLab(Signal* signal, int line); - - void createSystableLab(Signal* signal, unsigned index); - void crSystab7Lab(Signal* signal); - void crSystab8Lab(Signal* signal); - void crSystab9Lab(Signal* signal); - - void startPhase1Lab(Signal* signal); - void startPhase2Lab(Signal* signal); - void startPhase3Lab(Signal* signal); - void startPhase4Lab(Signal* signal); - void startPhase5Lab(Signal* signal); - // jump 2 to resync phase counters - void startPhase8Lab(Signal* signal); - void startPhase9Lab(Signal* signal); - void ph2ALab(Signal* signal); - void ph2CLab(Signal* signal); - void ph2ELab(Signal* signal); - void ph2FLab(Signal* signal); - void ph2GLab(Signal* signal); - void ph3ALab(Signal* signal); - void ph4ALab(Signal* signal); - void ph4BLab(Signal* signal); - void ph4CLab(Signal* signal); - void ph5ALab(Signal* signal); - void ph6ALab(Signal* signal); - void ph6BLab(Signal* signal); - void ph7ALab(Signal* signal); - void ph8ALab(Signal* signal); - - - void waitpoint41Lab(Signal* signal); - void waitpoint51Lab(Signal* signal); - void waitpoint52Lab(Signal* signal); - void waitpoint61Lab(Signal* signal); - void waitpoint71Lab(Signal* signal); - - void updateNodeState(Signal* signal, const NodeState & newState) const ; - void getNodeGroup(Signal* signal); - - // Initialisation - void initData(); - void initRecords(); - - // Variables - /**------------------------------------------------------------------------ - * CONTAIN INFO ABOUT ALL NODES IN CLUSTER. NODE_PTR ARE USED AS NODE NUMBER - * IF THE STATE ARE ZDELETE THEN THE NODE DOESN'T EXIST. NODES ARE ALLOWED - * TO REGISTER (ZADD) DURING RESTART. - * - * WHEN THE SYSTEM IS RUNNING THE MASTER WILL CHECK IF ANY NODE HAS MADE - * A CNTR_MASTERREQ AND TAKE CARE OF THE REQUEST. - * TO CONFIRM THE REQ, THE MASTER DEMANDS THAT ALL RUNNING NODES HAS VOTED - * FOR THE NEW NODE. 
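StopRecord above walks a graceful stop through an ordered set of steps (block GCP start, wait for a complete GCP, unblock, ask QMGR to stop, wait for the node failures). A hypothetical step-advancing helper, with invented names, mirrors only that progression:

// Illustrative only; the real enum is StopRecord::StopNodesStep.
#include <cassert>

enum class StopStep {
  BlockGcp, WaitCompleteGcp, UnblockGcp, QmgrStopReq, WaitNodeFailures, Done
};

static StopStep next(StopStep s) {
  switch (s) {
    case StopStep::BlockGcp:        return StopStep::WaitCompleteGcp;
    case StopStep::WaitCompleteGcp: return StopStep::UnblockGcp;
    case StopStep::UnblockGcp:      return StopStep::QmgrStopReq;
    case StopStep::QmgrStopReq:     return StopStep::WaitNodeFailures;
    default:                        return StopStep::Done;
  }
}

int main() {
  StopStep s = StopStep::BlockGcp;
  int hops = 0;
  while (s != StopStep::Done) { s = next(s); ++hops; }
  assert(hops == 5);   // five transitions reach the terminal state
  return 0;
}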
- * NODE_PTR:MASTER_REQ IS USED DURING RESTART TO LOG - * POSTPONED CNTR_MASTERREQ'S - *------------------------------------------------------------------------*/ - NdbBlocksRec *ndbBlocksRec; - - /* - 2.4 COMMON STORED VARIABLES - */ - UintR cgciSystab; - UintR ckey; - //UintR csystabId; - UintR cnoWaitrep6; - UintR cnoWaitrep7; - UintR ctcConnectionP; - UintR ctcReqInfo; - Uint8 ctransidPhase; - Uint16 cresponses; - - Uint8 cstartPhase; - Uint16 cinternalStartphase; - - Uint16 cmasterNodeId; - Uint16 cndbBlocksCount; - Uint16 cnoStartNodes; - UintR cnoWaitrep; - NodeState::StartType ctypeOfStart; - Uint16 cdynamicNodeId; - - Uint32 c_fsRemoveCount; - Uint32 c_nodeGroup; - void clearFilesystem(Signal* signal); - void execFSREMOVECONF(Signal* signal); - - NdbNodeBitmask c_allDefinedNodes; - NdbNodeBitmask c_clusterNodes; // All members of qmgr cluster - NdbNodeBitmask c_startedNodes; // All cntr started nodes - -public: - struct StopRecord { - public: - StopRecord(Ndbcntr & _cntr) : cntr(_cntr) { - stopReq.senderRef = 0; - } - - Ndbcntr & cntr; - StopReq stopReq; // Signal data - NDB_TICKS stopInitiatedTime; // When was the stop initiated - - bool checkNodeFail(Signal* signal); - void checkTimeout(Signal* signal); - void checkApiTimeout(Signal* signal); - void checkTcTimeout(Signal* signal); - void checkLqhTimeout_1(Signal* signal); - void checkLqhTimeout_2(Signal* signal); - - BlockNumber number() const { return cntr.number(); } - void progError(int line, int cause, const char * extra) { - cntr.progError(line, cause, extra); - } - - enum StopNodesStep { - SR_BLOCK_GCP_START_GCP = 0, - SR_WAIT_COMPLETE_GCP = 1, - SR_UNBLOCK_GCP_START_GCP = 2, - SR_QMGR_STOP_REQ = 3, - SR_WAIT_NODE_FAILURES = 4, - SR_CLUSTER_SHUTDOWN = 12 - } m_state; - SignalCounter m_stop_req_counter; - }; -private: - StopRecord c_stopRec; - friend struct StopRecord; - - struct Missra { - Missra(Ndbcntr & ref) : cntr(ref) { } - - Uint32 currentBlockIndex; - Uint32 currentStartPhase; - Uint32 nextStartPhase[NO_OF_BLOCKS]; - - void execSTART_ORD(Signal* signal); - void execSTTORRY(Signal* signal); - void sendNextSTTOR(Signal* signal); - void execREAD_CONFIG_CONF(Signal* signal); - void sendNextREAD_CONFIG_REQ(Signal* signal); - - BlockNumber number() const { return cntr.number(); } - void progError(int line, int cause, const char * extra) { - cntr.progError(line, cause, extra); - } - Ndbcntr & cntr; - }; - - Missra c_missra; - friend struct Missra; - - void execSTTORRY(Signal* signal); - void execSTART_ORD(Signal* signal); - void execREAD_CONFIG_CONF(Signal*); - - friend struct UpgradeStartup; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp deleted file mode 100644 index 3b1120926f6..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - - - -#define NDBCNTR_C -#include "Ndbcntr.hpp" -#include - -#define DEBUG(x) { ndbout << "Ndbcntr::" << x << endl; } - - -void Ndbcntr::initData() -{ - c_start.reset(); - cmasterNodeId = 0; - cnoStartNodes = 0; - cnoWaitrep = 0; - // Records with constant sizes - ndbBlocksRec = new NdbBlocksRec[ZSIZE_NDB_BLOCKS_REC]; -}//Ndbcntr::initData() - -void Ndbcntr::initRecords() -{ - // Records with dynamic sizes -}//Ndbcntr::initRecords() - -Ndbcntr::Ndbcntr(Block_context& ctx): - SimulatedBlock(NDBCNTR, ctx), - cnoWaitrep6(0), - cnoWaitrep7(0), - c_stopRec(* this), - c_missra(* this) -{ - - BLOCK_CONSTRUCTOR(Ndbcntr); - - // Transit signals - addRecSignal(GSN_CONTINUEB, &Ndbcntr::execCONTINUEB); - addRecSignal(GSN_READ_NODESCONF, &Ndbcntr::execREAD_NODESCONF); - addRecSignal(GSN_READ_NODESREF, &Ndbcntr::execREAD_NODESREF); - addRecSignal(GSN_CM_ADD_REP, &Ndbcntr::execCM_ADD_REP); - addRecSignal(GSN_CNTR_START_REQ, &Ndbcntr::execCNTR_START_REQ); - addRecSignal(GSN_CNTR_START_REF, &Ndbcntr::execCNTR_START_REF); - addRecSignal(GSN_CNTR_START_CONF, &Ndbcntr::execCNTR_START_CONF); - addRecSignal(GSN_CNTR_WAITREP, &Ndbcntr::execCNTR_WAITREP); - addRecSignal(GSN_CNTR_START_REP, &Ndbcntr::execCNTR_START_REP); - addRecSignal(GSN_API_START_REP, &Ndbcntr::execAPI_START_REP, true); - addRecSignal(GSN_NODE_FAILREP, &Ndbcntr::execNODE_FAILREP); - addRecSignal(GSN_SYSTEM_ERROR , &Ndbcntr::execSYSTEM_ERROR); - - // Received signals - addRecSignal(GSN_DUMP_STATE_ORD, &Ndbcntr::execDUMP_STATE_ORD); - addRecSignal(GSN_READ_CONFIG_REQ, &Ndbcntr::execREAD_CONFIG_REQ); - addRecSignal(GSN_STTOR, &Ndbcntr::execSTTOR); - addRecSignal(GSN_TCSEIZECONF, &Ndbcntr::execTCSEIZECONF); - addRecSignal(GSN_TCSEIZEREF, &Ndbcntr::execTCSEIZEREF); - addRecSignal(GSN_TCRELEASECONF, &Ndbcntr::execTCRELEASECONF); - addRecSignal(GSN_TCRELEASEREF, &Ndbcntr::execTCRELEASEREF); - addRecSignal(GSN_TCKEYCONF, &Ndbcntr::execTCKEYCONF); - addRecSignal(GSN_TCKEYREF, &Ndbcntr::execTCKEYREF); - addRecSignal(GSN_TCROLLBACKREP, &Ndbcntr::execTCROLLBACKREP); - addRecSignal(GSN_GETGCICONF, &Ndbcntr::execGETGCICONF); - addRecSignal(GSN_DIH_RESTARTCONF, &Ndbcntr::execDIH_RESTARTCONF); - addRecSignal(GSN_DIH_RESTARTREF, &Ndbcntr::execDIH_RESTARTREF); - addRecSignal(GSN_CREATE_TABLE_REF, &Ndbcntr::execCREATE_TABLE_REF); - addRecSignal(GSN_CREATE_TABLE_CONF, &Ndbcntr::execCREATE_TABLE_CONF); - addRecSignal(GSN_NDB_STTORRY, &Ndbcntr::execNDB_STTORRY); - addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF); - addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ); - addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF); - - addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF); - addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF); - - addRecSignal(GSN_STOP_ME_REF, &Ndbcntr::execSTOP_ME_REF); - addRecSignal(GSN_STOP_ME_CONF, &Ndbcntr::execSTOP_ME_CONF); - - addRecSignal(GSN_STOP_REQ, &Ndbcntr::execSTOP_REQ); - addRecSignal(GSN_STOP_CONF, &Ndbcntr::execSTOP_CONF); - addRecSignal(GSN_RESUME_REQ, &Ndbcntr::execRESUME_REQ); - - addRecSignal(GSN_WAIT_GCP_REF, &Ndbcntr::execWAIT_GCP_REF); - addRecSignal(GSN_WAIT_GCP_CONF, &Ndbcntr::execWAIT_GCP_CONF); - addRecSignal(GSN_CHANGE_NODE_STATE_CONF, - &Ndbcntr::execCHANGE_NODE_STATE_CONF); - - addRecSignal(GSN_ABORT_ALL_REF, &Ndbcntr::execABORT_ALL_REF); - 
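The Ndbcntr constructor above registers one handler per global signal number via addRecSignal(). A self-contained stand-in, with invented GSN values and a plain map from signal number to member function, shows the dispatch pattern:

// Illustrative only; not the SimulatedBlock/addRecSignal implementation.
#include <cstdio>
#include <unordered_map>

class Block {
 public:
  using Handler = void (Block::*)(int);
  void addRecSignal(int gsn, Handler h) { handlers_[gsn] = h; }
  void receive(int gsn, int data) {
    auto it = handlers_.find(gsn);
    if (it != handlers_.end()) (this->*it->second)(data);  // dispatch by GSN
  }
  void execSTTOR(int phase) { std::printf("STTOR phase %d\n", phase); }
  void execCONTINUEB(int)   { std::printf("CONTINUEB\n"); }
 private:
  std::unordered_map<int, Handler> handlers_;
};

int main() {
  enum { GSN_STTOR = 1, GSN_CONTINUEB = 2 };  // invented signal numbers
  Block b;
  b.addRecSignal(GSN_STTOR, &Block::execSTTOR);
  b.addRecSignal(GSN_CONTINUEB, &Block::execCONTINUEB);
  b.receive(GSN_STTOR, 3);
  return 0;
}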
addRecSignal(GSN_ABORT_ALL_CONF, &Ndbcntr::execABORT_ALL_CONF); - - addRecSignal(GSN_START_ORD, &Ndbcntr::execSTART_ORD); - addRecSignal(GSN_STTORRY, &Ndbcntr::execSTTORRY); - addRecSignal(GSN_READ_CONFIG_CONF, &Ndbcntr::execREAD_CONFIG_CONF); - - addRecSignal(GSN_FSREMOVECONF, &Ndbcntr::execFSREMOVECONF); - - initData(); - ctypeOfStart = NodeState::ST_ILLEGAL_TYPE; - c_start.m_startTime = NdbTick_CurrentMillisecond(); -}//Ndbcntr::Ndbcntr() - -Ndbcntr::~Ndbcntr() -{ - delete []ndbBlocksRec; - -}//Ndbcntr::~Ndbcntr() - -BLOCK_FUNCTIONS(Ndbcntr) diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp deleted file mode 100644 index 4c58509f9a1..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ /dev/null @@ -1,3094 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#define NDBCNTR_C -#include "Ndbcntr.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include - -#include -#include -#include - -#include -#include - -// used during shutdown for reporting current startphase -// accessed from Emulator.cpp, NdbShutdown() -Uint32 g_currentStartPhase; - -/** - * ALL_BLOCKS Used during start phases and while changing node state - * - * NDBFS_REF Has to be before NDBCNTR_REF (due to "ndb -i" stuff) - */ -struct BlockInfo { - BlockReference Ref; // BlockReference - Uint32 NextSP; // Next start phase - Uint32 ErrorInsertStart; - Uint32 ErrorInsertStop; -}; - -static BlockInfo ALL_BLOCKS[] = { - { NDBFS_REF, 0 , 2000, 2999 }, - { DBTC_REF, 1 , 8000, 8035 }, - { DBDIH_REF, 1 , 7000, 7173 }, - { DBLQH_REF, 1 , 5000, 5030 }, - { DBACC_REF, 1 , 3000, 3999 }, - { DBTUP_REF, 1 , 4000, 4007 }, - { DBDICT_REF, 1 , 6000, 6003 }, - { NDBCNTR_REF, 0 , 1000, 1999 }, - { CMVMI_REF, 1 , 9000, 9999 }, // before QMGR - { QMGR_REF, 1 , 1, 999 }, - { TRIX_REF, 1 , 0, 0 }, - { BACKUP_REF, 1 , 10000, 10999 }, - { DBUTIL_REF, 1 , 11000, 11999 }, - { SUMA_REF, 1 , 13000, 13999 }, - { DBTUX_REF, 1 , 12000, 12999 } - ,{ TSMAN_REF, 1 , 0, 0 } - ,{ LGMAN_REF, 1 , 0, 0 } - ,{ PGMAN_REF, 1 , 0, 0 } - ,{ RESTORE_REF,1 , 0, 0 } -}; - -static const Uint32 ALL_BLOCKS_SZ = sizeof(ALL_BLOCKS)/sizeof(BlockInfo); - -static BlockReference readConfigOrder[ALL_BLOCKS_SZ] = { - CMVMI_REF, - DBTUP_REF, - DBACC_REF, - DBTC_REF, - DBLQH_REF, - DBTUX_REF, - DBDICT_REF, - DBDIH_REF, - NDBFS_REF, - NDBCNTR_REF, - QMGR_REF, - TRIX_REF, - BACKUP_REF, - DBUTIL_REF, - SUMA_REF, - TSMAN_REF, - LGMAN_REF, - PGMAN_REF, - RESTORE_REF -}; - -/*******************************/ -/* CONTINUEB */ -/*******************************/ -void Ndbcntr::execCONTINUEB(Signal* signal) -{ - jamEntry(); - UintR Ttemp1 
= signal->theData[0]; - switch (Ttemp1) { - case ZSTARTUP:{ - if(getNodeState().startLevel == NodeState::SL_STARTED){ - jam(); - return; - } - - if(cmasterNodeId == getOwnNodeId() && c_start.m_starting.isclear()){ - jam(); - trySystemRestart(signal); - // Fall-through - } - - Uint64 now = NdbTick_CurrentMillisecond(); - if(now > c_start.m_startFailureTimeout) - { - jam(); - Uint32 to_3= 0; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3); - BaseString tmp; - tmp.append("Shutting down node as total restart time exceeds " - " StartFailureTimeout as set in config file "); - if(to_3 == 0) - tmp.append(" 0 (inifinite)"); - else - tmp.appfmt(" %d", to_3); - - progError(__LINE__, NDBD_EXIT_RESTART_TIMEOUT, tmp.c_str()); - } - - signal->theData[0] = ZSTARTUP; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1); - break; - } - case ZSHUTDOWN: - jam(); - c_stopRec.checkTimeout(signal); - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch -}//Ndbcntr::execCONTINUEB() - -void -Ndbcntr::execAPI_START_REP(Signal* signal) -{ - if(refToBlock(signal->getSendersBlockRef()) == QMGR) - { - for(Uint32 i = 0; igetDataPtr(); - char buf[100]; - int killingNode = refToNode(sysErr->errorRef); - Uint32 data1 = sysErr->data1; - - jamEntry(); - switch (sysErr->errorCode){ - case SystemError::GCPStopDetected: - BaseString::snprintf(buf, sizeof(buf), - "Node %d killed this node because " - "GCP stop was detected", - killingNode); - break; - - case SystemError::CopyFragRefError: - CRASH_INSERTION(1000); - BaseString::snprintf(buf, sizeof(buf), - "Killed by node %d as " - "copyfrag failed, error: %u", - killingNode, data1); - break; - - case SystemError::StartFragRefError: - BaseString::snprintf(buf, sizeof(buf), - "Node %d killed this node because " - "it replied StartFragRef error code: %u.", - killingNode, data1); - break; - - case SystemError::CopySubscriptionRef: - BaseString::snprintf(buf, sizeof(buf), - "Node %d killed this node because " - "it could not copy a subscription during node restart. " - "Copy subscription error code: %u.", - killingNode, data1); - break; - case SystemError::CopySubscriberRef: - BaseString::snprintf(buf, sizeof(buf), - "Node %d killed this node because " - "it could not start a subscriber during node restart. 
" - "Copy subscription error code: %u.", - killingNode, data1); - break; - default: - BaseString::snprintf(buf, sizeof(buf), "System error %d, " - " this node was killed by node %d", - sysErr->errorCode, killingNode); - break; - } - - progError(__LINE__, NDBD_EXIT_SYSTEM_ERROR, buf); - return; -}//Ndbcntr::execSYSTEM_ERROR() - -void -Ndbcntr::execREAD_CONFIG_REQ(Signal* signal) -{ - jamEntry(); - - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -void Ndbcntr::execSTTOR(Signal* signal) -{ - jamEntry(); - cstartPhase = signal->theData[1]; - - cndbBlocksCount = 0; - cinternalStartphase = cstartPhase - 1; - - switch (cstartPhase) { - case 0: - if(m_ctx.m_config.getInitialStart()){ - jam(); - c_fsRemoveCount = 0; - clearFilesystem(signal); - return; - } - sendSttorry(signal); - break; - case ZSTART_PHASE_1: - jam(); - { - Uint32 db_watchdog_interval = 0; - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndb_mgm_get_int_parameter(p, CFG_DB_WATCHDOG_INTERVAL, &db_watchdog_interval); - ndbrequire(db_watchdog_interval); - update_watch_dog_timer(db_watchdog_interval); - } - startPhase1Lab(signal); - break; - case ZSTART_PHASE_2: - jam(); - startPhase2Lab(signal); - break; - case ZSTART_PHASE_3: - jam(); - startPhase3Lab(signal); - break; - case ZSTART_PHASE_4: - jam(); - startPhase4Lab(signal); - break; - case ZSTART_PHASE_5: - jam(); - startPhase5Lab(signal); - break; - case 6: - jam(); - getNodeGroup(signal); - // Fall through - break; - case ZSTART_PHASE_8: - jam(); - startPhase8Lab(signal); - break; - case ZSTART_PHASE_9: - jam(); - startPhase9Lab(signal); - break; - default: - jam(); - sendSttorry(signal); - break; - }//switch -}//Ndbcntr::execSTTOR() - -void -Ndbcntr::getNodeGroup(Signal* signal){ - jam(); - CheckNodeGroups * sd = (CheckNodeGroups*)signal->getDataPtrSend(); - sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::GetNodeGroup; - EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal, - CheckNodeGroups::SignalLength); - jamEntry(); - c_nodeGroup = sd->output; - sendSttorry(signal); -} - -/*******************************/ -/* NDB_STTORRY */ -/*******************************/ -void Ndbcntr::execNDB_STTORRY(Signal* signal) -{ - jamEntry(); - switch (cstartPhase) { - case ZSTART_PHASE_2: - jam(); - ph2GLab(signal); - return; - break; - case ZSTART_PHASE_3: - jam(); - ph3ALab(signal); - return; - break; - case ZSTART_PHASE_4: - jam(); - ph4BLab(signal); - return; - break; - case ZSTART_PHASE_5: - jam(); - ph5ALab(signal); - return; - break; - case ZSTART_PHASE_6: - jam(); - ph6ALab(signal); - return; - break; - case ZSTART_PHASE_7: - jam(); - ph6BLab(signal); - return; - break; - case ZSTART_PHASE_8: - jam(); - ph7ALab(signal); - return; - break; - case ZSTART_PHASE_9: - jam(); - ph8ALab(signal); - return; - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - return; - break; - }//switch -}//Ndbcntr::execNDB_STTORRY() - -void Ndbcntr::startPhase1Lab(Signal* signal) -{ - jamEntry(); - - initData(signal); - - cdynamicNodeId = 0; - - NdbBlocksRecPtr ndbBlocksPtr; - ndbBlocksPtr.i = 0; - ptrAss(ndbBlocksPtr, 
ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBLQH_REF; - ndbBlocksPtr.i = 1; - ptrAss(ndbBlocksPtr, ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBDICT_REF; - ndbBlocksPtr.i = 2; - ptrAss(ndbBlocksPtr, ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBTUP_REF; - ndbBlocksPtr.i = 3; - ptrAss(ndbBlocksPtr, ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBACC_REF; - ndbBlocksPtr.i = 4; - ptrAss(ndbBlocksPtr, ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBTC_REF; - ndbBlocksPtr.i = 5; - ptrAss(ndbBlocksPtr, ndbBlocksRec); - ndbBlocksPtr.p->blockref = DBDIH_REF; - sendSttorry(signal); - return; -} - -void Ndbcntr::execREAD_NODESREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execREAD_NODESREF() - - -/*******************************/ -/* NDB_STARTREF */ -/*******************************/ -void Ndbcntr::execNDB_STARTREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execNDB_STARTREF() - -/*******************************/ -/* STTOR */ -/*******************************/ -void Ndbcntr::startPhase2Lab(Signal* signal) -{ - c_start.m_lastGci = 0; - c_start.m_lastGciNodeId = getOwnNodeId(); - - signal->theData[0] = reference(); - sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB); - return; -}//Ndbcntr::startPhase2Lab() - -/*******************************/ -/* DIH_RESTARTCONF */ -/*******************************/ -void Ndbcntr::execDIH_RESTARTCONF(Signal* signal) -{ - jamEntry(); - //cmasterDihId = signal->theData[0]; - c_start.m_lastGci = signal->theData[1]; - ctypeOfStart = NodeState::ST_SYSTEM_RESTART; - ph2ALab(signal); - return; -}//Ndbcntr::execDIH_RESTARTCONF() - -/*******************************/ -/* DIH_RESTARTREF */ -/*******************************/ -void Ndbcntr::execDIH_RESTARTREF(Signal* signal) -{ - jamEntry(); - ctypeOfStart = NodeState::ST_INITIAL_START; - ph2ALab(signal); - return; -}//Ndbcntr::execDIH_RESTARTREF() - -void Ndbcntr::ph2ALab(Signal* signal) -{ - /******************************/ - /* request configured nodes */ - /* from QMGR */ - /* READ_NODESREQ */ - /******************************/ - signal->theData[0] = reference(); - sendSignal(QMGR_REF, GSN_READ_NODESREQ, signal, 1, JBB); - return; -}//Ndbcntr::ph2ALab() - -inline -Uint64 -setTimeout(Uint64 time, Uint32 timeoutValue){ - if(timeoutValue == 0) - return ~(Uint64)0; - return time + timeoutValue; -} - -/*******************************/ -/* READ_NODESCONF */ -/*******************************/ -void Ndbcntr::execREAD_NODESCONF(Signal* signal) -{ - jamEntry(); - const ReadNodesConf * readNodes = (ReadNodesConf *)&signal->theData[0]; - - cmasterNodeId = readNodes->masterNodeId; - cdynamicNodeId = readNodes->ndynamicId; - - /** - * All defined nodes... 
- */ - c_allDefinedNodes.assign(NdbNodeBitmask::Size, readNodes->allNodes); - c_clusterNodes.assign(NdbNodeBitmask::Size, readNodes->clusterNodes); - - Uint32 to_1 = 30000; - Uint32 to_2 = 0; - Uint32 to_3 = 0; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - - ndbrequire(p != 0); - ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT, &to_1); - ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2); - ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3); - - c_start.m_startTime = NdbTick_CurrentMillisecond(); - c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1); - c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2); - c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3); - - UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD - - sendCntrStartReq(signal); - - signal->theData[0] = ZSTARTUP; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1); - - return; -} - -void -Ndbcntr::execCM_ADD_REP(Signal* signal){ - jamEntry(); - c_clusterNodes.set(signal->theData[0]); -} - -void -Ndbcntr::sendCntrStartReq(Signal * signal){ - jamEntry(); - - CntrStartReq * req = (CntrStartReq*)signal->getDataPtrSend(); - req->startType = ctypeOfStart; - req->lastGci = c_start.m_lastGci; - req->nodeId = getOwnNodeId(); - sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_START_REQ, - signal, CntrStartReq::SignalLength, JBB); -} - -void -Ndbcntr::execCNTR_START_REF(Signal * signal){ - jamEntry(); - const CntrStartRef * ref = (CntrStartRef*)signal->getDataPtr(); - - switch(ref->errorCode){ - case CntrStartRef::NotMaster: - jam(); - cmasterNodeId = ref->masterNodeId; - sendCntrStartReq(signal); - return; - case CntrStartRef::StopInProgress: - jam(); - progError(__LINE__, NDBD_EXIT_RESTART_DURING_SHUTDOWN); - } - ndbrequire(false); -} - -void -Ndbcntr::StartRecord::reset(){ - m_starting.clear(); - m_waiting.clear(); - m_withLog.clear(); - m_withoutLog.clear(); - m_lastGci = m_lastGciNodeId = 0; - m_startPartialTimeout = ~0; - m_startPartitionedTimeout = ~0; - m_startFailureTimeout = ~0; - - m_logNodesCount = 0; -} - -void -Ndbcntr::execCNTR_START_CONF(Signal * signal){ - jamEntry(); - const CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr(); - - cnoStartNodes = conf->noStartNodes; - ctypeOfStart = (NodeState::StartType)conf->startType; - c_start.m_lastGci = conf->startGci; - cmasterNodeId = conf->masterNodeId; - NdbNodeBitmask tmp; - tmp.assign(NdbNodeBitmask::Size, conf->startedNodes); - c_startedNodes.bitOR(tmp); - c_start.m_starting.assign(NdbNodeBitmask::Size, conf->startingNodes); - ph2GLab(signal); - - UpgradeStartup::sendCmAppChg(* this, signal, 2); //START -} - -/** - * Tried with parallell nr, but it crashed in DIH - * so I turned it off, as I don't want to debug DIH now... - * Jonas 19/11-03 - * - * After trying for 2 hours, I gave up. 
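setTimeout() above treats a configured value of 0 as "no timeout" by returning the all-ones 64-bit value, so the later `now > deadline` checks need no special case. A minimal sketch of that convention (names are illustrative):

// Illustrative only.
#include <cassert>
#include <cstdint>
#include <limits>

static uint64_t set_timeout(uint64_t now_ms, uint32_t timeout_ms) {
  if (timeout_ms == 0)
    return std::numeric_limits<uint64_t>::max();  // effectively never expires
  return now_ms + timeout_ms;
}

int main() {
  const uint64_t now = 1000;
  assert(set_timeout(now, 0) > now + 3600ull * 1000);  // "infinite" deadline
  assert(set_timeout(now, 30000) == 31000);            // e.g. 30 s partial-start deadline
  return 0;
}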
- * DIH is not designed to support it, and - * it requires quite of lot of changes to - * make it work - * Jonas 5/12-03 - */ -#define PARALLELL_NR 0 - -#if PARALLELL_NR -const bool parallellNR = true; -#else -const bool parallellNR = false; -#endif - -void -Ndbcntr::execCNTR_START_REP(Signal* signal){ - jamEntry(); - Uint32 nodeId = signal->theData[0]; - c_startedNodes.set(nodeId); - c_start.m_starting.clear(nodeId); - - /** - * Inform all interested blocks that node has started - */ - for(Uint32 i = 0; igetDataPtr(); - - const Uint32 nodeId = req->nodeId; - const Uint32 lastGci = req->lastGci; - const NodeState::StartType st = (NodeState::StartType)req->startType; - - if(cmasterNodeId == 0){ - jam(); - // Has not completed READNODES yet - sendSignalWithDelay(reference(), GSN_CNTR_START_REQ, signal, 100, - signal->getLength()); - return; - } - - if(cmasterNodeId != getOwnNodeId()){ - jam(); - sendCntrStartRef(signal, nodeId, CntrStartRef::NotMaster); - return; - } - - const NodeState & nodeState = getNodeState(); - switch(nodeState.startLevel){ - case NodeState::SL_NOTHING: - case NodeState::SL_CMVMI: - jam(); - ndbrequire(false); - case NodeState::SL_STARTING: - case NodeState::SL_STARTED: - jam(); - break; - - case NodeState::SL_STOPPING_1: - case NodeState::SL_STOPPING_2: - case NodeState::SL_STOPPING_3: - case NodeState::SL_STOPPING_4: - jam(); - sendCntrStartRef(signal, nodeId, CntrStartRef::StopInProgress); - return; - } - - /** - * Am I starting (or started) - */ - const bool starting = (nodeState.startLevel != NodeState::SL_STARTED); - - c_start.m_waiting.set(nodeId); - switch(st){ - case NodeState::ST_INITIAL_START: - jam(); - c_start.m_withoutLog.set(nodeId); - break; - case NodeState::ST_SYSTEM_RESTART: - jam(); - c_start.m_withLog.set(nodeId); - if(starting && lastGci > c_start.m_lastGci){ - jam(); - CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend(); - ref->errorCode = CntrStartRef::NotMaster; - ref->masterNodeId = nodeId; - NodeReceiverGroup rg (NDBCNTR, c_start.m_waiting); - sendSignal(rg, GSN_CNTR_START_REF, signal, - CntrStartRef::SignalLength, JBB); - return; - } - if(starting){ - jam(); - Uint32 i = c_start.m_logNodesCount++; - c_start.m_logNodes[i].m_nodeId = nodeId; - c_start.m_logNodes[i].m_lastGci = req->lastGci; - } - break; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - case NodeState::ST_ILLEGAL_TYPE: - ndbrequire(false); - } - - const bool startInProgress = !c_start.m_starting.isclear(); - - if((starting && startInProgress) || (startInProgress && !parallellNR)){ - jam(); - // We're already starting together with a bunch of nodes - // Let this node wait... - return; - } - - if(starting){ - jam(); - trySystemRestart(signal); - } else { - jam(); - startWaitingNodes(signal); - } - return; -} - -void -Ndbcntr::startWaitingNodes(Signal * signal){ - -#if ! 
PARALLELL_NR - const Uint32 nodeId = c_start.m_waiting.find(0); - const Uint32 Tref = calcNdbCntrBlockRef(nodeId); - ndbrequire(nodeId != c_start.m_waiting.NotFound); - - NodeState::StartType nrType = NodeState::ST_NODE_RESTART; - if(c_start.m_withoutLog.get(nodeId)){ - jam(); - nrType = NodeState::ST_INITIAL_NODE_RESTART; - } - - /** - * Let node perform restart - */ - CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend(); - conf->noStartNodes = 1; - conf->startType = nrType; - conf->startGci = ~0; // Not used - conf->masterNodeId = getOwnNodeId(); - BitmaskImpl::clear(NdbNodeBitmask::Size, conf->startingNodes); - BitmaskImpl::set(NdbNodeBitmask::Size, conf->startingNodes, nodeId); - c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes); - sendSignal(Tref, GSN_CNTR_START_CONF, signal, - CntrStartConf::SignalLength, JBB); - - c_start.m_waiting.clear(nodeId); - c_start.m_withLog.clear(nodeId); - c_start.m_withoutLog.clear(nodeId); - c_start.m_starting.set(nodeId); -#else - // Parallell nr - - c_start.m_starting = c_start.m_waiting; - c_start.m_waiting.clear(); - - CntrStartConf * conf = (CntrStartConf*)signal->getDataPtrSend(); - conf->noStartNodes = 1; - conf->startGci = ~0; // Not used - conf->masterNodeId = getOwnNodeId(); - c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes); - c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes); - - char buf[100]; - if(!c_start.m_withLog.isclear()){ - jam(); - ndbout_c("Starting nodes w/ log: %s", c_start.m_withLog.getText(buf)); - - NodeReceiverGroup rg(NDBCNTR, c_start.m_withLog); - conf->startType = NodeState::ST_NODE_RESTART; - - sendSignal(rg, GSN_CNTR_START_CONF, signal, - CntrStartConf::SignalLength, JBB); - } - - if(!c_start.m_withoutLog.isclear()){ - jam(); - ndbout_c("Starting nodes wo/ log: %s", c_start.m_withoutLog.getText(buf)); - NodeReceiverGroup rg(NDBCNTR, c_start.m_withoutLog); - conf->startType = NodeState::ST_INITIAL_NODE_RESTART; - - sendSignal(rg, GSN_CNTR_START_CONF, signal, - CntrStartConf::SignalLength, JBB); - } - - c_start.m_waiting.clear(); - c_start.m_withLog.clear(); - c_start.m_withoutLog.clear(); -#endif -} - -void -Ndbcntr::sendCntrStartRef(Signal * signal, - Uint32 nodeId, CntrStartRef::ErrorCode code){ - CntrStartRef * ref = (CntrStartRef*)signal->getDataPtrSend(); - ref->errorCode = code; - ref->masterNodeId = cmasterNodeId; - sendSignal(calcNdbCntrBlockRef(nodeId), GSN_CNTR_START_REF, signal, - CntrStartRef::SignalLength, JBB); -} - -CheckNodeGroups::Output -Ndbcntr::checkNodeGroups(Signal* signal, const NdbNodeBitmask & mask){ - CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0]; - sd->blockRef = reference(); - sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck; - sd->mask = mask; - EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal, - CheckNodeGroups::SignalLength); - jamEntry(); - return (CheckNodeGroups::Output)sd->output; -} - -bool -Ndbcntr::trySystemRestart(Signal* signal){ - /** - * System restart something - */ - const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes); - const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes); - - if(!allClusterNodes){ - jam(); - return false; - } - - NodeState::StartType srType = NodeState::ST_SYSTEM_RESTART; - if(c_start.m_waiting.equal(c_start.m_withoutLog)) - { - jam(); - srType = NodeState::ST_INITIAL_START; - c_start.m_starting = c_start.m_withoutLog; // Used for starting... 
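trySystemRestart() above only proceeds once every connected cluster node is waiting, choosing an initial start when no node has a usable redo log and otherwise restoring from the logged nodes (subject to the node-group check that follows). A simplified, invented decision helper that mirrors only that shape:

// Illustrative only; the real code compares NdbNodeBitmask sets and also
// consults CheckNodeGroups (Win/Lose/Partitioning).
#include <cassert>

enum class StartDecision { Wait, InitialStart, SystemRestart };

static StartDecision decide(unsigned waiting, unsigned clusterNodes,
                            unsigned nodesWithLog) {
  if (waiting != clusterNodes)
    return StartDecision::Wait;          // not everyone has reported in yet
  if (nodesWithLog == 0)
    return StartDecision::InitialStart;  // nobody has a log to restore from
  return StartDecision::SystemRestart;   // restore from the logged nodes
}

int main() {
  assert(decide(1, 2, 1) == StartDecision::Wait);
  assert(decide(2, 2, 0) == StartDecision::InitialStart);
  assert(decide(2, 2, 2) == StartDecision::SystemRestart);
  return 0;
}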
- c_start.m_withoutLog.clear(); - } else { - - CheckNodeGroups::Output wLog = checkNodeGroups(signal, c_start.m_withLog); - - switch (wLog) { - case CheckNodeGroups::Win: - jam(); - break; - case CheckNodeGroups::Lose: - jam(); - // If we lose with all nodes, then we're in trouble - ndbrequire(!allNodes); - return false; - case CheckNodeGroups::Partitioning: - jam(); - bool allowPartition = (c_start.m_startPartitionedTimeout != (Uint64)~0); - - if(allNodes){ - if(allowPartition){ - jam(); - break; - } - ndbrequire(false); // All nodes -> partitioning, which is not allowed - } - - break; - } - - // For now only with the "logged"-ones. - // Let the others do node restart afterwards... - c_start.m_starting = c_start.m_withLog; - c_start.m_withLog.clear(); - } - - /** - * Okidoki, we try to start - */ - CntrStartConf * conf = (CntrStartConf*)signal->getDataPtr(); - conf->noStartNodes = c_start.m_starting.count(); - conf->startType = srType; - conf->startGci = c_start.m_lastGci; - conf->masterNodeId = c_start.m_lastGciNodeId; - c_start.m_starting.copyto(NdbNodeBitmask::Size, conf->startingNodes); - c_startedNodes.copyto(NdbNodeBitmask::Size, conf->startedNodes); - - ndbrequire(c_start.m_lastGciNodeId == getOwnNodeId()); - - NodeReceiverGroup rg(NDBCNTR, c_start.m_starting); - sendSignal(rg, GSN_CNTR_START_CONF, signal, CntrStartConf::SignalLength,JBB); - - c_start.m_waiting.bitANDC(c_start.m_starting); - - return true; -} - -void Ndbcntr::ph2GLab(Signal* signal) -{ - if (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - }//if - sendSttorry(signal); - return; -}//Ndbcntr::ph2GLab() - -/* -4.4 START PHASE 3 */ -/*###########################################################################*/ -// SEND SIGNAL NDBSTTOR TO ALL BLOCKS, ACC, DICT, DIH, LQH, TC AND TUP -// WHEN ALL BLOCKS HAVE RETURNED THEIR NDB_STTORRY ALL BLOCK HAVE FINISHED -// THEIR LOCAL CONNECTIONs SUCESSFULLY -// AND THEN WE CAN SEND APPL_STARTREG TO INFORM QMGR THAT WE ARE READY TO -// SET UP DISTRIBUTED CONNECTIONS. -/*--------------------------------------------------------------*/ -// THIS IS NDB START PHASE 3. -/*--------------------------------------------------------------*/ -/*******************************/ -/* STTOR */ -/*******************************/ -void Ndbcntr::startPhase3Lab(Signal* signal) -{ - ph3ALab(signal); - return; -}//Ndbcntr::startPhase3Lab() - -/*******************************/ -/* NDB_STTORRY */ -/*******************************/ -void Ndbcntr::ph3ALab(Signal* signal) -{ - if (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - }//if - - sendSttorry(signal); - return; -}//Ndbcntr::ph3ALab() - -/* -4.5 START PHASE 4 */ -/*###########################################################################*/ -// WAIT FOR ALL NODES IN CLUSTER TO CHANGE STATE INTO ZSTART , -// APPL_CHANGEREP IS ALWAYS SENT WHEN SOMEONE HAVE -// CHANGED THEIR STATE. 
APPL_STARTCONF INDICATES THAT ALL NODES ARE IN START -// STATE SEND NDB_STARTREQ TO DIH AND THEN WAIT FOR NDB_STARTCONF -/*---------------------------------------------------------------------------*/ -/*******************************/ -/* STTOR */ -/*******************************/ -void Ndbcntr::startPhase4Lab(Signal* signal) -{ - ph4ALab(signal); -}//Ndbcntr::startPhase4Lab() - - -void Ndbcntr::ph4ALab(Signal* signal) -{ - ph4BLab(signal); - return; -}//Ndbcntr::ph4ALab() - -/*******************************/ -/* NDB_STTORRY */ -/*******************************/ -void Ndbcntr::ph4BLab(Signal* signal) -{ -/*--------------------------------------*/ -/* CASE: CSTART_PHASE = ZSTART_PHASE_4 */ -/*--------------------------------------*/ - if (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - }//if - if ((ctypeOfStart == NodeState::ST_NODE_RESTART) || - (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - sendSttorry(signal); - return; - }//if - waitpoint41Lab(signal); - return; -}//Ndbcntr::ph4BLab() - -void Ndbcntr::waitpoint41Lab(Signal* signal) -{ - if (getOwnNodeId() == cmasterNodeId) { - jam(); -/*--------------------------------------*/ -/* MASTER WAITS UNTIL ALL SLAVES HAS */ -/* SENT THE REPORTS */ -/*--------------------------------------*/ - cnoWaitrep++; - if (cnoWaitrep == cnoStartNodes) { - jam(); - cnoWaitrep = 0; -/*---------------------------------------------------------------------------*/ -// NDB_STARTREQ STARTS UP ALL SET UP OF DISTRIBUTION INFORMATION IN DIH AND -// DICT. AFTER SETTING UP THIS -// DATA IT USES THAT DATA TO SET UP WHICH FRAGMENTS THAT ARE TO START AND -// WHERE THEY ARE TO START. THEN -// IT SETS UP THE FRAGMENTS AND RECOVERS THEM BY: -// 1) READING A LOCAL CHECKPOINT FROM DISK. -// 2) EXECUTING THE UNDO LOG ON INDEX AND DATA. -// 3) EXECUTING THE FRAGMENT REDO LOG FROM ONE OR SEVERAL NODES TO -// RESTORE THE RESTART CONFIGURATION OF DATA IN NDB CLUSTER. -/*---------------------------------------------------------------------------*/ - signal->theData[0] = reference(); - signal->theData[1] = ctypeOfStart; - sendSignal(DBDIH_REF, GSN_NDB_STARTREQ, signal, 2, JBB); - }//if - } else { - jam(); -/*--------------------------------------*/ -/* SLAVE NODES WILL PASS HERE ONCE AND */ -/* SEND A WAITPOINT REPORT TO MASTER. 
*/ -/* SLAVES WONT DO ANYTHING UNTIL THEY */ -/* RECEIVE A WAIT REPORT FROM THE MASTER*/ -/*--------------------------------------*/ - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_4_1; - sendSignal(calcNdbCntrBlockRef(cmasterNodeId), - GSN_CNTR_WAITREP, signal, 2, JBB); - }//if - return; -}//Ndbcntr::waitpoint41Lab() - -/*******************************/ -/* NDB_STARTCONF */ -/*******************************/ -void Ndbcntr::execNDB_STARTCONF(Signal* signal) -{ - jamEntry(); - - NodeReceiverGroup rg(NDBCNTR, c_start.m_starting); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_4_2; - sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB); - return; -}//Ndbcntr::execNDB_STARTCONF() - -/* -4.6 START PHASE 5 */ -/*###########################################################################*/ -// SEND APPL_RUN TO THE QMGR IN THIS BLOCK -// SEND NDB_STTOR ALL BLOCKS ACC, DICT, DIH, LQH, TC AND TUP THEN WAIT FOR -// THEIR NDB_STTORRY -/*---------------------------------------------------------------------------*/ -/*******************************/ -/* STTOR */ -/*******************************/ -void Ndbcntr::startPhase5Lab(Signal* signal) -{ - ph5ALab(signal); - return; -}//Ndbcntr::startPhase5Lab() - -/*******************************/ -/* NDB_STTORRY */ -/*******************************/ -/*---------------------------------------------------------------------------*/ -// THIS IS NDB START PHASE 5. -/*---------------------------------------------------------------------------*/ -// IN THIS START PHASE TUP INITIALISES DISK FILES FOR DISK STORAGE IF INITIAL -// START. DIH WILL START UP -// THE GLOBAL CHECKPOINT PROTOCOL AND WILL CONCLUDE ANY UNFINISHED TAKE OVERS -// THAT STARTED BEFORE THE SYSTEM CRASH. -/*---------------------------------------------------------------------------*/ -void Ndbcntr::ph5ALab(Signal* signal) -{ - if (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - }//if - - cstartPhase = cstartPhase + 1; - cinternalStartphase = cstartPhase - 1; - if (getOwnNodeId() == cmasterNodeId) { - switch(ctypeOfStart){ - case NodeState::ST_INITIAL_START: - jam(); - /*--------------------------------------*/ - /* MASTER CNTR IS RESPONSIBLE FOR */ - /* CREATING SYSTEM TABLES */ - /*--------------------------------------*/ - createSystableLab(signal, 0); - return; - case NodeState::ST_SYSTEM_RESTART: - jam(); - waitpoint52Lab(signal); - return; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - jam(); - break; - case NodeState::ST_ILLEGAL_TYPE: - jam(); - break; - } - ndbrequire(false); - } - - /** - * Not master - */ - NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend(); - switch(ctypeOfStart){ - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - jam(); - /*----------------------------------------------------------------------*/ - // SEND NDB START PHASE 5 IN NODE RESTARTS TO COPY DATA TO THE NEWLY - // STARTED NODE. 
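waitpoint41Lab() and its siblings above implement a barrier: non-master nodes send CNTR_WAITREP, and the master continues only after counting one report per starting node (cnoWaitrep == cnoStartNodes). A stand-alone counter illustrating that pattern:

// Illustrative only.
#include <cassert>

class Waitpoint {
 public:
  explicit Waitpoint(unsigned expected) : expected_(expected) {}
  // Returns true when the last expected report has arrived.
  bool report() { return ++received_ == expected_; }
 private:
  unsigned expected_;
  unsigned received_ = 0;
};

int main() {
  Waitpoint wp(3);       // e.g. three starting nodes
  assert(!wp.report());
  assert(!wp.report());
  assert(wp.report());   // master may now continue the restart
  return 0;
}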
- /*----------------------------------------------------------------------*/ - req->senderRef = reference(); - req->nodeId = getOwnNodeId(); - req->internalStartPhase = cinternalStartphase; - req->typeOfStart = ctypeOfStart; - req->masterNodeId = cmasterNodeId; - - //#define TRACE_STTOR -#ifdef TRACE_STTOR - ndbout_c("sending NDB_STTOR(%d) to DIH", cinternalStartphase); -#endif - sendSignal(DBDIH_REF, GSN_NDB_STTOR, signal, - NdbSttor::SignalLength, JBB); - return; - case NodeState::ST_INITIAL_START: - case NodeState::ST_SYSTEM_RESTART: - jam(); - /*--------------------------------------*/ - /* DURING SYSTEMRESTART AND INITALSTART:*/ - /* SLAVE NODES WILL PASS HERE ONCE AND */ - /* SEND A WAITPOINT REPORT TO MASTER. */ - /* SLAVES WONT DO ANYTHING UNTIL THEY */ - /* RECEIVE A WAIT REPORT FROM THE MASTER*/ - /* WHEN THE MASTER HAS FINISHED HIS WORK*/ - /*--------------------------------------*/ - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_5_2; - sendSignal(calcNdbCntrBlockRef(cmasterNodeId), - GSN_CNTR_WAITREP, signal, 2, JBB); - return; - default: - ndbrequire(false); - } -}//Ndbcntr::ph5ALab() - -void Ndbcntr::waitpoint52Lab(Signal* signal) -{ - cnoWaitrep = cnoWaitrep + 1; -/*---------------------------------------------------------------------------*/ -// THIS WAITING POINT IS ONLY USED BY A MASTER NODE. WE WILL EXECUTE NDB START -// PHASE 5 FOR DIH IN THE -// MASTER. THIS WILL START UP LOCAL CHECKPOINTS AND WILL ALSO CONCLUDE ANY -// UNFINISHED LOCAL CHECKPOINTS -// BEFORE THE SYSTEM CRASH. THIS WILL ENSURE THAT WE ALWAYS RESTART FROM A -// WELL KNOWN STATE. -/*---------------------------------------------------------------------------*/ -/*--------------------------------------*/ -/* MASTER WAITS UNTIL HE RECEIVED WAIT */ -/* REPORTS FROM ALL SLAVE CNTR */ -/*--------------------------------------*/ - if (cnoWaitrep == cnoStartNodes) { - jam(); - cnoWaitrep = 0; - - NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->nodeId = getOwnNodeId(); - req->internalStartPhase = cinternalStartphase; - req->typeOfStart = ctypeOfStart; - req->masterNodeId = cmasterNodeId; -#ifdef TRACE_STTOR - ndbout_c("sending NDB_STTOR(%d) to DIH", cinternalStartphase); -#endif - sendSignal(DBDIH_REF, GSN_NDB_STTOR, signal, - NdbSttor::SignalLength, JBB); - }//if - return; -}//Ndbcntr::waitpoint52Lab() - -/*******************************/ -/* NDB_STTORRY */ -/*******************************/ -void Ndbcntr::ph6ALab(Signal* signal) -{ - if ((ctypeOfStart == NodeState::ST_NODE_RESTART) || - (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - waitpoint51Lab(signal); - return; - }//if - - NodeReceiverGroup rg(NDBCNTR, c_start.m_starting); - rg.m_nodes.clear(getOwnNodeId()); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_5_1; - sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB); - - waitpoint51Lab(signal); - return; -}//Ndbcntr::ph6ALab() - -void Ndbcntr::waitpoint51Lab(Signal* signal) -{ - cstartPhase = cstartPhase + 1; -/*---------------------------------------------------------------------------*/ -// A FINAL STEP IS NOW TO SEND NDB_STTOR TO TC. THIS MAKES IT POSSIBLE TO -// CONNECT TO TC FOR APPLICATIONS. -// THIS IS NDB START PHASE 6 WHICH IS FOR ALL BLOCKS IN ALL NODES. 
-/*---------------------------------------------------------------------------*/ - cinternalStartphase = cstartPhase - 1; - cndbBlocksCount = 0; - ph6BLab(signal); - return; -}//Ndbcntr::waitpoint51Lab() - -void Ndbcntr::ph6BLab(Signal* signal) -{ - // c_missra.currentStartPhase - cstartPhase - cinternalStartphase = - // 5 - 7 - 6 - if (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - }//if - if ((ctypeOfStart == NodeState::ST_NODE_RESTART) || - (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - sendSttorry(signal); - return; - } - waitpoint61Lab(signal); -} - -void Ndbcntr::waitpoint61Lab(Signal* signal) -{ - if (getOwnNodeId() == cmasterNodeId) { - jam(); - cnoWaitrep6++; - if (cnoWaitrep6 == cnoStartNodes) { - jam(); - NodeReceiverGroup rg(NDBCNTR, c_start.m_starting); - rg.m_nodes.clear(getOwnNodeId()); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_6_2; - sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB); - sendSttorry(signal); - } - } else { - jam(); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_6_1; - sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_WAITREP, signal, 2, JBB); - } -} - -// Start phase 8 (internal 7) -void Ndbcntr::startPhase8Lab(Signal* signal) -{ - cinternalStartphase = cstartPhase - 1; - cndbBlocksCount = 0; - ph7ALab(signal); -} - -void Ndbcntr::ph7ALab(Signal* signal) -{ - while (cndbBlocksCount < ZNO_NDB_BLOCKS) { - jam(); - sendNdbSttor(signal); - return; - } - if ((ctypeOfStart == NodeState::ST_NODE_RESTART) || - (ctypeOfStart == NodeState::ST_INITIAL_NODE_RESTART)) { - jam(); - sendSttorry(signal); - return; - } - waitpoint71Lab(signal); -} - -void Ndbcntr::waitpoint71Lab(Signal* signal) -{ - if (getOwnNodeId() == cmasterNodeId) { - jam(); - cnoWaitrep7++; - if (cnoWaitrep7 == cnoStartNodes) { - jam(); - NodeReceiverGroup rg(NDBCNTR, c_start.m_starting); - rg.m_nodes.clear(getOwnNodeId()); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_7_2; - sendSignal(rg, GSN_CNTR_WAITREP, signal, 2, JBB); - sendSttorry(signal); - } - } else { - jam(); - signal->theData[0] = getOwnNodeId(); - signal->theData[1] = ZWAITPOINT_7_1; - sendSignal(calcNdbCntrBlockRef(cmasterNodeId), GSN_CNTR_WAITREP, signal, 2, JBB); - } -} - -// Start phase 9 (internal 8) -void Ndbcntr::startPhase9Lab(Signal* signal) -{ - cinternalStartphase = cstartPhase - 1; - cndbBlocksCount = 0; - ph8ALab(signal); -} - -void Ndbcntr::ph8ALab(Signal* signal) -{ -/*---------------------------------------------------------------------------*/ -// NODES WHICH PERFORM A NODE RESTART NEEDS TO GET THE DYNAMIC ID'S -// OF THE OTHER NODES HERE. 
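// waitpoint41/52/61/71Lab above all follow the same pattern: every starting
// node sends CNTR_WAITREP to the master, the master counts the reports, and
// only when all expected nodes have reported does it continue and release the
// others.  A standalone sketch of that counting barrier in plain C++; the
// WaitpointBarrier name and its callback are illustrative, not NDB API:
#include <cstdint>
#include <functional>
#include <utility>

class WaitpointBarrier {
public:
  WaitpointBarrier(uint32_t expectedReports, std::function<void()> onAllReported)
    : m_expected(expectedReports), m_received(0),
      m_onAllReported(std::move(onAllReported)) {}

  // Called once per CNTR_WAITREP-style report from a starting node.
  void report() {
    if (++m_received == m_expected) {
      m_received = 0;        // reset, as waitpoint52Lab does with cnoWaitrep
      m_onAllReported();     // master proceeds and releases the waiting nodes
    }
  }

private:
  uint32_t m_expected;
  uint32_t m_received;
  std::function<void()> m_onAllReported;
};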
-/*---------------------------------------------------------------------------*/ - sendSttorry(signal); - resetStartVariables(signal); - return; -}//Ndbcntr::ph8BLab() - -/*******************************/ -/* CNTR_WAITREP */ -/*******************************/ -void Ndbcntr::execCNTR_WAITREP(Signal* signal) -{ - Uint16 twaitPoint; - - jamEntry(); - twaitPoint = signal->theData[1]; - switch (twaitPoint) { - case ZWAITPOINT_4_1: - jam(); - waitpoint41Lab(signal); - break; - case ZWAITPOINT_4_2: - jam(); - sendSttorry(signal); - break; - case ZWAITPOINT_5_1: - jam(); - waitpoint51Lab(signal); - break; - case ZWAITPOINT_5_2: - jam(); - waitpoint52Lab(signal); - break; - case ZWAITPOINT_6_1: - jam(); - waitpoint61Lab(signal); - break; - case ZWAITPOINT_6_2: - jam(); - sendSttorry(signal); - break; - case ZWAITPOINT_7_1: - jam(); - waitpoint71Lab(signal); - break; - case ZWAITPOINT_7_2: - jam(); - sendSttorry(signal); - break; - default: - jam(); - systemErrorLab(signal, __LINE__); - break; - }//switch -}//Ndbcntr::execCNTR_WAITREP() - -/*******************************/ -/* NODE_FAILREP */ -/*******************************/ -void Ndbcntr::execNODE_FAILREP(Signal* signal) -{ - jamEntry(); - - if (ERROR_INSERTED(1001)) - { - sendSignalWithDelay(reference(), GSN_NODE_FAILREP, signal, 100, - signal->getLength()); - return; - } - - const NodeFailRep * nodeFail = (NodeFailRep *)&signal->theData[0]; - NdbNodeBitmask allFailed; - allFailed.assign(NdbNodeBitmask::Size, nodeFail->theNodes); - - NdbNodeBitmask failedStarted = c_startedNodes; - NdbNodeBitmask failedStarting = c_start.m_starting; - NdbNodeBitmask failedWaiting = c_start.m_waiting; - - failedStarted.bitAND(allFailed); - failedStarting.bitAND(allFailed); - failedWaiting.bitAND(allFailed); - - const bool tMasterFailed = allFailed.get(cmasterNodeId); - const bool tStarted = !failedStarted.isclear(); - const bool tStarting = !failedStarting.isclear(); - - if(tMasterFailed){ - jam(); - /** - * If master has failed choose qmgr president as master - */ - cmasterNodeId = nodeFail->masterNodeId; - } - - /** - * Clear node bitmasks from failed nodes - */ - c_start.m_starting.bitANDC(allFailed); - c_start.m_waiting.bitANDC(allFailed); - c_start.m_withLog.bitANDC(allFailed); - c_start.m_withoutLog.bitANDC(allFailed); - c_clusterNodes.bitANDC(allFailed); - c_startedNodes.bitANDC(allFailed); - - const NodeState & st = getNodeState(); - if(st.startLevel == st.SL_STARTING){ - jam(); - - const Uint32 phase = st.starting.startPhase; - - const bool tStartConf = (phase > 2) || (phase == 2 && cndbBlocksCount > 0); - - if(tMasterFailed){ - progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, - "Unhandled node failure during restart"); - } - - if(tStartConf && tStarting){ - // One of other starting nodes has crashed... - progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, - "Unhandled node failure of starting node during restart"); - } - - if(tStartConf && tStarted){ - // One of other started nodes has crashed... 
- progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, - "Unhandled node failure of started node during restart"); - } - - Uint32 nodeId = 0; - while(!allFailed.isclear()){ - nodeId = allFailed.find(nodeId + 1); - allFailed.clear(nodeId); - signal->theData[0] = nodeId; - sendSignal(QMGR_REF, GSN_NDB_FAILCONF, signal, 1, JBB); - }//for - - return; - } - - ndbrequire(!allFailed.get(getOwnNodeId())); - - NodeFailRep * rep = (NodeFailRep *)&signal->theData[0]; - rep->masterNodeId = cmasterNodeId; - - sendSignal(DBTC_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(DBLQH_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(DBDIH_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(DBDICT_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(BACKUP_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(SUMA_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - sendSignal(QMGR_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - - if (c_stopRec.stopReq.senderRef) - { - jam(); - switch(c_stopRec.m_state){ - case StopRecord::SR_WAIT_NODE_FAILURES: - { - jam(); - NdbNodeBitmask tmp; - tmp.assign(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - tmp.bitANDC(allFailed); - tmp.copyto(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - - if (tmp.isclear()) - { - jam(); - if (c_stopRec.stopReq.senderRef != RNIL) - { - jam(); - StopConf * const stopConf = (StopConf *)&signal->theData[0]; - stopConf->senderData = c_stopRec.stopReq.senderData; - stopConf->nodeState = (Uint32) NodeState::SL_SINGLEUSER; - sendSignal(c_stopRec.stopReq.senderRef, GSN_STOP_CONF, signal, - StopConf::SignalLength, JBB); - } - - c_stopRec.stopReq.senderRef = 0; - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = StopRecord::SR_UNBLOCK_GCP_START_GCP; - req->requestType = WaitGCPReq::UnblockStartGcp; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBA); - } - break; - } - case StopRecord::SR_QMGR_STOP_REQ: - { - NdbNodeBitmask tmp; - tmp.assign(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - tmp.bitANDC(allFailed); - - if (tmp.isclear()) - { - Uint32 nodeId = allFailed.find(0); - tmp.set(nodeId); - - StopConf* conf = (StopConf*)signal->getDataPtrSend(); - conf->senderData = c_stopRec.stopReq.senderData; - conf->nodeId = nodeId; - sendSignal(reference(), - GSN_STOP_CONF, signal, StopConf::SignalLength, JBB); - } - - tmp.copyto(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - - break; - } - case StopRecord::SR_BLOCK_GCP_START_GCP: - case StopRecord::SR_WAIT_COMPLETE_GCP: - case StopRecord::SR_UNBLOCK_GCP_START_GCP: - case StopRecord::SR_CLUSTER_SHUTDOWN: - break; - } - } - - signal->theData[0] = NDB_LE_NODE_FAILREP; - signal->theData[2] = 0; - - Uint32 nodeId = 0; - while(!allFailed.isclear()){ - nodeId = allFailed.find(nodeId + 1); - allFailed.clear(nodeId); - signal->theData[1] = nodeId; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - }//for - - return; -}//Ndbcntr::execNODE_FAILREP() - -/*******************************/ -/* READ_NODESREQ */ -/*******************************/ -void Ndbcntr::execREAD_NODESREQ(Signal* signal) -{ - jamEntry(); - - /*----------------------------------------------------------------------*/ - // ANY BLOCK MAY SEND A REQUEST ABOUT NDB NODES AND VERSIONS IN THE - // SYSTEM. 
THIS REQUEST CAN ONLY BE HANDLED IN - // ABSOLUTE STARTPHASE 3 OR LATER - /*----------------------------------------------------------------------*/ - BlockReference TuserBlockref = signal->theData[0]; - ReadNodesConf * const readNodes = (ReadNodesConf *)&signal->theData[0]; - - /** - * Prepare inactiveNodes bitmask. - * The concept as such is by the way pretty useless. - * It makes parallell starts more or less impossible... - */ - NdbNodeBitmask tmp1; - tmp1.bitOR(c_startedNodes); - if(!getNodeState().getNodeRestartInProgress()){ - tmp1.bitOR(c_start.m_starting); - } else { - tmp1.set(getOwnNodeId()); - } - - NdbNodeBitmask tmp2; - tmp2.bitOR(c_allDefinedNodes); - tmp2.bitANDC(tmp1); - /** - * Fill in return signal - */ - tmp2.copyto(NdbNodeBitmask::Size, readNodes->inactiveNodes); - c_allDefinedNodes.copyto(NdbNodeBitmask::Size, readNodes->allNodes); - c_clusterNodes.copyto(NdbNodeBitmask::Size, readNodes->clusterNodes); - c_startedNodes.copyto(NdbNodeBitmask::Size, readNodes->startedNodes); - c_start.m_starting.copyto(NdbNodeBitmask::Size, readNodes->startingNodes); - - readNodes->noOfNodes = c_allDefinedNodes.count(); - readNodes->masterNodeId = cmasterNodeId; - readNodes->ndynamicId = cdynamicNodeId; - if (cstartPhase > ZSTART_PHASE_2) { - jam(); - sendSignal(TuserBlockref, GSN_READ_NODESCONF, signal, - ReadNodesConf::SignalLength, JBB); - - } else { - jam(); - signal->theData[0] = ZNOT_AVAILABLE; - sendSignal(TuserBlockref, GSN_READ_NODESREF, signal, 1, JBB); - }//if -}//Ndbcntr::execREAD_NODESREQ() - -/*----------------------------------------------------------------------*/ -// SENDS APPL_ERROR TO QMGR AND THEN SET A POINTER OUT OF BOUNDS -/*----------------------------------------------------------------------*/ -void Ndbcntr::systemErrorLab(Signal* signal, int line) -{ - progError(line, NDBD_EXIT_NDBREQUIRE); /* BUG INSERTION */ - return; -}//Ndbcntr::systemErrorLab() - -/*###########################################################################*/ -/* CNTR MASTER CREATES AND INITIALIZES A SYSTEMTABLE AT INITIALSTART */ -/* |-2048| # 1 00000001 | */ -/* | : | : | */ -/* | -1 | # 1 00000001 | */ -/* | 1 | 0 | tupleid sequence now created on first use */ -/* | : | : | v */ -/* | 2048| 0 | v */ -/*---------------------------------------------------------------------------*/ -void Ndbcntr::createSystableLab(Signal* signal, unsigned index) -{ - if (index >= g_sysTableCount) { - ndbassert(index == g_sysTableCount); - startInsertTransactions(signal); - return; - } - const SysTable& table = *g_sysTableList[index]; - Uint32 propPage[256]; - LinearWriter w(propPage, 256); - - // XXX remove commented-out lines later - - w.first(); - w.add(DictTabInfo::TableName, table.name); - w.add(DictTabInfo::TableLoggedFlag, table.tableLoggedFlag); - //w.add(DictTabInfo::TableKValue, 6); - //w.add(DictTabInfo::MinLoadFactor, 70); - //w.add(DictTabInfo::MaxLoadFactor, 80); - w.add(DictTabInfo::FragmentTypeVal, (Uint32)table.fragmentType); - //w.add(DictTabInfo::NoOfKeyAttr, 1); - w.add(DictTabInfo::NoOfAttributes, (Uint32)table.columnCount); - //w.add(DictTabInfo::NoOfNullable, (Uint32)0); - //w.add(DictTabInfo::NoOfVariable, (Uint32)0); - //w.add(DictTabInfo::KeyLength, 1); - w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType); - w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE); - - for (unsigned i = 0; i < table.columnCount; i++) { - const SysColumn& column = table.columnList[i]; - ndbassert(column.pos == i); - w.add(DictTabInfo::AttributeName, column.name); - 
w.add(DictTabInfo::AttributeId, (Uint32)i); - w.add(DictTabInfo::AttributeKeyFlag, (Uint32)column.keyFlag); - w.add(DictTabInfo::AttributeStorageType, - (Uint32)NDB_STORAGETYPE_MEMORY); - w.add(DictTabInfo::AttributeArrayType, - (Uint32)NDB_ARRAYTYPE_FIXED); - w.add(DictTabInfo::AttributeNullableFlag, (Uint32)column.nullable); - w.add(DictTabInfo::AttributeExtType, (Uint32)column.type); - w.add(DictTabInfo::AttributeExtLength, (Uint32)column.length); - w.add(DictTabInfo::AttributeEnd, (Uint32)true); - } - w.add(DictTabInfo::TableEnd, (Uint32)true); - - Uint32 length = w.getWordsUsed(); - LinearSectionPtr ptr[3]; - ptr[0].p = &propPage[0]; - ptr[0].sz = length; - - CreateTableReq* const req = (CreateTableReq*)signal->getDataPtrSend(); - req->senderData = index; - req->senderRef = reference(); - sendSignal(DBDICT_REF, GSN_CREATE_TABLE_REQ, signal, - CreateTableReq::SignalLength, JBB, ptr, 1); - return; -}//Ndbcntr::createSystableLab() - -void Ndbcntr::execCREATE_TABLE_REF(Signal* signal) -{ - jamEntry(); - progError(__LINE__,NDBD_EXIT_NDBREQUIRE, "CREATE_TABLE_REF"); - return; -}//Ndbcntr::execDICTTABREF() - -void Ndbcntr::execCREATE_TABLE_CONF(Signal* signal) -{ - jamEntry(); - CreateTableConf * const conf = (CreateTableConf*)signal->getDataPtrSend(); - //csystabId = conf->tableId; - ndbrequire(conf->senderData < g_sysTableCount); - const SysTable& table = *g_sysTableList[conf->senderData]; - table.tableId = conf->tableId; - createSystableLab(signal, conf->senderData + 1); - //startInsertTransactions(signal); - return; -}//Ndbcntr::execDICTTABCONF() - -/*******************************/ -/* DICTRELEASECONF */ -/*******************************/ -void Ndbcntr::startInsertTransactions(Signal* signal) -{ - jamEntry(); - - ckey = 1; - ctransidPhase = ZTRUE; - signal->theData[0] = 0; - signal->theData[1] = reference(); - sendSignal(DBTC_REF, GSN_TCSEIZEREQ, signal, 2, JBB); - return; -}//Ndbcntr::startInsertTransactions() - -/*******************************/ -/* TCSEIZECONF */ -/*******************************/ -void Ndbcntr::execTCSEIZECONF(Signal* signal) -{ - jamEntry(); - ctcConnectionP = signal->theData[1]; - crSystab7Lab(signal); - return; -}//Ndbcntr::execTCSEIZECONF() - -const unsigned int RowsPerCommit = 16; -void Ndbcntr::crSystab7Lab(Signal* signal) -{ - UintR tkey; - UintR Tmp; - - TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0]; - - UintR reqInfo_Start = 0; - tcKeyReq->setOperationType(reqInfo_Start, ZINSERT); // Insert - tcKeyReq->setKeyLength (reqInfo_Start, 1); - tcKeyReq->setAIInTcKeyReq (reqInfo_Start, 5); - tcKeyReq->setAbortOption (reqInfo_Start, TcKeyReq::AbortOnError); - -/* KEY LENGTH = 1, ATTRINFO LENGTH IN TCKEYREQ = 5 */ - cresponses = 0; - const UintR guard0 = ckey + (RowsPerCommit - 1); - for (Tmp = ckey; Tmp <= guard0; Tmp++) { - UintR reqInfo = reqInfo_Start; - if (Tmp == ckey) { // First iteration, Set start flag - jam(); - tcKeyReq->setStartFlag(reqInfo, 1); - } //if - if (Tmp == guard0) { // Last iteration, Set commit flag - jam(); - tcKeyReq->setCommitFlag(reqInfo, 1); - tcKeyReq->setExecuteFlag(reqInfo, 1); - } //if - if (ctransidPhase == ZTRUE) { - jam(); - tkey = 0; - tkey = tkey - Tmp; - } else { - jam(); - tkey = Tmp; - }//if - - tcKeyReq->apiConnectPtr = ctcConnectionP; - tcKeyReq->attrLen = 5; - tcKeyReq->tableId = g_sysTable_SYSTAB_0.tableId; - tcKeyReq->requestInfo = reqInfo; - tcKeyReq->tableSchemaVersion = ZSYSTAB_VERSION; - tcKeyReq->transId1 = 0; - tcKeyReq->transId2 = ckey; - 
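// crSystab7Lab() above loads SYSTAB_0 in batches of RowsPerCommit rows per
// transaction: the first TCKEYREQ of a batch carries the start flag and the
// last one the commit/execute flags.  The batching rule, reduced to a plain
// C++ sketch (Op is a hypothetical struct, not the real TcKeyReq layout):
#include <cstdint>
#include <vector>

struct Op {
  uint32_t key;
  bool startFlag;   // opens the transaction
  bool commitFlag;  // commits it on the final row of the batch
};

static std::vector<Op> buildBatch(uint32_t firstKey, uint32_t rowsPerCommit) {
  std::vector<Op> batch;
  const uint32_t lastKey = firstKey + rowsPerCommit - 1;
  for (uint32_t k = firstKey; k <= lastKey; k++) {
    batch.push_back(Op{k, k == firstKey, k == lastKey});
  }
  return batch;
}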
-//------------------------------------------------------------- -// There is no optional part in this TCKEYREQ. There is one -// key word and five ATTRINFO words. -//------------------------------------------------------------- - Uint32* tKeyDataPtr = &tcKeyReq->scanInfo; - Uint32* tAIDataPtr = &tKeyDataPtr[1]; - - tKeyDataPtr[0] = tkey; - - AttributeHeader::init(&tAIDataPtr[0], 0, 1 << 2); - tAIDataPtr[1] = tkey; - AttributeHeader::init(&tAIDataPtr[2], 1, 2 << 2); - tAIDataPtr[3] = (tkey << 16); - tAIDataPtr[4] = 1; - sendSignal(DBTC_REF, GSN_TCKEYREQ, signal, - TcKeyReq::StaticLength + 6, JBB); - }//for - ckey = ckey + RowsPerCommit; - return; -}//Ndbcntr::crSystab7Lab() - -/*******************************/ -/* TCKEYCONF09 */ -/*******************************/ -void Ndbcntr::execTCKEYCONF(Signal* signal) -{ - const TcKeyConf * const keyConf = (TcKeyConf *)&signal->theData[0]; - - jamEntry(); - cgciSystab = keyConf->gci; - UintR confInfo = keyConf->confInfo; - - if (TcKeyConf::getMarkerFlag(confInfo)){ - Uint32 transId1 = keyConf->transId1; - Uint32 transId2 = keyConf->transId2; - signal->theData[0] = transId1; - signal->theData[1] = transId2; - sendSignal(DBTC_REF, GSN_TC_COMMIT_ACK, signal, 2, JBB); - }//if - - cresponses = cresponses + TcKeyConf::getNoOfOperations(confInfo); - if (TcKeyConf::getCommitFlag(confInfo)){ - jam(); - ndbrequire(cresponses == RowsPerCommit); - - crSystab8Lab(signal); - return; - } - return; -}//Ndbcntr::tckeyConfLab() - -void Ndbcntr::crSystab8Lab(Signal* signal) -{ - if (ckey < ZSIZE_SYSTAB) { - jam(); - crSystab7Lab(signal); - return; - } else if (ctransidPhase == ZTRUE) { - jam(); - ckey = 1; - ctransidPhase = ZFALSE; - // skip 2nd loop - tupleid sequence now created on first use - }//if - signal->theData[0] = ctcConnectionP; - signal->theData[1] = reference(); - signal->theData[2] = 0; - sendSignal(DBTC_REF, GSN_TCRELEASEREQ, signal, 2, JBB); - return; -}//Ndbcntr::crSystab8Lab() - -/*******************************/ -/* TCRELEASECONF */ -/*******************************/ -void Ndbcntr::execTCRELEASECONF(Signal* signal) -{ - jamEntry(); - waitpoint52Lab(signal); - return; -}//Ndbcntr::execTCRELEASECONF() - -void Ndbcntr::crSystab9Lab(Signal* signal) -{ - signal->theData[1] = reference(); - sendSignalWithDelay(DBDIH_REF, GSN_GETGCIREQ, signal, 100, 2); - return; -}//Ndbcntr::crSystab9Lab() - -/*******************************/ -/* GETGCICONF */ -/*******************************/ -void Ndbcntr::execGETGCICONF(Signal* signal) -{ - jamEntry(); - -#ifndef NO_GCP - if (signal->theData[1] < cgciSystab) { - jam(); -/*--------------------------------------*/ -/* MAKE SURE THAT THE SYSTABLE IS */ -/* NOW SAFE ON DISK */ -/*--------------------------------------*/ - crSystab9Lab(signal); - return; - }//if -#endif - waitpoint52Lab(signal); - return; -}//Ndbcntr::execGETGCICONF() - -void Ndbcntr::execTCKEYREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execTCKEYREF() - -void Ndbcntr::execTCROLLBACKREP(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execTCROLLBACKREP() - -void Ndbcntr::execTCRELEASEREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execTCRELEASEREF() - -void Ndbcntr::execTCSEIZEREF(Signal* signal) -{ - jamEntry(); - systemErrorLab(signal, __LINE__); - return; -}//Ndbcntr::execTCSEIZEREF() - - -/*---------------------------------------------------------------------------*/ -/*INITIALIZE VARIABLES AND RECORDS */ 
-/*---------------------------------------------------------------------------*/ -void Ndbcntr::initData(Signal* signal) -{ - c_start.reset(); - cmasterNodeId = 0; - cnoStartNodes = 0; - cnoWaitrep = 0; -}//Ndbcntr::initData() - - -/*---------------------------------------------------------------------------*/ -/*RESET VARIABLES USED DURING THE START */ -/*---------------------------------------------------------------------------*/ -void Ndbcntr::resetStartVariables(Signal* signal) -{ - cnoStartNodes = 0; - cnoWaitrep6 = cnoWaitrep7 = 0; -}//Ndbcntr::resetStartVariables() - - -/*---------------------------------------------------------------------------*/ -// SEND THE SIGNAL -// INPUT CNDB_BLOCKS_COUNT -/*---------------------------------------------------------------------------*/ -void Ndbcntr::sendNdbSttor(Signal* signal) -{ - NdbBlocksRecPtr ndbBlocksPtr; - - ndbBlocksPtr.i = cndbBlocksCount; - ptrCheckGuard(ndbBlocksPtr, ZSIZE_NDB_BLOCKS_REC, ndbBlocksRec); - - NdbSttor * const req = (NdbSttor*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->nodeId = getOwnNodeId(); - req->internalStartPhase = cinternalStartphase; - req->typeOfStart = ctypeOfStart; - req->masterNodeId = cmasterNodeId; - - for (int i = 0; i < 16; i++) { - // Garbage - req->config[i] = 0x88776655; - //cfgBlockPtr.p->cfgData[i]; - } - - //#define MAX_STARTPHASE 2 -#ifdef TRACE_STTOR - ndbout_c("sending NDB_STTOR(%d) to %s", - cinternalStartphase, - getBlockName( refToBlock(ndbBlocksPtr.p->blockref))); -#endif - sendSignal(ndbBlocksPtr.p->blockref, GSN_NDB_STTOR, signal, 22, JBB); - cndbBlocksCount++; -}//Ndbcntr::sendNdbSttor() - -/*---------------------------------------------------------------------------*/ -// JUST SEND THE SIGNAL -/*---------------------------------------------------------------------------*/ -void Ndbcntr::sendSttorry(Signal* signal) -{ - signal->theData[3] = ZSTART_PHASE_1; - signal->theData[4] = ZSTART_PHASE_2; - signal->theData[5] = ZSTART_PHASE_3; - signal->theData[6] = ZSTART_PHASE_4; - signal->theData[7] = ZSTART_PHASE_5; - signal->theData[8] = ZSTART_PHASE_6; - // skip simulated phase 7 - signal->theData[9] = ZSTART_PHASE_8; - signal->theData[10] = ZSTART_PHASE_9; - signal->theData[11] = ZSTART_PHASE_END; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 12, JBB); -}//Ndbcntr::sendSttorry() - -void -Ndbcntr::execDUMP_STATE_ORD(Signal* signal) -{ - DumpStateOrd * const & dumpState = (DumpStateOrd *)&signal->theData[0]; - Uint32 arg = dumpState->args[0]; - - if(arg == 13){ - infoEvent("Cntr: cstartPhase = %d, cinternalStartphase = %d, block = %d", - cstartPhase, cinternalStartphase, cndbBlocksCount); - infoEvent("Cntr: cmasterNodeId = %d", cmasterNodeId); - } - - if (arg == DumpStateOrd::NdbcntrTestStopOnError){ - if (m_ctx.m_config.stopOnError() == true) - ((Configuration&)m_ctx.m_config).stopOnError(false); - - const BlockReference tblockref = calcNdbCntrBlockRef(getOwnNodeId()); - - SystemError * const sysErr = (SystemError*)&signal->theData[0]; - sysErr->errorCode = SystemError::TestStopOnError; - sysErr->errorRef = reference(); - sendSignal(tblockref, GSN_SYSTEM_ERROR, signal, - SystemError::SignalLength, JBA); - } - - if (arg == DumpStateOrd::NdbcntrStopNodes) - { - NdbNodeBitmask mask; - for(Uint32 i = 1; i < signal->getLength(); i++) - mask.set(signal->theData[i]); - - StopReq* req = (StopReq*)signal->getDataPtrSend(); - req->senderRef = RNIL; - req->senderData = 123; - req->requestInfo = 0; - req->singleuser = 0; - req->singleUserApi = 0; - mask.copyto(NdbNodeBitmask::Size,
req->nodes); - StopReq::setPerformRestart(req->requestInfo, 1); - StopReq::setNoStart(req->requestInfo, 1); - StopReq::setStopNodes(req->requestInfo, 1); - StopReq::setStopAbort(req->requestInfo, 1); - - sendSignal(reference(), GSN_STOP_REQ, signal, - StopReq::SignalLength, JBB); - return; - } - -}//Ndbcntr::execDUMP_STATE_ORD() - -void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{ - NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0]; - - if (newState.startLevel == NodeState::SL_STARTED) - { - CRASH_INSERTION(1000); - } - - stateRep->nodeState = newState; - stateRep->nodeState.masterNodeId = cmasterNodeId; - stateRep->nodeState.setNodeGroup(c_nodeGroup); - - for(Uint32 i = 0; i < ALL_BLOCKS_SZ; i++){ - sendSignal(ALL_BLOCKS[i].Ref, GSN_NODE_STATE_REP, signal, - NodeStateRep::SignalLength, JBB); - } -} - -void -Ndbcntr::execRESUME_REQ(Signal* signal){ - //ResumeReq * const req = (ResumeReq *)&signal->theData[0]; - //ResumeRef * const ref = (ResumeRef *)&signal->theData[0]; - - jamEntry(); - - signal->theData[0] = NDB_LE_SingleUser; - signal->theData[1] = 2; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - //Uint32 senderData = req->senderData; - //BlockReference senderRef = req->senderRef; - NodeState newState(NodeState::SL_STARTED); - updateNodeState(signal, newState); - c_stopRec.stopReq.senderRef=0; -} - -void -Ndbcntr::execSTOP_REQ(Signal* signal){ - StopReq * const req = (StopReq *)&signal->theData[0]; - StopRef * const ref = (StopRef *)&signal->theData[0]; - Uint32 singleuser = req->singleuser; - jamEntry(); - Uint32 senderData = req->senderData; - BlockReference senderRef = req->senderRef; - bool abort = StopReq::getStopAbort(req->requestInfo); - bool stopnodes = StopReq::getStopNodes(req->requestInfo); - - if(!singleuser && - (getNodeState().startLevel < NodeState::SL_STARTED || - (abort && !stopnodes))) - { - /** - * Node is not started yet - * - * So stop it quickly - */ - jam(); - const Uint32 reqInfo = req->requestInfo; - if(StopReq::getPerformRestart(reqInfo)){ - jam(); - StartOrd * startOrd = (StartOrd *)&signal->theData[0]; - startOrd->restartInfo = reqInfo; - sendSignal(CMVMI_REF, GSN_START_ORD, signal, 1, JBA); - } else { - jam(); - sendSignal(CMVMI_REF, GSN_STOP_ORD, signal, 1, JBA); - } - return; - } - - if(c_stopRec.stopReq.senderRef != 0 || - (cmasterNodeId == getOwnNodeId() && !c_start.m_starting.isclear())) - { - /** - * Requested a system shutdown - */ - if(!singleuser && StopReq::getSystemStop(req->requestInfo)){ - jam(); - sendSignalWithDelay(reference(), GSN_STOP_REQ, signal, 100, - StopReq::SignalLength); - return; - } - - /** - * Requested a node shutdown - */ - if(c_stopRec.stopReq.senderRef && - StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)) - ref->errorCode = StopRef::SystemShutdownInProgress; - else - ref->errorCode = StopRef::NodeShutdownInProgress; - ref->senderData = senderData; - ref->masterNodeId = cmasterNodeId; - - if (senderRef != RNIL) - sendSignal(senderRef, GSN_STOP_REF, signal, StopRef::SignalLength, JBB); - return; - } - - if (stopnodes && !abort) - { - jam(); - ref->errorCode = StopRef::UnsupportedNodeShutdown; - ref->senderData = senderData; - ref->masterNodeId = cmasterNodeId; - if (senderRef != RNIL) - sendSignal(senderRef, GSN_STOP_REF, signal, StopRef::SignalLength, JBB); - return; - } - - if (stopnodes && cmasterNodeId != getOwnNodeId()) - { - jam(); - ref->errorCode = StopRef::MultiNodeShutdownNotMaster; - ref->senderData = senderData; - ref->masterNodeId = cmasterNodeId; - if (senderRef != RNIL) - sendSignal(senderRef, GSN_STOP_REF, signal, StopRef::SignalLength, JBB); - return; - } - - c_stopRec.stopReq = * req; - c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond(); - - if (stopnodes) -
{ - jam(); - - if(!c_stopRec.checkNodeFail(signal)) - { - jam(); - return; - } - - char buf[100]; - NdbNodeBitmask mask; - mask.assign(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - infoEvent("Initiating shutdown abort of %s", mask.getText(buf)); - ndbout_c("Initiating shutdown abort of %s", mask.getText(buf)); - - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = StopRecord::SR_BLOCK_GCP_START_GCP; - req->requestType = WaitGCPReq::BlockStartGcp; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); - return; - } - else if(!singleuser) - { - if(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)) - { - jam(); - if(StopReq::getPerformRestart(c_stopRec.stopReq.requestInfo)) - { - ((Configuration&)m_ctx.m_config).stopOnError(false); - } - } - if(!c_stopRec.checkNodeFail(signal)) - { - jam(); - return; - } - signal->theData[0] = NDB_LE_NDBStopStarted; - signal->theData[1] = StopReq::getSystemStop(c_stopRec.stopReq.requestInfo) ? 1 : 0; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - } - else - { - signal->theData[0] = NDB_LE_SingleUser; - signal->theData[1] = 0; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - } - - NodeState newState(NodeState::SL_STOPPING_1, - StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)); - - if(singleuser) { - newState.setSingleUser(true); - newState.setSingleUserApi(c_stopRec.stopReq.singleUserApi); - } - updateNodeState(signal, newState); - signal->theData[0] = ZSHUTDOWN; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void -Ndbcntr::StopRecord::checkTimeout(Signal* signal){ - jamEntry(); - - if(!cntr.getNodeState().getSingleUserMode()) - if(!checkNodeFail(signal)){ - jam(); - return; - } - - switch(cntr.getNodeState().startLevel){ - case NodeState::SL_STOPPING_1: - checkApiTimeout(signal); - break; - case NodeState::SL_STOPPING_2: - checkTcTimeout(signal); - break; - case NodeState::SL_STOPPING_3: - checkLqhTimeout_1(signal); - break; - case NodeState::SL_STOPPING_4: - checkLqhTimeout_2(signal); - break; - case NodeState::SL_SINGLEUSER: - break; - default: - ndbrequire(false); - } -} - -bool -Ndbcntr::StopRecord::checkNodeFail(Signal* signal){ - jam(); - if(StopReq::getSystemStop(stopReq.requestInfo)){ - jam(); - return true; - } - - /** - * Check if I can survive me stopping - */ - NodeBitmask ndbMask; - ndbMask.assign(cntr.c_startedNodes); - - if (StopReq::getStopNodes(stopReq.requestInfo)) - { - NdbNodeBitmask tmp; - tmp.assign(NdbNodeBitmask::Size, stopReq.nodes); - - NdbNodeBitmask ndbStopNodes; - ndbStopNodes.assign(NdbNodeBitmask::Size, stopReq.nodes); - ndbStopNodes.bitAND(ndbMask); - ndbStopNodes.copyto(NdbNodeBitmask::Size, stopReq.nodes); - - ndbMask.bitANDC(tmp); - - bool allNodesStopped = true; - int i ; - for( i = 0; i < (int) NdbNodeBitmask::Size; i++ ){ - if ( stopReq.nodes[i] != 0 ){ - allNodesStopped = false; - break; - } - } - - if ( allNodesStopped ) { - StopConf * const stopConf = (StopConf *)&signal->theData[0]; - stopConf->senderData = stopReq.senderData; - stopConf->nodeState = (Uint32) NodeState::SL_NOTHING; - cntr.sendSignal(stopReq.senderRef, GSN_STOP_CONF, signal, - StopConf::SignalLength, JBB); - stopReq.senderRef = 0; - return false; - } - - } - else - { - ndbMask.clear(cntr.getOwnNodeId()); - } - - CheckNodeGroups* sd = (CheckNodeGroups*)&signal->theData[0]; - sd->blockRef = cntr.reference(); - sd->requestType = CheckNodeGroups::Direct | CheckNodeGroups::ArbitCheck; - sd->mask = ndbMask; - 
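// checkNodeFail() above decides whether the cluster stays viable once the
// stopping nodes are removed from the started set, by asking DIH (the
// CHECKNODEGROUPSREQ with ArbitCheck sent just below).  The essence, "does
// every node group keep at least one live member?", sketched with ordinary
// containers; names and the arbitration rules are simplified, not the real
// CheckNodeGroups logic:
#include <cstdint>
#include <map>
#include <set>

static bool clusterSurvives(const std::map<uint32_t, uint32_t>& nodeToGroup,
                            const std::set<uint32_t>& startedNodes,
                            const std::set<uint32_t>& stoppingNodes) {
  // Count the live nodes left in each node group after the stop.
  std::map<uint32_t, uint32_t> liveInGroup;
  for (uint32_t node : startedNodes) {
    if (stoppingNodes.count(node) == 0)
      liveInGroup[nodeToGroup.at(node)]++;
  }
  // Every group that currently has a started member must keep at least one.
  for (uint32_t node : startedNodes) {
    if (liveInGroup[nodeToGroup.at(node)] == 0)
      return false;
  }
  return true;
}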
cntr.EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal, - CheckNodeGroups::SignalLength); - jamEntry(); - switch (sd->output) { - case CheckNodeGroups::Win: - case CheckNodeGroups::Partitioning: - return true; - break; - } - - StopRef * const ref = (StopRef *)&signal->theData[0]; - - ref->senderData = stopReq.senderData; - ref->errorCode = StopRef::NodeShutdownWouldCauseSystemCrash; - ref->masterNodeId = cntr.cmasterNodeId; - - const BlockReference bref = stopReq.senderRef; - if (bref != RNIL) - cntr.sendSignal(bref, GSN_STOP_REF, signal, StopRef::SignalLength, JBB); - - stopReq.senderRef = 0; - - if (cntr.getNodeState().startLevel != NodeState::SL_SINGLEUSER) - { - NodeState newState(NodeState::SL_STARTED); - cntr.updateNodeState(signal, newState); - } - - signal->theData[0] = NDB_LE_NDBStopAborted; - cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB); - - return false; -} - -void -Ndbcntr::StopRecord::checkApiTimeout(Signal* signal){ - const Int32 timeout = stopReq.apiTimeout; - const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout; - const NDB_TICKS now = NdbTick_CurrentMillisecond(); - if((timeout >= 0 && now >= alarm)){ - // || checkWithApiInSomeMagicWay) - jam(); - NodeState newState(NodeState::SL_STOPPING_2, - StopReq::getSystemStop(stopReq.requestInfo)); - if(stopReq.singleuser) { - newState.setSingleUser(true); - newState.setSingleUserApi(stopReq.singleUserApi); - } - cntr.updateNodeState(signal, newState); - - stopInitiatedTime = now; - } - - signal->theData[0] = ZSHUTDOWN; - cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void -Ndbcntr::StopRecord::checkTcTimeout(Signal* signal){ - const Int32 timeout = stopReq.transactionTimeout; - const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout; - const NDB_TICKS now = NdbTick_CurrentMillisecond(); - if((timeout >= 0 && now >= alarm)){ - // || checkWithTcInSomeMagicWay) - jam(); - if(stopReq.getSystemStop(stopReq.requestInfo) || stopReq.singleuser){ - jam(); - if(stopReq.singleuser) - { - jam(); - AbortAllReq * req = (AbortAllReq*)&signal->theData[0]; - req->senderRef = cntr.reference(); - req->senderData = 12; - cntr.sendSignal(DBTC_REF, GSN_ABORT_ALL_REQ, signal, - AbortAllReq::SignalLength, JBB); - } - else - { - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = cntr.reference(); - req->senderData = StopRecord::SR_CLUSTER_SHUTDOWN; - req->requestType = WaitGCPReq::CompleteForceStart; - cntr.sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); - } - } else { - jam(); - StopPermReq * req = (StopPermReq*)&signal->theData[0]; - req->senderRef = cntr.reference(); - req->senderData = 12; - cntr.sendSignal(DBDIH_REF, GSN_STOP_PERM_REQ, signal, - StopPermReq::SignalLength, JBB); - } - return; - } - signal->theData[0] = ZSHUTDOWN; - cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void Ndbcntr::execSTOP_PERM_REF(Signal* signal){ - //StopPermRef* const ref = (StopPermRef*)&signal->theData[0]; - - jamEntry(); - - signal->theData[0] = ZSHUTDOWN; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void Ndbcntr::execSTOP_PERM_CONF(Signal* signal){ - jamEntry(); - - AbortAllReq * req = (AbortAllReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = 12; - sendSignal(DBTC_REF, GSN_ABORT_ALL_REQ, signal, - AbortAllReq::SignalLength, JBB); -} - -void Ndbcntr::execABORT_ALL_CONF(Signal* signal){ - jamEntry(); - if(c_stopRec.stopReq.singleuser) { - jam(); - - NodeState 
newState(NodeState::SL_SINGLEUSER); - newState.setSingleUser(true); - newState.setSingleUserApi(c_stopRec.stopReq.singleUserApi); - updateNodeState(signal, newState); - c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond(); - - StopConf * const stopConf = (StopConf *)&signal->theData[0]; - stopConf->senderData = c_stopRec.stopReq.senderData; - stopConf->nodeState = (Uint32) NodeState::SL_SINGLEUSER; - sendSignal(c_stopRec.stopReq.senderRef, GSN_STOP_CONF, signal, StopConf::SignalLength, JBB); - - c_stopRec.stopReq.senderRef = 0; // the command is done - - signal->theData[0] = NDB_LE_SingleUser; - signal->theData[1] = 1; - signal->theData[2] = c_stopRec.stopReq.singleUserApi; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - } - else - { - jam(); - NodeState newState(NodeState::SL_STOPPING_3, - StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)); - updateNodeState(signal, newState); - - c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond(); - - signal->theData[0] = ZSHUTDOWN; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); - } -} - -void Ndbcntr::execABORT_ALL_REF(Signal* signal){ - jamEntry(); - - StopRef * const stopRef = (StopRef *)&signal->theData[0]; - stopRef->senderData = c_stopRec.stopReq.senderData; - stopRef->errorCode = StopRef::TransactionAbortFailed; - stopRef->masterNodeId = cmasterNodeId; - sendSignal(c_stopRec.stopReq.senderRef, GSN_STOP_REF, signal, StopRef::SignalLength, JBB); -} - -void -Ndbcntr::StopRecord::checkLqhTimeout_1(Signal* signal){ - const Int32 timeout = stopReq.readOperationTimeout; - const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout; - const NDB_TICKS now = NdbTick_CurrentMillisecond(); - - if((timeout >= 0 && now >= alarm)){ - // || checkWithLqhInSomeMagicWay) - jam(); - - ChangeNodeStateReq * req = (ChangeNodeStateReq*)&signal->theData[0]; - - NodeState newState(NodeState::SL_STOPPING_4, - StopReq::getSystemStop(stopReq.requestInfo)); - req->nodeState = newState; - req->senderRef = cntr.reference(); - req->senderData = 12; - cntr.sendSignal(DBLQH_REF, GSN_CHANGE_NODE_STATE_REQ, signal, 2, JBB); - return; - } - signal->theData[0] = ZSHUTDOWN; - cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void Ndbcntr::execCHANGE_NODE_STATE_CONF(Signal* signal){ - jamEntry(); - signal->theData[0] = reference(); - signal->theData[1] = 12; - sendSignal(DBDIH_REF, GSN_STOP_ME_REQ, signal, 2, JBB); -} - -void Ndbcntr::execSTOP_ME_REF(Signal* signal){ - jamEntry(); - ndbrequire(false); -} - - -void Ndbcntr::execSTOP_ME_CONF(Signal* signal){ - jamEntry(); - - NodeState newState(NodeState::SL_STOPPING_4, - StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)); - updateNodeState(signal, newState); - - c_stopRec.stopInitiatedTime = NdbTick_CurrentMillisecond(); - signal->theData[0] = ZSHUTDOWN; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void -Ndbcntr::StopRecord::checkLqhTimeout_2(Signal* signal){ - const Int32 timeout = stopReq.operationTimeout; - const NDB_TICKS alarm = stopInitiatedTime + (NDB_TICKS)timeout; - const NDB_TICKS now = NdbTick_CurrentMillisecond(); - - if((timeout >= 0 && now >= alarm)){ - // || checkWithLqhInSomeMagicWay) - jam(); - if(StopReq::getPerformRestart(stopReq.requestInfo)){ - jam(); - StartOrd * startOrd = (StartOrd *)&signal->theData[0]; - startOrd->restartInfo = stopReq.requestInfo; - cntr.sendSignal(CMVMI_REF, GSN_START_ORD, signal, 2, JBA); - } else { - jam(); - cntr.sendSignal(CMVMI_REF, GSN_STOP_ORD, signal, 1, JBA); - } - return; 
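// The StopRecord::check*Timeout() handlers above implement a staged shutdown:
// each SL_STOPPING_n level is re-polled on a 100 ms CONTINUEB tick and only
// escalates to the next level once its own timeout (apiTimeout,
// transactionTimeout, readOperationTimeout, operationTimeout) has expired.
// The escalation test itself, as a small standalone sketch in plain C++:
#include <cstdint>

// Returns true when the current stopping level should move on.  A negative
// timeout never fires, matching the "timeout >= 0 &&" guard in the code above.
static bool shouldEscalate(int64_t startedAtMs, int64_t nowMs, int64_t timeoutMs) {
  return timeoutMs >= 0 && nowMs >= startedAtMs + timeoutMs;
}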
- } - signal->theData[0] = ZSHUTDOWN; - cntr.sendSignalWithDelay(cntr.reference(), GSN_CONTINUEB, signal, 100, 1); -} - -void Ndbcntr::execWAIT_GCP_REF(Signal* signal){ - jamEntry(); - - //WaitGCPRef* const ref = (WaitGCPRef*)&signal->theData[0]; - - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = StopRecord::SR_CLUSTER_SHUTDOWN; - req->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); -} - -void Ndbcntr::execWAIT_GCP_CONF(Signal* signal){ - jamEntry(); - - WaitGCPConf* conf = (WaitGCPConf*)signal->getDataPtr(); - - switch(conf->senderData){ - case StopRecord::SR_BLOCK_GCP_START_GCP: - { - jam(); - /** - * - */ - if(!c_stopRec.checkNodeFail(signal)) - { - jam(); - goto unblock; - } - - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = StopRecord::SR_WAIT_COMPLETE_GCP; - req->requestType = WaitGCPReq::CompleteIfRunning; - - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); - return; - } - case StopRecord::SR_UNBLOCK_GCP_START_GCP: - { - jam(); - return; - } - case StopRecord::SR_WAIT_COMPLETE_GCP: - { - jam(); - if(!c_stopRec.checkNodeFail(signal)) - { - jam(); - goto unblock; - } - - NdbNodeBitmask tmp; - tmp.assign(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - c_stopRec.m_stop_req_counter = tmp; - NodeReceiverGroup rg(QMGR, tmp); - StopReq * stopReq = (StopReq *)&signal->theData[0]; - * stopReq = c_stopRec.stopReq; - stopReq->senderRef = reference(); - sendSignal(rg, GSN_STOP_REQ, signal, StopReq::SignalLength, JBA); - c_stopRec.m_state = StopRecord::SR_QMGR_STOP_REQ; - return; - } - case StopRecord::SR_CLUSTER_SHUTDOWN: - { - jam(); - break; - } - } - - { - ndbrequire(StopReq::getSystemStop(c_stopRec.stopReq.requestInfo)); - NodeState newState(NodeState::SL_STOPPING_3, true); - - /** - * Inform QMGR so that arbitrator won't kill us - */ - NodeStateRep * rep = (NodeStateRep *)&signal->theData[0]; - rep->nodeState = newState; - rep->nodeState.masterNodeId = cmasterNodeId; - rep->nodeState.setNodeGroup(c_nodeGroup); - EXECUTE_DIRECT(QMGR, GSN_NODE_STATE_REP, signal, - NodeStateRep::SignalLength); - - if(StopReq::getPerformRestart(c_stopRec.stopReq.requestInfo)){ - jam(); - StartOrd * startOrd = (StartOrd *)&signal->theData[0]; - startOrd->restartInfo = c_stopRec.stopReq.requestInfo; - sendSignalWithDelay(CMVMI_REF, GSN_START_ORD, signal, 500, - StartOrd::SignalLength); - } else { - jam(); - sendSignalWithDelay(CMVMI_REF, GSN_STOP_ORD, signal, 500, 1); - } - return; - } - -unblock: - WaitGCPReq * req = (WaitGCPReq*)&signal->theData[0]; - req->senderRef = reference(); - req->senderData = StopRecord::SR_UNBLOCK_GCP_START_GCP; - req->requestType = WaitGCPReq::UnblockStartGcp; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, - WaitGCPReq::SignalLength, JBB); -} - -void -Ndbcntr::execSTOP_CONF(Signal* signal) -{ - jamEntry(); - StopConf *conf = (StopConf*)signal->getDataPtr(); - ndbrequire(c_stopRec.m_state == StopRecord::SR_QMGR_STOP_REQ); - c_stopRec.m_stop_req_counter.clearWaitingFor(conf->nodeId); - if (c_stopRec.m_stop_req_counter.done()) - { - char buf[100]; - NdbNodeBitmask mask; - mask.assign(NdbNodeBitmask::Size, c_stopRec.stopReq.nodes); - infoEvent("Stopping of %s", mask.getText(buf)); - ndbout_c("Stopping of %s", mask.getText(buf)); - - /** - * Kill any node... 
- */ - FailRep * const failRep = (FailRep *)&signal->theData[0]; - failRep->failCause = FailRep::ZMULTI_NODE_SHUTDOWN; - NodeReceiverGroup rg(QMGR, c_clusterNodes); - Uint32 nodeId = 0; - while ((nodeId = NdbNodeBitmask::find(c_stopRec.stopReq.nodes, nodeId+1)) - != NdbNodeBitmask::NotFound) - { - failRep->failNodeId = nodeId; - sendSignal(rg, GSN_FAIL_REP, signal, FailRep::SignalLength, JBA); - } - c_stopRec.m_state = StopRecord::SR_WAIT_NODE_FAILURES; - return; - } -} - -void Ndbcntr::execSTTORRY(Signal* signal){ - jamEntry(); - c_missra.execSTTORRY(signal); -} - -void Ndbcntr::execREAD_CONFIG_CONF(Signal* signal){ - jamEntry(); - c_missra.execREAD_CONFIG_CONF(signal); -} - -void Ndbcntr::execSTART_ORD(Signal* signal){ - jamEntry(); - c_missra.execSTART_ORD(signal); -} - -#define CLEAR_DX 13 -#define CLEAR_LCP 3 - -void -Ndbcntr::clearFilesystem(Signal* signal) -{ - const Uint32 lcp = c_fsRemoveCount >= CLEAR_DX; - - FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = 0; - req->directory = 1; - req->ownDirectory = 1; - - if (lcp == 0) - { - FsOpenReq::setVersion(req->fileNumber, 3); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); // Can by any... - FsOpenReq::v1_setDisk(req->fileNumber, c_fsRemoveCount); - } - else - { - FsOpenReq::setVersion(req->fileNumber, 5); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_DATA); - FsOpenReq::v5_setLcpNo(req->fileNumber, c_fsRemoveCount - CLEAR_DX); - FsOpenReq::v5_setTableId(req->fileNumber, 0); - FsOpenReq::v5_setFragmentId(req->fileNumber, 0); - } - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBA); - c_fsRemoveCount++; -} - -void -Ndbcntr::execFSREMOVECONF(Signal* signal){ - jamEntry(); - if(c_fsRemoveCount == CLEAR_DX + CLEAR_LCP){ - jam(); - sendSttorry(signal); - } else { - jam(); - ndbrequire(c_fsRemoveCount < CLEAR_DX + CLEAR_LCP); - clearFilesystem(signal); - }//if -} - -void Ndbcntr::Missra::execSTART_ORD(Signal* signal){ - signal->theData[0] = NDB_LE_NDBStartStarted; - signal->theData[1] = NDB_VERSION; - cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - currentBlockIndex = 0; - sendNextREAD_CONFIG_REQ(signal); -} - -void Ndbcntr::Missra::sendNextREAD_CONFIG_REQ(Signal* signal){ - - if(currentBlockIndex < ALL_BLOCKS_SZ){ - jam(); - - ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtrSend(); - req->senderData = 0; - req->senderRef = cntr.reference(); - req->noOfParameters = 0; - - const BlockReference ref = readConfigOrder[currentBlockIndex]; - -#if 0 - ndbout_c("sending READ_CONFIG_REQ to %s(ref=%x index=%d)", - getBlockName( refToBlock(ref)), - ref, - currentBlockIndex); -#endif - - cntr.sendSignal(ref, GSN_READ_CONFIG_REQ, signal, - ReadConfigReq::SignalLength, JBB); - return; - } - - /** - * Finished... 
- */ - currentStartPhase = 0; - for(Uint32 i = 0; igetDataPtr(); - - const Uint32 ref = conf->senderRef; - ndbrequire(refToBlock(readConfigOrder[currentBlockIndex]) - == refToBlock(ref)); - - currentBlockIndex++; - sendNextREAD_CONFIG_REQ(signal); -} - -void Ndbcntr::Missra::execSTTORRY(Signal* signal){ - const BlockReference ref = signal->senderBlockRef(); - ndbrequire(refToBlock(ref) == refToBlock(ALL_BLOCKS[currentBlockIndex].Ref)); - - /** - * Update next start phase - */ - for (Uint32 i = 3; i < 25; i++){ - jam(); - if (signal->theData[i] > currentStartPhase){ - jam(); - ALL_BLOCKS[currentBlockIndex].NextSP = signal->theData[i]; - break; - } - } - - currentBlockIndex++; - sendNextSTTOR(signal); -} - -void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){ - - for(; currentStartPhase < 255 ; - currentStartPhase++, g_currentStartPhase = currentStartPhase){ - jam(); - - const Uint32 start = currentBlockIndex; - - if (currentStartPhase == ZSTART_PHASE_6) - { - // Ndbd has passed the critical startphases. - // Change error handler from "startup" state - // to normal state. - ErrorReporter::setErrorHandlerShutdownType(); - } - - for(; currentBlockIndex < ALL_BLOCKS_SZ; currentBlockIndex++){ - jam(); - if(ALL_BLOCKS[currentBlockIndex].NextSP == currentStartPhase){ - jam(); - signal->theData[0] = 0; - signal->theData[1] = currentStartPhase; - signal->theData[2] = 0; - signal->theData[3] = 0; - signal->theData[4] = 0; - signal->theData[5] = 0; - signal->theData[6] = 0; - signal->theData[7] = cntr.ctypeOfStart; - - const BlockReference ref = ALL_BLOCKS[currentBlockIndex].Ref; - -#ifdef MAX_STARTPHASE - ndbrequire(currentStartPhase <= MAX_STARTPHASE); -#endif - -#ifdef TRACE_STTOR - ndbout_c("sending STTOR(%d) to %s(ref=%x index=%d)", - currentStartPhase, - getBlockName( refToBlock(ref)), - ref, - currentBlockIndex); -#endif - - cntr.sendSignal(ref, GSN_STTOR, signal, 8, JBB); - - return; - } - } - - currentBlockIndex = 0; - - NodeState newState(NodeState::SL_STARTING, currentStartPhase, - (NodeState::StartType)cntr.ctypeOfStart); - cntr.updateNodeState(signal, newState); - - if(start != 0){ - /** - * At least one wanted this start phase, report it - */ - jam(); - signal->theData[0] = NDB_LE_StartPhaseCompleted; - signal->theData[1] = currentStartPhase; - signal->theData[2] = cntr.ctypeOfStart; - cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB); - } - } - - signal->theData[0] = NDB_LE_NDBStartCompleted; - signal->theData[1] = NDB_VERSION; - cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); - - NodeState newState(NodeState::SL_STARTED); - cntr.updateNodeState(signal, newState); - - /** - * Backward - */ - UpgradeStartup::sendCmAppChg(cntr, signal, 3); //RUN - - NdbNodeBitmask nodes = cntr.c_clusterNodes; - Uint32 node = 0; - while((node = nodes.find(node+1)) != NdbNodeBitmask::NotFound){ - if(cntr.getNodeInfo(node).m_version < MAKE_VERSION(3,5,0)){ - nodes.clear(node); - } - } - - NodeReceiverGroup rg(NDBCNTR, nodes); - signal->theData[0] = cntr.getOwnNodeId(); - cntr.sendSignal(rg, GSN_CNTR_START_REP, signal, 1, JBB); -} - -/** - * Backward compatible code - */ -void -UpgradeStartup::sendCmAppChg(Ndbcntr& cntr, Signal* signal, Uint32 startLevel){ - - if(cntr.getNodeInfo(cntr.cmasterNodeId).m_version >= MAKE_VERSION(3,5,0)){ - jam(); - return; - } - - /** - * Old NDB running - */ - - signal->theData[0] = startLevel; - signal->theData[1] = cntr.getOwnNodeId(); - signal->theData[2] = 3 | ('N' << 8); - signal->theData[3] = 'D' | ('B' << 8); - signal->theData[4] = 0; - 
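// Missra::sendNextSTTOR() above drives the start phases: for the current
// phase it walks the block list and sends STTOR to every block whose recorded
// next start phase matches, then advances to the following phase once the
// list is exhausted.  The lookup step, modelled in plain C++ (BlockEntry and
// nextBlockForPhase are illustrative names, not the kernel structures):
#include <cstdint>
#include <vector>

struct BlockEntry {
  uint32_t ref;      // stand-in for the block reference
  uint32_t nextSP;   // next start phase the block asked for via STTORRY
};

// Returns the index of the next block to start in 'phase', or -1 if the phase
// is finished and the caller should advance currentStartPhase.
static int nextBlockForPhase(const std::vector<BlockEntry>& blocks,
                             uint32_t fromIndex, uint32_t phase) {
  for (uint32_t i = fromIndex; i < blocks.size(); i++) {
    if (blocks[i].nextSP == phase)
      return (int)i;
  }
  return -1;
}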
signal->theData[5] = 0; - signal->theData[6] = 0; - signal->theData[7] = 0; - signal->theData[8] = 0; - signal->theData[9] = 0; - signal->theData[10] = 0; - signal->theData[11] = 0; - - NdbNodeBitmask nodes = cntr.c_clusterNodes; - nodes.clear(cntr.getOwnNodeId()); - Uint32 node = 0; - while((node = nodes.find(node+1)) != NdbNodeBitmask::NotFound){ - if(cntr.getNodeInfo(node).m_version < MAKE_VERSION(3,5,0)){ - cntr.sendSignal(cntr.calcQmgrBlockRef(node), - GSN_CM_APPCHG, signal, 12, JBB); - } else { - cntr.c_startedNodes.set(node); // Fake started - } - } -} - -void -UpgradeStartup::execCM_APPCHG(SimulatedBlock & block, Signal* signal){ - Uint32 state = signal->theData[0]; - Uint32 nodeId = signal->theData[1]; - if(block.number() == QMGR){ - Ndbcntr& cntr = * (Ndbcntr*)globalData.getBlock(CNTR); - switch(state){ - case 0: // ZADD - break; - case 2: // ZSTART - break; - case 3: // ZRUN{ - cntr.c_startedNodes.set(nodeId); - - Uint32 recv = cntr.c_startedNodes.count(); - Uint32 cnt = cntr.c_clusterNodes.count(); - if(recv + 1 == cnt){ //+1 == own node - /** - * Check master - */ - sendCntrMasterReq(cntr, signal, 0); - } - return; - } - } - block.progError(__LINE__,NDBD_EXIT_NDBREQUIRE, - "UpgradeStartup::execCM_APPCHG"); -} - -void -UpgradeStartup::sendCntrMasterReq(Ndbcntr& cntr, Signal* signal, Uint32 n){ - Uint32 node = cntr.c_startedNodes.find(n); - if(node != NdbNodeBitmask::NotFound && - (node == cntr.getOwnNodeId() || - cntr.getNodeInfo(node).m_version >= MAKE_VERSION(3,5,0))){ - node = cntr.c_startedNodes.find(node+1); - } - - if(node == NdbNodeBitmask::NotFound){ - cntr.progError(__LINE__,NDBD_EXIT_NDBREQUIRE, - "UpgradeStartup::sendCntrMasterReq " - "NdbNodeBitmask::NotFound"); - } - - CntrMasterReq * const cntrMasterReq = (CntrMasterReq*)&signal->theData[0]; - cntr.c_clusterNodes.copyto(NdbNodeBitmask::Size, cntrMasterReq->theNodes); - NdbNodeBitmask::clear(cntrMasterReq->theNodes, cntr.getOwnNodeId()); - cntrMasterReq->userBlockRef = 0; - cntrMasterReq->userNodeId = cntr.getOwnNodeId(); - cntrMasterReq->typeOfStart = NodeState::ST_INITIAL_NODE_RESTART; - cntrMasterReq->noRestartNodes = cntr.c_clusterNodes.count() - 1; - cntr.sendSignal(cntr.calcNdbCntrBlockRef(node), GSN_CNTR_MASTERREQ, - signal, CntrMasterReq::SignalLength, JBB); -} - -void -UpgradeStartup::execCNTR_MASTER_REPLY(SimulatedBlock & block, Signal* signal){ - Uint32 gsn = signal->header.theVerId_signalNumber; - Uint32 node = refToNode(signal->getSendersBlockRef()); - if(block.number() == CNTR){ - Ndbcntr& cntr = (Ndbcntr&)block; - switch(gsn){ - case GSN_CNTR_MASTERREF: - sendCntrMasterReq(cntr, signal, node + 1); - return; - break; - case GSN_CNTR_MASTERCONF:{ - CntrStartConf* conf = (CntrStartConf*)signal->getDataPtrSend(); - conf->startGci = 0; - conf->masterNodeId = node; - conf->noStartNodes = 1; - conf->startType = NodeState::ST_INITIAL_NODE_RESTART; - NodeBitmask mask; - mask.clear(); - mask.copyto(NdbNodeBitmask::Size, conf->startedNodes); - mask.clear(); - mask.set(cntr.getOwnNodeId()); - mask.copyto(NdbNodeBitmask::Size, conf->startingNodes); - cntr.execCNTR_START_CONF(signal); - return; - } - } - } - block.progError(__LINE__,NDBD_EXIT_NDBREQUIRE, - "UpgradeStartup::execCNTR_MASTER_REPLY"); -} diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp deleted file mode 100644 index 9b4b12867b7..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB 
- Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "Ndbcntr.hpp" - -#define arrayLength(x) sizeof(x)/sizeof(x[0]) - -// SYSTAB_0 - -static const Ndbcntr::SysColumn -column_SYSTAB_0[] = { - { 0, "SYSKEY_0", - DictTabInfo::ExtUnsigned, 1, - true, false - }, - { 1, "NEXTID", - DictTabInfo::ExtBigunsigned, 1, - false, false - } -}; - -const Ndbcntr::SysTable -Ndbcntr::g_sysTable_SYSTAB_0 = { - "sys/def/SYSTAB_0", - arrayLength(column_SYSTAB_0), column_SYSTAB_0, - DictTabInfo::SystemTable, - DictTabInfo::AllNodesSmallTable, - true, ~0 -}; - -// NDB$EVENTS_0 - -static const Ndbcntr::SysColumn -column_NDBEVENTS_0[] = { - { 0, "NAME", - DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE, - true, false - }, - { 1, "EVENT_TYPE", - DictTabInfo::ExtUnsigned, 1, - false, false - }, - { 2, "TABLEID", - DictTabInfo::ExtUnsigned, 1, - false, false - }, - { 3, "TABLEVERSION", - DictTabInfo::ExtUnsigned, 1, - false, false - }, - { 4, "TABLE_NAME", - DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE, - false, false - }, - { 5, "ATTRIBUTE_MASK", - DictTabInfo::ExtUnsigned, MAXNROFATTRIBUTESINWORDS, - false, false - }, - { 6, "SUBID", - DictTabInfo::ExtUnsigned, 1, - false, false - }, - { 7, "SUBKEY", - DictTabInfo::ExtUnsigned, 1, - false, false - }, -}; - -const Ndbcntr::SysTable -Ndbcntr::g_sysTable_NDBEVENTS_0 = { - "sys/def/NDB$EVENTS_0", - arrayLength(column_NDBEVENTS_0), column_NDBEVENTS_0, - DictTabInfo::SystemTable, - DictTabInfo::AllNodesSmallTable, - true, ~0 -}; - -// all - -const Ndbcntr::SysTable* -Ndbcntr::g_sysTableList[] = { - &g_sysTable_SYSTAB_0, - &g_sysTable_NDBEVENTS_0 -}; - -//TODO Backup needs this info to allocate appropriate number of records -//BackupInit.cpp -const unsigned -Ndbcntr::g_sysTableCount = arrayLength(Ndbcntr::g_sysTableList); diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp deleted file mode 100644 index e26e611dd7e..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ /dev/null @@ -1,1387 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB, 2009 Sun Microsystems, Inc. - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include -#include -#include - -#ifdef HAVE_XFS_XFS_H -#include -#endif - -#include "AsyncFile.hpp" - -#include -#include -#include -#include -#include -#include -#include - -// use this to test broken pread code -//#define HAVE_BROKEN_PREAD - -#ifdef HAVE_BROKEN_PREAD -#undef HAVE_PWRITE -#undef HAVE_PREAD -#endif - -#if defined NDB_WIN32 -#else -// For readv and writev -#include -#endif - -#ifndef NDB_WIN32 -#include -#endif - -// Use this define if you want printouts from AsyncFile class -//#define DEBUG_ASYNCFILE - -#ifdef DEBUG_ASYNCFILE -#include -#define DEBUG(x) x -#define PRINT_ERRORANDFLAGS(f) printErrorAndFlags(f) -void printErrorAndFlags(Uint32 used_flags); -#else -#define DEBUG(x) -#define PRINT_ERRORANDFLAGS(f) -#endif - -// Define the size of the write buffer (for each thread) -#define WRITEBUFFERSIZE 262144 - -const char *actionName[] = { - "open", - "close", - "closeRemove", - "read", - "readv", - "write", - "writev", - "writeSync", - "writevSync", - "sync", - "end" }; - -static int numAsyncFiles = 0; - -extern "C" void * runAsyncFile(void* arg) -{ - ((AsyncFile*)arg)->run(); - return (NULL); -} - -AsyncFile::AsyncFile(SimulatedBlock& fs) : - theFileName(), -#ifdef NDB_WIN32 - hFile(INVALID_HANDLE_VALUE), -#else - theFd(-1), -#endif - theReportTo(0), - theMemoryChannelPtr(NULL), - m_fs(fs) -{ - m_page_ptr.setNull(); - m_current_request= m_last_request= 0; - m_open_flags = 0; -} - -void -AsyncFile::doStart() -{ - // Stacksize for filesystem threads -#if !defined(DBUG_OFF) && defined (__hpux) - // Empirical evidence indicates at least 32k - const NDB_THREAD_STACKSIZE stackSize = 32768; -#else - // Otherwise an 8k stack should be enough - const NDB_THREAD_STACKSIZE stackSize = 8192; -#endif - - char buf[16]; - numAsyncFiles++; - BaseString::snprintf(buf, sizeof(buf), "AsyncFile%d", numAsyncFiles); - - theStartMutexPtr = NdbMutex_Create(); - theStartConditionPtr = NdbCondition_Create(); - NdbMutex_Lock(theStartMutexPtr); - theStartFlag = false; - theThreadPtr = NdbThread_Create(runAsyncFile, - (void**)this, - stackSize, - (char*)&buf, - NDB_THREAD_PRIO_MEAN); - if (theThreadPtr == 0) - ERROR_SET(fatal, NDBD_EXIT_MEMALLOC, "","Could not allocate file system thread"); - - NdbCondition_Wait(theStartConditionPtr, - theStartMutexPtr); - NdbMutex_Unlock(theStartMutexPtr); - NdbMutex_Destroy(theStartMutexPtr); - NdbCondition_Destroy(theStartConditionPtr); -} - -AsyncFile::~AsyncFile() -{ - void *status; - Request request; - request.action = Request::end; - theMemoryChannelPtr->writeChannel( &request ); - NdbThread_WaitFor(theThreadPtr, &status); - NdbThread_Destroy(&theThreadPtr); - delete theMemoryChannelPtr; -} - -void -AsyncFile::reportTo( MemoryChannel *reportTo ) -{ - theReportTo = reportTo; -} - -void AsyncFile::execute(Request* request) -{ - theMemoryChannelPtr->writeChannel( request ); -} - -void -AsyncFile::run() -{ - Request *request; - // Create theMemoryChannel in the thread that will wait for it - NdbMutex_Lock(theStartMutexPtr); - theMemoryChannelPtr = new MemoryChannel(); - theStartFlag = true; - // Create write buffer for bigger writes - theWriteBufferSize = WRITEBUFFERSIZE; - theWriteBufferUnaligned = (char *) ndbd_malloc(theWriteBufferSize + - NDB_O_DIRECT_WRITE_ALIGNMENT-1); - theWriteBuffer = (char *) - (((UintPtr)theWriteBufferUnaligned + 
NDB_O_DIRECT_WRITE_ALIGNMENT - 1) & - ~(UintPtr)(NDB_O_DIRECT_WRITE_ALIGNMENT - 1)); - - NdbMutex_Unlock(theStartMutexPtr); - NdbCondition_Signal(theStartConditionPtr); - - if (!theWriteBuffer) { - DEBUG(ndbout_c("AsyncFile::writeReq, Failed allocating write buffer")); - return; - }//if - - while (1) { - request = theMemoryChannelPtr->readChannel(); - if (!request) { - DEBUG(ndbout_c("Nothing read from Memory Channel in AsyncFile")); - endReq(); - return; - }//if - m_current_request= request; - switch (request->action) { - case Request:: open: - openReq(request); - break; - case Request:: close: - closeReq(request); - break; - case Request:: closeRemove: - closeReq(request); - removeReq(request); - break; - case Request:: readPartial: - case Request:: read: - readReq(request); - break; - case Request:: readv: - readvReq(request); - break; - case Request:: write: - writeReq(request); - break; - case Request:: writev: - writevReq(request); - break; - case Request:: writeSync: - writeReq(request); - syncReq(request); - break; - case Request:: writevSync: - writevReq(request); - syncReq(request); - break; - case Request:: sync: - syncReq(request); - break; - case Request:: append: - appendReq(request); - break; - case Request:: append_synch: - appendReq(request); - syncReq(request); - break; - case Request::rmrf: - rmrfReq(request, (char*)theFileName.c_str(), request->par.rmrf.own_directory); - break; - case Request:: end: - if (theFd > 0) - closeReq(request); - endReq(); - return; - default: - DEBUG(ndbout_c("Invalid Request")); - abort(); - break; - }//switch - m_last_request= request; - m_current_request= 0; - - // No need to signal as ndbfs only uses tryRead - theReportTo->writeChannelNoSignal(request); - }//while -}//AsyncFile::run() - -#ifdef O_DIRECT -static char g_odirect_readbuf[2*GLOBAL_PAGE_SIZE -1]; -#endif - -int -AsyncFile::check_odirect_write(Uint32 flags, int& new_flags, int mode) -{ - assert(new_flags & (O_CREAT | O_TRUNC)); -#ifdef O_DIRECT - int ret; - char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); - while (((ret = ::write(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && - (errno == EINTR)); - if (ret == -1) - { - new_flags &= ~O_DIRECT; - ndbout_c("%s Failed to write using O_DIRECT, disabling", - theFileName.c_str()); - } - - close(theFd); - theFd = ::open(theFileName.c_str(), new_flags, mode); - if (theFd == -1) - return errno; -#endif - - return 0; -} - -int -AsyncFile::check_odirect_read(Uint32 flags, int &new_flags, int mode) -{ -#ifdef O_DIRECT - int ret; - char * bufptr = (char*)((UintPtr(g_odirect_readbuf)+(GLOBAL_PAGE_SIZE - 1)) & ~(GLOBAL_PAGE_SIZE - 1)); - while (((ret = ::read(theFd, bufptr, GLOBAL_PAGE_SIZE)) == -1) && - (errno == EINTR)); - if (ret == -1) - { - ndbout_c("%s Failed to read using O_DIRECT, disabling", - theFileName.c_str()); - goto reopen; - } - - if(lseek(theFd, 0, SEEK_SET) != 0) - { - return errno; - } - - if ((flags & FsOpenReq::OM_CHECK_SIZE) == 0) - { - struct stat buf; - if ((fstat(theFd, &buf) == -1)) - { - return errno; - } - else if ((buf.st_size % GLOBAL_PAGE_SIZE) != 0) - { - ndbout_c("%s filesize not a multiple of %d, disabling O_DIRECT", - theFileName.c_str(), GLOBAL_PAGE_SIZE); - goto reopen; - } - } - - return 0; - -reopen: - close(theFd); - new_flags &= ~O_DIRECT; - theFd = ::open(theFileName.c_str(), new_flags, mode); - if (theFd == -1) - return errno; -#endif - return 0; -} - -void AsyncFile::openReq(Request* request) -{ - m_auto_sync_freq = 0; - m_write_wo_sync = 0; - 
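[Editor note, not part of the patch] For context: both the write buffer set up in run() and the g_odirect_readbuf probe above depend on rounding a pointer up to the next alignment boundary before it is handed to O_DIRECT-style I/O. A minimal standalone sketch of that round-up-and-mask idiom, assuming a power-of-two alignment; align_up and the 4 KiB / 32 KiB sizes are illustrative, not taken from the removed source:

#include <cstdint>
#include <cstdlib>

// Round p up to the next multiple of 'alignment' (alignment must be a power of two).
static char* align_up(char* p, std::uintptr_t alignment)
{
  return reinterpret_cast<char*>(
      (reinterpret_cast<std::uintptr_t>(p) + alignment - 1) &
      ~(alignment - 1));
}

int main()
{
  const std::uintptr_t kAlign = 4096;                 // illustrative page size
  // Over-allocate by alignment-1 so an aligned window always fits inside.
  char* raw = static_cast<char*>(std::malloc(32 * 1024 + kAlign - 1));
  if (raw == nullptr)
    return 1;
  char* aligned = align_up(raw, kAlign);              // first aligned byte inside raw
  // ... use 'aligned' for direct I/O ...
  std::free(raw);                                     // free the original pointer, not 'aligned'
  return 0;
}

The removed code keeps both pointers for the same reason the sketch does: the aligned pointer is used for I/O, but only the original allocation may be freed.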
m_open_flags = request->par.open.flags; - - // for open.flags, see signal FSOPENREQ -#ifdef NDB_WIN32 - DWORD dwCreationDisposition; - DWORD dwDesiredAccess = 0; - DWORD dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE; - DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_RANDOM_ACCESS | FILE_FLAG_NO_BUFFERING; - Uint32 flags = request->par.open.flags; - - // Convert file open flags from Solaris to Windows - if ((flags & FsOpenReq::OM_CREATE) && (flags & FsOpenReq::OM_TRUNCATE)){ - dwCreationDisposition = CREATE_ALWAYS; - } else if (flags & FsOpenReq::OM_TRUNCATE){ - dwCreationDisposition = TRUNCATE_EXISTING; - } else if (flags & FsOpenReq::OM_CREATE){ - dwCreationDisposition = CREATE_NEW; - } else { - dwCreationDisposition = OPEN_EXISTING; - } - - switch(flags & 3){ - case FsOpenReq::OM_READONLY: - dwDesiredAccess = GENERIC_READ; - break; - case FsOpenReq::OM_WRITEONLY: - dwDesiredAccess = GENERIC_WRITE; - break; - case FsOpenReq::OM_READWRITE: - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - break; - default: - request->error = 1000; - break; - return; - } - - hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode, - 0, dwCreationDisposition, dwFlagsAndAttributes, 0); - - if(INVALID_HANDLE_VALUE == hFile) { - request->error = GetLastError(); - if(((ERROR_PATH_NOT_FOUND == request->error) || (ERROR_INVALID_NAME == request->error)) - && (flags & FsOpenReq::OM_CREATE)) { - createDirectories(); - hFile = CreateFile(theFileName.c_str(), dwDesiredAccess, dwShareMode, - 0, dwCreationDisposition, dwFlagsAndAttributes, 0); - - if(INVALID_HANDLE_VALUE == hFile) - request->error = GetLastError(); - else - request->error = 0; - - return; - } - } - else { - request->error = 0; - return; - } -#else - Uint32 flags = request->par.open.flags; - int new_flags = 0; - - // Convert file open flags from Solaris to Liux - if (flags & FsOpenReq::OM_CREATE) - { - new_flags |= O_CREAT; - } - - if (flags & FsOpenReq::OM_TRUNCATE){ -#if 0 - if(Global_unlinkO_CREAT){ - unlink(theFileName.c_str()); - } else -#endif - new_flags |= O_TRUNC; - } - - if (flags & FsOpenReq::OM_AUTOSYNC) - { - m_auto_sync_freq = request->par.open.auto_sync_size; - } - - if (flags & FsOpenReq::OM_APPEND){ - new_flags |= O_APPEND; - } - - if (flags & FsOpenReq::OM_DIRECT) -#ifdef O_DIRECT - { - new_flags |= O_DIRECT; - } -#endif - - if ((flags & FsOpenReq::OM_SYNC) && ! 
(flags & FsOpenReq::OM_INIT)) - { -#ifdef O_SYNC - new_flags |= O_SYNC; -#endif - } - - const char * rw = ""; - switch(flags & 0x3){ - case FsOpenReq::OM_READONLY: - rw = "r"; - new_flags |= O_RDONLY; - break; - case FsOpenReq::OM_WRITEONLY: - rw = "w"; - new_flags |= O_WRONLY; - break; - case FsOpenReq::OM_READWRITE: - rw = "rw"; - new_flags |= O_RDWR; - break; - default: - request->error = 1000; - break; - return; - } - - // allow for user to choose any permissionsa with umask - const int mode = S_IRUSR | S_IWUSR | - S_IRGRP | S_IWGRP | - S_IROTH | S_IWOTH; - if (flags & FsOpenReq::OM_CREATE_IF_NONE) - { - Uint32 tmp_flags = new_flags; -#ifdef O_DIRECT - tmp_flags &= ~O_DIRECT; -#endif - if ((theFd = ::open(theFileName.c_str(), tmp_flags, mode)) != -1) - { - close(theFd); - request->error = FsRef::fsErrFileExists; - return; - } - new_flags |= O_CREAT; - } - -no_odirect: - if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) - { - PRINT_ERRORANDFLAGS(new_flags); - if ((errno == ENOENT) && (new_flags & O_CREAT)) - { - createDirectories(); - if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) - { -#ifdef O_DIRECT - if (new_flags & O_DIRECT) - { - new_flags &= ~O_DIRECT; - goto no_odirect; - } -#endif - PRINT_ERRORANDFLAGS(new_flags); - request->error = errno; - return; - } - } -#ifdef O_DIRECT - else if (new_flags & O_DIRECT) - { - new_flags &= ~O_DIRECT; - goto no_odirect; - } -#endif - else - { - request->error = errno; - return; - } - } - - if (flags & FsOpenReq::OM_CHECK_SIZE) - { - struct stat buf; - if ((fstat(theFd, &buf) == -1)) - { - request->error = errno; - } - else if((Uint64)buf.st_size != request->par.open.file_size) - { - request->error = FsRef::fsErrInvalidFileSize; - } - if (request->error) - return; - } - - if (flags & FsOpenReq::OM_INIT) - { - off_t off = 0; - const off_t sz = request->par.open.file_size; - Uint32 tmp[sizeof(SignalHeader)+25]; - Signal * signal = (Signal*)(&tmp[0]); - FsReadWriteReq* req = (FsReadWriteReq*)signal->getDataPtrSend(); - - Uint32 index = 0; - Uint32 block = refToBlock(request->theUserReference); - -#ifdef HAVE_XFS_XFS_H - if(platform_test_xfs_fd(theFd)) - { - ndbout_c("Using xfsctl(XFS_IOC_RESVSP64) to allocate disk space"); - xfs_flock64_t fl; - fl.l_whence= 0; - fl.l_start= 0; - fl.l_len= (off64_t)sz; - if(xfsctl(NULL, theFd, XFS_IOC_RESVSP64, &fl) < 0) - ndbout_c("failed to optimally allocate disk space"); - } -#endif -#ifdef HAVE_POSIX_FALLOCATE - posix_fallocate(theFd, 0, sz); -#endif - - while(off < sz) - { - req->filePointer = 0; // DATA 0 - req->userPointer = request->theUserPointer; // DATA 2 - req->numberOfPages = 1; // DATA 5 - req->varIndex = index++; - req->data.pageData[0] = m_page_ptr.i; - - m_fs.EXECUTE_DIRECT(block, GSN_FSWRITEREQ, signal, - FsReadWriteReq::FixedLength + 1); - retry: - Uint32 size = request->par.open.page_size; - char* buf = (char*)m_page_ptr.p; - while(size > 0){ - const int n = write(theFd, buf, size); - if(n == -1 && errno == EINTR) - { - continue; - } - if(n == -1 || n == 0) - { - break; - } - size -= n; - buf += n; - } - if(size != 0) - { - int err = errno; -#ifdef O_DIRECT - if ((new_flags & O_DIRECT) && off == 0) - { - ndbout_c("error on first write(%d), disable O_DIRECT", err); - new_flags &= ~O_DIRECT; - close(theFd); - theFd = ::open(theFileName.c_str(), new_flags, mode); - if (theFd != -1) - goto retry; - } -#endif - close(theFd); - unlink(theFileName.c_str()); - request->error = err; - return; - } - off += request->par.open.page_size; - } - if(lseek(theFd, 0, SEEK_SET) 
!= 0) - request->error = errno; - } - else if (flags & FsOpenReq::OM_DIRECT) - { -#ifdef O_DIRECT - if (flags & (FsOpenReq::OM_TRUNCATE | FsOpenReq::OM_CREATE)) - { - request->error = check_odirect_write(flags, new_flags, mode); - } - else - { - request->error = check_odirect_read(flags, new_flags, mode); - } - - if (request->error) - return; -#endif - } -#ifdef VM_TRACE - if (flags & FsOpenReq::OM_DIRECT) - { -#ifdef O_DIRECT - ndbout_c("%s %s O_DIRECT: %d", - theFileName.c_str(), rw, - !!(new_flags & O_DIRECT)); -#else - ndbout_c("%s %s O_DIRECT: 0", - theFileName.c_str(), rw); -#endif - } -#endif - if ((flags & FsOpenReq::OM_SYNC) && (flags & FsOpenReq::OM_INIT)) - { -#ifdef O_SYNC - /** - * reopen file with O_SYNC - */ - close(theFd); - new_flags &= ~(O_CREAT | O_TRUNC); - new_flags |= O_SYNC; - theFd = ::open(theFileName.c_str(), new_flags, mode); - if (theFd == -1) - { - request->error = errno; - } -#endif - } -#endif -} - -int -AsyncFile::readBuffer(Request* req, char * buf, size_t size, off_t offset){ - int return_value; - req->par.readWrite.pages[0].size = 0; -#ifdef NDB_WIN32 - DWORD dwSFP = SetFilePointer(hFile, offset, 0, FILE_BEGIN); - if(dwSFP != offset) { - return GetLastError(); - } -#elif ! defined(HAVE_PREAD) - off_t seek_val; - while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1 - && errno == EINTR); - if(seek_val == (off_t)-1) - { - return errno; - } -#endif - - while (size > 0) { - size_t bytes_read = 0; - -#ifdef NDB_WIN32 - DWORD dwBytesRead; - BOOL bRead = ReadFile(hFile, - buf, - size, - &dwBytesRead, - 0); - if(!bRead){ - return GetLastError(); - } - bytes_read = dwBytesRead; -#elif ! defined(HAVE_PREAD) - return_value = ::read(theFd, buf, size); -#else // UNIX - return_value = ::pread(theFd, buf, size, offset); -#endif -#ifndef NDB_WIN32 - if (return_value == -1 && errno == EINTR) { - DEBUG(ndbout_c("EINTR in read")); - continue; - } else if (return_value == -1){ - return errno; - } else { - bytes_read = return_value; - } -#endif - - req->par.readWrite.pages[0].size += bytes_read; - if(bytes_read == 0){ - if(req->action == Request::readPartial) - { - return 0; - } - DEBUG(ndbout_c("Read underflow %d %d\n %x\n%d %d", - size, offset, buf, bytes_read, return_value)); - return ERR_ReadUnderflow; - } - - if(bytes_read != size){ - DEBUG(ndbout_c("Warning partial read %d != %d", - bytes_read, size)); - } - - buf += bytes_read; - size -= bytes_read; - offset += bytes_read; - } - return 0; -} - -void -AsyncFile::readReq( Request * request) -{ - for(int i = 0; i < request->par.readWrite.numberOfPages ; i++) { - off_t offset = request->par.readWrite.pages[i].offset; - size_t size = request->par.readWrite.pages[i].size; - char * buf = request->par.readWrite.pages[i].buf; - - int err = readBuffer(request, buf, size, offset); - if(err != 0){ - request->error = err; - return; - } - } -} - -void -AsyncFile::readvReq( Request * request) -{ -#if ! defined(HAVE_PREAD) - readReq(request); - return; -#elif defined NDB_WIN32 - // ReadFileScatter? 
- readReq(request); - return; -#else - int return_value; - int length = 0; - struct iovec iov[20]; // the parameter in the signal restricts this to 20 deep - for(int i=0; i < request->par.readWrite.numberOfPages ; i++) { - iov[i].iov_base= request->par.readWrite.pages[i].buf; - iov[i].iov_len= request->par.readWrite.pages[i].size; - length = length + iov[i].iov_len; - } - lseek( theFd, request->par.readWrite.pages[0].offset, SEEK_SET ); - return_value = ::readv(theFd, iov, request->par.readWrite.numberOfPages); - if (return_value == -1) { - request->error = errno; - return; - } else if (return_value != length) { - request->error = 1011; - return; - } -#endif -} - -int -AsyncFile::extendfile(Request* request) { -#if ! defined(HAVE_PWRITE) - // Find max size of this file in this request - int maxOffset = 0; - int maxSize = 0; - for(int i=0; i < request->par.readWrite.numberOfPages ; i++) { - if (request->par.readWrite.pages[i].offset > maxOffset) { - maxOffset = request->par.readWrite.pages[i].offset; - maxSize = request->par.readWrite.pages[i].size; - } - } - DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize)); - - // Allocate a buffer and fill it with zeros - void* pbuf = ndbd_malloc(maxSize); - memset(pbuf, 0, maxSize); - for (int p = 0; p <= maxOffset; p = p + maxSize) { - int return_value; - return_value = lseek(theFd, - p, - SEEK_SET); - if((return_value == -1 ) || (return_value != p)) { - ndbd_free(pbuf,maxSize); - return -1; - } - return_value = ::write(theFd, - pbuf, - maxSize); - if ((return_value == -1) || (return_value != maxSize)) { - ndbd_free(pbuf,maxSize); - return -1; - } - } - ndbd_free(pbuf,maxSize); - - DEBUG(ndbout_c("extendfile: \"%s\" OK!", theFileName.c_str())); - return 0; -#else - request = request; - DEBUG(ndbout_c("no pwrite")); - abort(); - return -1; -#endif -} - -void -AsyncFile::writeReq( Request * request) -{ - int page_num = 0; - bool write_not_complete = true; - - while(write_not_complete) { - int totsize = 0; - off_t offset = request->par.readWrite.pages[page_num].offset; - char* bufptr = theWriteBuffer; - - write_not_complete = false; - if (request->par.readWrite.numberOfPages > 1) { - off_t page_offset = offset; - - // Multiple page write, copy to buffer for one write - for(int i=page_num; i < request->par.readWrite.numberOfPages; i++) { - memcpy(bufptr, - request->par.readWrite.pages[i].buf, - request->par.readWrite.pages[i].size); - bufptr += request->par.readWrite.pages[i].size; - totsize += request->par.readWrite.pages[i].size; - if (((i + 1) < request->par.readWrite.numberOfPages)) { - // There are more pages to write - // Check that offsets are consequtive - off_t tmp = page_offset + request->par.readWrite.pages[i].size; - if (tmp != request->par.readWrite.pages[i+1].offset) { - // Next page is not aligned with previous, not allowed - DEBUG(ndbout_c("Page offsets are not aligned")); - request->error = EINVAL; - return; - } - if ((unsigned)(totsize + request->par.readWrite.pages[i+1].size) > (unsigned)theWriteBufferSize) { - // We are not finished and the buffer is full - write_not_complete = true; - // Start again with next page - page_num = i + 1; - break; - } - } - page_offset += request->par.readWrite.pages[i].size; - } - bufptr = theWriteBuffer; - } else { - // One page write, write page directly - bufptr = request->par.readWrite.pages[0].buf; - totsize = request->par.readWrite.pages[0].size; - } - int err = writeBuffer(bufptr, totsize, offset); - if(err != 0){ - request->error = err; - return; - } - } // 
while(write_not_complete) - - if(m_auto_sync_freq && m_write_wo_sync > m_auto_sync_freq){ - syncReq(request); - } -} - -int -AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset, - size_t chunk_size) -{ - size_t bytes_to_write = chunk_size; - int return_value; - - m_write_wo_sync += size; - -#ifdef NDB_WIN32 - DWORD dwSFP = SetFilePointer(hFile, offset, 0, FILE_BEGIN); - if(dwSFP != offset) { - return GetLastError(); - } -#elif ! defined(HAVE_PWRITE) - off_t seek_val; - while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1 - && errno == EINTR); - if(seek_val == (off_t)-1) - { - return errno; - } -#endif - - while (size > 0) { - if (size < bytes_to_write){ - // We are at the last chunk - bytes_to_write = size; - } - size_t bytes_written = 0; - -#ifdef NDB_WIN32 - DWORD dwWritten; - BOOL bWrite = WriteFile(hFile, buf, bytes_to_write, &dwWritten, 0); - if(!bWrite) { - return GetLastError(); - } - bytes_written = dwWritten; - if (bytes_written != bytes_to_write) { - DEBUG(ndbout_c("Warning partial write %d != %d", bytes_written, bytes_to_write)); - } - -#elif ! defined(HAVE_PWRITE) - return_value = ::write(theFd, buf, bytes_to_write); -#else // UNIX - return_value = ::pwrite(theFd, buf, bytes_to_write, offset); -#endif -#ifndef NDB_WIN32 - if (return_value == -1 && errno == EINTR) { - bytes_written = 0; - DEBUG(ndbout_c("EINTR in write")); - } else if (return_value == -1){ - return errno; - } else { - bytes_written = return_value; - - if(bytes_written == 0){ - DEBUG(ndbout_c("no bytes written")); - abort(); - } - - if(bytes_written != bytes_to_write){ - DEBUG(ndbout_c("Warning partial write %d != %d", - bytes_written, bytes_to_write)); - } - } -#endif - - buf += bytes_written; - size -= bytes_written; - offset += bytes_written; - } - return 0; -} - -void -AsyncFile::writevReq( Request * request) -{ - // WriteFileGather on WIN32? 
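[Editor note, not part of the patch] For context: writeBuffer() above advances through the caller's buffer in chunk_size pieces, restarts a chunk on EINTR, and treats a short write as progress rather than an error. A self-contained sketch of that loop, assuming POSIX pwrite(2); the function name, the chunk default and the simplified error handling are illustrative:

#include <cerrno>
#include <cstddef>
#include <sys/types.h>
#include <unistd.h>

// Write 'size' bytes at 'offset' in chunks, retrying on EINTR.
// Returns 0 on success or the errno of the failing write.
static int write_chunked(int fd, const char* buf, size_t size, off_t offset,
                         size_t chunk = 256 * 1024)
{
  while (size > 0)
  {
    size_t to_write = size < chunk ? size : chunk;    // last chunk may be smaller
    ssize_t n = ::pwrite(fd, buf, to_write, offset);
    if (n == -1)
    {
      if (errno == EINTR)
        continue;                                     // interrupted: retry this chunk
      return errno;                                   // real error: report to caller
    }
    buf    += n;                                      // short write: just advance
    size   -= n;
    offset += n;
  }
  return 0;
}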
- writeReq(request); -} - - -void -AsyncFile::closeReq(Request * request) -{ - if (m_open_flags & ( - FsOpenReq::OM_WRITEONLY | - FsOpenReq::OM_READWRITE | - FsOpenReq::OM_APPEND )) { - syncReq(request); - } -#ifdef NDB_WIN32 - if(!CloseHandle(hFile)) { - request->error = GetLastError(); - } - hFile = INVALID_HANDLE_VALUE; -#else - if (-1 == ::close(theFd)) { -#ifndef DBUG_OFF - if (theFd == -1) { - DEBUG(ndbout_c("close on fd = -1")); - abort(); - } -#endif - request->error = errno; - } - theFd = -1; -#endif -} - -bool AsyncFile::isOpen(){ -#ifdef NDB_WIN32 - return (hFile != INVALID_HANDLE_VALUE); -#else - return (theFd != -1); -#endif -} - - -void -AsyncFile::syncReq(Request * request) -{ - if(m_auto_sync_freq && m_write_wo_sync == 0){ - return; - } -#ifdef NDB_WIN32 - if(!FlushFileBuffers(hFile)) { - request->error = GetLastError(); - return; - } -#else - if (-1 == ::fsync(theFd)){ - request->error = errno; - return; - } -#endif - m_write_wo_sync = 0; -} - -void -AsyncFile::appendReq(Request * request){ - - const char * buf = request->par.append.buf; - Uint32 size = request->par.append.size; - - m_write_wo_sync += size; - -#ifdef NDB_WIN32 - DWORD dwWritten = 0; - while(size > 0){ - if(!WriteFile(hFile, buf, size, &dwWritten, 0)){ - request->error = GetLastError(); - return ; - } - - buf += dwWritten; - size -= dwWritten; - } -#else - while(size > 0){ - const int n = write(theFd, buf, size); - if(n == -1 && errno == EINTR){ - continue; - } - if(n == -1){ - request->error = errno; - return; - } - if(n == 0){ - DEBUG(ndbout_c("append with n=0")); - abort(); - } - size -= n; - buf += n; - } -#endif - - if(m_auto_sync_freq && m_write_wo_sync > m_auto_sync_freq){ - syncReq(request); - } -} - -void -AsyncFile::removeReq(Request * request) -{ -#ifdef NDB_WIN32 - if(!DeleteFile(theFileName.c_str())) { - request->error = GetLastError(); - } -#else - if (-1 == ::remove(theFileName.c_str())) { - request->error = errno; - - } -#endif -} - -void -AsyncFile::rmrfReq(Request * request, char * path, bool removePath){ - Uint32 path_len = strlen(path); - Uint32 path_max_copy = PATH_MAX - path_len; - char* path_add = &path[path_len]; -#ifndef NDB_WIN32 - if(!request->par.rmrf.directory){ - // Remove file - if(unlink((const char *)path) != 0 && errno != ENOENT) - request->error = errno; - return; - } - // Remove directory - DIR* dirp = opendir((const char *)path); - if(dirp == 0){ - if(errno != ENOENT) - request->error = errno; - return; - } - struct dirent * dp; - while ((dp = readdir(dirp)) != NULL){ - if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) { - BaseString::snprintf(path_add, (size_t)path_max_copy, "%s%s", - DIR_SEPARATOR, dp->d_name); - if(remove((const char*)path) == 0){ - path[path_len] = 0; - continue; - } - - rmrfReq(request, path, true); - path[path_len] = 0; - if(request->error != 0){ - closedir(dirp); - return; - } - } - } - closedir(dirp); - if(removePath && rmdir((const char *)path) != 0){ - request->error = errno; - } - return; -#else - - if(!request->par.rmrf.directory){ - // Remove file - if(!DeleteFile(path)){ - DWORD dwError = GetLastError(); - if(dwError!=ERROR_FILE_NOT_FOUND) - request->error = dwError; - } - return; - } - - strcat(path, "\\*"); - WIN32_FIND_DATA ffd; - HANDLE hFindFile = FindFirstFile(path, &ffd); - path[path_len] = 0; - if(INVALID_HANDLE_VALUE==hFindFile){ - DWORD dwError = GetLastError(); - if(dwError!=ERROR_PATH_NOT_FOUND) - request->error = dwError; - return; - } - - do { - if(0!=strcmp(".", ffd.cFileName) && 0!=strcmp("..", 
ffd.cFileName)){ - strcat(path, "\\"); - strcat(path, ffd.cFileName); - if(DeleteFile(path)) { - path[path_len] = 0; - continue; - }//if - - rmrfReq(request, path, true); - path[path_len] = 0; - if(request->error != 0){ - FindClose(hFindFile); - return; - } - } - } while(FindNextFile(hFindFile, &ffd)); - - FindClose(hFindFile); - - if(removePath && !RemoveDirectory(path)) - request->error = GetLastError(); - -#endif -} - -void AsyncFile::endReq() -{ - // Thread is ended with return - if (theWriteBufferUnaligned) - ndbd_free(theWriteBufferUnaligned, theWriteBufferSize); -} - - -void AsyncFile::createDirectories() -{ - char* tmp; - const char * name = theFileName.c_str(); - const char * base = theFileName.get_base_name(); - while((tmp = (char *)strstr(base, DIR_SEPARATOR))) - { - char t = tmp[0]; - tmp[0] = 0; -#ifdef NDB_WIN32 - CreateDirectory(name, 0); -#else - mkdir(name, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP); -#endif - tmp[0] = t; - base = tmp + sizeof(DIR_SEPARATOR); - } -} - -#ifdef DEBUG_ASYNCFILE -void printErrorAndFlags(Uint32 used_flags) { - char buf[255]; - sprintf(buf, "PEAF: errno=%d \"", errno); - - switch(errno) { - case EACCES: - strcat(buf, "EACCES"); - break; - case EDQUOT: - strcat(buf, "EDQUOT"); - break; - case EEXIST : - strcat(buf, "EEXIST"); - break; - case EINTR : - strcat(buf, "EINTR"); - break; - case EFAULT : - strcat(buf, "EFAULT"); - break; - case EIO : - strcat(buf, "EIO"); - break; - case EISDIR : - strcat(buf, "EISDIR"); - break; - case ELOOP : - strcat(buf, "ELOOP"); - break; - case EMFILE : - strcat(buf, "EMFILE"); - break; - case ENFILE : - strcat(buf, "ENFILE"); - break; - case ENOENT : - strcat(buf, "ENOENT "); - break; - case ENOSPC : - strcat(buf, "ENOSPC"); - break; - case ENOTDIR : - strcat(buf, "ENOTDIR"); - break; - case ENXIO : - strcat(buf, "ENXIO"); - break; - case EOPNOTSUPP: - strcat(buf, "EOPNOTSUPP"); - break; - case EMULTIHOP : - strcat(buf, "EMULTIHOP"); - break; - case ENOLINK : - strcat(buf, "ENOLINK"); - break; - case ENOSR : - strcat(buf, "ENOSR"); - break; - case EOVERFLOW : - strcat(buf, "EOVERFLOW"); - break; - case EROFS : - strcat(buf, "EROFS"); - break; - case EAGAIN : - strcat(buf, "EAGAIN"); - break; - case EINVAL : - strcat(buf, "EINVAL"); - break; - case ENOMEM : - strcat(buf, "ENOMEM"); - break; - case ETXTBSY : - strcat(buf, "ETXTBSY"); - break; - case ENAMETOOLONG: - strcat(buf, "ENAMETOOLONG"); - break; - case EBADF: - strcat(buf, "EBADF"); - break; - case ESPIPE: - strcat(buf, "ESPIPE"); - break; - case ESTALE: - strcat(buf, "ESTALE"); - break; - default: - strcat(buf, "EOTHER"); - break; - } - strcat(buf, "\" "); - strcat(buf, " flags: "); - switch(used_flags & 3){ - case O_RDONLY: - strcat(buf, "O_RDONLY, "); - break; - case O_WRONLY: - strcat(buf, "O_WRONLY, "); - break; - case O_RDWR: - strcat(buf, "O_RDWR, "); - break; - default: - strcat(buf, "Unknown!!, "); - } - - if((used_flags & O_APPEND)==O_APPEND) - strcat(buf, "O_APPEND, "); - if((used_flags & O_CREAT)==O_CREAT) - strcat(buf, "O_CREAT, "); - if((used_flags & O_EXCL)==O_EXCL) - strcat(buf, "O_EXCL, "); - if((used_flags & O_NOCTTY) == O_NOCTTY) - strcat(buf, "O_NOCTTY, "); - if((used_flags & O_NONBLOCK)==O_NONBLOCK) - strcat(buf, "O_NONBLOCK, "); - if((used_flags & O_TRUNC)==O_TRUNC) - strcat(buf, "O_TRUNC, "); - if((used_flags & O_DSYNC)==O_DSYNC) - strcat(buf, "O_DSYNC, "); - if((used_flags & O_NDELAY)==O_NDELAY) - strcat(buf, "O_NDELAY, "); - if((used_flags & O_RSYNC)==O_RSYNC) - strcat(buf, "O_RSYNC, "); -#ifdef O_SYNC - if((used_flags & 
O_SYNC)==O_SYNC) - strcat(buf, "O_SYNC, "); -#endif - DEBUG(ndbout_c(buf)); - -} -#endif - -NdbOut& -operator<<(NdbOut& out, const Request& req) -{ - out << "[ Request: file: " << hex << req.file - << " userRef: " << hex << req.theUserReference - << " userData: " << dec << req.theUserPointer - << " theFilePointer: " << req.theFilePointer - << " action: "; - switch(req.action){ - case Request::open: - out << "open"; - break; - case Request::close: - out << "close"; - break; - case Request::closeRemove: - out << "closeRemove"; - break; - case Request::read: // Allways leave readv directly after - out << "read"; - break; - case Request::readv: - out << "readv"; - break; - case Request::write:// Allways leave writev directly after - out << "write"; - break; - case Request::writev: - out << "writev"; - break; - case Request::writeSync:// Allways leave writevSync directly after - out << "writeSync"; - break; - // writeSync because SimblockAsyncFileSystem depends on it - case Request::writevSync: - out << "writevSync"; - break; - case Request::sync: - out << "sync"; - break; - case Request::end: - out << "end"; - break; - case Request::append: - out << "append"; - break; - case Request::rmrf: - out << "rmrf"; - break; - default: - out << (Uint32)req.action; - break; - } - out << " ]"; - return out; -} diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp deleted file mode 100644 index 71b6b34e2c0..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef AsyncFile_H -#define AsyncFile_H - -//=========================================================================== -// -// .DESCRIPTION -// Asynchronous file, All actions are executed concurrently with other -// activity of the process. -// Because all action are performed in a seperated thread the result of -// of a action is send back tru a memory channel. -// For the asyncronise notivication of a finished request all the calls -// have a request as paramater, the user can use the userData pointer -// to add information it needs when the request is send back. -// -// -// .TYPICAL USE: -// Writing or reading data to/from disk concurrently to other activities. -// -//=========================================================================== -//============================================================================= -// -// .PUBLIC -// -//============================================================================= -/////////////////////////////////////////////////////////////////////////////// -// -// AsyncFile( ); -// Description: -// Initialisation of the class. 
-// Parameters: -// - -/////////////////////////////////////////////////////////////////////////////// -// -// ~AsyncFile( ); -// Description: -// Tell the thread to stop and wait for it to return -// Parameters: -// - -/////////////////////////////////////////////////////////////////////////////// -// -// doStart( ); -// Description: -// Spawns the new thread. -// Parameters: -// Base path of filesystem -// -/////////////////////////////////////////////////////////////////////////////// -// -// void execute(Request *request); -// Description: -// performens the requered action. -// Parameters: -// request: request to be called when open is finished. -// action= open|close|read|write|sync -// if action is open then: -// par.open.flags= UNIX open flags, see man open -// par.open.name= name of the file to open -// if action is read or write then: -// par.readWrite.buf= user provided buffer to read/write -// the data from/to -// par.readWrite.size= how many bytes must be read/written -// par.readWrite.offset= absolute offset in file in bytes -// return: -// return values are stored in the request error field: -// error= return state of the action, UNIX error see man open/errno -// userData= is untouched can be used be user. -// -/////////////////////////////////////////////////////////////////////////////// -// -// void reportTo( MemoryChannel *reportTo ); -// Description: -// set the channel where the file must report the result of the -// actions back to. -// Parameters: -// reportTo: the memory channel to use use MemoryChannelMultipleWriter -// if more -// than one file uses this channel to report back. -// -/////////////////////////////////////////////////////////////////////////////// - -#include -#include "MemoryChannel.hpp" -#include "Filename.hpp" - -const int ERR_ReadUnderflow = 1000; - -const int WRITECHUNK = 262144; - -class AsyncFile; - -class Request -{ -public: - Request() {} - - enum Action { - open, - close, - closeRemove, - read, // Allways leave readv directly after - // read because SimblockAsyncFileSystem depends on it - readv, - write,// Allways leave writev directly after - // write because SimblockAsyncFileSystem depends on it - writev, - writeSync,// Allways leave writevSync directly after - // writeSync because SimblockAsyncFileSystem depends on it - writevSync, - sync, - end, - append, - append_synch, - rmrf, - readPartial - }; - Action action; - union { - struct { - Uint32 flags; - Uint32 page_size; - Uint64 file_size; - Uint32 auto_sync_size; - } open; - struct { - int numberOfPages; - struct{ - char *buf; - size_t size; - off_t offset; - } pages[16]; - } readWrite; - struct { - const char * buf; - size_t size; - } append; - struct { - bool directory; - bool own_directory; - } rmrf; - } par; - int error; - - void set(BlockReference userReference, - Uint32 userPointer, - Uint16 filePointer); - BlockReference theUserReference; - Uint32 theUserPointer; - Uint16 theFilePointer; - // Information for open, needed if the first open action fails. 
- AsyncFile* file; - Uint32 theTrace; -}; - -NdbOut& operator <<(NdbOut&, const Request&); - -inline -void -Request::set(BlockReference userReference, - Uint32 userPointer, Uint16 filePointer) -{ - theUserReference= userReference; - theUserPointer= userPointer; - theFilePointer= filePointer; -} - -class AsyncFile -{ - friend class Ndbfs; -public: - AsyncFile(SimulatedBlock& fs); - ~AsyncFile(); - - void reportTo( MemoryChannel *reportTo ); - - void execute( Request* request ); - - void doStart(); - // its a thread so its always running - void run(); - - bool isOpen(); - - Filename theFileName; - Request *m_current_request, *m_last_request; -private: - - void openReq(Request *request); - void readReq(Request *request); - void readvReq(Request *request); - void writeReq(Request *request); - void writevReq(Request *request); - - void closeReq(Request *request); - void syncReq(Request *request); - void removeReq(Request *request); - void appendReq(Request *request); - void rmrfReq(Request *request, char * path, bool removePath); - void endReq(); - - int readBuffer(Request*, char * buf, size_t size, off_t offset); - int writeBuffer(const char * buf, size_t size, off_t offset, - size_t chunk_size = WRITECHUNK); - - int extendfile(Request* request); - void createDirectories(); - -#ifdef NDB_WIN32 - HANDLE hFile; -#else - int theFd; -#endif - - Uint32 m_open_flags; // OM_ flags from request to open file - - MemoryChannel *theReportTo; - MemoryChannel* theMemoryChannelPtr; - - struct NdbThread* theThreadPtr; - NdbMutex* theStartMutexPtr; - NdbCondition* theStartConditionPtr; - bool theStartFlag; - int theWriteBufferSize; - char* theWriteBuffer; - void* theWriteBufferUnaligned; - - size_t m_write_wo_sync; // Writes wo/ sync - size_t m_auto_sync_freq; // Auto sync freq in bytes - - int check_odirect_read(Uint32 flags, int&new_flags, int mode); - int check_odirect_write(Uint32 flags, int&new_flags, int mode); -public: - SimulatedBlock& m_fs; - Ptr m_page_ptr; -}; - -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp deleted file mode 100644 index cb7896fb144..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFileTest/AsyncFileTest.cpp +++ /dev/null @@ -1,695 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//#define TESTDEBUG 1 - -#include - -#include -#include -#include "AsyncFile.hpp" -#include "NdbOut.hpp" -#include "NdbTick.h" -#include "NdbThread.h" -#include "NdbMain.h" - -// Test and benchmark functionality of AsyncFile -// -n Number of files -// -r Number of simultaneous requests -// -s Filesize, number of pages -// -l Number of iterations -// -remove, remove files after close -// -reverse, write files in reverse order, start with the last page - -#define MAXFILES 255 -#define DEFAULT_NUM_FILES 1 -#define MAXREQUESTS 256 -#define DEFAULT_NUM_REQUESTS 1 -#define MAXFILESIZE 4096 -#define DEFAULT_FILESIZE 2048 -#define FVERSION 0x01000000 -#define PAGESIZE 8192 - -#define TIMER_START { Uint64 starttick = NdbTick_CurrentMillisecond() -#define TIMER_PRINT(str, ops) Uint64 stoptick = NdbTick_CurrentMillisecond();\ - Uint64 totaltime = (stoptick-starttick); \ - ndbout << ops << " " << str << \ - " total time " << (int)totaltime << "ms" << endl;\ - char buf[255];\ - sprintf(buf, "%d %s/sec\n",(int)((ops*1000)/totaltime), str);\ - ndbout <* files; -AsyncFile* openFiles[MAXFILES]; -Pool* theRequestPool; -MemoryChannelMultipleWriter* theReportChannel; - -char WritePages[MAXFILES][PAGESIZE]; -char ReadPages[MAXFILES][PAGESIZE]; - -int readArguments(int argc, const char** argv); -int openFile(int fileNum); -int openFileWait(); -int closeFile(int fileNum); -int closeFileWait(); -int writeFile( int fileNum, int pagenum); -int writeFileWait(); -int writeSyncFile( int fileNum, int pagenum); -int writeSyncFileWait(); -int readFile( int fileNum, int pagenum); -int readFileWait(); - - -NDB_COMMAND(aftest, "aftest", "aftest [-n ] [-r ] [-s ] [-l ] [-remove, remove files after close] [-reverse, write files in reverse order, start with the last page]", "Test the AsyncFile class of Ndb", 8192) -{ - int s, numReq, numOps; - - readArguments(argc, argv); - - files = new Pool(numberOfFiles, 2); - theRequestPool = new Pool; - theReportChannel = new MemoryChannelMultipleWriter; - - ndbout << "AsyncFileTest starting" << endl; - ndbout << " " << numberOfFiles << " files" << endl; - ndbout << " " << numberOfRequests << " requests" << endl; - ndbout << " " << fileSize << " * 8k files" << endl << endl; - ndbout << " " << numberOfIterations << " iterations" << endl << endl; - - NdbThread_SetConcurrencyLevel(numberOfFiles+2); - - // initialize data to write to files - for (int i = 0; i < MAXFILES; i++) { - for (int j = 0; j < PAGESIZE; j++){ - WritePages[i][j] = (64+i+j)%256; - } - // memset(&WritePages[i][0], i+64, PAGESIZE); - } - - // Set file directory and name - // /T27/F27/NDBFS/S27Pnn.data - FileNameArray[0] = 27; // T27 - FileNameArray[1] = 27; // F27 - FileNameArray[2] = 27; // S27 - FileNameArray[3] = FVERSION; // Version - - for (int l = 0; l < numberOfIterations; l++) - { - - ndbout << "Opening files" << endl; - // Open files - for (int f = 0; f < numberOfFiles; f++) - { - openFile(f); - - } - - // Wait for answer - openFileWait(); - - ndbout << "Files opened!" 
<< endl<< endl; - - // Write to files - ndbout << "Started writing" << endl; - TIMER_START; - s = 0; - numReq = 0; - numOps = 0; - while ( s < fileSize) - { - for (int r = 0; r < numberOfRequests; r++) - { - for (int f = 0; f < numberOfFiles; f++) - { - writeFile(f, s); - numReq++; - numOps++; - } - - s++; - } - - while (numReq > 0) - { - writeFileWait(); - numReq--; - } - - } - - TIMER_PRINT("writes", numOps); - - - ndbout << "Started reading" << endl; - TIMER_START; - - // Read from files - s = 0; - numReq = 0; - numOps = 0; - while ( s < fileSize) - { - for (int r = 0; r < numberOfRequests; r++) - { - for (int f = 0; f < numberOfFiles; f++) - { - readFile(f, s); - numReq++; - numOps++; - } - - s++; - - } - - while (numReq > 0) - { - readFileWait(); - numReq--; - } - - } - TIMER_PRINT("reads", numOps); - - ndbout << "Started writing with sync" << endl; - TIMER_START; - - // Write to files - s = 0; - numReq = 0; - numOps = 0; - while ( s < fileSize) - { - for (int r = 0; r < numberOfRequests; r++) - { - for (int f = 0; f < numberOfFiles; f++) - { - writeSyncFile(f, s); - numReq++; - numOps++; - } - - s++; - } - - while (numReq > 0) - { - writeSyncFileWait(); - numReq--; - } - - } - - TIMER_PRINT("writeSync", numOps); - - // Close files - ndbout << "Closing files" << endl; - for (int f = 0; f < numberOfFiles; f++) - { - closeFile(f); - - } - - // Wait for answer - closeFileWait(); - - ndbout << "Files closed!" << endl<< endl; - } - - // Deallocate memory - delete files; - delete theReportChannel; - delete theRequestPool; - - return 0; - -} - - - -int forward( AsyncFile * file, Request* request ) -{ - file->execute(request); - ERROR_CHECK 0; - return 1; -} - -int openFile( int fileNum) -{ - AsyncFile* file = (AsyncFile *)files->get(); - - FileNameArray[3] = fileNum | FVERSION; - file->fileName().set( NDBFS_REF, &FileNameArray[0] ); - ndbout << "openFile: " << file->fileName().c_str() << endl; - - if( ERROR_STATE ) { - ERROR_RESET; - files->put( file ); - ndbout << "Failed to set filename" << endl; - return 1; - } - file->reportTo(theReportChannel); - - Request* request = theRequestPool->get(); - request->action= Request::open; - request->error= 0; - request->par.open.flags = 0x302; //O_RDWR | O_CREAT | O_TRUNC ; // 770 - request->set(NDBFS_REF, 0x23456789, fileNum ); - request->file = file; - - if (!forward(file,request)) { - // Something went wrong - ndbout << "Could not forward open request" << endl; - theRequestPool->put(request); - return 1; - } - return 0; -} - -int closeFile( int fileNum) -{ - - AsyncFile* file = openFiles[fileNum]; - - Request* request = theRequestPool->get(); - if (removeFiles == 1) - request->action = Request::closeRemove; - else - request->action= Request::close; - - request->error= 0; - request->set(NDBFS_REF, 0x23456789, fileNum ); - request->file = file; - - if (!forward(file,request)) { - // Something went wrong - ndbout << "Could not forward close request" << endl; - theRequestPool->put(request); - return 1; - } - return 0; -} - -int writeFile( int fileNum, int pagenum) -{ - AsyncFile* file = openFiles[fileNum]; -#ifdef TESTDEBUG - ndbout << "writeFile" << fileNum <<": "<fileName().c_str()<< endl; -#endif - Request *request = theRequestPool->get(); - request->action = Request::write; - request->error = 0; - request->set(NDBFS_REF, pagenum, fileNum); - request->file = openFiles[fileNum]; - - // Write only one page, choose the correct page for each file using fileNum - request->par.readWrite.pages[0].buf = &WritePages[fileNum][0]; - 
request->par.readWrite.pages[0].size = PAGESIZE; - if (writeFilesReverse == 1) - { - // write the last page in the files first - // This is a normal way for the Blocks in Ndb to write to a file - request->par.readWrite.pages[0].offset = (fileSize - pagenum - 1) * PAGESIZE; - } - else - { - request->par.readWrite.pages[0].offset = pagenum * PAGESIZE; - } - request->par.readWrite.numberOfPages = 1; - - if (!forward(file,request)) { - // Something went wrong - ndbout << "Could not forward write request" << endl; - theRequestPool->put(request); - return 1; - } - return 0; - -} - -int writeSyncFile( int fileNum, int pagenum) -{ - AsyncFile* file = openFiles[fileNum]; -#ifdef TESTDEBUG - ndbout << "writeFile" << fileNum <<": "<fileName().c_str() << endl; -#endif - Request *request = theRequestPool->get(); - request->action = Request::writeSync; - request->error = 0; - request->set(NDBFS_REF, pagenum, fileNum); - request->file = openFiles[fileNum]; - - // Write only one page, choose the correct page for each file using fileNum - request->par.readWrite.pages[0].buf = &WritePages[fileNum][0]; - request->par.readWrite.pages[0].size = PAGESIZE; - request->par.readWrite.pages[0].offset = pagenum * PAGESIZE; - request->par.readWrite.numberOfPages = 1; - - if (!forward(file,request)) { - // Something went wrong - ndbout << "Could not forward write request" << endl; - theRequestPool->put(request); - return 1; - } - return 0; - -} - -int readFile( int fileNum, int pagenum) -{ - AsyncFile* file = openFiles[fileNum]; -#ifdef TESTDEBUG - ndbout << "readFile" << fileNum <<": "<fileName().c_str() << endl; -#endif - Request *request = theRequestPool->get(); - request->action = Request::read; - request->error = 0; - request->set(NDBFS_REF, pagenum, fileNum); - request->file = openFiles[fileNum]; - - // Read only one page, choose the correct page for each file using fileNum - request->par.readWrite.pages[0].buf = &ReadPages[fileNum][0]; - request->par.readWrite.pages[0].size = PAGESIZE; - request->par.readWrite.pages[0].offset = pagenum * PAGESIZE; - request->par.readWrite.numberOfPages = 1; - - if (!forward(file,request)) { - // Something went wrong - ndbout << "Could not forward read request" << endl; - theRequestPool->put(request); - return 1; - } - return 0; - -} - -int openFileWait() -{ - int openedFiles = 0; - while (openedFiles < numberOfFiles) - { - Request* request = theReportChannel->readChannel(); - if (request) - { - if (request->action == Request::open) - { - if (request->error ==0) - { -#ifdef TESTDEBUG - ndbout << "Opened file " << request->file->fileName().c_str() << endl; -#endif - openFiles[request->theFilePointer] = request->file; - } - else - { - ndbout << "error while opening file" << endl; - exit(1); - } - theRequestPool->put(request); - openedFiles++; - } - else - { - ndbout << "Unexpected request received" << endl; - } - } - else - { - ndbout << "Nothing read from theReportChannel" << endl; - } - } - return 0; -} - -int closeFileWait() -{ - int closedFiles = 0; - while (closedFiles < numberOfFiles) - { - Request* request = theReportChannel->readChannel(); - if (request) - { - if (request->action == Request::close || request->action == Request::closeRemove) - { - if (request->error ==0) - { -#ifdef TESTDEBUG - ndbout << "Closed file " << request->file->fileName().c_str() << endl; -#endif - openFiles[request->theFilePointer] = NULL; - files->put(request->file); - } - else - { - ndbout << "error while closing file" << endl; - exit(1); - } - theRequestPool->put(request); - closedFiles++; - } - 
else - { - ndbout << "Unexpected request received" << endl; - } - } - else - { - ndbout << "Nothing read from theReportChannel" << endl; - } - } - return 0; -} - -int writeFileWait() -{ - Request* request = theReportChannel->readChannel(); - if (request) - { - if (request->action == Request::write) - { - if (request->error == 0) - { -#ifdef TESTDEBUG - ndbout << "writeFileWait"<theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl; -#endif - - } - else - { - ndbout << "error while writing file, error=" << request->error << endl; - exit(1); - } - theRequestPool->put(request); - } - else - { - ndbout << "Unexpected request received" << endl; - } - } - else - { - ndbout << "Nothing read from theReportChannel" << endl; - } - return 0; -} - -int writeSyncFileWait() -{ - Request* request = theReportChannel->readChannel(); - if (request) - { - if (request->action == Request::writeSync) - { - if (request->error == 0) - { -#ifdef TESTDEBUG - ndbout << "writeFileWait"<theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl; -#endif - - } - else - { - ndbout << "error while writing file" << endl; - exit(1); - } - theRequestPool->put(request); - } - else - { - ndbout << "Unexpected request received" << endl; - } - } - else - { - ndbout << "Nothing read from theReportChannel" << endl; - } - return 0; -} - -int readFileWait() -{ - Request* request = theReportChannel->readChannel(); - if (request) - { - if (request->action == Request::read) - { - if (request->error == 0) - { -#ifdef TESTDEBUG - ndbout << "readFileWait"<theFilePointer<<", " << request->theUserPointer<<" "<< request->file->fileName().c_str() << endl; -#endif - if (memcmp(&(ReadPages[request->theFilePointer][0]), &(WritePages[request->theFilePointer][0]), PAGESIZE)!=0) - { - ndbout <<"Verification error!" 
<< endl; - for (int i = 0; i < PAGESIZE; i++ ){ - ndbout <<" Compare Page " << i << " : " << ReadPages[request->theFilePointer][i] <<", " <theFilePointer][i] << endl;; - if( ReadPages[request->theFilePointer][i] !=WritePages[request->theFilePointer][i]) - - exit(1); - } - } - - } - else - { - ndbout << "error while reading file" << endl; - exit(1); - } - theRequestPool->put(request); - } - else - { - ndbout << "Unexpected request received" << endl; - } - } - else - { - ndbout << "Nothing read from theReportChannel" << endl; - } - return 0; -} - -int readArguments(int argc, const char** argv) -{ - - int i = 1; - while (argc > 1) - { - if (strcmp(argv[i], "-n") == 0) - { - numberOfFiles = atoi(argv[i+1]); - if ((numberOfFiles < 1) || (numberOfFiles > MAXFILES)) - { - ndbout << "Wrong number of files, default = "< MAXREQUESTS)) - { - ndbout << "Wrong number of requests, default = "< MAXFILESIZE)) - { - ndbout << "Wrong number of 8k pages, default = "<= theSize ){ - theIndex= 0; - } - return *this; -} - - -inline int full( const CircularIndex& write, const CircularIndex& read ) -{ - int readTmp= read.theIndex; - - if( read.theIndex < write.theIndex ) - readTmp += read.theSize; - - return ( readTmp - write.theIndex) == 1; -} - -inline int empty( const CircularIndex& write, const CircularIndex& read ) -{ - return read.theIndex == write.theIndex; -} - - -inline CircularIndex::CircularIndex( int start,int size ): - theSize(size), - theIndex(start) -{ -} -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp deleted file mode 100644 index 27200ebfbda..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
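[Editor note, not part of the patch] For context: the CircularIndex helper whose header appears (partly garbled by extraction) just above is the classic ring-buffer index pair used by MemoryChannel further below: the channel is empty when the read and write indices coincide, and full when the writer sits one slot behind the reader, so one slot is always left unused. A small restatement of that arithmetic with illustrative names:

// Ring-buffer index arithmetic: indices wrap at 'size'; one slot stays
// unused so that "full" and "empty" remain distinguishable.
struct RingIndex
{
  int index;
  int size;

  void advance() { if (++index >= size) index = 0; }
};

static bool ring_empty(const RingIndex& w, const RingIndex& r)
{
  return r.index == w.index;
}

static bool ring_full(const RingIndex& w, const RingIndex& r)
{
  int read = r.index;
  if (read < w.index)
    read += r.size;                  // unwrap so the distance is positive
  return (read - w.index) == 1;      // writer is one slot behind the reader
}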
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include - -#include "Filename.hpp" -#include "ErrorHandlingMacros.hpp" -#include "RefConvert.hpp" -#include "DebuggerNames.hpp" - -#include - -static const char* fileExtension[] = { - ".Data", - ".FragLog", - ".LocLog", - ".FragList", - ".TableList", - ".SchemaLog", - ".sysfile", - ".log", - ".ctl" -}; - -static const Uint32 noOfExtensions = sizeof(fileExtension)/sizeof(char*); - -Filename::Filename() -{ -} - -Filename::~Filename(){ -} - -void -Filename::set(Filename::NameSpec& spec, - BlockReference blockReference, - const Uint32 filenumber[4], bool dir) -{ - char buf[PATH_MAX]; - - const Uint32 type = FsOpenReq::getSuffix(filenumber); - const Uint32 version = FsOpenReq::getVersion(filenumber); - - size_t sz; - if (version == 2) - { - sz = BaseString::snprintf(theName, sizeof(theName), "%s", - spec.backup_path.c_str()); - m_base_name = theName + spec.backup_path.length(); - } - else - { - sz = BaseString::snprintf(theName, sizeof(theName), "%s", - spec.fs_path.c_str()); - m_base_name = theName + spec.fs_path.length(); - } - - switch(version){ - case 1 :{ - const Uint32 diskNo = FsOpenReq::v1_getDisk(filenumber); - const Uint32 table = FsOpenReq::v1_getTable(filenumber); - const Uint32 frag = FsOpenReq::v1_getFragment(filenumber); - const Uint32 S_val = FsOpenReq::v1_getS(filenumber); - const Uint32 P_val = FsOpenReq::v1_getP(filenumber); - - if (diskNo < 0xff){ - BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR); - strcat(theName, buf); - } - - { - const char* blockName = getBlockName( refToBlock(blockReference) ); - if (blockName == NULL){ - ERROR_SET(ecError, NDBD_EXIT_AFS_PARAMETER,"","No Block Name"); - return; - } - BaseString::snprintf(buf, sizeof(buf), "%s%s", blockName, DIR_SEPARATOR); - strcat(theName, buf); - } - - if (table < 0xffffffff){ - BaseString::snprintf(buf, sizeof(buf), "T%d%s", table, DIR_SEPARATOR); - strcat(theName, buf); - } - - if (frag < 0xffffffff){ - BaseString::snprintf(buf, sizeof(buf), "F%d%s", frag, DIR_SEPARATOR); - strcat(theName, buf); - } - - - if (S_val < 0xffffffff){ - BaseString::snprintf(buf, sizeof(buf), "S%d", S_val); - strcat(theName, buf); - } - - if (P_val < 0xff){ - BaseString::snprintf(buf, sizeof(buf), "P%d", P_val); - strcat(theName, buf); - } - - } - break; - case 2:{ - const Uint32 seq = FsOpenReq::v2_getSequence(filenumber); - const Uint32 nodeId = FsOpenReq::v2_getNodeId(filenumber); - const Uint32 count = FsOpenReq::v2_getCount(filenumber); - - BaseString::snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%d%s", - DIR_SEPARATOR, seq, DIR_SEPARATOR); - strcat(theName, buf); - if(count == 0xffffffff) { - BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d.%d", - seq, nodeId); strcat(theName, buf); - } else { - BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d-%d.%d", - seq, count, nodeId); strcat(theName, buf); - } - break; - } - break; - case 3:{ - const Uint32 diskNo = FsOpenReq::v1_getDisk(filenumber); - - if(diskNo == 0xFF){ - ERROR_SET(ecError, NDBD_EXIT_AFS_PARAMETER,"","Invalid disk specification"); - } - - BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR); - strcat(theName, buf); - } - break; - case 5: - { - Uint32 tableId = FsOpenReq::v5_getTableId(filenumber); - Uint32 lcpNo = FsOpenReq::v5_getLcpNo(filenumber); - Uint32 fragId = 
FsOpenReq::v5_getFragmentId(filenumber); - BaseString::snprintf(buf, sizeof(buf), "LCP/%d/T%dF%d", lcpNo, tableId, fragId); - strcat(theName, buf); - break; - } - default: - ERROR_SET(ecError, NDBD_EXIT_AFS_PARAMETER,"","Wrong version"); - } - if (type >= noOfExtensions){ - ERROR_SET(ecError, NDBD_EXIT_AFS_PARAMETER,"","File Type doesn't exist"); - return; - } - strcat(theName, fileExtension[type]); - - if(dir == true){ - for(int l = strlen(theName) - 1; l >= 0; l--){ - if(theName[l] == DIR_SEPARATOR[0]){ - theName[l] = 0; - break; - } - } - } -} - -void -Filename::set(Filename::NameSpec& spec, - SegmentedSectionPtr ptr, class SectionSegmentPool& pool) -{ - char buf[PATH_MAX]; - copy((Uint32*)&buf[0], ptr); - if(buf[0] == DIR_SEPARATOR[0]) - { - strncpy(theName, buf, PATH_MAX); - m_base_name = theName; - } - else - { - snprintf(theName, sizeof(theName), "%s%s", spec.fs_path.c_str(), buf); - m_base_name = theName + spec.fs_path.length(); - } -} diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp deleted file mode 100644 index 37c79c3970f..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/Filename.hpp +++ /dev/null @@ -1,98 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef Filename_H -#define Filename_H - -//=========================================================================== -// -// .DESCRIPTION -// Takes a 128 bits value (done as a array of four longs) and -// makes a filename out of it acording the following schema -// Bits 0-31 T -// Bits 32-63 F -// Bits 64-95 S -// Bits 96-103 P -// Bits 104-111 D -// Bits 112-119 File Type -// Bits 120-127 Version number of Filename -// -// T, is used to find/create a directory. If T = 0xFFFF then the -// file is on top level. In that case the F is of no relevance. -// F, same as T. -// S, is used to find/create a filename. If S= 0xFFFF then it is ignored. -// P, same as S -// D, is used to find/create the root directory, this is the -// directory before the blockname. If D= 0xFF then it is ignored. -// File Type -// 0 => .Data -// 1 => .FragLog -// 2 => .LocLog -// 3 => .FragList -// 4 => .TableList -// 5 => .SchemaLog -// 6 => .sysfile -// 15=> ignored -// Version number of Filename, current version is 0x1, must be -// used for the this style of options. 
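[Editor note, not part of the patch] For context: the comment block above documents how a 128-bit file number, carried as four 32-bit words, encodes the directory and file components that Filename::set() turns into a path. A sketch of unpacking that layout, assuming the straightforward mapping of bits 0-127 onto the four words in order; the removed code actually went through FsOpenReq accessors, which are not reproduced here:

#include <cstdint>

// Decoded form of the 128-bit file number described in the header above.
struct FileNo
{
  std::uint32_t T;        // bits 0-31  : table directory component
  std::uint32_t F;        // bits 32-63 : fragment directory component
  std::uint32_t S;        // bits 64-95 : file sequence component
  std::uint8_t  P;        // bits 96-103
  std::uint8_t  D;        // bits 104-111: root directory component
  std::uint8_t  type;     // bits 112-119: 0 => .Data, 1 => .FragLog, ...
  std::uint8_t  version;  // bits 120-127: filename layout version
};

// Assumed mapping: word i carries bits 32*i .. 32*i+31.
static FileNo decode(const std::uint32_t fileno[4])
{
  FileNo f;
  f.T       = fileno[0];
  f.F       = fileno[1];
  f.S       = fileno[2];
  f.P       = static_cast<std::uint8_t>( fileno[3]        & 0xFF);
  f.D       = static_cast<std::uint8_t>((fileno[3] >>  8) & 0xFF);
  f.type    = static_cast<std::uint8_t>((fileno[3] >> 16) & 0xFF);
  f.version = static_cast<std::uint8_t>((fileno[3] >> 24) & 0xFF);
  return f;
}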
-// -// -//=========================================================================== - -#include -#include -#include - -class Filename -{ -public: - // filenumber is 64 bits but is split in to 4 32bits words - Filename(); - ~Filename(); - - struct NameSpec { - NameSpec(BaseString& f, BaseString&b) : - fs_path(f), backup_path(b) {} - BaseString& fs_path; - BaseString& backup_path; - }; - - void set(NameSpec& spec, - BlockReference, const Uint32 fileno[4], bool = false); - void set(NameSpec& spec, - SegmentedSectionPtr ptr, class SectionSegmentPool&); - - const char* c_str() const; // Complete name including dirname - const char* get_base_name() const; // Exclude fs (or backup) path -private: - char theName[PATH_MAX]; - char * m_base_name; -}; - -// inline methods -inline const char* Filename::c_str() const { - return theName; -} - -inline const char* Filename::get_base_name() const { - return m_base_name; -} - -#endif - - - - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp deleted file mode 100644 index 4bd5a06debe..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.cpp +++ /dev/null @@ -1,18 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -//#include "MemoryChannel.hpp" - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp deleted file mode 100644 index 279b69703ec..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp +++ /dev/null @@ -1,179 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef MemoryChannel_H -#define MemoryChannel_H - -//=========================================================================== -// -// .DESCRIPTION -// Pointer based communication channel for communication between two -// thread. It does not copy any data in or out the channel so the -// item that is put in can not be used untill the other thread has -// given it back. There is no support for detecting the return of a -// item. The channel is half-duplex. 
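[Editor note, not part of the patch] For context: the description above (it continues below, together with the template implementation) boils down to a bounded queue of raw pointers guarded by a mutex and a condition variable: the writer never blocks (the removed code aborts if the channel overflows), readChannel() blocks until an item arrives, and tryReadChannel() polls. A portable restatement of that idea using the standard library; this is a hedged sketch with illustrative names, not the removed NdbMutex-based implementation:

#include <cassert>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <vector>

// Half-duplex pointer channel: one writer hands items to one reader and may
// not touch an item again until the reader has given it back.
template <class T>
class PointerChannel
{
public:
  explicit PointerChannel(std::size_t size = 512)
    : m_slots(size), m_read(0), m_write(0) {}

  void write(T* item)                 // never blocks; a full channel is a bug
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    assert((m_write + 1) % m_slots.size() != m_read && "channel full");
    m_slots[m_write] = item;
    m_write = (m_write + 1) % m_slots.size();
    m_cond.notify_one();
  }

  T* read()                           // blocks until an item is available
  {
    std::unique_lock<std::mutex> guard(m_mutex);
    m_cond.wait(guard, [this] { return m_read != m_write; });
    return take();
  }

  T* try_read()                       // returns nullptr if the channel is empty
  {
    std::lock_guard<std::mutex> guard(m_mutex);
    return m_read != m_write ? take() : nullptr;
  }

private:
  T* take()
  {
    T* item = m_slots[m_read];
    m_read = (m_read + 1) % m_slots.size();
    return item;
  }

  std::vector<T*> m_slots;
  std::size_t m_read, m_write;
  std::mutex m_mutex;
  std::condition_variable m_cond;
};

A producer thread calls write() with a heap-allocated item and must not touch it again until the consumer, after read() or try_read(), hands it back by some other means, exactly as the half-duplex note above describes.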
-// For comminication between 1 writer and 1 reader use the MemoryChannel -// class, for comminication between multiple writer and 1 reader use the -// MemoryChannelMultipleWriter. There is no support for multiple readers. -// -// .TYPICAL USE: -// to communicate between threads. -// -// .EXAMPLE: -// See AsyncFile.C -//=========================================================================== -// -// -// MemoryChannel( int size= 256); -// Constuctor -// Parameters: -// size : amount of pointer it can hold -// -// void operator ++ (); -// increments the index with one, if size is reached it is set to zero -// -// virtual void write( T *t); -// Puts the item in the channel if the channel is full an error is reported. -// Parameters: -// t: pointer to item to put in the channel, after this the item -// is shared with the other thread. -// errors -// AFS_ERROR_CHANNALFULL, channel is full -// -// T* read(); -// Reads a itemn from the channel, if channel is empty it blocks untill -// an item can be read. -// return -// T : item from the channel -// -// T* tryRead(); -// Reads a item from the channel, if channel is empty it returns zero. -// return -// T : item from the channel or zero if channel is empty. -// - -#include "ErrorHandlingMacros.hpp" -#include "CircularIndex.hpp" -#include "NdbMutex.h" -#include "NdbCondition.h" -#include - - -template -class MemoryChannel -{ -public: - MemoryChannel( int size= 512); - virtual ~MemoryChannel( ); - - void writeChannel( T *t); - void writeChannelNoSignal( T *t); - T* readChannel(); - T* tryReadChannel(); - -private: - int theSize; - T **theChannel; - CircularIndex theWriteIndex; - CircularIndex theReadIndex; - NdbMutex* theMutexPtr; - NdbCondition* theConditionPtr; - - template - friend NdbOut& operator<<(NdbOut& out, const MemoryChannel & chn); -}; - -template -NdbOut& operator<<(NdbOut& out, const MemoryChannel & chn) -{ - NdbMutex_Lock(chn.theMutexPtr); - out << "[ theSize: " << chn.theSize - << " theReadIndex: " << (int)chn.theReadIndex - << " theWriteIndex: " << (int)chn.theWriteIndex << " ]"; - NdbMutex_Unlock(chn.theMutexPtr); - return out; -} - -template MemoryChannel::MemoryChannel( int size): - theSize(size), - theChannel(new T*[size] ), - theWriteIndex(0, size), - theReadIndex(0, size) -{ - theMutexPtr = NdbMutex_Create(); - theConditionPtr = NdbCondition_Create(); -} - -template MemoryChannel::~MemoryChannel( ) -{ - NdbMutex_Destroy(theMutexPtr); - NdbCondition_Destroy(theConditionPtr); - delete [] theChannel; -} - -template void MemoryChannel::writeChannel( T *t) -{ - - NdbMutex_Lock(theMutexPtr); - if(full(theWriteIndex, theReadIndex) || theChannel == NULL) abort(); - theChannel[theWriteIndex]= t; - ++theWriteIndex; - NdbMutex_Unlock(theMutexPtr); - NdbCondition_Signal(theConditionPtr); -} - -template void MemoryChannel::writeChannelNoSignal( T *t) -{ - - NdbMutex_Lock(theMutexPtr); - if(full(theWriteIndex, theReadIndex) || theChannel == NULL) abort(); - theChannel[theWriteIndex]= t; - ++theWriteIndex; - NdbMutex_Unlock(theMutexPtr); -} - -template T* MemoryChannel::readChannel() -{ - T* tmp; - - NdbMutex_Lock(theMutexPtr); - while ( empty(theWriteIndex, theReadIndex) ) - { - NdbCondition_Wait(theConditionPtr, - theMutexPtr); - } - - tmp= theChannel[theReadIndex]; - ++theReadIndex; - NdbMutex_Unlock(theMutexPtr); - return tmp; -} - -template T* MemoryChannel::tryReadChannel() -{ - T* tmp= 0; - NdbMutex_Lock(theMutexPtr); - if ( !empty(theWriteIndex, theReadIndex) ) - { - tmp= theChannel[theReadIndex]; - ++theReadIndex; - } - 
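The full()/empty() predicates used by writeChannel/readChannel/tryReadChannel come from CircularIndex.hpp, which is not part of this hunk. A minimal sketch of the ring-index arithmetic they are assumed to implement, keeping one slot unused so a full channel can be told apart from an empty one (illustrative only; the real CircularIndex may differ):

// Illustrative ring-index predicates, assuming one slot is kept free.
struct RingIndex {
  unsigned pos;   // current slot
  unsigned size;  // total number of slots
};

inline bool ringEmpty(const RingIndex& w, const RingIndex& r)
{
  return w.pos == r.pos;                 // reader has caught up with writer
}

inline bool ringFull(const RingIndex& w, const RingIndex& r)
{
  return (w.pos + 1) % w.size == r.pos;  // writer is one step behind reader
}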
NdbMutex_Unlock(theMutexPtr); - return tmp; -} - -#endif // MemoryChannel_H - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile deleted file mode 100644 index 68f71bfc4cd..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include .defs.mk - -TYPE := kernel - -BIN_TARGET := mctest -BIN_TARGET_ARCHIVES := portlib - -SOURCES = MemoryChannelTest.cpp - -CFLAGS_MemoryChannelTest.cpp = -I../ - -include $(NDB_TOP)/Epilogue.mk - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp deleted file mode 100644 index 26707969975..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp +++ /dev/null @@ -1,193 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include "MemoryChannel.hpp" -#include "NdbThread.h" -#include "NdbSleep.h" -#include "NdbOut.hpp" -#include "NdbMain.h" - - - -MemoryChannel* theMemoryChannel; - - -extern "C" void* runProducer(void*arg) -{ - // The producer will items into the MemoryChannel - int count = *(int*)arg; - int* p; - int i = 0; - while (i <= count) - { - p = new int(i); - ndbout << "P: " << *p << endl; - theMemoryChannel->writeChannel(p); - if (i%5==0) - NdbSleep_MilliSleep(i); - i++; - } - return NULL; -} - -extern "C" void* runConsumer(void* arg) -{ - // The producer will read items from MemoryChannel and print on screen - int count = *(int*)arg; - int* p; - int i = 0; - while (i < count) - { - p = theMemoryChannel->readChannel(); - ndbout << "C: " << *p << endl; - i = *p; - delete p; - - } - return NULL; -} - - - -class ArgStruct -{ -public: - ArgStruct(int _items, int _no){ - items=_items; - no=_no; - }; - int items; - int no; -}; - -MemoryChannelMultipleWriter* theMemoryChannel2; - -extern "C" void* runProducer2(void*arg) -{ - // The producer will items into the MemoryChannel - ArgStruct* pArg = (ArgStruct*)arg; - int count = pArg->items; - ArgStruct* p; - int i = 0; - while (i < count) - { - p = new ArgStruct(i, pArg->no); - ndbout << "P"<no<<": " << i << endl; - theMemoryChannel2->writeChannel(p); - NdbSleep_MilliSleep(i); - i++; - } - return NULL; -} - -extern "C" void* runConsumer2(void* arg) -{ - // The producer will read items from MemoryChannel and print on screen - ArgStruct* pArg = (ArgStruct*)arg; - int count = pArg->items * pArg->no; - ArgStruct* p; - int i = 0; - while (i < count) - { - p = theMemoryChannel2->readChannel(); - ndbout << "C: "<< p->no << ", " << p->items << endl; - i++; - delete p; - } - ndbout << "Consumer2: " << count << " received" << endl; - return NULL; -} - - - - -//#if defined MEMORYCHANNELTEST - -//int main(int argc, char **argv) -NDB_COMMAND(mctest, 
"mctest", "mctest", "Test the memory channel used in Ndb", 32768) -{ - - ndbout << "==== testing MemoryChannel ====" << endl; - - theMemoryChannel = new MemoryChannel; - theMemoryChannel2 = new MemoryChannelMultipleWriter; - - NdbThread* consumerThread; - NdbThread* producerThread; - - NdbThread_SetConcurrencyLevel(2); - - int numItems = 100; - producerThread = NdbThread_Create(runProducer, - (void**)&numItems, - 4096, - (char*)"producer"); - - consumerThread = NdbThread_Create(runConsumer, - (void**)&numItems, - 4096, - (char*)"consumer"); - - - void *status; - NdbThread_WaitFor(consumerThread, &status); - NdbThread_WaitFor(producerThread, &status); - - ndbout << "==== testing MemoryChannelMultipleWriter ====" << endl; -#define NUM_THREADS2 5 - NdbThread_SetConcurrencyLevel(NUM_THREADS2+2); - NdbThread* producerThreads[NUM_THREADS2]; - - ArgStruct *pArg; - for (int j = 0; j < NUM_THREADS2; j++) - { - char buf[25]; - sprintf((char*)&buf, "producer%d", j); - pArg = new ArgStruct(numItems, j); - producerThreads[j] = NdbThread_Create(runProducer2, - (void**)pArg, - 4096, - (char*)&buf); - } - - pArg = new ArgStruct(numItems, NUM_THREADS2); - consumerThread = NdbThread_Create(runConsumer2, - (void**)pArg, - 4096, - (char*)"consumer"); - - - NdbThread_WaitFor(consumerThread, &status); - for (int j = 0; j < NUM_THREADS2; j++) - { - NdbThread_WaitFor(producerThreads[j], &status); - } - - - return 0; - -} - -void ErrorReporter::handleError(ErrorCategory type, int messageID, - const char* problemData, const char* objRef, - NdbShutdownType nst) -{ - - ndbout << "ErrorReporter::handleError activated" << endl; - exit(1); -} - -//#endif diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp deleted file mode 100644 index 779d44d4176..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp +++ /dev/null @@ -1,1109 +0,0 @@ -/* Copyright (c) 2003-2007 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#include - -#include "Ndbfs.hpp" -#include "AsyncFile.hpp" -#include "Filename.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define DEBUG(x) { ndbout << "FS::" << x << endl; } - -inline -int pageSize( const NewVARIABLE* baseAddrRef ) -{ - int log_psize; - int log_qsize = baseAddrRef->bits.q; - int log_vsize = baseAddrRef->bits.v; - if (log_vsize < 3) - log_vsize = 3; - log_psize = log_qsize + log_vsize - 3; - return (1 << log_psize); -} - - -Ndbfs::Ndbfs(Block_context& ctx) : - SimulatedBlock(NDBFS, ctx), - scanningInProgress(false), - theLastId(0), - theRequestPool(0), - m_maxOpenedFiles(0) -{ - BLOCK_CONSTRUCTOR(Ndbfs); - - // Set received signals - addRecSignal(GSN_READ_CONFIG_REQ, &Ndbfs::execREAD_CONFIG_REQ); - addRecSignal(GSN_DUMP_STATE_ORD, &Ndbfs::execDUMP_STATE_ORD); - addRecSignal(GSN_STTOR, &Ndbfs::execSTTOR); - addRecSignal(GSN_FSOPENREQ, &Ndbfs::execFSOPENREQ); - addRecSignal(GSN_FSCLOSEREQ, &Ndbfs::execFSCLOSEREQ); - addRecSignal(GSN_FSWRITEREQ, &Ndbfs::execFSWRITEREQ); - addRecSignal(GSN_FSREADREQ, &Ndbfs::execFSREADREQ); - addRecSignal(GSN_FSSYNCREQ, &Ndbfs::execFSSYNCREQ); - addRecSignal(GSN_CONTINUEB, &Ndbfs::execCONTINUEB); - addRecSignal(GSN_FSAPPENDREQ, &Ndbfs::execFSAPPENDREQ); - addRecSignal(GSN_FSREMOVEREQ, &Ndbfs::execFSREMOVEREQ); - // Set send signals -} - -Ndbfs::~Ndbfs() -{ - // Delete all files - // AsyncFile destuctor will take care of deleting - // the thread it has created - for (unsigned i = 0; i < theFiles.size(); i++){ - AsyncFile* file = theFiles[i]; - delete file; - theFiles[i] = NULL; - }//for - theFiles.clear(); - if (theRequestPool) - delete theRequestPool; -} - -void -Ndbfs::execREAD_CONFIG_REQ(Signal* signal) -{ - const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); - - Uint32 ref = req->senderRef; - Uint32 senderData = req->senderData; - - const ndb_mgm_configuration_iterator * p = - m_ctx.m_config.getOwnConfigIterator(); - ndbrequire(p != 0); - theFileSystemPath.assfmt("%sndb_%u_fs%s", m_ctx.m_config.fileSystemPath(), - getOwnNodeId(), DIR_SEPARATOR); - theBackupFilePath.assign(m_ctx.m_config.backupFilePath()); - - theRequestPool = new Pool; - - m_maxFiles = 0; - ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles); - Uint32 noIdleFiles = 27; - ndb_mgm_get_int_parameter(p, CFG_DB_INITIAL_OPEN_FILES, &noIdleFiles); - if (noIdleFiles > m_maxFiles && m_maxFiles != 0) - m_maxFiles = noIdleFiles; - // Create idle AsyncFiles - for (Uint32 i = 0; i < noIdleFiles; i++){ - theIdleFiles.push_back(createAsyncFile()); - } - - ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = senderData; - sendSignal(ref, GSN_READ_CONFIG_CONF, signal, - ReadConfigConf::SignalLength, JBB); -} - -/* Received a restart signal. - * Answer it like any other block - * PR0 : StartCase - * DR0 : StartPhase - * DR1 : ? - * DR2 : ? - * DR3 : ? - * DR4 : ? 
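The clamp-and-preallocate logic in execREAD_CONFIG_REQ above (CFG_DB_MAX_OPEN_FILES, CFG_DB_INITIAL_OPEN_FILES, then pre-creating idle AsyncFiles) reduces to a few lines. A standalone sketch of the same policy, with illustrative names:

// Illustrative restatement of the idle-file preallocation policy above.
struct FilePoolConfig {
  unsigned maxFiles;       // 0 means "no explicit limit"
  unsigned initialFiles;   // worker files created up front
};

inline FilePoolConfig normalizeFilePoolConfig(unsigned cfgMax, unsigned cfgInitial)
{
  FilePoolConfig c{cfgMax, cfgInitial};
  // If the configured maximum is smaller than the initial pool,
  // raise the maximum to match, as the block does above.
  if (c.initialFiles > c.maxFiles && c.maxFiles != 0)
    c.maxFiles = c.initialFiles;
  return c;
}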
- * DR5 : SignalKey - */ -void -Ndbfs::execSTTOR(Signal* signal) -{ - jamEntry(); - - if(signal->theData[1] == 0){ // StartPhase 0 - jam(); - - { -#ifdef NDB_WIN32 - CreateDirectory(theFileSystemPath.c_str(), 0); -#else - mkdir(theFileSystemPath.c_str(), - S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP); -#endif - } - - cownref = NDBFS_REF; - // close all open files - ndbrequire(theOpenFiles.size() == 0); - - scanningInProgress = false; - - signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY; - sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1); - - signal->theData[3] = 255; - sendSignal(NDBCNTR_REF, GSN_STTORRY, signal,4, JBB); - return; - } - ndbrequire(0); -} - -int -Ndbfs::forward( AsyncFile * file, Request* request) -{ - jam(); - file->execute(request); - return 1; -} - -void -Ndbfs::execFSOPENREQ(Signal* signal) -{ - jamEntry(); - const FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0]; - const BlockReference userRef = fsOpenReq->userReference; - AsyncFile* file = getIdleFile(); - ndbrequire(file != NULL); - Filename::NameSpec spec(theFileSystemPath, theBackupFilePath); - - Uint32 userPointer = fsOpenReq->userPointer; - - if(fsOpenReq->fileFlags & FsOpenReq::OM_INIT) - { - Ptr page_ptr; - if(m_global_page_pool.seize(page_ptr) == false) - { - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = userPointer; - fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrOutOfMemory); - fsRef->osErrorCode = ~0; // Indicate local error - sendSignal(userRef, GSN_FSOPENREF, signal, 3, JBB); - return; - } - file->m_page_ptr = page_ptr; - } - else - { - ndbassert(file->m_page_ptr.isNull()); - file->m_page_ptr.setNull(); - } - - if(signal->getNoOfSections() == 0){ - jam(); - file->theFileName.set(spec, userRef, fsOpenReq->fileNumber); - } else { - jam(); - SegmentedSectionPtr ptr; - signal->getSection(ptr, FsOpenReq::FILENAME); - file->theFileName.set(spec, ptr, g_sectionSegmentPool); - releaseSections(signal); - } - file->reportTo(&theFromThreads); - if (getenv("NDB_TRACE_OPEN")) - ndbout_c("open(%s)", file->theFileName.c_str()); - - Request* request = theRequestPool->get(); - request->action = Request::open; - request->error = 0; - request->set(userRef, userPointer, newId() ); - request->file = file; - request->theTrace = signal->getTrace(); - request->par.open.flags = fsOpenReq->fileFlags; - request->par.open.page_size = fsOpenReq->page_size; - request->par.open.file_size = fsOpenReq->file_size_hi; - request->par.open.file_size <<= 32; - request->par.open.file_size |= fsOpenReq->file_size_lo; - request->par.open.auto_sync_size = fsOpenReq->auto_sync_size; - - ndbrequire(forward(file, request)); -} - -void -Ndbfs::execFSREMOVEREQ(Signal* signal) -{ - jamEntry(); - const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr(); - const BlockReference userRef = req->userReference; - AsyncFile* file = getIdleFile(); - ndbrequire(file != NULL); - - Filename::NameSpec spec(theFileSystemPath, theBackupFilePath); - file->theFileName.set(spec, userRef, req->fileNumber, req->directory); - file->reportTo(&theFromThreads); - - Request* request = theRequestPool->get(); - request->action = Request::rmrf; - request->par.rmrf.directory = req->directory; - request->par.rmrf.own_directory = req->ownDirectory; - request->error = 0; - request->set(userRef, req->userPointer, newId() ); - request->file = file; - request->theTrace = signal->getTrace(); - - ndbrequire(forward(file, request)); -} - -/* - * PR0: File Pointer DR0: User reference DR1: User Pointer 
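Each FS*REQ handler above follows the same shape: pick a worker AsyncFile, fill a Request taken from the pool, and hand it to that file's thread via forward(). A condensed sketch of that dispatch pattern, with simplified types and error paths omitted:

// Condensed restatement of the request-dispatch pattern used by the
// FS*REQ handlers above; signal details and error handling omitted.
template <class Pool, class File, class Request>
bool dispatchToFileThread(Pool& pool, File* file, int action,
                          unsigned userRef, unsigned userPointer,
                          unsigned short filePointer)
{
  Request* request = pool.get();     // reuse a pooled Request object
  request->error = 0;
  request->action = action;
  request->set(userRef, userPointer, filePointer);
  request->file = file;
  file->execute(request);            // forward(): hand over to the file's thread
  return true;
}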
DR2: Flag bit 0= 1 - * remove file - */ -void -Ndbfs::execFSCLOSEREQ(Signal * signal) -{ - jamEntry(); - const FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0]; - const BlockReference userRef = fsCloseReq->userReference; - const Uint16 filePointer = (Uint16)fsCloseReq->filePointer; - const UintR userPointer = fsCloseReq->userPointer; - - AsyncFile* openFile = theOpenFiles.find(filePointer); - if (openFile == NULL) { - // The file was not open, send error back to sender - jam(); - // Initialise FsRef signal - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = userPointer; - fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist); - fsRef->osErrorCode = ~0; // Indicate local error - sendSignal(userRef, GSN_FSCLOSEREF, signal, 3, JBB); - return; - } - - Request *request = theRequestPool->get(); - if( fsCloseReq->getRemoveFileFlag(fsCloseReq->fileFlag) == true ) { - jam(); - request->action = Request::closeRemove; - } else { - jam(); - request->action = Request::close; - } - request->set(userRef, fsCloseReq->userPointer, filePointer); - request->file = openFile; - request->error = 0; - request->theTrace = signal->getTrace(); - - ndbrequire(forward(openFile, request)); -} - -void -Ndbfs::readWriteRequest(int action, Signal * signal) -{ - const FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0]; - Uint16 filePointer = (Uint16)fsRWReq->filePointer; - const UintR userPointer = fsRWReq->userPointer; - const BlockReference userRef = fsRWReq->userReference; - const BlockNumber blockNumber = refToBlock(userRef); - - AsyncFile* openFile = theOpenFiles.find(filePointer); - - const NewVARIABLE *myBaseAddrRef = &getBat(blockNumber)[fsRWReq->varIndex]; - UintPtr tPageSize; - UintPtr tClusterSize; - UintPtr tNRR; - UintPtr tPageOffset; - char* tWA; - FsRef::NdbfsErrorCodeType errorCode; - - Request *request = theRequestPool->get(); - request->error = 0; - request->set(userRef, userPointer, filePointer); - request->file = openFile; - request->action = (Request::Action) action; - request->theTrace = signal->getTrace(); - - Uint32 format = fsRWReq->getFormatFlag(fsRWReq->operationFlag); - - if (fsRWReq->numberOfPages == 0) { //Zero pages not allowed - jam(); - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - - if(format != FsReadWriteReq::fsFormatGlobalPage && - format != FsReadWriteReq::fsFormatSharedPage) - { - if (fsRWReq->varIndex >= getBatSize(blockNumber)) { - jam();// Ensure that a valid variable is used - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - if (myBaseAddrRef == NULL) { - jam(); // Ensure that a valid variable is used - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - if (openFile == NULL) { - jam(); //file not open - errorCode = FsRef::fsErrFileDoesNotExist; - goto error; - } - tPageSize = pageSize(myBaseAddrRef); - tClusterSize = myBaseAddrRef->ClusterSize; - tNRR = myBaseAddrRef->nrr; - tWA = (char*)myBaseAddrRef->WA; - - switch (format) { - - // List of memory and file pages pairs - case FsReadWriteReq::fsFormatListOfPairs: { - jam(); - for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) { - jam(); - const UintPtr varIndex = fsRWReq->data.listOfPair[i].varIndex; - const UintPtr fileOffset = fsRWReq->data.listOfPair[i].fileOffset; - if (varIndex >= tNRR) { - jam(); - errorCode = FsRef::fsErrInvalidParameters; - goto error; - }//if - request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize]; - request->par.readWrite.pages[i].size = tPageSize; - 
request->par.readWrite.pages[i].offset = fileOffset * tPageSize; - }//for - request->par.readWrite.numberOfPages = fsRWReq->numberOfPages; - break; - }//case - - // Range of memory page with one file page - case FsReadWriteReq::fsFormatArrayOfPages: { - if ((fsRWReq->numberOfPages + fsRWReq->data.arrayOfPages.varIndex) > tNRR) { - jam(); - errorCode = FsRef::fsErrInvalidParameters; - goto error; - }//if - const UintPtr varIndex = fsRWReq->data.arrayOfPages.varIndex; - const UintPtr fileOffset = fsRWReq->data.arrayOfPages.fileOffset; - - request->par.readWrite.pages[0].offset = fileOffset * tPageSize; - request->par.readWrite.pages[0].size = tPageSize * fsRWReq->numberOfPages; - request->par.readWrite.numberOfPages = 1; - request->par.readWrite.pages[0].buf = &tWA[varIndex * tPageSize]; - break; - }//case - - // List of memory pages followed by one file page - case FsReadWriteReq::fsFormatListOfMemPages: { - - tPageOffset = fsRWReq->data.listOfMemPages.varIndex[fsRWReq->numberOfPages]; - tPageOffset *= tPageSize; - - for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) { - jam(); - UintPtr varIndex = fsRWReq->data.listOfMemPages.varIndex[i]; - - if (varIndex >= tNRR) { - jam(); - errorCode = FsRef::fsErrInvalidParameters; - goto error; - }//if - request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize]; - request->par.readWrite.pages[i].size = tPageSize; - request->par.readWrite.pages[i].offset = tPageOffset + (i*tPageSize); - }//for - request->par.readWrite.numberOfPages = fsRWReq->numberOfPages; - break; - // make it a writev or readv - }//case - - default: { - jam(); - errorCode = FsRef::fsErrInvalidParameters; - goto error; - }//default - }//switch - } - else if (format == FsReadWriteReq::fsFormatGlobalPage) - { - Ptr ptr; - m_global_page_pool.getPtr(ptr, fsRWReq->data.pageData[0]); - request->par.readWrite.pages[0].buf = (char*)ptr.p; - request->par.readWrite.pages[0].size = ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->numberOfPages; - request->par.readWrite.pages[0].offset= ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->varIndex; - request->par.readWrite.numberOfPages = 1; - } - else - { - ndbrequire(format == FsReadWriteReq::fsFormatSharedPage); - Ptr ptr; - m_shared_page_pool.getPtr(ptr, fsRWReq->data.pageData[0]); - request->par.readWrite.pages[0].buf = (char*)ptr.p; - request->par.readWrite.pages[0].size = ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->numberOfPages; - request->par.readWrite.pages[0].offset= ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->varIndex; - request->par.readWrite.numberOfPages = 1; - } - - ndbrequire(forward(openFile, request)); - return; - -error: - theRequestPool->put(request); - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = userPointer; - fsRef->setErrorCode(fsRef->errorCode, errorCode); - fsRef->osErrorCode = ~0; // Indicate local error - switch (action) { - case Request:: write: - case Request:: writeSync: { - jam(); - sendSignal(userRef, GSN_FSWRITEREF, signal, 3, JBB); - break; - }//case - case Request:: readPartial: - case Request:: read: { - jam(); - sendSignal(userRef, GSN_FSREADREF, signal, 3, JBB); - }//case - }//switch - return; -} - -/* - PR0: File Pointer , theData[0] - DR0: User reference, theData[1] - DR1: User Pointer, etc. 
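In the fsFormatListOfPairs branch above, each (varIndex, fileOffset) pair becomes one (buffer, size, file offset) triple; the address arithmetic in isolation, with standard types standing in for the NDB typedefs:

#include <cstddef>

// Per-pair address arithmetic from the fsFormatListOfPairs branch above.
// wa is the block's working area; clusterSize/pageSize come from the BAT entry.
inline void fillPagePair(char* wa, std::size_t clusterSize, std::size_t pageSize,
                         std::size_t varIndex, std::size_t fileOffset,
                         char*& buf, std::size_t& size, std::size_t& offset)
{
  buf    = wa + varIndex * clusterSize;  // memory page inside the working area
  size   = pageSize;                     // one page per pair
  offset = fileOffset * pageSize;        // byte offset within the file
}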
- DR2: Flag - DR3: Var number - DR4: amount of pages - DR5->: Memory Page id and File page id according to Flag -*/ -void -Ndbfs::execFSWRITEREQ(Signal* signal) -{ - jamEntry(); - const FsReadWriteReq * const fsWriteReq = (FsReadWriteReq *)&signal->theData[0]; - - if (fsWriteReq->getSyncFlag(fsWriteReq->operationFlag) == true){ - jam(); - readWriteRequest( Request::writeSync, signal ); - } else { - jam(); - readWriteRequest( Request::write, signal ); - } -} - -/* - PR0: File Pointer - DR0: User reference - DR1: User Pointer - DR2: Flag - DR3: Var number - DR4: amount of pages - DR5->: Memory Page id and File page id according to Flag -*/ -void -Ndbfs::execFSREADREQ(Signal* signal) -{ - jamEntry(); - FsReadWriteReq * req = (FsReadWriteReq *)signal->getDataPtr(); - if (FsReadWriteReq::getPartialReadFlag(req->operationFlag)) - readWriteRequest( Request::readPartial, signal ); - else - readWriteRequest( Request::read, signal ); -} - -/* - * PR0: File Pointer DR0: User reference DR1: User Pointer - */ -void -Ndbfs::execFSSYNCREQ(Signal * signal) -{ - jamEntry(); - Uint16 filePointer = (Uint16)signal->theData[0]; - BlockReference userRef = signal->theData[1]; - const UintR userPointer = signal->theData[2]; - AsyncFile* openFile = theOpenFiles.find(filePointer); - - if (openFile == NULL) { - jam(); //file not open - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = userPointer; - fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist); - fsRef->osErrorCode = ~0; // Indicate local error - sendSignal(userRef, GSN_FSSYNCREF, signal, 3, JBB); - return; - } - - Request *request = theRequestPool->get(); - request->error = 0; - request->action = Request::sync; - request->set(userRef, userPointer, filePointer); - request->file = openFile; - request->theTrace = signal->getTrace(); - - ndbrequire(forward(openFile,request)); -} - -void -Ndbfs::execFSAPPENDREQ(Signal * signal) -{ - const FsAppendReq * const fsReq = (FsAppendReq *)&signal->theData[0]; - const Uint16 filePointer = (Uint16)fsReq->filePointer; - const UintR userPointer = fsReq->userPointer; - const BlockReference userRef = fsReq->userReference; - const BlockNumber blockNumber = refToBlock(userRef); - - FsRef::NdbfsErrorCodeType errorCode; - - AsyncFile* openFile = theOpenFiles.find(filePointer); - const NewVARIABLE *myBaseAddrRef = &getBat(blockNumber)[fsReq->varIndex]; - - const Uint32* tWA = (const Uint32*)myBaseAddrRef->WA; - const Uint32 tSz = myBaseAddrRef->nrr; - const Uint32 offset = fsReq->offset; - const Uint32 size = fsReq->size; - const Uint32 synch_flag = fsReq->synch_flag; - Request *request = theRequestPool->get(); - - if (openFile == NULL) { - jam(); - errorCode = FsRef::fsErrFileDoesNotExist; - goto error; - } - - if (myBaseAddrRef == NULL) { - jam(); // Ensure that a valid variable is used - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - - if (fsReq->varIndex >= getBatSize(blockNumber)) { - jam();// Ensure that a valid variable is used - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - - if(offset + size > tSz){ - jam(); // Ensure that a valid variable is used - errorCode = FsRef::fsErrInvalidParameters; - goto error; - } - - request->error = 0; - request->set(userRef, userPointer, filePointer); - request->file = openFile; - request->theTrace = signal->getTrace(); - - request->par.append.buf = (const char *)(tWA + offset); - request->par.append.size = size << 2; - - if (!synch_flag) - request->action = Request::append; - else - request->action = 
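In execFSAPPENDREQ above, offset and size are counted in 32-bit words, hence the bounds check against nrr and the `size << 2` conversion to bytes. The same check in isolation:

#include <cstdint>

// Word-based bounds check and byte conversion, as in execFSAPPENDREQ above.
inline bool appendParams(const uint32_t* wa, uint32_t totalWords,
                         uint32_t offsetWords, uint32_t sizeWords,
                         const char*& buf, uint32_t& sizeBytes)
{
  if (offsetWords + sizeWords > totalWords)   // must stay inside the variable
    return false;                             // -> fsErrInvalidParameters
  buf = reinterpret_cast<const char*>(wa + offsetWords);
  sizeBytes = sizeWords << 2;                 // 4 bytes per 32-bit word
  return true;
}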
Request::append_synch; - ndbrequire(forward(openFile, request)); - return; - -error: - jam(); - theRequestPool->put(request); - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = userPointer; - fsRef->setErrorCode(fsRef->errorCode, errorCode); - fsRef->osErrorCode = ~0; // Indicate local error - - jam(); - sendSignal(userRef, GSN_FSAPPENDREF, signal, 3, JBB); - return; -} - -Uint16 -Ndbfs::newId() -{ - // finds a new key, eg a new filepointer - for (int i = 1; i < SHRT_MAX; i++) - { - if (theLastId == SHRT_MAX) { - jam(); - theLastId = 1; - } else { - jam(); - theLastId++; - } - - if(theOpenFiles.find(theLastId) == NULL) { - jam(); - return theLastId; - } - } - ndbrequire(1 == 0); - // The program will not reach this point - return 0; -} - -AsyncFile* -Ndbfs::createAsyncFile(){ - - // Check limit of open files - if (m_maxFiles !=0 && theFiles.size() == m_maxFiles) { - // Print info about all open files - for (unsigned i = 0; i < theFiles.size(); i++){ - AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); - } - ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile"); - } - - AsyncFile* file = new AsyncFile(* this); - file->doStart(); - - // Put the file in list of all files - theFiles.push_back(file); - -#ifdef VM_TRACE - infoEvent("NDBFS: Created new file thread %d", theFiles.size()); -#endif - - return file; -} - -AsyncFile* -Ndbfs::getIdleFile(){ - AsyncFile* file; - if (theIdleFiles.size() > 0){ - file = theIdleFiles[0]; - theIdleFiles.erase(0); - } else { - file = createAsyncFile(); - } - return file; -} - - - -void -Ndbfs::report(Request * request, Signal* signal) -{ - const Uint32 orgTrace = signal->getTrace(); - signal->setTrace(request->theTrace); - const BlockReference ref = request->theUserReference; - - if(!request->file->m_page_ptr.isNull()) - { - m_global_page_pool.release(request->file->m_page_ptr); - request->file->m_page_ptr.setNull(); - } - - if (request->error) { - jam(); - // Initialise FsRef signal - FsRef * const fsRef = (FsRef *)&signal->theData[0]; - fsRef->userPointer = request->theUserPointer; - if(request->error & FsRef::FS_ERR_BIT) - { - fsRef->errorCode = request->error; - fsRef->osErrorCode = 0; - } - else - { - fsRef->setErrorCode(fsRef->errorCode, translateErrno(request->error)); - fsRef->osErrorCode = request->error; - } - switch (request->action) { - case Request:: open: { - jam(); - // Put the file back in idle files list - theIdleFiles.push_back(request->file); - sendSignal(ref, GSN_FSOPENREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request:: closeRemove: - case Request:: close: { - jam(); - sendSignal(ref, GSN_FSCLOSEREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request:: writeSync: - case Request:: writevSync: - case Request:: write: - case Request:: writev: { - jam(); - sendSignal(ref, GSN_FSWRITEREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request:: read: - case Request:: readPartial: - case Request:: readv: { - jam(); - sendSignal(ref, GSN_FSREADREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request:: sync: { - jam(); - sendSignal(ref, GSN_FSSYNCREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request::append: - case Request::append_synch: - { - jam(); - sendSignal(ref, GSN_FSAPPENDREF, signal, FsRef::SignalLength, JBB); - break; - } - case Request::rmrf: { - jam(); - // Put the file back in idle files list - theIdleFiles.push_back(request->file); - sendSignal(ref, GSN_FSREMOVEREF, signal, 
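newId() above is a simple wrap-around search for an unused file pointer, skipping id 0 and treating exhaustion as fatal. The same idea without the block plumbing (the inUse callback stands in for theOpenFiles.find()):

#include <climits>
#include <functional>

// Wrap-around search for an unused 16-bit file pointer, as in newId() above.
inline unsigned short nextFreeId(unsigned short& lastId,
                                 const std::function<bool(unsigned short)>& inUse)
{
  for (int i = 1; i < SHRT_MAX; i++) {
    lastId = (lastId == SHRT_MAX) ? 1 : (unsigned short)(lastId + 1);  // skip 0, wrap
    if (!inUse(lastId))
      return lastId;
  }
  return 0;  // no free id found; the real code treats this as unreachable
}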
FsRef::SignalLength, JBB); - break; - } - - case Request:: end: { - // Report nothing - break; - } - }//switch - } else { - jam(); - FsConf * const fsConf = (FsConf *)&signal->theData[0]; - fsConf->userPointer = request->theUserPointer; - switch (request->action) { - case Request:: open: { - jam(); - theOpenFiles.insert(request->file, request->theFilePointer); - - // Keep track on max number of opened files - if (theOpenFiles.size() > m_maxOpenedFiles) - m_maxOpenedFiles = theOpenFiles.size(); - - fsConf->filePointer = request->theFilePointer; - sendSignal(ref, GSN_FSOPENCONF, signal, 3, JBB); - break; - } - case Request:: closeRemove: - case Request:: close: { - jam(); - // removes the file from OpenFiles list - theOpenFiles.erase(request->theFilePointer); - // Put the file in idle files list - theIdleFiles.push_back(request->file); - sendSignal(ref, GSN_FSCLOSECONF, signal, 1, JBB); - break; - } - case Request:: writeSync: - case Request:: writevSync: - case Request:: write: - case Request:: writev: { - jam(); - sendSignal(ref, GSN_FSWRITECONF, signal, 1, JBB); - break; - } - case Request:: read: - case Request:: readv: { - jam(); - sendSignal(ref, GSN_FSREADCONF, signal, 1, JBB); - break; - } - case Request:: readPartial: { - jam(); - fsConf->bytes_read = request->par.readWrite.pages[0].size; - sendSignal(ref, GSN_FSREADCONF, signal, 2, JBB); - break; - } - case Request:: sync: { - jam(); - sendSignal(ref, GSN_FSSYNCCONF, signal, 1, JBB); - break; - }//case - case Request::append: - case Request::append_synch: - { - jam(); - signal->theData[1] = request->par.append.size; - sendSignal(ref, GSN_FSAPPENDCONF, signal, 2, JBB); - break; - } - case Request::rmrf: { - jam(); - // Put the file in idle files list - theIdleFiles.push_back(request->file); - sendSignal(ref, GSN_FSREMOVECONF, signal, 1, JBB); - break; - } - case Request:: end: { - // Report nothing - break; - } - } - }//if - signal->setTrace(orgTrace); -} - - -bool -Ndbfs::scanIPC(Signal* signal) -{ - Request* request = theFromThreads.tryReadChannel(); - jam(); - if (request) { - jam(); - report(request, signal); - theRequestPool->put(request); - return true; - } - return false; -} - -#if defined NDB_WIN32 -Uint32 Ndbfs::translateErrno(int aErrno) -{ - switch (aErrno) - { - //permission denied - case ERROR_ACCESS_DENIED: - - return FsRef::fsErrPermissionDenied; - //temporary not accessible - case ERROR_PATH_BUSY: - case ERROR_NO_MORE_SEARCH_HANDLES: - - return FsRef::fsErrTemporaryNotAccessible; - //no space left on device - case ERROR_HANDLE_DISK_FULL: - case ERROR_DISK_FULL: - - return FsRef::fsErrNoSpaceLeftOnDevice; - //none valid parameters - case ERROR_INVALID_HANDLE: - case ERROR_INVALID_DRIVE: - case ERROR_INVALID_ACCESS: - case ERROR_HANDLE_EOF: - case ERROR_BUFFER_OVERFLOW: - - return FsRef::fsErrInvalidParameters; - //environment error - case ERROR_CRC: - case ERROR_ARENA_TRASHED: - case ERROR_BAD_ENVIRONMENT: - case ERROR_INVALID_BLOCK: - case ERROR_WRITE_FAULT: - case ERROR_READ_FAULT: - case ERROR_OPEN_FAILED: - - return FsRef::fsErrEnvironmentError; - - //no more process resources - case ERROR_TOO_MANY_OPEN_FILES: - case ERROR_NOT_ENOUGH_MEMORY: - case ERROR_OUTOFMEMORY: - return FsRef::fsErrNoMoreResources; - //no file - case ERROR_FILE_NOT_FOUND: - return FsRef::fsErrFileDoesNotExist; - - case ERR_ReadUnderflow: - return FsRef::fsErrReadUnderflow; - - default: - return FsRef::fsErrUnknown; - } -} -#else -Uint32 Ndbfs::translateErrno(int aErrno) -{ - switch (aErrno) - { - //permission denied - case EACCES: - case 
EROFS: - case ENXIO: - return FsRef::fsErrPermissionDenied; - //temporary not accessible - case EAGAIN: - case ETIMEDOUT: - case ENOLCK: - case EINTR: - case EIO: - return FsRef::fsErrTemporaryNotAccessible; - //no space left on device - case ENFILE: - case EDQUOT: -#ifdef ENOSR - case ENOSR: -#endif - case ENOSPC: - case EFBIG: - return FsRef::fsErrNoSpaceLeftOnDevice; - //none valid parameters - case EINVAL: - case EBADF: - case ENAMETOOLONG: - case EFAULT: - case EISDIR: - case ENOTDIR: - case EEXIST: - case ETXTBSY: - return FsRef::fsErrInvalidParameters; - //environment error - case ELOOP: -#ifdef ENOLINK - case ENOLINK: -#endif -#ifdef EMULTIHOP - case EMULTIHOP: -#endif -#ifdef EOPNOTSUPP - case EOPNOTSUPP: -#endif -#ifdef ESPIPE - case ESPIPE: -#endif - case EPIPE: - return FsRef::fsErrEnvironmentError; - - //no more process resources - case EMFILE: - case ENOMEM: - return FsRef::fsErrNoMoreResources; - //no file - case ENOENT: - return FsRef::fsErrFileDoesNotExist; - - case ERR_ReadUnderflow: - return FsRef::fsErrReadUnderflow; - - default: - return FsRef::fsErrUnknown; - } -} -#endif - - - -void -Ndbfs::execCONTINUEB(Signal* signal) -{ - jamEntry(); - if (signal->theData[0] == NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY) { - jam(); - - // Also send CONTINUEB to ourself in order to scan for - // incoming answers from AsyncFile on MemoryChannel theFromThreads - signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1); - if (scanningInProgress == true) { - jam(); - return; - } - } - if (scanIPC(signal)) { - jam(); - scanningInProgress = true; - signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY; - sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB); - } else { - jam(); - scanningInProgress = false; - } - return; -} - -void -Ndbfs::execDUMP_STATE_ORD(Signal* signal) -{ - if(signal->theData[0] == 19){ - return; - } - if(signal->theData[0] == DumpStateOrd::NdbfsDumpFileStat){ - infoEvent("NDBFS: Files: %d Open files: %d", - theFiles.size(), - theOpenFiles.size()); - infoEvent(" Idle files: %d Max opened files: %d", - theIdleFiles.size(), - m_maxOpenedFiles); - infoEvent(" Max files: %d", - m_maxFiles); - infoEvent(" Requests: %d", - theRequestPool->size()); - - return; - } - if(signal->theData[0] == DumpStateOrd::NdbfsDumpOpenFiles){ - infoEvent("NDBFS: Dump open files: %d", theOpenFiles.size()); - - for (unsigned i = 0; i < theOpenFiles.size(); i++){ - AsyncFile* file = theOpenFiles.getFile(i); - infoEvent("%2d (0x%x): %s", i,file, file->theFileName.c_str()); - } - return; - } - if(signal->theData[0] == DumpStateOrd::NdbfsDumpAllFiles){ - infoEvent("NDBFS: Dump all files: %d", theFiles.size()); - - for (unsigned i = 0; i < theFiles.size(); i++){ - AsyncFile* file = theFiles[i]; - infoEvent("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED"); - } - return; - } - if(signal->theData[0] == DumpStateOrd::NdbfsDumpIdleFiles){ - infoEvent("NDBFS: Dump idle files: %d", theIdleFiles.size()); - - for (unsigned i = 0; i < theIdleFiles.size(); i++){ - AsyncFile* file = theIdleFiles[i]; - infoEvent("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED"); - } - return; - } - - if(signal->theData[0] == 404) - { - ndbrequire(signal->getLength() == 2); - Uint32 file= signal->theData[1]; - AsyncFile* openFile = theOpenFiles.find(file); - ndbrequire(openFile != 0); - ndbout_c("File: %s %p", openFile->theFileName.c_str(), openFile); - Request* curr = openFile->m_current_request; - Request* last = 
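execCONTINUEB above implements a two-speed poll of the MemoryChannel: a 10 ms heartbeat that always re-arms itself, and a zero-delay loop that keeps running only while scanIPC keeps finding completed requests. The control flow distilled into a standalone sketch:

// Two-speed polling state machine distilled from execCONTINUEB/scanIPC above.
enum Tick { HEARTBEAT_10MS, IMMEDIATE };

struct ChannelPoller {
  bool scanning = false;                  // a zero-delay scan loop is in flight

  void onTick(Tick t, bool (*scanOne)(), void (*schedule)(Tick, int /*ms*/))
  {
    if (t == HEARTBEAT_10MS) {
      schedule(HEARTBEAT_10MS, 10);       // the heartbeat always re-arms itself
      if (scanning)
        return;                           // fast loop is already draining the channel
    }
    if (scanOne()) {                      // one completed request was reported
      scanning = true;
      schedule(IMMEDIATE, 0);             // keep draining with no delay
    } else {
      scanning = false;                   // channel empty; back to the heartbeat
    }
  }
};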
openFile->m_last_request; - if(curr) - ndbout << "Current request: " << *curr << endl; - if(last) - ndbout << "Last request: " << *last << endl; - - ndbout << "theReportTo " << *openFile->theReportTo << endl; - ndbout << "theMemoryChannelPtr" << *openFile->theMemoryChannelPtr << endl; - - ndbout << "All files: " << endl; - for (unsigned i = 0; i < theFiles.size(); i++){ - AsyncFile* file = theFiles[i]; - ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED"); - } - } -}//Ndbfs::execDUMP_STATE_ORD() - -const char* -Ndbfs::get_filename(Uint32 fd) const -{ - jamEntry(); - const AsyncFile* openFile = theOpenFiles.find(fd); - if(openFile) - return openFile->theFileName.get_base_name(); - return ""; -} - - -BLOCK_FUNCTIONS(Ndbfs) - -template class Vector; -template class Vector; -template class MemoryChannel; -template class Pool; -template NdbOut& operator<<(NdbOut&, const MemoryChannel&); diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp deleted file mode 100644 index df39890af49..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp +++ /dev/null @@ -1,131 +0,0 @@ -/* Copyright (c) 2003-2006 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef SIMBLOCKASYNCFILESYSTEM_H -#define SIMBLOCKASYNCFILESYSTEM_H - -#include -#include -#include "Pool.hpp" -#include "AsyncFile.hpp" -#include "OpenFiles.hpp" - - - -// Because one NDB Signal request can result in multiple requests to -// AsyncFile one class must be made responsible to keep track -// of all out standing request and when all are finished the result -// must be reported to the sending block. 
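The comment above describes the usual fan-out/fan-in bookkeeping: one incoming signal may spawn several AsyncFile requests, and the confirmation goes back to the sending block only when the last one completes. A minimal sketch of that bookkeeping, not taken from the removed sources:

#include <cstdint>

// Fan-out/fan-in bookkeeping as described above: reply to the sending block
// only once the last outstanding sub-request has completed.
struct OutstandingWork {
  uint32_t pending = 0;      // sub-requests still running
  uint32_t firstError = 0;   // first error seen, 0 if none

  void started() { ++pending; }

  // Returns true when the caller should send the CONF/REF back.
  bool finished(uint32_t error)
  {
    if (error != 0 && firstError == 0)
      firstError = error;
    return --pending == 0;
  }
};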
- - -class Ndbfs : public SimulatedBlock -{ -public: - Ndbfs(Block_context&); - virtual ~Ndbfs(); - - virtual const char* get_filename(Uint32 fd) const; -protected: - BLOCK_DEFINES(Ndbfs); - - // The signal processing functions - void execREAD_CONFIG_REQ(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - void execFSOPENREQ(Signal* signal); - void execFSCLOSEREQ(Signal* signal); - void execFSWRITEREQ(Signal* signal); - void execFSREADREQ(Signal* signal); - void execFSSYNCREQ(Signal* signal); - void execFSAPPENDREQ(Signal* signal); - void execFSREMOVEREQ(Signal* signal); - void execSTTOR(Signal* signal); - void execCONTINUEB(Signal* signal); - - bool scanningInProgress; - Uint16 newId(); - -private: - int forward(AsyncFile *file, Request* Request); - void report(Request* request, Signal* signal); - bool scanIPC(Signal* signal); - - // Declared but not defined - Ndbfs(Ndbfs & ); - void operator = (Ndbfs &); - - // Used for uniqe number generation - Uint16 theLastId; - BlockReference cownref; - - // Communication from files - MemoryChannel theFromThreads; - - Pool* theRequestPool; - - AsyncFile* createAsyncFile(); - AsyncFile* getIdleFile(); - - Vector theFiles; // List all created AsyncFiles - Vector theIdleFiles; // List of idle AsyncFiles - OpenFiles theOpenFiles; // List of open AsyncFiles - - BaseString theFileSystemPath; - BaseString theBackupFilePath; - - // Statistics variables - Uint32 m_maxOpenedFiles; - - // Limit for max number of AsyncFiles created - Uint32 m_maxFiles; - - void readWriteRequest( int action, Signal * signal ); - - static Uint32 translateErrno(int aErrno); -}; - -class VoidFs : public SimulatedBlock -{ -public: - VoidFs(Block_context&); - virtual ~VoidFs(); - -protected: - BLOCK_DEFINES(VoidFs); - - // The signal processing functions - void execREAD_CONFIG_REQ(Signal* signal); - void execDUMP_STATE_ORD(Signal* signal); - void execFSOPENREQ(Signal* signal); - void execFSCLOSEREQ(Signal* signal); - void execFSWRITEREQ(Signal* signal); - void execFSREADREQ(Signal* signal); - void execFSSYNCREQ(Signal* signal); - void execFSAPPENDREQ(Signal* signal); - void execFSREMOVEREQ(Signal* signal); - void execSTTOR(Signal* signal); - -private: - // Declared but not defined - VoidFs(VoidFs & ); - void operator = (VoidFs &); - - // Used for uniqe number generation - Uint32 c_maxFileNo; -}; - -#endif - - diff --git a/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp b/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp deleted file mode 100644 index ed1e99a1409..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp +++ /dev/null @@ -1,120 +0,0 @@ -/* Copyright (c) 2003, 2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef OPENFILES_H -#define OPENFILES_H - -#include - -class OpenFiles -{ -public: - OpenFiles(){ } - - /* Get a pointer to the file with id */ - AsyncFile* find(Uint16 id) const; - /* Insert file with id */ - bool insert(AsyncFile* file, Uint16 id); - /* Erase file with id */ - bool erase(Uint16 id); - /* Get number of open files */ - unsigned size(); - - Uint16 getId(unsigned i); - AsyncFile* getFile(unsigned i); - - -private: - - class OpenFileItem { - public: - OpenFileItem(): m_file(NULL), m_id(0){}; - - AsyncFile* m_file; - Uint16 m_id; - }; - - Vector m_files; -}; - - -//***************************************************************************** -inline AsyncFile* OpenFiles::find(Uint16 id) const { - for (unsigned i = 0; i < m_files.size(); i++){ - if (m_files[i].m_id == id){ - return m_files[i].m_file; - } - } - return NULL; -} - -//***************************************************************************** -inline bool OpenFiles::erase(Uint16 id){ - for (unsigned i = 0; i < m_files.size(); i++){ - if (m_files[i].m_id == id){ - m_files.erase(i); - return true; - } - } - // Item was not found in list - return false; -} - - -//***************************************************************************** -inline bool OpenFiles::insert(AsyncFile* file, Uint16 id){ - // Check if file has already been opened - for (unsigned i = 0; i < m_files.size(); i++){ - if(m_files[i].m_file == NULL) - continue; - - if(strcmp(m_files[i].m_file->theFileName.c_str(), - file->theFileName.c_str()) == 0) - { - BaseString names; - names.assfmt("open: >%s< existing: >%s<", - file->theFileName.c_str(), - m_files[i].m_file->theFileName.c_str()); - ERROR_SET(fatal, NDBD_EXIT_AFS_ALREADY_OPEN, names.c_str(), - "OpenFiles::insert()"); - } - } - - // Insert the file into vector - OpenFileItem openFile; - openFile.m_id = id; - openFile.m_file = file; - m_files.push_back(openFile); - - return true; -} - -//***************************************************************************** -inline Uint16 OpenFiles::getId(unsigned i){ - return m_files[i].m_id; -} - -//***************************************************************************** -inline AsyncFile* OpenFiles::getFile(unsigned i){ - return m_files[i].m_file; -} - -//***************************************************************************** -inline unsigned OpenFiles::size(){ - return m_files.size(); -} - -#endif diff --git a/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp b/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp deleted file mode 100644 index e78167d2350..00000000000 --- a/storage/ndb/src/kernel/blocks/ndbfs/Pool.hpp +++ /dev/null @@ -1,261 +0,0 @@ -/* Copyright (c) 2003-2005 MySQL AB - Use is subject to license terms - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; version 2 of the License. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
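OpenFiles above is a plain linear-scan map from 16-bit file pointers to AsyncFile*; a short usage sketch mirroring how report() and execFSCLOSEREQ drive it (the function is illustrative, not from the removed sources):

// Typical use of the container above, mirroring report()/execFSCLOSEREQ.
void exampleOpenFilesUsage(OpenFiles& open, AsyncFile* file, Uint16 id)
{
  open.insert(file, id);              // after a successful open request
  AsyncFile* f = open.find(id);       // looked up on every later FS request
  if (f != NULL)
    open.erase(id);                   // dropped again once the file is closed
}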
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ - -#ifndef FOR_LIB_POOL_H -#define FOR_LIB_POOL_H - - -//=========================================================================== -// -// .PUBLIC -// -//=========================================================================== - -//////////////////////////////////////////////////////////////// -// -// enum { defInitSize = 256, defIncSize = 64 }; -// Description: type to store initial and incremental size in. -// -//////////////////////////////////////////////////////////////// -// -// Pool(int anInitSize = defInitSize, int anIncSize = defIncSize); -// Description: -// Constructor. Allocates anInitSize of objects